Loading...
1/*
2 * builtin-trace.c
3 *
4 * Builtin 'trace' command:
5 *
6 * Display a continuously updated trace of any workload, CPU, specific PID,
7 * system wide, etc. Default format is loosely strace like, but any other
8 * event may be specified using --event.
9 *
10 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
11 *
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
13 *
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
15 *
16 * Released under the GPL v2. (and only v2, not any later version)
17 */
18
19#include <traceevent/event-parse.h>
20#include <api/fs/tracing_path.h>
21#include "builtin.h"
22#include "util/color.h"
23#include "util/debug.h"
24#include "util/evlist.h"
25#include <subcmd/exec-cmd.h>
26#include "util/machine.h"
27#include "util/session.h"
28#include "util/thread.h"
29#include <subcmd/parse-options.h>
30#include "util/strlist.h"
31#include "util/intlist.h"
32#include "util/thread_map.h"
33#include "util/stat.h"
34#include "trace-event.h"
35#include "util/parse-events.h"
36#include "util/bpf-loader.h"
37
38#include <libaudit.h>
39#include <stdlib.h>
40#include <sys/mman.h>
41#include <linux/futex.h>
42#include <linux/err.h>
43
44/* For older distros: */
45#ifndef MAP_STACK
46# define MAP_STACK 0x20000
47#endif
48
49#ifndef MADV_HWPOISON
50# define MADV_HWPOISON 100
51
52#endif
53
54#ifndef MADV_MERGEABLE
55# define MADV_MERGEABLE 12
56#endif
57
58#ifndef MADV_UNMERGEABLE
59# define MADV_UNMERGEABLE 13
60#endif
61
62#ifndef EFD_SEMAPHORE
63# define EFD_SEMAPHORE 1
64#endif
65
66#ifndef EFD_NONBLOCK
67# define EFD_NONBLOCK 00004000
68#endif
69
70#ifndef EFD_CLOEXEC
71# define EFD_CLOEXEC 02000000
72#endif
73
74#ifndef O_CLOEXEC
75# define O_CLOEXEC 02000000
76#endif
77
78#ifndef SOCK_DCCP
79# define SOCK_DCCP 6
80#endif
81
82#ifndef SOCK_CLOEXEC
83# define SOCK_CLOEXEC 02000000
84#endif
85
86#ifndef SOCK_NONBLOCK
87# define SOCK_NONBLOCK 00004000
88#endif
89
90#ifndef MSG_CMSG_CLOEXEC
91# define MSG_CMSG_CLOEXEC 0x40000000
92#endif
93
94#ifndef PERF_FLAG_FD_NO_GROUP
95# define PERF_FLAG_FD_NO_GROUP (1UL << 0)
96#endif
97
98#ifndef PERF_FLAG_FD_OUTPUT
99# define PERF_FLAG_FD_OUTPUT (1UL << 1)
100#endif
101
102#ifndef PERF_FLAG_PID_CGROUP
103# define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
104#endif
105
106#ifndef PERF_FLAG_FD_CLOEXEC
107# define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */
108#endif
109
110
/*
 * Describes how to extract one field from a tracepoint's raw sample
 * payload: 'offset' is the byte offset inside sample->raw_data, and the
 * anonymous union holds the decoder used for that field -- 'integer' for
 * fixed-size unsigned values, 'pointer' for in-place data.
 */
struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};

/*
 * Generate tp_field__u{8,16,32,64}(): copy an N-bit unsigned integer out
 * of the raw payload (memcpy, not a pointer cast, so unaligned offsets
 * are safe) and widen it to u64.
 */
#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value; \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);

/*
 * Same as TP_UINT_FIELD, but byte-swap the value after the copy, for
 * samples recorded with the opposite endianness to the host.
 */
#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);
143
144static int tp_field__init_uint(struct tp_field *field,
145 struct format_field *format_field,
146 bool needs_swap)
147{
148 field->offset = format_field->offset;
149
150 switch (format_field->size) {
151 case 1:
152 field->integer = tp_field__u8;
153 break;
154 case 2:
155 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
156 break;
157 case 4:
158 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
159 break;
160 case 8:
161 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
162 break;
163 default:
164 return -1;
165 }
166
167 return 0;
168}
169
/* Return a pointer straight into the raw payload at the field's offset. */
static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}

/* Set up 'field' as an in-place pointer accessor; always succeeds. */
static int tp_field__init_ptr(struct tp_field *field, struct format_field *format_field)
{
	field->offset = format_field->offset;
	field->pointer = tp_field__ptr;
	return 0;
}

/*
 * Per-evsel state for the syscall tracepoints: the common syscall 'id'
 * field plus either the entry 'args' block or the exit 'ret' value (a
 * given evsel only ever uses one of the two, hence the union).
 */
struct syscall_tp {
	struct tp_field id;
	union {
		struct tp_field args, ret;
	};
};
188
/*
 * Look up tracepoint field 'name' in this evsel's format and initialize
 * 'field' as an integer accessor for it. Returns -1 if the field doesn't
 * exist or has an unsupported size.
 */
static int perf_evsel__init_tp_uint_field(struct perf_evsel *evsel,
					  struct tp_field *field,
					  const char *name)
{
	struct format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

/* Init the syscall_tp member 'name' (in evsel->priv) as an integer field. */
#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })

/*
 * Same as the uint variant, but set up 'field' as a pointer accessor for
 * the named tracepoint field. Returns -1 if the field doesn't exist.
 */
static int perf_evsel__init_tp_ptr_field(struct perf_evsel *evsel,
					 struct tp_field *field,
					 const char *name)
{
	struct format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

/* Init the syscall_tp member 'name' (in evsel->priv) as a pointer field. */
#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })

/* Free the evsel's private syscall_tp state, then the evsel itself. */
static void perf_evsel__delete_priv(struct perf_evsel *evsel)
{
	zfree(&evsel->priv);
	perf_evsel__delete(evsel);
}
226
227static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel, void *handler)
228{
229 evsel->priv = malloc(sizeof(struct syscall_tp));
230 if (evsel->priv != NULL) {
231 if (perf_evsel__init_sc_tp_uint_field(evsel, id))
232 goto out_delete;
233
234 evsel->handler = handler;
235 return 0;
236 }
237
238 return -ENOMEM;
239
240out_delete:
241 zfree(&evsel->priv);
242 return -ENOENT;
243}
244
/*
 * Create a tracepoint evsel for the 'direction' event in the
 * "raw_syscalls" group, falling back to the older "syscalls" group, and
 * initialize its syscall_tp state + handler. Returns NULL on failure.
 */
static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);

	/* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = perf_evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (perf_evsel__init_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	perf_evsel__delete_priv(evsel);
	return NULL;
}

/* Decode the integer tracepoint field 'name' from 'sample' for this evsel. */
#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.integer(&fields->name, sample); })

/* Get a pointer to the tracepoint field 'name' inside 'sample'. */
#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.pointer(&fields->name, sample); })

/*
 * Context handed to the per-argument beautifier callbacks:
 * @val:    raw syscall argument value
 * @thread: thread that issued the syscall
 * @trace:  global trace session state
 * @parm:   optional callback-specific parameter (e.g. a strarray)
 * @idx:    zero-based index of this argument
 * @mask:   bitmask of argument positions the beautifiers chose to suppress
 */
struct syscall_arg {
	unsigned long val;
	struct thread *thread;
	struct trace *trace;
	void *parm;
	u8 idx;
	u8 mask;
};
282
/*
 * A table mapping small integer values to symbolic names, used by the
 * STRARRAY beautifiers; 'offset' is subtracted from the value before
 * indexing, for tables whose first meaningful value isn't 0.
 */
struct strarray {
	int offset;
	int nr_entries;
	const char **entries;
};

/* Define strarray__<array> over an existing string table, first value 0. */
#define DEFINE_STRARRAY(array) struct strarray strarray__##array = { \
	.nr_entries = ARRAY_SIZE(array), \
	.entries = array, \
}

/* Same, but the table's first entry corresponds to value 'off'. */
#define DEFINE_STRARRAY_OFFSET(array, off) struct strarray strarray__##array = { \
	.offset = off, \
	.nr_entries = ARRAY_SIZE(array), \
	.entries = array, \
}
299
300static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
301 const char *intfmt,
302 struct syscall_arg *arg)
303{
304 struct strarray *sa = arg->parm;
305 int idx = arg->val - sa->offset;
306
307 if (idx < 0 || idx >= sa->nr_entries)
308 return scnprintf(bf, size, intfmt, arg->val);
309
310 return scnprintf(bf, size, "%s", sa->entries[idx]);
311}
312
313static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
314 struct syscall_arg *arg)
315{
316 return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
317}
318
319#define SCA_STRARRAY syscall_arg__scnprintf_strarray
320
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches as soon as the ioctl beautifier
 * gets rewritten to support all arches.
 */
/* strarray beautifier that prints out-of-range values in hex. */
static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%#x", arg);
}

#define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
#endif /* defined(__i386__) || defined(__x86_64__) */

/* File descriptor beautifier (forward declaration; defined later in the file). */
static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
					struct syscall_arg *arg);

#define SCA_FD syscall_arg__scnprintf_fd

/* Like SCA_FD, but prints the AT_FDCWD sentinel of the *at() syscalls as "CWD". */
static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
					   struct syscall_arg *arg)
{
	int fd = arg->val;

	if (fd == AT_FDCWD)
		return scnprintf(bf, size, "CWD");

	return syscall_arg__scnprintf_fd(bf, size, arg);
}

#define SCA_FDAT syscall_arg__scnprintf_fd_at

/* close(2)'s fd beautifier (forward declaration; defined later in the file). */
static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
357
358static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
359 struct syscall_arg *arg)
360{
361 return scnprintf(bf, size, "%#lx", arg->val);
362}
363
364#define SCA_HEX syscall_arg__scnprintf_hex
365
366static size_t syscall_arg__scnprintf_int(char *bf, size_t size,
367 struct syscall_arg *arg)
368{
369 return scnprintf(bf, size, "%d", arg->val);
370}
371
372#define SCA_INT syscall_arg__scnprintf_int
373
/* Decode mmap(2)/mprotect(2)'s prot argument into "EXEC|READ|..." form. */
static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
					       struct syscall_arg *arg)
{
	int printed = 0, prot = arg->val;

	if (prot == PROT_NONE)
		return scnprintf(bf, size, "NONE");
#define P_MMAP_PROT(n) \
	if (prot & PROT_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		prot &= ~PROT_##n; \
	}

	P_MMAP_PROT(EXEC);
	P_MMAP_PROT(READ);
	P_MMAP_PROT(WRITE);
#ifdef PROT_SEM
	P_MMAP_PROT(SEM);
#endif
	P_MMAP_PROT(GROWSDOWN);
	P_MMAP_PROT(GROWSUP);
#undef P_MMAP_PROT

	/* Any bits we don't know about get printed raw in hex. */
	if (prot)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", prot);

	return printed;
}

#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
404
/* Decode mmap(2)'s flags argument into a "SHARED|FIXED|..." MAP_* list. */
static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

#define P_MMAP_FLAG(n) \
	if (flags & MAP_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~MAP_##n; \
	}

	P_MMAP_FLAG(SHARED);
	P_MMAP_FLAG(PRIVATE);
#ifdef MAP_32BIT
	P_MMAP_FLAG(32BIT);
#endif
	P_MMAP_FLAG(ANONYMOUS);
	P_MMAP_FLAG(DENYWRITE);
	P_MMAP_FLAG(EXECUTABLE);
	P_MMAP_FLAG(FILE);
	P_MMAP_FLAG(FIXED);
	P_MMAP_FLAG(GROWSDOWN);
#ifdef MAP_HUGETLB
	P_MMAP_FLAG(HUGETLB);
#endif
	P_MMAP_FLAG(LOCKED);
	P_MMAP_FLAG(NONBLOCK);
	P_MMAP_FLAG(NORESERVE);
	P_MMAP_FLAG(POPULATE);
	P_MMAP_FLAG(STACK);
#ifdef MAP_UNINITIALIZED
	P_MMAP_FLAG(UNINITIALIZED);
#endif
#undef P_MMAP_FLAG

	/* Any bits we don't know about get printed raw in hex. */
	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
447
/* Decode mremap(2)'s flags argument (MREMAP_MAYMOVE, MREMAP_FIXED). */
static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
						  struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

#define P_MREMAP_FLAG(n) \
	if (flags & MREMAP_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~MREMAP_##n; \
	}

	P_MREMAP_FLAG(MAYMOVE);
#ifdef MREMAP_FIXED
	P_MREMAP_FLAG(FIXED);
#endif
#undef P_MREMAP_FLAG

	/* Any bits we don't know about get printed raw in hex. */
	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_MREMAP_FLAGS syscall_arg__scnprintf_mremap_flags
472
473static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
474 struct syscall_arg *arg)
475{
476 int behavior = arg->val;
477
478 switch (behavior) {
479#define P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)
480 P_MADV_BHV(NORMAL);
481 P_MADV_BHV(RANDOM);
482 P_MADV_BHV(SEQUENTIAL);
483 P_MADV_BHV(WILLNEED);
484 P_MADV_BHV(DONTNEED);
485 P_MADV_BHV(REMOVE);
486 P_MADV_BHV(DONTFORK);
487 P_MADV_BHV(DOFORK);
488 P_MADV_BHV(HWPOISON);
489#ifdef MADV_SOFT_OFFLINE
490 P_MADV_BHV(SOFT_OFFLINE);
491#endif
492 P_MADV_BHV(MERGEABLE);
493 P_MADV_BHV(UNMERGEABLE);
494#ifdef MADV_HUGEPAGE
495 P_MADV_BHV(HUGEPAGE);
496#endif
497#ifdef MADV_NOHUGEPAGE
498 P_MADV_BHV(NOHUGEPAGE);
499#endif
500#ifdef MADV_DONTDUMP
501 P_MADV_BHV(DONTDUMP);
502#endif
503#ifdef MADV_DODUMP
504 P_MADV_BHV(DODUMP);
505#endif
506#undef P_MADV_PHV
507 default: break;
508 }
509
510 return scnprintf(bf, size, "%#x", behavior);
511}
512
513#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
514
515static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
516 struct syscall_arg *arg)
517{
518 int printed = 0, op = arg->val;
519
520 if (op == 0)
521 return scnprintf(bf, size, "NONE");
522#define P_CMD(cmd) \
523 if ((op & LOCK_##cmd) == LOCK_##cmd) { \
524 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #cmd); \
525 op &= ~LOCK_##cmd; \
526 }
527
528 P_CMD(SH);
529 P_CMD(EX);
530 P_CMD(NB);
531 P_CMD(UN);
532 P_CMD(MAND);
533 P_CMD(RW);
534 P_CMD(READ);
535 P_CMD(WRITE);
536#undef P_OP
537
538 if (op)
539 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", op);
540
541 return printed;
542}
543
544#define SCA_FLOCK syscall_arg__scnprintf_flock
545
/*
 * Decode futex(2)'s 'op' argument: print the FUTEX_* command name plus
 * the PRIVATE/CLOCK_REALTIME modifier flags. As a side effect, each
 * command sets bits in arg->mask for the trailing syscall arguments it
 * doesn't use, so those won't be printed.
 */
static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg)
{
	/* One bit per futex() argument position, for arg->mask suppression. */
	enum syscall_futex_args {
		SCF_UADDR = (1 << 0),
		SCF_OP = (1 << 1),
		SCF_VAL = (1 << 2),
		SCF_TIMEOUT = (1 << 3),
		SCF_UADDR2 = (1 << 4),
		SCF_VAL3 = (1 << 5),
	};
	int op = arg->val;
	int cmd = op & FUTEX_CMD_MASK;
	size_t printed = 0;

	switch (cmd) {
#define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n);
	P_FUTEX_OP(WAIT); arg->mask |= SCF_VAL3|SCF_UADDR2; break;
	P_FUTEX_OP(WAKE); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(FD); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(REQUEUE); arg->mask |= SCF_VAL3|SCF_TIMEOUT; break;
	P_FUTEX_OP(CMP_REQUEUE); arg->mask |= SCF_TIMEOUT; break;
	P_FUTEX_OP(CMP_REQUEUE_PI); arg->mask |= SCF_TIMEOUT; break;
	P_FUTEX_OP(WAKE_OP); break;
	P_FUTEX_OP(LOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(UNLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(TRYLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2; break;
	P_FUTEX_OP(WAIT_BITSET); arg->mask |= SCF_UADDR2; break;
	P_FUTEX_OP(WAKE_BITSET); arg->mask |= SCF_UADDR2; break;
	P_FUTEX_OP(WAIT_REQUEUE_PI); break;
	default: printed = scnprintf(bf, size, "%#x", cmd); break;
	}

	if (op & FUTEX_PRIVATE_FLAG)
		printed += scnprintf(bf + printed, size - printed, "|PRIV");

	if (op & FUTEX_CLOCK_REALTIME)
		printed += scnprintf(bf + printed, size - printed, "|CLKRT");

	return printed;
}

#define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op
588
/* bpf(2) command names, indexed by the syscall's 'cmd' value. */
static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD",
};
static DEFINE_STRARRAY(bpf_cmd);

/* epoll_ctl(2) op names; the first op value is 1, hence the offset. */
static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1);

/* {get,set}itimer(2) 'which' values. */
static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers);

/* keyctl(2) operation names. */
static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options);

/* lseek(2) 'whence' values; DATA/HOLE only when the headers provide them. */
static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences);

/* fcntl(2) command names. */
static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "F_GETLK64",
	"F_SETLK64", "F_SETLKW64", "F_SETOWN_EX", "F_GETOWN_EX",
	"F_GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds);

/* {get,set}rlimit(2) resource names. */
static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources);

/* sigprocmask(2) 'how' values. */
static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow);

/* clock id names for clock_gettime(2) and friends. */
static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid);

/* socket(2) address family names, indexed by AF_* value. */
static const char *socket_families[] = {
	"UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
	"BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
	"SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
	"RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
	"BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
	"ALG", "NFC", "VSOCK",
};
static DEFINE_STRARRAY(socket_families);

/* Fallback for headers that don't define it. */
#ifndef SOCK_TYPE_MASK
#define SOCK_TYPE_MASK 0xf
#endif
658
/*
 * Decode socket(2)'s type argument: the base SOCK_* type lives in the
 * low bits (SOCK_TYPE_MASK), with SOCK_CLOEXEC/SOCK_NONBLOCK OR'ed on top.
 */
static size_t syscall_arg__scnprintf_socket_type(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	size_t printed;
	int type = arg->val,
	    flags = type & ~SOCK_TYPE_MASK;

	type &= SOCK_TYPE_MASK;
	/*
	 * Can't use a strarray, MIPS may override for ABI reasons.
	 */
	switch (type) {
#define P_SK_TYPE(n) case SOCK_##n: printed = scnprintf(bf, size, #n); break;
	P_SK_TYPE(STREAM);
	P_SK_TYPE(DGRAM);
	P_SK_TYPE(RAW);
	P_SK_TYPE(RDM);
	P_SK_TYPE(SEQPACKET);
	P_SK_TYPE(DCCP);
	P_SK_TYPE(PACKET);
#undef P_SK_TYPE
	default:
		printed = scnprintf(bf, size, "%#x", type);
	}

#define P_SK_FLAG(n) \
	if (flags & SOCK_##n) { \
		printed += scnprintf(bf + printed, size - printed, "|%s", #n); \
		flags &= ~SOCK_##n; \
	}

	P_SK_FLAG(CLOEXEC);
	P_SK_FLAG(NONBLOCK);
#undef P_SK_FLAG

	/* Any flag bits we don't know about get printed raw in hex. */
	if (flags)
		printed += scnprintf(bf + printed, size - printed, "|%#x", flags);

	return printed;
}

#define SCA_SK_TYPE syscall_arg__scnprintf_socket_type
701
/* Fallback definitions for MSG_* bits missing from older headers. */
#ifndef MSG_PROBE
#define MSG_PROBE 0x10
#endif
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif
#ifndef MSG_SENDPAGE_NOTLAST
#define MSG_SENDPAGE_NOTLAST 0x20000
#endif
#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000
#endif

/* Decode the flags argument of the send/recv syscall family into MSG_* names. */
static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
					       struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

	if (flags == 0)
		return scnprintf(bf, size, "NONE");
#define P_MSG_FLAG(n) \
	if (flags & MSG_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~MSG_##n; \
	}

	P_MSG_FLAG(OOB);
	P_MSG_FLAG(PEEK);
	P_MSG_FLAG(DONTROUTE);
	P_MSG_FLAG(TRYHARD);
	P_MSG_FLAG(CTRUNC);
	P_MSG_FLAG(PROBE);
	P_MSG_FLAG(TRUNC);
	P_MSG_FLAG(DONTWAIT);
	P_MSG_FLAG(EOR);
	P_MSG_FLAG(WAITALL);
	P_MSG_FLAG(FIN);
	P_MSG_FLAG(SYN);
	P_MSG_FLAG(CONFIRM);
	P_MSG_FLAG(RST);
	P_MSG_FLAG(ERRQUEUE);
	P_MSG_FLAG(NOSIGNAL);
	P_MSG_FLAG(MORE);
	P_MSG_FLAG(WAITFORONE);
	P_MSG_FLAG(SENDPAGE_NOTLAST);
	P_MSG_FLAG(FASTOPEN);
	P_MSG_FLAG(CMSG_CLOEXEC);
#undef P_MSG_FLAG

	/* Any bits we don't know about get printed raw in hex. */
	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_MSG_FLAGS syscall_arg__scnprintf_msg_flags
758
759static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
760 struct syscall_arg *arg)
761{
762 size_t printed = 0;
763 int mode = arg->val;
764
765 if (mode == F_OK) /* 0 */
766 return scnprintf(bf, size, "F");
767#define P_MODE(n) \
768 if (mode & n##_OK) { \
769 printed += scnprintf(bf + printed, size - printed, "%s", #n); \
770 mode &= ~n##_OK; \
771 }
772
773 P_MODE(R);
774 P_MODE(W);
775 P_MODE(X);
776#undef P_MODE
777
778 if (mode)
779 printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
780
781 return printed;
782}
783
784#define SCA_ACCMODE syscall_arg__scnprintf_access_mode
785
/* Filename beautifier (forward declaration; defined later in the file). */
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_FILENAME syscall_arg__scnprintf_filename

/*
 * Decode open(2)-style flags into O_* names. Also suppresses printing of
 * the following 'mode' argument (via arg->mask) when O_CREAT is absent,
 * since mode is only meaningful together with O_CREAT.
 */
static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
					       struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

	if (!(flags & O_CREAT))
		arg->mask |= 1 << (arg->idx + 1); /* Mask the mode parm */

	if (flags == 0)
		return scnprintf(bf, size, "RDONLY");
#define	P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(APPEND);
	P_FLAG(ASYNC);
	P_FLAG(CLOEXEC);
	P_FLAG(CREAT);
	P_FLAG(DIRECT);
	P_FLAG(DIRECTORY);
	P_FLAG(EXCL);
	P_FLAG(LARGEFILE);
	P_FLAG(NOATIME);
	P_FLAG(NOCTTY);
/* NOTE(review): '#elif O_NDELAY' relies on an undefined macro evaluating
 * to 0 in the preprocessor; '#elif defined(O_NDELAY)' would state the
 * intent more clearly -- confirm before changing. */
#ifdef O_NONBLOCK
	P_FLAG(NONBLOCK);
#elif O_NDELAY
	P_FLAG(NDELAY);
#endif
#ifdef O_PATH
	P_FLAG(PATH);
#endif
	P_FLAG(RDWR);
#ifdef O_DSYNC
	/* Subset test: presumably O_SYNC includes O_DSYNC's bit, so print
	 * the stronger "SYNC" when all of O_SYNC's bits are set. */
	if ((flags & O_SYNC) == O_SYNC)
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC");
	else {
		P_FLAG(DSYNC);
	}
#else
	P_FLAG(SYNC);
#endif
	P_FLAG(TRUNC);
	P_FLAG(WRONLY);
#undef P_FLAG

	/* Any bits we don't know about get printed raw in hex. */
	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
846
/*
 * Decode perf_event_open(2)'s flags argument; returns 0 (prints nothing)
 * when no flag bits are set.
 */
static size_t syscall_arg__scnprintf_perf_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

	if (flags == 0)
		return 0;

#define	P_FLAG(n) \
	if (flags & PERF_FLAG_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~PERF_FLAG_##n; \
	}

	P_FLAG(FD_NO_GROUP);
	P_FLAG(FD_OUTPUT);
	P_FLAG(PID_CGROUP);
	P_FLAG(FD_CLOEXEC);
#undef P_FLAG

	/* Any bits we don't know about get printed raw in hex. */
	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_PERF_FLAGS syscall_arg__scnprintf_perf_flags
874
/* Decode eventfd2(2)'s flags argument into EFD_* names; 0 prints "NONE". */
static size_t syscall_arg__scnprintf_eventfd_flags(char *bf, size_t size,
						   struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

	if (flags == 0)
		return scnprintf(bf, size, "NONE");
#define	P_FLAG(n) \
	if (flags & EFD_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~EFD_##n; \
	}

	P_FLAG(SEMAPHORE);
	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	/* Any bits we don't know about get printed raw in hex. */
	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_EFD_FLAGS syscall_arg__scnprintf_eventfd_flags

/* Decode pipe2(2)'s flags argument (O_CLOEXEC, O_NONBLOCK). */
static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	/* Any bits we don't know about get printed raw in hex. */
	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
923
/* Map a signal number to its SIG* name; unknown values are printed in hex. */
static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg)
{
	int sig = arg->val;

	switch (sig) {
#define	P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, #n)
	P_SIGNUM(HUP);
	P_SIGNUM(INT);
	P_SIGNUM(QUIT);
	P_SIGNUM(ILL);
	P_SIGNUM(TRAP);
	P_SIGNUM(ABRT);
	P_SIGNUM(BUS);
	P_SIGNUM(FPE);
	P_SIGNUM(KILL);
	P_SIGNUM(USR1);
	P_SIGNUM(SEGV);
	P_SIGNUM(USR2);
	P_SIGNUM(PIPE);
	P_SIGNUM(ALRM);
	P_SIGNUM(TERM);
	P_SIGNUM(CHLD);
	P_SIGNUM(CONT);
	P_SIGNUM(STOP);
	P_SIGNUM(TSTP);
	P_SIGNUM(TTIN);
	P_SIGNUM(TTOU);
	P_SIGNUM(URG);
	P_SIGNUM(XCPU);
	P_SIGNUM(XFSZ);
	P_SIGNUM(VTALRM);
	P_SIGNUM(PROF);
	P_SIGNUM(WINCH);
	P_SIGNUM(IO);
	P_SIGNUM(PWR);
	P_SIGNUM(SYS);
/* Arch/libc-specific signals, only where the headers define them. */
#ifdef SIGEMT
	P_SIGNUM(EMT);
#endif
#ifdef SIGSTKFLT
	P_SIGNUM(STKFLT);
#endif
#ifdef SIGSWI
	P_SIGNUM(SWI);
#endif
	default: break;
	}

	return scnprintf(bf, size, "%#x", sig);
}

#define SCA_SIGNUM syscall_arg__scnprintf_signum
976
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
 */
#define TCGETS 0x5401

/*
 * tty ioctl request names, indexed from TCGETS (0x5401) via the strarray
 * offset below; designated initializers bridge the gaps in the numbering.
 */
static const char *tioctls[] = {
	"TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
	"TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL",
	"TIOCSCTTY", "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI",
	"TIOCGWINSZ", "TIOCSWINSZ", "TIOCMGET", "TIOCMBIS", "TIOCMBIC",
	"TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR", "FIONREAD", "TIOCLINUX",
	"TIOCCONS", "TIOCGSERIAL", "TIOCSSERIAL", "TIOCPKT", "FIONBIO",
	"TIOCNOTTY", "TIOCSETD", "TIOCGETD", "TCSBRKP", [0x27] = "TIOCSBRK",
	"TIOCCBRK", "TIOCGSID", "TCGETS2", "TCSETS2", "TCSETSW2", "TCSETSF2",
	"TIOCGRS485", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK",
	"TIOCGDEV||TCGETX", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG",
	"TIOCVHANGUP", "TIOCGPKT", "TIOCGPTLCK", "TIOCGEXCL",
	[0x50] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG",
	"TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS",
	"TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI",
	"TIOCMIWAIT", "TIOCGICOUNT", [0x60] = "FIOQSIZE",
};

static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
#endif /* defined(__i386__) || defined(__x86_64__) */

/* Shorthand: wire syscall argument 'arg' to the strarray beautifier over 'array'. */
#define STRARRAY(arg, name, array) \
	  .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
	  .arg_parm	 = { [arg] = &strarray__##array, }
1007
1008static struct syscall_fmt {
1009 const char *name;
1010 const char *alias;
1011 size_t (*arg_scnprintf[6])(char *bf, size_t size, struct syscall_arg *arg);
1012 void *arg_parm[6];
1013 bool errmsg;
1014 bool timeout;
1015 bool hexret;
1016} syscall_fmts[] = {
1017 { .name = "access", .errmsg = true,
1018 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */
1019 [1] = SCA_ACCMODE, /* mode */ }, },
1020 { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
1021 { .name = "bpf", .errmsg = true, STRARRAY(0, cmd, bpf_cmd), },
1022 { .name = "brk", .hexret = true,
1023 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
1024 { .name = "chdir", .errmsg = true,
1025 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1026 { .name = "chmod", .errmsg = true,
1027 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1028 { .name = "chroot", .errmsg = true,
1029 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1030 { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), },
1031 { .name = "close", .errmsg = true,
1032 .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
1033 { .name = "connect", .errmsg = true, },
1034 { .name = "creat", .errmsg = true,
1035 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1036 { .name = "dup", .errmsg = true,
1037 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1038 { .name = "dup2", .errmsg = true,
1039 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1040 { .name = "dup3", .errmsg = true,
1041 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1042 { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
1043 { .name = "eventfd2", .errmsg = true,
1044 .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
1045 { .name = "faccessat", .errmsg = true,
1046 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
1047 [1] = SCA_FILENAME, /* filename */ }, },
1048 { .name = "fadvise64", .errmsg = true,
1049 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1050 { .name = "fallocate", .errmsg = true,
1051 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1052 { .name = "fchdir", .errmsg = true,
1053 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1054 { .name = "fchmod", .errmsg = true,
1055 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1056 { .name = "fchmodat", .errmsg = true,
1057 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
1058 [1] = SCA_FILENAME, /* filename */ }, },
1059 { .name = "fchown", .errmsg = true,
1060 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1061 { .name = "fchownat", .errmsg = true,
1062 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
1063 [1] = SCA_FILENAME, /* filename */ }, },
1064 { .name = "fcntl", .errmsg = true,
1065 .arg_scnprintf = { [0] = SCA_FD, /* fd */
1066 [1] = SCA_STRARRAY, /* cmd */ },
1067 .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
1068 { .name = "fdatasync", .errmsg = true,
1069 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1070 { .name = "flock", .errmsg = true,
1071 .arg_scnprintf = { [0] = SCA_FD, /* fd */
1072 [1] = SCA_FLOCK, /* cmd */ }, },
1073 { .name = "fsetxattr", .errmsg = true,
1074 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1075 { .name = "fstat", .errmsg = true, .alias = "newfstat",
1076 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1077 { .name = "fstatat", .errmsg = true, .alias = "newfstatat",
1078 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
1079 [1] = SCA_FILENAME, /* filename */ }, },
1080 { .name = "fstatfs", .errmsg = true,
1081 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1082 { .name = "fsync", .errmsg = true,
1083 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1084 { .name = "ftruncate", .errmsg = true,
1085 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1086 { .name = "futex", .errmsg = true,
1087 .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
1088 { .name = "futimesat", .errmsg = true,
1089 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
1090 [1] = SCA_FILENAME, /* filename */ }, },
1091 { .name = "getdents", .errmsg = true,
1092 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1093 { .name = "getdents64", .errmsg = true,
1094 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1095 { .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), },
1096 { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
1097 { .name = "getxattr", .errmsg = true,
1098 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1099 { .name = "inotify_add_watch", .errmsg = true,
1100 .arg_scnprintf = { [1] = SCA_FILENAME, /* pathname */ }, },
1101 { .name = "ioctl", .errmsg = true,
1102 .arg_scnprintf = { [0] = SCA_FD, /* fd */
1103#if defined(__i386__) || defined(__x86_64__)
1104/*
1105 * FIXME: Make this available to all arches.
1106 */
1107 [1] = SCA_STRHEXARRAY, /* cmd */
1108 [2] = SCA_HEX, /* arg */ },
1109 .arg_parm = { [1] = &strarray__tioctls, /* cmd */ }, },
1110#else
1111 [2] = SCA_HEX, /* arg */ }, },
1112#endif
1113 { .name = "keyctl", .errmsg = true, STRARRAY(0, option, keyctl_options), },
1114 { .name = "kill", .errmsg = true,
1115 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
1116 { .name = "lchown", .errmsg = true,
1117 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1118 { .name = "lgetxattr", .errmsg = true,
1119 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1120 { .name = "linkat", .errmsg = true,
1121 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
1122 { .name = "listxattr", .errmsg = true,
1123 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1124 { .name = "llistxattr", .errmsg = true,
1125 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1126 { .name = "lremovexattr", .errmsg = true,
1127 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1128 { .name = "lseek", .errmsg = true,
1129 .arg_scnprintf = { [0] = SCA_FD, /* fd */
1130 [2] = SCA_STRARRAY, /* whence */ },
1131 .arg_parm = { [2] = &strarray__whences, /* whence */ }, },
1132 { .name = "lsetxattr", .errmsg = true,
1133 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1134 { .name = "lstat", .errmsg = true, .alias = "newlstat",
1135 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1136 { .name = "lsxattr", .errmsg = true,
1137 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1138 { .name = "madvise", .errmsg = true,
1139 .arg_scnprintf = { [0] = SCA_HEX, /* start */
1140 [2] = SCA_MADV_BHV, /* behavior */ }, },
1141 { .name = "mkdir", .errmsg = true,
1142 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1143 { .name = "mkdirat", .errmsg = true,
1144 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
1145 [1] = SCA_FILENAME, /* pathname */ }, },
1146 { .name = "mknod", .errmsg = true,
1147 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1148 { .name = "mknodat", .errmsg = true,
1149 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
1150 [1] = SCA_FILENAME, /* filename */ }, },
1151 { .name = "mlock", .errmsg = true,
1152 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
1153 { .name = "mlockall", .errmsg = true,
1154 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
1155 { .name = "mmap", .hexret = true,
1156 .arg_scnprintf = { [0] = SCA_HEX, /* addr */
1157 [2] = SCA_MMAP_PROT, /* prot */
1158 [3] = SCA_MMAP_FLAGS, /* flags */
1159 [4] = SCA_FD, /* fd */ }, },
1160 { .name = "mprotect", .errmsg = true,
1161 .arg_scnprintf = { [0] = SCA_HEX, /* start */
1162 [2] = SCA_MMAP_PROT, /* prot */ }, },
1163 { .name = "mq_unlink", .errmsg = true,
1164 .arg_scnprintf = { [0] = SCA_FILENAME, /* u_name */ }, },
1165 { .name = "mremap", .hexret = true,
1166 .arg_scnprintf = { [0] = SCA_HEX, /* addr */
1167 [3] = SCA_MREMAP_FLAGS, /* flags */
1168 [4] = SCA_HEX, /* new_addr */ }, },
1169 { .name = "munlock", .errmsg = true,
1170 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
1171 { .name = "munmap", .errmsg = true,
1172 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
1173 { .name = "name_to_handle_at", .errmsg = true,
1174 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
1175 { .name = "newfstatat", .errmsg = true,
1176 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
1177 [1] = SCA_FILENAME, /* filename */ }, },
1178 { .name = "open", .errmsg = true,
1179 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */
1180 [1] = SCA_OPEN_FLAGS, /* flags */ }, },
1181 { .name = "open_by_handle_at", .errmsg = true,
1182 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
1183 [2] = SCA_OPEN_FLAGS, /* flags */ }, },
1184 { .name = "openat", .errmsg = true,
1185 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
1186 [1] = SCA_FILENAME, /* filename */
1187 [2] = SCA_OPEN_FLAGS, /* flags */ }, },
1188 { .name = "perf_event_open", .errmsg = true,
1189 .arg_scnprintf = { [1] = SCA_INT, /* pid */
1190 [2] = SCA_INT, /* cpu */
1191 [3] = SCA_FD, /* group_fd */
1192 [4] = SCA_PERF_FLAGS, /* flags */ }, },
1193 { .name = "pipe2", .errmsg = true,
1194 .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, },
1195 { .name = "poll", .errmsg = true, .timeout = true, },
1196 { .name = "ppoll", .errmsg = true, .timeout = true, },
1197 { .name = "pread", .errmsg = true, .alias = "pread64",
1198 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1199 { .name = "preadv", .errmsg = true, .alias = "pread",
1200 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1201 { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
1202 { .name = "pwrite", .errmsg = true, .alias = "pwrite64",
1203 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1204 { .name = "pwritev", .errmsg = true,
1205 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1206 { .name = "read", .errmsg = true,
1207 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1208 { .name = "readlink", .errmsg = true,
1209 .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
1210 { .name = "readlinkat", .errmsg = true,
1211 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
1212 [1] = SCA_FILENAME, /* pathname */ }, },
1213 { .name = "readv", .errmsg = true,
1214 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1215 { .name = "recvfrom", .errmsg = true,
1216 .arg_scnprintf = { [0] = SCA_FD, /* fd */
1217 [3] = SCA_MSG_FLAGS, /* flags */ }, },
1218 { .name = "recvmmsg", .errmsg = true,
1219 .arg_scnprintf = { [0] = SCA_FD, /* fd */
1220 [3] = SCA_MSG_FLAGS, /* flags */ }, },
1221 { .name = "recvmsg", .errmsg = true,
1222 .arg_scnprintf = { [0] = SCA_FD, /* fd */
1223 [2] = SCA_MSG_FLAGS, /* flags */ }, },
1224 { .name = "removexattr", .errmsg = true,
1225 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1226 { .name = "renameat", .errmsg = true,
1227 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
1228 { .name = "rmdir", .errmsg = true,
1229 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1230 { .name = "rt_sigaction", .errmsg = true,
1231 .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
1232 { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), },
1233 { .name = "rt_sigqueueinfo", .errmsg = true,
1234 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
1235 { .name = "rt_tgsigqueueinfo", .errmsg = true,
1236 .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
1237 { .name = "select", .errmsg = true, .timeout = true, },
1238 { .name = "sendmmsg", .errmsg = true,
1239 .arg_scnprintf = { [0] = SCA_FD, /* fd */
1240 [3] = SCA_MSG_FLAGS, /* flags */ }, },
1241 { .name = "sendmsg", .errmsg = true,
1242 .arg_scnprintf = { [0] = SCA_FD, /* fd */
1243 [2] = SCA_MSG_FLAGS, /* flags */ }, },
1244 { .name = "sendto", .errmsg = true,
1245 .arg_scnprintf = { [0] = SCA_FD, /* fd */
1246 [3] = SCA_MSG_FLAGS, /* flags */ }, },
1247 { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), },
1248 { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
1249 { .name = "setxattr", .errmsg = true,
1250 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1251 { .name = "shutdown", .errmsg = true,
1252 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1253 { .name = "socket", .errmsg = true,
1254 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
1255 [1] = SCA_SK_TYPE, /* type */ },
1256 .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
1257 { .name = "socketpair", .errmsg = true,
1258 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
1259 [1] = SCA_SK_TYPE, /* type */ },
1260 .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
1261 { .name = "stat", .errmsg = true, .alias = "newstat",
1262 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1263 { .name = "statfs", .errmsg = true,
1264 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1265 { .name = "swapoff", .errmsg = true,
1266 .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
1267 { .name = "swapon", .errmsg = true,
1268 .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
1269 { .name = "symlinkat", .errmsg = true,
1270 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
1271 { .name = "tgkill", .errmsg = true,
1272 .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
1273 { .name = "tkill", .errmsg = true,
1274 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
1275 { .name = "truncate", .errmsg = true,
1276 .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
1277 { .name = "uname", .errmsg = true, .alias = "newuname", },
1278 { .name = "unlinkat", .errmsg = true,
1279 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
1280 [1] = SCA_FILENAME, /* pathname */ }, },
1281 { .name = "utime", .errmsg = true,
1282 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1283 { .name = "utimensat", .errmsg = true,
1284 .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */
1285 [1] = SCA_FILENAME, /* filename */ }, },
1286 { .name = "utimes", .errmsg = true,
1287 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1288 { .name = "vmsplice", .errmsg = true,
1289 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1290 { .name = "write", .errmsg = true,
1291 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1292 { .name = "writev", .errmsg = true,
1293 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1294};
1295
1296static int syscall_fmt__cmp(const void *name, const void *fmtp)
1297{
1298 const struct syscall_fmt *fmt = fmtp;
1299 return strcmp(name, fmt->name);
1300}
1301
1302static struct syscall_fmt *syscall_fmt__find(const char *name)
1303{
1304 const int nmemb = ARRAY_SIZE(syscall_fmts);
1305 return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
1306}
1307
/*
 * Everything 'perf trace' knows about one syscall: the tracepoint it is
 * wired to, its argument fields and the pretty-printers used for them.
 * Entries live in trace->syscalls.table, indexed by syscall id.
 */
struct syscall {
	struct event_format *tp_format;	/* syscalls:sys_enter_<name> format */
	int nr_args;			/* arg count, syscall-nr field excluded */
	struct format_field *args;	/* first real arg (nr field skipped) */
	const char *name;		/* name from audit_syscall_to_name() */
	bool is_exit;			/* exit/exit_group: never returns, so no sys_exit pairing */
	struct syscall_fmt *fmt;	/* formatting overrides, NULL if none */
	size_t (**arg_scnprintf)(char *bf, size_t size, struct syscall_arg *arg); /* per-arg printer or NULL */
	void **arg_parm;		/* per-arg opaque parm passed to arg_scnprintf */
};
1318
1319static size_t fprintf_duration(unsigned long t, FILE *fp)
1320{
1321 double duration = (double)t / NSEC_PER_MSEC;
1322 size_t printed = fprintf(fp, "(");
1323
1324 if (duration >= 1.0)
1325 printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
1326 else if (duration >= 0.01)
1327 printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
1328 else
1329 printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
1330 return printed + fprintf(fp, "): ");
1331}
1332
1333/**
1334 * filename.ptr: The filename char pointer that will be vfs_getname'd
1335 * filename.entry_str_pos: Where to insert the string translated from
1336 * filename.ptr by the vfs_getname tracepoint/kprobe.
1337 */
/* Per-thread tracing state, stashed in thread->priv by thread__trace(). */
struct thread_trace {
	u64 entry_time;			/* timestamp of the last sys_enter */
	u64 exit_time;			/* timestamp of the last sys_exit */
	bool entry_pending;		/* entry_str built, waiting for sys_exit to print it */
	unsigned long nr_events;	/* samples seen for this thread */
	unsigned long pfmaj, pfmin;	/* major/minor page fault counts */
	char *entry_str;		/* scratch buffer the entry line is formatted into */
	double runtime_ms;		/* accumulated sched_stat_runtime */
	struct {
		unsigned long ptr;	/* filename pointer pending vfs_getname resolution */
		short int entry_str_pos; /* where in entry_str to splice the resolved name */
		bool pending_open;	/* resolved name awaits the open's return fd */
		unsigned int namelen;	/* capacity of ->name below */
		char *name;		/* last vfs_getname'd pathname */
	} filename;
	struct {
		int max;		/* highest fd with a cached path, -1 if none */
		char **table;		/* fd -> strdup'd pathname */
	} paths;

	struct intlist *syscall_stats;	/* per-syscall-id struct stats (may be NULL on OOM) */
};
1360
1361static struct thread_trace *thread_trace__new(void)
1362{
1363 struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
1364
1365 if (ttrace)
1366 ttrace->paths.max = -1;
1367
1368 ttrace->syscall_stats = intlist__new(NULL);
1369
1370 return ttrace;
1371}
1372
1373static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
1374{
1375 struct thread_trace *ttrace;
1376
1377 if (thread == NULL)
1378 goto fail;
1379
1380 if (thread__priv(thread) == NULL)
1381 thread__set_priv(thread, thread_trace__new());
1382
1383 if (thread__priv(thread) == NULL)
1384 goto fail;
1385
1386 ttrace = thread__priv(thread);
1387 ++ttrace->nr_events;
1388
1389 return ttrace;
1390fail:
1391 color_fprintf(fp, PERF_COLOR_RED,
1392 "WARNING: not enough memory, dropping samples!\n");
1393 return NULL;
1394}
1395
/* Bits for trace->trace_pgfaults: which page-fault flavors to trace. */
#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

/* Size of the per-thread scratch buffer syscall entry lines are built in. */
static const size_t trace__entry_str_size = 2048;
1400
/* Global state for one 'perf trace' session. */
struct trace {
	struct perf_tool	tool;		/* event-processing callbacks */
	struct {
		int		machine;	/* audit machine type for name<->nr lookups */
		int		open_id;	/* syscall nr of open(), for vfs_getname pairing */
	} audit;
	struct {
		int		max;		/* highest syscall id in table, -1 if empty */
		struct syscall  *table;		/* id -> syscall info, filled lazily */
		struct {
			struct perf_evsel *sys_enter,
					  *sys_exit;	/* raw_syscalls tracepoint evsels */
		}		events;
	} syscalls;
	struct record_opts	opts;
	struct perf_evlist	*evlist;
	struct machine		*host;
	struct thread		*current;	/* last thread that entered a syscall */
	u64			base_time;	/* timestamp of first sample, for relative times */
	FILE			*output;
	unsigned long		nr_events;
	struct strlist		*ev_qualifier;	/* syscall names from -e, if any */
	struct {
		size_t		nr;
		int		*entries;	/* ev_qualifier resolved to syscall ids */
	}			ev_qualifier_ids;
	struct intlist		*tid_list;
	struct intlist		*pid_list;
	struct {
		size_t		nr;
		pid_t		*entries;	/* pids to filter out (e.g. our own) */
	}			filter_pids;
	double			duration_filter;	/* --duration, in ms */
	double			runtime_ms;
	struct {
		u64		vfs_getname,	/* filenames resolved via the tracepoint */
				proc_getname;	/* filenames resolved via /proc readlink */
	} stats;
	bool			not_ev_qualifier;	/* -e was negated ("!...") */
	bool			live;
	bool			full_time;
	bool			sched;
	bool			multiple_threads;
	bool			summary;
	bool			summary_only;
	bool			show_comm;
	bool			show_tool_stats;
	bool			trace_syscalls;
	bool			force;
	bool			vfs_getname;	/* vfs_getname probe is in place */
	int			trace_pgfaults;	/* TRACE_PFMAJ|TRACE_PFMIN bits */
};
1453
1454static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
1455{
1456 struct thread_trace *ttrace = thread__priv(thread);
1457
1458 if (fd > ttrace->paths.max) {
1459 char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *));
1460
1461 if (npath == NULL)
1462 return -1;
1463
1464 if (ttrace->paths.max != -1) {
1465 memset(npath + ttrace->paths.max + 1, 0,
1466 (fd - ttrace->paths.max) * sizeof(char *));
1467 } else {
1468 memset(npath, 0, (fd + 1) * sizeof(char *));
1469 }
1470
1471 ttrace->paths.table = npath;
1472 ttrace->paths.max = fd;
1473 }
1474
1475 ttrace->paths.table[fd] = strdup(pathname);
1476
1477 return ttrace->paths.table[fd] != NULL ? 0 : -1;
1478}
1479
1480static int thread__read_fd_path(struct thread *thread, int fd)
1481{
1482 char linkname[PATH_MAX], pathname[PATH_MAX];
1483 struct stat st;
1484 int ret;
1485
1486 if (thread->pid_ == thread->tid) {
1487 scnprintf(linkname, sizeof(linkname),
1488 "/proc/%d/fd/%d", thread->pid_, fd);
1489 } else {
1490 scnprintf(linkname, sizeof(linkname),
1491 "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
1492 }
1493
1494 if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
1495 return -1;
1496
1497 ret = readlink(linkname, pathname, sizeof(pathname));
1498
1499 if (ret < 0 || ret > st.st_size)
1500 return -1;
1501
1502 pathname[ret] = '\0';
1503 return trace__set_fd_pathname(thread, fd, pathname);
1504}
1505
1506static const char *thread__fd_path(struct thread *thread, int fd,
1507 struct trace *trace)
1508{
1509 struct thread_trace *ttrace = thread__priv(thread);
1510
1511 if (ttrace == NULL)
1512 return NULL;
1513
1514 if (fd < 0)
1515 return NULL;
1516
1517 if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) {
1518 if (!trace->live)
1519 return NULL;
1520 ++trace->stats.proc_getname;
1521 if (thread__read_fd_path(thread, fd))
1522 return NULL;
1523 }
1524
1525 return ttrace->paths.table[fd];
1526}
1527
1528static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
1529 struct syscall_arg *arg)
1530{
1531 int fd = arg->val;
1532 size_t printed = scnprintf(bf, size, "%d", fd);
1533 const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1534
1535 if (path)
1536 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1537
1538 return printed;
1539}
1540
1541static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
1542 struct syscall_arg *arg)
1543{
1544 int fd = arg->val;
1545 size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
1546 struct thread_trace *ttrace = thread__priv(arg->thread);
1547
1548 if (ttrace && fd >= 0 && fd <= ttrace->paths.max)
1549 zfree(&ttrace->paths.table[fd]);
1550
1551 return printed;
1552}
1553
1554static void thread__set_filename_pos(struct thread *thread, const char *bf,
1555 unsigned long ptr)
1556{
1557 struct thread_trace *ttrace = thread__priv(thread);
1558
1559 ttrace->filename.ptr = ptr;
1560 ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1561}
1562
1563static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1564 struct syscall_arg *arg)
1565{
1566 unsigned long ptr = arg->val;
1567
1568 if (!arg->trace->vfs_getname)
1569 return scnprintf(bf, size, "%#x", ptr);
1570
1571 thread__set_filename_pos(arg->thread, bf, ptr);
1572 return 0;
1573}
1574
1575static bool trace__filter_duration(struct trace *trace, double t)
1576{
1577 return t < (trace->duration_filter * NSEC_PER_MSEC);
1578}
1579
1580static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1581{
1582 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1583
1584 return fprintf(fp, "%10.3f ", ts);
1585}
1586
/*
 * Set from the SIGINT/SIGCHLD handler, polled by the main loop.
 * Fix: flags shared between a signal handler and normal control flow
 * must be volatile sig_atomic_t — a plain bool may be cached in a
 * register and the handler's write never observed (C11 5.1.2.3).
 */
static volatile sig_atomic_t done;
static volatile sig_atomic_t interrupted;

static void sig_handler(int sig)
{
	done = 1;
	interrupted = sig == SIGINT;
}
1595
1596static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1597 u64 duration, u64 tstamp, FILE *fp)
1598{
1599 size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
1600 printed += fprintf_duration(duration, fp);
1601
1602 if (trace->multiple_threads) {
1603 if (trace->show_comm)
1604 printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1605 printed += fprintf(fp, "%d ", thread->tid);
1606 }
1607
1608 return printed;
1609}
1610
1611static int trace__process_event(struct trace *trace, struct machine *machine,
1612 union perf_event *event, struct perf_sample *sample)
1613{
1614 int ret = 0;
1615
1616 switch (event->header.type) {
1617 case PERF_RECORD_LOST:
1618 color_fprintf(trace->output, PERF_COLOR_RED,
1619 "LOST %" PRIu64 " events!\n", event->lost.lost);
1620 ret = machine__process_lost_event(machine, event, sample);
1621 default:
1622 ret = machine__process_event(machine, event, sample);
1623 break;
1624 }
1625
1626 return ret;
1627}
1628
1629static int trace__tool_process(struct perf_tool *tool,
1630 union perf_event *event,
1631 struct perf_sample *sample,
1632 struct machine *machine)
1633{
1634 struct trace *trace = container_of(tool, struct trace, tool);
1635 return trace__process_event(trace, machine, event, sample);
1636}
1637
1638static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
1639{
1640 int err = symbol__init(NULL);
1641
1642 if (err)
1643 return err;
1644
1645 trace->host = machine__new_host();
1646 if (trace->host == NULL)
1647 return -ENOMEM;
1648
1649 if (trace_event__register_resolver(trace->host, machine__resolve_kernel_addr) < 0)
1650 return -errno;
1651
1652 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1653 evlist->threads, trace__tool_process, false,
1654 trace->opts.proc_map_timeout);
1655 if (err)
1656 symbol__exit();
1657
1658 return err;
1659}
1660
1661static int syscall__set_arg_fmts(struct syscall *sc)
1662{
1663 struct format_field *field;
1664 int idx = 0;
1665
1666 sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *));
1667 if (sc->arg_scnprintf == NULL)
1668 return -1;
1669
1670 if (sc->fmt)
1671 sc->arg_parm = sc->fmt->arg_parm;
1672
1673 for (field = sc->args; field; field = field->next) {
1674 if (sc->fmt && sc->fmt->arg_scnprintf[idx])
1675 sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
1676 else if (field->flags & FIELD_IS_POINTER)
1677 sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
1678 ++idx;
1679 }
1680
1681 return 0;
1682}
1683
/*
 * Lazily fill trace->syscalls.table[id]: resolve the syscall name via
 * audit, grow the table if needed (new slots zeroed), look up formatting
 * overrides and the sys_enter_<name> tracepoint format.
 * Returns 0 on success, -1 on failure.
 */
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = audit_syscall_to_name(id, trace->audit.machine);

	if (name == NULL)
		return -1;

	if (id > trace->syscalls.max) {
		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (nsyscalls == NULL)
			return -1;

		if (trace->syscalls.max != -1) {
			/* Zero only the newly added entries */
			memset(nsyscalls + trace->syscalls.max + 1, 0,
			       (id - trace->syscalls.max) * sizeof(*sc));
		} else {
			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
		}

		trace->syscalls.table = nsyscalls;
		trace->syscalls.max   = id;
	}

	sc = trace->syscalls.table + id;
	sc->name = name;

	sc->fmt  = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	/* Some syscalls' tracepoints use an alias (e.g. stat -> newstat) */
	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	if (IS_ERR(sc->tp_format))
		return -1;

	sc->args = sc->tp_format->format.fields;
	sc->nr_args = sc->tp_format->format.nr_fields;
	/*
	 * The first field may be '__syscall_nr' or 'nr' (the syscall
	 * number), which is redundant here, so skip it. Older kernels
	 * don't have this field at all, hence the check.
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;
		--sc->nr_args;
	}

	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");

	return syscall__set_arg_fmts(sc);
}
1742
/*
 * Resolve the -e syscall-name qualifier list into syscall ids, stored in
 * trace->ev_qualifier_ids. All names are checked so the user sees every
 * invalid one in a single message. Returns 0 on success, -EINVAL when
 * allocation fails or any name is not a syscall.
 */
static int trace__validate_ev_qualifier(struct trace *trace)
{
	int err = 0, i;
	struct str_node *pos;

	trace->ev_qualifier_ids.nr = strlist__nr_entries(trace->ev_qualifier);
	trace->ev_qualifier_ids.entries = malloc(trace->ev_qualifier_ids.nr *
						 sizeof(trace->ev_qualifier_ids.entries[0]));

	if (trace->ev_qualifier_ids.entries == NULL) {
		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
		      trace->output);
		err = -EINVAL;
		goto out;
	}

	i = 0;

	strlist__for_each(pos, trace->ev_qualifier) {
		const char *sc = pos->s;
		int id = audit_name_to_syscall(sc, trace->audit.machine);

		if (id < 0) {
			/* First bad name opens the error message, the rest append */
			if (err == 0) {
				fputs("Error:\tInvalid syscall ", trace->output);
				err = -EINVAL;
			} else {
				fputs(", ", trace->output);
			}

			fputs(sc, trace->output);
		}

		trace->ev_qualifier_ids.entries[i++] = id;
	}

	if (err < 0) {
		fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
		      "\nHint:\tand: 'man syscalls'\n", trace->output);
		zfree(&trace->ev_qualifier_ids.entries);
		trace->ev_qualifier_ids.nr = 0;
	}
out:
	return err;
}
1788
1789/*
1790 * args is to be interpreted as a series of longs but we need to handle
1791 * 8-byte unaligned accesses. args points to raw_data within the event
1792 * and raw_data is guaranteed to be 8-byte unaligned because it is
1793 * preceded by raw_size which is a u32. So we need to copy args to a temp
1794 * variable to read it. Most notably this avoids extended load instructions
1795 * on unaligned addresses
1796 */
1797
/*
 * Format all arguments of a sys_enter record into @bf. With tracepoint
 * metadata available each arg is printed as "name: value" using its
 * registered pretty-printer; without metadata, fall back to six generic
 * "argN: value" longs. Returns the number of characters written.
 */
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned char *args, struct trace *trace,
				      struct thread *thread)
{
	size_t printed = 0;
	unsigned char *p;
	unsigned long val;

	if (sc->args != NULL) {
		struct format_field *field;
		u8 bit = 1;
		struct syscall_arg arg = {
			.idx	= 0,
			.mask	= 0,
			.trace  = trace,
			.thread = thread,
		};

		for (field = sc->args; field;
		     field = field->next, ++arg.idx, bit <<= 1) {
			/* A previous printer may have consumed this arg too */
			if (arg.mask & bit)
				continue;

			/* special care for unaligned accesses */
			p = args + sizeof(unsigned long) * arg.idx;
			memcpy(&val, p, sizeof(val));

			/*
			 * Suppress this argument if its value is zero and
			 * we don't have a string associated in an
			 * strarray for it.
			 */
			if (val == 0 &&
			    !(sc->arg_scnprintf &&
			      sc->arg_scnprintf[arg.idx] == SCA_STRARRAY &&
			      sc->arg_parm[arg.idx]))
				continue;

			printed += scnprintf(bf + printed, size - printed,
					     "%s%s: ", printed ? ", " : "", field->name);
			if (sc->arg_scnprintf && sc->arg_scnprintf[arg.idx]) {
				arg.val = val;
				if (sc->arg_parm)
					arg.parm = sc->arg_parm[arg.idx];
				printed += sc->arg_scnprintf[arg.idx](bf + printed,
								      size - printed, &arg);
			} else {
				/* No printer registered: plain decimal */
				printed += scnprintf(bf + printed, size - printed,
						     "%ld", val);
			}
		}
	} else {
		/* No tracepoint format: dump all six potential args raw */
		int i = 0;

		while (i < 6) {
			/* special care for unaligned accesses */
			p = args + sizeof(unsigned long) * i;
			memcpy(&val, p, sizeof(val));
			printed += scnprintf(bf + printed, size - printed,
					     "%sarg%d: %ld",
					     printed ? ", " : "", i, val);
			++i;
		}
	}

	return printed;
}
1865
/* Per-tracepoint sample handler, stashed in evsel->handler by the setup code. */
typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
				  union perf_event *event,
				  struct perf_sample *sample);
1869
/*
 * Return the struct syscall for @id, reading its info on first use.
 * Returns NULL (with diagnostics depending on verbosity) for invalid
 * ids or when the tracepoint format cannot be read.
 */
static struct syscall *trace__syscall_info(struct trace *trace,
					   struct perf_evsel *evsel, int id)
{

	if (id < 0) {

		/*
		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
		 * before that, leaving at a higher verbosity level till that is
		 * explained. Reproduced with plain ftrace with:
		 *
		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
		 * grep "NR -1 " /t/trace_pipe
		 *
		 * After generating some load on the machine.
		 */
		if (verbose > 1) {
			static u64 n;
			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
				id, perf_evsel__name(evsel), ++n);
		}
		return NULL;
	}

	/* Lazily populate the table entry on first sight of this id */
	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
	    trace__read_syscall_info(trace, id))
		goto out_cant_read;

	/* Re-check: trace__read_syscall_info() may have failed silently */
	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
		goto out_cant_read;

	return &trace->syscalls.table[id];

out_cant_read:
	if (verbose) {
		fprintf(trace->output, "Problems reading syscall %d", id);
		if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
		fputs(" information\n", trace->output);
	}
	return NULL;
}
1912
1913static void thread__update_stats(struct thread_trace *ttrace,
1914 int id, struct perf_sample *sample)
1915{
1916 struct int_node *inode;
1917 struct stats *stats;
1918 u64 duration = 0;
1919
1920 inode = intlist__findnew(ttrace->syscall_stats, id);
1921 if (inode == NULL)
1922 return;
1923
1924 stats = inode->priv;
1925 if (stats == NULL) {
1926 stats = malloc(sizeof(struct stats));
1927 if (stats == NULL)
1928 return;
1929 init_stats(stats);
1930 inode->priv = stats;
1931 }
1932
1933 if (ttrace->entry_time && sample->time > ttrace->entry_time)
1934 duration = sample->time - ttrace->entry_time;
1935
1936 update_stats(stats, duration);
1937}
1938
1939static int trace__printf_interrupted_entry(struct trace *trace, struct perf_sample *sample)
1940{
1941 struct thread_trace *ttrace;
1942 u64 duration;
1943 size_t printed;
1944
1945 if (trace->current == NULL)
1946 return 0;
1947
1948 ttrace = thread__priv(trace->current);
1949
1950 if (!ttrace->entry_pending)
1951 return 0;
1952
1953 duration = sample->time - ttrace->entry_time;
1954
1955 printed = trace__fprintf_entry_head(trace, trace->current, duration, sample->time, trace->output);
1956 printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
1957 ttrace->entry_pending = false;
1958
1959 return printed;
1960}
1961
/*
 * raw_syscalls:sys_enter handler: format "name(args" into the thread's
 * entry_str. Printing is normally deferred until the matching sys_exit
 * completes the line with the return value; exit/exit_group never
 * return, so those are printed immediately.
 * Returns 0 on success, -1 on failure.
 */
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	size_t printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	/* Lazily allocate the per-thread scratch buffer */
	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	/* Flush any pending entry from another thread before starting ours */
	if (!trace->summary_only)
		trace__printf_interrupted_entry(trace, sample);

	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, trace, thread);

	if (sc->is_exit) {
		/* No sys_exit will come: print the whole line right away */
		if (!trace->duration_filter && !trace->summary_only) {
			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
			fprintf(trace->output, "%-70s\n", ttrace->entry_str);
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	/* Track the current thread so interrupted entries can be detected */
	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
2020
/*
 * raw_syscalls:sys_exit handler: complete the pending entry line (or
 * print "... [continued]" if the entry was flushed earlier) with a
 * formatted return value, applying the duration filter and updating
 * --summary stats. Returns 0 on success, -1 on failure.
 */
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
			   union perf_event *event __maybe_unused,
			   struct perf_sample *sample)
{
	long ret;
	u64 duration = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	if (trace->summary)
		thread__update_stats(ttrace, id, sample);

	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);

	/* Successful open(): pair the vfs_getname'd path with the new fd */
	if (id == trace->audit.open_id && ret >= 0 && ttrace->filename.pending_open) {
		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
		ttrace->filename.pending_open = false;
		++trace->stats.vfs_getname;
	}

	ttrace->exit_time = sample->time;

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
	} else if (trace->duration_filter)
		/* No entry seen: can't compute a duration, so filter it out */
		goto out;

	if (trace->summary_only)
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);

	if (ttrace->entry_pending) {
		fprintf(trace->output, "%-70s", ttrace->entry_str);
	} else {
		/* Entry was flushed by trace__printf_interrupted_entry() */
		fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		fprintf(trace->output, "]: %s()", sc->name);
	}

	/* Format the return value per the syscall's conventions */
	if (sc->fmt == NULL) {
signed_print:
		fprintf(trace->output, ") = %ld", ret);
	} else if (ret < 0 && sc->fmt->errmsg) {
		char bf[STRERR_BUFSIZE];
		const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
			   *e = audit_errno_to_name(-ret);

		fprintf(trace->output, ") = -1 %s %s", e, emsg);
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, ") = 0 Timeout");
	else if (sc->fmt->hexret)
		fprintf(trace->output, ") = %#lx", ret);
	else
		goto signed_print;

	fputc('\n', trace->output);
out:
	ttrace->entry_pending = false;
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
2097
/*
 * probe:vfs_getname handler: capture the pathname the kernel resolved
 * for the thread's in-flight open()-style syscall, and splice it into
 * the entry_str at the position recorded by thread__set_filename_pos().
 * The name is also kept in ttrace->filename for fd-path pairing in
 * trace__sys_exit(). Always returns 0.
 */
static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	struct thread_trace *ttrace;
	size_t filename_len, entry_str_len, to_move;
	ssize_t remaining_space;
	char *pos;
	const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");

	if (!thread)
		goto out;

	ttrace = thread__priv(thread);
	if (!ttrace)
		goto out;

	filename_len = strlen(filename);

	/* Grow the per-thread name buffer as needed */
	if (ttrace->filename.namelen < filename_len) {
		char *f = realloc(ttrace->filename.name, filename_len + 1);

		if (f == NULL)
			goto out;

		ttrace->filename.namelen = filename_len;
		ttrace->filename.name = f;
	}

	strcpy(ttrace->filename.name, filename);
	ttrace->filename.pending_open = true;

	/* No deferred filename slot in entry_str? Nothing to splice. */
	if (!ttrace->filename.ptr)
		goto out;

	entry_str_len = strlen(ttrace->entry_str);
	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
	if (remaining_space <= 0)
		goto out;

	/* Buffer too small: keep the tail of the path, the most useful part */
	if (filename_len > (size_t)remaining_space) {
		filename += filename_len - remaining_space;
		filename_len = remaining_space;
	}

	/* Shift the rest of entry_str right and drop the filename in place */
	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
	memmove(pos + filename_len, pos, to_move);
	memcpy(pos, filename, filename_len);

	ttrace->filename.ptr = 0;
	ttrace->filename.entry_str_pos = 0;
out:
	return 0;
}
2154
2155static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
2156 union perf_event *event __maybe_unused,
2157 struct perf_sample *sample)
2158{
2159 u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
2160 double runtime_ms = (double)runtime / NSEC_PER_MSEC;
2161 struct thread *thread = machine__findnew_thread(trace->host,
2162 sample->pid,
2163 sample->tid);
2164 struct thread_trace *ttrace = thread__trace(thread, trace->output);
2165
2166 if (ttrace == NULL)
2167 goto out_dump;
2168
2169 ttrace->runtime_ms += runtime_ms;
2170 trace->runtime_ms += runtime_ms;
2171 thread__put(thread);
2172 return 0;
2173
2174out_dump:
2175 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
2176 evsel->name,
2177 perf_evsel__strval(evsel, sample, "comm"),
2178 (pid_t)perf_evsel__intval(evsel, sample, "pid"),
2179 runtime,
2180 perf_evsel__intval(evsel, sample, "vruntime"));
2181 thread__put(thread);
2182 return 0;
2183}
2184
2185static void bpf_output__printer(enum binary_printer_ops op,
2186 unsigned int val, void *extra)
2187{
2188 FILE *output = extra;
2189 unsigned char ch = (unsigned char)val;
2190
2191 switch (op) {
2192 case BINARY_PRINT_CHAR_DATA:
2193 fprintf(output, "%c", isprint(ch) ? ch : '.');
2194 break;
2195 case BINARY_PRINT_DATA_BEGIN:
2196 case BINARY_PRINT_LINE_BEGIN:
2197 case BINARY_PRINT_ADDR:
2198 case BINARY_PRINT_NUM_DATA:
2199 case BINARY_PRINT_NUM_PAD:
2200 case BINARY_PRINT_SEP:
2201 case BINARY_PRINT_CHAR_PAD:
2202 case BINARY_PRINT_LINE_END:
2203 case BINARY_PRINT_DATA_END:
2204 default:
2205 break;
2206 }
2207}
2208
/*
 * Dump the raw payload of a BPF output sample to the trace output file,
 * rendered as character data only (see bpf_output__printer); the third
 * argument presumably groups the payload 8 bytes at a time — semantics
 * belong to print_binary(), defined elsewhere.
 */
static void bpf_output__fprintf(struct trace *trace,
				struct perf_sample *sample)
{
	print_binary(sample->raw_data, sample->raw_size, 8,
		     bpf_output__printer, trace->output);
}
2215
/*
 * Generic handler for events requested via --event: prints a timestamped
 * line with the event name plus, when available, its BPF-output payload
 * or tracepoint-formatted fields.  Always returns 0.
 */
static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample)
{
	/* Finish any half-printed syscall line from another thread first. */
	trace__printf_interrupted_entry(trace, sample);
	trace__fprintf_tstamp(trace, sample->time, trace->output);

	/* Placeholder keeps columns aligned with the syscall lines. */
	if (trace->trace_syscalls)
		fprintf(trace->output, "( ): ");

	fprintf(trace->output, "%s:", evsel->name);

	if (perf_evsel__is_bpf_output(evsel)) {
		bpf_output__fprintf(trace, sample);
	} else if (evsel->tp_format) {
		event_format__fprintf(evsel->tp_format, sample->cpu,
				      sample->raw_data, sample->raw_size,
				      trace->output);
	}

	fprintf(trace->output, ")\n");
	return 0;
}
2239
2240static void print_location(FILE *f, struct perf_sample *sample,
2241 struct addr_location *al,
2242 bool print_dso, bool print_sym)
2243{
2244
2245 if ((verbose || print_dso) && al->map)
2246 fprintf(f, "%s@", al->map->dso->long_name);
2247
2248 if ((verbose || print_sym) && al->sym)
2249 fprintf(f, "%s+0x%" PRIx64, al->sym->name,
2250 al->addr - al->sym->start);
2251 else if (al->map)
2252 fprintf(f, "0x%" PRIx64, al->addr);
2253 else
2254 fprintf(f, "0x%" PRIx64, sample->addr);
2255}
2256
/*
 * Handler for the software page-fault events: bumps the per-thread
 * major/minor fault counters and, unless running with --summary only,
 * prints a line resolving both the faulting IP and the accessed address
 * to symbol/DSO names.
 *
 * Returns 0 on success, -1 when no thread_trace could be obtained.
 */
static int trace__pgfault(struct trace *trace,
			  struct perf_evsel *evsel,
			  union perf_event *event __maybe_unused,
			  struct perf_sample *sample)
{
	struct thread *thread;
	struct addr_location al;
	char map_type = 'd';	/* 'd'ata; may become e'x'ecutable or '?' below */
	struct thread_trace *ttrace;
	int err = -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
		ttrace->pfmaj++;
	else
		ttrace->pfmin++;

	if (trace->summary_only)
		goto out;

	/* Resolve the faulting instruction pointer... */
	thread__find_addr_location(thread, sample->cpumode, MAP__FUNCTION,
			      sample->ip, &al);

	trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output);

	fprintf(trace->output, "%sfault [",
		evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
		"maj" : "min");

	print_location(trace->output, sample, &al, false, true);

	fprintf(trace->output, "] => ");

	/* ...then the accessed address, looking in data maps first. */
	thread__find_addr_location(thread, sample->cpumode, MAP__VARIABLE,
				   sample->addr, &al);

	if (!al.map) {
		/* Not a data address: maybe it faulted fetching code. */
		thread__find_addr_location(thread, sample->cpumode,
					   MAP__FUNCTION, sample->addr, &al);

		if (al.map)
			map_type = 'x';
		else
			map_type = '?';
	}

	print_location(trace->output, sample, &al, true, false);

	fprintf(trace->output, " (%c%c)\n", map_type, al.level);
out:
	err = 0;
out_put:
	/* Balances the reference taken by machine__findnew_thread(). */
	thread__put(thread);
	return err;
}
2316
2317static bool skip_sample(struct trace *trace, struct perf_sample *sample)
2318{
2319 if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) ||
2320 (trace->tid_list && intlist__find(trace->tid_list, sample->tid)))
2321 return false;
2322
2323 if (trace->pid_list || trace->tid_list)
2324 return true;
2325
2326 return false;
2327}
2328
2329static int trace__process_sample(struct perf_tool *tool,
2330 union perf_event *event,
2331 struct perf_sample *sample,
2332 struct perf_evsel *evsel,
2333 struct machine *machine __maybe_unused)
2334{
2335 struct trace *trace = container_of(tool, struct trace, tool);
2336 int err = 0;
2337
2338 tracepoint_handler handler = evsel->handler;
2339
2340 if (skip_sample(trace, sample))
2341 return 0;
2342
2343 if (!trace->full_time && trace->base_time == 0)
2344 trace->base_time = sample->time;
2345
2346 if (handler) {
2347 ++trace->nr_events;
2348 handler(trace, evsel, event, sample);
2349 }
2350
2351 return err;
2352}
2353
2354static int parse_target_str(struct trace *trace)
2355{
2356 if (trace->opts.target.pid) {
2357 trace->pid_list = intlist__new(trace->opts.target.pid);
2358 if (trace->pid_list == NULL) {
2359 pr_err("Error parsing process id string\n");
2360 return -EINVAL;
2361 }
2362 }
2363
2364 if (trace->opts.target.tid) {
2365 trace->tid_list = intlist__new(trace->opts.target.tid);
2366 if (trace->tid_list == NULL) {
2367 pr_err("Error parsing thread id string\n");
2368 return -EINVAL;
2369 }
2370 }
2371
2372 return 0;
2373}
2374
2375static int trace__record(struct trace *trace, int argc, const char **argv)
2376{
2377 unsigned int rec_argc, i, j;
2378 const char **rec_argv;
2379 const char * const record_args[] = {
2380 "record",
2381 "-R",
2382 "-m", "1024",
2383 "-c", "1",
2384 };
2385
2386 const char * const sc_args[] = { "-e", };
2387 unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
2388 const char * const majpf_args[] = { "-e", "major-faults" };
2389 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
2390 const char * const minpf_args[] = { "-e", "minor-faults" };
2391 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
2392
2393 /* +1 is for the event string below */
2394 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
2395 majpf_args_nr + minpf_args_nr + argc;
2396 rec_argv = calloc(rec_argc + 1, sizeof(char *));
2397
2398 if (rec_argv == NULL)
2399 return -ENOMEM;
2400
2401 j = 0;
2402 for (i = 0; i < ARRAY_SIZE(record_args); i++)
2403 rec_argv[j++] = record_args[i];
2404
2405 if (trace->trace_syscalls) {
2406 for (i = 0; i < sc_args_nr; i++)
2407 rec_argv[j++] = sc_args[i];
2408
2409 /* event string may be different for older kernels - e.g., RHEL6 */
2410 if (is_valid_tracepoint("raw_syscalls:sys_enter"))
2411 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
2412 else if (is_valid_tracepoint("syscalls:sys_enter"))
2413 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
2414 else {
2415 pr_err("Neither raw_syscalls nor syscalls events exist.\n");
2416 return -1;
2417 }
2418 }
2419
2420 if (trace->trace_pgfaults & TRACE_PFMAJ)
2421 for (i = 0; i < majpf_args_nr; i++)
2422 rec_argv[j++] = majpf_args[i];
2423
2424 if (trace->trace_pgfaults & TRACE_PFMIN)
2425 for (i = 0; i < minpf_args_nr; i++)
2426 rec_argv[j++] = minpf_args[i];
2427
2428 for (i = 0; i < (unsigned int)argc; i++)
2429 rec_argv[j++] = argv[i];
2430
2431 return cmd_record(j, rec_argv, NULL);
2432}
2433
2434static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
2435
2436static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
2437{
2438 struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
2439
2440 if (IS_ERR(evsel))
2441 return false;
2442
2443 if (perf_evsel__field(evsel, "pathname") == NULL) {
2444 perf_evsel__delete(evsel);
2445 return false;
2446 }
2447
2448 evsel->handler = trace__vfs_getname;
2449 perf_evlist__add(evlist, evsel);
2450 return true;
2451}
2452
2453static int perf_evlist__add_pgfault(struct perf_evlist *evlist,
2454 u64 config)
2455{
2456 struct perf_evsel *evsel;
2457 struct perf_event_attr attr = {
2458 .type = PERF_TYPE_SOFTWARE,
2459 .mmap_data = 1,
2460 };
2461
2462 attr.config = config;
2463 attr.sample_period = 1;
2464
2465 event_attr_init(&attr);
2466
2467 evsel = perf_evsel__new(&attr);
2468 if (!evsel)
2469 return -ENOMEM;
2470
2471 evsel->handler = trace__pgfault;
2472 perf_evlist__add(evlist, evsel);
2473
2474 return 0;
2475}
2476
2477static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
2478{
2479 const u32 type = event->header.type;
2480 struct perf_evsel *evsel;
2481
2482 if (!trace->full_time && trace->base_time == 0)
2483 trace->base_time = sample->time;
2484
2485 if (type != PERF_RECORD_SAMPLE) {
2486 trace__process_event(trace, trace->host, event, sample);
2487 return;
2488 }
2489
2490 evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
2491 if (evsel == NULL) {
2492 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
2493 return;
2494 }
2495
2496 if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
2497 sample->raw_data == NULL) {
2498 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
2499 perf_evsel__name(evsel), sample->tid,
2500 sample->cpu, sample->raw_size);
2501 } else {
2502 tracepoint_handler handler = evsel->handler;
2503 handler(trace, evsel, event, sample);
2504 }
2505}
2506
2507static int trace__add_syscall_newtp(struct trace *trace)
2508{
2509 int ret = -1;
2510 struct perf_evlist *evlist = trace->evlist;
2511 struct perf_evsel *sys_enter, *sys_exit;
2512
2513 sys_enter = perf_evsel__syscall_newtp("sys_enter", trace__sys_enter);
2514 if (sys_enter == NULL)
2515 goto out;
2516
2517 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
2518 goto out_delete_sys_enter;
2519
2520 sys_exit = perf_evsel__syscall_newtp("sys_exit", trace__sys_exit);
2521 if (sys_exit == NULL)
2522 goto out_delete_sys_enter;
2523
2524 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
2525 goto out_delete_sys_exit;
2526
2527 perf_evlist__add(evlist, sys_enter);
2528 perf_evlist__add(evlist, sys_exit);
2529
2530 trace->syscalls.events.sys_enter = sys_enter;
2531 trace->syscalls.events.sys_exit = sys_exit;
2532
2533 ret = 0;
2534out:
2535 return ret;
2536
2537out_delete_sys_exit:
2538 perf_evsel__delete_priv(sys_exit);
2539out_delete_sys_enter:
2540 perf_evsel__delete_priv(sys_enter);
2541 goto out;
2542}
2543
2544static int trace__set_ev_qualifier_filter(struct trace *trace)
2545{
2546 int err = -1;
2547 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
2548 trace->ev_qualifier_ids.nr,
2549 trace->ev_qualifier_ids.entries);
2550
2551 if (filter == NULL)
2552 goto out_enomem;
2553
2554 if (!perf_evsel__append_filter(trace->syscalls.events.sys_enter, "&&", filter))
2555 err = perf_evsel__append_filter(trace->syscalls.events.sys_exit, "&&", filter);
2556
2557 free(filter);
2558out:
2559 return err;
2560out_enomem:
2561 errno = ENOMEM;
2562 goto out;
2563}
2564
/*
 * Live mode: set up the evsels (raw syscalls, vfs_getname, page faults,
 * sched_stat_runtime), create the target maps, fork/attach the workload,
 * mmap the ring buffers and consume events until the workload exits or
 * the user interrupts.
 *
 * Returns 0 on success or a negative error.  All error paths funnel into
 * out_delete_evlist, so the evlist is always released.
 */
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct perf_evlist *evlist = trace->evlist;
	struct perf_evsel *evsel;
	int err = -1, i;
	unsigned long before;
	const bool forks = argc > 0;	/* a workload command was given */
	bool draining = false;

	trace->live = true;

	if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
		goto out_error_raw_syscalls;

	if (trace->trace_syscalls)
		trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);

	if ((trace->trace_pgfaults & TRACE_PFMAJ) &&
		perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ)) {
		goto out_error_mem;
	}

	if ((trace->trace_pgfaults & TRACE_PFMIN) &&
	    perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MIN))
		goto out_error_mem;

	if (trace->sched &&
	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
				   trace__sched_stat_runtime))
		goto out_error_sched_stat_runtime;

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_evlist;
	}

	perf_evlist__config(evlist, &trace->opts);

	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	if (forks) {
		/* Workload is created stopped; started after events are on. */
		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
						    argv, false, NULL);
		if (err < 0) {
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_evlist;
		}
	}

	err = perf_evlist__open(evlist);
	if (err < 0)
		goto out_error_open;

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_error_open;
	}

	/*
	 * Better not use !target__has_task() here because we need to cover the
	 * case where no threads were specified in the command line, but a
	 * workload was, and in that case we will fill in the thread_map when
	 * we fork the workload in perf_evlist__prepare_workload.
	 */
	if (trace->filter_pids.nr > 0)
		err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries);
	else if (thread_map__pid(evlist->threads, 0) == -1)
		/* System-wide: at least filter out perf trace itself. */
		err = perf_evlist__set_filter_pid(evlist, getpid());

	if (err < 0)
		goto out_error_mem;

	if (trace->ev_qualifier_ids.nr > 0) {
		err = trace__set_ev_qualifier_filter(trace);
		if (err < 0)
			goto out_errno;

		pr_debug("event qualifier tracepoint filter: %s\n",
			 trace->syscalls.events.sys_exit->filter);
	}

	err = perf_evlist__apply_filters(evlist, &evsel);
	if (err < 0)
		goto out_error_apply_filters;

	err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
	if (err < 0)
		goto out_error_mmap;

	if (!target__none(&trace->opts.target))
		perf_evlist__enable(evlist);

	if (forks)
		perf_evlist__start_workload(evlist);

	/* Decide whether output lines must carry a tid column. */
	trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
				  evlist->threads->nr > 1 ||
				  perf_evlist__first(evlist)->attr.inherit;
again:
	before = trace->nr_events;

	/* Drain every mmap'ed ring buffer once per pass. */
	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			struct perf_sample sample;

			++trace->nr_events;

			err = perf_evlist__parse_sample(evlist, event, &sample);
			if (err) {
				fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
				goto next_event;
			}

			trace__handle_event(trace, event, &sample);
next_event:
			perf_evlist__mmap_consume(evlist, i);

			if (interrupted)
				goto out_disable;

			/* Workload done: stop producing, keep draining. */
			if (done && !draining) {
				perf_evlist__disable(evlist);
				draining = true;
			}
		}
	}

	if (trace->nr_events == before) {
		/* Idle pass: poll (bounded once the workload finished). */
		int timeout = done ? 100 : -1;

		if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
			if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
				draining = true;

			goto again;
		}
	} else {
		goto again;
	}

out_disable:
	thread__zput(trace->current);

	perf_evlist__disable(evlist);

	if (!err) {
		if (trace->summary)
			trace__fprintf_thread_summary(trace, trace->output);

		if (trace->show_tool_stats) {
			fprintf(trace->output, "Stats:\n "
					       " vfs_getname : %" PRIu64 "\n"
					       " proc_getname: %" PRIu64 "\n",
				trace->stats.vfs_getname,
				trace->stats.proc_getname);
		}
	}

out_delete_evlist:
	perf_evlist__delete(evlist);
	trace->evlist = NULL;
	trace->live = false;
	return err;
/*
 * Error labels live in a brace block after the return so they can share
 * one errbuf without it occupying stack space on the happy path; labels
 * that don't need errbuf sit after the closing brace.
 */
{
	char errbuf[BUFSIZ];

out_error_sched_stat_runtime:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
	goto out_error;

out_error_raw_syscalls:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
	goto out_error;

out_error_mmap:
	perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
	goto out_error;

out_error_open:
	perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));

out_error:
	fprintf(trace->output, "%s\n", errbuf);
	goto out_delete_evlist;

out_error_apply_filters:
	fprintf(trace->output,
		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
		evsel->filter, perf_evsel__name(evsel), errno,
		strerror_r(errno, errbuf, sizeof(errbuf)));
	goto out_delete_evlist;
}
out_error_mem:
	fprintf(trace->output, "Not enough memory to run!\n");
	goto out_delete_evlist;

out_errno:
	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
	goto out_delete_evlist;
}
2780
/*
 * Replay mode (perf trace -i perf.data): run a previously recorded
 * session through the same per-event handlers used in live mode.
 * Returns 0 on success or a negative error.
 */
static int trace__replay(struct trace *trace)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "probe:vfs_getname", trace__vfs_getname, },
	};
	struct perf_data_file file = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = trace->force,
	};
	struct perf_session *session;
	struct perf_evsel *evsel;
	int err = -1;

	trace->tool.sample	  = trace__process_sample;
	trace->tool.mmap	  = perf_event__process_mmap;
	trace->tool.mmap2	  = perf_event__process_mmap2;
	trace->tool.comm	  = perf_event__process_comm;
	trace->tool.exit	  = perf_event__process_exit;
	trace->tool.fork	  = perf_event__process_fork;
	trace->tool.attr	  = perf_event__process_attr;
	trace->tool.tracing_data = perf_event__process_tracing_data;
	trace->tool.build_id	  = perf_event__process_build_id;

	trace->tool.ordered_events = true;
	trace->tool.ordering_requires_timestamps = true;

	/* add tid to output */
	trace->multiple_threads = true;

	session = perf_session__new(&file, false, &trace->tool);
	if (session == NULL)
		return -1;

	if (symbol__init(&session->header.env) < 0)
		goto out;

	trace->host = &session->machines.host;

	err = perf_session__set_tracepoints_handlers(session, handlers);
	if (err)
		goto out;

	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
						     "raw_syscalls:sys_enter");
	/* older kernels have syscalls tp versus raw_syscalls */
	if (evsel == NULL)
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
							     "syscalls:sys_enter");

	if (evsel &&
	    (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
		pr_err("Error during initialize raw_syscalls:sys_enter event\n");
		goto out;
	}

	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
						     "raw_syscalls:sys_exit");
	if (evsel == NULL)
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
							     "syscalls:sys_exit");
	if (evsel &&
	    (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
		pr_err("Error during initialize raw_syscalls:sys_exit event\n");
		goto out;
	}

	/* Route any recorded page-fault software events to our handler. */
	evlist__for_each(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
		    (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS))
			evsel->handler = trace__pgfault;
	}

	err = parse_target_str(trace);
	if (err != 0)
		goto out;

	setup_pager();

	err = perf_session__process_events(session);
	if (err)
		pr_err("Failed to process events, error %d", err);

	else if (trace->summary)
		trace__fprintf_thread_summary(trace, trace->output);

out:
	perf_session__delete(session);

	return err;
}
2876
/*
 * Emit the banner that precedes the per-thread summary; returns the
 * number of characters written.
 */
static size_t trace__fprintf_threads_header(FILE *fp)
{
	return fprintf(fp, "\n Summary of events:\n\n");
}
2885
/*
 * Print the per-syscall statistics table (calls, total/min/avg/max time
 * in msec and relative stddev) for one thread.  Returns the number of
 * characters printed; 0 when the thread issued no syscalls.
 */
static size_t thread__dump_stats(struct thread_trace *ttrace,
				 struct trace *trace, FILE *fp)
{
	struct stats *stats;
	size_t printed = 0;
	struct syscall *sc;
	struct int_node *inode = intlist__first(ttrace->syscall_stats);

	if (inode == NULL)
		return 0;

	printed += fprintf(fp, "\n");

	printed += fprintf(fp, " syscall calls total min avg max stddev\n");
	printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
	printed += fprintf(fp, " --------------- -------- --------- --------- --------- --------- ------\n");

	/* each int_node is a syscall */
	while (inode) {
		stats = inode->priv;
		if (stats) {
			double min = (double)(stats->min) / NSEC_PER_MSEC;
			double max = (double)(stats->max) / NSEC_PER_MSEC;
			double avg = avg_stats(stats);
			double pct;
			u64 n = (u64) stats->n;

			/* stddev reported relative to the mean, in percent. */
			pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
			avg /= NSEC_PER_MSEC;

			/* inode->i is the syscall id, indexing the table. */
			sc = &trace->syscalls.table[inode->i];
			printed += fprintf(fp, " %-15s", sc->name);
			printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
					   n, avg * n, min, avg);
			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
		}

		inode = intlist__next(inode);
	}

	printed += fprintf(fp, "\n\n");

	return printed;
}
2930
/* struct used to pass data to per-thread function */
struct summary_data {
	FILE *fp;		/* where the summary lines are written */
	struct trace *trace;	/* for global event counts / syscall table */
	size_t printed;		/* running total of characters printed */
};
2937
2938static int trace__fprintf_one_thread(struct thread *thread, void *priv)
2939{
2940 struct summary_data *data = priv;
2941 FILE *fp = data->fp;
2942 size_t printed = data->printed;
2943 struct trace *trace = data->trace;
2944 struct thread_trace *ttrace = thread__priv(thread);
2945 double ratio;
2946
2947 if (ttrace == NULL)
2948 return 0;
2949
2950 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
2951
2952 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
2953 printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
2954 printed += fprintf(fp, "%.1f%%", ratio);
2955 if (ttrace->pfmaj)
2956 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
2957 if (ttrace->pfmin)
2958 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
2959 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
2960 printed += thread__dump_stats(ttrace, trace, fp);
2961
2962 data->printed += printed;
2963
2964 return 0;
2965}
2966
2967static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
2968{
2969 struct summary_data data = {
2970 .fp = fp,
2971 .trace = trace
2972 };
2973 data.printed = trace__fprintf_threads_header(fp);
2974
2975 machine__for_each_thread(trace->host, trace__fprintf_one_thread, &data);
2976
2977 return data.printed;
2978}
2979
2980static int trace__set_duration(const struct option *opt, const char *str,
2981 int unset __maybe_unused)
2982{
2983 struct trace *trace = opt->value;
2984
2985 trace->duration_filter = atof(str);
2986 return 0;
2987}
2988
2989static int trace__set_filter_pids(const struct option *opt, const char *str,
2990 int unset __maybe_unused)
2991{
2992 int ret = -1;
2993 size_t i;
2994 struct trace *trace = opt->value;
2995 /*
2996 * FIXME: introduce a intarray class, plain parse csv and create a
2997 * { int nr, int entries[] } struct...
2998 */
2999 struct intlist *list = intlist__new(str);
3000
3001 if (list == NULL)
3002 return -1;
3003
3004 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
3005 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
3006
3007 if (trace->filter_pids.entries == NULL)
3008 goto out;
3009
3010 trace->filter_pids.entries[0] = getpid();
3011
3012 for (i = 1; i < trace->filter_pids.nr; ++i)
3013 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
3014
3015 intlist__delete(list);
3016 ret = 0;
3017out:
3018 return ret;
3019}
3020
3021static int trace__open_output(struct trace *trace, const char *filename)
3022{
3023 struct stat st;
3024
3025 if (!stat(filename, &st) && st.st_size) {
3026 char oldname[PATH_MAX];
3027
3028 scnprintf(oldname, sizeof(oldname), "%s.old", filename);
3029 unlink(oldname);
3030 rename(filename, oldname);
3031 }
3032
3033 trace->output = fopen(filename, "w");
3034
3035 return trace->output == NULL ? -errno : 0;
3036}
3037
3038static int parse_pagefaults(const struct option *opt, const char *str,
3039 int unset __maybe_unused)
3040{
3041 int *trace_pgfaults = opt->value;
3042
3043 if (strcmp(str, "all") == 0)
3044 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
3045 else if (strcmp(str, "maj") == 0)
3046 *trace_pgfaults |= TRACE_PFMAJ;
3047 else if (strcmp(str, "min") == 0)
3048 *trace_pgfaults |= TRACE_PFMIN;
3049 else
3050 return -1;
3051
3052 return 0;
3053}
3054
/*
 * Install the same sample handler on every evsel in the list — used to
 * route all --event selections through trace__event_handler.
 */
static void evlist__set_evsel_handler(struct perf_evlist *evlist, void *handler)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel)
		evsel->handler = handler;
}
3062
/*
 * Entry point for 'perf trace': parse options, validate the target, then
 * either replay a recorded file (-i), delegate to 'perf trace record', or
 * run live via trace__run().
 */
int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char *trace_usage[] = {
		"perf trace [<options>] [<command>]",
		"perf trace [<options>] -- <command> [<options>]",
		"perf trace record [<options>] [<command>]",
		"perf trace record [<options>] -- <command> [<options>]",
		NULL
	};
	struct trace trace = {
		.audit = {
			.machine = audit_detect_machine(),
			/*
			 * NOTE(review): reads trace.audit.machine from the
			 * sibling initializer above — initializer evaluation
			 * order is unsequenced in C, so this relies on the
			 * compiler evaluating .machine first.  Confirm.
			 */
			.open_id  = audit_name_to_syscall("open", trace.audit.machine),
		},
		.syscalls = {
			. max = -1,	/* syscall table grown on demand */
		},
		.opts = {
			.target = {
				.uid	   = UINT_MAX,
				.uses_mmap = true,
			},
			.user_freq     = UINT_MAX,
			.user_interval = ULLONG_MAX,
			.no_buffering  = true,
			.mmap_pages    = UINT_MAX,
			.proc_map_timeout  = 500,
		},
		.output = stderr,
		.show_comm = true,
		.trace_syscalls = true,
	};
	const char *output_name = NULL;
	const char *ev_qualifier_str = NULL;
	const struct option trace_options[] = {
	OPT_CALLBACK(0, "event", &trace.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_BOOLEAN(0, "comm", &trace.show_comm,
		    "show the thread COMM next to its id"),
	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
	OPT_STRING('e', "expr", &ev_qualifier_str, "expr", "list of syscalls to trace"),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		    "trace events on existing process id"),
	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
		    "trace events on existing thread id"),
	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
		     "pids to filter (by the kernel)", trace__set_filter_pids),
	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK(0, "duration", &trace, "float",
		     "show only events with duration > N.M ms",
		     trace__set_duration),
	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
	OPT_BOOLEAN('T', "time", &trace.full_time,
		    "Show full timestamp, not time relative to first start"),
	OPT_BOOLEAN('s', "summary", &trace.summary_only,
		    "Show only syscall summary with statistics"),
	OPT_BOOLEAN('S', "with-summary", &trace.summary,
		    "Show all syscalls and summary with statistics"),
	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
		     "Trace pagefaults", parse_pagefaults, "maj"),
	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
	OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
			"per thread proc mmap processing timeout in ms"),
	OPT_END()
	};
	const char * const trace_subcommands[] = { "record", NULL };
	int err;
	char bf[BUFSIZ];

	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);

	trace.evlist = perf_evlist__new();

	if (trace.evlist == NULL) {
		pr_err("Not enough memory to run!\n");
		err = -ENOMEM;
		goto out;
	}

	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
				 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);

	/* Page-fault samples need the address and a timestamp. */
	if (trace.trace_pgfaults) {
		trace.opts.sample_address = true;
		trace.opts.sample_time = true;
	}

	if (trace.evlist->nr_entries > 0)
		evlist__set_evsel_handler(trace.evlist, trace__event_handler);

	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
		return trace__record(&trace, argc-1, &argv[1]);

	/* summary_only implies summary option, but don't overwrite summary if set */
	if (trace.summary_only)
		trace.summary = trace.summary_only;

	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
	    trace.evlist->nr_entries == 0 /* Was --events used? */) {
		pr_err("Please specify something to trace.\n");
		return -1;
	}

	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
			perror("failed to create output file");
			goto out;
		}
	}

	if (ev_qualifier_str != NULL) {
		const char *s = ev_qualifier_str;
		struct strlist_config slist_config = {
			.dirname = system_path(STRACE_GROUPS_DIR),
		};

		/* A leading '!' inverts the -e list into an exclude list. */
		trace.not_ev_qualifier = *s == '!';
		if (trace.not_ev_qualifier)
			++s;
		trace.ev_qualifier = strlist__new(s, &slist_config);
		if (trace.ev_qualifier == NULL) {
			fputs("Not enough memory to parse event qualifier",
			      trace.output);
			err = -ENOMEM;
			goto out_close;
		}

		err = trace__validate_ev_qualifier(&trace);
		if (err)
			goto out_close;
	}

	err = target__validate(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	err = target__parse_uid(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	/* No workload and no target: default to system-wide tracing. */
	if (!argc && target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

	if (input_name)
		err = trace__replay(&trace);
	else
		err = trace__run(&trace, argc, argv);

out_close:
	if (output_name != NULL)
		fclose(trace.output);
out:
	return err;
}
1/*
2 * builtin-trace.c
3 *
4 * Builtin 'trace' command:
5 *
6 * Display a continuously updated trace of any workload, CPU, specific PID,
7 * system wide, etc. Default format is loosely strace like, but any other
8 * event may be specified using --event.
9 *
10 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
11 *
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
13 *
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
15 */
16
17#include "util/record.h"
18#include <api/fs/tracing_path.h>
19#ifdef HAVE_LIBBPF_SUPPORT
20#include <bpf/bpf.h>
21#include <bpf/libbpf.h>
22#ifdef HAVE_BPF_SKEL
23#include "bpf_skel/augmented_raw_syscalls.skel.h"
24#endif
25#endif
26#include "util/bpf_map.h"
27#include "util/rlimit.h"
28#include "builtin.h"
29#include "util/cgroup.h"
30#include "util/color.h"
31#include "util/config.h"
32#include "util/debug.h"
33#include "util/dso.h"
34#include "util/env.h"
35#include "util/event.h"
36#include "util/evsel.h"
37#include "util/evsel_fprintf.h"
38#include "util/synthetic-events.h"
39#include "util/evlist.h"
40#include "util/evswitch.h"
41#include "util/mmap.h"
42#include <subcmd/pager.h>
43#include <subcmd/exec-cmd.h>
44#include "util/machine.h"
45#include "util/map.h"
46#include "util/symbol.h"
47#include "util/path.h"
48#include "util/session.h"
49#include "util/thread.h"
50#include <subcmd/parse-options.h>
51#include "util/strlist.h"
52#include "util/intlist.h"
53#include "util/thread_map.h"
54#include "util/stat.h"
55#include "util/tool.h"
56#include "util/util.h"
57#include "trace/beauty/beauty.h"
58#include "trace-event.h"
59#include "util/parse-events.h"
60#include "util/tracepoint.h"
61#include "callchain.h"
62#include "print_binary.h"
63#include "string2.h"
64#include "syscalltbl.h"
65#include "rb_resort.h"
66#include "../perf.h"
67
68#include <errno.h>
69#include <inttypes.h>
70#include <poll.h>
71#include <signal.h>
72#include <stdlib.h>
73#include <string.h>
74#include <linux/err.h>
75#include <linux/filter.h>
76#include <linux/kernel.h>
77#include <linux/random.h>
78#include <linux/stringify.h>
79#include <linux/time64.h>
80#include <linux/zalloc.h>
81#include <fcntl.h>
82#include <sys/sysmacros.h>
83
84#include <linux/ctype.h>
85#include <perf/mmap.h>
86
87#ifdef HAVE_LIBTRACEEVENT
88#include <traceevent/event-parse.h>
89#endif
90
91#ifndef O_CLOEXEC
92# define O_CLOEXEC 02000000
93#endif
94
95#ifndef F_LINUX_SPECIFIC_BASE
96# define F_LINUX_SPECIFIC_BASE 1024
97#endif
98
99#define RAW_SYSCALL_ARGS_NUM 6
100
101/*
102 * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
103 */
104struct syscall_arg_fmt {
105 size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
106 bool (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
107 unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
108 void *parm;
109 const char *name;
110 u16 nr_entries; // for arrays
111 bool show_zero;
112};
113
/*
 * How to render one syscall: optional alias name, per-argument formatters
 * and flags controlling how the return value is printed.
 */
struct syscall_fmt {
	const char *name;
	const char *alias;	/* alternative event name, e.g. "fstat" -> "newfstat" */
	struct {
		const char *sys_enter,
			   *sys_exit;
	} bpf_prog_name;	/* names of the BPF augmenter programs for this syscall */
	struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
	u8 nr_args;
	bool errpid;		/* NOTE(review): looks like "return value is a pid" — confirm at use sites */
	bool timeout;
	bool hexret;		/* print the return value in hex (used e.g. for brk/mmap below) */
};
127
/*
 * Global state for one 'perf trace' session: event setup, the syscall
 * table cache, per-run counters and the output/filtering knobs set from
 * the command line.
 */
struct trace {
	struct perf_tool tool;
	struct syscalltbl *sctbl;
	struct {
		struct syscall *table;		/* indexed by syscall id */
		struct {
			struct evsel *sys_enter,
				     *sys_exit,
				     *bpf_output;
		} events;
	} syscalls;
#ifdef HAVE_BPF_SKEL
	struct augmented_raw_syscalls_bpf *skel;
#endif
	struct record_opts opts;
	struct evlist *evlist;
	struct machine *host;
	struct thread *current;			/* thread of the last processed event */
	struct cgroup *cgroup;
	u64 base_time;				/* timestamps are printed relative to this */
	FILE *output;
	/* event accounting */
	unsigned long nr_events;
	unsigned long nr_events_printed;
	unsigned long max_events;
	struct evswitch	evswitch;
	/* syscall name filter ('-e open,close' style), possibly negated */
	struct strlist *ev_qualifier;
	struct {
		size_t nr;
		int *entries;
	} ev_qualifier_ids;
	struct {
		size_t nr;
		pid_t *entries;
		struct bpf_map *map;
	} filter_pids;
	double duration_filter;			/* only show events longer than this (ms) */
	double runtime_ms;
	struct {
		u64 vfs_getname,
		    proc_getname;
	} stats;
	unsigned int max_stack;
	unsigned int min_stack;
	int raw_augmented_syscalls_args_size;
	bool raw_augmented_syscalls;
	bool fd_path_disabled;
	bool sort_events;
	bool not_ev_qualifier;			/* ev_qualifier was prefixed with '!' */
	bool live;
	bool full_time;
	bool sched;
	bool multiple_threads;
	bool summary;
	bool summary_only;
	bool errno_summary;
	bool failure_only;
	bool show_comm;
	bool print_sample;
	bool show_tool_stats;
	bool trace_syscalls;
	bool libtraceevent_print;
	bool kernel_syscallchains;
	s16 args_alignment;
	bool show_tstamp;
	bool show_duration;
	bool show_zeros;
	bool show_arg_names;
	bool show_string_prefix;
	bool force;
	bool vfs_getname;
	int trace_pgfaults;
	char *perfconfig_events;
	struct {
		struct ordered_events data;	/* used when sort_events is set */
		u64 last;
	} oe;
};
205
/*
 * Accessor for one field of a tracepoint's raw payload: the offset into
 * sample->raw_data plus a reader that returns it either as an integer or
 * as a pointer into the payload.
 */
struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};
213
/*
 * Generate tp_field__u{8,16,32,64}(): read a same-endianness unsigned
 * integer of the given width from the raw payload at field->offset.
 * memcpy() is used because the offset may not be naturally aligned.
 */
#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value; \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);
226
/*
 * Same as TP_UINT_FIELD() but byte-swapping the value, for processing
 * perf.data files recorded on a machine of the opposite endianness.
 */
#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);
238
239static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
240{
241 field->offset = offset;
242
243 switch (size) {
244 case 1:
245 field->integer = tp_field__u8;
246 break;
247 case 2:
248 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
249 break;
250 case 4:
251 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
252 break;
253 case 8:
254 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
255 break;
256 default:
257 return -1;
258 }
259
260 return 0;
261}
262
/* Init an integer accessor from a libtraceevent format field description. */
static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
{
	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
}
267
/* Return a pointer into the raw payload at the field's offset. */
static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}
272
/* Bind the pointer accessor at 'offset'; always succeeds (returns 0). */
static int __tp_field__init_ptr(struct tp_field *field, int offset)
{
	field->offset = offset;
	field->pointer = tp_field__ptr;
	return 0;
}
279
/* Init a pointer accessor from a libtraceevent format field description. */
static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
{
	return __tp_field__init_ptr(field, format_field->offset);
}
284
/*
 * Field accessors for a sys_enter/sys_exit tracepoint: the syscall id plus
 * either the arguments blob (enter) or the return value (exit) — the two
 * never coexist, hence the union.
 */
struct syscall_tp {
	struct tp_field id;
	union {
		struct tp_field args, ret;
	};
};
291
292/*
293 * The evsel->priv as used by 'perf trace'
294 * sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
295 * fmt: for all the other tracepoints
296 */
297struct evsel_trace {
298 struct syscall_tp sc;
299 struct syscall_arg_fmt *fmt;
300};
301
/* Allocate a zero-initialized evsel_trace; NULL on allocation failure. */
static struct evsel_trace *evsel_trace__new(void)
{
	return zalloc(sizeof(struct evsel_trace));
}
306
307static void evsel_trace__delete(struct evsel_trace *et)
308{
309 if (et == NULL)
310 return;
311
312 zfree(&et->fmt);
313 free(et);
314}
315
316/*
317 * Used with raw_syscalls:sys_{enter,exit} and with the
318 * syscalls:sys_{enter,exit}_SYSCALL tracepoints
319 */
320static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel)
321{
322 struct evsel_trace *et = evsel->priv;
323
324 return &et->sc;
325}
326
327static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel)
328{
329 if (evsel->priv == NULL) {
330 evsel->priv = evsel_trace__new();
331 if (evsel->priv == NULL)
332 return NULL;
333 }
334
335 return __evsel__syscall_tp(evsel);
336}
337
338/*
339 * Used with all the other tracepoints.
340 */
341static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
342{
343 struct evsel_trace *et = evsel->priv;
344
345 return et->fmt;
346}
347
348static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
349{
350 struct evsel_trace *et = evsel->priv;
351
352 if (evsel->priv == NULL) {
353 et = evsel->priv = evsel_trace__new();
354
355 if (et == NULL)
356 return NULL;
357 }
358
359 if (et->fmt == NULL) {
360 et->fmt = calloc(evsel->tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
361 if (et->fmt == NULL)
362 goto out_delete;
363 }
364
365 return __evsel__syscall_arg_fmt(evsel);
366
367out_delete:
368 evsel_trace__delete(evsel->priv);
369 evsel->priv = NULL;
370 return NULL;
371}
372
373static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
374{
375 struct tep_format_field *format_field = evsel__field(evsel, name);
376
377 if (format_field == NULL)
378 return -1;
379
380 return tp_field__init_uint(field, format_field, evsel->needs_swap);
381}
382
/* Init the integer accessor sc->'name' from the tp field of the same name. */
#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_uint_field(evsel, &sc->name, #name); })
386
/*
 * Look up tracepoint field 'name' and bind a pointer accessor for it.
 * Returns -1 when the field does not exist in this event's format.
 */
static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
	struct tep_format_field *fmt_field = evsel__field(evsel, name);

	return fmt_field == NULL ? -1 : tp_field__init_ptr(field, fmt_field);
}
396
/* Init the pointer accessor sc->'name' from the tp field of the same name. */
#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
400
/* Free the evsel's private trace state, then the evsel itself. */
static void evsel__delete_priv(struct evsel *evsel)
{
	zfree(&evsel->priv);
	evsel__delete(evsel);
}
406
407static int evsel__init_syscall_tp(struct evsel *evsel)
408{
409 struct syscall_tp *sc = evsel__syscall_tp(evsel);
410
411 if (sc != NULL) {
412 if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
413 evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
414 return -ENOENT;
415
416 return 0;
417 }
418
419 return -ENOMEM;
420}
421
422static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
423{
424 struct syscall_tp *sc = evsel__syscall_tp(evsel);
425
426 if (sc != NULL) {
427 struct tep_format_field *syscall_id = evsel__field(tp, "id");
428 if (syscall_id == NULL)
429 syscall_id = evsel__field(tp, "__syscall_nr");
430 if (syscall_id == NULL ||
431 __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
432 return -EINVAL;
433
434 return 0;
435 }
436
437 return -ENOMEM;
438}
439
440static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
441{
442 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
443
444 return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
445}
446
447static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
448{
449 struct syscall_tp *sc = __evsel__syscall_tp(evsel);
450
451 return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
452}
453
454static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
455{
456 if (evsel__syscall_tp(evsel) != NULL) {
457 if (perf_evsel__init_sc_tp_uint_field(evsel, id))
458 return -ENOENT;
459
460 evsel->handler = handler;
461 return 0;
462 }
463
464 return -ENOMEM;
465}
466
/*
 * Create the raw_syscalls:sys_{enter,exit} evsel for 'direction' and wire
 * up its id accessor + handler. Returns NULL if the tracepoint can't be
 * opened or initialized.
 */
static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
	struct evsel *evsel = evsel__newtp("raw_syscalls", direction);

	/* older kernels (e.g. RHEL6) expose these under "syscalls" instead */
	if (IS_ERR(evsel)) {
		evsel = evsel__newtp("syscalls", direction);
		if (IS_ERR(evsel))
			return NULL;
	}

	if (evsel__init_raw_syscall_tp(evsel, handler)) {
		evsel__delete_priv(evsel);
		return NULL;
	}

	return evsel;
}
487
/*
 * Fetch the 'name' field from a sample via the accessors installed by the
 * perf_evsel__init_sc_tp_{uint,ptr}_field() macros above.
 */
#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.pointer(&fields->name, sample); })
495
496size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
497{
498 int idx = val - sa->offset;
499
500 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
501 size_t printed = scnprintf(bf, size, intfmt, val);
502 if (show_suffix)
503 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
504 return printed;
505 }
506
507 return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
508}
509
510size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
511{
512 int idx = val - sa->offset;
513
514 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
515 size_t printed = scnprintf(bf, size, intfmt, val);
516 if (show_prefix)
517 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
518 return printed;
519 }
520
521 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
522}
523
/* Print arg->val via the strarray in arg->parm, with 'intfmt' as fallback. */
static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
					        struct syscall_arg *arg)
{
	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}
530
/* Default strarray printer: decimal fallback for unknown values. */
static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}
536
537#define SCA_STRARRAY syscall_arg__scnprintf_strarray
538
/* Resolve a name token to a value via the strarray in arg->parm. */
bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul(arg->parm, bf, size, ret);
}
543
/* Resolve a '|'-separated flag name list via the strarray in arg->parm. */
bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul_flags(arg->parm, bf, size, ret);
}
548
/* Resolve a name token against a set of strarrays (arg->parm). */
bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarrays__strtoul(arg->parm, bf, size, ret);
}
553
/* Print a flags argument symbolically via the strarray in arg->parm. */
size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
}
558
559size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
560{
561 size_t printed;
562 int i;
563
564 for (i = 0; i < sas->nr_entries; ++i) {
565 struct strarray *sa = sas->entries[i];
566 int idx = val - sa->offset;
567
568 if (idx >= 0 && idx < sa->nr_entries) {
569 if (sa->entries[idx] == NULL)
570 break;
571 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
572 }
573 }
574
575 printed = scnprintf(bf, size, intfmt, val);
576 if (show_prefix)
577 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
578 return printed;
579}
580
581bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret)
582{
583 int i;
584
585 for (i = 0; i < sa->nr_entries; ++i) {
586 if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') {
587 *ret = sa->offset + i;
588 return true;
589 }
590 }
591
592 return false;
593}
594
595bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret)
596{
597 u64 val = 0;
598 char *tok = bf, *sep, *end;
599
600 *ret = 0;
601
602 while (size != 0) {
603 int toklen = size;
604
605 sep = memchr(tok, '|', size);
606 if (sep != NULL) {
607 size -= sep - tok + 1;
608
609 end = sep - 1;
610 while (end > tok && isspace(*end))
611 --end;
612
613 toklen = end - tok + 1;
614 }
615
616 while (isspace(*tok))
617 ++tok;
618
619 if (isalpha(*tok) || *tok == '_') {
620 if (!strarray__strtoul(sa, tok, toklen, &val))
621 return false;
622 } else
623 val = strtoul(tok, NULL, 0);
624
625 *ret |= (1 << (val - 1));
626
627 if (sep == NULL)
628 break;
629 tok = sep + 1;
630 }
631
632 return true;
633}
634
635bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret)
636{
637 int i;
638
639 for (i = 0; i < sas->nr_entries; ++i) {
640 struct strarray *sa = sas->entries[i];
641
642 if (strarray__strtoul(sa, bf, size, ret))
643 return true;
644 }
645
646 return false;
647}
648
/* Print arg->val via a set of strarrays (arg->parm), decimal fallback. */
size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
					struct syscall_arg *arg)
{
	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
}
654
655#ifndef AT_FDCWD
656#define AT_FDCWD -100
657#endif
658
659static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
660 struct syscall_arg *arg)
661{
662 int fd = arg->val;
663 const char *prefix = "AT_FD";
664
665 if (fd == AT_FDCWD)
666 return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");
667
668 return syscall_arg__scnprintf_fd(bf, size, arg);
669}
670
671#define SCA_FDAT syscall_arg__scnprintf_fd_at
672
673static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
674 struct syscall_arg *arg);
675
676#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
677
/* Render an argument as a hex literal, e.g. 0x7fff... */
size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%#lx", arg->val);
}
682
683size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
684{
685 if (arg->val == 0)
686 return scnprintf(bf, size, "NULL");
687 return syscall_arg__scnprintf_hex(bf, size, arg);
688}
689
/* Render an argument as a signed decimal int. */
size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%d", arg->val);
}
694
/* Render an argument as a signed decimal long. */
size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%ld", arg->val);
}
699
/* Print a length-delimited char array argument as a quoted string. */
static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
{
	// XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
	// fill missing comms using thread__set_comm()...
	// here or in a special syscall_arg__scnprintf_pid_sched_tp...
	return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
}
707
708#define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array
709
/* bpf(2) 'cmd' names, indexed by command number (enum bpf_cmd order). */
static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD", "OBJ_PIN", "OBJ_GET", "PROG_ATTACH",
	"PROG_DETACH", "PROG_TEST_RUN", "PROG_GET_NEXT_ID", "MAP_GET_NEXT_ID",
	"PROG_GET_FD_BY_ID", "MAP_GET_FD_BY_ID", "OBJ_GET_INFO_BY_FD",
	"PROG_QUERY", "RAW_TRACEPOINT_OPEN", "BTF_LOAD", "BTF_GET_FD_BY_ID",
	"TASK_FD_QUERY", "MAP_LOOKUP_AND_DELETE_ELEM", "MAP_FREEZE",
	"BTF_GET_NEXT_ID", "MAP_LOOKUP_BATCH", "MAP_LOOKUP_AND_DELETE_BATCH",
	"MAP_UPDATE_BATCH", "MAP_DELETE_BATCH", "LINK_CREATE", "LINK_UPDATE",
	"LINK_GET_FD_BY_ID", "LINK_GET_NEXT_ID", "ENABLE_STATS", "ITER_CREATE",
	"LINK_DETACH", "PROG_BIND_MAP",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");
723
/* fsmount(2) flags; index == bit value (only CLOEXEC == 1 defined). */
static const char *fsmount_flags[] = {
	[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");

/* fsconfig(2) command names, generated from the uapi header. */
#include "trace/beauty/generated/fsconfig_arrays.c"

static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");

/* epoll_ctl(2) op values start at 1, hence the offset. */
static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);

/* {get,set}itimer(2) 'which' values. */
static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");

/* keyctl(2) operation names, indexed by command number. */
static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");

/* lseek(2) 'whence' values; DATA/HOLE only where the headers define them. */
static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences, "SEEK_");
757
/* fcntl(2) command names, indexed by command number. */
static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
	"GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");

/* Linux-specific fcntl(2) commands, numbered from F_LINUX_SPECIFIC_BASE. */
static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};

static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);

/* Both fcntl ranges, tried in order by the strarrays helpers. */
static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);

/* {get,set}rlimit(2)/prlimit(2) resource names. */
static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");

/* sigprocmask(2) 'how' values. */
static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow, "SIG_");

/* clock_gettime(2) & friends clk_id names, indexed by id. */
static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid, "CLOCK_");
797
798static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
799 struct syscall_arg *arg)
800{
801 bool show_prefix = arg->show_string_prefix;
802 const char *suffix = "_OK";
803 size_t printed = 0;
804 int mode = arg->val;
805
806 if (mode == F_OK) /* 0 */
807 return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
808#define P_MODE(n) \
809 if (mode & n##_OK) { \
810 printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
811 mode &= ~n##_OK; \
812 }
813
814 P_MODE(R);
815 P_MODE(W);
816 P_MODE(X);
817#undef P_MODE
818
819 if (mode)
820 printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
821
822 return printed;
823}
824
825#define SCA_ACCMODE syscall_arg__scnprintf_access_mode
826
827static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
828 struct syscall_arg *arg);
829
830#define SCA_FILENAME syscall_arg__scnprintf_filename
831
832static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
833 struct syscall_arg *arg)
834{
835 bool show_prefix = arg->show_string_prefix;
836 const char *prefix = "O_";
837 int printed = 0, flags = arg->val;
838
839#define P_FLAG(n) \
840 if (flags & O_##n) { \
841 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
842 flags &= ~O_##n; \
843 }
844
845 P_FLAG(CLOEXEC);
846 P_FLAG(NONBLOCK);
847#undef P_FLAG
848
849 if (flags)
850 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
851
852 return printed;
853}
854
855#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
856
857#ifndef GRND_NONBLOCK
858#define GRND_NONBLOCK 0x0001
859#endif
860#ifndef GRND_RANDOM
861#define GRND_RANDOM 0x0002
862#endif
863
864static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
865 struct syscall_arg *arg)
866{
867 bool show_prefix = arg->show_string_prefix;
868 const char *prefix = "GRND_";
869 int printed = 0, flags = arg->val;
870
871#define P_FLAG(n) \
872 if (flags & GRND_##n) { \
873 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
874 flags &= ~GRND_##n; \
875 }
876
877 P_FLAG(RANDOM);
878 P_FLAG(NONBLOCK);
879#undef P_FLAG
880
881 if (flags)
882 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
883
884 return printed;
885}
886
887#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
888
/* Initialize a syscall_arg_fmt slot to print/parse via a strarray table. */
#define STRARRAY(name, array) \
	  { .scnprintf	= SCA_STRARRAY, \
	    .strtoul	= STUL_STRARRAY, \
	    .parm	= &strarray__##array, }

/* Same, but treating the value as a bitmask of flags. */
#define STRARRAY_FLAGS(name, array) \
	  { .scnprintf	= SCA_STRARRAY_FLAGS, \
	    .strtoul	= STUL_STRARRAY_FLAGS, \
	    .parm	= &strarray__##array, }
898
899#include "trace/beauty/arch_errno_names.c"
900#include "trace/beauty/eventfd.c"
901#include "trace/beauty/futex_op.c"
902#include "trace/beauty/futex_val3.c"
903#include "trace/beauty/mmap.c"
904#include "trace/beauty/mode_t.c"
905#include "trace/beauty/msg_flags.c"
906#include "trace/beauty/open_flags.c"
907#include "trace/beauty/perf_event_open.c"
908#include "trace/beauty/pid.c"
909#include "trace/beauty/sched_policy.c"
910#include "trace/beauty/seccomp.c"
911#include "trace/beauty/signum.c"
912#include "trace/beauty/socket_type.c"
913#include "trace/beauty/waitid_options.c"
914
915static const struct syscall_fmt syscall_fmts[] = {
916 { .name = "access",
917 .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
918 { .name = "arch_prctl",
919 .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
920 [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
921 { .name = "bind",
922 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
923 [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
924 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
925 { .name = "bpf",
926 .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
927 { .name = "brk", .hexret = true,
928 .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
929 { .name = "clock_gettime",
930 .arg = { [0] = STRARRAY(clk_id, clockid), }, },
931 { .name = "clock_nanosleep",
932 .arg = { [2] = { .scnprintf = SCA_TIMESPEC, /* rqtp */ }, }, },
933 { .name = "clone", .errpid = true, .nr_args = 5,
934 .arg = { [0] = { .name = "flags", .scnprintf = SCA_CLONE_FLAGS, },
935 [1] = { .name = "child_stack", .scnprintf = SCA_HEX, },
936 [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
937 [3] = { .name = "child_tidptr", .scnprintf = SCA_HEX, },
938 [4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, },
939 { .name = "close",
940 .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
941 { .name = "connect",
942 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
943 [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
944 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
945 { .name = "epoll_ctl",
946 .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
947 { .name = "eventfd2",
948 .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
949 { .name = "fchmodat",
950 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
951 { .name = "fchownat",
952 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
953 { .name = "fcntl",
954 .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
955 .strtoul = STUL_STRARRAYS,
956 .parm = &strarrays__fcntl_cmds_arrays,
957 .show_zero = true, },
958 [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
959 { .name = "flock",
960 .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
961 { .name = "fsconfig",
962 .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
963 { .name = "fsmount",
964 .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
965 [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
966 { .name = "fspick",
967 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
968 [1] = { .scnprintf = SCA_FILENAME, /* path */ },
969 [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
970 { .name = "fstat", .alias = "newfstat", },
971 { .name = "fstatat", .alias = "newfstatat", },
972 { .name = "futex",
973 .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
974 [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
975 { .name = "futimesat",
976 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
977 { .name = "getitimer",
978 .arg = { [0] = STRARRAY(which, itimers), }, },
979 { .name = "getpid", .errpid = true, },
980 { .name = "getpgid", .errpid = true, },
981 { .name = "getppid", .errpid = true, },
982 { .name = "getrandom",
983 .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
984 { .name = "getrlimit",
985 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
986 { .name = "getsockopt",
987 .arg = { [1] = STRARRAY(level, socket_level), }, },
988 { .name = "gettid", .errpid = true, },
989 { .name = "ioctl",
990 .arg = {
991#if defined(__i386__) || defined(__x86_64__)
992/*
993 * FIXME: Make this available to all arches.
994 */
995 [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
996 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
997#else
998 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
999#endif
1000 { .name = "kcmp", .nr_args = 5,
1001 .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, },
1002 [1] = { .name = "pid2", .scnprintf = SCA_PID, },
1003 [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, },
1004 [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, },
1005 [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, },
1006 { .name = "keyctl",
1007 .arg = { [0] = STRARRAY(option, keyctl_options), }, },
1008 { .name = "kill",
1009 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1010 { .name = "linkat",
1011 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1012 { .name = "lseek",
1013 .arg = { [2] = STRARRAY(whence, whences), }, },
1014 { .name = "lstat", .alias = "newlstat", },
1015 { .name = "madvise",
1016 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
1017 [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
1018 { .name = "mkdirat",
1019 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1020 { .name = "mknodat",
1021 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1022 { .name = "mmap", .hexret = true,
1023/* The standard mmap maps to old_mmap on s390x */
1024#if defined(__s390x__)
1025 .alias = "old_mmap",
1026#endif
1027 .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
1028 [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */
1029 .strtoul = STUL_STRARRAY_FLAGS,
1030 .parm = &strarray__mmap_flags, },
1031 [5] = { .scnprintf = SCA_HEX, /* offset */ }, }, },
1032 { .name = "mount",
1033 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
1034 [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
1035 .mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
1036 { .name = "move_mount",
1037 .arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ },
1038 [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
1039 [2] = { .scnprintf = SCA_FDAT, /* to_dfd */ },
1040 [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
1041 [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
1042 { .name = "mprotect",
1043 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
1044 [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, },
1045 { .name = "mq_unlink",
1046 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
1047 { .name = "mremap", .hexret = true,
1048 .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
1049 { .name = "name_to_handle_at",
1050 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1051 { .name = "newfstatat",
1052 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1053 { .name = "open",
1054 .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1055 { .name = "open_by_handle_at",
1056 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
1057 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1058 { .name = "openat",
1059 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
1060 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1061 { .name = "perf_event_open",
1062 .arg = { [0] = { .scnprintf = SCA_PERF_ATTR, /* attr */ },
1063 [2] = { .scnprintf = SCA_INT, /* cpu */ },
1064 [3] = { .scnprintf = SCA_FD, /* group_fd */ },
1065 [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
1066 { .name = "pipe2",
1067 .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
1068 { .name = "pkey_alloc",
1069 .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, },
1070 { .name = "pkey_free",
1071 .arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, },
1072 { .name = "pkey_mprotect",
1073 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
1074 [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
1075 [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
1076 { .name = "poll", .timeout = true, },
1077 { .name = "ppoll", .timeout = true, },
1078 { .name = "prctl",
1079 .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
1080 .strtoul = STUL_STRARRAY,
1081 .parm = &strarray__prctl_options, },
1082 [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
1083 [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
1084 { .name = "pread", .alias = "pread64", },
1085 { .name = "preadv", .alias = "pread", },
1086 { .name = "prlimit64",
1087 .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
1088 { .name = "pwrite", .alias = "pwrite64", },
1089 { .name = "readlinkat",
1090 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1091 { .name = "recvfrom",
1092 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1093 { .name = "recvmmsg",
1094 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1095 { .name = "recvmsg",
1096 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1097 { .name = "renameat",
1098 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
1099 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
1100 { .name = "renameat2",
1101 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
1102 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
1103 [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
1104 { .name = "rt_sigaction",
1105 .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1106 { .name = "rt_sigprocmask",
1107 .arg = { [0] = STRARRAY(how, sighow), }, },
1108 { .name = "rt_sigqueueinfo",
1109 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1110 { .name = "rt_tgsigqueueinfo",
1111 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1112 { .name = "sched_setscheduler",
1113 .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
1114 { .name = "seccomp",
1115 .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP, /* op */ },
1116 [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
1117 { .name = "select", .timeout = true, },
1118 { .name = "sendfile", .alias = "sendfile64", },
1119 { .name = "sendmmsg",
1120 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1121 { .name = "sendmsg",
1122 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1123 { .name = "sendto",
1124 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
1125 [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
1126 { .name = "set_tid_address", .errpid = true, },
1127 { .name = "setitimer",
1128 .arg = { [0] = STRARRAY(which, itimers), }, },
1129 { .name = "setrlimit",
1130 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
1131 { .name = "setsockopt",
1132 .arg = { [1] = STRARRAY(level, socket_level), }, },
1133 { .name = "socket",
1134 .arg = { [0] = STRARRAY(family, socket_families),
1135 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
1136 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
1137 { .name = "socketpair",
1138 .arg = { [0] = STRARRAY(family, socket_families),
1139 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
1140 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
1141 { .name = "stat", .alias = "newstat", },
1142 { .name = "statx",
1143 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ },
1144 [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } ,
1145 [3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, },
1146 { .name = "swapoff",
1147 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
1148 { .name = "swapon",
1149 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
1150 { .name = "symlinkat",
1151 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1152 { .name = "sync_file_range",
1153 .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
1154 { .name = "tgkill",
1155 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1156 { .name = "tkill",
1157 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1158 { .name = "umount2", .alias = "umount",
1159 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
1160 { .name = "uname", .alias = "newuname", },
1161 { .name = "unlinkat",
1162 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1163 { .name = "utimensat",
1164 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
1165 { .name = "wait4", .errpid = true,
1166 .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
1167 { .name = "waitid", .errpid = true,
1168 .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
1169};
1170
1171static int syscall_fmt__cmp(const void *name, const void *fmtp)
1172{
1173 const struct syscall_fmt *fmt = fmtp;
1174 return strcmp(name, fmt->name);
1175}
1176
1177static const struct syscall_fmt *__syscall_fmt__find(const struct syscall_fmt *fmts,
1178 const int nmemb,
1179 const char *name)
1180{
1181 return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
1182}
1183
1184static const struct syscall_fmt *syscall_fmt__find(const char *name)
1185{
1186 const int nmemb = ARRAY_SIZE(syscall_fmts);
1187 return __syscall_fmt__find(syscall_fmts, nmemb, name);
1188}
1189
1190static const struct syscall_fmt *__syscall_fmt__find_by_alias(const struct syscall_fmt *fmts,
1191 const int nmemb, const char *alias)
1192{
1193 int i;
1194
1195 for (i = 0; i < nmemb; ++i) {
1196 if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
1197 return &fmts[i];
1198 }
1199
1200 return NULL;
1201}
1202
1203static const struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
1204{
1205 const int nmemb = ARRAY_SIZE(syscall_fmts);
1206 return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
1207}
1208
/*
 * Per-syscall-id descriptor, filled lazily by trace__read_syscall_info().
 *
 * is_exit: is this "exit" or "exit_group"?
 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
 * nonexistent: Just a hole in the syscall table, syscall id not allocated
 */
struct syscall {
	struct tep_event *tp_format;	/* "sys_enter_<name>" tracepoint format */
	int nr_args;			/* entries in arg_fmt[] (minus the nr field, if dropped) */
	int args_size;
	struct {
		struct bpf_program *sys_enter,	/* presumably BPF augmentation progs — confirm at use sites */
				   *sys_exit;
	} bpf_prog;
	bool is_exit;
	bool is_open;
	bool nonexistent;
	struct tep_format_field *args;	/* tracepoint fields; '__syscall_nr'/'nr' skipped */
	const char *name;
	const struct syscall_fmt *fmt;	/* optional pretty-printing overrides from syscall_fmts[] */
	struct syscall_arg_fmt *arg_fmt;	/* per-argument formatters */
};
1231
1232/*
1233 * We need to have this 'calculated' boolean because in some cases we really
1234 * don't know what is the duration of a syscall, for instance, when we start
1235 * a session and some threads are waiting for a syscall to finish, say 'poll',
1236 * in which case all we can do is to print "( ? ) for duration and for the
1237 * start timestamp.
1238 */
1239static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
1240{
1241 double duration = (double)t / NSEC_PER_MSEC;
1242 size_t printed = fprintf(fp, "(");
1243
1244 if (!calculated)
1245 printed += fprintf(fp, " ");
1246 else if (duration >= 1.0)
1247 printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
1248 else if (duration >= 0.01)
1249 printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
1250 else
1251 printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
1252 return printed + fprintf(fp, "): ");
1253}
1254
/**
 * struct thread_trace - per-thread state, hung off thread__priv().
 *
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 * filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 * formatter, for instance, fcntl may return fds, file flags, etc.
 */
struct thread_trace {
	u64 entry_time;			/* tstamp of pending sys_enter, 0 == unknown */
	bool entry_pending;
	unsigned long nr_events;	/* bumped by thread__trace() */
	unsigned long pfmaj, pfmin;	/* presumably major/minor page-fault counts — confirm at use sites */
	char *entry_str;		/* buffer the sys_enter line is assembled in */
	double runtime_ms;
	size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	struct {
		unsigned long ptr;
		short int entry_str_pos;
		bool pending_open;
		unsigned int namelen;
		char *name;
	} filename;
	struct {
		int max;		/* highest valid index, -1 when table empty */
		struct file *table;	/* max + 1 entries, indexed by fd */
	} files;

	struct intlist *syscall_stats;
};
1284
1285static struct thread_trace *thread_trace__new(void)
1286{
1287 struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
1288
1289 if (ttrace) {
1290 ttrace->files.max = -1;
1291 ttrace->syscall_stats = intlist__new(NULL);
1292 }
1293
1294 return ttrace;
1295}
1296
1297static void thread_trace__free_files(struct thread_trace *ttrace);
1298
1299static void thread_trace__delete(void *pttrace)
1300{
1301 struct thread_trace *ttrace = pttrace;
1302
1303 if (!ttrace)
1304 return;
1305
1306 intlist__delete(ttrace->syscall_stats);
1307 ttrace->syscall_stats = NULL;
1308 thread_trace__free_files(ttrace);
1309 zfree(&ttrace->entry_str);
1310 free(ttrace);
1311}
1312
1313static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
1314{
1315 struct thread_trace *ttrace;
1316
1317 if (thread == NULL)
1318 goto fail;
1319
1320 if (thread__priv(thread) == NULL)
1321 thread__set_priv(thread, thread_trace__new());
1322
1323 if (thread__priv(thread) == NULL)
1324 goto fail;
1325
1326 ttrace = thread__priv(thread);
1327 ++ttrace->nr_events;
1328
1329 return ttrace;
1330fail:
1331 color_fprintf(fp, PERF_COLOR_RED,
1332 "WARNING: not enough memory, dropping samples!\n");
1333 return NULL;
1334}
1335
1336
1337void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
1338 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
1339{
1340 struct thread_trace *ttrace = thread__priv(arg->thread);
1341
1342 ttrace->ret_scnprintf = ret_scnprintf;
1343}
1344
/* Bit flags, presumably selecting major/minor page-fault tracing — confirm at use sites (not visible in this chunk). */
#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

/* Size of the per-thread buffer (thread_trace::entry_str) used to assemble the sys_enter line. */
static const size_t trace__entry_str_size = 2048;
1349
1350static void thread_trace__free_files(struct thread_trace *ttrace)
1351{
1352 for (int i = 0; i < ttrace->files.max; ++i) {
1353 struct file *file = ttrace->files.table + i;
1354 zfree(&file->pathname);
1355 }
1356
1357 zfree(&ttrace->files.table);
1358 ttrace->files.max = -1;
1359}
1360
/*
 * Return the cache slot for 'fd', growing the table on demand.
 * files.max is the highest index allocated so far (-1 when empty); the
 * table always holds files.max + 1 entries.
 */
static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
	if (fd < 0)
		return NULL;

	if (fd > ttrace->files.max) {
		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));

		if (nfiles == NULL)
			return NULL;	/* old table still valid, just not grown */

		if (ttrace->files.max != -1) {
			/* Grown table: zero only the newly appended slots. */
			memset(nfiles + ttrace->files.max + 1, 0,
			       (fd - ttrace->files.max) * sizeof(struct file));
		} else {
			/* Brand new table: zero everything. */
			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
		}

		ttrace->files.table = nfiles;
		ttrace->files.max = fd;
	}

	return ttrace->files.table + fd;
}
1385
/* Public wrapper: resolve the fd slot through the thread's private data. */
struct file *thread__files_entry(struct thread *thread, int fd)
{
	struct thread_trace *ttrace = thread__priv(thread);

	return thread_trace__files_entry(ttrace, fd);
}
1390
1391static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
1392{
1393 struct thread_trace *ttrace = thread__priv(thread);
1394 struct file *file = thread_trace__files_entry(ttrace, fd);
1395
1396 if (file != NULL) {
1397 struct stat st;
1398 if (stat(pathname, &st) == 0)
1399 file->dev_maj = major(st.st_rdev);
1400 file->pathname = strdup(pathname);
1401 if (file->pathname)
1402 return 0;
1403 }
1404
1405 return -1;
1406}
1407
1408static int thread__read_fd_path(struct thread *thread, int fd)
1409{
1410 char linkname[PATH_MAX], pathname[PATH_MAX];
1411 struct stat st;
1412 int ret;
1413
1414 if (thread__pid(thread) == thread__tid(thread)) {
1415 scnprintf(linkname, sizeof(linkname),
1416 "/proc/%d/fd/%d", thread__pid(thread), fd);
1417 } else {
1418 scnprintf(linkname, sizeof(linkname),
1419 "/proc/%d/task/%d/fd/%d",
1420 thread__pid(thread), thread__tid(thread), fd);
1421 }
1422
1423 if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
1424 return -1;
1425
1426 ret = readlink(linkname, pathname, sizeof(pathname));
1427
1428 if (ret < 0 || ret > st.st_size)
1429 return -1;
1430
1431 pathname[ret] = '\0';
1432 return trace__set_fd_pathname(thread, fd, pathname);
1433}
1434
1435static const char *thread__fd_path(struct thread *thread, int fd,
1436 struct trace *trace)
1437{
1438 struct thread_trace *ttrace = thread__priv(thread);
1439
1440 if (ttrace == NULL || trace->fd_path_disabled)
1441 return NULL;
1442
1443 if (fd < 0)
1444 return NULL;
1445
1446 if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
1447 if (!trace->live)
1448 return NULL;
1449 ++trace->stats.proc_getname;
1450 if (thread__read_fd_path(thread, fd))
1451 return NULL;
1452 }
1453
1454 return ttrace->files.table[fd].pathname;
1455}
1456
1457size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
1458{
1459 int fd = arg->val;
1460 size_t printed = scnprintf(bf, size, "%d", fd);
1461 const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1462
1463 if (path)
1464 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1465
1466 return printed;
1467}
1468
1469size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
1470{
1471 size_t printed = scnprintf(bf, size, "%d", fd);
1472 struct thread *thread = machine__find_thread(trace->host, pid, pid);
1473
1474 if (thread) {
1475 const char *path = thread__fd_path(thread, fd, trace);
1476
1477 if (path)
1478 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1479
1480 thread__put(thread);
1481 }
1482
1483 return printed;
1484}
1485
1486static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
1487 struct syscall_arg *arg)
1488{
1489 int fd = arg->val;
1490 size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
1491 struct thread_trace *ttrace = thread__priv(arg->thread);
1492
1493 if (ttrace && fd >= 0 && fd <= ttrace->files.max)
1494 zfree(&ttrace->files.table[fd].pathname);
1495
1496 return printed;
1497}
1498
1499static void thread__set_filename_pos(struct thread *thread, const char *bf,
1500 unsigned long ptr)
1501{
1502 struct thread_trace *ttrace = thread__priv(thread);
1503
1504 ttrace->filename.ptr = ptr;
1505 ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1506}
1507
/*
 * Print a string payload appended after the raw args (presumably by the
 * augmentation machinery — confirm against the producer): a {size, value}
 * pair, then advance past it.
 */
static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
	/*
	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
	 * we would have two strings, each prefixed by its size.
	 */
	int consumed = sizeof(*augmented_arg) + augmented_arg->size;

	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	return printed;
}
1523
1524static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1525 struct syscall_arg *arg)
1526{
1527 unsigned long ptr = arg->val;
1528
1529 if (arg->augmented.args)
1530 return syscall_arg__scnprintf_augmented_string(arg, bf, size);
1531
1532 if (!arg->trace->vfs_getname)
1533 return scnprintf(bf, size, "%#x", ptr);
1534
1535 thread__set_filename_pos(arg->thread, bf, ptr);
1536 return 0;
1537}
1538
1539static bool trace__filter_duration(struct trace *trace, double t)
1540{
1541 return t < (trace->duration_filter * NSEC_PER_MSEC);
1542}
1543
1544static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1545{
1546 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1547
1548 return fprintf(fp, "%10.3f ", ts);
1549}
1550
1551/*
1552 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
1553 * using ttrace->entry_time for a thread that receives a sys_exit without
1554 * first having received a sys_enter ("poll" issued before tracing session
1555 * starts, lost sys_enter exit due to ring buffer overflow).
1556 */
1557static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1558{
1559 if (tstamp > 0)
1560 return __trace__fprintf_tstamp(trace, tstamp, fp);
1561
1562 return fprintf(fp, " ? ");
1563}
1564
/* PID of the traced workload child; the SIGCHLD handler compares against it. */
static pid_t workload_pid = -1;
/* Set from signal handlers; sig_atomic_t keeps the accesses async-signal-safe. */
static volatile sig_atomic_t done = false;
static volatile sig_atomic_t interrupted = false;
1568
1569static void sighandler_interrupt(int sig __maybe_unused)
1570{
1571 done = interrupted = true;
1572}
1573
1574static void sighandler_chld(int sig __maybe_unused, siginfo_t *info,
1575 void *context __maybe_unused)
1576{
1577 if (info->si_pid == workload_pid)
1578 done = true;
1579}
1580
1581static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
1582{
1583 size_t printed = 0;
1584
1585 if (trace->multiple_threads) {
1586 if (trace->show_comm)
1587 printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1588 printed += fprintf(fp, "%d ", thread__tid(thread));
1589 }
1590
1591 return printed;
1592}
1593
1594static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1595 u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
1596{
1597 size_t printed = 0;
1598
1599 if (trace->show_tstamp)
1600 printed = trace__fprintf_tstamp(trace, tstamp, fp);
1601 if (trace->show_duration)
1602 printed += fprintf_duration(duration, duration_calculated, fp);
1603 return printed + trace__fprintf_comm_tid(trace, thread, fp);
1604}
1605
1606static int trace__process_event(struct trace *trace, struct machine *machine,
1607 union perf_event *event, struct perf_sample *sample)
1608{
1609 int ret = 0;
1610
1611 switch (event->header.type) {
1612 case PERF_RECORD_LOST:
1613 color_fprintf(trace->output, PERF_COLOR_RED,
1614 "LOST %" PRIu64 " events!\n", event->lost.lost);
1615 ret = machine__process_lost_event(machine, event, sample);
1616 break;
1617 default:
1618 ret = machine__process_event(machine, event, sample);
1619 break;
1620 }
1621
1622 return ret;
1623}
1624
1625static int trace__tool_process(struct perf_tool *tool,
1626 union perf_event *event,
1627 struct perf_sample *sample,
1628 struct machine *machine)
1629{
1630 struct trace *trace = container_of(tool, struct trace, tool);
1631 return trace__process_event(trace, machine, event, sample);
1632}
1633
1634static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
1635{
1636 struct machine *machine = vmachine;
1637
1638 if (machine->kptr_restrict_warned)
1639 return NULL;
1640
1641 if (symbol_conf.kptr_restrict) {
1642 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
1643 "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
1644 "Kernel samples will not be resolved.\n");
1645 machine->kptr_restrict_warned = true;
1646 return NULL;
1647 }
1648
1649 return machine__resolve_kernel_addr(vmachine, addrp, modp);
1650}
1651
/*
 * Set up symbol resolution and the host machine object, then synthesize
 * events for already-running threads (see __machine__synthesize_threads)
 * so they are known before samples start flowing.
 */
static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
{
	int err = symbol__init(NULL);

	if (err)
		return err;

	trace->host = machine__new_host();
	if (trace->host == NULL)
		return -ENOMEM;

	/* Per-thread private data (struct thread_trace) dies with its thread. */
	thread__set_priv_destructor(thread_trace__delete);

	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
	if (err < 0)
		goto out;

	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
					    evlist->core.threads, trace__tool_process,
					    true, false, 1);
out:
	/* Undo symbol__init() on any failure past it. */
	if (err)
		symbol__exit();

	return err;
}
1678
1679static void trace__symbols__exit(struct trace *trace)
1680{
1681 machine__exit(trace->host);
1682 trace->host = NULL;
1683
1684 symbol__exit();
1685}
1686
1687static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
1688{
1689 int idx;
1690
1691 if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
1692 nr_args = sc->fmt->nr_args;
1693
1694 sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
1695 if (sc->arg_fmt == NULL)
1696 return -1;
1697
1698 for (idx = 0; idx < nr_args; ++idx) {
1699 if (sc->fmt)
1700 sc->arg_fmt[idx] = sc->fmt->arg[idx];
1701 }
1702
1703 sc->nr_args = nr_args;
1704 return 0;
1705}
1706
/*
 * Formatters matched by tracepoint field *name* rather than by syscall.
 * NB: keep sorted by ->name, this table is searched with bsearch().
 */
static const struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
	{ .name = "msr",	.scnprintf = SCA_X86_MSR, .strtoul = STUL_X86_MSR, },
	{ .name = "vector",	.scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, },
};
1711
1712static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
1713{
1714 const struct syscall_arg_fmt *fmt = fmtp;
1715 return strcmp(name, fmt->name);
1716}
1717
1718static const struct syscall_arg_fmt *
1719__syscall_arg_fmt__find_by_name(const struct syscall_arg_fmt *fmts, const int nmemb,
1720 const char *name)
1721{
1722 return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
1723}
1724
1725static const struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
1726{
1727 const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
1728 return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
1729}
1730
/*
 * Pick a default pretty-printer for each tracepoint field that doesn't
 * already have one, using the field's type and naming conventions.
 * Returns the last field visited so the caller can compute the raw args
 * payload size, or NULL when 'field' was NULL.
 */
static struct tep_format_field *
syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
{
	struct tep_format_field *last_field = NULL;
	int len;

	for (; field; field = field->next, ++arg) {
		last_field = field;

		if (arg->scnprintf)
			continue;	/* an explicit formatter wins */

		len = strlen(field->name);

		/* "const char *" fields named "*name" or containing "path" hold filenames. */
		if (strcmp(field->type, "const char *") == 0 &&
		    ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
		     strstr(field->name, "path") != NULL))
			arg->scnprintf = SCA_FILENAME;
		else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
			arg->scnprintf = SCA_PTR;
		else if (strcmp(field->type, "pid_t") == 0)
			arg->scnprintf = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			arg->scnprintf = SCA_MODE_T;
		else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
			arg->scnprintf = SCA_CHAR_ARRAY;
			arg->nr_entries = field->arraylen;
		} else if ((strcmp(field->type, "int") == 0 ||
			    strcmp(field->type, "unsigned int") == 0 ||
			    strcmp(field->type, "long") == 0) &&
			   len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 * 65 int
			 * 23 unsigned int
			 * 7 unsigned long
			 */
			arg->scnprintf = SCA_FD;
		} else {
			/* Last resort: the by-name table (msr, vector, ...). */
			const struct syscall_arg_fmt *fmt =
				syscall_arg_fmt__find_by_name(field->name);

			if (fmt) {
				arg->scnprintf = fmt->scnprintf;
				arg->strtoul = fmt->strtoul;
			}
		}
	}

	return last_field;
}
1783
1784static int syscall__set_arg_fmts(struct syscall *sc)
1785{
1786 struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);
1787
1788 if (last_field)
1789 sc->args_size = last_field->offset + last_field->size;
1790
1791 return 0;
1792}
1793
/*
 * Lazily fill trace->syscalls.table[id]: resolve the syscall's name, its
 * "sys_enter_<name>" tracepoint format and per-argument formatters.
 * Returns 0 on success, -EEXIST for ids already known to be holes, or a
 * negative errno/PTR_ERR value on failure.
 */
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = syscalltbl__name(trace->sctbl, id);

#ifdef HAVE_SYSCALL_TABLE_SUPPORT
	/* The generated table knows max_id upfront: allocate it all at once. */
	if (trace->syscalls.table == NULL) {
		trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
		if (trace->syscalls.table == NULL)
			return -ENOMEM;
	}
#else
	if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
		// When using libaudit we don't know beforehand what is the max syscall id
		struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (table == NULL)
			return -ENOMEM;

		// Need to memset from offset 0 and +1 members if brand new
		if (trace->syscalls.table == NULL)
			memset(table, 0, (id + 1) * sizeof(*sc));
		else
			memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));

		trace->syscalls.table = table;
		trace->sctbl->syscalls.max_id = id;
	}
#endif
	sc = trace->syscalls.table + id;
	if (sc->nonexistent)
		return -EEXIST;	/* probed before and found missing */

	if (name == NULL) {
		sc->nonexistent = true;
		return -EEXIST;
	}

	sc->name = name;
	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	/* Retry under the fmt alias, e.g. "old_mmap" for s390x's mmap. */
	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	/*
	 * Fails to read trace point format via sysfs node, so the trace point
	 * doesn't exist. Set the 'nonexistent' flag as true.
	 */
	if (IS_ERR(sc->tp_format)) {
		sc->nonexistent = true;
		return PTR_ERR(sc->tp_format);
	}

	/* NOTE(review): IS_ERR() is always false here — we returned above —
	 * so the ternary always picks nr_fields. */
	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
					RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
		return -ENOMEM;

	sc->args = sc->tp_format->format.fields;
	/*
	 * We need to check and discard the first variable '__syscall_nr'
	 * or 'nr' that mean the syscall number. It is needless here.
	 * So drop '__syscall_nr' or 'nr' field but does not exist on older kernels.
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;
		--sc->nr_args;
	}

	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");

	return syscall__set_arg_fmts(sc);
}
1873
1874static int evsel__init_tp_arg_scnprintf(struct evsel *evsel)
1875{
1876 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
1877
1878 if (fmt != NULL) {
1879 syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields);
1880 return 0;
1881 }
1882
1883 return -ENOMEM;
1884}
1885
/*
 * qsort()/bsearch() comparator for ints. Compare instead of subtracting:
 * '*one - *another' overflows (undefined behavior) for operands of
 * opposite sign and large magnitude, e.g. INT_MIN - INT_MAX.
 */
static int intcmp(const void *a, const void *b)
{
	const int *one = a, *another = b;

	return (*one > *another) - (*one < *another);
}
1892
/*
 * Translate the user-supplied syscall name list (trace->ev_qualifier) into
 * a sorted id array (trace->ev_qualifier_ids), expanding glob patterns;
 * unknown names are only reported at debug verbosity and skipped.
 */
static int trace__validate_ev_qualifier(struct trace *trace)
{
	int err = 0;
	bool printed_invalid_prefix = false;
	struct str_node *pos;
	size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);

	trace->ev_qualifier_ids.entries = malloc(nr_allocated *
						 sizeof(trace->ev_qualifier_ids.entries[0]));

	if (trace->ev_qualifier_ids.entries == NULL) {
		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
		      trace->output);
		err = -EINVAL;
		goto out;
	}

	strlist__for_each_entry(pos, trace->ev_qualifier) {
		const char *sc = pos->s;
		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;

		if (id < 0) {
			/* Not an exact name: try it as a glob pattern. */
			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
			if (id >= 0)
				goto matches;

			if (!printed_invalid_prefix) {
				pr_debug("Skipping unknown syscalls: ");
				printed_invalid_prefix = true;
			} else {
				pr_debug(", ");
			}

			pr_debug("%s", sc);
			continue;
		}
matches:
		trace->ev_qualifier_ids.entries[nr_used++] = id;
		if (match_next == -1)
			continue;

		/* Collect the remaining glob matches, growing in chunks of 8. */
		while (1) {
			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
			if (id < 0)
				break;
			if (nr_allocated == nr_used) {
				void *entries;

				nr_allocated += 8;
				entries = realloc(trace->ev_qualifier_ids.entries,
						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
				if (entries == NULL) {
					err = -ENOMEM;
					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
					goto out_free;
				}
				trace->ev_qualifier_ids.entries = entries;
			}
			trace->ev_qualifier_ids.entries[nr_used++] = id;
		}
	}

	trace->ev_qualifier_ids.nr = nr_used;
	/* Sorted so trace__syscall_enabled() can bsearch() with intcmp. */
	qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
out:
	if (printed_invalid_prefix)
		pr_debug("\n");
	return err;
out_free:
	zfree(&trace->ev_qualifier_ids.entries);
	trace->ev_qualifier_ids.nr = 0;
	goto out;
}
1966
1967static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
1968{
1969 bool in_ev_qualifier;
1970
1971 if (trace->ev_qualifier_ids.nr == 0)
1972 return true;
1973
1974 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
1975 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
1976
1977 if (in_ev_qualifier)
1978 return !trace->not_ev_qualifier;
1979
1980 return trace->not_ev_qualifier;
1981}
1982
1983/*
1984 * args is to be interpreted as a series of longs but we need to handle
1985 * 8-byte unaligned accesses. args points to raw_data within the event
1986 * and raw_data is guaranteed to be 8-byte unaligned because it is
1987 * preceded by raw_size which is a u32. So we need to copy args to a temp
1988 * variable to read it. Most notably this avoids extended load instructions
1989 * on unaligned addresses
1990 */
1991unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
1992{
1993 unsigned long val;
1994 unsigned char *p = arg->args + sizeof(unsigned long) * idx;
1995
1996 memcpy(&val, p, sizeof(val));
1997 return val;
1998}
1999
2000static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
2001 struct syscall_arg *arg)
2002{
2003 if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
2004 return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);
2005
2006 return scnprintf(bf, size, "arg%d: ", arg->idx);
2007}
2008
2009/*
2010 * Check if the value is in fact zero, i.e. mask whatever needs masking, such
2011 * as mount 'flags' argument that needs ignoring some magic flag, see comment
2012 * in tools/perf/trace/beauty/mount_flags.c
2013 */
2014static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val)
2015{
2016 if (fmt && fmt->mask_val)
2017 return fmt->mask_val(arg, val);
2018
2019 return val;
2020}
2021
2022static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size,
2023 struct syscall_arg *arg, unsigned long val)
2024{
2025 if (fmt && fmt->scnprintf) {
2026 arg->val = val;
2027 if (fmt->parm)
2028 arg->parm = fmt->parm;
2029 return fmt->scnprintf(bf, size, arg);
2030 }
2031 return scnprintf(bf, size, "%ld", val);
2032}
2033
/*
 * Format the arguments of syscall 'sc' into 'bf' (at most 'size' bytes),
 * using the per-argument beautifiers in sc->arg_fmt when present and the
 * augmented payload (filenames, sockaddrs, etc.) copied by the BPF
 * augmenter, if any.  Returns the number of characters written to 'bf'.
 */
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned char *args, void *augmented_args, int augmented_args_size,
				      struct trace *trace, struct thread *thread)
{
	size_t printed = 0;
	unsigned long val;
	u8 bit = 1;	/* walks in lockstep with arg.idx, tested against arg.mask */
	struct syscall_arg arg = {
		.args	= args,
		.augmented = {
			.size = augmented_args_size,
			.args = augmented_args,
		},
		.idx	= 0,
		.mask	= 0,
		.trace  = trace,
		.thread = thread,
		.show_string_prefix = trace->show_string_prefix,
	};
	struct thread_trace *ttrace = thread__priv(thread);

	/*
	 * Things like fcntl will set this in its 'cmd' formatter to pick the
	 * right formatter for the return value (an fd? file flags?), which is
	 * not needed for syscalls that always return a given type, say an fd.
	 */
	ttrace->ret_scnprintf = NULL;

	if (sc->args != NULL) {
		struct tep_format_field *field;

		for (field = sc->args; field;
		     field = field->next, ++arg.idx, bit <<= 1) {
			/* Skip args already consumed by an earlier beautifier. */
			if (arg.mask & bit)
				continue;

			arg.fmt = &sc->arg_fmt[arg.idx];
			val = syscall_arg__val(&arg, arg.idx);
			/*
			 * Some syscall args need some mask, most don't and
			 * return val untouched.
			 */
			val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val);

			/*
			 * Suppress this argument if its value is zero and
			 * we don't have a string associated in an
			 * strarray for it.
			 */
			if (val == 0 &&
			    !trace->show_zeros &&
			    !(sc->arg_fmt &&
			      (sc->arg_fmt[arg.idx].show_zero ||
			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
			      sc->arg_fmt[arg.idx].parm))
				continue;

			printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");

			if (trace->show_arg_names)
				printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);

			printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx],
								  bf + printed, size - printed, &arg, val);
		}
	} else if (IS_ERR(sc->tp_format)) {
		/*
		 * If we managed to read the tracepoint /format file, then we
		 * may end up not having any args, like with gettid(), so only
		 * print the raw args when we didn't manage to read it.
		 */
		while (arg.idx < sc->nr_args) {
			if (arg.mask & bit)
				goto next_arg;
			val = syscall_arg__val(&arg, arg.idx);
			if (printed)
				printed += scnprintf(bf + printed, size - printed, ", ");
			printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
			printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val);
next_arg:
			++arg.idx;
			bit <<= 1;
		}
	}

	return printed;
}
2122
/* Signature shared by all per-tracepoint sample handlers (sys_enter, sys_exit, ...). */
typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
				  union perf_event *event,
				  struct perf_sample *sample);
2126
2127static struct syscall *trace__syscall_info(struct trace *trace,
2128 struct evsel *evsel, int id)
2129{
2130 int err = 0;
2131
2132 if (id < 0) {
2133
2134 /*
2135 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
2136 * before that, leaving at a higher verbosity level till that is
2137 * explained. Reproduced with plain ftrace with:
2138 *
2139 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
2140 * grep "NR -1 " /t/trace_pipe
2141 *
2142 * After generating some load on the machine.
2143 */
2144 if (verbose > 1) {
2145 static u64 n;
2146 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
2147 id, evsel__name(evsel), ++n);
2148 }
2149 return NULL;
2150 }
2151
2152 err = -EINVAL;
2153
2154#ifdef HAVE_SYSCALL_TABLE_SUPPORT
2155 if (id > trace->sctbl->syscalls.max_id) {
2156#else
2157 if (id >= trace->sctbl->syscalls.max_id) {
2158 /*
2159 * With libaudit we don't know beforehand what is the max_id,
2160 * so we let trace__read_syscall_info() figure that out as we
2161 * go on reading syscalls.
2162 */
2163 err = trace__read_syscall_info(trace, id);
2164 if (err)
2165#endif
2166 goto out_cant_read;
2167 }
2168
2169 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
2170 (err = trace__read_syscall_info(trace, id)) != 0)
2171 goto out_cant_read;
2172
2173 if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
2174 goto out_cant_read;
2175
2176 return &trace->syscalls.table[id];
2177
2178out_cant_read:
2179 if (verbose > 0) {
2180 char sbuf[STRERR_BUFSIZE];
2181 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
2182 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
2183 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
2184 fputs(" information\n", trace->output);
2185 }
2186 return NULL;
2187}
2188
/* Per-syscall statistics, kept per thread in ttrace->syscall_stats. */
struct syscall_stats {
	struct stats stats;	/* duration statistics (see update_stats()) */
	u64 nr_failures;	/* invocations that returned an error */
	int max_errno;		/* highest errno seen, sizes the errnos[] array */
	u32 *errnos;		/* per-errno hit counts, indexed by errno - 1 */
};
2195
/*
 * Fold one completed syscall invocation into the per-thread, per-syscall
 * statistics: duration stats, failure count and, with --errno-summary, a
 * per-errno histogram.  'err' is the raw syscall return value, negative
 * values being errnos.  Allocation failures degrade to incomplete stats.
 */
static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace,
				 int id, struct perf_sample *sample, long err, bool errno_summary)
{
	struct int_node *inode;
	struct syscall_stats *stats;
	u64 duration = 0;

	/* One stats node per syscall id, created on first use. */
	inode = intlist__findnew(ttrace->syscall_stats, id);
	if (inode == NULL)
		return;

	stats = inode->priv;
	if (stats == NULL) {
		stats = zalloc(sizeof(*stats));
		if (stats == NULL)
			return;

		init_stats(&stats->stats);
		inode->priv = stats;
	}

	if (ttrace->entry_time && sample->time > ttrace->entry_time)
		duration = sample->time - ttrace->entry_time;

	update_stats(&stats->stats, duration);

	if (err < 0) {
		++stats->nr_failures;

		if (!errno_summary)
			return;

		err = -err;
		/* Grow the errno histogram on demand, zeroing only the new tail. */
		if (err > stats->max_errno) {
			u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32));

			if (new_errnos) {
				memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32));
			} else {
				pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n",
					 thread__comm_str(thread), thread__pid(thread),
					 thread__tid(thread));
				return;
			}

			stats->errnos = new_errnos;
			stats->max_errno = err;
		}

		++stats->errnos[err - 1];	/* errnos[] is indexed by errno - 1 */
	}
}
2248
2249static int trace__printf_interrupted_entry(struct trace *trace)
2250{
2251 struct thread_trace *ttrace;
2252 size_t printed;
2253 int len;
2254
2255 if (trace->failure_only || trace->current == NULL)
2256 return 0;
2257
2258 ttrace = thread__priv(trace->current);
2259
2260 if (!ttrace->entry_pending)
2261 return 0;
2262
2263 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
2264 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
2265
2266 if (len < trace->args_alignment - 4)
2267 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
2268
2269 printed += fprintf(trace->output, " ...\n");
2270
2271 ttrace->entry_pending = false;
2272 ++trace->nr_events_printed;
2273
2274 return printed;
2275}
2276
2277static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
2278 struct perf_sample *sample, struct thread *thread)
2279{
2280 int printed = 0;
2281
2282 if (trace->print_sample) {
2283 double ts = (double)sample->time / NSEC_PER_MSEC;
2284
2285 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2286 evsel__name(evsel), ts,
2287 thread__comm_str(thread),
2288 sample->pid, sample->tid, sample->cpu);
2289 }
2290
2291 return printed;
2292}
2293
2294static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
2295{
2296 void *augmented_args = NULL;
2297 /*
2298 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
2299 * and there we get all 6 syscall args plus the tracepoint common fields
2300 * that gets calculated at the start and the syscall_nr (another long).
2301 * So we check if that is the case and if so don't look after the
2302 * sc->args_size but always after the full raw_syscalls:sys_enter payload,
2303 * which is fixed.
2304 *
2305 * We'll revisit this later to pass s->args_size to the BPF augmenter
2306 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c, so that it
2307 * copies only what we need for each syscall, like what happens when we
2308 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
2309 * traffic to just what is needed for each syscall.
2310 */
2311 int args_size = raw_augmented_args_size ?: sc->args_size;
2312
2313 *augmented_args_size = sample->raw_size - args_size;
2314 if (*augmented_args_size > 0)
2315 augmented_args = sample->raw_data + args_size;
2316
2317 return augmented_args;
2318}
2319
2320static void syscall__exit(struct syscall *sc)
2321{
2322 if (!sc)
2323 return;
2324
2325 zfree(&sc->arg_fmt);
2326}
2327
/*
 * sys_enter handler: format the syscall name and beautified args into
 * ttrace->entry_str and defer printing until the matching sys_exit, so the
 * duration and return value can go on the same line.  Syscalls flagged
 * is_exit (they do not return) are printed immediately with "= ?".
 */
static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	int printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	int augmented_args_size = 0;
	void *augmented_args = NULL;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	/* Lazily allocate the buffer where the entry line is staged. */
	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
		trace__printf_interrupted_entry(trace);
	/*
	 * If this is raw_syscalls.sys_enter, then it always comes with the 6 possible
	 * arguments, even if the syscall being handled, say "openat", uses only 4 arguments
	 * this breaks syscall__augmented_args() check for augmented args, as we calculate
	 * syscall->args_size using each syscalls:sys_enter_NAME tracefs format file,
	 * so when handling, say the openat syscall, we end up getting 6 args for the
	 * raw_syscalls:sys_enter event, when we expected just 4, we end up mistakenly
	 * thinking that the extra 2 u64 args are the augmented filename, so just check
	 * here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
	 */
	if (evsel != trace->syscalls.events.sys_enter)
		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, augmented_args, augmented_args_size, trace, thread);

	if (sc->is_exit) {
		/* This syscall never returns: print "= ?" now, nothing stays pending. */
		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
			int alignment = 0;

			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
			if (trace->args_alignment > printed)
				alignment = trace->args_alignment - printed;
			fprintf(trace->output, "%*s= ?\n", alignment, " ");
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	/* Remember which thread is mid-syscall, for trace__printf_interrupted_entry(). */
	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
2406
/*
 * Format and print just the beautified args of a sys_enter-like sample to
 * trace->output (used e.g. from trace__event_handler()).  Returns 0 on
 * success, -1 when the syscall info or thread_trace can't be obtained.
 */
static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample)
{
	struct thread_trace *ttrace;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	char msg[1024];
	void *args, *augmented_args = NULL;
	int augmented_args_size;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	/*
	 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
	 * and the rest of the beautifiers accessing it via struct syscall_arg touches it.
	 */
	if (ttrace == NULL)
		goto out_put;

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
	fprintf(trace->output, "%s", msg);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
2439
2440static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
2441 struct perf_sample *sample,
2442 struct callchain_cursor *cursor)
2443{
2444 struct addr_location al;
2445 int max_stack = evsel->core.attr.sample_max_stack ?
2446 evsel->core.attr.sample_max_stack :
2447 trace->max_stack;
2448 int err = -1;
2449
2450 addr_location__init(&al);
2451 if (machine__resolve(trace->host, &al, sample) < 0)
2452 goto out;
2453
2454 err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
2455out:
2456 addr_location__exit(&al);
2457 return err;
2458}
2459
2460static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
2461{
2462 /* TODO: user-configurable print_opts */
2463 const unsigned int print_opts = EVSEL__PRINT_SYM |
2464 EVSEL__PRINT_DSO |
2465 EVSEL__PRINT_UNKNOWN_AS_ADDR;
2466
2467 return sample__fprintf_callchain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output);
2468}
2469
/* Map an errno value to its name for the architecture of the evsel's env. */
static const char *errno_to_name(struct evsel *evsel, int err)
{
	return perf_env__arch_strerrno(evsel__env(evsel), err);
}
2476
/*
 * sys_exit handler: compute the syscall duration, update summary stats,
 * then print the line staged at sys_enter time (or a "... continued"
 * marker when the entry was already flushed) followed by the beautified
 * return value.
 */
static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
			   union perf_event *event __maybe_unused,
			   struct perf_sample *sample)
{
	long ret;
	u64 duration = 0;
	bool duration_calculated = false;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
	int alignment = trace->args_alignment;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);

	if (trace->summary)
		thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);

	/* A successful open-like syscall: cache the fd -> pathname mapping. */
	if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
		ttrace->filename.pending_open = false;
		++trace->stats.vfs_getname;
	}

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
		duration_calculated = true;
	} else if (trace->duration_filter)
		goto out;

	if (sample->callchain) {
		struct callchain_cursor *cursor = get_tls_callchain_cursor();

		callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
		if (callchain_ret == 0) {
			/* Below the --min-stack threshold: skip the whole event. */
			if (cursor->nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	if (trace->summary_only || (ret >= 0 && trace->failure_only))
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);

	if (ttrace->entry_pending) {
		printed = fprintf(trace->output, "%s", ttrace->entry_str);
	} else {
		/* Entry line already flushed by trace__printf_interrupted_entry(). */
		printed += fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		printed += 9;
		printed += fprintf(trace->output, "]: %s()", sc->name);
	}

	printed++; /* the closing ')' */

	if (alignment > printed)
		alignment -= printed;
	else
		alignment = 0;

	fprintf(trace->output, ")%*s= ", alignment, " ");

	/*
	 * Return value beautifying: without a fmt entry, negative values are
	 * printed as errnos (via the goto into the branch below), anything
	 * else as a plain signed long.
	 */
	if (sc->fmt == NULL) {
		if (ret < 0)
			goto errno_print;
signed_print:
		fprintf(trace->output, "%ld", ret);
	} else if (ret < 0) {
errno_print: {
		char bf[STRERR_BUFSIZE];
		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
			   *e = errno_to_name(evsel, -ret);

		fprintf(trace->output, "-1 %s (%s)", e, emsg);
	}
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, "0 (Timeout)");
	else if (ttrace->ret_scnprintf) {
		/* Formatter chosen at sys_enter time, e.g. by fcntl's 'cmd' beautifier. */
		char bf[1024];
		struct syscall_arg arg = {
			.val = ret,
			.thread = thread,
			.trace = trace,
		};
		ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
		ttrace->ret_scnprintf = NULL;
		fprintf(trace->output, "%s", bf);
	} else if (sc->fmt->hexret)
		fprintf(trace->output, "%#lx", ret);
	else if (sc->fmt->errpid) {
		/* Return value is a pid: also show that task's comm when known. */
		struct thread *child = machine__find_thread(trace->host, ret, ret);

		if (child != NULL) {
			fprintf(trace->output, "%ld", ret);
			if (thread__comm_set(child))
				fprintf(trace->output, " (%s)", thread__comm_str(child));
			thread__put(child);
		}
	} else
		goto signed_print;

	fputc('\n', trace->output);

	/*
	 * We only consider an 'event' for the sake of --max-events a non-filtered
	 * sys_enter + sys_exit and other tracepoint events.
	 */
	if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
		interrupted = true;

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
out:
	ttrace->entry_pending = false;
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
2612
/*
 * vfs_getname probe handler: cache the pathname the kernel is resolving so
 * open-like syscalls can map fd -> path at sys_exit time, and, when a
 * formatted entry line is pending with a placeholder recorded at
 * ttrace->filename.entry_str_pos, splice the pathname into that line.
 */
static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	struct thread_trace *ttrace;
	size_t filename_len, entry_str_len, to_move;
	ssize_t remaining_space;
	char *pos;
	const char *filename = evsel__rawptr(evsel, sample, "pathname");

	if (!thread)
		goto out;

	ttrace = thread__priv(thread);
	if (!ttrace)
		goto out_put;

	filename_len = strlen(filename);
	if (filename_len == 0)
		goto out_put;

	/* Grow the cached name buffer when this pathname doesn't fit. */
	if (ttrace->filename.namelen < filename_len) {
		char *f = realloc(ttrace->filename.name, filename_len + 1);

		if (f == NULL)
			goto out_put;

		ttrace->filename.namelen = filename_len;
		ttrace->filename.name = f;
	}

	strcpy(ttrace->filename.name, filename);
	ttrace->filename.pending_open = true;

	/* No pending entry line waiting for this pathname: we are done. */
	if (!ttrace->filename.ptr)
		goto out_put;

	entry_str_len = strlen(ttrace->entry_str);
	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
	if (remaining_space <= 0)
		goto out_put;

	/* Keep only the tail of the pathname when it doesn't fit whole. */
	if (filename_len > (size_t)remaining_space) {
		filename += filename_len - remaining_space;
		filename_len = remaining_space;
	}

	/* Open a gap at entry_str_pos and copy the pathname into it. */
	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
	memmove(pos + filename_len, pos, to_move);
	memcpy(pos, filename, filename_len);

	ttrace->filename.ptr = 0;
	ttrace->filename.entry_str_pos = 0;
out_put:
	thread__put(thread);
out:
	return 0;
}
2673
/*
 * sched:sched_stat_runtime handler: accumulate runtime into the per-thread
 * and global totals used by the summary.  If no thread_trace can be
 * attached, dump the raw tracepoint fields instead.
 */
static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample)
{
	u64 runtime = evsel__intval(evsel, sample, "runtime");
	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
	struct thread *thread = machine__findnew_thread(trace->host,
							sample->pid,
							sample->tid);
	struct thread_trace *ttrace = thread__trace(thread, trace->output);

	if (ttrace == NULL)
		goto out_dump;

	ttrace->runtime_ms += runtime_ms;
	trace->runtime_ms += runtime_ms;
out_put:
	thread__put(thread);
	return 0;

out_dump:
	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
		evsel->name,
		evsel__strval(evsel, sample, "comm"),
		(pid_t)evsel__intval(evsel, sample, "pid"),
		runtime,
		evsel__intval(evsel, sample, "vruntime"));
	goto out_put;
}
2703
2704static int bpf_output__printer(enum binary_printer_ops op,
2705 unsigned int val, void *extra __maybe_unused, FILE *fp)
2706{
2707 unsigned char ch = (unsigned char)val;
2708
2709 switch (op) {
2710 case BINARY_PRINT_CHAR_DATA:
2711 return fprintf(fp, "%c", isprint(ch) ? ch : '.');
2712 case BINARY_PRINT_DATA_BEGIN:
2713 case BINARY_PRINT_LINE_BEGIN:
2714 case BINARY_PRINT_ADDR:
2715 case BINARY_PRINT_NUM_DATA:
2716 case BINARY_PRINT_NUM_PAD:
2717 case BINARY_PRINT_SEP:
2718 case BINARY_PRINT_CHAR_PAD:
2719 case BINARY_PRINT_LINE_END:
2720 case BINARY_PRINT_DATA_END:
2721 default:
2722 break;
2723 }
2724
2725 return 0;
2726}
2727
/*
 * Print a BPF output event's raw payload as printable characters (see
 * bpf_output__printer), 8 bytes per group.
 */
static void bpf_output__fprintf(struct trace *trace,
				struct perf_sample *sample)
{
	binary__fprintf(sample->raw_data, sample->raw_size, 8,
			bpf_output__printer, NULL, trace->output);
	++trace->nr_events_printed;	/* counts towards --max-events */
}
2735
/*
 * Generic tracepoint printer: walk the fields from the event's tracefs
 * format file, print each one with the syscall-arg beautifier machinery
 * (honouring --show-zeros and --show-arg-names), then flush the buffer to
 * trace->output.  Returns chars formatted plus chars written to output.
 */
static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
				       struct thread *thread, void *augmented_args, int augmented_args_size)
{
	char bf[2048];
	size_t size = sizeof(bf);
	struct tep_format_field *field = evsel->tp_format->format.fields;
	struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel);
	size_t printed = 0;
	unsigned long val;
	u8 bit = 1;	/* tracks syscall_arg.idx as a bitmask, tested against .mask */
	struct syscall_arg syscall_arg = {
		.augmented = {
			.size = augmented_args_size,
			.args = augmented_args,
		},
		.idx = 0,
		.mask = 0,
		.trace = trace,
		.thread = thread,
		.show_string_prefix = trace->show_string_prefix,
	};

	for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) {
		if (syscall_arg.mask & bit)
			continue;

		syscall_arg.len = 0;
		syscall_arg.fmt = arg;
		if (field->flags & TEP_FIELD_IS_ARRAY) {
			int offset = field->offset;

			/* Dynamic arrays encode (len << 16 | offset) in the field value. */
			if (field->flags & TEP_FIELD_IS_DYNAMIC) {
				offset = format_field__intval(field, sample, evsel->needs_swap);
				syscall_arg.len = offset >> 16;
				offset &= 0xffff;
				if (tep_field_is_relative(field->flags))
					offset += field->offset + field->size;
			}

			val = (uintptr_t)(sample->raw_data + offset);
		} else
			val = format_field__intval(field, sample, evsel->needs_swap);
		/*
		 * Some syscall args need some mask, most don't and
		 * return val untouched.
		 */
		val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val);

		/*
		 * Suppress this argument if its value is zero and
		 * we don't have a string associated in an
		 * strarray for it.
		 */
		if (val == 0 &&
		    !trace->show_zeros &&
		    !((arg->show_zero ||
		       arg->scnprintf == SCA_STRARRAY ||
		       arg->scnprintf == SCA_STRARRAYS) &&
		      arg->parm))
			continue;

		printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");

		if (trace->show_arg_names)
			printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);

		printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
	}

	return printed + fprintf(trace->output, "%s", bf);
}
2807
/*
 * Handler for tracepoint and bpf-output events other than the core
 * raw_syscalls ones: print a timestamped, strace-like line with the event
 * name and its beautified payload, plus an optional callchain.
 */
static int trace__event_handler(struct trace *trace, struct evsel *evsel,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample)
{
	struct thread *thread;
	int callchain_ret = 0;
	/*
	 * Check if we called perf_evsel__disable(evsel) due to, for instance,
	 * this event's max_events having been hit and this is an entry coming
	 * from the ring buffer that we should discard, since the max events
	 * have already been considered/printed.
	 */
	if (evsel->disabled)
		return 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	if (sample->callchain) {
		struct callchain_cursor *cursor = get_tls_callchain_cursor();

		callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
		if (callchain_ret == 0) {
			/* Below the --min-stack threshold: discard the event. */
			if (cursor->nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	trace__printf_interrupted_entry(trace);
	trace__fprintf_tstamp(trace, sample->time, trace->output);

	if (trace->trace_syscalls && trace->show_duration)
		fprintf(trace->output, "( ): ");

	if (thread)
		trace__fprintf_comm_tid(trace, thread, trace->output);

	if (evsel == trace->syscalls.events.bpf_output) {
		int id = perf_evsel__sc_tp_uint(evsel, id, sample);
		struct syscall *sc = trace__syscall_info(trace, evsel, id);

		if (sc) {
			fprintf(trace->output, "%s(", sc->name);
			trace__fprintf_sys_enter(trace, evsel, sample);
			fputc(')', trace->output);
			goto newline;
		}

		/*
		 * XXX: Not having the associated syscall info or not finding/adding
		 * the thread should never happen, but if it does...
		 * fall thru and print it as a bpf_output event.
		 */
	}

	fprintf(trace->output, "%s(", evsel->name);

	if (evsel__is_bpf_output(evsel)) {
		bpf_output__fprintf(trace, sample);
	} else if (evsel->tp_format) {
		/* For syscalls:sys_enter_* try the strace-like formatter first. */
		if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
		    trace__fprintf_sys_enter(trace, evsel, sample)) {
			if (trace->libtraceevent_print) {
				event_format__fprintf(evsel->tp_format, sample->cpu,
						      sample->raw_data, sample->raw_size,
						      trace->output);
			} else {
				trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
			}
		}
	}

newline:
	fprintf(trace->output, ")\n");

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));

	++trace->nr_events_printed;

	/* Honour the per-event --max-events cap: stop collecting once it is hit. */
	if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
		evsel__disable(evsel);
		evsel__close(evsel);
	}
out:
	thread__put(thread);
	return 0;
}
2898
2899static void print_location(FILE *f, struct perf_sample *sample,
2900 struct addr_location *al,
2901 bool print_dso, bool print_sym)
2902{
2903
2904 if ((verbose > 0 || print_dso) && al->map)
2905 fprintf(f, "%s@", map__dso(al->map)->long_name);
2906
2907 if ((verbose > 0 || print_sym) && al->sym)
2908 fprintf(f, "%s+0x%" PRIx64, al->sym->name,
2909 al->addr - al->sym->start);
2910 else if (al->map)
2911 fprintf(f, "0x%" PRIx64, al->addr);
2912 else
2913 fprintf(f, "0x%" PRIx64, sample->addr);
2914}
2915
/*
 * Page-fault software event handler: bump the per-thread maj/min fault
 * counters and, unless --summary-only, print
 * "maj/minfault [ip location] => data location (map-type level)".
 */
static int trace__pgfault(struct trace *trace,
			  struct evsel *evsel,
			  union perf_event *event __maybe_unused,
			  struct perf_sample *sample)
{
	struct thread *thread;
	struct addr_location al;
	char map_type = 'd';
	struct thread_trace *ttrace;
	int err = -1;
	int callchain_ret = 0;

	addr_location__init(&al);
	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	if (sample->callchain) {
		struct callchain_cursor *cursor = get_tls_callchain_cursor();

		callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
		if (callchain_ret == 0) {
			/* Below the --min-stack threshold: discard the event. */
			if (cursor->nr < trace->min_stack)
				goto out_put;
			callchain_ret = 1;
		}
	}

	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
		ttrace->pfmaj++;
	else
		ttrace->pfmin++;

	if (trace->summary_only)
		goto out;

	/* Resolve the faulting instruction address. */
	thread__find_symbol(thread, sample->cpumode, sample->ip, &al);

	trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);

	fprintf(trace->output, "%sfault [",
		evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
		"maj" : "min");

	print_location(trace->output, sample, &al, false, true);

	fprintf(trace->output, "] => ");

	/* Resolve the faulted-on data address. */
	thread__find_symbol(thread, sample->cpumode, sample->addr, &al);

	if (!al.map) {
		/*
		 * NOTE(review): this lookup is byte-identical to the one just
		 * above, so it would be expected to fail again, making the
		 * 'x' branch look unreachable -- possibly a leftover from
		 * when code (MAP__FUNCTION) and data (MAP__VARIABLE) maps
		 * were searched separately.  Confirm against git history
		 * before simplifying.
		 */
		thread__find_symbol(thread, sample->cpumode, sample->addr, &al);

		if (al.map)
			map_type = 'x';
		else
			map_type = '?';
	}

	print_location(trace->output, sample, &al, true, false);

	fprintf(trace->output, " (%c%c)\n", map_type, al.level);

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));

	++trace->nr_events_printed;
out:
	err = 0;
out_put:
	thread__put(thread);
	addr_location__exit(&al);
	return err;
}
2994
2995static void trace__set_base_time(struct trace *trace,
2996 struct evsel *evsel,
2997 struct perf_sample *sample)
2998{
2999 /*
3000 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
3001 * and don't use sample->time unconditionally, we may end up having
3002 * some other event in the future without PERF_SAMPLE_TIME for good
3003 * reason, i.e. we may not be interested in its timestamps, just in
3004 * it taking place, picking some piece of information when it
3005 * appears in our event stream (vfs_getname comes to mind).
3006 */
3007 if (trace->base_time == 0 && !trace->full_time &&
3008 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
3009 trace->base_time = sample->time;
3010}
3011
3012static int trace__process_sample(struct perf_tool *tool,
3013 union perf_event *event,
3014 struct perf_sample *sample,
3015 struct evsel *evsel,
3016 struct machine *machine __maybe_unused)
3017{
3018 struct trace *trace = container_of(tool, struct trace, tool);
3019 struct thread *thread;
3020 int err = 0;
3021
3022 tracepoint_handler handler = evsel->handler;
3023
3024 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3025 if (thread && thread__is_filtered(thread))
3026 goto out;
3027
3028 trace__set_base_time(trace, evsel, sample);
3029
3030 if (handler) {
3031 ++trace->nr_events;
3032 handler(trace, evsel, event, sample);
3033 }
3034out:
3035 thread__put(thread);
3036 return err;
3037}
3038
/*
 * 'perf trace record': build a 'perf record' command line equivalent to the
 * current trace session (raw syscall tracepoints, page fault events, a filter
 * excluding our own pid) plus the user-supplied argv, then run cmd_record().
 * Returns cmd_record()'s result, or -1 on allocation failure.
 */
static int trace__record(struct trace *trace, int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-R",
		"-m", "1024",
		"-c", "1",
	};
	pid_t pid = getpid();
	/* Filter out our own pid so we don't trace the tracer. */
	char *filter = asprintf__tp_filter_pids(1, &pid);
	const char * const sc_args[] = { "-e", };
	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
	const char * const majpf_args[] = { "-e", "major-faults" };
	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
	const char * const minpf_args[] = { "-e", "minor-faults" };
	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
	int err = -1;

	/* +3 is for the event string below and the pid filter */
	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 +
		majpf_args_nr + minpf_args_nr + argc;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL || filter == NULL)
		goto out_free;

	j = 0;
	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[j++] = record_args[i];

	if (trace->trace_syscalls) {
		for (i = 0; i < sc_args_nr; i++)
			rec_argv[j++] = sc_args[i];

		/* event string may be different for older kernels - e.g., RHEL6 */
		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
		else if (is_valid_tracepoint("syscalls:sys_enter"))
			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
		else {
			pr_err("Neither raw_syscalls nor syscalls events exist.\n");
			goto out_free;
		}
	}

	rec_argv[j++] = "--filter";
	rec_argv[j++] = filter;

	if (trace->trace_pgfaults & TRACE_PFMAJ)
		for (i = 0; i < majpf_args_nr; i++)
			rec_argv[j++] = majpf_args[i];

	if (trace->trace_pgfaults & TRACE_PFMIN)
		for (i = 0; i < minpf_args_nr; i++)
			rec_argv[j++] = minpf_args[i];

	/* Finally, append whatever the user passed after 'trace record'. */
	for (i = 0; i < (unsigned int)argc; i++)
		rec_argv[j++] = argv[i];

	err = cmd_record(j, rec_argv);
out_free:
	free(filter);
	free(rec_argv);
	return err;
}
3106
3107static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
3108
3109static bool evlist__add_vfs_getname(struct evlist *evlist)
3110{
3111 bool found = false;
3112 struct evsel *evsel, *tmp;
3113 struct parse_events_error err;
3114 int ret;
3115
3116 parse_events_error__init(&err);
3117 ret = parse_events(evlist, "probe:vfs_getname*", &err);
3118 parse_events_error__exit(&err);
3119 if (ret)
3120 return false;
3121
3122 evlist__for_each_entry_safe(evlist, evsel, tmp) {
3123 if (!strstarts(evsel__name(evsel), "probe:vfs_getname"))
3124 continue;
3125
3126 if (evsel__field(evsel, "pathname")) {
3127 evsel->handler = trace__vfs_getname;
3128 found = true;
3129 continue;
3130 }
3131
3132 list_del_init(&evsel->core.node);
3133 evsel->evlist = NULL;
3134 evsel__delete(evsel);
3135 }
3136
3137 return found;
3138}
3139
3140static struct evsel *evsel__new_pgfault(u64 config)
3141{
3142 struct evsel *evsel;
3143 struct perf_event_attr attr = {
3144 .type = PERF_TYPE_SOFTWARE,
3145 .mmap_data = 1,
3146 };
3147
3148 attr.config = config;
3149 attr.sample_period = 1;
3150
3151 event_attr_init(&attr);
3152
3153 evsel = evsel__new(&attr);
3154 if (evsel)
3155 evsel->handler = trace__pgfault;
3156
3157 return evsel;
3158}
3159
3160static void evlist__free_syscall_tp_fields(struct evlist *evlist)
3161{
3162 struct evsel *evsel;
3163
3164 evlist__for_each_entry(evlist, evsel) {
3165 evsel_trace__delete(evsel->priv);
3166 evsel->priv = NULL;
3167 }
3168}
3169
3170static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
3171{
3172 const u32 type = event->header.type;
3173 struct evsel *evsel;
3174
3175 if (type != PERF_RECORD_SAMPLE) {
3176 trace__process_event(trace, trace->host, event, sample);
3177 return;
3178 }
3179
3180 evsel = evlist__id2evsel(trace->evlist, sample->id);
3181 if (evsel == NULL) {
3182 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
3183 return;
3184 }
3185
3186 if (evswitch__discard(&trace->evswitch, evsel))
3187 return;
3188
3189 trace__set_base_time(trace, evsel, sample);
3190
3191 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
3192 sample->raw_data == NULL) {
3193 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
3194 evsel__name(evsel), sample->tid,
3195 sample->cpu, sample->raw_size);
3196 } else {
3197 tracepoint_handler handler = evsel->handler;
3198 handler(trace, evsel, event, sample);
3199 }
3200
3201 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
3202 interrupted = true;
3203}
3204
/*
 * Create the raw_syscalls:sys_{enter,exit} tracepoint events, cache the
 * offsets of the fields we read on every sample, configure callchains and
 * add both to the evlist.  Returns 0 on success, -1 on failure (any
 * partially-created evsel is deleted on the error paths).
 */
static int trace__add_syscall_newtp(struct trace *trace)
{
	int ret = -1;
	struct evlist *evlist = trace->evlist;
	struct evsel *sys_enter, *sys_exit;

	sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
	if (sys_enter == NULL)
		goto out;

	/* Cache offset/size of the "args" payload field for fast access. */
	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
		goto out_delete_sys_enter;

	sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
	if (sys_exit == NULL)
		goto out_delete_sys_enter;

	/* Likewise for the "ret" field carrying the syscall return value. */
	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
		goto out_delete_sys_exit;

	evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
	evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);

	evlist__add(evlist, sys_enter);
	evlist__add(evlist, sys_exit);

	if (callchain_param.enabled && !trace->kernel_syscallchains) {
		/*
		 * We're interested only in the user space callchain
		 * leading to the syscall, allow overriding that for
		 * debugging reasons using --kernel_syscall_callchains
		 */
		sys_exit->core.attr.exclude_callchain_kernel = 1;
	}

	trace->syscalls.events.sys_enter = sys_enter;
	trace->syscalls.events.sys_exit = sys_exit;

	ret = 0;
out:
	return ret;

out_delete_sys_exit:
	evsel__delete_priv(sys_exit);
out_delete_sys_enter:
	evsel__delete_priv(sys_enter);
	goto out;
}
3253
3254static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
3255{
3256 int err = -1;
3257 struct evsel *sys_exit;
3258 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
3259 trace->ev_qualifier_ids.nr,
3260 trace->ev_qualifier_ids.entries);
3261
3262 if (filter == NULL)
3263 goto out_enomem;
3264
3265 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3266 sys_exit = trace->syscalls.events.sys_exit;
3267 err = evsel__append_tp_filter(sys_exit, filter);
3268 }
3269
3270 free(filter);
3271out:
3272 return err;
3273out_enomem:
3274 errno = ENOMEM;
3275 goto out;
3276}
3277
3278#ifdef HAVE_BPF_SKEL
3279static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
3280{
3281 struct bpf_program *pos, *prog = NULL;
3282 const char *sec_name;
3283
3284 if (trace->skel->obj == NULL)
3285 return NULL;
3286
3287 bpf_object__for_each_program(pos, trace->skel->obj) {
3288 sec_name = bpf_program__section_name(pos);
3289 if (sec_name && !strcmp(sec_name, name)) {
3290 prog = pos;
3291 break;
3292 }
3293 }
3294
3295 return prog;
3296}
3297
/*
 * Find the BPF program that augments the payload of the given syscall for
 * 'type' ("enter" or "exit"): use the explicitly named one if the syscall
 * fmt provides it, otherwise try the "tp/syscalls/sys_<type>_<name>" section
 * (and the fmt alias, if any), falling back to the generic unaugmented
 * program.  Never returns NULL.
 */
static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
							const char *prog_name, const char *type)
{
	struct bpf_program *prog;

	if (prog_name == NULL) {
		char default_prog_name[256];
		scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name);
		prog = trace__find_bpf_program_by_title(trace, default_prog_name);
		if (prog != NULL)
			goto out_found;
		if (sc->fmt && sc->fmt->alias) {
			scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias);
			prog = trace__find_bpf_program_by_title(trace, default_prog_name);
			if (prog != NULL)
				goto out_found;
		}
		goto out_unaugmented;
	}

	prog = trace__find_bpf_program_by_title(trace, prog_name);

	/* NB: out_found is jumped into from the default-name path above. */
	if (prog != NULL) {
out_found:
		return prog;
	}

	pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
		 prog_name, type, sc->name);
out_unaugmented:
	return trace->skel->progs.syscall_unaugmented;
}
3330
3331static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
3332{
3333 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3334
3335 if (sc == NULL)
3336 return;
3337
3338 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3339 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit");
3340}
3341
3342static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
3343{
3344 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3345 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
3346}
3347
3348static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
3349{
3350 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3351 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
3352}
3353
/*
 * For a syscall 'sc' that has pointer args but no augmenter of its own, look
 * for another syscall whose augmenter copies a compatible (sub)set of args so
 * its sys_enter BPF program can be reused.  A candidate matches when, walking
 * both arg lists in parallel, every pointer arg agrees in type (with a
 * name-based heuristic for "const char *" string args), and the candidate
 * collects no pointers beyond those common to both.  Returns the reusable
 * program or NULL.
 */
static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
{
	struct tep_format_field *field, *candidate_field;
	int id;

	/*
	 * We're only interested in syscalls that have a pointer:
	 */
	for (field = sc->args; field; field = field->next) {
		if (field->flags & TEP_FIELD_IS_POINTER)
			goto try_to_find_pair;
	}

	return NULL;

try_to_find_pair:
	for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
		struct syscall *pair = trace__syscall_info(trace, NULL, id);
		struct bpf_program *pair_prog;
		bool is_candidate = false;

		/* Only consider syscalls with a real (non-fallback) augmenter. */
		if (pair == NULL || pair == sc ||
		    pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented)
			continue;

		for (field = sc->args, candidate_field = pair->args;
		     field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
			bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
			     candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;

			if (is_pointer) {
				if (!candidate_is_pointer) {
					// The candidate just doesn't copy our pointer arg, but might copy other pointers we want.
					continue;
				}
			} else {
				if (candidate_is_pointer) {
					// The candidate might copy a pointer we don't have, skip it.
					goto next_candidate;
				}
				continue;
			}

			if (strcmp(field->type, candidate_field->type))
				goto next_candidate;

			/*
			 * This is limited in the BPF program but sys_write
			 * uses "const char *" for its "buf" arg so we need to
			 * use some heuristic that is kinda future proof...
			 */
			if (strcmp(field->type, "const char *") == 0 &&
			    !(strstr(field->name, "name") ||
			      strstr(field->name, "path") ||
			      strstr(field->name, "file") ||
			      strstr(field->name, "root") ||
			      strstr(field->name, "description")))
				goto next_candidate;

			is_candidate = true;
		}

		if (!is_candidate)
			goto next_candidate;

		/*
		 * Check if the tentative pair syscall augmenter has more pointers, if it has,
		 * then it may be collecting that and we then can't use it, as it would collect
		 * more than what is common to the two syscalls.
		 */
		if (candidate_field) {
			for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
				if (candidate_field->flags & TEP_FIELD_IS_POINTER)
					goto next_candidate;
		}

		pair_prog = pair->bpf_prog.sys_enter;
		/*
		 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
		 * have been searched for, so search it here and if it returns the
		 * unaugmented one, then ignore it, otherwise we'll reuse that BPF
		 * program for a filtered syscall on a non-filtered one.
		 *
		 * For instance, we have "!syscalls:sys_enter_renameat" and that is
		 * useful for "renameat2".
		 */
		if (pair_prog == NULL) {
			pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
			if (pair_prog == trace->skel->progs.syscall_unaugmented)
				goto next_candidate;
		}

		pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
		return pair_prog;
	next_candidate:
		continue;
	}

	return NULL;
}
3454
/*
 * Populate the syscalls_sys_{enter,exit} BPF prog-array maps, keyed by
 * syscall id, with the fd of each syscall's augmenter program (falling back
 * to the unaugmented one).  A second pass then lets augmenter-less syscalls
 * reuse a compatible sibling's sys_enter program.  Returns 0 or the first
 * bpf_map_update_elem() error.
 */
static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
{
	int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
	int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
	int err = 0, key;

	/* First pass: one entry per enabled syscall, enter and exit. */
	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		int prog_fd;

		if (!trace__syscall_enabled(trace, key))
			continue;

		trace__init_syscall_bpf_progs(trace, key);

		// It'll get at least the "!raw_syscalls:unaugmented"
		prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
		prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
		err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
	}

	/*
	 * Now lets do a second pass looking for enabled syscalls without
	 * an augmenter that have a signature that is a superset of another
	 * syscall with an augmenter so that we can auto-reuse it.
	 *
	 * I.e. if we have an augmenter for the "open" syscall that has
	 * this signature:
	 *
	 *   int open(const char *pathname, int flags, mode_t mode);
	 *
	 * I.e. that will collect just the first string argument, then we
	 * can reuse it for the 'creat' syscall, that has this signature:
	 *
	 *   int creat(const char *pathname, mode_t mode);
	 *
	 * and for:
	 *
	 *   int stat(const char *pathname, struct stat *statbuf);
	 *   int lstat(const char *pathname, struct stat *statbuf);
	 *
	 * Because the 'open' augmenter will collect the first arg as a string,
	 * and leave alone all the other args, which already helps with
	 * beautifying 'stat' and 'lstat''s pathname arg.
	 *
	 * Then, in time, when 'stat' gets an augmenter that collects both
	 * first and second arg (this one on the raw_syscalls:sys_exit prog
	 * array tail call, then that one will be used.
	 */
	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		struct syscall *sc = trace__syscall_info(trace, NULL, key);
		struct bpf_program *pair_prog;
		int prog_fd;

		if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
			continue;

		/*
		 * For now we're just reusing the sys_enter prog, and if it
		 * already has an augmenter, we don't need to find one.
		 */
		if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented)
			continue;

		/*
		 * Look at all the other syscalls for one that has a signature
		 * that is close enough that we can share:
		 */
		pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
		if (pair_prog == NULL)
			continue;

		sc->bpf_prog.sys_enter = pair_prog;

		/*
		 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter
		 * with the fd for the program we're reusing:
		 */
		prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
	}

	return err;
}
3545#endif // HAVE_BPF_SKEL
3546
3547static int trace__set_ev_qualifier_filter(struct trace *trace)
3548{
3549 if (trace->syscalls.events.sys_enter)
3550 return trace__set_ev_qualifier_tp_filter(trace);
3551 return 0;
3552}
3553
3554static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
3555 size_t npids __maybe_unused, pid_t *pids __maybe_unused)
3556{
3557 int err = 0;
3558#ifdef HAVE_LIBBPF_SUPPORT
3559 bool value = true;
3560 int map_fd = bpf_map__fd(map);
3561 size_t i;
3562
3563 for (i = 0; i < npids; ++i) {
3564 err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
3565 if (err)
3566 break;
3567 }
3568#endif
3569 return err;
3570}
3571
3572static int trace__set_filter_loop_pids(struct trace *trace)
3573{
3574 unsigned int nr = 1, err;
3575 pid_t pids[32] = {
3576 getpid(),
3577 };
3578 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3579
3580 while (thread && nr < ARRAY_SIZE(pids)) {
3581 struct thread *parent = machine__find_thread(trace->host,
3582 thread__ppid(thread),
3583 thread__ppid(thread));
3584
3585 if (parent == NULL)
3586 break;
3587
3588 if (!strcmp(thread__comm_str(parent), "sshd") ||
3589 strstarts(thread__comm_str(parent), "gnome-terminal")) {
3590 pids[nr++] = thread__tid(parent);
3591 break;
3592 }
3593 thread = parent;
3594 }
3595
3596 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
3597 if (!err && trace->filter_pids.map)
3598 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3599
3600 return err;
3601}
3602
3603static int trace__set_filter_pids(struct trace *trace)
3604{
3605 int err = 0;
3606 /*
3607 * Better not use !target__has_task() here because we need to cover the
3608 * case where no threads were specified in the command line, but a
3609 * workload was, and in that case we will fill in the thread_map when
3610 * we fork the workload in evlist__prepare_workload.
3611 */
3612 if (trace->filter_pids.nr > 0) {
3613 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
3614 trace->filter_pids.entries);
3615 if (!err && trace->filter_pids.map) {
3616 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
3617 trace->filter_pids.entries);
3618 }
3619 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
3620 err = trace__set_filter_loop_pids(trace);
3621 }
3622
3623 return err;
3624}
3625
3626static int __trace__deliver_event(struct trace *trace, union perf_event *event)
3627{
3628 struct evlist *evlist = trace->evlist;
3629 struct perf_sample sample;
3630 int err = evlist__parse_sample(evlist, event, &sample);
3631
3632 if (err)
3633 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
3634 else
3635 trace__handle_event(trace, event, &sample);
3636
3637 return 0;
3638}
3639
3640static int __trace__flush_events(struct trace *trace)
3641{
3642 u64 first = ordered_events__first_time(&trace->oe.data);
3643 u64 flush = trace->oe.last - NSEC_PER_SEC;
3644
3645 /* Is there some thing to flush.. */
3646 if (first && first < flush)
3647 return ordered_events__flush_time(&trace->oe.data, flush);
3648
3649 return 0;
3650}
3651
3652static int trace__flush_events(struct trace *trace)
3653{
3654 return !trace->sort_events ? 0 : __trace__flush_events(trace);
3655}
3656
3657static int trace__deliver_event(struct trace *trace, union perf_event *event)
3658{
3659 int err;
3660
3661 if (!trace->sort_events)
3662 return __trace__deliver_event(trace, event);
3663
3664 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
3665 if (err && err != -1)
3666 return err;
3667
3668 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL);
3669 if (err)
3670 return err;
3671
3672 return trace__flush_events(trace);
3673}
3674
3675static int ordered_events__deliver_event(struct ordered_events *oe,
3676 struct ordered_event *event)
3677{
3678 struct trace *trace = container_of(oe, struct trace, oe.data);
3679
3680 return __trace__deliver_event(trace, event->event);
3681}
3682
3683static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg)
3684{
3685 struct tep_format_field *field;
3686 struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);
3687
3688 if (evsel->tp_format == NULL || fmt == NULL)
3689 return NULL;
3690
3691 for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt)
3692 if (strcmp(field->name, arg) == 0)
3693 return fmt;
3694
3695 return NULL;
3696}
3697
3698static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
3699{
3700 char *tok, *left = evsel->filter, *new_filter = evsel->filter;
3701
3702 while ((tok = strpbrk(left, "=<>!")) != NULL) {
3703 char *right = tok + 1, *right_end;
3704
3705 if (*right == '=')
3706 ++right;
3707
3708 while (isspace(*right))
3709 ++right;
3710
3711 if (*right == '\0')
3712 break;
3713
3714 while (!isalpha(*left))
3715 if (++left == tok) {
3716 /*
3717 * Bail out, can't find the name of the argument that is being
3718 * used in the filter, let it try to set this filter, will fail later.
3719 */
3720 return 0;
3721 }
3722
3723 right_end = right + 1;
3724 while (isalnum(*right_end) || *right_end == '_' || *right_end == '|')
3725 ++right_end;
3726
3727 if (isalpha(*right)) {
3728 struct syscall_arg_fmt *fmt;
3729 int left_size = tok - left,
3730 right_size = right_end - right;
3731 char arg[128];
3732
3733 while (isspace(left[left_size - 1]))
3734 --left_size;
3735
3736 scnprintf(arg, sizeof(arg), "%.*s", left_size, left);
3737
3738 fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg);
3739 if (fmt == NULL) {
3740 pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
3741 arg, evsel->name, evsel->filter);
3742 return -1;
3743 }
3744
3745 pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ",
3746 arg, (int)(right - tok), tok, right_size, right);
3747
3748 if (fmt->strtoul) {
3749 u64 val;
3750 struct syscall_arg syscall_arg = {
3751 .parm = fmt->parm,
3752 };
3753
3754 if (fmt->strtoul(right, right_size, &syscall_arg, &val)) {
3755 char *n, expansion[19];
3756 int expansion_lenght = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val);
3757 int expansion_offset = right - new_filter;
3758
3759 pr_debug("%s", expansion);
3760
3761 if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) {
3762 pr_debug(" out of memory!\n");
3763 free(new_filter);
3764 return -1;
3765 }
3766 if (new_filter != evsel->filter)
3767 free(new_filter);
3768 left = n + expansion_offset + expansion_lenght;
3769 new_filter = n;
3770 } else {
3771 pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n",
3772 right_size, right, arg, evsel->name, evsel->filter);
3773 return -1;
3774 }
3775 } else {
3776 pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n",
3777 arg, evsel->name, evsel->filter);
3778 return -1;
3779 }
3780
3781 pr_debug("\n");
3782 } else {
3783 left = right_end;
3784 }
3785 }
3786
3787 if (new_filter != evsel->filter) {
3788 pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
3789 evsel__set_filter(evsel, new_filter);
3790 free(new_filter);
3791 }
3792
3793 return 0;
3794}
3795
3796static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
3797{
3798 struct evlist *evlist = trace->evlist;
3799 struct evsel *evsel;
3800
3801 evlist__for_each_entry(evlist, evsel) {
3802 if (evsel->filter == NULL)
3803 continue;
3804
3805 if (trace__expand_filter(trace, evsel)) {
3806 *err_evsel = evsel;
3807 return -1;
3808 }
3809 }
3810
3811 return 0;
3812}
3813
3814static int trace__run(struct trace *trace, int argc, const char **argv)
3815{
3816 struct evlist *evlist = trace->evlist;
3817 struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
3818 int err = -1, i;
3819 unsigned long before;
3820 const bool forks = argc > 0;
3821 bool draining = false;
3822
3823 trace->live = true;
3824
3825 if (!trace->raw_augmented_syscalls) {
3826 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
3827 goto out_error_raw_syscalls;
3828
3829 if (trace->trace_syscalls)
3830 trace->vfs_getname = evlist__add_vfs_getname(evlist);
3831 }
3832
3833 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
3834 pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
3835 if (pgfault_maj == NULL)
3836 goto out_error_mem;
3837 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
3838 evlist__add(evlist, pgfault_maj);
3839 }
3840
3841 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
3842 pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
3843 if (pgfault_min == NULL)
3844 goto out_error_mem;
3845 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
3846 evlist__add(evlist, pgfault_min);
3847 }
3848
3849 /* Enable ignoring missing threads when -u/-p option is defined. */
3850 trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid;
3851
3852 if (trace->sched &&
3853 evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime))
3854 goto out_error_sched_stat_runtime;
3855 /*
3856 * If a global cgroup was set, apply it to all the events without an
3857 * explicit cgroup. I.e.:
3858 *
3859 * trace -G A -e sched:*switch
3860 *
3861 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
3862 * _and_ sched:sched_switch to the 'A' cgroup, while:
3863 *
3864 * trace -e sched:*switch -G A
3865 *
3866 * will only set the sched:sched_switch event to the 'A' cgroup, all the
3867 * other events (raw_syscalls:sys_{enter,exit}, etc are left "without"
3868 * a cgroup (on the root cgroup, sys wide, etc).
3869 *
3870 * Multiple cgroups:
3871 *
3872 * trace -G A -e sched:*switch -G B
3873 *
3874 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
3875 * to the 'B' cgroup.
3876 *
3877 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
3878 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
3879 */
3880 if (trace->cgroup)
3881 evlist__set_default_cgroup(trace->evlist, trace->cgroup);
3882
3883 err = evlist__create_maps(evlist, &trace->opts.target);
3884 if (err < 0) {
3885 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
3886 goto out_delete_evlist;
3887 }
3888
3889 err = trace__symbols_init(trace, evlist);
3890 if (err < 0) {
3891 fprintf(trace->output, "Problems initializing symbol libraries!\n");
3892 goto out_delete_evlist;
3893 }
3894
3895 evlist__config(evlist, &trace->opts, &callchain_param);
3896
3897 if (forks) {
3898 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
3899 if (err < 0) {
3900 fprintf(trace->output, "Couldn't run the workload!\n");
3901 goto out_delete_evlist;
3902 }
3903 workload_pid = evlist->workload.pid;
3904 }
3905
3906 err = evlist__open(evlist);
3907 if (err < 0)
3908 goto out_error_open;
3909#ifdef HAVE_BPF_SKEL
3910 if (trace->syscalls.events.bpf_output) {
3911 struct perf_cpu cpu;
3912
3913 /*
3914 * Set up the __augmented_syscalls__ BPF map to hold for each
3915 * CPU the bpf-output event's file descriptor.
3916 */
3917 perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
3918 bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__,
3919 &cpu.cpu, sizeof(int),
3920 xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
3921 cpu.cpu, 0),
3922 sizeof(__u32), BPF_ANY);
3923 }
3924 }
3925#endif
3926 err = trace__set_filter_pids(trace);
3927 if (err < 0)
3928 goto out_error_mem;
3929
3930#ifdef HAVE_BPF_SKEL
3931 if (trace->skel && trace->skel->progs.sys_enter)
3932 trace__init_syscalls_bpf_prog_array_maps(trace);
3933#endif
3934
3935 if (trace->ev_qualifier_ids.nr > 0) {
3936 err = trace__set_ev_qualifier_filter(trace);
3937 if (err < 0)
3938 goto out_errno;
3939
3940 if (trace->syscalls.events.sys_exit) {
3941 pr_debug("event qualifier tracepoint filter: %s\n",
3942 trace->syscalls.events.sys_exit->filter);
3943 }
3944 }
3945
3946 /*
3947 * If the "close" syscall is not traced, then we will not have the
3948 * opportunity to, in syscall_arg__scnprintf_close_fd() invalidate the
3949 * fd->pathname table and were ending up showing the last value set by
3950 * syscalls opening a pathname and associating it with a descriptor or
3951 * reading it from /proc/pid/fd/ in cases where that doesn't make
3952 * sense.
3953 *
3954 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
3955 * not in use.
3956 */
3957 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
3958
3959 err = trace__expand_filters(trace, &evsel);
3960 if (err)
3961 goto out_delete_evlist;
3962 err = evlist__apply_filters(evlist, &evsel);
3963 if (err < 0)
3964 goto out_error_apply_filters;
3965
3966 err = evlist__mmap(evlist, trace->opts.mmap_pages);
3967 if (err < 0)
3968 goto out_error_mmap;
3969
3970 if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay)
3971 evlist__enable(evlist);
3972
3973 if (forks)
3974 evlist__start_workload(evlist);
3975
3976 if (trace->opts.target.initial_delay) {
3977 usleep(trace->opts.target.initial_delay * 1000);
3978 evlist__enable(evlist);
3979 }
3980
3981 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
3982 perf_thread_map__nr(evlist->core.threads) > 1 ||
3983 evlist__first(evlist)->core.attr.inherit;
3984
3985 /*
3986 * Now that we already used evsel->core.attr to ask the kernel to setup the
3987 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in
3988 * trace__resolve_callchain(), allowing per-event max-stack settings
3989 * to override an explicitly set --max-stack global setting.
3990 */
3991 evlist__for_each_entry(evlist, evsel) {
3992 if (evsel__has_callchain(evsel) &&
3993 evsel->core.attr.sample_max_stack == 0)
3994 evsel->core.attr.sample_max_stack = trace->max_stack;
3995 }
3996again:
3997 before = trace->nr_events;
3998
3999 for (i = 0; i < evlist->core.nr_mmaps; i++) {
4000 union perf_event *event;
4001 struct mmap *md;
4002
4003 md = &evlist->mmap[i];
4004 if (perf_mmap__read_init(&md->core) < 0)
4005 continue;
4006
4007 while ((event = perf_mmap__read_event(&md->core)) != NULL) {
4008 ++trace->nr_events;
4009
4010 err = trace__deliver_event(trace, event);
4011 if (err)
4012 goto out_disable;
4013
4014 perf_mmap__consume(&md->core);
4015
4016 if (interrupted)
4017 goto out_disable;
4018
4019 if (done && !draining) {
4020 evlist__disable(evlist);
4021 draining = true;
4022 }
4023 }
4024 perf_mmap__read_done(&md->core);
4025 }
4026
4027 if (trace->nr_events == before) {
4028 int timeout = done ? 100 : -1;
4029
4030 if (!draining && evlist__poll(evlist, timeout) > 0) {
4031 if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
4032 draining = true;
4033
4034 goto again;
4035 } else {
4036 if (trace__flush_events(trace))
4037 goto out_disable;
4038 }
4039 } else {
4040 goto again;
4041 }
4042
4043out_disable:
4044 thread__zput(trace->current);
4045
4046 evlist__disable(evlist);
4047
4048 if (trace->sort_events)
4049 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
4050
4051 if (!err) {
4052 if (trace->summary)
4053 trace__fprintf_thread_summary(trace, trace->output);
4054
4055 if (trace->show_tool_stats) {
4056 fprintf(trace->output, "Stats:\n "
4057 " vfs_getname : %" PRIu64 "\n"
4058 " proc_getname: %" PRIu64 "\n",
4059 trace->stats.vfs_getname,
4060 trace->stats.proc_getname);
4061 }
4062 }
4063
4064out_delete_evlist:
4065 trace__symbols__exit(trace);
4066 evlist__free_syscall_tp_fields(evlist);
4067 evlist__delete(evlist);
4068 cgroup__put(trace->cgroup);
4069 trace->evlist = NULL;
4070 trace->live = false;
4071 return err;
4072{
4073 char errbuf[BUFSIZ];
4074
4075out_error_sched_stat_runtime:
4076 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
4077 goto out_error;
4078
4079out_error_raw_syscalls:
4080 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
4081 goto out_error;
4082
4083out_error_mmap:
4084 evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
4085 goto out_error;
4086
4087out_error_open:
4088 evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
4089
4090out_error:
4091 fprintf(trace->output, "%s\n", errbuf);
4092 goto out_delete_evlist;
4093
4094out_error_apply_filters:
4095 fprintf(trace->output,
4096 "Failed to set filter \"%s\" on event %s with %d (%s)\n",
4097 evsel->filter, evsel__name(evsel), errno,
4098 str_error_r(errno, errbuf, sizeof(errbuf)));
4099 goto out_delete_evlist;
4100}
4101out_error_mem:
4102 fprintf(trace->output, "Not enough memory to run!\n");
4103 goto out_delete_evlist;
4104
4105out_errno:
4106 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
4107 goto out_delete_evlist;
4108}
4109
/*
 * Replay mode ('perf trace -i perf.data'): process a previously recorded
 * session through the same handlers used for live tracing, instead of
 * opening the events and tracing live.
 */
static int trace__replay(struct trace *trace)
{
	const struct evsel_str_handler handlers[] = {
		{ "probe:vfs_getname", trace__vfs_getname, },
	};
	struct perf_data data = {
		.path = input_name,
		.mode = PERF_DATA_MODE_READ,
		.force = trace->force,
	};
	struct perf_session *session;
	struct evsel *evsel;
	int err = -1;

	/* Wire up the perf_tool callbacks for every record type we care about. */
	trace->tool.sample = trace__process_sample;
	trace->tool.mmap = perf_event__process_mmap;
	trace->tool.mmap2 = perf_event__process_mmap2;
	trace->tool.comm = perf_event__process_comm;
	trace->tool.exit = perf_event__process_exit;
	trace->tool.fork = perf_event__process_fork;
	trace->tool.attr = perf_event__process_attr;
	trace->tool.tracing_data = perf_event__process_tracing_data;
	trace->tool.build_id = perf_event__process_build_id;
	trace->tool.namespaces = perf_event__process_namespaces;

	/* Recorded events may be out of order; resort them by timestamp. */
	trace->tool.ordered_events = true;
	trace->tool.ordering_requires_timestamps = true;

	/* add tid to output */
	trace->multiple_threads = true;

	session = perf_session__new(&data, &trace->tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	/*
	 * NOTE(review): strdup() results are not checked here; a NULL list
	 * string presumably just disables pid/tid symbol filtering — confirm.
	 */
	if (trace->opts.target.pid)
		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);

	if (trace->opts.target.tid)
		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);

	if (symbol__init(&session->header.env) < 0)
		goto out;

	trace->host = &session->machines.host;

	err = perf_session__set_tracepoints_handlers(session, handlers);
	if (err)
		goto out;

	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
	trace->syscalls.events.sys_enter = evsel;
	/* older kernels have syscalls tp versus raw_syscalls */
	if (evsel == NULL)
		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");

	if (evsel &&
	    (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
		pr_err("Error during initialize raw_syscalls:sys_enter event\n");
		goto out;
	}

	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
	trace->syscalls.events.sys_exit = evsel;
	/* ditto: fall back to the older syscalls:sys_exit tracepoint name */
	if (evsel == NULL)
		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
	if (evsel &&
	    (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
		pr_err("Error during initialize raw_syscalls:sys_exit event\n");
		goto out;
	}

	/* Route recorded page fault software events to the pagefault printer. */
	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
		    (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
			evsel->handler = trace__pgfault;
	}

	setup_pager();

	err = perf_session__process_events(session);
	if (err)
		pr_err("Failed to process events, error %d", err);

	else if (trace->summary)
		trace__fprintf_thread_summary(trace, trace->output);

out:
	perf_session__delete(session);

	return err;
}
4206
/* Print the banner preceding the per-thread summary, return chars printed. */
static size_t trace__fprintf_threads_header(FILE *fp)
{
	return fprintf(fp, "\n Summary of events:\n\n");
}
4215
/*
 * Resort the per-thread syscall stats into a tree ordered by total time
 * spent per syscall (msecs, descending), for the -s/-S summary table.
 * DEFINE_RESORT_RB generates the resort tree type plus iteration helpers.
 */
DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
	struct syscall_stats *stats;
	double msecs;
	int syscall;
)
{
	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
	struct syscall_stats *stats = source->priv;

	entry->syscall = source->i;
	entry->stats = stats;
	/* total time = number of calls * average duration, scaled to msecs */
	entry->msecs = stats ? (u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0;
}
4229
/*
 * Print one thread's per-syscall statistics table (-s/-S output), sorted
 * by total time spent, descending.  Returns the number of chars printed.
 */
static size_t thread__dump_stats(struct thread_trace *ttrace,
				 struct trace *trace, FILE *fp)
{
	size_t printed = 0;
	struct syscall *sc;
	struct rb_node *nd;
	/* Build a tree over ttrace->syscall_stats resorted by total msecs. */
	DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);

	if (syscall_stats == NULL)
		return 0;

	printed += fprintf(fp, "\n");

	printed += fprintf(fp, " syscall calls errors total min avg max stddev\n");
	printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
	printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n");

	resort_rb__for_each_entry(nd, syscall_stats) {
		struct syscall_stats *stats = syscall_stats_entry->stats;
		if (stats) {
			double min = (double)(stats->stats.min) / NSEC_PER_MSEC;
			double max = (double)(stats->stats.max) / NSEC_PER_MSEC;
			double avg = avg_stats(&stats->stats);
			double pct;
			u64 n = (u64)stats->stats.n;

			/* Relative standard deviation, as a percentage of the mean. */
			pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0;
			avg /= NSEC_PER_MSEC;

			sc = &trace->syscalls.table[syscall_stats_entry->syscall];
			printed += fprintf(fp, " %-15s", sc->name);
			printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f",
					   n, stats->nr_failures, syscall_stats_entry->msecs, min, avg);
			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);

			/* --errno-summary: break the failure count down by errno. */
			if (trace->errno_summary && stats->nr_failures) {
				int e;

				for (e = 0; e < stats->max_errno; ++e) {
					if (stats->errnos[e] != 0)
						fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]);
				}
			}
		}
	}

	resort_rb__delete(syscall_stats);
	printed += fprintf(fp, "\n\n");

	return printed;
}
4281
/*
 * Print the summary line (and per-syscall stats) for one thread.
 * Returns the number of characters printed; 0 when the thread never
 * accumulated any trace state.
 */
static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
{
	size_t printed = 0;
	struct thread_trace *ttrace = thread__priv(thread);
	double ratio;

	if (ttrace == NULL)
		return 0;

	/* This thread's share of all events seen, in percent. */
	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;

	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread__tid(thread));
	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
	printed += fprintf(fp, "%.1f%%", ratio);
	if (ttrace->pfmaj)
		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
	if (ttrace->pfmin)
		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
	if (trace->sched)
		printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
	else if (fputc('\n', fp) != EOF)
		++printed;

	printed += thread__dump_stats(ttrace, trace, fp);

	return printed;
}
4309
4310static unsigned long thread__nr_events(struct thread_trace *ttrace)
4311{
4312 return ttrace ? ttrace->nr_events : 0;
4313}
4314
/*
 * Resort one bucket of the machine's threads table by per-thread trace
 * event count, for the end-of-run thread summary.
 */
DEFINE_RESORT_RB(threads,
		(thread__nr_events(thread__priv(a->thread)) <
		 thread__nr_events(thread__priv(b->thread))),
	struct thread *thread;
)
{
	entry->thread = rb_entry(nd, struct thread_rb_node, rb_node)->thread;
}
4323
/*
 * Print the whole "Summary of events" section: walk every bucket of the
 * host machine's threads hash table, resorting each by per-thread event
 * count, and print one entry per thread.  Returns chars printed, or 0 on
 * a sorting (allocation) failure.
 */
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
	size_t printed = trace__fprintf_threads_header(fp);
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);

		if (threads == NULL) {
			fprintf(fp, "%s", "Error sorting output by nr_events!\n");
			return 0;
		}

		resort_rb__for_each_entry(nd, threads)
			printed += trace__fprintf_thread(fp, threads_entry->thread, trace);

		resort_rb__delete(threads);
	}
	return printed;
}
4345
4346static int trace__set_duration(const struct option *opt, const char *str,
4347 int unset __maybe_unused)
4348{
4349 struct trace *trace = opt->value;
4350
4351 trace->duration_filter = atof(str);
4352 return 0;
4353}
4354
4355static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
4356 int unset __maybe_unused)
4357{
4358 int ret = -1;
4359 size_t i;
4360 struct trace *trace = opt->value;
4361 /*
4362 * FIXME: introduce a intarray class, plain parse csv and create a
4363 * { int nr, int entries[] } struct...
4364 */
4365 struct intlist *list = intlist__new(str);
4366
4367 if (list == NULL)
4368 return -1;
4369
4370 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
4371 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
4372
4373 if (trace->filter_pids.entries == NULL)
4374 goto out;
4375
4376 trace->filter_pids.entries[0] = getpid();
4377
4378 for (i = 1; i < trace->filter_pids.nr; ++i)
4379 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
4380
4381 intlist__delete(list);
4382 ret = 0;
4383out:
4384 return ret;
4385}
4386
4387static int trace__open_output(struct trace *trace, const char *filename)
4388{
4389 struct stat st;
4390
4391 if (!stat(filename, &st) && st.st_size) {
4392 char oldname[PATH_MAX];
4393
4394 scnprintf(oldname, sizeof(oldname), "%s.old", filename);
4395 unlink(oldname);
4396 rename(filename, oldname);
4397 }
4398
4399 trace->output = fopen(filename, "w");
4400
4401 return trace->output == NULL ? -errno : 0;
4402}
4403
4404static int parse_pagefaults(const struct option *opt, const char *str,
4405 int unset __maybe_unused)
4406{
4407 int *trace_pgfaults = opt->value;
4408
4409 if (strcmp(str, "all") == 0)
4410 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
4411 else if (strcmp(str, "maj") == 0)
4412 *trace_pgfaults |= TRACE_PFMAJ;
4413 else if (strcmp(str, "min") == 0)
4414 *trace_pgfaults |= TRACE_PFMIN;
4415 else
4416 return -1;
4417
4418 return 0;
4419}
4420
/* Assign 'handler' to every evsel in the list that doesn't have one yet. */
static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->handler == NULL)
			evsel->handler = handler;
	}
}
4430
/*
 * Copy the argument beautifiers registered for syscall 'name' into this
 * evsel's per-field arg formatters.  syscalls:* tracepoints carry the
 * syscall number ("__syscall_nr" or "nr") as their first field, before
 * the actual syscall arguments, so skip over it when present.
 */
static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name)
{
	struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);

	if (fmt) {
		const struct syscall_fmt *scfmt = syscall_fmt__find(name);

		if (scfmt) {
			int skip = 0;

			if (strcmp(evsel->tp_format->format.fields->name, "__syscall_nr") == 0 ||
			    strcmp(evsel->tp_format->format.fields->name, "nr") == 0)
				++skip;

			memcpy(fmt + skip, scfmt->arg, (evsel->tp_format->format.nr_fields - skip) * sizeof(*fmt));
		}
	}
}
4449
/*
 * Prepare each --event tracepoint for printing: syscalls:* tracepoints get
 * the syscall id/args (or return value) field accessors plus the syscall
 * arg beautifiers; any other tracepoint gets the generic per-field
 * scnprintf formatters.  Returns 0 on success, -1 on failure.
 */
static int evlist__set_syscall_tp_fields(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		/* Skip evsels already set up, or without tracefs format info. */
		if (evsel->priv || !evsel->tp_format)
			continue;

		/* Not a syscalls:* tracepoint: use the generic arg printer. */
		if (strcmp(evsel->tp_format->system, "syscalls")) {
			evsel__init_tp_arg_scnprintf(evsel);
			continue;
		}

		if (evsel__init_syscall_tp(evsel))
			return -1;

		if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
			struct syscall_tp *sc = __evsel__syscall_tp(evsel);

			/* The syscall args start right after the u64 syscall id. */
			if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
				return -1;

			evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_enter_") - 1);
		} else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
			struct syscall_tp *sc = __evsel__syscall_tp(evsel);

			/* ditto for the return value field of sys_exit_* */
			if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
				return -1;

			evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_exit_") - 1);
		}
	}

	return 0;
}
4485
4486/*
4487 * XXX: Hackish, just splitting the combined -e+--event (syscalls
4488 * (raw_syscalls:{sys_{enter,exit}} + events (tracepoints, HW, SW, etc) to use
4489 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
4490 *
4491 * It'd be better to introduce a parse_options() variant that would return a
4492 * list with the terms it didn't match to an event...
4493 */
/*
 * -e/--event/--expr callback: split the CSV into lists[1] (syscall names,
 * globs, aliases and strace group files, handled via the qualifier
 * machinery) and lists[0] (everything else, handed to parse_events_option()).
 */
static int trace__parse_events_option(const struct option *opt, const char *str,
				      int unset __maybe_unused)
{
	struct trace *trace = (struct trace *)opt->value;
	const char *s = str;
	char *sep = NULL, *lists[2] = { NULL, NULL, };
	int len = strlen(str) + 1, err = -1, list, idx;
	char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
	char group_name[PATH_MAX];
	const struct syscall_fmt *fmt;

	if (strace_groups_dir == NULL)
		return -1;

	/* A leading '!' negates the whole syscall qualifier list. */
	if (*s == '!') {
		++s;
		trace->not_ev_qualifier = true;
	}

	/* Walk the CSV in place, temporarily NUL-terminating each term. */
	while (1) {
		if ((sep = strchr(s, ',')) != NULL)
			*sep = '\0';

		list = 0;
		/* Exact syscall name or glob matching at least one syscall? */
		if (syscalltbl__id(trace->sctbl, s) >= 0 ||
		    syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
			list = 1;
			goto do_concat;
		}

		/* Try strace-style aliases, then strace group files on disk. */
		fmt = syscall_fmt__find_by_alias(s);
		if (fmt != NULL) {
			list = 1;
			s = fmt->name;
		} else {
			path__join(group_name, sizeof(group_name), strace_groups_dir, s);
			if (access(group_name, R_OK) == 0)
				list = 1;
		}
do_concat:
		if (lists[list]) {
			/*
			 * NOTE(review): lists[list] was sized from the original
			 * string (len); if the alias lookup above substituted a
			 * longer canonical name this sprintf() could overflow —
			 * verify alias/name lengths or use a growable buffer.
			 */
			sprintf(lists[list] + strlen(lists[list]), ",%s", s);
		} else {
			lists[list] = malloc(len);
			if (lists[list] == NULL)
				goto out;
			strcpy(lists[list], s);
		}

		if (!sep)
			break;

		/* Restore the ',' we NUL'ed out and move to the next term. */
		*sep = ',';
		s = sep + 1;
	}

	if (lists[1] != NULL) {
		struct strlist_config slist_config = {
			.dirname = strace_groups_dir,
		};

		trace->ev_qualifier = strlist__new(lists[1], &slist_config);
		if (trace->ev_qualifier == NULL) {
			fputs("Not enough memory to parse event qualifier", trace->output);
			goto out;
		}

		if (trace__validate_ev_qualifier(trace))
			goto out;
		/* Having syscall names implies strace-like syscall tracing. */
		trace->trace_syscalls = true;
	}

	err = 0;

	if (lists[0]) {
		struct parse_events_option_args parse_events_option_args = {
			.evlistp = &trace->evlist,
		};
		struct option o = {
			.value = &parse_events_option_args,
		};
		err = parse_events_option(&o, lists[0], 0);
	}
out:
	free(strace_groups_dir);
	free(lists[0]);
	free(lists[1]);
	if (sep)
		*sep = ',';

	return err;
}
4586
4587static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
4588{
4589 struct trace *trace = opt->value;
4590
4591 if (!list_empty(&trace->evlist->core.entries)) {
4592 struct option o = {
4593 .value = &trace->evlist,
4594 };
4595 return parse_cgroups(&o, str, unset);
4596 }
4597 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
4598
4599 return 0;
4600}
4601
/*
 * perf_config() callback: apply 'trace.*' settings from the user's
 * perfconfig to 'trace' (passed via 'arg').  Unknown keys are silently
 * ignored; only a failed strdup() of trace.add_events is fatal.
 */
static int trace__config(const char *var, const char *value, void *arg)
{
	struct trace *trace = arg;
	int err = 0;

	if (!strcmp(var, "trace.add_events")) {
		trace->perfconfig_events = strdup(value);
		if (trace->perfconfig_events == NULL) {
			pr_err("Not enough memory for %s\n", "trace.add_events");
			return -1;
		}
	} else if (!strcmp(var, "trace.show_timestamp")) {
		trace->show_tstamp = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.show_duration")) {
		trace->show_duration = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.show_arg_names")) {
		trace->show_arg_names = perf_config_bool(var, value);
		/* Without arg names, zeros must be shown to keep arg positions clear. */
		if (!trace->show_arg_names)
			trace->show_zeros = true;
	} else if (!strcmp(var, "trace.show_zeros")) {
		bool new_show_zeros = perf_config_bool(var, value);
		/* Reject hiding zeros while arg names are off: output would be ambiguous. */
		if (!trace->show_arg_names && !new_show_zeros) {
			pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
			goto out;
		}
		trace->show_zeros = new_show_zeros;
	} else if (!strcmp(var, "trace.show_prefix")) {
		trace->show_string_prefix = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.no_inherit")) {
		trace->opts.no_inherit = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.args_alignment")) {
		int args_alignment = 0;
		if (perf_config_int(&args_alignment, var, value) == 0)
			trace->args_alignment = args_alignment;
	} else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
		/* Choose between libtraceevent and perf's own libbeauty printers. */
		if (strcasecmp(value, "libtraceevent") == 0)
			trace->libtraceevent_print = true;
		else if (strcasecmp(value, "libbeauty") == 0)
			trace->libtraceevent_print = false;
	}
out:
	return err;
}
4645
4646static void trace__exit(struct trace *trace)
4647{
4648 int i;
4649
4650 strlist__delete(trace->ev_qualifier);
4651 zfree(&trace->ev_qualifier_ids.entries);
4652 if (trace->syscalls.table) {
4653 for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
4654 syscall__exit(&trace->syscalls.table[i]);
4655 zfree(&trace->syscalls.table);
4656 }
4657 syscalltbl__delete(trace->sctbl);
4658 zfree(&trace->perfconfig_events);
4659}
4660
4661#ifdef HAVE_BPF_SKEL
/*
 * Add the "__augmented_syscalls__" bpf-output event that BPF-augmented
 * syscall payloads are delivered through.  Returns parse_event()'s result.
 */
static int bpf__setup_bpf_output(struct evlist *evlist)
{
	int ret;

	ret = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");
	if (ret)
		pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n");

	return ret;
}
4671#endif
4672
4673int cmd_trace(int argc, const char **argv)
4674{
4675 const char *trace_usage[] = {
4676 "perf trace [<options>] [<command>]",
4677 "perf trace [<options>] -- <command> [<options>]",
4678 "perf trace record [<options>] [<command>]",
4679 "perf trace record [<options>] -- <command> [<options>]",
4680 NULL
4681 };
4682 struct trace trace = {
4683 .opts = {
4684 .target = {
4685 .uid = UINT_MAX,
4686 .uses_mmap = true,
4687 },
4688 .user_freq = UINT_MAX,
4689 .user_interval = ULLONG_MAX,
4690 .no_buffering = true,
4691 .mmap_pages = UINT_MAX,
4692 },
4693 .output = stderr,
4694 .show_comm = true,
4695 .show_tstamp = true,
4696 .show_duration = true,
4697 .show_arg_names = true,
4698 .args_alignment = 70,
4699 .trace_syscalls = false,
4700 .kernel_syscallchains = false,
4701 .max_stack = UINT_MAX,
4702 .max_events = ULONG_MAX,
4703 };
4704 const char *output_name = NULL;
4705 const struct option trace_options[] = {
4706 OPT_CALLBACK('e', "event", &trace, "event",
4707 "event/syscall selector. use 'perf list' to list available events",
4708 trace__parse_events_option),
4709 OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
4710 "event filter", parse_filter),
4711 OPT_BOOLEAN(0, "comm", &trace.show_comm,
4712 "show the thread COMM next to its id"),
4713 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
4714 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
4715 trace__parse_events_option),
4716 OPT_STRING('o', "output", &output_name, "file", "output file name"),
4717 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
4718 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
4719 "trace events on existing process id"),
4720 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
4721 "trace events on existing thread id"),
4722 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
4723 "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
4724 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
4725 "system-wide collection from all CPUs"),
4726 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
4727 "list of cpus to monitor"),
4728 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
4729 "child tasks do not inherit counters"),
4730 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
4731 "number of mmap data pages", evlist__parse_mmap_pages),
4732 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
4733 "user to profile"),
4734 OPT_CALLBACK(0, "duration", &trace, "float",
4735 "show only events with duration > N.M ms",
4736 trace__set_duration),
4737 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
4738 OPT_INCR('v', "verbose", &verbose, "be more verbose"),
4739 OPT_BOOLEAN('T', "time", &trace.full_time,
4740 "Show full timestamp, not time relative to first start"),
4741 OPT_BOOLEAN(0, "failure", &trace.failure_only,
4742 "Show only syscalls that failed"),
4743 OPT_BOOLEAN('s', "summary", &trace.summary_only,
4744 "Show only syscall summary with statistics"),
4745 OPT_BOOLEAN('S', "with-summary", &trace.summary,
4746 "Show all syscalls and summary with statistics"),
4747 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
4748 "Show errno stats per syscall, use with -s or -S"),
4749 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
4750 "Trace pagefaults", parse_pagefaults, "maj"),
4751 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
4752 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
4753 OPT_CALLBACK(0, "call-graph", &trace.opts,
4754 "record_mode[,record_size]", record_callchain_help,
4755 &record_parse_callchain_opt),
4756 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
4757 "Use libtraceevent to print the tracepoint arguments."),
4758 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
4759 "Show the kernel callchains on the syscall exit path"),
4760 OPT_ULONG(0, "max-events", &trace.max_events,
4761 "Set the maximum number of events to print, exit after that is reached. "),
4762 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
4763 "Set the minimum stack depth when parsing the callchain, "
4764 "anything below the specified depth will be ignored."),
4765 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
4766 "Set the maximum stack depth when parsing the callchain, "
4767 "anything beyond the specified depth will be ignored. "
4768 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
4769 OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
4770 "Sort batch of events before processing, use if getting out of order events"),
4771 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
4772 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
4773 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
4774 "per thread proc mmap processing timeout in ms"),
4775 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
4776 trace__parse_cgroups),
4777 OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay,
4778 "ms to wait before starting measurement after program "
4779 "start"),
4780 OPTS_EVSWITCH(&trace.evswitch),
4781 OPT_END()
4782 };
4783 bool __maybe_unused max_stack_user_set = true;
4784 bool mmap_pages_user_set = true;
4785 struct evsel *evsel;
4786 const char * const trace_subcommands[] = { "record", NULL };
4787 int err = -1;
4788 char bf[BUFSIZ];
4789 struct sigaction sigchld_act;
4790
4791 signal(SIGSEGV, sighandler_dump_stack);
4792 signal(SIGFPE, sighandler_dump_stack);
4793 signal(SIGINT, sighandler_interrupt);
4794
4795 memset(&sigchld_act, 0, sizeof(sigchld_act));
4796 sigchld_act.sa_flags = SA_SIGINFO;
4797 sigchld_act.sa_sigaction = sighandler_chld;
4798 sigaction(SIGCHLD, &sigchld_act, NULL);
4799
4800 trace.evlist = evlist__new();
4801 trace.sctbl = syscalltbl__new();
4802
4803 if (trace.evlist == NULL || trace.sctbl == NULL) {
4804 pr_err("Not enough memory to run!\n");
4805 err = -ENOMEM;
4806 goto out;
4807 }
4808
4809 /*
4810 * Parsing .perfconfig may entail creating a BPF event, that may need
4811 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
4812 * is too small. This affects just this process, not touching the
4813 * global setting. If it fails we'll get something in 'perf trace -v'
4814 * to help diagnose the problem.
4815 */
4816 rlimit__bump_memlock();
4817
4818 err = perf_config(trace__config, &trace);
4819 if (err)
4820 goto out;
4821
4822 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
4823 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
4824
4825 /*
4826 * Here we already passed thru trace__parse_events_option() and it has
4827 * already figured out if -e syscall_name, if not but if --event
4828 * foo:bar was used, the user is interested _just_ in those, say,
4829 * tracepoint events, not in the strace-like syscall-name-based mode.
4830 *
4831 * This is important because we need to check if strace-like mode is
4832 * needed to decided if we should filter out the eBPF
4833 * __augmented_syscalls__ code, if it is in the mix, say, via
4834 * .perfconfig trace.add_events, and filter those out.
4835 */
4836 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
4837 trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
4838 trace.trace_syscalls = true;
4839 }
4840 /*
4841 * Now that we have --verbose figured out, lets see if we need to parse
4842 * events from .perfconfig, so that if those events fail parsing, say some
4843 * BPF program fails, then we'll be able to use --verbose to see what went
4844 * wrong in more detail.
4845 */
4846 if (trace.perfconfig_events != NULL) {
4847 struct parse_events_error parse_err;
4848
4849 parse_events_error__init(&parse_err);
4850 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
4851 if (err)
4852 parse_events_error__print(&parse_err, trace.perfconfig_events);
4853 parse_events_error__exit(&parse_err);
4854 if (err)
4855 goto out;
4856 }
4857
4858 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
4859 usage_with_options_msg(trace_usage, trace_options,
4860 "cgroup monitoring only available in system-wide mode");
4861 }
4862
4863#ifdef HAVE_BPF_SKEL
4864 if (!trace.trace_syscalls)
4865 goto skip_augmentation;
4866
4867 trace.skel = augmented_raw_syscalls_bpf__open();
4868 if (!trace.skel) {
4869 pr_debug("Failed to open augmented syscalls BPF skeleton");
4870 } else {
4871 /*
4872 * Disable attaching the BPF programs except for sys_enter and
4873 * sys_exit that tail call into this as necessary.
4874 */
4875 struct bpf_program *prog;
4876
4877 bpf_object__for_each_program(prog, trace.skel->obj) {
4878 if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
4879 bpf_program__set_autoattach(prog, /*autoattach=*/false);
4880 }
4881
4882 err = augmented_raw_syscalls_bpf__load(trace.skel);
4883
4884 if (err < 0) {
4885 libbpf_strerror(err, bf, sizeof(bf));
4886 pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf);
4887 } else {
4888 augmented_raw_syscalls_bpf__attach(trace.skel);
4889 trace__add_syscall_newtp(&trace);
4890 }
4891 }
4892
4893 err = bpf__setup_bpf_output(trace.evlist);
4894 if (err) {
4895 libbpf_strerror(err, bf, sizeof(bf));
4896 pr_err("ERROR: Setup BPF output event failed: %s\n", bf);
4897 goto out;
4898 }
4899 trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
4900 assert(!strcmp(evsel__name(trace.syscalls.events.bpf_output), "__augmented_syscalls__"));
4901skip_augmentation:
4902#endif
4903 err = -1;
4904
4905 if (trace.trace_pgfaults) {
4906 trace.opts.sample_address = true;
4907 trace.opts.sample_time = true;
4908 }
4909
4910 if (trace.opts.mmap_pages == UINT_MAX)
4911 mmap_pages_user_set = false;
4912
	/*
	 * NOTE(review): this is the tail of cmd_trace(); the entry point,
	 * option parsing and local declarations (trace, err, bf, evsel,
	 * max_stack_user_set, mmap_pages_user_set, ...) are above this
	 * chunk, so the comments below describe only what is visible here.
	 */

	/*
	 * --max-stack not given on the command line (still at the UINT_MAX
	 * sentinel): pick a default.  When replaying from a file
	 * (input_name set) use the compile-time maximum, otherwise honor
	 * the running kernel's kernel.perf_event_max_stack sysctl.
	 */
	if (trace.max_stack == UINT_MAX) {
		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
		max_stack_user_set = false;
	}

#ifdef HAVE_DWARF_UNWIND_SUPPORT
	/*
	 * If the user constrained the stack depth (--min-stack, or an
	 * explicit --max-stack) but didn't ask for callchains, implicitly
	 * enable DWARF-based callchain collection so those options have
	 * something to act on.  The return value is deliberately ignored:
	 * this is a best-effort convenience default.
	 */
	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
	}
#endif

	if (callchain_param.enabled) {
		/*
		 * Callchains need bigger mmap buffers; quadruple the
		 * mlock'able default, but only if the user didn't set
		 * --mmap-pages and we are root (geteuid() == 0), since
		 * larger locked buffers may otherwise fail.
		 */
		if (!mmap_pages_user_set && geteuid() == 0)
			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;

		symbol_conf.use_callchain = true;
	}

	/*
	 * Extra --event selectors were given: route them through the
	 * generic trace event handler and set up tracepoint field
	 * accessors for any syscalls:* events among them.
	 */
	if (trace.evlist->core.nr_entries > 0) {
		evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
		if (evlist__set_syscall_tp_fields(trace.evlist)) {
			perror("failed to set syscalls:* tracepoint fields");
			goto out;
		}
	}

	/* Time-sort events across CPUs before delivery, copying payloads
	 * into the ordered_events queue so the ring buffer can be reused. */
	if (trace.sort_events) {
		ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
		ordered_events__set_copy_on_queue(&trace.oe.data, true);
	}

	/*
	 * If we are augmenting syscalls, then combine what we put in the
	 * __augmented_syscalls__ BPF map with what is in the
	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
	 *
	 * We'll switch to look at two BPF maps, one for sys_enter and the
	 * other for sys_exit when we start augmenting the sys_exit paths with
	 * buffers that are being copied from kernel to userspace, think 'read'
	 * syscall.
	 */
	if (trace.syscalls.events.bpf_output) {
		evlist__for_each_entry(trace.evlist, evsel) {
			bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0;

			/*
			 * raw_syscalls:sys_exit covers all syscalls; jump
			 * straight to the shared sys_exit setup below.  Note
			 * this goto targets a label INSIDE the last if block
			 * of this loop body — legal C, but easy to misread.
			 */
			if (raw_syscalls_sys_exit) {
				trace.raw_augmented_syscalls = true;
				goto init_augmented_syscall_tp;
			}

			/*
			 * First syscalls:sys_enter_* event seen (->priv still
			 * NULL on the bpf_output evsel): wire the BPF_OUTPUT
			 * event to parse sys_enter-shaped payloads.
			 */
			if (trace.syscalls.events.bpf_output->priv == NULL &&
			    strstr(evsel__name(evsel), "syscalls:sys_enter")) {
				struct evsel *augmented = trace.syscalls.events.bpf_output;
				if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
				    evsel__init_augmented_syscall_tp_args(augmented))
					goto out;
				/*
				 * Augmented is __augmented_syscalls__ BPF_OUTPUT event
				 * Above we made sure we can get from the payload the tp fields
				 * that we get from syscalls:sys_enter tracefs format file.
				 */
				augmented->handler = trace__sys_enter;
				/*
				 * Now we do the same for the *syscalls:sys_enter event so that
				 * if we handle it directly, i.e. if the BPF prog returns 0 so
				 * as not to filter it, then we'll handle it just like we would
				 * for the BPF_OUTPUT one:
				 */
				if (evsel__init_augmented_syscall_tp(evsel, evsel) ||
				    evsel__init_augmented_syscall_tp_args(evsel))
					goto out;
				evsel->handler = trace__sys_enter;
			}

			if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
				struct syscall_tp *sc;
init_augmented_syscall_tp:
				if (evsel__init_augmented_syscall_tp(evsel, evsel))
					goto out;
				sc = __evsel__syscall_tp(evsel);
				/*
				 * For now with BPF raw_augmented we hook into
				 * raw_syscalls:sys_enter and there we get all
				 * 6 syscall args plus the tracepoint common
				 * fields and the syscall_nr (another long).
				 * So we check if that is the case and if so
				 * don't look after the sc->args_size but
				 * always after the full raw_syscalls:sys_enter
				 * payload, which is fixed.
				 *
				 * We'll revisit this later to pass
				 * s->args_size to the BPF augmenter (now
				 * tools/perf/examples/bpf/augmented_raw_syscalls.c,
				 * so that it copies only what we need for each
				 * syscall, like what happens when we use
				 * syscalls:sys_enter_NAME, so that we reduce
				 * the kernel/userspace traffic to just what is
				 * needed for each syscall.
				 */
				if (trace.raw_augmented_syscalls)
					trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
				evsel__init_augmented_syscall_tp_ret(evsel);
				evsel->handler = trace__sys_exit;
			}
		}
	}

	/* 'perf trace record ...' delegates to perf record, shifting argv
	 * past the "record" subcommand.  Note this returns directly, so
	 * none of the cleanup below (trace__exit etc.) runs on this path —
	 * presumably intentional since trace__record execs/record paths
	 * manage their own lifetime; TODO confirm. */
	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
		return trace__record(&trace, argc-1, &argv[1]);

	/* Using just --errno-summary will trigger --summary */
	if (trace.errno_summary && !trace.summary && !trace.summary_only)
		trace.summary_only = true;

	/* summary_only implies summary option, but don't overwrite summary if set */
	if (trace.summary_only)
		trace.summary = trace.summary_only;

	/* -o/--output: redirect trace.output away from the default stream. */
	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
			perror("failed to create output file");
			goto out;
		}
	}

	err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
	if (err)
		goto out_close;

	/* Validate the --pid/--tid/--cpu/--uid target combination,
	 * reporting a human-readable error into bf on failure. */
	err = target__validate(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	err = target__parse_uid(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	/* No workload to launch and no target specified: trace everything. */
	if (!argc && target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

	/* Either replay a recorded perf.data file (-i) or trace live. */
	if (input_name)
		err = trace__replay(&trace);
	else
		err = trace__run(&trace, argc, argv);

out_close:
	/* Only close trace.output if we fopen'ed it above (-o given). */
	if (output_name != NULL)
		fclose(trace.output);
out:
	trace__exit(&trace);
#ifdef HAVE_BPF_SKEL
	augmented_raw_syscalls_bpf__destroy(trace.skel);
#endif
	return err;
}