/*
 * builtin-trace.c
 *
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system wide, etc.  Default format is loosely strace like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include "builtin.h"
#include "util/color.h"
#include "util/debug.h"
#include "util/evlist.h"
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
#include "callchain.h"
#include "syscalltbl.h"
#include "rb_resort.h"

#include <libaudit.h> /* FIXME: Still needed for audit_errno_to_name */
#include <stdlib.h>
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/audit.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>

#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
#endif

struct trace {
	struct perf_tool	tool;
	struct syscalltbl	*sctbl;
	struct {
		int		max;
		struct syscall  *table;
		struct {
			struct perf_evsel *sys_enter,
					  *sys_exit;
		}		events;
	} syscalls;
	struct record_opts	opts;
	struct perf_evlist	*evlist;
	struct machine		*host;
	struct thread		*current;
	u64			base_time;
	FILE			*output;
	unsigned long		nr_events;
	struct strlist		*ev_qualifier;
	struct {
		size_t		nr;
		int		*entries;
	}			ev_qualifier_ids;
	struct {
		size_t		nr;
		pid_t		*entries;
	}			filter_pids;
	double			duration_filter;
	double			runtime_ms;
	struct {
		u64		vfs_getname,
				proc_getname;
	} stats;
	unsigned int		max_stack;
	unsigned int		min_stack;
	bool			not_ev_qualifier;
	bool			live;
	bool			full_time;
	bool			sched;
	bool			multiple_threads;
	bool			summary;
	bool			summary_only;
	bool			show_comm;
	bool			show_tool_stats;
	bool			trace_syscalls;
	bool			kernel_syscallchains;
	bool			force;
	bool			vfs_getname;
	int			trace_pgfaults;
	int			open_id;
};

struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};

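/*
 * The TP_UINT_FIELD{,__SWAPPED} macros below generate one raw tracepoint
 * field accessor per integer width (u8..u64).  Values are copied out of
 * sample->raw_data with memcpy() because payload offsets are not
 * guaranteed to be naturally aligned.
 */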
#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value; \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);

#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);

static int tp_field__init_uint(struct tp_field *field,
			       struct format_field *format_field,
			       bool needs_swap)
{
	field->offset = format_field->offset;

	switch (format_field->size) {
	case 1:
		field->integer = tp_field__u8;
		break;
	case 2:
		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
		break;
	case 4:
		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
		break;
	case 8:
		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
		break;
	default:
		return -1;
	}

	return 0;
}

static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}

static int tp_field__init_ptr(struct tp_field *field, struct format_field *format_field)
{
	field->offset = format_field->offset;
	field->pointer = tp_field__ptr;
	return 0;
}
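
/*
 * Mirrors the layout of the raw_syscalls:sys_{enter,exit} tracepoints:
 * both start with the syscall id, followed either by the argument words
 * (enter) or by the return value (exit), hence the union.
 */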
struct syscall_tp {
	struct tp_field id;
	union {
		struct tp_field args, ret;
	};
};

static int perf_evsel__init_tp_uint_field(struct perf_evsel *evsel,
					  struct tp_field *field,
					  const char *name)
{
	struct format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })

static int perf_evsel__init_tp_ptr_field(struct perf_evsel *evsel,
					 struct tp_field *field,
					 const char *name)
{
	struct format_field *format_field = perf_evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })

static void perf_evsel__delete_priv(struct perf_evsel *evsel)
{
	zfree(&evsel->priv);
	perf_evsel__delete(evsel);
}

static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel, void *handler)
{
	evsel->priv = malloc(sizeof(struct syscall_tp));
	if (evsel->priv != NULL) {
		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
			goto out_delete;

		evsel->handler = handler;
		return 0;
	}

	return -ENOMEM;

out_delete:
	zfree(&evsel->priv);
	return -ENOENT;
}

static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);

	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = perf_evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (perf_evsel__init_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	perf_evsel__delete_priv(evsel);
	return NULL;
}

#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.pointer(&fields->name, sample); })

struct syscall_arg {
	unsigned long val;
	struct thread *thread;
	struct trace  *trace;
	void	      *parm;
	u8	      idx;
	u8	      mask;
};

struct strarray {
	int	    offset;
	int	    nr_entries;
	const char **entries;
};

#define DEFINE_STRARRAY(array) struct strarray strarray__##array = { \
	.nr_entries = ARRAY_SIZE(array), \
	.entries = array, \
}

#define DEFINE_STRARRAY_OFFSET(array, off) struct strarray strarray__##array = { \
	.offset	    = off, \
	.nr_entries = ARRAY_SIZE(array), \
	.entries = array, \
}

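/*
 * A strarray maps small integer argument values to symbolic names, e.g.
 * DEFINE_STRARRAY(itimers) lets setitimer()'s 'which' argument print as
 * REAL/VIRTUAL/PROF instead of 0/1/2.  The .offset member handles arrays
 * whose first valid value is not zero, such as the epoll_ctl ops.
 */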
static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
					        struct syscall_arg *arg)
{
	struct strarray *sa = arg->parm;
	int idx = arg->val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries)
		return scnprintf(bf, size, intfmt, arg->val);

	return scnprintf(bf, size, "%s", sa->entries[idx]);
}

static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray

#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches as soon as the ioctl beautifier
 * 	  gets rewritten to support all arches.
 */
static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%#x", arg);
}

#define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
#endif /* defined(__i386__) || defined(__x86_64__) */

static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
					struct syscall_arg *arg);

#define SCA_FD syscall_arg__scnprintf_fd

#ifndef AT_FDCWD
#define AT_FDCWD	-100
#endif

static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
					   struct syscall_arg *arg)
{
	int fd = arg->val;

	if (fd == AT_FDCWD)
		return scnprintf(bf, size, "CWD");

	return syscall_arg__scnprintf_fd(bf, size, arg);
}

#define SCA_FDAT syscall_arg__scnprintf_fd_at

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd

static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
					 struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%#lx", arg->val);
}

#define SCA_HEX syscall_arg__scnprintf_hex

static size_t syscall_arg__scnprintf_int(char *bf, size_t size,
					 struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%d", arg->val);
}

#define SCA_INT syscall_arg__scnprintf_int

static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD",
};
static DEFINE_STRARRAY(bpf_cmd);

static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1);

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers);

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options);

static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences);

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "F_GETLK64",
	"F_SETLK64", "F_SETLKW64", "F_SETOWN_EX", "F_GETOWN_EX",
	"F_GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds);

static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources);

static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow);

static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid);

static const char *socket_families[] = {
	"UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
	"BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
	"SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
	"RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
	"BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
	"ALG", "NFC", "VSOCK",
};
static DEFINE_STRARRAY(socket_families);

static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	size_t printed = 0;
	int mode = arg->val;

	if (mode == F_OK) /* 0 */
		return scnprintf(bf, size, "F");
#define	P_MODE(n) \
	if (mode & n##_OK) { \
		printed += scnprintf(bf + printed, size - printed, "%s", #n); \
		mode &= ~n##_OK; \
	}

	P_MODE(R);
	P_MODE(W);
	P_MODE(X);
#undef P_MODE

	if (mode)
		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);

	return printed;
}

#define SCA_ACCMODE syscall_arg__scnprintf_access_mode

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_FILENAME syscall_arg__scnprintf_filename

static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags

#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
 */
#define TCGETS		0x5401

static const char *tioctls[] = {
	"TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
	"TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL",
	"TIOCSCTTY", "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI",
	"TIOCGWINSZ", "TIOCSWINSZ", "TIOCMGET", "TIOCMBIS", "TIOCMBIC",
	"TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR", "FIONREAD", "TIOCLINUX",
	"TIOCCONS", "TIOCGSERIAL", "TIOCSSERIAL", "TIOCPKT", "FIONBIO",
	"TIOCNOTTY", "TIOCSETD", "TIOCGETD", "TCSBRKP", [0x27] = "TIOCSBRK",
	"TIOCCBRK", "TIOCGSID", "TCGETS2", "TCSETS2", "TCSETSW2", "TCSETSF2",
	"TIOCGRS485", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK",
	"TIOCGDEV||TCGETX", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG",
	"TIOCVHANGUP", "TIOCGPKT", "TIOCGPTLCK", "TIOCGEXCL",
	[0x50] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG",
	"TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS",
	"TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI",
	"TIOCMIWAIT", "TIOCGICOUNT", [0x60] = "FIOQSIZE",
};

static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
#endif /* defined(__i386__) || defined(__x86_64__) */

#ifndef GRND_NONBLOCK
#define GRND_NONBLOCK	0x0001
#endif
#ifndef GRND_RANDOM
#define GRND_RANDOM	0x0002
#endif

static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
						   struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & GRND_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~GRND_##n; \
	}

	P_FLAG(RANDOM);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags

#define STRARRAY(arg, name, array) \
	  .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
	  .arg_parm	 = { [arg] = &strarray__##array, }

#include "trace/beauty/eventfd.c"
#include "trace/beauty/flock.c"
#include "trace/beauty/futex_op.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
#include "trace/beauty/open_flags.c"
#include "trace/beauty/perf_event_open.c"
#include "trace/beauty/pid.c"
#include "trace/beauty/sched_policy.c"
#include "trace/beauty/seccomp.c"
#include "trace/beauty/signum.c"
#include "trace/beauty/socket_type.c"
#include "trace/beauty/waitid_options.c"

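/*
 * Per-syscall formatting overrides.  Note: syscall_fmts[] must be kept
 * sorted by .name, as syscall_fmt__find() relies on bsearch().
 */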
static struct syscall_fmt {
	const char *name;
	const char *alias;
	size_t	   (*arg_scnprintf[6])(char *bf, size_t size, struct syscall_arg *arg);
	void	   *arg_parm[6];
	bool	   errmsg;
	bool	   errpid;
	bool	   timeout;
	bool	   hexret;
} syscall_fmts[] = {
	{ .name	    = "access",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_ACCMODE,  /* mode */ }, },
	{ .name	    = "arch_prctl", .errmsg = true, .alias = "prctl", },
	{ .name	    = "bpf",	    .errmsg = true, STRARRAY(0, cmd, bpf_cmd), },
	{ .name	    = "brk",	    .hexret = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
	{ .name	    = "chdir",	    .errmsg = true, },
	{ .name	    = "chmod",	    .errmsg = true, },
	{ .name	    = "chroot",	    .errmsg = true, },
	{ .name     = "clock_gettime",  .errmsg = true, STRARRAY(0, clk_id, clockid), },
	{ .name	    = "clone",	    .errpid = true, },
	{ .name	    = "close",	    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
	{ .name	    = "connect",    .errmsg = true, },
	{ .name	    = "creat",	    .errmsg = true, },
	{ .name	    = "dup",	    .errmsg = true, },
	{ .name	    = "dup2",	    .errmsg = true, },
	{ .name	    = "dup3",	    .errmsg = true, },
	{ .name	    = "epoll_ctl",  .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
	{ .name	    = "eventfd2",   .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
	{ .name	    = "faccessat",  .errmsg = true, },
	{ .name	    = "fadvise64",  .errmsg = true, },
	{ .name	    = "fallocate",  .errmsg = true, },
	{ .name	    = "fchdir",	    .errmsg = true, },
	{ .name	    = "fchmod",	    .errmsg = true, },
	{ .name	    = "fchmodat",   .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
	{ .name	    = "fchown",	    .errmsg = true, },
	{ .name	    = "fchownat",   .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
	{ .name	    = "fcntl",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_STRARRAY, /* cmd */ },
	  .arg_parm	 = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
	{ .name	    = "fdatasync",  .errmsg = true, },
	{ .name	    = "flock",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_FLOCK, /* cmd */ }, },
	{ .name	    = "fsetxattr",  .errmsg = true, },
	{ .name	    = "fstat",	    .errmsg = true, .alias = "newfstat", },
	{ .name	    = "fstatat",    .errmsg = true, .alias = "newfstatat", },
	{ .name	    = "fstatfs",    .errmsg = true, },
	{ .name	    = "fsync",    .errmsg = true, },
	{ .name	    = "ftruncate", .errmsg = true, },
	{ .name	    = "futex",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
	{ .name	    = "futimesat", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
	{ .name	    = "getdents",   .errmsg = true, },
	{ .name	    = "getdents64", .errmsg = true, },
	{ .name	    = "getitimer",  .errmsg = true, STRARRAY(0, which, itimers), },
	{ .name	    = "getpid",	    .errpid = true, },
	{ .name	    = "getpgid",    .errpid = true, },
	{ .name	    = "getppid",    .errpid = true, },
	{ .name	    = "getrandom",  .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_GETRANDOM_FLAGS, /* flags */ }, },
	{ .name	    = "getrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
	{ .name	    = "getxattr",   .errmsg = true, },
	{ .name	    = "inotify_add_watch",	    .errmsg = true, },
	{ .name	    = "ioctl",	    .errmsg = true,
	  .arg_scnprintf = {
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
 */
			     [1] = SCA_STRHEXARRAY, /* cmd */
			     [2] = SCA_HEX, /* arg */ },
	  .arg_parm	 = { [1] = &strarray__tioctls, /* cmd */ }, },
#else
			     [2] = SCA_HEX, /* arg */ }, },
#endif
	{ .name	    = "keyctl",	    .errmsg = true, STRARRAY(0, option, keyctl_options), },
	{ .name	    = "kill",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
	{ .name	    = "lchown",    .errmsg = true, },
	{ .name	    = "lgetxattr",  .errmsg = true, },
	{ .name	    = "linkat",	    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
	{ .name	    = "listxattr",  .errmsg = true, },
	{ .name	    = "llistxattr", .errmsg = true, },
	{ .name	    = "lremovexattr",  .errmsg = true, },
	{ .name	    = "lseek",	    .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_STRARRAY, /* whence */ },
	  .arg_parm	 = { [2] = &strarray__whences, /* whence */ }, },
	{ .name	    = "lsetxattr",  .errmsg = true, },
	{ .name	    = "lstat",	    .errmsg = true, .alias = "newlstat", },
	{ .name	    = "lsxattr",    .errmsg = true, },
	{ .name     = "madvise",    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX,	 /* start */
			     [2] = SCA_MADV_BHV, /* behavior */ }, },
	{ .name	    = "mkdir",    .errmsg = true, },
	{ .name	    = "mkdirat",    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
	{ .name	    = "mknod",      .errmsg = true, },
	{ .name	    = "mknodat",    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
	{ .name	    = "mlock",	    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
	{ .name	    = "mlockall",   .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
	{ .name	    = "mmap",	    .hexret = true,
	  .arg_scnprintf = { [0] = SCA_HEX,	  /* addr */
			     [2] = SCA_MMAP_PROT, /* prot */
			     [3] = SCA_MMAP_FLAGS, /* flags */ }, },
	{ .name	    = "mprotect",   .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* start */
			     [2] = SCA_MMAP_PROT, /* prot */ }, },
	{ .name	    = "mq_unlink", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FILENAME, /* u_name */ }, },
	{ .name	    = "mremap",	    .hexret = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */
			     [3] = SCA_MREMAP_FLAGS, /* flags */
			     [4] = SCA_HEX, /* new_addr */ }, },
	{ .name	    = "munlock",    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
	{ .name	    = "munmap",	    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
	{ .name	    = "name_to_handle_at", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
	{ .name	    = "newfstatat", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
	{ .name	    = "open",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
	{ .name	    = "open_by_handle_at", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
			     [2] = SCA_OPEN_FLAGS, /* flags */ }, },
	{ .name	    = "openat",	    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
			     [2] = SCA_OPEN_FLAGS, /* flags */ }, },
	{ .name	    = "perf_event_open", .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_INT, /* cpu */
			     [3] = SCA_FD,  /* group_fd */
			     [4] = SCA_PERF_FLAGS,  /* flags */ }, },
	{ .name	    = "pipe2",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, },
	{ .name	    = "poll",	    .errmsg = true, .timeout = true, },
	{ .name	    = "ppoll",	    .errmsg = true, .timeout = true, },
	{ .name	    = "pread",	    .errmsg = true, .alias = "pread64", },
	{ .name	    = "preadv",	    .errmsg = true, .alias = "pread", },
	{ .name	    = "prlimit64",  .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
	{ .name	    = "pwrite",	    .errmsg = true, .alias = "pwrite64", },
	{ .name	    = "pwritev",    .errmsg = true, },
	{ .name	    = "read",	    .errmsg = true, },
	{ .name	    = "readlink",   .errmsg = true, },
	{ .name	    = "readlinkat", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
	{ .name	    = "readv",	    .errmsg = true, },
	{ .name	    = "recvfrom",   .errmsg = true,
	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
	{ .name	    = "recvmmsg",   .errmsg = true,
	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
	{ .name	    = "recvmsg",    .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
	{ .name	    = "removexattr", .errmsg = true, },
	{ .name	    = "renameat",   .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
	{ .name	    = "rmdir",    .errmsg = true, },
	{ .name	    = "rt_sigaction", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
	{ .name	    = "rt_sigprocmask",  .errmsg = true, STRARRAY(0, how, sighow), },
	{ .name	    = "rt_sigqueueinfo", .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
	{ .name	    = "rt_tgsigqueueinfo", .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
	{ .name	    = "sched_getattr",	      .errmsg = true, },
	{ .name	    = "sched_setattr",	      .errmsg = true, },
	{ .name	    = "sched_setscheduler",   .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_SCHED_POLICY, /* policy */ }, },
	{ .name	    = "seccomp", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_SECCOMP_OP, /* op */
			     [1] = SCA_SECCOMP_FLAGS, /* flags */ }, },
	{ .name	    = "select",	    .errmsg = true, .timeout = true, },
	{ .name	    = "sendmmsg",    .errmsg = true,
	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
	{ .name	    = "sendmsg",    .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
	{ .name	    = "sendto",	    .errmsg = true,
	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
	{ .name	    = "set_tid_address", .errpid = true, },
	{ .name	    = "setitimer",  .errmsg = true, STRARRAY(0, which, itimers), },
	{ .name	    = "setpgid",    .errmsg = true, },
	{ .name	    = "setrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
	{ .name	    = "setxattr",   .errmsg = true, },
	{ .name	    = "shutdown",   .errmsg = true, },
	{ .name	    = "socket",	    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
			     [1] = SCA_SK_TYPE, /* type */ },
	  .arg_parm	 = { [0] = &strarray__socket_families, /* family */ }, },
	{ .name	    = "socketpair", .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
			     [1] = SCA_SK_TYPE, /* type */ },
	  .arg_parm	 = { [0] = &strarray__socket_families, /* family */ }, },
	{ .name	    = "stat",	    .errmsg = true, .alias = "newstat", },
	{ .name	    = "statfs",	    .errmsg = true, },
	{ .name	    = "swapoff",    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
	{ .name	    = "swapon",	    .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
	{ .name	    = "symlinkat",  .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
	{ .name	    = "tgkill",	    .errmsg = true,
	  .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
	{ .name	    = "tkill",	    .errmsg = true,
	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
	{ .name	    = "truncate",   .errmsg = true, },
	{ .name	    = "uname",	    .errmsg = true, .alias = "newuname", },
	{ .name	    = "unlinkat",   .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
	{ .name	    = "utime",  .errmsg = true, },
	{ .name	    = "utimensat",  .errmsg = true,
	  .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
	{ .name	    = "utimes",  .errmsg = true, },
	{ .name	    = "vmsplice",  .errmsg = true, },
	{ .name	    = "wait4",	    .errpid = true,
	  .arg_scnprintf = { [2] = SCA_WAITID_OPTIONS, /* options */ }, },
	{ .name	    = "waitid",	    .errpid = true,
	  .arg_scnprintf = { [3] = SCA_WAITID_OPTIONS, /* options */ }, },
	{ .name	    = "write",	    .errmsg = true, },
	{ .name	    = "writev",	    .errmsg = true, },
};

static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

struct syscall {
	struct event_format *tp_format;
	int		    nr_args;
	struct format_field *args;
	const char	    *name;
	bool		    is_exit;
	struct syscall_fmt  *fmt;
	size_t		    (**arg_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	void		    **arg_parm;
};

static size_t fprintf_duration(unsigned long t, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}

/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 */
struct thread_trace {
	u64		  entry_time;
	bool		  entry_pending;
	unsigned long	  nr_events;
	unsigned long	  pfmaj, pfmin;
	char		  *entry_str;
	double		  runtime_ms;
	struct {
		unsigned long ptr;
		short int     entry_str_pos;
		bool	      pending_open;
		unsigned int  namelen;
		char	      *name;
	} filename;
	struct {
		int	  max;
		char	  **table;
	} paths;

	struct intlist *syscall_stats;
};

static struct thread_trace *thread_trace__new(void)
{
	struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));

	/* only touch the freshly allocated struct if zalloc() succeeded */
	if (ttrace) {
		ttrace->paths.max = -1;
		ttrace->syscall_stats = intlist__new(NULL);
	}

	return ttrace;
}

static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread__priv(thread) == NULL)
		thread__set_priv(thread, thread_trace__new());

	if (thread__priv(thread) == NULL)
		goto fail;

	ttrace = thread__priv(thread);
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(fp, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}

#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

static const size_t trace__entry_str_size = 2048;

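/*
 * The per-thread fd -> pathname table is a plain array indexed by fd:
 * growing it is just a realloc() plus zeroing of the newly covered slots
 * before the path for this fd is recorded.
 */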
static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (fd > ttrace->paths.max) {
		char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *));

		if (npath == NULL)
			return -1;

		if (ttrace->paths.max != -1) {
			memset(npath + ttrace->paths.max + 1, 0,
			       (fd - ttrace->paths.max) * sizeof(char *));
		} else {
			memset(npath, 0, (fd + 1) * sizeof(char *));
		}

		ttrace->paths.table = npath;
		ttrace->paths.max   = fd;
	}

	ttrace->paths.table[fd] = strdup(pathname);

	return ttrace->paths.table[fd] != NULL ? 0 : -1;
}

static int thread__read_fd_path(struct thread *thread, int fd)
{
	char linkname[PATH_MAX], pathname[PATH_MAX];
	struct stat st;
	int ret;

	if (thread->pid_ == thread->tid) {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/fd/%d", thread->pid_, fd);
	} else {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
	}

	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
		return -1;

	ret = readlink(linkname, pathname, sizeof(pathname));

	if (ret < 0 || ret > st.st_size)
		return -1;

	pathname[ret] = '\0';
	return trace__set_fd_pathname(thread, fd, pathname);
}

static const char *thread__fd_path(struct thread *thread, int fd,
				   struct trace *trace)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (ttrace == NULL)
		return NULL;

	if (fd < 0)
		return NULL;

	if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) {
		if (!trace->live)
			return NULL;
		++trace->stats.proc_getname;
		if (thread__read_fd_path(thread, fd))
			return NULL;
	}

	return ttrace->paths.table[fd];
}

static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
					struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = scnprintf(bf, size, "%d", fd);
	const char *path = thread__fd_path(arg->thread, fd, arg->trace);

	if (path)
		printed += scnprintf(bf + printed, size - printed, "<%s>", path);

	return printed;
}

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
	struct thread_trace *ttrace = thread__priv(arg->thread);

	if (ttrace && fd >= 0 && fd <= ttrace->paths.max)
		zfree(&ttrace->paths.table[fd]);

	return printed;
}

static void thread__set_filename_pos(struct thread *thread, const char *bf,
				     unsigned long ptr)
{
	struct thread_trace *ttrace = thread__priv(thread);

	ttrace->filename.ptr = ptr;
	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	unsigned long ptr = arg->val;

	if (!arg->trace->vfs_getname)
		return scnprintf(bf, size, "%#lx", ptr);

	thread__set_filename_pos(arg->thread, bf, ptr);
	return 0;
}

static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}

static bool done = false;
static bool interrupted = false;

static void sig_handler(int sig)
{
	done = true;
	interrupted = sig == SIGINT;
}

static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, u64 tstamp, FILE *fp)
{
	size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
	printed += fprintf_duration(duration, fp);

	if (trace->multiple_threads) {
		if (trace->show_comm)
			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
		printed += fprintf(fp, "%d ", thread->tid);
	}

	return printed;
}

static int trace__process_event(struct trace *trace, struct machine *machine,
				union perf_event *event, struct perf_sample *sample)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(trace->output, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event, sample);
		break;
	default:
		ret = machine__process_event(machine, event, sample);
		break;
	}

	return ret;
}

static int trace__tool_process(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	return trace__process_event(trace, machine, event, sample);
}

static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;

	if (machine->kptr_restrict_warned)
		return NULL;

	if (symbol_conf.kptr_restrict) {
		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
			   "Check /proc/sys/kernel/kptr_restrict.\n\n"
			   "Kernel samples will not be resolved.\n");
		machine->kptr_restrict_warned = true;
		return NULL;
	}

	return machine__resolve_kernel_addr(vmachine, addrp, modp);
}

static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
{
	int err = symbol__init(NULL);

	if (err)
		return err;

	trace->host = machine__new_host();
	if (trace->host == NULL)
		return -ENOMEM;

	if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0)
		return -errno;

	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
					    evlist->threads, trace__tool_process, false,
					    trace->opts.proc_map_timeout);
	if (err)
		symbol__exit();

	return err;
}

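/*
 * Pick a pretty-printer for each syscall argument: explicit overrides
 * from syscall_fmts[] win, otherwise fall back to heuristics keyed on the
 * tracepoint field type/name (filenames, pointers, pid_t, umode_t and
 * anything whose name ends in "fd").
 */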
static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct format_field *field;
	int idx = 0, len;

	sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *));
	if (sc->arg_scnprintf == NULL)
		return -1;

	if (sc->fmt)
		sc->arg_parm = sc->fmt->arg_parm;

	for (field = sc->args; field; field = field->next) {
		if (sc->fmt && sc->fmt->arg_scnprintf[idx])
			sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
		else if (strcmp(field->type, "const char *") == 0 &&
			 (strcmp(field->name, "filename") == 0 ||
			  strcmp(field->name, "path") == 0 ||
			  strcmp(field->name, "pathname") == 0))
			sc->arg_scnprintf[idx] = SCA_FILENAME;
		else if (field->flags & FIELD_IS_POINTER)
			sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
		else if (strcmp(field->type, "pid_t") == 0)
			sc->arg_scnprintf[idx] = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			sc->arg_scnprintf[idx] = SCA_MODE_T;
		else if ((strcmp(field->type, "int") == 0 ||
			  strcmp(field->type, "unsigned int") == 0 ||
			  strcmp(field->type, "long") == 0) &&
			 (len = strlen(field->name)) >= 2 &&
			 strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 * 65 int
			 * 23 unsigned int
			 * 7 unsigned long
			 */
			sc->arg_scnprintf[idx] = SCA_FD;
		}
		++idx;
	}

	return 0;
}

static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = syscalltbl__name(trace->sctbl, id);

	if (name == NULL)
		return -1;

	if (id > trace->syscalls.max) {
		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (nsyscalls == NULL)
			return -1;

		if (trace->syscalls.max != -1) {
			memset(nsyscalls + trace->syscalls.max + 1, 0,
			       (id - trace->syscalls.max) * sizeof(*sc));
		} else {
			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
		}

		trace->syscalls.table = nsyscalls;
		trace->syscalls.max   = id;
	}

	sc = trace->syscalls.table + id;
	sc->name = name;

	sc->fmt  = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	if (IS_ERR(sc->tp_format))
		return -1;

	sc->args = sc->tp_format->format.fields;
	sc->nr_args = sc->tp_format->format.nr_fields;
	/*
	 * Check for and discard the first field, '__syscall_nr' ('nr' on
	 * older kernels): it carries the syscall number, which we already
	 * have, so it is needless in the argument list.
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;
		--sc->nr_args;
	}

	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");

	return syscall__set_arg_fmts(sc);
}

static int trace__validate_ev_qualifier(struct trace *trace)
{
	int err = 0, i;
	struct str_node *pos;

	trace->ev_qualifier_ids.nr = strlist__nr_entries(trace->ev_qualifier);
	trace->ev_qualifier_ids.entries = malloc(trace->ev_qualifier_ids.nr *
						 sizeof(trace->ev_qualifier_ids.entries[0]));

	if (trace->ev_qualifier_ids.entries == NULL) {
		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
		       trace->output);
		err = -EINVAL;
		goto out;
	}

	i = 0;

	strlist__for_each_entry(pos, trace->ev_qualifier) {
		const char *sc = pos->s;
		int id = syscalltbl__id(trace->sctbl, sc);

		if (id < 0) {
			if (err == 0) {
				fputs("Error:\tInvalid syscall ", trace->output);
				err = -EINVAL;
			} else {
				fputs(", ", trace->output);
			}

			fputs(sc, trace->output);
		}

		trace->ev_qualifier_ids.entries[i++] = id;
	}

	if (err < 0) {
		fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
		      "\nHint:\tand: 'man syscalls'\n", trace->output);
		zfree(&trace->ev_qualifier_ids.entries);
		trace->ev_qualifier_ids.nr = 0;
	}
out:
	return err;
}

/*
 * args is to be interpreted as a series of longs but we need to handle
 * 8-byte unaligned accesses. args points to raw_data within the event
 * and raw_data is guaranteed to be 8-byte unaligned because it is
 * preceded by raw_size which is a u32. So we need to copy args to a temp
 * variable to read it. Most notably this avoids extended load instructions
 * on unaligned addresses.
 */

static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned char *args, struct trace *trace,
				      struct thread *thread)
{
	size_t printed = 0;
	unsigned char *p;
	unsigned long val;

	if (sc->args != NULL) {
		struct format_field *field;
		u8 bit = 1;
		struct syscall_arg arg = {
			.idx	= 0,
			.mask	= 0,
			.trace  = trace,
			.thread = thread,
		};

		for (field = sc->args; field;
		     field = field->next, ++arg.idx, bit <<= 1) {
			if (arg.mask & bit)
				continue;

			/* special care for unaligned accesses */
			p = args + sizeof(unsigned long) * arg.idx;
			memcpy(&val, p, sizeof(val));

			/*
			 * Suppress this argument if its value is zero and
			 * we don't have a string associated in a strarray
			 * for it.
			 */
			if (val == 0 &&
			    !(sc->arg_scnprintf &&
			      sc->arg_scnprintf[arg.idx] == SCA_STRARRAY &&
			      sc->arg_parm[arg.idx]))
				continue;

			printed += scnprintf(bf + printed, size - printed,
					     "%s%s: ", printed ? ", " : "", field->name);
			if (sc->arg_scnprintf && sc->arg_scnprintf[arg.idx]) {
				arg.val = val;
				if (sc->arg_parm)
					arg.parm = sc->arg_parm[arg.idx];
				printed += sc->arg_scnprintf[arg.idx](bf + printed,
								      size - printed, &arg);
			} else {
				printed += scnprintf(bf + printed, size - printed,
						     "%ld", val);
			}
		}
	} else if (IS_ERR(sc->tp_format)) {
		/*
		 * If we managed to read the tracepoint /format file, then we
		 * may end up not having any args, like with gettid(), so only
		 * print the raw args when we didn't manage to read it.
		 */
		int i = 0;

		while (i < 6) {
			/* special care for unaligned accesses */
			p = args + sizeof(unsigned long) * i;
			memcpy(&val, p, sizeof(val));
			printed += scnprintf(bf + printed, size - printed,
					     "%sarg%d: %ld",
					     printed ? ", " : "", i, val);
			++i;
		}
	}

	return printed;
}

typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
				  union perf_event *event,
				  struct perf_sample *sample);

static struct syscall *trace__syscall_info(struct trace *trace,
					   struct perf_evsel *evsel, int id)
{
	if (id < 0) {
		/*
		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
		 * before that, leaving at a higher verbosity level till that is
		 * explained. Reproduced with plain ftrace with:
		 *
		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
		 * grep "NR -1 " /t/trace_pipe
		 *
		 * After generating some load on the machine.
		 */
		if (verbose > 1) {
			static u64 n;
			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
				id, perf_evsel__name(evsel), ++n);
		}
		return NULL;
	}

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
	    trace__read_syscall_info(trace, id))
		goto out_cant_read;

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
		goto out_cant_read;

	return &trace->syscalls.table[id];

out_cant_read:
	if (verbose) {
		fprintf(trace->output, "Problems reading syscall %d", id);
		if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
		fputs(" information\n", trace->output);
	}
	return NULL;
}

static void thread__update_stats(struct thread_trace *ttrace,
				 int id, struct perf_sample *sample)
{
	struct int_node *inode;
	struct stats *stats;
	u64 duration = 0;

	inode = intlist__findnew(ttrace->syscall_stats, id);
	if (inode == NULL)
		return;

	stats = inode->priv;
	if (stats == NULL) {
		stats = malloc(sizeof(struct stats));
		if (stats == NULL)
			return;
		init_stats(stats);
		inode->priv = stats;
	}

	if (ttrace->entry_time && sample->time > ttrace->entry_time)
		duration = sample->time - ttrace->entry_time;

	update_stats(stats, duration);
}

static int trace__printf_interrupted_entry(struct trace *trace, struct perf_sample *sample)
{
	struct thread_trace *ttrace;
	u64 duration;
	size_t printed;

	if (trace->current == NULL)
		return 0;

	ttrace = thread__priv(trace->current);

	if (!ttrace->entry_pending)
		return 0;

	duration = sample->time - ttrace->entry_time;

	printed  = trace__fprintf_entry_head(trace, trace->current, duration, ttrace->entry_time, trace->output);
	printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
	ttrace->entry_pending = false;

	return printed;
}

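/*
 * sys_enter only formats the call into ttrace->entry_str; the line is
 * normally completed and printed by trace__sys_exit, which appends the
 * return value and duration.  Syscalls that never return (exit,
 * exit_group) are flushed right away.
 */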
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	size_t printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
		trace__printf_interrupted_entry(trace, sample);

	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, trace, thread);

	if (sc->is_exit) {
		if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
			trace__fprintf_entry_head(trace, thread, 1, ttrace->entry_time, trace->output);
			fprintf(trace->output, "%-70s)\n", ttrace->entry_str);
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evsel,
				    struct perf_sample *sample,
				    struct callchain_cursor *cursor)
{
	struct addr_location al;

	if (machine__resolve(trace->host, &al, sample) < 0 ||
	    thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, trace->max_stack))
		return -1;

	return 0;
}

static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
{
	/* TODO: user-configurable print_opts */
	const unsigned int print_opts = EVSEL__PRINT_SYM |
				        EVSEL__PRINT_DSO |
				        EVSEL__PRINT_UNKNOWN_AS_ADDR;

	return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
}

1541static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
1542			   union perf_event *event __maybe_unused,
1543			   struct perf_sample *sample)
1544{
1545	long ret;
1546	u64 duration = 0;
 
1547	struct thread *thread;
1548	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0;
 
1549	struct syscall *sc = trace__syscall_info(trace, evsel, id);
1550	struct thread_trace *ttrace;
1551
1552	if (sc == NULL)
1553		return -1;
1554
1555	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1556	ttrace = thread__trace(thread, trace->output);
1557	if (ttrace == NULL)
1558		goto out_put;
1559
1560	if (trace->summary)
1561		thread__update_stats(ttrace, id, sample);
1562
1563	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
1564
1565	if (id == trace->open_id && ret >= 0 && ttrace->filename.pending_open) {
 
 
 
1566		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
1567		ttrace->filename.pending_open = false;
1568		++trace->stats.vfs_getname;
1569	}
1570
1571	if (ttrace->entry_time) {
1572		duration = sample->time - ttrace->entry_time;
1573		if (trace__filter_duration(trace, duration))
1574			goto out;
 
1575	} else if (trace->duration_filter)
1576		goto out;
1577
1578	if (sample->callchain) {
1579		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
1580		if (callchain_ret == 0) {
1581			if (callchain_cursor.nr < trace->min_stack)
1582				goto out;
1583			callchain_ret = 1;
1584		}
1585	}
1586
1587	if (trace->summary_only)
1588		goto out;
1589
1590	trace__fprintf_entry_head(trace, thread, duration, ttrace->entry_time, trace->output);
1591
1592	if (ttrace->entry_pending) {
1593		fprintf(trace->output, "%-70s", ttrace->entry_str);
1594	} else {
1595		fprintf(trace->output, " ... [");
1596		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
1597		fprintf(trace->output, "]: %s()", sc->name);
 
1598	}
1599
 
 
 
 
 
 
 
 
 
1600	if (sc->fmt == NULL) {
 
 
1601signed_print:
1602		fprintf(trace->output, ") = %ld", ret);
1603	} else if (ret < 0 && (sc->fmt->errmsg || sc->fmt->errpid)) {
 
1604		char bf[STRERR_BUFSIZE];
1605		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
1606			   *e = audit_errno_to_name(-ret);
1607
1608		fprintf(trace->output, ") = -1 %s %s", e, emsg);
 
1609	} else if (ret == 0 && sc->fmt->timeout)
1610		fprintf(trace->output, ") = 0 Timeout");
1611	else if (sc->fmt->hexret)
1612		fprintf(trace->output, ") = %#lx", ret);
 
 
 
 
 
 
 
 
 
 
1613	else if (sc->fmt->errpid) {
1614		struct thread *child = machine__find_thread(trace->host, ret, ret);
1615
1616		if (child != NULL) {
1617			fprintf(trace->output, ") = %ld", ret);
1618			if (child->comm_set)
1619				fprintf(trace->output, " (%s)", thread__comm_str(child));
1620			thread__put(child);
1621		}
1622	} else
1623		goto signed_print;
1624
1625	fputc('\n', trace->output);
1626
1627	if (callchain_ret > 0)
1628		trace__fprintf_callchain(trace, sample);
1629	else if (callchain_ret < 0)
1630		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
1631out:
1632	ttrace->entry_pending = false;
1633	err = 0;
1634out_put:
1635	thread__put(thread);
1636	return err;
1637}
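/*
 * Putting the branches above together, the tail of a line can look
 * like (timings and values illustrative only):
 *
 *     open(filename: /etc/passwd, flags: CLOEXEC       ) = 3
 *     open(filename: /nonexistent                      ) = -1 ENOENT (No such file or directory)
 *     mmap(addr: NULL, len: 8192, prot: READ|WRITE     ) = 0x7f0e9a021000
 *     clone(flags: CHILD_CLEARTID|CHILD_SETTID|SIGCHLD ) = 5130 (bash)
 *
 * i.e. plain signed decimal by default, "-1 ENAME (strerror)" when the
 * syscall_fmt has errmsg set, "0 Timeout" for timeout ones, hex for
 * hexret ones like mmap, and the child comm appended for errpid ones
 * such as clone and wait4.
 */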
1638
1639static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
1640			      union perf_event *event __maybe_unused,
1641			      struct perf_sample *sample)
1642{
1643	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1644	struct thread_trace *ttrace;
1645	size_t filename_len, entry_str_len, to_move;
1646	ssize_t remaining_space;
1647	char *pos;
1648	const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");
1649
1650	if (!thread)
1651		goto out;
1652
1653	ttrace = thread__priv(thread);
1654	if (!ttrace)
1655		goto out;
1656
1657	filename_len = strlen(filename);
1658
1659	if (ttrace->filename.namelen < filename_len) {
1660		char *f = realloc(ttrace->filename.name, filename_len + 1);
1661
1662		if (f == NULL)
1663			goto out;
1664
1665		ttrace->filename.namelen = filename_len;
1666		ttrace->filename.name = f;
1667	}
1668
1669	strcpy(ttrace->filename.name, filename);
1670	ttrace->filename.pending_open = true;
1671
1672	if (!ttrace->filename.ptr)
1673		goto out;
1674
1675	entry_str_len = strlen(ttrace->entry_str);
1676	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
1677	if (remaining_space <= 0)
1678		goto out;
1679
1680	if (filename_len > (size_t)remaining_space) {
1681		filename += filename_len - remaining_space;
1682		filename_len = remaining_space;
1683	}
1684
1685	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
1686	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
1687	memmove(pos + filename_len, pos, to_move);
1688	memcpy(pos, filename, filename_len);
1689
1690	ttrace->filename.ptr = 0;
1691	ttrace->filename.entry_str_pos = 0;
1692out:
1693	return 0;
1694}
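/*
 * A sketch of the splice done above: at sys_enter time the open
 * beautifier leaves a hole in entry_str, e.g. "open(filename: ", and
 * records its position in filename.entry_str_pos.  When
 * probe:vfs_getname fires, memmove()/memcpy() insert the pathname into
 * that hole, so by sys_exit time the pending entry reads
 * "open(filename: /etc/passwd".  Names longer than the remaining
 * buffer space are truncated from the front, keeping the tail of the
 * path, which is usually the interesting part.
 */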
1695
1696static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
1697				     union perf_event *event __maybe_unused,
1698				     struct perf_sample *sample)
1699{
1700	u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
1701	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
1702	struct thread *thread = machine__findnew_thread(trace->host,
1703							sample->pid,
1704							sample->tid);
1705	struct thread_trace *ttrace = thread__trace(thread, trace->output);
1706
1707	if (ttrace == NULL)
1708		goto out_dump;
1709
1710	ttrace->runtime_ms += runtime_ms;
1711	trace->runtime_ms += runtime_ms;
1712	thread__put(thread);
1713	return 0;
1714
1715out_dump:
1716	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
1717	       evsel->name,
1718	       perf_evsel__strval(evsel, sample, "comm"),
1719	       (pid_t)perf_evsel__intval(evsel, sample, "pid"),
1720	       runtime,
1721	       perf_evsel__intval(evsel, sample, "vruntime"));
1722	thread__put(thread);
1723	return 0;
1724}
1725
1726static void bpf_output__printer(enum binary_printer_ops op,
1727				unsigned int val, void *extra)
1728{
1729	FILE *output = extra;
1730	unsigned char ch = (unsigned char)val;
1731
1732	switch (op) {
1733	case BINARY_PRINT_CHAR_DATA:
1734		fprintf(output, "%c", isprint(ch) ? ch : '.');
1735		break;
1736	case BINARY_PRINT_DATA_BEGIN:
1737	case BINARY_PRINT_LINE_BEGIN:
1738	case BINARY_PRINT_ADDR:
1739	case BINARY_PRINT_NUM_DATA:
1740	case BINARY_PRINT_NUM_PAD:
1741	case BINARY_PRINT_SEP:
1742	case BINARY_PRINT_CHAR_PAD:
1743	case BINARY_PRINT_LINE_END:
1744	case BINARY_PRINT_DATA_END:
1745	default:
1746		break;
1747	}
1748}
1749
1750static void bpf_output__fprintf(struct trace *trace,
1751				struct perf_sample *sample)
1752{
1753	print_binary(sample->raw_data, sample->raw_size, 8,
1754		     bpf_output__printer, trace->output);
1755}
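/*
 * print_binary() walks raw_data in 8-byte rows, but since the printer
 * above only handles BINARY_PRINT_CHAR_DATA, the dump degenerates to
 * just the printable characters with '.' for everything else, much
 * like the ASCII column of "hexdump -C".
 */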
1756
1757static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
1758				union perf_event *event __maybe_unused,
1759				struct perf_sample *sample)
1760{
1761	int callchain_ret = 0;
1762
1763	if (sample->callchain) {
1764		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
1765		if (callchain_ret == 0) {
1766			if (callchain_cursor.nr < trace->min_stack)
1767				goto out;
1768			callchain_ret = 1;
1769		}
1770	}
1771
1772	trace__printf_interrupted_entry(trace, sample);
1773	trace__fprintf_tstamp(trace, sample->time, trace->output);
1774
1775	if (trace->trace_syscalls)
1776		fprintf(trace->output, "(         ): ");
1777
1778	fprintf(trace->output, "%s:", evsel->name);
1779
1780	if (perf_evsel__is_bpf_output(evsel)) {
1781		bpf_output__fprintf(trace, sample);
1782	} else if (evsel->tp_format) {
1783		event_format__fprintf(evsel->tp_format, sample->cpu,
1784				      sample->raw_data, sample->raw_size,
1785				      trace->output);
1786	}
1787
1788	fprintf(trace->output, ")\n");
1789
1790	if (callchain_ret > 0)
1791		trace__fprintf_callchain(trace, sample);
1792	else if (callchain_ret < 0)
1793		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
1794out:
1795	return 0;
1796}
1797
1798static void print_location(FILE *f, struct perf_sample *sample,
1799			   struct addr_location *al,
1800			   bool print_dso, bool print_sym)
1801{
1802
1803	if ((verbose || print_dso) && al->map)
1804		fprintf(f, "%s@", al->map->dso->long_name);
1805
1806	if ((verbose || print_sym) && al->sym)
1807		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
1808			al->addr - al->sym->start);
1809	else if (al->map)
1810		fprintf(f, "0x%" PRIx64, al->addr);
1811	else
1812		fprintf(f, "0x%" PRIx64, sample->addr);
1813}
1814
1815static int trace__pgfault(struct trace *trace,
1816			  struct perf_evsel *evsel,
1817			  union perf_event *event __maybe_unused,
1818			  struct perf_sample *sample)
1819{
1820	struct thread *thread;
1821	struct addr_location al;
1822	char map_type = 'd';
1823	struct thread_trace *ttrace;
1824	int err = -1;
1825	int callchain_ret = 0;
1826
1827	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1828
1829	if (sample->callchain) {
1830		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
1831		if (callchain_ret == 0) {
1832			if (callchain_cursor.nr < trace->min_stack)
1833				goto out_put;
1834			callchain_ret = 1;
1835		}
1836	}
1837
1838	ttrace = thread__trace(thread, trace->output);
1839	if (ttrace == NULL)
1840		goto out_put;
1841
1842	if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
1843		ttrace->pfmaj++;
1844	else
1845		ttrace->pfmin++;
1846
1847	if (trace->summary_only)
1848		goto out;
1849
1850	thread__find_addr_location(thread, sample->cpumode, MAP__FUNCTION,
1851			      sample->ip, &al);
1852
1853	trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output);
1854
1855	fprintf(trace->output, "%sfault [",
1856		evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
1857		"maj" : "min");
1858
1859	print_location(trace->output, sample, &al, false, true);
1860
1861	fprintf(trace->output, "] => ");
1862
1863	thread__find_addr_location(thread, sample->cpumode, MAP__VARIABLE,
1864				   sample->addr, &al);
1865
1866	if (!al.map) {
1867		thread__find_addr_location(thread, sample->cpumode,
1868					   MAP__FUNCTION, sample->addr, &al);
1869
1870		if (al.map)
1871			map_type = 'x';
1872		else
1873			map_type = '?';
1874	}
1875
1876	print_location(trace->output, sample, &al, true, false);
1877
1878	fprintf(trace->output, " (%c%c)\n", map_type, al.level);
1879
1880	if (callchain_ret > 0)
1881		trace__fprintf_callchain(trace, sample);
1882	else if (callchain_ret < 0)
1883		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
1884out:
1885	err = 0;
1886out_put:
1887	thread__put(thread);
1888	return err;
1889}
1890
1891static void trace__set_base_time(struct trace *trace,
1892				 struct perf_evsel *evsel,
1893				 struct perf_sample *sample)
1894{
1895	/*
1896	 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
1897	 * and don't use sample->time unconditionally. We may end up having
1898	 * some other event in the future without PERF_SAMPLE_TIME for a good
1899	 * reason, i.e. we may not be interested in its timestamps, just in
1900	 * it taking place, picking some piece of information when it
1901	 * appears in our event stream (vfs_getname comes to mind).
1902	 */
1903	if (trace->base_time == 0 && !trace->full_time &&
1904	    (evsel->attr.sample_type & PERF_SAMPLE_TIME))
1905		trace->base_time = sample->time;
1906}
1907
1908static int trace__process_sample(struct perf_tool *tool,
1909				 union perf_event *event,
1910				 struct perf_sample *sample,
1911				 struct perf_evsel *evsel,
1912				 struct machine *machine __maybe_unused)
1913{
1914	struct trace *trace = container_of(tool, struct trace, tool);
1915	struct thread *thread;
1916	int err = 0;
1917
1918	tracepoint_handler handler = evsel->handler;
1919
1920	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1921	if (thread && thread__is_filtered(thread))
1922		return 0;
1923
1924	trace__set_base_time(trace, evsel, sample);
1925
1926	if (handler) {
1927		++trace->nr_events;
1928		handler(trace, evsel, event, sample);
1929	}
1930
1931	return err;
1932}
1933
1934static int trace__record(struct trace *trace, int argc, const char **argv)
1935{
1936	unsigned int rec_argc, i, j;
1937	const char **rec_argv;
1938	const char * const record_args[] = {
1939		"record",
1940		"-R",
1941		"-m", "1024",
1942		"-c", "1",
1943	};
1944
1945	const char * const sc_args[] = { "-e", };
1946	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
1947	const char * const majpf_args[] = { "-e", "major-faults" };
1948	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
1949	const char * const minpf_args[] = { "-e", "minor-faults" };
1950	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
1951
1952	/* +1 is for the event string below */
1953	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
1954		majpf_args_nr + minpf_args_nr + argc;
1955	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1956
1957	if (rec_argv == NULL)
1958		return -ENOMEM;
1959
1960	j = 0;
1961	for (i = 0; i < ARRAY_SIZE(record_args); i++)
1962		rec_argv[j++] = record_args[i];
1963
1964	if (trace->trace_syscalls) {
1965		for (i = 0; i < sc_args_nr; i++)
1966			rec_argv[j++] = sc_args[i];
1967
1968		/* event string may be different for older kernels - e.g., RHEL6 */
1969		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
1970			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
1971		else if (is_valid_tracepoint("syscalls:sys_enter"))
1972			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
1973		else {
1974			pr_err("Neither raw_syscalls nor syscalls events exist.\n");
1975			return -1;
1976		}
1977	}
1978
1979	if (trace->trace_pgfaults & TRACE_PFMAJ)
1980		for (i = 0; i < majpf_args_nr; i++)
1981			rec_argv[j++] = majpf_args[i];
1982
1983	if (trace->trace_pgfaults & TRACE_PFMIN)
1984		for (i = 0; i < minpf_args_nr; i++)
1985			rec_argv[j++] = minpf_args[i];
1986
1987	for (i = 0; i < (unsigned int)argc; i++)
1988		rec_argv[j++] = argv[i];
1989
1990	return cmd_record(j, rec_argv, NULL);
1991}
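/*
 * In other words, "perf trace record <cmd>" ends up execing roughly
 * (on a current kernel, without the page-fault options):
 *
 *     perf record -R -m 1024 -c 1 \
 *         -e raw_syscalls:sys_enter,raw_syscalls:sys_exit <cmd>
 */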
1992
1993static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
1994
1995static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
1996{
1997	struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
1998
1999	if (IS_ERR(evsel))
2000		return false;
2001
2002	if (perf_evsel__field(evsel, "pathname") == NULL) {
2003		perf_evsel__delete(evsel);
2004		return false;
2005	}
2006
2007	evsel->handler = trace__vfs_getname;
2008	perf_evlist__add(evlist, evsel);
2009	return true;
2010}
2011
2012static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
2013{
2014	struct perf_evsel *evsel;
2015	struct perf_event_attr attr = {
2016		.type = PERF_TYPE_SOFTWARE,
2017		.mmap_data = 1,
2018	};
2019
2020	attr.config = config;
2021	attr.sample_period = 1;
2022
2023	event_attr_init(&attr);
2024
2025	evsel = perf_evsel__new(&attr);
2026	if (evsel)
2027		evsel->handler = trace__pgfault;
2028
2029	return evsel;
2030}
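/*
 * sample_period = 1 requests one sample per fault rather than a
 * sampled subset, and mmap_data = 1 makes the kernel emit mmap events
 * for data mappings too, which is what allows trace__pgfault() to
 * resolve sample->addr via MAP__VARIABLE.
 */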
2031
2032static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
2033{
2034	const u32 type = event->header.type;
2035	struct perf_evsel *evsel;
2036
2037	if (type != PERF_RECORD_SAMPLE) {
2038		trace__process_event(trace, trace->host, event, sample);
2039		return;
2040	}
2041
2042	evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
2043	if (evsel == NULL) {
2044		fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
2045		return;
2046	}
2047
2048	trace__set_base_time(trace, evsel, sample);
2049
2050	if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
2051	    sample->raw_data == NULL) {
2052		fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
2053		       perf_evsel__name(evsel), sample->tid,
2054		       sample->cpu, sample->raw_size);
2055	} else {
2056		tracepoint_handler handler = evsel->handler;
2057		handler(trace, evsel, event, sample);
2058	}
2059}
2060
2061static int trace__add_syscall_newtp(struct trace *trace)
2062{
2063	int ret = -1;
2064	struct perf_evlist *evlist = trace->evlist;
2065	struct perf_evsel *sys_enter, *sys_exit;
2066
2067	sys_enter = perf_evsel__syscall_newtp("sys_enter", trace__sys_enter);
2068	if (sys_enter == NULL)
2069		goto out;
2070
2071	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
2072		goto out_delete_sys_enter;
2073
2074	sys_exit = perf_evsel__syscall_newtp("sys_exit", trace__sys_exit);
2075	if (sys_exit == NULL)
2076		goto out_delete_sys_enter;
2077
2078	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
2079		goto out_delete_sys_exit;
2080
2081	perf_evlist__add(evlist, sys_enter);
2082	perf_evlist__add(evlist, sys_exit);
2083
2084	if (callchain_param.enabled && !trace->kernel_syscallchains) {
2085		/*
2086		 * We're interested only in the user space callchain
2087		 * leading to the syscall, allow overriding that for
2088		 * debugging reasons using --kernel-syscall-graph
2089		 */
2090		sys_exit->attr.exclude_callchain_kernel = 1;
2091	}
2092
2093	trace->syscalls.events.sys_enter = sys_enter;
2094	trace->syscalls.events.sys_exit  = sys_exit;
2095
2096	ret = 0;
2097out:
2098	return ret;
2099
2100out_delete_sys_exit:
2101	perf_evsel__delete_priv(sys_exit);
2102out_delete_sys_enter:
2103	perf_evsel__delete_priv(sys_enter);
2104	goto out;
2105}
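/*
 * Note the error unwinding idiom used above and throughout this file:
 * allocations happen in order, and failures fall through the
 * out_delete_* labels in reverse order.  Setting
 * exclude_callchain_kernel trims the in-kernel half of the stack, so
 * the printed callchain starts at the userspace code that issued the
 * syscall.
 */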
2106
2107static int trace__set_ev_qualifier_filter(struct trace *trace)
2108{
2109	int err = -1;
2110	struct perf_evsel *sys_exit;
2111	char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
2112						trace->ev_qualifier_ids.nr,
2113						trace->ev_qualifier_ids.entries);
2114
2115	if (filter == NULL)
2116		goto out_enomem;
2117
2118	if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter,
2119					  filter)) {
2120		sys_exit = trace->syscalls.events.sys_exit;
2121		err = perf_evsel__append_tp_filter(sys_exit, filter);
2122	}
2123
2124	free(filter);
2125out:
2126	return err;
2127out_enomem:
2128	errno = ENOMEM;
2129	goto out;
2130}
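/*
 * asprintf_expr_inout_ints() renders the syscall id list as a
 * tracepoint filter evaluated in the kernel, so filtered-out syscalls
 * never even reach the ring buffer.  For "-e open,close" it produces
 * something like (ids are arch-dependent, these are illustrative):
 *
 *     id == 2 || id == 3
 *
 * and for the negated "-e \!open,close" form:
 *
 *     id != 2 && id != 3
 */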
2131
2132static int trace__run(struct trace *trace, int argc, const char **argv)
2133{
2134	struct perf_evlist *evlist = trace->evlist;
2135	struct perf_evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
2136	int err = -1, i;
2137	unsigned long before;
2138	const bool forks = argc > 0;
2139	bool draining = false;
2140
2141	trace->live = true;
2142
2143	if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
2144		goto out_error_raw_syscalls;
2145
2146	if (trace->trace_syscalls)
2147		trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);
2148
2149	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
2150		pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
2151		if (pgfault_maj == NULL)
2152			goto out_error_mem;
2153		perf_evlist__add(evlist, pgfault_maj);
2154	}
2155
2156	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
2157		pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
2158		if (pgfault_min == NULL)
2159			goto out_error_mem;
2160		perf_evlist__add(evlist, pgfault_min);
2161	}
2162
2163	if (trace->sched &&
2164	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
2165				   trace__sched_stat_runtime))
2166		goto out_error_sched_stat_runtime;
2167
2168	err = perf_evlist__create_maps(evlist, &trace->opts.target);
2169	if (err < 0) {
2170		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
2171		goto out_delete_evlist;
2172	}
2173
2174	err = trace__symbols_init(trace, evlist);
2175	if (err < 0) {
2176		fprintf(trace->output, "Problems initializing symbol libraries!\n");
2177		goto out_delete_evlist;
2178	}
2179
2180	perf_evlist__config(evlist, &trace->opts, NULL);
2181
2182	if (callchain_param.enabled) {
2183		bool use_identifier = false;
2184
2185		if (trace->syscalls.events.sys_exit) {
2186			perf_evsel__config_callchain(trace->syscalls.events.sys_exit,
2187						     &trace->opts, &callchain_param);
2188			use_identifier = true;
2189		}
2190
2191		if (pgfault_maj) {
2192			perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
2193			use_identifier = true;
2194		}
2195
2196		if (pgfault_min) {
2197			perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
2198			use_identifier = true;
2199		}
2200
2201		if (use_identifier) {
2202		       /*
2203			* Now we have evsels with different sample_ids, use
2204			* PERF_SAMPLE_IDENTIFIER to map from sample to evsel
2205			* from a fixed position in each ring buffer record.
2206			*
2207			* As of the changeset introducing this comment, this
2208			* isn't strictly needed, as the fields that can come before
2209			* PERF_SAMPLE_ID are all used, but we'll probably disable
2210			* some of those for things like copying the payload of
2211			* pointer syscall arguments, and for vfs_getname we don't
2212			* need PERF_SAMPLE_ADDR and PERF_SAMPLE_IP, so do this
2213			* here as a warning we need to use PERF_SAMPLE_IDENTIFIER.
2214			*/
2215			perf_evlist__set_sample_bit(evlist, IDENTIFIER);
2216			perf_evlist__reset_sample_bit(evlist, ID);
2217		}
2218	}
2219
2220	signal(SIGCHLD, sig_handler);
2221	signal(SIGINT, sig_handler);
2222
2223	if (forks) {
2224		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
2225						    argv, false, NULL);
2226		if (err < 0) {
2227			fprintf(trace->output, "Couldn't run the workload!\n");
2228			goto out_delete_evlist;
2229		}
2230	}
2231
2232	err = perf_evlist__open(evlist);
2233	if (err < 0)
2234		goto out_error_open;
2235
2236	err = bpf__apply_obj_config();
2237	if (err) {
2238		char errbuf[BUFSIZ];
2239
2240		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
2241		pr_err("ERROR: Apply config to BPF failed: %s\n",
2242			 errbuf);
2243		goto out_error_open;
2244	}
2245
2246	/*
2247	 * Better not use !target__has_task() here because we need to cover the
2248	 * case where no threads were specified in the command line, but a
2249	 * workload was, and in that case we will fill in the thread_map when
2250	 * we fork the workload in perf_evlist__prepare_workload.
2251	 */
2252	if (trace->filter_pids.nr > 0)
2253		err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries);
2254	else if (thread_map__pid(evlist->threads, 0) == -1)
2255		err = perf_evlist__set_filter_pid(evlist, getpid());
2256
2257	if (err < 0)
2258		goto out_error_mem;
2259
2260	if (trace->ev_qualifier_ids.nr > 0) {
2261		err = trace__set_ev_qualifier_filter(trace);
2262		if (err < 0)
2263			goto out_errno;
2264
2265		pr_debug("event qualifier tracepoint filter: %s\n",
2266			 trace->syscalls.events.sys_exit->filter);
2267	}
2268
2269	err = perf_evlist__apply_filters(evlist, &evsel);
2270	if (err < 0)
2271		goto out_error_apply_filters;
2272
2273	err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
2274	if (err < 0)
2275		goto out_error_mmap;
2276
2277	if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
2278		perf_evlist__enable(evlist);
2279
2280	if (forks)
2281		perf_evlist__start_workload(evlist);
2282
2283	if (trace->opts.initial_delay) {
2284		usleep(trace->opts.initial_delay * 1000);
2285		perf_evlist__enable(evlist);
2286	}
2287
2288	trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
2289				  evlist->threads->nr > 1 ||
2290				  perf_evlist__first(evlist)->attr.inherit;
2291again:
2292	before = trace->nr_events;
2293
2294	for (i = 0; i < evlist->nr_mmaps; i++) {
2295		union perf_event *event;
2296
2297		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
2298			struct perf_sample sample;
2299
2300			++trace->nr_events;
2301
2302			err = perf_evlist__parse_sample(evlist, event, &sample);
2303			if (err) {
2304				fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
2305				goto next_event;
2306			}
2307
2308			trace__handle_event(trace, event, &sample);
2309next_event:
2310			perf_evlist__mmap_consume(evlist, i);
2311
2312			if (interrupted)
2313				goto out_disable;
2314
2315			if (done && !draining) {
2316				perf_evlist__disable(evlist);
2317				draining = true;
2318			}
2319		}
2320	}
2321
2322	if (trace->nr_events == before) {
2323		int timeout = done ? 100 : -1;
2324
2325		if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
2326			if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
2327				draining = true;
2328
2329			goto again;
2330		}
2331	} else {
2332		goto again;
2333	}
2334
2335out_disable:
2336	thread__zput(trace->current);
2337
2338	perf_evlist__disable(evlist);
2339
2340	if (!err) {
2341		if (trace->summary)
2342			trace__fprintf_thread_summary(trace, trace->output);
2343
2344		if (trace->show_tool_stats) {
2345			fprintf(trace->output, "Stats:\n "
2346					       " vfs_getname : %" PRIu64 "\n"
2347					       " proc_getname: %" PRIu64 "\n",
2348				trace->stats.vfs_getname,
2349				trace->stats.proc_getname);
2350		}
2351	}
2352
2353out_delete_evlist:
2354	perf_evlist__delete(evlist);
2355	trace->evlist = NULL;
2356	trace->live = false;
2357	return err;
2358{
2359	char errbuf[BUFSIZ];
2360
2361out_error_sched_stat_runtime:
2362	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
2363	goto out_error;
2364
2365out_error_raw_syscalls:
2366	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
2367	goto out_error;
2368
2369out_error_mmap:
2370	perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
2371	goto out_error;
2372
2373out_error_open:
2374	perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
2375
2376out_error:
2377	fprintf(trace->output, "%s\n", errbuf);
2378	goto out_delete_evlist;
2379
2380out_error_apply_filters:
2381	fprintf(trace->output,
2382		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
2383		evsel->filter, perf_evsel__name(evsel), errno,
2384		str_error_r(errno, errbuf, sizeof(errbuf)));
2385	goto out_delete_evlist;
2386}
2387out_error_mem:
2388	fprintf(trace->output, "Not enough memory to run!\n");
2389	goto out_delete_evlist;
2390
2391out_errno:
2392	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
2393	goto out_delete_evlist;
2394}
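/*
 * The main loop above, in short: drain every mmap ring; if a full pass
 * found nothing new, poll (with a 100ms timeout once a signal has set
 * 'done'); once all ring buffer fds have reported POLLERR/POLLHUP,
 * stop.  The 'draining' flag ensures that events already sitting in
 * the ring buffers are still consumed after the events are disabled.
 * Also note the block after the final return: it exists only to scope
 * errbuf for the out_error_* labels.
 */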
2395
2396static int trace__replay(struct trace *trace)
2397{
2398	const struct perf_evsel_str_handler handlers[] = {
2399		{ "probe:vfs_getname",	     trace__vfs_getname, },
2400	};
2401	struct perf_data_file file = {
2402		.path  = input_name,
2403		.mode  = PERF_DATA_MODE_READ,
2404		.force = trace->force,
2405	};
2406	struct perf_session *session;
2407	struct perf_evsel *evsel;
2408	int err = -1;
2409
2410	trace->tool.sample	  = trace__process_sample;
2411	trace->tool.mmap	  = perf_event__process_mmap;
2412	trace->tool.mmap2	  = perf_event__process_mmap2;
2413	trace->tool.comm	  = perf_event__process_comm;
2414	trace->tool.exit	  = perf_event__process_exit;
2415	trace->tool.fork	  = perf_event__process_fork;
2416	trace->tool.attr	  = perf_event__process_attr;
2417	trace->tool.tracing_data = perf_event__process_tracing_data;
2418	trace->tool.build_id	  = perf_event__process_build_id;
2419
2420	trace->tool.ordered_events = true;
2421	trace->tool.ordering_requires_timestamps = true;
2422
2423	/* add tid to output */
2424	trace->multiple_threads = true;
2425
2426	session = perf_session__new(&file, false, &trace->tool);
2427	if (session == NULL)
2428		return -1;
2429
2430	if (trace->opts.target.pid)
2431		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
2432
2433	if (trace->opts.target.tid)
2434		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
2435
2436	if (symbol__init(&session->header.env) < 0)
2437		goto out;
2438
2439	trace->host = &session->machines.host;
2440
2441	err = perf_session__set_tracepoints_handlers(session, handlers);
2442	if (err)
2443		goto out;
2444
2445	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2446						     "raw_syscalls:sys_enter");
2447	/* older kernels have syscalls tp versus raw_syscalls */
2448	if (evsel == NULL)
2449		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2450							     "syscalls:sys_enter");
2451
2452	if (evsel &&
2453	    (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
2454	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
2455		pr_err("Error initializing the raw_syscalls:sys_enter event\n");
2456		goto out;
2457	}
2458
2459	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2460						     "raw_syscalls:sys_exit");
2461	if (evsel == NULL)
2462		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2463							     "syscalls:sys_exit");
2464	if (evsel &&
2465	    (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
2466	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
2467		pr_err("Error initializing the raw_syscalls:sys_exit event\n");
2468		goto out;
2469	}
2470
2471	evlist__for_each_entry(session->evlist, evsel) {
2472		if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
2473		    (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
2474		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
2475		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS))
2476			evsel->handler = trace__pgfault;
2477	}
2478
2479	setup_pager();
2480
2481	err = perf_session__process_events(session);
2482	if (err)
2483		pr_err("Failed to process events, error %d\n", err);
2484
2485	else if (trace->summary)
2486		trace__fprintf_thread_summary(trace, trace->output);
2487
2488out:
2489	perf_session__delete(session);
2490
2491	return err;
2492}
2493
2494static size_t trace__fprintf_threads_header(FILE *fp)
2495{
2496	size_t printed;
2497
2498	printed  = fprintf(fp, "\n Summary of events:\n\n");
2499
2500	return printed;
2501}
2502
2503DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
2504	struct stats 	*stats;
2505	double		msecs;
2506	int		syscall;
2507)
2508{
2509	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
2510	struct stats *stats = source->priv;
2511
2512	entry->syscall = source->i;
2513	entry->stats   = stats;
2514	entry->msecs   = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
2515}
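/*
 * DEFINE_RESORT_RB() generates a helper that re-sorts an existing
 * rbtree, here the per-thread intlist of syscall stats, by a new key.
 * Since min/max/avg are kept in nanoseconds, the msecs key computed
 * above, (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC), is the
 * total time spent in that syscall in milliseconds, so the summary
 * lists the most expensive syscalls first.
 */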
2516
2517static size_t thread__dump_stats(struct thread_trace *ttrace,
2518				 struct trace *trace, FILE *fp)
2519{
2520	size_t printed = 0;
2521	struct syscall *sc;
2522	struct rb_node *nd;
2523	DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
2524
2525	if (syscall_stats == NULL)
2526		return 0;
2527
2528	printed += fprintf(fp, "\n");
2529
2530	printed += fprintf(fp, "   syscall            calls    total       min       avg       max      stddev\n");
2531	printed += fprintf(fp, "                               (msec)    (msec)    (msec)    (msec)        (%%)\n");
2532	printed += fprintf(fp, "   --------------- -------- --------- --------- --------- ---------     ------\n");
2533
2534	resort_rb__for_each_entry(nd, syscall_stats) {
2535		struct stats *stats = syscall_stats_entry->stats;
2536		if (stats) {
2537			double min = (double)(stats->min) / NSEC_PER_MSEC;
2538			double max = (double)(stats->max) / NSEC_PER_MSEC;
2539			double avg = avg_stats(stats);
2540			double pct;
2541			u64 n = (u64) stats->n;
2542
2543			pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
2544			avg /= NSEC_PER_MSEC;
2545
2546			sc = &trace->syscalls.table[syscall_stats_entry->syscall];
2547			printed += fprintf(fp, "   %-15s", sc->name);
2548			printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
2549					   n, syscall_stats_entry->msecs, min, avg);
2550			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
2551		}
2552	}
2553
2554	resort_rb__delete(syscall_stats);
2555	printed += fprintf(fp, "\n\n");
2556
2557	return printed;
2558}
2559
2560static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
2561{
2562	size_t printed = 0;
2563	struct thread_trace *ttrace = thread__priv(thread);
2564	double ratio;
2565
2566	if (ttrace == NULL)
2567		return 0;
2568
2569	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
2570
2571	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
2572	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
2573	printed += fprintf(fp, "%.1f%%", ratio);
2574	if (ttrace->pfmaj)
2575		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
2576	if (ttrace->pfmin)
2577		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
2578	if (trace->sched)
2579		printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
2580	else if (fputc('\n', fp) != EOF)
2581		++printed;
2582
2583	printed += thread__dump_stats(ttrace, trace, fp);
2584
2585	return printed;
2586}
2587
2588static unsigned long thread__nr_events(struct thread_trace *ttrace)
2589{
2590	return ttrace ? ttrace->nr_events : 0;
2591}
2592
2593DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
2594	struct thread *thread;
2595)
2596{
2597	entry->thread = rb_entry(nd, struct thread, rb_node);
2598}
2599
2600static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
2601{
2602	DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host);
2603	size_t printed = trace__fprintf_threads_header(fp);
2604	struct rb_node *nd;
2605
2606	if (threads == NULL) {
2607		fprintf(fp, "%s", "Error sorting output by nr_events!\n");
2608		return 0;
2609	}
2610
2611	resort_rb__for_each_entry(nd, threads)
2612		printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
2613
2614	resort_rb__delete(threads);
2615
2616	return printed;
2617}
2618
2619static int trace__set_duration(const struct option *opt, const char *str,
2620			       int unset __maybe_unused)
2621{
2622	struct trace *trace = opt->value;
2623
2624	trace->duration_filter = atof(str);
2625	return 0;
2626}
2627
2628static int trace__set_filter_pids(const struct option *opt, const char *str,
2629				  int unset __maybe_unused)
2630{
2631	int ret = -1;
2632	size_t i;
2633	struct trace *trace = opt->value;
2634	/*
2635	 * FIXME: introduce a intarray class, plain parse csv and create a
2636	 * { int nr, int entries[] } struct...
2637	 */
2638	struct intlist *list = intlist__new(str);
2639
2640	if (list == NULL)
2641		return -1;
2642
2643	i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
2644	trace->filter_pids.entries = calloc(i, sizeof(pid_t));
2645
2646	if (trace->filter_pids.entries == NULL)
2647		goto out;
2648
2649	trace->filter_pids.entries[0] = getpid();
2650
2651	for (i = 1; i < trace->filter_pids.nr; ++i)
2652		trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
2653
2654	intlist__delete(list);
2655	ret = 0;
2656out:
2657	return ret;
2658}
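/*
 * Entry 0 is always the tracer's own pid: without it 'perf trace'
 * would trace the write()s it does to emit its own output, feeding
 * back on itself.  So "--filter-pids 123,456" really installs the set
 * {getpid(), 123, 456}.
 */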
2659
2660static int trace__open_output(struct trace *trace, const char *filename)
2661{
2662	struct stat st;
2663
2664	if (!stat(filename, &st) && st.st_size) {
2665		char oldname[PATH_MAX];
2666
2667		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
2668		unlink(oldname);
2669		rename(filename, oldname);
2670	}
2671
2672	trace->output = fopen(filename, "w");
2673
2674	return trace->output == NULL ? -errno : 0;
2675}
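/*
 * Same rotation convention as perf.data: a non-empty existing file is
 * renamed to "<name>.old" before being truncated, so the output of one
 * previous run is always kept around.
 */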
2676
2677static int parse_pagefaults(const struct option *opt, const char *str,
2678			    int unset __maybe_unused)
2679{
2680	int *trace_pgfaults = opt->value;
2681
2682	if (strcmp(str, "all") == 0)
2683		*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
2684	else if (strcmp(str, "maj") == 0)
2685		*trace_pgfaults |= TRACE_PFMAJ;
2686	else if (strcmp(str, "min") == 0)
2687		*trace_pgfaults |= TRACE_PFMIN;
2688	else
2689		return -1;
2690
2691	return 0;
2692}
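/*
 * Backs the "-F/--pf all|maj|min" option below.  Note the |=:
 * "-F maj -F min" accumulates to the same mask as "-F all".
 */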
2693
2694static void evlist__set_evsel_handler(struct perf_evlist *evlist, void *handler)
2695{
2696	struct perf_evsel *evsel;
2697
2698	evlist__for_each_entry(evlist, evsel)
2699		evsel->handler = handler;
2700}
2701
2702int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
2703{
2704	const char *trace_usage[] = {
2705		"perf trace [<options>] [<command>]",
2706		"perf trace [<options>] -- <command> [<options>]",
2707		"perf trace record [<options>] [<command>]",
2708		"perf trace record [<options>] -- <command> [<options>]",
2709		NULL
2710	};
2711	struct trace trace = {
2712		.syscalls = {
2713			.max = -1,
2714		},
2715		.opts = {
2716			.target = {
2717				.uid	   = UINT_MAX,
2718				.uses_mmap = true,
2719			},
2720			.user_freq     = UINT_MAX,
2721			.user_interval = ULLONG_MAX,
2722			.no_buffering  = true,
2723			.mmap_pages    = UINT_MAX,
2724			.proc_map_timeout  = 500,
2725		},
2726		.output = stderr,
2727		.show_comm = true,
2728		.trace_syscalls = true,
2729		.kernel_syscallchains = false,
2730		.max_stack = UINT_MAX,
2731	};
2732	const char *output_name = NULL;
2733	const char *ev_qualifier_str = NULL;
2734	const struct option trace_options[] = {
2735	OPT_CALLBACK(0, "event", &trace.evlist, "event",
2736		     "event selector. use 'perf list' to list available events",
2737		     parse_events_option),
2738	OPT_BOOLEAN(0, "comm", &trace.show_comm,
2739		    "show the thread COMM next to its id"),
2740	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
2741	OPT_STRING('e', "expr", &ev_qualifier_str, "expr", "list of syscalls to trace"),
2742	OPT_STRING('o', "output", &output_name, "file", "output file name"),
2743	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
2744	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
2745		    "trace events on existing process id"),
2746	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
2747		    "trace events on existing thread id"),
2748	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
2749		     "pids to filter (by the kernel)", trace__set_filter_pids),
2750	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
2751		    "system-wide collection from all CPUs"),
2752	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
2753		    "list of cpus to monitor"),
2754	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
2755		    "child tasks do not inherit counters"),
2756	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
2757		     "number of mmap data pages",
2758		     perf_evlist__parse_mmap_pages),
2759	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
2760		   "user to profile"),
2761	OPT_CALLBACK(0, "duration", &trace, "float",
2762		     "show only events with duration > N.M ms",
2763		     trace__set_duration),
2764	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
2765	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
2766	OPT_BOOLEAN('T', "time", &trace.full_time,
2767		    "Show full timestamp, not time relative to first start"),
2768	OPT_BOOLEAN('s', "summary", &trace.summary_only,
2769		    "Show only syscall summary with statistics"),
2770	OPT_BOOLEAN('S', "with-summary", &trace.summary,
2771		    "Show all syscalls and summary with statistics"),
2772	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
2773		     "Trace pagefaults", parse_pagefaults, "maj"),
2774	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
2775	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
2776	OPT_CALLBACK(0, "call-graph", &trace.opts,
2777		     "record_mode[,record_size]", record_callchain_help,
2778		     &record_parse_callchain_opt),
2779	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
2780		    "Show the kernel callchains on the syscall exit path"),
2781	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
2782		     "Set the minimum stack depth when parsing the callchain, "
2783		     "anything below the specified depth will be ignored."),
2784	OPT_UINTEGER(0, "max-stack", &trace.max_stack,
2785		     "Set the maximum stack depth when parsing the callchain, "
2786		     "anything beyond the specified depth will be ignored. "
2787		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
2788	OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
2789			"per thread proc mmap processing timeout in ms"),
2790	OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
2791		     "ms to wait before starting measurement after program "
2792		     "start"),
2793	OPT_END()
2794	};
2795	bool __maybe_unused max_stack_user_set = true;
2796	bool mmap_pages_user_set = true;
2797	const char * const trace_subcommands[] = { "record", NULL };
2798	int err;
2799	char bf[BUFSIZ];
2800
2801	signal(SIGSEGV, sighandler_dump_stack);
2802	signal(SIGFPE, sighandler_dump_stack);
2803
2804	trace.evlist = perf_evlist__new();
2805	trace.sctbl = syscalltbl__new();
2806
2807	if (trace.evlist == NULL || trace.sctbl == NULL) {
2808		pr_err("Not enough memory to run!\n");
2809		err = -ENOMEM;
2810		goto out;
2811	}
2812
2813	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
2814				 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
2815
2816	err = bpf__setup_stdout(trace.evlist);
2817	if (err) {
2818		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
2819		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
2820		goto out;
2821	}
2822
2823	err = -1;
2824
2825	if (trace.trace_pgfaults) {
2826		trace.opts.sample_address = true;
2827		trace.opts.sample_time = true;
2828	}
2829
2830	if (trace.opts.mmap_pages == UINT_MAX)
2831		mmap_pages_user_set = false;
2832
2833	if (trace.max_stack == UINT_MAX) {
2834		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl_perf_event_max_stack;
2835		max_stack_user_set = false;
2836	}
2837
2838#ifdef HAVE_DWARF_UNWIND_SUPPORT
2839	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled && trace.trace_syscalls)
2840		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
2841#endif
2842
2843	if (callchain_param.enabled) {
2844		if (!mmap_pages_user_set && geteuid() == 0)
2845			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
2846
2847		symbol_conf.use_callchain = true;
2848	}
2849
2850	if (trace.evlist->nr_entries > 0)
2851		evlist__set_evsel_handler(trace.evlist, trace__event_handler);
2852
2853	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
2854		return trace__record(&trace, argc-1, &argv[1]);
2855
2856	/* summary_only implies summary option, but don't overwrite summary if set */
2857	if (trace.summary_only)
2858		trace.summary = trace.summary_only;
2859
2860	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
2861	    trace.evlist->nr_entries == 0 /* Was --events used? */) {
2862		pr_err("Please specify something to trace.\n");
2863		return -1;
2864	}
2865
2866	if (!trace.trace_syscalls && ev_qualifier_str) {
2867		pr_err("The -e option can't be used with --no-syscalls.\n");
2868		goto out;
2869	}
2870
2871	if (output_name != NULL) {
2872		err = trace__open_output(&trace, output_name);
2873		if (err < 0) {
2874			perror("failed to create output file");
2875			goto out;
2876		}
2877	}
2878
2879	trace.open_id = syscalltbl__id(trace.sctbl, "open");
2880
2881	if (ev_qualifier_str != NULL) {
2882		const char *s = ev_qualifier_str;
2883		struct strlist_config slist_config = {
2884			.dirname = system_path(STRACE_GROUPS_DIR),
2885		};
2886
2887		trace.not_ev_qualifier = *s == '!';
2888		if (trace.not_ev_qualifier)
2889			++s;
2890		trace.ev_qualifier = strlist__new(s, &slist_config);
2891		if (trace.ev_qualifier == NULL) {
2892			fputs("Not enough memory to parse event qualifier",
2893			      trace.output);
2894			err = -ENOMEM;
2895			goto out_close;
2896		}
2897
2898		err = trace__validate_ev_qualifier(&trace);
2899		if (err)
2900			goto out_close;
2901	}
2902
2903	err = target__validate(&trace.opts.target);
2904	if (err) {
2905		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
2906		fprintf(trace.output, "%s", bf);
2907		goto out_close;
2908	}
2909
2910	err = target__parse_uid(&trace.opts.target);
2911	if (err) {
2912		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
2913		fprintf(trace.output, "%s", bf);
2914		goto out_close;
2915	}
2916
2917	if (!argc && target__none(&trace.opts.target))
2918		trace.opts.target.system_wide = true;
2919
2920	if (input_name)
2921		err = trace__replay(&trace);
2922	else
2923		err = trace__run(&trace, argc, argv);
2924
2925out_close:
2926	if (output_name != NULL)
2927		fclose(trace.output);
2928out:
2929	return err;
2930}
v5.14.15
   1/*
   2 * builtin-trace.c
   3 *
   4 * Builtin 'trace' command:
   5 *
   6 * Display a continuously updated trace of any workload, CPU, specific PID,
   7 * system wide, etc.  Default format is loosely strace like, but any other
   8 * event may be specified using --event.
   9 *
  10 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
  11 *
  12 * Initially based on the 'trace' prototype by Thomas Gleixner:
  13 *
  14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
  15 */
  16
  17#include "util/record.h"
  18#include <traceevent/event-parse.h>
  19#include <api/fs/tracing_path.h>
  20#include <bpf/bpf.h>
  21#include "util/bpf_map.h"
  22#include "util/rlimit.h"
  23#include "builtin.h"
  24#include "util/cgroup.h"
  25#include "util/color.h"
  26#include "util/config.h"
  27#include "util/debug.h"
  28#include "util/dso.h"
  29#include "util/env.h"
  30#include "util/event.h"
  31#include "util/evsel.h"
  32#include "util/evsel_fprintf.h"
  33#include "util/synthetic-events.h"
  34#include "util/evlist.h"
  35#include "util/evswitch.h"
  36#include "util/mmap.h"
  37#include <subcmd/pager.h>
  38#include <subcmd/exec-cmd.h>
  39#include "util/machine.h"
  40#include "util/map.h"
  41#include "util/symbol.h"
  42#include "util/path.h"
  43#include "util/session.h"
  44#include "util/thread.h"
  45#include <subcmd/parse-options.h>
  46#include "util/strlist.h"
  47#include "util/intlist.h"
  48#include "util/thread_map.h"
  49#include "util/stat.h"
  50#include "util/tool.h"
  51#include "util/util.h"
  52#include "trace/beauty/beauty.h"
  53#include "trace-event.h"
  54#include "util/parse-events.h"
  55#include "util/bpf-loader.h"
  56#include "callchain.h"
  57#include "print_binary.h"
  58#include "string2.h"
  59#include "syscalltbl.h"
  60#include "rb_resort.h"
  61#include "../perf.h"
  62
  63#include <errno.h>
  64#include <inttypes.h>
  65#include <poll.h>
  66#include <signal.h>
  67#include <stdlib.h>
  68#include <string.h>
  69#include <linux/err.h>
  70#include <linux/filter.h>
  71#include <linux/kernel.h>
  72#include <linux/random.h>
  73#include <linux/stringify.h>
  74#include <linux/time64.h>
  75#include <linux/zalloc.h>
  76#include <fcntl.h>
  77#include <sys/sysmacros.h>
  78
  79#include <linux/ctype.h>
  80#include <perf/mmap.h>
  81
  82#ifndef O_CLOEXEC
  83# define O_CLOEXEC		02000000
  84#endif
  85
  86#ifndef F_LINUX_SPECIFIC_BASE
  87# define F_LINUX_SPECIFIC_BASE	1024
  88#endif
  89
  90/*
  91 * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
  92 */
  93struct syscall_arg_fmt {
  94	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
  95	bool	   (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
  96	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
  97	void	   *parm;
  98	const char *name;
  99	u16	   nr_entries; // for arrays
 100	bool	   show_zero;
 101};
 102
 103struct syscall_fmt {
 104	const char *name;
 105	const char *alias;
 106	struct {
 107		const char *sys_enter,
 108			   *sys_exit;
 109	}	   bpf_prog_name;
 110	struct syscall_arg_fmt arg[6];
 111	u8	   nr_args;
 112	bool	   errpid;
 113	bool	   timeout;
 114	bool	   hexret;
 115};
 116
 117struct trace {
 118	struct perf_tool	tool;
 119	struct syscalltbl	*sctbl;
 120	struct {
 121		struct syscall  *table;
 122		struct bpf_map  *map;
 123		struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
 124			struct bpf_map  *sys_enter,
 125					*sys_exit;
 126		}		prog_array;
 127		struct {
 128			struct evsel *sys_enter,
 129					  *sys_exit,
 130					  *augmented;
 131		}		events;
 132		struct bpf_program *unaugmented_prog;
 133	} syscalls;
 134	struct {
 135		struct bpf_map *map;
 136	} dump;
 137	struct record_opts	opts;
 138	struct evlist	*evlist;
 139	struct machine		*host;
 140	struct thread		*current;
 141	struct bpf_object	*bpf_obj;
 142	struct cgroup		*cgroup;
 143	u64			base_time;
 144	FILE			*output;
 145	unsigned long		nr_events;
 146	unsigned long		nr_events_printed;
 147	unsigned long		max_events;
 148	struct evswitch		evswitch;
 149	struct strlist		*ev_qualifier;
 150	struct {
 151		size_t		nr;
 152		int		*entries;
 153	}			ev_qualifier_ids;
 154	struct {
 155		size_t		nr;
 156		pid_t		*entries;
 157		struct bpf_map  *map;
 158	}			filter_pids;
 159	double			duration_filter;
 160	double			runtime_ms;
 161	struct {
 162		u64		vfs_getname,
 163				proc_getname;
 164	} stats;
 165	unsigned int		max_stack;
 166	unsigned int		min_stack;
 167	int			raw_augmented_syscalls_args_size;
 168	bool			raw_augmented_syscalls;
 169	bool			fd_path_disabled;
 170	bool			sort_events;
 171	bool			not_ev_qualifier;
 172	bool			live;
 173	bool			full_time;
 174	bool			sched;
 175	bool			multiple_threads;
 176	bool			summary;
 177	bool			summary_only;
 178	bool			errno_summary;
 179	bool			failure_only;
 180	bool			show_comm;
 181	bool			print_sample;
 182	bool			show_tool_stats;
 183	bool			trace_syscalls;
 184	bool			libtraceevent_print;
 185	bool			kernel_syscallchains;
 186	s16			args_alignment;
 187	bool			show_tstamp;
 188	bool			show_duration;
 189	bool			show_zeros;
 190	bool			show_arg_names;
 191	bool			show_string_prefix;
 192	bool			force;
 193	bool			vfs_getname;
 194	int			trace_pgfaults;
 195	char			*perfconfig_events;
 196	struct {
 197		struct ordered_events	data;
 198		u64			last;
 199	} oe;
 200};
 201
 202struct tp_field {
 203	int offset;
 204	union {
 205		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
 206		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
 207	};
 208};
 209
 210#define TP_UINT_FIELD(bits) \
 211static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
 212{ \
 213	u##bits value; \
 214	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
 215	return value;  \
 216}
 217
 218TP_UINT_FIELD(8);
 219TP_UINT_FIELD(16);
 220TP_UINT_FIELD(32);
 221TP_UINT_FIELD(64);
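/*
 * For reference, TP_UINT_FIELD(32) expands to:
 *
 *	static u64 tp_field__u32(struct tp_field *field, struct perf_sample *sample)
 *	{
 *		u32 value;
 *		memcpy(&value, sample->raw_data + field->offset, sizeof(value));
 *		return value;
 *	}
 *
 * i.e. one accessor per field width, each widened to u64, with
 * memcpy() used so unaligned raw_data offsets are read safely.
 */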
 222
 223#define TP_UINT_FIELD__SWAPPED(bits) \
 224static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
 225{ \
 226	u##bits value; \
 227	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
 228	return bswap_##bits(value);\
 229}
 230
 231TP_UINT_FIELD__SWAPPED(16);
 232TP_UINT_FIELD__SWAPPED(32);
 233TP_UINT_FIELD__SWAPPED(64);
 234
 235static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
 236{
 237	field->offset = offset;
 238
 239	switch (size) {
 240	case 1:
 241		field->integer = tp_field__u8;
 242		break;
 243	case 2:
 244		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
 245		break;
 246	case 4:
 247		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
 248		break;
 249	case 8:
 250		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
 251		break;
 252	default:
 253		return -1;
 254	}
 255
 256	return 0;
 257}
 258
 259static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
 260{
 261	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
 262}
 263
 264static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
 265{
 266	return sample->raw_data + field->offset;
 267}
 268
 269static int __tp_field__init_ptr(struct tp_field *field, int offset)
 270{
 271	field->offset = offset;
 272	field->pointer = tp_field__ptr;
 273	return 0;
 274}
 275
 276static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
 277{
 278	return __tp_field__init_ptr(field, format_field->offset);
 279}
 280
 281struct syscall_tp {
 282	struct tp_field id;
 283	union {
 284		struct tp_field args, ret;
 285	};
 286};
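/*
 * The args/ret union is safe because no single evsel uses both:
 * raw_syscalls:sys_enter carries the 'args' array while
 * raw_syscalls:sys_exit carries 'ret', and each tracepoint only
 * initializes the member it needs.
 */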
 287
 288/*
 289 * The evsel->priv as used by 'perf trace'
 290 * sc:	for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
 291 * fmt: for all the other tracepoints
 292 */
 293struct evsel_trace {
 294	struct syscall_tp	sc;
 295	struct syscall_arg_fmt  *fmt;
 296};
 297
 298static struct evsel_trace *evsel_trace__new(void)
 299{
 300	return zalloc(sizeof(struct evsel_trace));
 301}
 302
 303static void evsel_trace__delete(struct evsel_trace *et)
 304{
 305	if (et == NULL)
 306		return;
 307
 308	zfree(&et->fmt);
 309	free(et);
 310}
 311
 312/*
 313 * Used with raw_syscalls:sys_{enter,exit} and with the
 314 * syscalls:sys_{enter,exit}_SYSCALL tracepoints
 315 */
 316static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel)
 317{
 318	struct evsel_trace *et = evsel->priv;
 319
 320	return &et->sc;
 321}
 322
 323static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel)
 324{
 325	if (evsel->priv == NULL) {
 326		evsel->priv = evsel_trace__new();
 327		if (evsel->priv == NULL)
 328			return NULL;
 329	}
 330
 331	return __evsel__syscall_tp(evsel);
 332}
 333
 334/*
 335 * Used with all the other tracepoints.
 336 */
 337static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
 338{
 339	struct evsel_trace *et = evsel->priv;
 340
 341	return et->fmt;
 342}
 343
 344static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
 345{
 346	struct evsel_trace *et = evsel->priv;
 347
 348	if (evsel->priv == NULL) {
 349		et = evsel->priv = evsel_trace__new();
 350
 351		if (et == NULL)
 352			return NULL;
 353	}
 354
 355	if (et->fmt == NULL) {
 356		et->fmt = calloc(evsel->tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
 357		if (et->fmt == NULL)
 358			goto out_delete;
 359	}
 360
 361	return __evsel__syscall_arg_fmt(evsel);
 362
 363out_delete:
 364	evsel_trace__delete(evsel->priv);
 365	evsel->priv = NULL;
 366	return NULL;
 367}
 368
 369static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
 370{
 371	struct tep_format_field *format_field = evsel__field(evsel, name);
 372
 373	if (format_field == NULL)
 374		return -1;
 375
 376	return tp_field__init_uint(field, format_field, evsel->needs_swap);
 377}
 378
 379#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
 380	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
 381	   evsel__init_tp_uint_field(evsel, &sc->name, #name); })
 382
 383static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
 384{
 385	struct tep_format_field *format_field = evsel__field(evsel, name);
 386
 387	if (format_field == NULL)
 388		return -1;
 389
 390	return tp_field__init_ptr(field, format_field);
 391}
 392
 393#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
 394	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
 395	   evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
 396
 397static void evsel__delete_priv(struct evsel *evsel)
 398{
 399	zfree(&evsel->priv);
 400	evsel__delete(evsel);
 401}
 402
 403static int evsel__init_syscall_tp(struct evsel *evsel)
 404{
 405	struct syscall_tp *sc = evsel__syscall_tp(evsel);
 406
 407	if (sc != NULL) {
 408		if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
 409		    evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
 410			return -ENOENT;
 411		return 0;
 412	}
 413
 414	return -ENOMEM;
 415}
 416
 417static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
 418{
 419	struct syscall_tp *sc = evsel__syscall_tp(evsel);
 420
 421	if (sc != NULL) {
 422		struct tep_format_field *syscall_id = evsel__field(tp, "id");
 423		if (syscall_id == NULL)
 424			syscall_id = evsel__field(tp, "__syscall_nr");
 425		if (syscall_id == NULL ||
 426		    __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
 427			return -EINVAL;
 428
 429		return 0;
 430	}
 431
 432	return -ENOMEM;
 433}
 434
 435static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
 436{
 437	struct syscall_tp *sc = __evsel__syscall_tp(evsel);
 438
 439	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
 440}
 441
 442static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
 443{
 444	struct syscall_tp *sc = __evsel__syscall_tp(evsel);
 445
 446	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
 447}
 448
 449static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
 450{
 451	if (evsel__syscall_tp(evsel) != NULL) {
 452		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
 453			return -ENOENT;
 454
 455		evsel->handler = handler;
 456		return 0;
 457	}
 458
 459	return -ENOMEM;
 460}
 461
 462static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
 463{
 464	struct evsel *evsel = evsel__newtp("raw_syscalls", direction);
 465
 466	/* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
 467	if (IS_ERR(evsel))
 468		evsel = evsel__newtp("syscalls", direction);
 469
 470	if (IS_ERR(evsel))
 471		return NULL;
 472
 473	if (evsel__init_raw_syscall_tp(evsel, handler))
 474		goto out_delete;
 475
 476	return evsel;
 477
 478out_delete:
 479	evsel__delete_priv(evsel);
 480	return NULL;
 481}
 482
 483#define perf_evsel__sc_tp_uint(evsel, name, sample) \
 484	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
 485	   fields->name.integer(&fields->name, sample); })
 486
 487#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
 488	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
 489	   fields->name.pointer(&fields->name, sample); })
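// Usage sketch (illustrative): once the id/args offsets have been set up by
// the init helpers above, these accessors pull typed fields straight out of
// a sample, e.g.:
//
//	u64 id     = perf_evsel__sc_tp_uint(evsel, id, sample);
//	void *args = perf_evsel__sc_tp_ptr(evsel, args, sample);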
 490
 491size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
 492{
 493	int idx = val - sa->offset;
 494
 495	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
 496		size_t printed = scnprintf(bf, size, intfmt, val);
 497		if (show_suffix)
 498			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
 499		return printed;
 500	}
 501
 502	return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
 503}
 504
 505size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
 506{
 507	int idx = val - sa->offset;
 508
 509	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
 510		size_t printed = scnprintf(bf, size, intfmt, val);
 511		if (show_prefix)
 512			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
 513		return printed;
 514	}
 515
 516	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
 517}
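// Example (illustrative): with the "whences" array defined further down,
// strarray__scnprintf(&strarray__whences, bf, size, "%d", true, 1) yields
// "SEEK_CUR", while an unknown value such as 42 falls back to intfmt and
// yields: 42 /* SEEK_??? */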
 518
 519static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
 520						const char *intfmt,
 521					        struct syscall_arg *arg)
 522{
 523	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
 524}
 525
 526static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
 527					      struct syscall_arg *arg)
 528{
 529	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
 530}
 531
 532#define SCA_STRARRAY syscall_arg__scnprintf_strarray
 533
 534bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
 535{
 536	return strarray__strtoul(arg->parm, bf, size, ret);
 537}
 538
 539bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
 540{
 541	return strarray__strtoul_flags(arg->parm, bf, size, ret);
 542}
 543
 544bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
 545{
 546	return strarrays__strtoul(arg->parm, bf, size, ret);
 547}
 548
 549size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
 550{
 551	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
 552}
 553
 554size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
 555{
 556	size_t printed;
 557	int i;
 558
 559	for (i = 0; i < sas->nr_entries; ++i) {
 560		struct strarray *sa = sas->entries[i];
 561		int idx = val - sa->offset;
 562
 563		if (idx >= 0 && idx < sa->nr_entries) {
 564			if (sa->entries[idx] == NULL)
 565				break;
 566			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
 567		}
 568	}
 569
 570	printed = scnprintf(bf, size, intfmt, val);
 571	if (show_prefix)
 572		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
 573	return printed;
 574}
 575
 576bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret)
 577{
 578	int i;
 579
 580	for (i = 0; i < sa->nr_entries; ++i) {
 581		if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') {
 582			*ret = sa->offset + i;
 583			return true;
 584		}
 585	}
 586
 587	return false;
 588}
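// Example (illustrative): the reverse mapping, turning a flag/option name
// back into a number. With the "itimers" array defined further down,
// strarray__strtoul(&strarray__itimers, "VIRTUAL", 7, &val) matches entry
// [1] and sets val = 1, i.e. ITIMER_VIRTUAL.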
 589
 590bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret)
 591{
 592	u64 val = 0;
 593	char *tok = bf, *sep, *end;
 594
 595	*ret = 0;
 596
 597	while (size != 0) {
 598		int toklen = size;
 599
 600		sep = memchr(tok, '|', size);
 601		if (sep != NULL) {
 602			size -= sep - tok + 1;
 603
 604			end = sep - 1;
 605			while (end > tok && isspace(*end))
 606				--end;
 607
 608			toklen = end - tok + 1;
 609		}
 610
 611		while (isspace(*tok))
 612			++tok;
 613
 614		if (isalpha(*tok) || *tok == '_') {
 615			if (!strarray__strtoul(sa, tok, toklen, &val))
 616				return false;
 617		} else {
 618			bool is_hexa = tok[0] == '0' && (tok[1] == 'x' || tok[1] == 'X');
 619
 620			val = strtoul(tok, NULL, is_hexa ? 16 : 0);
 621		}
 622
 623		*ret |= (1 << (val - 1));
 624
 625		if (sep == NULL)
 626			break;
 627		tok = sep + 1;
 628	}
 629
 630	return true;
 631}
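// Example (illustrative): parses '|'-separated flag names, mapping array
// index N to bit N-1 via *ret |= 1 << (val - 1). With "fsmount_flags"
// defined further down, where entry [1] is "CLOEXEC", parsing "CLOEXEC"
// sets *ret = 0x1, matching FSMOUNT_CLOEXEC.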
 632
 633bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret)
 634{
 635	int i;
 636
 637	for (i = 0; i < sas->nr_entries; ++i) {
 638		struct strarray *sa = sas->entries[i];
 639
 640		if (strarray__strtoul(sa, bf, size, ret))
 641			return true;
 642	}
 643
 644	return false;
 645}
 646
 647size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
 648					struct syscall_arg *arg)
 649{
 650	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
 651}
 652
 653#ifndef AT_FDCWD
 654#define AT_FDCWD	-100
 655#endif
 656
 657static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
 658					   struct syscall_arg *arg)
 659{
 660	int fd = arg->val;
 661	const char *prefix = "AT_FD";
 662
 663	if (fd == AT_FDCWD)
 664		return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");
 665
 666	return syscall_arg__scnprintf_fd(bf, size, arg);
 667}
 668
 669#define SCA_FDAT syscall_arg__scnprintf_fd_at
 670
 671static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
 672					      struct syscall_arg *arg);
 673
 674#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
 675
 676size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
 677{
 678	return scnprintf(bf, size, "%#lx", arg->val);
 679}
 680
 681size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
 682{
 683	if (arg->val == 0)
 684		return scnprintf(bf, size, "NULL");
 685	return syscall_arg__scnprintf_hex(bf, size, arg);
 686}
 687
 688size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
 689{
 690	return scnprintf(bf, size, "%d", arg->val);
 691}
 692
 693size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
 694{
 695	return scnprintf(bf, size, "%ld", arg->val);
 696}
 697
 698static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
 699{
 700	// XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
 701	//     fill missing comms using thread__set_comm()...
 702	//     here or in a special syscall_arg__scnprintf_pid_sched_tp...
 703	return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
 704}
 705
 706#define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array
 707
 708static const char *bpf_cmd[] = {
 709	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
 710	"MAP_GET_NEXT_KEY", "PROG_LOAD",
 711};
 712static DEFINE_STRARRAY(bpf_cmd, "BPF_");
 713
 714static const char *fsmount_flags[] = {
 715	[1] = "CLOEXEC",
 716};
 717static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");
 718
 719#include "trace/beauty/generated/fsconfig_arrays.c"
 720
 721static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");
 722
 723static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
 724static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
 725
 726static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
 727static DEFINE_STRARRAY(itimers, "ITIMER_");
 728
 729static const char *keyctl_options[] = {
 730	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
 731	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
 732	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
 733	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
 734	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
 735};
 736static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");
 737
 738static const char *whences[] = { "SET", "CUR", "END",
 739#ifdef SEEK_DATA
 740"DATA",
 741#endif
 742#ifdef SEEK_HOLE
 743"HOLE",
 744#endif
 745};
 746static DEFINE_STRARRAY(whences, "SEEK_");
 747
 748static const char *fcntl_cmds[] = {
 749	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
 750	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
 751	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
 752	"GETOWNER_UIDS",
 753};
 754static DEFINE_STRARRAY(fcntl_cmds, "F_");
 755
 756static const char *fcntl_linux_specific_cmds[] = {
 757	"SETLEASE", "GETLEASE", "NOTIFY", [5] =	"CANCELLK", "DUPFD_CLOEXEC",
 758	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
 759	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
 760};
 761
 762static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);
 763
 764static struct strarray *fcntl_cmds_arrays[] = {
 765	&strarray__fcntl_cmds,
 766	&strarray__fcntl_linux_specific_cmds,
 767};
 768
 769static DEFINE_STRARRAYS(fcntl_cmds_arrays);
 770
 771static const char *rlimit_resources[] = {
 772	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
 773	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
 774	"RTTIME",
 775};
 776static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");
 777
 778static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
 779static DEFINE_STRARRAY(sighow, "SIG_");
 780
 781static const char *clockid[] = {
 782	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
 783	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
 784	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
 785};
 786static DEFINE_STRARRAY(clockid, "CLOCK_");
 787
 788static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
 789						 struct syscall_arg *arg)
 790{
 791	bool show_prefix = arg->show_string_prefix;
 792	const char *suffix = "_OK";
 793	size_t printed = 0;
 794	int mode = arg->val;
 795
 796	if (mode == F_OK) /* 0 */
 797		return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
 798#define	P_MODE(n) \
 799	if (mode & n##_OK) { \
 800		printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
 801		mode &= ~n##_OK; \
 802	}
 803
 804	P_MODE(R);
 805	P_MODE(W);
 806	P_MODE(X);
 807#undef P_MODE
 808
 809	if (mode)
 810		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
 811
 812	return printed;
 813}
 814
 815#define SCA_ACCMODE syscall_arg__scnprintf_access_mode
 816
 817static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
 818					      struct syscall_arg *arg);
 819
 820#define SCA_FILENAME syscall_arg__scnprintf_filename
 821
 822static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
 823						struct syscall_arg *arg)
 824{
 825	bool show_prefix = arg->show_string_prefix;
 826	const char *prefix = "O_";
 827	int printed = 0, flags = arg->val;
 828
 829#define	P_FLAG(n) \
 830	if (flags & O_##n) { \
 831		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
 832		flags &= ~O_##n; \
 833	}
 834
 835	P_FLAG(CLOEXEC);
 836	P_FLAG(NONBLOCK);
 837#undef P_FLAG
 838
 839	if (flags)
 840		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
 841
 842	return printed;
 843}
 844
 845#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
 846
 847#ifndef GRND_NONBLOCK
 848#define GRND_NONBLOCK	0x0001
 849#endif
 850#ifndef GRND_RANDOM
 851#define GRND_RANDOM	0x0002
 852#endif
 853
 854static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
 855						   struct syscall_arg *arg)
 856{
 857	bool show_prefix = arg->show_string_prefix;
 858	const char *prefix = "GRND_";
 859	int printed = 0, flags = arg->val;
 860
 861#define	P_FLAG(n) \
 862	if (flags & GRND_##n) { \
 863		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
 864		flags &= ~GRND_##n; \
 865	}
 866
 867	P_FLAG(RANDOM);
 868	P_FLAG(NONBLOCK);
 869#undef P_FLAG
 870
 871	if (flags)
 872		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
 873
 874	return printed;
 875}
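// Sample output (illustrative): getrandom(..., GRND_RANDOM|GRND_NONBLOCK)
// is shown as "GRND_RANDOM|GRND_NONBLOCK" when prefixes are enabled; any
// residual unknown bits are appended in hex, e.g. "|0x4".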
 876
 877#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
 878
 879#define STRARRAY(name, array) \
 880	  { .scnprintf	= SCA_STRARRAY, \
 881	    .strtoul	= STUL_STRARRAY, \
 882	    .parm	= &strarray__##array, }
 883
 884#define STRARRAY_FLAGS(name, array) \
 885	  { .scnprintf	= SCA_STRARRAY_FLAGS, \
 886	    .strtoul	= STUL_STRARRAY_FLAGS, \
 887	    .parm	= &strarray__##array, }
 888
 889#include "trace/beauty/arch_errno_names.c"
 890#include "trace/beauty/eventfd.c"
 891#include "trace/beauty/futex_op.c"
 892#include "trace/beauty/futex_val3.c"
 893#include "trace/beauty/mmap.c"
 894#include "trace/beauty/mode_t.c"
 895#include "trace/beauty/msg_flags.c"
 896#include "trace/beauty/open_flags.c"
 897#include "trace/beauty/perf_event_open.c"
 898#include "trace/beauty/pid.c"
 899#include "trace/beauty/sched_policy.c"
 900#include "trace/beauty/seccomp.c"
 901#include "trace/beauty/signum.c"
 902#include "trace/beauty/socket_type.c"
 903#include "trace/beauty/waitid_options.c"
 904
 905static struct syscall_fmt syscall_fmts[] = {
 906	{ .name	    = "access",
 907	  .arg = { [1] = { .scnprintf = SCA_ACCMODE,  /* mode */ }, }, },
 908	{ .name	    = "arch_prctl",
 909	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
 910		   [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
 911	{ .name	    = "bind",
 912	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
 913		   [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
 914		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
 915	{ .name	    = "bpf",
 916	  .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
 917	{ .name	    = "brk",	    .hexret = true,
 918	  .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
 919	{ .name     = "clock_gettime",
 920	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
 921	{ .name	    = "clone",	    .errpid = true, .nr_args = 5,
 922	  .arg = { [0] = { .name = "flags",	    .scnprintf = SCA_CLONE_FLAGS, },
 923		   [1] = { .name = "child_stack",   .scnprintf = SCA_HEX, },
 924		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
 925		   [3] = { .name = "child_tidptr",  .scnprintf = SCA_HEX, },
 926		   [4] = { .name = "tls",	    .scnprintf = SCA_HEX, }, }, },
 927	{ .name	    = "close",
 928	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
 929	{ .name	    = "connect",
 930	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
 931		   [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
 932		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
 933	{ .name	    = "epoll_ctl",
 934	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
 935	{ .name	    = "eventfd2",
 936	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
 937	{ .name	    = "fchmodat",
 938	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
 939	{ .name	    = "fchownat",
 940	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
 941	{ .name	    = "fcntl",
 942	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD,  /* cmd */
 943			   .strtoul   = STUL_STRARRAYS,
 944			   .parm      = &strarrays__fcntl_cmds_arrays,
 945			   .show_zero = true, },
 946		   [2] = { .scnprintf =  SCA_FCNTL_ARG, /* arg */ }, }, },
 947	{ .name	    = "flock",
 948	  .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
 949	{ .name     = "fsconfig",
 950	  .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
 951	{ .name     = "fsmount",
 952	  .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
 953		   [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
 954	{ .name     = "fspick",
 955	  .arg = { [0] = { .scnprintf = SCA_FDAT,	  /* dfd */ },
 956		   [1] = { .scnprintf = SCA_FILENAME,	  /* path */ },
 957		   [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
 958	{ .name	    = "fstat", .alias = "newfstat", },
 959	{ .name	    = "fstatat", .alias = "newfstatat", },
 960	{ .name	    = "futex",
 961	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
 962		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
 963	{ .name	    = "futimesat",
 964	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
 965	{ .name	    = "getitimer",
 966	  .arg = { [0] = STRARRAY(which, itimers), }, },
 967	{ .name	    = "getpid",	    .errpid = true, },
 968	{ .name	    = "getpgid",    .errpid = true, },
 969	{ .name	    = "getppid",    .errpid = true, },
 970	{ .name	    = "getrandom",
 971	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
 972	{ .name	    = "getrlimit",
 973	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
 974	{ .name	    = "gettid",	    .errpid = true, },
 975	{ .name	    = "ioctl",
 976	  .arg = {
 977#if defined(__i386__) || defined(__x86_64__)
 978/*
 979 * FIXME: Make this available to all arches.
 980 */
 981		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
 982		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
 983#else
 984		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
 985#endif
 986	{ .name	    = "kcmp",	    .nr_args = 5,
 987	  .arg = { [0] = { .name = "pid1",	.scnprintf = SCA_PID, },
 988		   [1] = { .name = "pid2",	.scnprintf = SCA_PID, },
 989		   [2] = { .name = "type",	.scnprintf = SCA_KCMP_TYPE, },
 990		   [3] = { .name = "idx1",	.scnprintf = SCA_KCMP_IDX, },
 991		   [4] = { .name = "idx2",	.scnprintf = SCA_KCMP_IDX, }, }, },
 992	{ .name	    = "keyctl",
 993	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
 994	{ .name	    = "kill",
 995	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
 996	{ .name	    = "linkat",
 997	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
 998	{ .name	    = "lseek",
 999	  .arg = { [2] = STRARRAY(whence, whences), }, },
1000	{ .name	    = "lstat", .alias = "newlstat", },
1001	{ .name     = "madvise",
1002	  .arg = { [0] = { .scnprintf = SCA_HEX,      /* start */ },
1003		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
1004	{ .name	    = "mkdirat",
1005	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1006	{ .name	    = "mknodat",
1007	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
1008	{ .name	    = "mmap",	    .hexret = true,
1009/* The standard mmap maps to old_mmap on s390x */
1010#if defined(__s390x__)
1011	.alias = "old_mmap",
1012#endif
1013	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
1014		   [3] = { .scnprintf = SCA_MMAP_FLAGS,	/* flags */
1015			   .strtoul   = STUL_STRARRAY_FLAGS,
1016			   .parm      = &strarray__mmap_flags, },
1017		   [5] = { .scnprintf = SCA_HEX,	/* offset */ }, }, },
1018	{ .name	    = "mount",
1019	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
1020		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
1021			   .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
1022	{ .name	    = "move_mount",
1023	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* from_dfd */ },
1024		   [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
1025		   [2] = { .scnprintf = SCA_FDAT,	/* to_dfd */ },
1026		   [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
1027		   [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
1028	{ .name	    = "mprotect",
1029	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
1030		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ }, }, },
1031	{ .name	    = "mq_unlink",
1032	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
1033	{ .name	    = "mremap",	    .hexret = true,
1034	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
1035	{ .name	    = "name_to_handle_at",
1036	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1037	{ .name	    = "newfstatat",
1038	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1039	{ .name	    = "open",
1040	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1041	{ .name	    = "open_by_handle_at",
1042	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
1043		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1044	{ .name	    = "openat",
1045	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
1046		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
1047	{ .name	    = "perf_event_open",
1048	  .arg = { [2] = { .scnprintf = SCA_INT,	/* cpu */ },
1049		   [3] = { .scnprintf = SCA_FD,		/* group_fd */ },
1050		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
1051	{ .name	    = "pipe2",
1052	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
1053	{ .name	    = "pkey_alloc",
1054	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS,	/* access_rights */ }, }, },
1055	{ .name	    = "pkey_free",
1056	  .arg = { [0] = { .scnprintf = SCA_INT,	/* key */ }, }, },
1057	{ .name	    = "pkey_mprotect",
1058	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
1059		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
1060		   [3] = { .scnprintf = SCA_INT,	/* pkey */ }, }, },
1061	{ .name	    = "poll", .timeout = true, },
1062	{ .name	    = "ppoll", .timeout = true, },
1063	{ .name	    = "prctl",
1064	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
1065			   .strtoul   = STUL_STRARRAY,
1066			   .parm      = &strarray__prctl_options, },
1067		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
1068		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
1069	{ .name	    = "pread", .alias = "pread64", },
1070	{ .name	    = "preadv", .alias = "pread", },
1071	{ .name	    = "prlimit64",
1072	  .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
1073	{ .name	    = "pwrite", .alias = "pwrite64", },
1074	{ .name	    = "readlinkat",
1075	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1076	{ .name	    = "recvfrom",
1077	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1078	{ .name	    = "recvmmsg",
1079	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1080	{ .name	    = "recvmsg",
1081	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1082	{ .name	    = "renameat",
1083	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
1084		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
1085	{ .name	    = "renameat2",
1086	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
1087		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
1088		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
1089	{ .name	    = "rt_sigaction",
1090	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1091	{ .name	    = "rt_sigprocmask",
1092	  .arg = { [0] = STRARRAY(how, sighow), }, },
1093	{ .name	    = "rt_sigqueueinfo",
1094	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1095	{ .name	    = "rt_tgsigqueueinfo",
1096	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1097	{ .name	    = "sched_setscheduler",
1098	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
1099	{ .name	    = "seccomp",
1100	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP,	   /* op */ },
1101		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
1102	{ .name	    = "select", .timeout = true, },
1103	{ .name	    = "sendfile", .alias = "sendfile64", },
1104	{ .name	    = "sendmmsg",
1105	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1106	{ .name	    = "sendmsg",
1107	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
1108	{ .name	    = "sendto",
1109	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
1110		   [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
1111	{ .name	    = "set_tid_address", .errpid = true, },
1112	{ .name	    = "setitimer",
1113	  .arg = { [0] = STRARRAY(which, itimers), }, },
1114	{ .name	    = "setrlimit",
1115	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
1116	{ .name	    = "socket",
1117	  .arg = { [0] = STRARRAY(family, socket_families),
1118		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
1119		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
1120	{ .name	    = "socketpair",
1121	  .arg = { [0] = STRARRAY(family, socket_families),
1122		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
1123		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
1124	{ .name	    = "stat", .alias = "newstat", },
1125	{ .name	    = "statx",
1126	  .arg = { [0] = { .scnprintf = SCA_FDAT,	 /* fdat */ },
1127		   [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } ,
1128		   [3] = { .scnprintf = SCA_STATX_MASK,	 /* mask */ }, }, },
1129	{ .name	    = "swapoff",
1130	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
1131	{ .name	    = "swapon",
1132	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
1133	{ .name	    = "symlinkat",
1134	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1135	{ .name	    = "sync_file_range",
1136	  .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
1137	{ .name	    = "tgkill",
1138	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1139	{ .name	    = "tkill",
1140	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
1141	{ .name     = "umount2", .alias = "umount",
1142	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
1143	{ .name	    = "uname", .alias = "newuname", },
1144	{ .name	    = "unlinkat",
1145	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
1146	{ .name	    = "utimensat",
1147	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
1148	{ .name	    = "wait4",	    .errpid = true,
1149	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
1150	{ .name	    = "waitid",	    .errpid = true,
1151	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
1152};
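// Note: syscall_fmt__find() below looks this table up with bsearch(), so
// entries must be kept sorted by ->name; an out-of-order entry would
// silently fail to match.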
1153
1154static int syscall_fmt__cmp(const void *name, const void *fmtp)
1155{
1156	const struct syscall_fmt *fmt = fmtp;
1157	return strcmp(name, fmt->name);
1158}
1159
1160static struct syscall_fmt *__syscall_fmt__find(struct syscall_fmt *fmts, const int nmemb, const char *name)
1161{
1162	return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
1163}
1164
1165static struct syscall_fmt *syscall_fmt__find(const char *name)
1166{
1167	const int nmemb = ARRAY_SIZE(syscall_fmts);
1168	return __syscall_fmt__find(syscall_fmts, nmemb, name);
1169}
1170
1171static struct syscall_fmt *__syscall_fmt__find_by_alias(struct syscall_fmt *fmts, const int nmemb, const char *alias)
1172{
1173	int i;
1174
1175	for (i = 0; i < nmemb; ++i) {
1176		if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
1177			return &fmts[i];
1178	}
1179
1180	return NULL;
1181}
1182
1183static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
1184{
1185	const int nmemb = ARRAY_SIZE(syscall_fmts);
1186	return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
1187}
1188
1189/*
1190 * is_exit: is this "exit" or "exit_group"?
1191 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
1192 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
1193 * nonexistent: Just a hole in the syscall table, syscall id not allocated
1194 */
1195struct syscall {
1196	struct tep_event    *tp_format;
1197	int		    nr_args;
1198	int		    args_size;
1199	struct {
1200		struct bpf_program *sys_enter,
1201				   *sys_exit;
1202	}		    bpf_prog;
1203	bool		    is_exit;
1204	bool		    is_open;
1205	bool		    nonexistent;
1206	struct tep_format_field *args;
1207	const char	    *name;
1208	struct syscall_fmt  *fmt;
1209	struct syscall_arg_fmt *arg_fmt;
1210};
1211
1212/*
1213 * Must match what is in the BPF program:
1214 *
1215 * tools/perf/examples/bpf/augmented_raw_syscalls.c
1216 */
1217struct bpf_map_syscall_entry {
1218	bool	enabled;
1219	u16	string_args_len[6];
1220};
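// A sketch of the matching value type on the BPF side (the authoritative
// definition lives in the file named above; field order, sizes and padding
// must agree or map lookups will read garbage):
//
//	struct syscall {
//		bool	enabled;
//		u16	string_args_len[6];
//	};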
1221
1222/*
1223 * We need this 'calculated' boolean because in some cases we really
1224 * don't know what the duration of a syscall is, for instance, when we
1225 * start a session and some threads are already waiting for a syscall to
1226 * finish, say 'poll', in which case all we can do is print "( ? )" for
1227 * the duration and for the start timestamp.
1228 */
1229static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
1230{
1231	double duration = (double)t / NSEC_PER_MSEC;
1232	size_t printed = fprintf(fp, "(");
1233
1234	if (!calculated)
1235		printed += fprintf(fp, "         ");
1236	else if (duration >= 1.0)
1237		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
1238	else if (duration >= 0.01)
1239		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
1240	else
1241		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
1242	return printed + fprintf(fp, "): ");
1243}
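// Sample output (illustrative): a 2.345ms syscall prints "( 2.345 ms): " in
// red, a 0.042ms one in yellow, anything faster in the normal color, and an
// uncalculated duration prints "(         ): " with the field left blank.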
1244
1245/**
1246 * filename.ptr: The filename char pointer that will be vfs_getname'd
1247 * filename.entry_str_pos: Where to insert the string translated from
1248 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
1249 * ret_scnprintf: syscall args may set this to a different syscall return
1250 *                formatter, for instance, fcntl may return fds, file flags, etc.
1251 */
1252struct thread_trace {
1253	u64		  entry_time;
1254	bool		  entry_pending;
1255	unsigned long	  nr_events;
1256	unsigned long	  pfmaj, pfmin;
1257	char		  *entry_str;
1258	double		  runtime_ms;
1259	size_t		  (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
1260	struct {
1261		unsigned long ptr;
1262		short int     entry_str_pos;
1263		bool	      pending_open;
1264		unsigned int  namelen;
1265		char	      *name;
1266	} filename;
1267	struct {
1268		int	      max;
1269		struct file   *table;
1270	} files;
1271
1272	struct intlist *syscall_stats;
1273};
1274
1275static struct thread_trace *thread_trace__new(void)
1276{
1277	struct thread_trace *ttrace =  zalloc(sizeof(struct thread_trace));
1278
1279	if (ttrace) {
1280		ttrace->files.max = -1;
1281		ttrace->syscall_stats = intlist__new(NULL);
1282	}
1283
1284	return ttrace;
1285}
1286
1287static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
1288{
1289	struct thread_trace *ttrace;
1290
1291	if (thread == NULL)
1292		goto fail;
1293
1294	if (thread__priv(thread) == NULL)
1295		thread__set_priv(thread, thread_trace__new());
1296
1297	if (thread__priv(thread) == NULL)
1298		goto fail;
1299
1300	ttrace = thread__priv(thread);
1301	++ttrace->nr_events;
1302
1303	return ttrace;
1304fail:
1305	color_fprintf(fp, PERF_COLOR_RED,
1306		      "WARNING: not enough memory, dropping samples!\n");
1307	return NULL;
1308}
1309
1310
1311void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
1312				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
1313{
1314	struct thread_trace *ttrace = thread__priv(arg->thread);
1315
1316	ttrace->ret_scnprintf = ret_scnprintf;
1317}
1318
1319#define TRACE_PFMAJ		(1 << 0)
1320#define TRACE_PFMIN		(1 << 1)
1321
1322static const size_t trace__entry_str_size = 2048;
1323
1324static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
1325{
1326	if (fd < 0)
1327		return NULL;
1328
1329	if (fd > ttrace->files.max) {
1330		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
1331
1332		if (nfiles == NULL)
1333			return NULL;
1334
1335		if (ttrace->files.max != -1) {
1336			memset(nfiles + ttrace->files.max + 1, 0,
1337			       (fd - ttrace->files.max) * sizeof(struct file));
1338		} else {
1339			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
1340		}
1341
1342		ttrace->files.table = nfiles;
1343		ttrace->files.max   = fd;
1344	}
1345
1346	return ttrace->files.table + fd;
1347}
1348
1349struct file *thread__files_entry(struct thread *thread, int fd)
1350{
1351	return thread_trace__files_entry(thread__priv(thread), fd);
1352}
1353
1354static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
1355{
1356	struct thread_trace *ttrace = thread__priv(thread);
1357	struct file *file = thread_trace__files_entry(ttrace, fd);
1358
1359	if (file != NULL) {
1360		struct stat st;
1361		if (stat(pathname, &st) == 0)
1362			file->dev_maj = major(st.st_rdev);
1363		file->pathname = strdup(pathname);
1364		if (file->pathname)
1365			return 0;
1366	}
1367
1368	return -1;
1369}
1370
1371static int thread__read_fd_path(struct thread *thread, int fd)
1372{
1373	char linkname[PATH_MAX], pathname[PATH_MAX];
1374	struct stat st;
1375	int ret;
1376
1377	if (thread->pid_ == thread->tid) {
1378		scnprintf(linkname, sizeof(linkname),
1379			  "/proc/%d/fd/%d", thread->pid_, fd);
1380	} else {
1381		scnprintf(linkname, sizeof(linkname),
1382			  "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
1383	}
1384
1385	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
1386		return -1;
1387
1388	ret = readlink(linkname, pathname, sizeof(pathname));
1389
1390	if (ret < 0 || ret > st.st_size)
1391		return -1;
1392
1393	pathname[ret] = '\0';
1394	return trace__set_fd_pathname(thread, fd, pathname);
1395}
1396
1397static const char *thread__fd_path(struct thread *thread, int fd,
1398				   struct trace *trace)
1399{
1400	struct thread_trace *ttrace = thread__priv(thread);
1401
1402	if (ttrace == NULL || trace->fd_path_disabled)
1403		return NULL;
1404
1405	if (fd < 0)
1406		return NULL;
1407
1408	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
1409		if (!trace->live)
1410			return NULL;
1411		++trace->stats.proc_getname;
1412		if (thread__read_fd_path(thread, fd))
1413			return NULL;
1414	}
1415
1416	return ttrace->files.table[fd].pathname;
1417}
1418
1419size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
1420{
1421	int fd = arg->val;
1422	size_t printed = scnprintf(bf, size, "%d", fd);
1423	const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1424
1425	if (path)
1426		printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1427
1428	return printed;
1429}
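// Example (illustrative): for fd 3 resolving to /etc/passwd via
// thread__fd_path(), this yields "3</etc/passwd>"; when the path can't be
// resolved (e.g. not a live session), it is just "3".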
1430
1431size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
1432{
1433	size_t printed = scnprintf(bf, size, "%d", fd);
1434	struct thread *thread = machine__find_thread(trace->host, pid, pid);
1435
1436	if (thread) {
1437		const char *path = thread__fd_path(thread, fd, trace);
1438
1439		if (path)
1440			printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1441
1442		thread__put(thread);
1443	}
1444
1445	return printed;
1446}
1447
1448static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
1449					      struct syscall_arg *arg)
1450{
1451	int fd = arg->val;
1452	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
1453	struct thread_trace *ttrace = thread__priv(arg->thread);
1454
1455	if (ttrace && fd >= 0 && fd <= ttrace->files.max)
1456		zfree(&ttrace->files.table[fd].pathname);
1457
1458	return printed;
1459}
1460
1461static void thread__set_filename_pos(struct thread *thread, const char *bf,
1462				     unsigned long ptr)
1463{
1464	struct thread_trace *ttrace = thread__priv(thread);
1465
1466	ttrace->filename.ptr = ptr;
1467	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1468}
1469
1470static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
1471{
1472	struct augmented_arg *augmented_arg = arg->augmented.args;
1473	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
1474	/*
1475	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
1476	 * we would have two strings, each prefixed by its size.
1477	 */
1478	int consumed = sizeof(*augmented_arg) + augmented_arg->size;
1479
1480	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
1481	arg->augmented.size -= consumed;
1482
1483	return printed;
1484}
1485
1486static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1487					      struct syscall_arg *arg)
1488{
1489	unsigned long ptr = arg->val;
1490
1491	if (arg->augmented.args)
1492		return syscall_arg__scnprintf_augmented_string(arg, bf, size);
1493
1494	if (!arg->trace->vfs_getname)
1495		return scnprintf(bf, size, "%#x", ptr);
1496
1497	thread__set_filename_pos(arg->thread, bf, ptr);
1498	return 0;
1499}
1500
1501static bool trace__filter_duration(struct trace *trace, double t)
1502{
1503	return t < (trace->duration_filter * NSEC_PER_MSEC);
1504}
1505
1506static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1507{
1508	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1509
1510	return fprintf(fp, "%10.3f ", ts);
1511}
1512
1513/*
1514 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
1515 * using ttrace->entry_time for a thread that receives a sys_exit without
1516 * first having received a sys_enter (a "poll" issued before the tracing
1517 * session starts, or a sys_enter lost due to ring buffer overflow).
1518 */
1519static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1520{
1521	if (tstamp > 0)
1522		return __trace__fprintf_tstamp(trace, tstamp, fp);
1523
1524	return fprintf(fp, "         ? ");
1525}
1526
1527static bool done = false;
1528static bool interrupted = false;
1529
1530static void sig_handler(int sig)
1531{
1532	done = true;
1533	interrupted = sig == SIGINT;
1534}
1535
1536static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
1537{
1538	size_t printed = 0;
1539
1540	if (trace->multiple_threads) {
1541		if (trace->show_comm)
1542			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1543		printed += fprintf(fp, "%d ", thread->tid);
1544	}
1545
1546	return printed;
1547}
1548
1549static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1550					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
1551{
1552	size_t printed = 0;
1553
1554	if (trace->show_tstamp)
1555		printed = trace__fprintf_tstamp(trace, tstamp, fp);
1556	if (trace->show_duration)
1557		printed += fprintf_duration(duration, duration_calculated, fp);
1558	return printed + trace__fprintf_comm_tid(trace, thread, fp);
1559}
1560
1561static int trace__process_event(struct trace *trace, struct machine *machine,
1562				union perf_event *event, struct perf_sample *sample)
1563{
1564	int ret = 0;
1565
1566	switch (event->header.type) {
1567	case PERF_RECORD_LOST:
1568		color_fprintf(trace->output, PERF_COLOR_RED,
1569			      "LOST %" PRIu64 " events!\n", event->lost.lost);
1570		ret = machine__process_lost_event(machine, event, sample);
1571		break;
1572	default:
1573		ret = machine__process_event(machine, event, sample);
1574		break;
1575	}
1576
1577	return ret;
1578}
1579
1580static int trace__tool_process(struct perf_tool *tool,
1581			       union perf_event *event,
1582			       struct perf_sample *sample,
1583			       struct machine *machine)
1584{
1585	struct trace *trace = container_of(tool, struct trace, tool);
1586	return trace__process_event(trace, machine, event, sample);
1587}
1588
1589static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
1590{
1591	struct machine *machine = vmachine;
1592
1593	if (machine->kptr_restrict_warned)
1594		return NULL;
1595
1596	if (symbol_conf.kptr_restrict) {
1597		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
1598			   "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
1599			   "Kernel samples will not be resolved.\n");
1600		machine->kptr_restrict_warned = true;
1601		return NULL;
1602	}
1603
1604	return machine__resolve_kernel_addr(vmachine, addrp, modp);
1605}
1606
1607static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
1608{
1609	int err = symbol__init(NULL);
1610
1611	if (err)
1612		return err;
1613
1614	trace->host = machine__new_host();
1615	if (trace->host == NULL)
1616		return -ENOMEM;
1617
1618	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
1619	if (err < 0)
1620		goto out;
1621
1622	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1623					    evlist->core.threads, trace__tool_process, false,
1624					    1);
1625out:
1626	if (err)
1627		symbol__exit();
1628
1629	return err;
1630}
1631
1632static void trace__symbols__exit(struct trace *trace)
1633{
1634	machine__exit(trace->host);
1635	trace->host = NULL;
1636
1637	symbol__exit();
1638}
1639
1640static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
1641{
1642	int idx;
1643
1644	if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
1645		nr_args = sc->fmt->nr_args;
1646
1647	sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
1648	if (sc->arg_fmt == NULL)
1649		return -1;
1650
1651	for (idx = 0; idx < nr_args; ++idx) {
1652		if (sc->fmt)
1653			sc->arg_fmt[idx] = sc->fmt->arg[idx];
1654	}
1655
1656	sc->nr_args = nr_args;
1657	return 0;
1658}
1659
1660static struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
1661	{ .name = "msr",	.scnprintf = SCA_X86_MSR,	  .strtoul = STUL_X86_MSR,	   },
1662	{ .name = "vector",	.scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, },
1663};
1664
1665static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
1666{
1667       const struct syscall_arg_fmt *fmt = fmtp;
1668       return strcmp(name, fmt->name);
1669}
1670
1671static struct syscall_arg_fmt *
1672__syscall_arg_fmt__find_by_name(struct syscall_arg_fmt *fmts, const int nmemb, const char *name)
1673{
1674       return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
1675}
1676
1677static struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
1678{
1679       const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
1680       return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
1681}
1682
1683static struct tep_format_field *
1684syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
1685{
1686	struct tep_format_field *last_field = NULL;
1687	int len;
1688
1689	for (; field; field = field->next, ++arg) {
1690		last_field = field;
1691
1692		if (arg->scnprintf)
1693			continue;
1694
1695		len = strlen(field->name);
1696
1697		if (strcmp(field->type, "const char *") == 0 &&
1698		    ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
1699		     strstr(field->name, "path") != NULL))
1700			arg->scnprintf = SCA_FILENAME;
1701		else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
1702			arg->scnprintf = SCA_PTR;
1703		else if (strcmp(field->type, "pid_t") == 0)
1704			arg->scnprintf = SCA_PID;
1705		else if (strcmp(field->type, "umode_t") == 0)
1706			arg->scnprintf = SCA_MODE_T;
1707		else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
1708			arg->scnprintf = SCA_CHAR_ARRAY;
1709			arg->nr_entries = field->arraylen;
1710		} else if ((strcmp(field->type, "int") == 0 ||
1711			  strcmp(field->type, "unsigned int") == 0 ||
1712			  strcmp(field->type, "long") == 0) &&
1713			 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
1714			/*
1715			 * /sys/kernel/tracing/events/syscalls/sys_enter*
1716			 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
1717			 * 65 int
1718			 * 23 unsigned int
1719			 * 7 unsigned long
1720			 */
1721			arg->scnprintf = SCA_FD;
1722		} else {
1723			struct syscall_arg_fmt *fmt = syscall_arg_fmt__find_by_name(field->name);
1724
1725			if (fmt) {
1726				arg->scnprintf = fmt->scnprintf;
1727				arg->strtoul   = fmt->strtoul;
1728			}
1729		}
1730	}
1731
1732	return last_field;
1733}
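// Illustration of the heuristics above: a "const char *filename" field gets
// SCA_FILENAME, pointer fields or fields with "addr" in the name get
// SCA_PTR, "pid_t" gets SCA_PID, "umode_t" gets SCA_MODE_T, and an
// int/unsigned int/long field whose name ends in "fd" gets SCA_FD, so most
// tracepoint args get a sane beautifier without an explicit fmt entry.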
1734
1735static int syscall__set_arg_fmts(struct syscall *sc)
1736{
1737	struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);
1738
1739	if (last_field)
1740		sc->args_size = last_field->offset + last_field->size;
1741
1742	return 0;
1743}
1744
1745static int trace__read_syscall_info(struct trace *trace, int id)
1746{
1747	char tp_name[128];
1748	struct syscall *sc;
1749	const char *name = syscalltbl__name(trace->sctbl, id);
1750
1751#ifdef HAVE_SYSCALL_TABLE_SUPPORT
1752	if (trace->syscalls.table == NULL) {
1753		trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
1754		if (trace->syscalls.table == NULL)
1755			return -ENOMEM;
1756	}
1757#else
1758	if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
1759		// When using libaudit we don't know beforehand what the max syscall id is
1760		struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
1761
1762		if (table == NULL)
1763			return -ENOMEM;
1764
1765		// Zero the whole table if brand new, otherwise just the newly added entries
1766		if (trace->syscalls.table == NULL)
1767			memset(table, 0, (id + 1) * sizeof(*sc));
1768		else
1769			memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
1770
1771		trace->syscalls.table	      = table;
1772		trace->sctbl->syscalls.max_id = id;
1773	}
1774#endif
1775	sc = trace->syscalls.table + id;
1776	if (sc->nonexistent)
1777		return 0;
1778
1779	if (name == NULL) {
1780		sc->nonexistent = true;
1781		return 0;
1782	}
1783
1784	sc->name = name;
1785	sc->fmt  = syscall_fmt__find(sc->name);
1786
1787	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
1788	sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1789
1790	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
1791		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
1792		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1793	}
1794
1795	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
1796		return -ENOMEM;
1797
1798	if (IS_ERR(sc->tp_format))
1799		return PTR_ERR(sc->tp_format);
1800
1801	sc->args = sc->tp_format->format.fields;
1802	/*
1803	 * We need to check for and discard the first field, '__syscall_nr'
1804	 * or 'nr', which holds the syscall number and is needless here.
1805	 * Drop it if present; it does not exist on older kernels.
1806	 */
1807	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
1808		sc->args = sc->args->next;
1809		--sc->nr_args;
1810	}
1811
1812	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
1813	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
1814
1815	return syscall__set_arg_fmts(sc);
1816}
1817
1818static int evsel__init_tp_arg_scnprintf(struct evsel *evsel)
1819{
1820	struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
1821
1822	if (fmt != NULL) {
1823		syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields);
1824		return 0;
1825	}
1826
1827	return -ENOMEM;
1828}
1829
1830static int intcmp(const void *a, const void *b)
1831{
1832	const int *one = a, *another = b;
1833
1834	return *one - *another;
1835}
1836
1837static int trace__validate_ev_qualifier(struct trace *trace)
1838{
1839	int err = 0;
1840	bool printed_invalid_prefix = false;
1841	struct str_node *pos;
1842	size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);
1843
1844	trace->ev_qualifier_ids.entries = malloc(nr_allocated *
1845						 sizeof(trace->ev_qualifier_ids.entries[0]));
1846
1847	if (trace->ev_qualifier_ids.entries == NULL) {
1848		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
1849		       trace->output);
1850		err = -EINVAL;
1851		goto out;
1852	}
1853
1854	strlist__for_each_entry(pos, trace->ev_qualifier) {
1855		const char *sc = pos->s;
1856		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;
1857
1858		if (id < 0) {
1859			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
1860			if (id >= 0)
1861				goto matches;
1862
1863			if (!printed_invalid_prefix) {
1864				pr_debug("Skipping unknown syscalls: ");
1865				printed_invalid_prefix = true;
1866			} else {
1867				pr_debug(", ");
1868			}
1869
1870			pr_debug("%s", sc);
1871			continue;
1872		}
1873matches:
1874		trace->ev_qualifier_ids.entries[nr_used++] = id;
1875		if (match_next == -1)
1876			continue;
1877
1878		while (1) {
1879			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
1880			if (id < 0)
1881				break;
1882			if (nr_allocated == nr_used) {
1883				void *entries;
1884
1885				nr_allocated += 8;
1886				entries = realloc(trace->ev_qualifier_ids.entries,
1887						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
1888				if (entries == NULL) {
1889					err = -ENOMEM;
1890					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
1891					goto out_free;
1892				}
1893				trace->ev_qualifier_ids.entries = entries;
1894			}
1895			trace->ev_qualifier_ids.entries[nr_used++] = id;
1896		}
1897	}
1898
1899	trace->ev_qualifier_ids.nr = nr_used;
1900	qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
1901out:
1902	if (printed_invalid_prefix)
1903		pr_debug("\n");
1904	return err;
1905out_free:
1906	zfree(&trace->ev_qualifier_ids.entries);
1907	trace->ev_qualifier_ids.nr = 0;
1908	goto out;
1909}
1910
1911static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
1912{
1913	bool in_ev_qualifier;
1914
1915	if (trace->ev_qualifier_ids.nr == 0)
1916		return true;
1917
1918	in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
1919				  trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;
1920
1921	if (in_ev_qualifier)
1922	       return !trace->not_ev_qualifier;
1923
1924	return trace->not_ev_qualifier;
1925}
1926
1927/*
1928 * args is to be interpreted as a series of longs but we need to handle
1929 * 8-byte unaligned accesses. args points to raw_data within the event
1930 * and raw_data is guaranteed to be 8-byte unaligned because it is
1931 * preceded by raw_size which is a u32. So we need to copy args to a temp
1932 * variable to read it. Most notably this avoids extended load instructions
1933 * on unaligned addresses.
1934 */
1935unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
1936{
1937	unsigned long val;
1938	unsigned char *p = arg->args + sizeof(unsigned long) * idx;
1939
1940	memcpy(&val, p, sizeof(val));
1941	return val;
1942}
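// Example (illustrative): on a 64-bit host, fetching the third argument
// (idx = 2) copies 8 bytes from args + 16; going through memcpy() keeps the
// compiler from emitting a potentially-faulting aligned load on this
// unaligned buffer.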
1943
1944static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
1945				      struct syscall_arg *arg)
1946{
1947	if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
1948		return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);
1949
1950	return scnprintf(bf, size, "arg%d: ", arg->idx);
1951}
1952
1953/*
1954 * Check if the value is in fact zero, i.e. mask whatever needs masking, such
1955 * as mount 'flags' argument that needs ignoring some magic flag, see comment
1956 * in tools/perf/trace/beauty/mount_flags.c
1957 */
1958static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val)
1959{
1960	if (fmt && fmt->mask_val)
1961		return fmt->mask_val(arg, val);
1962
1963	return val;
1964}
1965
1966static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size,
1967					     struct syscall_arg *arg, unsigned long val)
1968{
1969	if (fmt && fmt->scnprintf) {
1970		arg->val = val;
1971		if (fmt->parm)
1972			arg->parm = fmt->parm;
1973		return fmt->scnprintf(bf, size, arg);
1974	}
1975	return scnprintf(bf, size, "%ld", val);
1976}
1977
1978static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
1979				      unsigned char *args, void *augmented_args, int augmented_args_size,
1980				      struct trace *trace, struct thread *thread)
1981{
1982	size_t printed = 0;
1983	unsigned long val;
1984	u8 bit = 1;
1985	struct syscall_arg arg = {
1986		.args	= args,
1987		.augmented = {
1988			.size = augmented_args_size,
1989			.args = augmented_args,
1990		},
1991		.idx	= 0,
1992		.mask	= 0,
1993		.trace  = trace,
1994		.thread = thread,
1995		.show_string_prefix = trace->show_string_prefix,
1996	};
1997	struct thread_trace *ttrace = thread__priv(thread);
1998
1999	/*
2000	 * Things like fcntl will set this in its 'cmd' formatter to pick the
2001	 * right formatter for the return value (an fd? file flags?), which is
2002	 * not needed for syscalls that always return a given type, say an fd.
2003	 */
2004	ttrace->ret_scnprintf = NULL;
2005
2006	if (sc->args != NULL) {
2007		struct tep_format_field *field;
2008
2009		for (field = sc->args; field;
2010		     field = field->next, ++arg.idx, bit <<= 1) {
2011			if (arg.mask & bit)
2012				continue;
2013
2014			arg.fmt = &sc->arg_fmt[arg.idx];
2015			val = syscall_arg__val(&arg, arg.idx);
2016			/*
2017			 * Some syscall args need some mask, most don't and
2018			 * return val untouched.
2019			 */
2020			val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val);
2021
2022			/*
2023			 * Suppress this argument if its value is zero and
2024			 * we don't have a string associated with it in a
2025			 * strarray.
2026			 */
2027			if (val == 0 &&
2028			    !trace->show_zeros &&
2029			    !(sc->arg_fmt &&
2030			      (sc->arg_fmt[arg.idx].show_zero ||
2031			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
2032			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
2033			      sc->arg_fmt[arg.idx].parm))
2034				continue;
2035
2036			printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
2037
2038			if (trace->show_arg_names)
2039				printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
2040
2041			printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx],
2042								  bf + printed, size - printed, &arg, val);
2043		}
2044	} else if (IS_ERR(sc->tp_format)) {
2045		/*
2046		 * If we managed to read the tracepoint /format file, then we
2047		 * may end up not having any args, like with gettid(), so only
2048		 * print the raw args when we didn't manage to read it.
2049		 */
2050		while (arg.idx < sc->nr_args) {
2051			if (arg.mask & bit)
2052				goto next_arg;
2053			val = syscall_arg__val(&arg, arg.idx);
2054			if (printed)
2055				printed += scnprintf(bf + printed, size - printed, ", ");
2056			printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
2057			printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val);
2058next_arg:
2059			++arg.idx;
2060			bit <<= 1;
2061		}
2062	}
2063
2064	return printed;
2065}
2066
2067typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
2068				  union perf_event *event,
2069				  struct perf_sample *sample);
2070
2071static struct syscall *trace__syscall_info(struct trace *trace,
2072					   struct evsel *evsel, int id)
2073{
2074	int err = 0;
2075
2076	if (id < 0) {
2077
2078		/*
2079		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
2080		 * before that, leaving at a higher verbosity level till that is
2081		 * explained. Reproduced with plain ftrace with:
2082		 *
2083		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
2084		 * grep "NR -1 " /t/trace_pipe
2085		 *
2086		 * After generating some load on the machine.
2087		 */
2088		if (verbose > 1) {
2089			static u64 n;
2090			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
2091				id, evsel__name(evsel), ++n);
2092		}
2093		return NULL;
2094	}
2095
2096	err = -EINVAL;
2097
2098#ifdef HAVE_SYSCALL_TABLE_SUPPORT
2099	if (id > trace->sctbl->syscalls.max_id) {
2100#else
2101	if (id >= trace->sctbl->syscalls.max_id) {
2102		/*
2103		 * With libaudit we don't know beforehand what is the max_id,
2104		 * so we let trace__read_syscall_info() figure that out as we
2105		 * go on reading syscalls.
2106		 */
2107		err = trace__read_syscall_info(trace, id);
2108		if (err)
2109#endif
2110		goto out_cant_read;
2111	}
2112
2113	if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
2114	    (err = trace__read_syscall_info(trace, id)) != 0)
2115		goto out_cant_read;
2116
2117	if (trace->syscalls.table[id].name == NULL) {
2118		if (trace->syscalls.table[id].nonexistent)
2119			return NULL;
2120		goto out_cant_read;
2121	}
2122
2123	return &trace->syscalls.table[id];
2124
2125out_cant_read:
2126	if (verbose > 0) {
2127		char sbuf[STRERR_BUFSIZE];
2128		fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
2129		if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
2130			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
2131		fputs(" information\n", trace->output);
2132	}
2133	return NULL;
2134}
2135
2136struct syscall_stats {
2137	struct stats stats;
2138	u64	     nr_failures;
2139	int	     max_errno;
2140	u32	     *errnos;
2141};
2142
2143static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace,
2144				 int id, struct perf_sample *sample, long err, bool errno_summary)
2145{
2146	struct int_node *inode;
2147	struct syscall_stats *stats;
2148	u64 duration = 0;
2149
2150	inode = intlist__findnew(ttrace->syscall_stats, id);
2151	if (inode == NULL)
2152		return;
2153
2154	stats = inode->priv;
2155	if (stats == NULL) {
2156		stats = malloc(sizeof(*stats));
2157		if (stats == NULL)
2158			return;
2159
2160		stats->nr_failures = 0;
2161		stats->max_errno   = 0;
2162		stats->errnos	   = NULL;
2163		init_stats(&stats->stats);
2164		inode->priv = stats;
2165	}
2166
2167	if (ttrace->entry_time && sample->time > ttrace->entry_time)
2168		duration = sample->time - ttrace->entry_time;
2169
2170	update_stats(&stats->stats, duration);
2171
2172	if (err < 0) {
2173		++stats->nr_failures;
2174
2175		if (!errno_summary)
2176			return;
2177
2178		err = -err;
2179		if (err > stats->max_errno) {
2180			u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32));
2181
2182			if (new_errnos) {
2183				memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32));
2184			} else {
2185				pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n",
2186					 thread__comm_str(thread), thread->pid_, thread->tid);
2187				return;
2188			}
2189
2190			stats->errnos = new_errnos;
2191			stats->max_errno = err;
2192		}
2193
2194		++stats->errnos[err - 1];
2195	}
2196}
2197
2198static int trace__printf_interrupted_entry(struct trace *trace)
2199{
2200	struct thread_trace *ttrace;
2201	size_t printed;
2202	int len;
2203
2204	if (trace->failure_only || trace->current == NULL)
2205		return 0;
2206
2207	ttrace = thread__priv(trace->current);
2208
2209	if (!ttrace->entry_pending)
2210		return 0;
2211
2212	printed  = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
2213	printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
2214
2215	if (len < trace->args_alignment - 4)
2216		printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
2217
2218	printed += fprintf(trace->output, " ...\n");
2219
2220	ttrace->entry_pending = false;
2221	++trace->nr_events_printed;
2222
2223	return printed;
2224}
2225
2226static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
2227				 struct perf_sample *sample, struct thread *thread)
2228{
2229	int printed = 0;
2230
2231	if (trace->print_sample) {
2232		double ts = (double)sample->time / NSEC_PER_MSEC;
2233
2234		printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2235				   evsel__name(evsel), ts,
2236				   thread__comm_str(thread),
2237				   sample->pid, sample->tid, sample->cpu);
2238	}
2239
2240	return printed;
2241}
2242
2243static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
2244{
2245	void *augmented_args = NULL;
2246	/*
2247	 * For now, with BPF raw_augmented we hook into raw_syscalls:sys_enter
2248	 * and there we get all 6 syscall args plus the tracepoint common fields
2249	 * that get calculated at the start, and the syscall_nr (another long).
2250	 * So we check if that is the case and, if so, don't look past
2251	 * sc->args_size but always past the full raw_syscalls:sys_enter
2252	 * payload, which is fixed.
2253	 *
2254	 * We'll revisit this later to pass sc->args_size to the BPF augmenter
2255	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
2256	 * copies only what we need for each syscall, like what happens when we
2257	 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
2258	 * traffic to just what is needed for each syscall.
2259	 */
2260	int args_size = raw_augmented_args_size ?: sc->args_size;
2261
2262	*augmented_args_size = sample->raw_size - args_size;
2263	if (*augmented_args_size > 0)
2264		augmented_args = sample->raw_data + args_size;
2265
2266	return augmented_args;
2267}
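/*
 * Editor's note (illustration, not original source): the raw_data layout
 * the function above assumes, with args_size being either sc->args_size or
 * the fixed raw_syscalls:sys_enter size:
 *
 *   sample->raw_data                                  + sample->raw_size
 *   |<------------ args_size ------------>|<- augmented_args_size ->|
 *   | common fields, syscall_nr, the args | augmented payload       |
 *
 * i.e. *augmented_args_size = raw_size - args_size, and the augmented
 * payload (strings, sockaddrs, etc), when present, starts right after
 * the fixed-size part.
 */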
2268
2269static void syscall__exit(struct syscall *sc)
2270{
2271	if (!sc)
2272		return;
2273
2274	free(sc->arg_fmt);
2275}
2276
2277static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
2278			    union perf_event *event __maybe_unused,
2279			    struct perf_sample *sample)
2280{
2281	char *msg;
2282	void *args;
2283	int printed = 0;
2284	struct thread *thread;
2285	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2286	int augmented_args_size = 0;
2287	void *augmented_args = NULL;
2288	struct syscall *sc = trace__syscall_info(trace, evsel, id);
2289	struct thread_trace *ttrace;
2290
2291	if (sc == NULL)
2292		return -1;
2293
2294	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2295	ttrace = thread__trace(thread, trace->output);
2296	if (ttrace == NULL)
2297		goto out_put;
2298
2299	trace__fprintf_sample(trace, evsel, sample, thread);
2300
2301	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2302
2303	if (ttrace->entry_str == NULL) {
2304		ttrace->entry_str = malloc(trace__entry_str_size);
2305		if (!ttrace->entry_str)
2306			goto out_put;
2307	}
2308
2309	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
2310		trace__printf_interrupted_entry(trace);
2311	/*
2312	 * If this is raw_syscalls.sys_enter, then it always comes with the 6
2313	 * possible arguments, even if the syscall being handled, say "openat",
2314	 * uses only 4. This breaks syscall__augmented_args()'s check, as we
2315	 * calculate syscall->args_size from each syscalls:sys_enter_NAME
2316	 * tracefs format file; when handling openat we then get 6 args for the
2317	 * raw_syscalls:sys_enter event where we expected just 4, mistakenly
2318	 * taking the extra 2 u64 args for the augmented filename. So check here
2319	 * and avoid using augmented syscalls when the evsel is the raw_syscalls one.
2320	 */
2321	if (evsel != trace->syscalls.events.sys_enter)
2322		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2323	ttrace->entry_time = sample->time;
2324	msg = ttrace->entry_str;
2325	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
2326
2327	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
2328					   args, augmented_args, augmented_args_size, trace, thread);
2329
2330	if (sc->is_exit) {
2331		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
2332			int alignment = 0;
2333
2334			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2335			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2336			if (trace->args_alignment > printed)
2337				alignment = trace->args_alignment - printed;
2338			fprintf(trace->output, "%*s= ?\n", alignment, " ");
2339		}
2340	} else {
2341		ttrace->entry_pending = true;
2342		/* See trace__vfs_getname & trace__sys_exit */
2343		ttrace->filename.pending_open = false;
2344	}
2345
2346	if (trace->current != thread) {
2347		thread__put(trace->current);
2348		trace->current = thread__get(thread);
2349	}
2350	err = 0;
2351out_put:
2352	thread__put(thread);
2353	return err;
2354}
2355
2356static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
2357				    struct perf_sample *sample)
2358{
2359	struct thread_trace *ttrace;
2360	struct thread *thread;
2361	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
2362	struct syscall *sc = trace__syscall_info(trace, evsel, id);
2363	char msg[1024];
2364	void *args, *augmented_args = NULL;
2365	int augmented_args_size;
2366
2367	if (sc == NULL)
2368		return -1;
2369
2370	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2371	ttrace = thread__trace(thread, trace->output);
2372	/*
2373	 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
2374	 * and the rest of the beautifiers accessing it via struct syscall_arg touch it.
2375	 */
2376	if (ttrace == NULL)
2377		goto out_put;
2378
2379	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
2380	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
2381	syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
2382	fprintf(trace->output, "%s", msg);
2383	err = 0;
2384out_put:
2385	thread__put(thread);
2386	return err;
2387}
2388
2389static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
2390				    struct perf_sample *sample,
2391				    struct callchain_cursor *cursor)
2392{
2393	struct addr_location al;
2394	int max_stack = evsel->core.attr.sample_max_stack ?
2395			evsel->core.attr.sample_max_stack :
2396			trace->max_stack;
2397	int err;
2398
2399	if (machine__resolve(trace->host, &al, sample) < 0)
2400		return -1;
2401
2402	err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
2403	addr_location__put(&al);
2404	return err;
2405}
2406
2407static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
2408{
2409	/* TODO: user-configurable print_opts */
2410	const unsigned int print_opts = EVSEL__PRINT_SYM |
2411				        EVSEL__PRINT_DSO |
2412				        EVSEL__PRINT_UNKNOWN_AS_ADDR;
2413
2414	return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output);
2415}
2416
2417static const char *errno_to_name(struct evsel *evsel, int err)
2418{
2419	struct perf_env *env = evsel__env(evsel);
2420	const char *arch_name = perf_env__arch(env);
2421
2422	return arch_syscalls__strerrno(arch_name, err);
2423}
2424
2425static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
2426			   union perf_event *event __maybe_unused,
2427			   struct perf_sample *sample)
2428{
2429	long ret;
2430	u64 duration = 0;
2431	bool duration_calculated = false;
2432	struct thread *thread;
2433	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
2434	int alignment = trace->args_alignment;
2435	struct syscall *sc = trace__syscall_info(trace, evsel, id);
2436	struct thread_trace *ttrace;
2437
2438	if (sc == NULL)
2439		return -1;
2440
2441	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2442	ttrace = thread__trace(thread, trace->output);
2443	if (ttrace == NULL)
2444		goto out_put;
2445
2446	trace__fprintf_sample(trace, evsel, sample, thread);
2447
2448	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
2449
2450	if (trace->summary)
2451		thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);
2452
2453	if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2454		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
2455		ttrace->filename.pending_open = false;
2456		++trace->stats.vfs_getname;
2457	}
2458
2459	if (ttrace->entry_time) {
2460		duration = sample->time - ttrace->entry_time;
2461		if (trace__filter_duration(trace, duration))
2462			goto out;
2463		duration_calculated = true;
2464	} else if (trace->duration_filter)
2465		goto out;
2466
2467	if (sample->callchain) {
2468		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2469		if (callchain_ret == 0) {
2470			if (callchain_cursor.nr < trace->min_stack)
2471				goto out;
2472			callchain_ret = 1;
2473		}
2474	}
2475
2476	if (trace->summary_only || (ret >= 0 && trace->failure_only))
2477		goto out;
2478
2479	trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
2480
2481	if (ttrace->entry_pending) {
2482		printed = fprintf(trace->output, "%s", ttrace->entry_str);
2483	} else {
2484		printed += fprintf(trace->output, " ... [");
2485		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2486		printed += 9;
2487		printed += fprintf(trace->output, "]: %s()", sc->name);
2488	}
2489
2490	printed++; /* the closing ')' */
2491
2492	if (alignment > printed)
2493		alignment -= printed;
2494	else
2495		alignment = 0;
2496
2497	fprintf(trace->output, ")%*s= ", alignment, " ");
2498
2499	if (sc->fmt == NULL) {
2500		if (ret < 0)
2501			goto errno_print;
2502signed_print:
2503		fprintf(trace->output, "%ld", ret);
2504	} else if (ret < 0) {
2505errno_print: {
2506		char bf[STRERR_BUFSIZE];
2507		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
2508			   *e = errno_to_name(evsel, -ret);
2509
2510		fprintf(trace->output, "-1 %s (%s)", e, emsg);
2511	}
2512	} else if (ret == 0 && sc->fmt->timeout)
2513		fprintf(trace->output, "0 (Timeout)");
2514	else if (ttrace->ret_scnprintf) {
2515		char bf[1024];
2516		struct syscall_arg arg = {
2517			.val	= ret,
2518			.thread	= thread,
2519			.trace	= trace,
2520		};
2521		ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
2522		ttrace->ret_scnprintf = NULL;
2523		fprintf(trace->output, "%s", bf);
2524	} else if (sc->fmt->hexret)
2525		fprintf(trace->output, "%#lx", ret);
2526	else if (sc->fmt->errpid) {
2527		struct thread *child = machine__find_thread(trace->host, ret, ret);
2528
2529		if (child != NULL) {
2530			fprintf(trace->output, "%ld", ret);
2531			if (child->comm_set)
2532				fprintf(trace->output, " (%s)", thread__comm_str(child));
2533			thread__put(child);
2534		}
2535	} else
2536		goto signed_print;
2537
2538	fputc('\n', trace->output);
2539
2540	/*
2541	 * We only consider an 'event' for the sake of --max-events a non-filtered
2542	 * sys_enter + sys_exit and other tracepoint events.
2543	 */
2544	if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2545		interrupted = true;
2546
2547	if (callchain_ret > 0)
2548		trace__fprintf_callchain(trace, sample);
2549	else if (callchain_ret < 0)
2550		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
2551out:
2552	ttrace->entry_pending = false;
2553	err = 0;
2554out_put:
2555	thread__put(thread);
2556	return err;
2557}
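/*
 * Editor's sketch (not original source): the errno branch above boiled down
 * to plain libc, assuming the XSI strerror_r() that returns an int. perf's
 * str_error_r() is a reentrant wrapper with the same intent, and
 * errno_to_name() additionally maps the number to its E* name per arch.
 */
#if 0
static void fprintf_failed_ret(FILE *fp, const char *ename, long ret)
{
	char bf[128];

	/* e.g. for ret == -ENOENT: "-1 ENOENT (No such file or directory)" */
	if (ret < 0 && strerror_r((int)-ret, bf, sizeof(bf)) == 0)
		fprintf(fp, "-1 %s (%s)", ename, bf);
}
#endif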
2558
2559static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
2560			      union perf_event *event __maybe_unused,
2561			      struct perf_sample *sample)
2562{
2563	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2564	struct thread_trace *ttrace;
2565	size_t filename_len, entry_str_len, to_move;
2566	ssize_t remaining_space;
2567	char *pos;
2568	const char *filename = evsel__rawptr(evsel, sample, "pathname");
2569
2570	if (!thread)
2571		goto out;
2572
2573	ttrace = thread__priv(thread);
2574	if (!ttrace)
2575		goto out_put;
2576
2577	filename_len = strlen(filename);
2578	if (filename_len == 0)
2579		goto out_put;
2580
2581	if (ttrace->filename.namelen < filename_len) {
2582		char *f = realloc(ttrace->filename.name, filename_len + 1);
2583
2584		if (f == NULL)
2585			goto out_put;
2586
2587		ttrace->filename.namelen = filename_len;
2588		ttrace->filename.name = f;
2589	}
2590
2591	strcpy(ttrace->filename.name, filename);
2592	ttrace->filename.pending_open = true;
2593
2594	if (!ttrace->filename.ptr)
2595		goto out_put;
2596
2597	entry_str_len = strlen(ttrace->entry_str);
2598	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
2599	if (remaining_space <= 0)
2600		goto out_put;
2601
2602	if (filename_len > (size_t)remaining_space) {
2603		filename += filename_len - remaining_space;
2604		filename_len = remaining_space;
2605	}
2606
2607	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
2608	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
2609	memmove(pos + filename_len, pos, to_move);
2610	memcpy(pos, filename, filename_len);
2611
2612	ttrace->filename.ptr = 0;
2613	ttrace->filename.entry_str_pos = 0;
2614out_put:
2615	thread__put(thread);
2616out:
2617	return 0;
2618}
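/*
 * Editor's note (illustration, not original source): the memmove()/memcpy()
 * pair above splices the resolved pathname into the already formatted entry
 * string at the position recorded by the filename beautifier, roughly
 * turning a pending "openat(AT_FDCWD, , O_RDONLY" into
 * "openat(AT_FDCWD, /etc/passwd, O_RDONLY" once probe:vfs_getname fires.
 */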
2619
2620static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
2621				     union perf_event *event __maybe_unused,
2622				     struct perf_sample *sample)
2623{
2624	u64 runtime = evsel__intval(evsel, sample, "runtime");
2625	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
2626	struct thread *thread = machine__findnew_thread(trace->host,
2627							sample->pid,
2628							sample->tid);
2629	struct thread_trace *ttrace = thread__trace(thread, trace->output);
2630
2631	if (ttrace == NULL)
2632		goto out_dump;
2633
2634	ttrace->runtime_ms += runtime_ms;
2635	trace->runtime_ms += runtime_ms;
2636out_put:
2637	thread__put(thread);
2638	return 0;
2639
2640out_dump:
2641	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 "\n",
2642	       evsel->name,
2643	       evsel__strval(evsel, sample, "comm"),
2644	       (pid_t)evsel__intval(evsel, sample, "pid"),
2645	       runtime,
2646	       evsel__intval(evsel, sample, "vruntime"));
2647	goto out_put;
2648}
2649
2650static int bpf_output__printer(enum binary_printer_ops op,
2651			       unsigned int val, void *extra __maybe_unused, FILE *fp)
2652{
2653	unsigned char ch = (unsigned char)val;
2654
2655	switch (op) {
2656	case BINARY_PRINT_CHAR_DATA:
2657		return fprintf(fp, "%c", isprint(ch) ? ch : '.');
2658	case BINARY_PRINT_DATA_BEGIN:
2659	case BINARY_PRINT_LINE_BEGIN:
2660	case BINARY_PRINT_ADDR:
2661	case BINARY_PRINT_NUM_DATA:
2662	case BINARY_PRINT_NUM_PAD:
2663	case BINARY_PRINT_SEP:
2664	case BINARY_PRINT_CHAR_PAD:
2665	case BINARY_PRINT_LINE_END:
2666	case BINARY_PRINT_DATA_END:
2667	default:
2668		break;
2669	}
2670
2671	return 0;
2672}
2673
2674static void bpf_output__fprintf(struct trace *trace,
2675				struct perf_sample *sample)
2676{
2677	binary__fprintf(sample->raw_data, sample->raw_size, 8,
2678			bpf_output__printer, NULL, trace->output);
2679	++trace->nr_events_printed;
2680}
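/*
 * Editor's sketch (not original source): the printer above amounts to a
 * classic "printable byte or dot" dump of the BPF output payload; a plain
 * libc equivalent for illustration (needs <ctype.h> and <stdio.h>):
 */
#if 0
static void dump_printable(FILE *fp, const unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; ++i)
		fputc(isprint(buf[i]) ? buf[i] : '.', fp);
	fputc('\n', fp);
}
#endif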
2681
2682static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
2683				       struct thread *thread, void *augmented_args, int augmented_args_size)
2684{
2685	char bf[2048];
2686	size_t size = sizeof(bf);
2687	struct tep_format_field *field = evsel->tp_format->format.fields;
2688	struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel);
2689	size_t printed = 0;
2690	unsigned long val;
2691	u8 bit = 1;
2692	struct syscall_arg syscall_arg = {
2693		.augmented = {
2694			.size = augmented_args_size,
2695			.args = augmented_args,
2696		},
2697		.idx	= 0,
2698		.mask	= 0,
2699		.trace  = trace,
2700		.thread = thread,
2701		.show_string_prefix = trace->show_string_prefix,
2702	};
2703
2704	for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) {
2705		if (syscall_arg.mask & bit)
2706			continue;
2707
2708		syscall_arg.len = 0;
2709		syscall_arg.fmt = arg;
2710		if (field->flags & TEP_FIELD_IS_ARRAY) {
2711			int offset = field->offset;
2712
2713			if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2714				offset = format_field__intval(field, sample, evsel->needs_swap);
2715				syscall_arg.len = offset >> 16;
2716				offset &= 0xffff;
2717			}
2718
2719			val = (uintptr_t)(sample->raw_data + offset);
2720		} else
2721			val = format_field__intval(field, sample, evsel->needs_swap);
2722		/*
2723		 * Some syscall args need some mask, most don't and
2724		 * return val untouched.
2725		 */
2726		val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val);
2727
2728		/*
2729		 * Suppress this argument if its value is zero
2730		 * and we don't have a string associated in a
2731		 * strarray for it.
2732		 */
2733		if (val == 0 &&
2734		    !trace->show_zeros &&
2735		    !((arg->show_zero ||
2736		       arg->scnprintf == SCA_STRARRAY ||
2737		       arg->scnprintf == SCA_STRARRAYS) &&
2738		      arg->parm))
2739			continue;
2740
2741		printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
2742
2743		/*
2744		 * XXX Perhaps we should have a show_tp_arg_names,
2745		 * leaving show_arg_names just for syscalls?
2746		 */
2747		if (1 || trace->show_arg_names)
2748			printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
2749
2750		printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
2751	}
2752
2753	return printed + fprintf(trace->output, "%s", bf);
2754}
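/*
 * Editor's note (illustration, not original source): for a dynamic array
 * field (TEP_FIELD_IS_DYNAMIC) the value read from the sample packs length
 * and offset into 32 bits, which the loop above unpacks as:
 *
 *	len    = value >> 16;	   // high 16 bits
 *	offset = value & 0xffff;   // low 16 bits
 *
 * the actual bytes then live at sample->raw_data + offset.
 */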
2755
2756static int trace__event_handler(struct trace *trace, struct evsel *evsel,
2757				union perf_event *event __maybe_unused,
2758				struct perf_sample *sample)
2759{
2760	struct thread *thread;
2761	int callchain_ret = 0;
2762	/*
2763	 * Check if we called perf_evsel__disable(evsel) due to, for instance,
2764	 * this event's max_events having been hit and this is an entry coming
2765	 * from the ring buffer that we should discard, since the max events
2766	 * have already been considered/printed.
2767	 */
2768	if (evsel->disabled)
2769		return 0;
2770
2771	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2772
2773	if (sample->callchain) {
2774		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2775		if (callchain_ret == 0) {
2776			if (callchain_cursor.nr < trace->min_stack)
2777				goto out;
2778			callchain_ret = 1;
2779		}
2780	}
2781
2782	trace__printf_interrupted_entry(trace);
2783	trace__fprintf_tstamp(trace, sample->time, trace->output);
2784
2785	if (trace->trace_syscalls && trace->show_duration)
2786		fprintf(trace->output, "(         ): ");
2787
2788	if (thread)
2789		trace__fprintf_comm_tid(trace, thread, trace->output);
2790
2791	if (evsel == trace->syscalls.events.augmented) {
2792		int id = perf_evsel__sc_tp_uint(evsel, id, sample);
2793		struct syscall *sc = trace__syscall_info(trace, evsel, id);
2794
2795		if (sc) {
2796			fprintf(trace->output, "%s(", sc->name);
2797			trace__fprintf_sys_enter(trace, evsel, sample);
2798			fputc(')', trace->output);
2799			goto newline;
2800		}
2801
2802		/*
2803		 * XXX: Not having the associated syscall info or not finding/adding
2804		 * 	the thread should never happen, but if it does...
2805		 * 	fall thru and print it as a bpf_output event.
2806		 */
2807	}
2808
2809	fprintf(trace->output, "%s(", evsel->name);
2810
2811	if (evsel__is_bpf_output(evsel)) {
2812		bpf_output__fprintf(trace, sample);
2813	} else if (evsel->tp_format) {
2814		if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
2815		    trace__fprintf_sys_enter(trace, evsel, sample)) {
2816			if (trace->libtraceevent_print) {
2817				event_format__fprintf(evsel->tp_format, sample->cpu,
2818						      sample->raw_data, sample->raw_size,
2819						      trace->output);
2820			} else {
2821				trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
2822			}
2823		}
2824	}
2825
2826newline:
2827	fprintf(trace->output, ")\n");
2828
2829	if (callchain_ret > 0)
2830		trace__fprintf_callchain(trace, sample);
2831	else if (callchain_ret < 0)
2832		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
2833
2834	++trace->nr_events_printed;
2835
2836	if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
2837		evsel__disable(evsel);
2838		evsel__close(evsel);
2839	}
2840out:
2841	thread__put(thread);
2842	return 0;
2843}
2844
2845static void print_location(FILE *f, struct perf_sample *sample,
2846			   struct addr_location *al,
2847			   bool print_dso, bool print_sym)
2848{
2849
2850	if ((verbose > 0 || print_dso) && al->map)
2851		fprintf(f, "%s@", al->map->dso->long_name);
2852
2853	if ((verbose > 0 || print_sym) && al->sym)
2854		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
2855			al->addr - al->sym->start);
2856	else if (al->map)
2857		fprintf(f, "0x%" PRIx64, al->addr);
2858	else
2859		fprintf(f, "0x%" PRIx64, sample->addr);
2860}
2861
2862static int trace__pgfault(struct trace *trace,
2863			  struct evsel *evsel,
2864			  union perf_event *event __maybe_unused,
2865			  struct perf_sample *sample)
2866{
2867	struct thread *thread;
2868	struct addr_location al;
2869	char map_type = 'd';
2870	struct thread_trace *ttrace;
2871	int err = -1;
2872	int callchain_ret = 0;
2873
2874	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2875
2876	if (sample->callchain) {
2877		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
2878		if (callchain_ret == 0) {
2879			if (callchain_cursor.nr < trace->min_stack)
2880				goto out_put;
2881			callchain_ret = 1;
2882		}
2883	}
2884
2885	ttrace = thread__trace(thread, trace->output);
2886	if (ttrace == NULL)
2887		goto out_put;
2888
2889	if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
2890		ttrace->pfmaj++;
2891	else
2892		ttrace->pfmin++;
2893
2894	if (trace->summary_only)
2895		goto out;
2896
2897	thread__find_symbol(thread, sample->cpumode, sample->ip, &al);
2898
2899	trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
2900
2901	fprintf(trace->output, "%sfault [",
2902		evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
2903		"maj" : "min");
2904
2905	print_location(trace->output, sample, &al, false, true);
2906
2907	fprintf(trace->output, "] => ");
2908
2909	thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2910
2911	if (!al.map) {
2912		thread__find_symbol(thread, sample->cpumode, sample->addr, &al);
2913
2914		if (al.map)
2915			map_type = 'x';
2916		else
2917			map_type = '?';
2918	}
2919
2920	print_location(trace->output, sample, &al, true, false);
2921
2922	fprintf(trace->output, " (%c%c)\n", map_type, al.level);
2923
2924	if (callchain_ret > 0)
2925		trace__fprintf_callchain(trace, sample);
2926	else if (callchain_ret < 0)
2927		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
2928
2929	++trace->nr_events_printed;
2930out:
2931	err = 0;
2932out_put:
2933	thread__put(thread);
2934	return err;
2935}
2936
2937static void trace__set_base_time(struct trace *trace,
2938				 struct evsel *evsel,
2939				 struct perf_sample *sample)
2940{
2941	/*
2942	 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
2943	 * and don't use sample->time unconditionally; we may end up having
2944	 * some other event in the future without PERF_SAMPLE_TIME for good
2945	 * reason, i.e. we may not be interested in its timestamps, just in
2946	 * it taking place, picking some piece of information when it
2947	 * appears in our event stream (vfs_getname comes to mind).
2948	 */
2949	if (trace->base_time == 0 && !trace->full_time &&
2950	    (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
2951		trace->base_time = sample->time;
2952}
2953
2954static int trace__process_sample(struct perf_tool *tool,
2955				 union perf_event *event,
2956				 struct perf_sample *sample,
2957				 struct evsel *evsel,
2958				 struct machine *machine __maybe_unused)
2959{
2960	struct trace *trace = container_of(tool, struct trace, tool);
2961	struct thread *thread;
2962	int err = 0;
2963
2964	tracepoint_handler handler = evsel->handler;
2965
2966	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2967	if (thread && thread__is_filtered(thread))
2968		goto out;
2969
2970	trace__set_base_time(trace, evsel, sample);
2971
2972	if (handler) {
2973		++trace->nr_events;
2974		handler(trace, evsel, event, sample);
2975	}
2976out:
2977	thread__put(thread);
2978	return err;
2979}
2980
2981static int trace__record(struct trace *trace, int argc, const char **argv)
2982{
2983	unsigned int rec_argc, i, j;
2984	const char **rec_argv;
2985	const char * const record_args[] = {
2986		"record",
2987		"-R",
2988		"-m", "1024",
2989		"-c", "1",
2990	};
2991	pid_t pid = getpid();
2992	char *filter = asprintf__tp_filter_pids(1, &pid);
2993	const char * const sc_args[] = { "-e", };
2994	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
2995	const char * const majpf_args[] = { "-e", "major-faults" };
2996	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
2997	const char * const minpf_args[] = { "-e", "minor-faults" };
2998	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
2999	int err = -1;
3000
3001	/* +3 is for the event string below and the pid filter */
3002	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 +
3003		majpf_args_nr + minpf_args_nr + argc;
3004	rec_argv = calloc(rec_argc + 1, sizeof(char *));
3005
3006	if (rec_argv == NULL || filter == NULL)
3007		goto out_free;
3008
3009	j = 0;
3010	for (i = 0; i < ARRAY_SIZE(record_args); i++)
3011		rec_argv[j++] = record_args[i];
3012
3013	if (trace->trace_syscalls) {
3014		for (i = 0; i < sc_args_nr; i++)
3015			rec_argv[j++] = sc_args[i];
3016
3017		/* event string may be different for older kernels - e.g., RHEL6 */
3018		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
3019			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
3020		else if (is_valid_tracepoint("syscalls:sys_enter"))
3021			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
3022		else {
3023			pr_err("Neither raw_syscalls nor syscalls events exist.\n");
3024			goto out_free;
3025		}
3026	}
3027
3028	rec_argv[j++] = "--filter";
3029	rec_argv[j++] = filter;
3030
3031	if (trace->trace_pgfaults & TRACE_PFMAJ)
3032		for (i = 0; i < majpf_args_nr; i++)
3033			rec_argv[j++] = majpf_args[i];
3034
3035	if (trace->trace_pgfaults & TRACE_PFMIN)
3036		for (i = 0; i < minpf_args_nr; i++)
3037			rec_argv[j++] = minpf_args[i];
3038
3039	for (i = 0; i < (unsigned int)argc; i++)
3040		rec_argv[j++] = argv[i];
3041
3042	err = cmd_record(j, rec_argv);
3043out_free:
3044	free(filter);
3045	free(rec_argv);
3046	return err;
3047}
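/*
 * Editor's note (illustration, not original source): with syscalls and both
 * page fault classes enabled, the argv assembled above is equivalent to:
 *
 *   perf record -R -m 1024 -c 1 \
 *	-e raw_syscalls:sys_enter,raw_syscalls:sys_exit \
 *	--filter <expression excluding perf's own pid> \
 *	-e major-faults -e minor-faults <workload and its args>
 */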
3048
3049static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
3050
3051static bool evlist__add_vfs_getname(struct evlist *evlist)
3052{
3053	bool found = false;
3054	struct evsel *evsel, *tmp;
3055	struct parse_events_error err;
3056	int ret;
3057
3058	bzero(&err, sizeof(err));
3059	ret = parse_events(evlist, "probe:vfs_getname*", &err);
3060	if (ret) {
3061		free(err.str);
3062		free(err.help);
3063		free(err.first_str);
3064		free(err.first_help);
3065		return false;
3066	}
3067
3068	evlist__for_each_entry_safe(evlist, evsel, tmp) {
3069		if (!strstarts(evsel__name(evsel), "probe:vfs_getname"))
3070			continue;
3071
3072		if (evsel__field(evsel, "pathname")) {
3073			evsel->handler = trace__vfs_getname;
3074			found = true;
3075			continue;
3076		}
3077
3078		list_del_init(&evsel->core.node);
3079		evsel->evlist = NULL;
3080		evsel__delete(evsel);
3081	}
3082
3083	return found;
3084}
3085
3086static struct evsel *evsel__new_pgfault(u64 config)
3087{
3088	struct evsel *evsel;
3089	struct perf_event_attr attr = {
3090		.type = PERF_TYPE_SOFTWARE,
3091		.mmap_data = 1,
3092	};
3093
3094	attr.config = config;
3095	attr.sample_period = 1;
3096
3097	event_attr_init(&attr);
3098
3099	evsel = evsel__new(&attr);
3100	if (evsel)
3101		evsel->handler = trace__pgfault;
3102
3103	return evsel;
3104}
3105
3106static void evlist__free_syscall_tp_fields(struct evlist *evlist)
3107{
3108	struct evsel *evsel;
3109
3110	evlist__for_each_entry(evlist, evsel) {
3111		struct evsel_trace *et = evsel->priv;
3112
3113		if (!et || !evsel->tp_format || strcmp(evsel->tp_format->system, "syscalls"))
3114			continue;
3115
3116		free(et->fmt);
3117		free(et);
3118	}
3119}
3120
3121static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
3122{
3123	const u32 type = event->header.type;
3124	struct evsel *evsel;
3125
3126	if (type != PERF_RECORD_SAMPLE) {
3127		trace__process_event(trace, trace->host, event, sample);
3128		return;
3129	}
3130
3131	evsel = evlist__id2evsel(trace->evlist, sample->id);
3132	if (evsel == NULL) {
3133		fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
3134		return;
3135	}
3136
3137	if (evswitch__discard(&trace->evswitch, evsel))
3138		return;
3139
3140	trace__set_base_time(trace, evsel, sample);
3141
3142	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
3143	    sample->raw_data == NULL) {
3144		fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
3145		       evsel__name(evsel), sample->tid,
3146		       sample->cpu, sample->raw_size);
3147	} else {
3148		tracepoint_handler handler = evsel->handler;
3149		handler(trace, evsel, event, sample);
3150	}
3151
3152	if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
3153		interrupted = true;
3154}
3155
3156static int trace__add_syscall_newtp(struct trace *trace)
3157{
3158	int ret = -1;
3159	struct evlist *evlist = trace->evlist;
3160	struct evsel *sys_enter, *sys_exit;
3161
3162	sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
3163	if (sys_enter == NULL)
3164		goto out;
3165
3166	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
3167		goto out_delete_sys_enter;
3168
3169	sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
3170	if (sys_exit == NULL)
3171		goto out_delete_sys_enter;
3172
3173	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
3174		goto out_delete_sys_exit;
3175
3176	evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
3177	evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
3178
3179	evlist__add(evlist, sys_enter);
3180	evlist__add(evlist, sys_exit);
3181
3182	if (callchain_param.enabled && !trace->kernel_syscallchains) {
3183		/*
3184		 * We're interested only in the user space callchain
3185		 * leading to the syscall, allow overriding that for
3186		 * debugging reasons using --kernel_syscall_callchains
3187		 */
3188		sys_exit->core.attr.exclude_callchain_kernel = 1;
3189	}
3190
3191	trace->syscalls.events.sys_enter = sys_enter;
3192	trace->syscalls.events.sys_exit  = sys_exit;
3193
3194	ret = 0;
3195out:
3196	return ret;
3197
3198out_delete_sys_exit:
3199	evsel__delete_priv(sys_exit);
3200out_delete_sys_enter:
3201	evsel__delete_priv(sys_enter);
3202	goto out;
3203}
3204
3205static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
3206{
3207	int err = -1;
3208	struct evsel *sys_exit;
3209	char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
3210						trace->ev_qualifier_ids.nr,
3211						trace->ev_qualifier_ids.entries);
3212
3213	if (filter == NULL)
3214		goto out_enomem;
3215
3216	if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3217		sys_exit = trace->syscalls.events.sys_exit;
3218		err = evsel__append_tp_filter(sys_exit, filter);
3219	}
3220
3221	free(filter);
3222out:
3223	return err;
3224out_enomem:
3225	errno = ENOMEM;
3226	goto out;
3227}
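/*
 * Editor's note (illustration, not original source): for `-e open,close` on
 * x86_64 the generated tracepoint filter is, modulo the exact ids, something
 * like "id == 2 || id == 3", and with the qualifier negated (`-e \!open`) a
 * conjunction of "id != ..." terms instead.
 */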
3228
3229#ifdef HAVE_LIBBPF_SUPPORT
3230static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
3231{
3232	if (trace->bpf_obj == NULL)
3233		return NULL;
3234
3235	return bpf_object__find_map_by_name(trace->bpf_obj, name);
3236}
3237
3238static void trace__set_bpf_map_filtered_pids(struct trace *trace)
3239{
3240	trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
3241}
3242
3243static void trace__set_bpf_map_syscalls(struct trace *trace)
3244{
3245	trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
3246	trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
3247	trace->syscalls.prog_array.sys_exit  = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
3248}
3249
3250static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
3251{
3252	if (trace->bpf_obj == NULL)
3253		return NULL;
3254
3255	return bpf_object__find_program_by_title(trace->bpf_obj, name);
3256}
3257
3258static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
3259							const char *prog_name, const char *type)
3260{
3261	struct bpf_program *prog;
3262
3263	if (prog_name == NULL) {
3264		char default_prog_name[256];
3265		scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
3266		prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3267		if (prog != NULL)
3268			goto out_found;
3269		if (sc->fmt && sc->fmt->alias) {
3270			scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
3271			prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3272			if (prog != NULL)
3273				goto out_found;
3274		}
3275		goto out_unaugmented;
3276	}
3277
3278	prog = trace__find_bpf_program_by_title(trace, prog_name);
3279
3280	if (prog != NULL) {
3281out_found:
3282		return prog;
3283	}
3284
3285	pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
3286		 prog_name, type, sc->name);
3287out_unaugmented:
3288	return trace->syscalls.unaugmented_prog;
3289}
3290
3291static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
3292{
3293	struct syscall *sc = trace__syscall_info(trace, NULL, id);
3294
3295	if (sc == NULL)
3296		return;
3297
3298	sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3299	sc->bpf_prog.sys_exit  = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit  : NULL,  "exit");
3300}
3301
3302static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
3303{
3304	struct syscall *sc = trace__syscall_info(trace, NULL, id);
3305	return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
3306}
3307
3308static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
3309{
3310	struct syscall *sc = trace__syscall_info(trace, NULL, id);
3311	return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
3312}
3313
3314static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
3315{
3316	struct syscall *sc = trace__syscall_info(trace, NULL, id);
3317	int arg = 0;
3318
3319	if (sc == NULL)
3320		goto out;
3321
3322	for (; arg < sc->nr_args; ++arg) {
3323		entry->string_args_len[arg] = 0;
3324		if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
3325			/* Should be set like strace -s strsize */
3326			entry->string_args_len[arg] = PATH_MAX;
3327		}
3328	}
3329out:
3330	for (; arg < 6; ++arg)
3331		entry->string_args_len[arg] = 0;
3332}
3333static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
3334{
3335	int fd = bpf_map__fd(trace->syscalls.map);
3336	struct bpf_map_syscall_entry value = {
3337		.enabled = !trace->not_ev_qualifier,
3338	};
3339	int err = 0;
3340	size_t i;
3341
3342	for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
3343		int key = trace->ev_qualifier_ids.entries[i];
3344
3345		if (value.enabled) {
3346			trace__init_bpf_map_syscall_args(trace, key, &value);
3347			trace__init_syscall_bpf_progs(trace, key);
3348		}
3349
3350		err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
3351		if (err)
3352			break;
3353	}
3354
3355	return err;
3356}
3357
3358static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
3359{
3360	int fd = bpf_map__fd(trace->syscalls.map);
3361	struct bpf_map_syscall_entry value = {
3362		.enabled = enabled,
3363	};
3364	int err = 0, key;
3365
3366	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3367		if (enabled)
3368			trace__init_bpf_map_syscall_args(trace, key, &value);
3369
3370		err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
3371		if (err)
3372			break;
3373	}
3374
3375	return err;
3376}
3377
3378static int trace__init_syscalls_bpf_map(struct trace *trace)
3379{
3380	bool enabled = true;
3381
3382	if (trace->ev_qualifier_ids.nr)
3383		enabled = trace->not_ev_qualifier;
3384
3385	return __trace__init_syscalls_bpf_map(trace, enabled);
3386}
3387
3388static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
3389{
3390	struct tep_format_field *field, *candidate_field;
3391	int id;
3392
3393	/*
3394	 * We're only interested in syscalls that have a pointer:
3395	 */
3396	for (field = sc->args; field; field = field->next) {
3397		if (field->flags & TEP_FIELD_IS_POINTER)
3398			goto try_to_find_pair;
3399	}
3400
3401	return NULL;
3402
3403try_to_find_pair:
3404	for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
3405		struct syscall *pair = trace__syscall_info(trace, NULL, id);
3406		struct bpf_program *pair_prog;
3407		bool is_candidate = false;
3408
3409		if (pair == NULL || pair == sc ||
3410		    pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
3411			continue;
3412
3413		for (field = sc->args, candidate_field = pair->args;
3414		     field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
3415			bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
3416			     candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;
3417
3418			if (is_pointer) {
3419			       if (!candidate_is_pointer) {
3420					// The candidate just doesn't copy our pointer arg; it might copy other pointers we want.
3421					continue;
3422			       }
3423			} else {
3424				if (candidate_is_pointer) {
3425					// The candidate might copy a pointer we don't have, skip it.
3426					goto next_candidate;
3427				}
3428				continue;
3429			}
3430
3431			if (strcmp(field->type, candidate_field->type))
3432				goto next_candidate;
3433
3434			is_candidate = true;
3435		}
3436
3437		if (!is_candidate)
3438			goto next_candidate;
3439
3440		/*
3441		 * Check if the tentative pair syscall augmenter has more pointers; if it has,
3442		 * then it may be collecting that and we then can't use it, as it would collect
3443		 * more than what is common to the two syscalls.
3444		 */
3445		if (candidate_field) {
3446			for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
3447				if (candidate_field->flags & TEP_FIELD_IS_POINTER)
3448					goto next_candidate;
3449		}
3450
3451		pair_prog = pair->bpf_prog.sys_enter;
3452		/*
3453		 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
3454		 * have been searched for, so search it here and if it returns the
3455		 * unaugmented one, then ignore it, otherwise we'll reuse that BPF
3456		 * program for a filtered syscall on a non-filtered one.
3457		 *
3458		 * For instance, we have "!syscalls:sys_enter_renameat" and that is
3459		 * useful for "renameat2".
3460		 */
3461		if (pair_prog == NULL) {
3462			pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3463			if (pair_prog == trace->syscalls.unaugmented_prog)
3464				goto next_candidate;
3465		}
3466
3467		pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
3468		return pair_prog;
3469	next_candidate:
3470		continue;
3471	}
3472
3473	return NULL;
3474}
3475
3476static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
3477{
3478	int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
3479	    map_exit_fd  = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
3480	int err = 0, key;
3481
3482	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3483		int prog_fd;
3484
3485		if (!trace__syscall_enabled(trace, key))
3486			continue;
3487
3488		trace__init_syscall_bpf_progs(trace, key);
3489
3490		// It'll get at least the "!raw_syscalls:unaugmented"
3491		prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
3492		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3493		if (err)
3494			break;
3495		prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
3496		err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
3497		if (err)
3498			break;
3499	}
3500
3501	/*
3502	 * Now let's do a second pass looking for enabled syscalls without
3503	 * an augmenter that have a signature that is a superset of another
3504	 * syscall with an augmenter so that we can auto-reuse it.
3505	 *
3506	 * I.e. if we have an augmenter for the "open" syscall that has
3507	 * this signature:
3508	 *
3509	 *   int open(const char *pathname, int flags, mode_t mode);
3510	 *
3511	 * That will collect just the first string argument; then we
3512	 * can reuse it for the 'creat' syscall, that has this signature:
3513	 *
3514	 *   int creat(const char *pathname, mode_t mode);
3515	 *
3516	 * and for:
3517	 *
3518	 *   int stat(const char *pathname, struct stat *statbuf);
3519	 *   int lstat(const char *pathname, struct stat *statbuf);
3520	 *
3521	 * Because the 'open' augmenter will collect the first arg as a string,
3522	 * and leave alone all the other args, which already helps with
3523	 * beautifying 'stat' and 'lstat''s pathname arg.
3524	 *
3525	 * Then, in time, when 'stat' gets an augmenter that collects both
3526	 * first and second args (this one on the raw_syscalls:sys_exit prog
3527	 * array tail call), then that one will be used.
3528	 */
3529	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
3530		struct syscall *sc = trace__syscall_info(trace, NULL, key);
3531		struct bpf_program *pair_prog;
3532		int prog_fd;
3533
3534		if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
3535			continue;
3536
3537		/*
3538		 * For now we're just reusing the sys_enter prog, and if it
3539		 * already has an augmenter, we don't need to find one.
3540		 */
3541		if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
3542			continue;
3543
3544		/*
3545		 * Look at all the other syscalls for one that has a signature
3546		 * that is close enough that we can share:
3547		 */
3548		pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
3549		if (pair_prog == NULL)
3550			continue;
3551
3552		sc->bpf_prog.sys_enter = pair_prog;
3553
3554		/*
3555		 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter
3556		 * with the fd for the program we're reusing:
3557		 */
3558		prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
3559		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
3560		if (err)
3561			break;
3562	}
3563
3564
3565	return err;
3566}
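/*
 * Editor's sketch (not original source): the core of the wiring above, a
 * BPF_MAP_TYPE_PROG_ARRAY slot per syscall id pointing at an augmenter's
 * program fd, so the entry point can bpf_tail_call() into it:
 */
#if 0
#include <bpf/bpf.h>

static int wire_syscall_augmenter(int prog_array_fd, int syscall_id, int prog_fd)
{
	/* BPF_ANY: create the slot if needed or update it in place */
	return bpf_map_update_elem(prog_array_fd, &syscall_id, &prog_fd, BPF_ANY);
}
#endif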
3567
3568static void trace__delete_augmented_syscalls(struct trace *trace)
3569{
3570	struct evsel *evsel, *tmp;
3571
3572	evlist__remove(trace->evlist, trace->syscalls.events.augmented);
3573	evsel__delete(trace->syscalls.events.augmented);
3574	trace->syscalls.events.augmented = NULL;
3575
3576	evlist__for_each_entry_safe(trace->evlist, tmp, evsel) {
3577		if (evsel->bpf_obj == trace->bpf_obj) {
3578			evlist__remove(trace->evlist, evsel);
3579			evsel__delete(evsel);
3580		}
3581
3582	}
3583
3584	bpf_object__close(trace->bpf_obj);
3585	trace->bpf_obj = NULL;
3586}
3587#else // HAVE_LIBBPF_SUPPORT
3588static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused,
3589						   const char *name __maybe_unused)
3590{
3591	return NULL;
3592}
3593
3594static void trace__set_bpf_map_filtered_pids(struct trace *trace __maybe_unused)
3595{
3596}
3597
3598static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused)
3599{
3600}
3601
3602static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
3603{
3604	return 0;
3605}
3606
3607static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
3608{
3609	return 0;
3610}
3611
3612static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
3613							    const char *name __maybe_unused)
3614{
3615	return NULL;
3616}
3617
3618static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
3619{
3620	return 0;
3621}
3622
3623static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused)
3624{
3625}
3626#endif // HAVE_LIBBPF_SUPPORT
3627
3628static bool trace__only_augmented_syscalls_evsels(struct trace *trace)
3629{
3630	struct evsel *evsel;
3631
3632	evlist__for_each_entry(trace->evlist, evsel) {
3633		if (evsel == trace->syscalls.events.augmented ||
3634		    evsel->bpf_obj == trace->bpf_obj)
3635			continue;
3636
3637		return false;
3638	}
3639
3640	return true;
3641}
3642
3643static int trace__set_ev_qualifier_filter(struct trace *trace)
3644{
3645	if (trace->syscalls.map)
3646		return trace__set_ev_qualifier_bpf_filter(trace);
3647	if (trace->syscalls.events.sys_enter)
3648		return trace__set_ev_qualifier_tp_filter(trace);
3649	return 0;
3650}
3651
3652static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
3653				    size_t npids __maybe_unused, pid_t *pids __maybe_unused)
3654{
3655	int err = 0;
3656#ifdef HAVE_LIBBPF_SUPPORT
3657	bool value = true;
3658	int map_fd = bpf_map__fd(map);
3659	size_t i;
3660
3661	for (i = 0; i < npids; ++i) {
3662		err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
3663		if (err)
3664			break;
3665	}
3666#endif
3667	return err;
3668}
3669
3670static int trace__set_filter_loop_pids(struct trace *trace)
3671{
3672	unsigned int nr = 1, err;
3673	pid_t pids[32] = {
3674		getpid(),
3675	};
3676	struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3677
3678	while (thread && nr < ARRAY_SIZE(pids)) {
3679		struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
3680
3681		if (parent == NULL)
3682			break;
3683
3684		if (!strcmp(thread__comm_str(parent), "sshd") ||
3685		    strstarts(thread__comm_str(parent), "gnome-terminal")) {
3686			pids[nr++] = parent->tid;
3687			break;
3688		}
3689		thread = parent;
3690	}
3691
3692	err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
3693	if (!err && trace->filter_pids.map)
3694		err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3695
3696	return err;
3697}
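/*
 * Editor's note: the parent walk above, besides filtering perf itself, also
 * filters the sshd/gnome-terminal ancestor to break the feedback loop where
 * the terminal displaying our output does syscalls that we then trace,
 * producing more output, and so on.
 */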
3698
3699static int trace__set_filter_pids(struct trace *trace)
3700{
3701	int err = 0;
3702	/*
3703	 * Better not use !target__has_task() here because we need to cover the
3704	 * case where no threads were specified in the command line, but a
3705	 * workload was, and in that case we will fill in the thread_map when
3706	 * we fork the workload in evlist__prepare_workload.
3707	 */
3708	if (trace->filter_pids.nr > 0) {
3709		err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
3710						    trace->filter_pids.entries);
3711		if (!err && trace->filter_pids.map) {
3712			err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
3713						       trace->filter_pids.entries);
3714		}
3715	} else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
3716		err = trace__set_filter_loop_pids(trace);
3717	}
3718
3719	return err;
3720}
3721
3722static int __trace__deliver_event(struct trace *trace, union perf_event *event)
3723{
3724	struct evlist *evlist = trace->evlist;
3725	struct perf_sample sample;
3726	int err = evlist__parse_sample(evlist, event, &sample);
3727
3728	if (err)
3729		fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
3730	else
3731		trace__handle_event(trace, event, &sample);
3732
3733	return 0;
3734}
3735
3736static int __trace__flush_events(struct trace *trace)
3737{
3738	u64 first = ordered_events__first_time(&trace->oe.data);
3739	u64 flush = trace->oe.last - NSEC_PER_SEC;
3740
3741	/* Is there something to flush? */
3742	if (first && first < flush)
3743		return ordered_events__flush_time(&trace->oe.data, flush);
3744
3745	return 0;
3746}
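/*
 * Editor's note (illustration, not original source): the window arithmetic
 * above keeps roughly the last second of events queued, e.g. with
 * trace->oe.last == 42.7s only events with timestamps below 41.7s are
 * sorted and delivered, bounding cross-CPU reordering.
 */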
3747
3748static int trace__flush_events(struct trace *trace)
3749{
3750	return !trace->sort_events ? 0 : __trace__flush_events(trace);
3751}
3752
3753static int trace__deliver_event(struct trace *trace, union perf_event *event)
3754{
3755	int err;
3756
3757	if (!trace->sort_events)
3758		return __trace__deliver_event(trace, event);
3759
3760	err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
3761	if (err && err != -1)
3762		return err;
3763
3764	err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
3765	if (err)
3766		return err;
3767
3768	return trace__flush_events(trace);
3769}
3770
3771static int ordered_events__deliver_event(struct ordered_events *oe,
3772					 struct ordered_event *event)
3773{
3774	struct trace *trace = container_of(oe, struct trace, oe.data);
3775
3776	return __trace__deliver_event(trace, event->event);
3777}
3778
3779static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg)
3780{
3781	struct tep_format_field *field;
3782	struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);
3783
3784	if (evsel->tp_format == NULL || fmt == NULL)
3785		return NULL;
3786
3787	for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt)
3788		if (strcmp(field->name, arg) == 0)
3789			return fmt;
3790
3791	return NULL;
3792}
3793
3794static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
3795{
3796	char *tok, *left = evsel->filter, *new_filter = evsel->filter;
3797
3798	while ((tok = strpbrk(left, "=<>!")) != NULL) {
3799		char *right = tok + 1, *right_end;
3800
3801		if (*right == '=')
3802			++right;
3803
3804		while (isspace(*right))
3805			++right;
3806
3807		if (*right == '\0')
3808			break;
3809
3810		while (!isalpha(*left))
3811			if (++left == tok) {
3812				/*
3813				 * Bail out, we can't find the name of the argument being used
3814				 * in the filter; let it try to set this filter, it will fail later.
3815				 */
3816				return 0;
3817			}
3818
3819		right_end = right + 1;
3820		while (isalnum(*right_end) || *right_end == '_' || *right_end == '|')
3821			++right_end;
3822
3823		if (isalpha(*right)) {
3824			struct syscall_arg_fmt *fmt;
3825			int left_size = tok - left,
3826			    right_size = right_end - right;
3827			char arg[128];
3828
3829			while (isspace(left[left_size - 1]))
3830				--left_size;
3831
3832			scnprintf(arg, sizeof(arg), "%.*s", left_size, left);
3833
3834			fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg);
3835			if (fmt == NULL) {
3836				pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
3837				       arg, evsel->name, evsel->filter);
3838				return -1;
3839			}
3840
3841			pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ",
3842				 arg, (int)(right - tok), tok, right_size, right);
3843
3844			if (fmt->strtoul) {
3845				u64 val;
3846				struct syscall_arg syscall_arg = {
3847					.parm = fmt->parm,
3848				};
3849
3850				if (fmt->strtoul(right, right_size, &syscall_arg, &val)) {
3851					char *n, expansion[19];
3852					int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val);
3853					int expansion_offset = right - new_filter;
3854
3855					pr_debug("%s", expansion);
3856
3857					if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) {
3858						pr_debug(" out of memory!\n");
3859						free(new_filter);
3860						return -1;
3861					}
3862					if (new_filter != evsel->filter)
3863						free(new_filter);
3864					left = n + expansion_offset + expansion_length;
3865					new_filter = n;
3866				} else {
3867					pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n",
3868					       right_size, right, arg, evsel->name, evsel->filter);
3869					return -1;
3870				}
3871			} else {
3872				pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n",
3873				       arg, evsel->name, evsel->filter);
3874				return -1;
3875			}
3876
3877			pr_debug("\n");
3878		} else {
3879			left = right_end;
3880		}
3881	}
3882
3883	if (new_filter != evsel->filter) {
3884		pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
3885		evsel__set_filter(evsel, new_filter);
3886		free(new_filter);
3887	}
3888
3889	return 0;
3890}
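
/*
 * A worked example of the expansion above (illustrative; the exact
 * value depends on the arch and on the arg having a ->strtoul()
 * resolver):
 *
 *	perf trace -e openat --filter 'flags==O_CLOEXEC'
 *
 * gets "O_CLOEXEC" replaced by its numeric value, so the filter that
 * really gets set on the tracepoint ends up as something like:
 *
 *	flags==0x80000
 */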
3891
3892static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
3893{
3894	struct evlist *evlist = trace->evlist;
3895	struct evsel *evsel;
3896
3897	evlist__for_each_entry(evlist, evsel) {
3898		if (evsel->filter == NULL)
3899			continue;
3900
3901		if (trace__expand_filter(trace, evsel)) {
3902			*err_evsel = evsel;
3903			return -1;
3904		}
3905	}
3906
3907	return 0;
3908}
3909
3910static int trace__run(struct trace *trace, int argc, const char **argv)
3911{
3912	struct evlist *evlist = trace->evlist;
3913	struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
3914	int err = -1, i;
3915	unsigned long before;
3916	const bool forks = argc > 0;
3917	bool draining = false;
3918
3919	trace->live = true;
3920
3921	if (!trace->raw_augmented_syscalls) {
3922		if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
3923			goto out_error_raw_syscalls;
3924
3925		if (trace->trace_syscalls)
3926			trace->vfs_getname = evlist__add_vfs_getname(evlist);
3927	}
3928
3929	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
3930		pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
3931		if (pgfault_maj == NULL)
3932			goto out_error_mem;
3933		evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
3934		evlist__add(evlist, pgfault_maj);
3935	}
3936
3937	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
3938		pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
3939		if (pgfault_min == NULL)
3940			goto out_error_mem;
3941		evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
3942		evlist__add(evlist, pgfault_min);
3943	}
3944
3945	if (trace->sched &&
3946	    evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime))
3947		goto out_error_sched_stat_runtime;
3948	/*
3949	 * If a global cgroup was set, apply it to all the events without an
3950	 * explicit cgroup. I.e.:
3951	 *
3952	 * 	trace -G A -e sched:*switch
3953	 *
3954	 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
3955	 * _and_ sched:sched_switch to the 'A' cgroup, while:
3956	 *
3957	 * trace -e sched:*switch -G A
3958	 *
3959	 * will only set the sched:sched_switch event to the 'A' cgroup, all the
3960	 * other events (raw_syscalls:sys_{enter,exit}, etc.) are left "without"
3961	 * a cgroup (on the root cgroup, sys wide, etc).
3962	 *
3963	 * Multiple cgroups:
3964	 *
3965	 * trace -G A -e sched:*switch -G B
3966	 *
3967	 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
3968	 * to the 'B' cgroup.
3969	 *
3970	 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
3971	 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
3972	 */
3973	if (trace->cgroup)
3974		evlist__set_default_cgroup(trace->evlist, trace->cgroup);
3975
3976	err = evlist__create_maps(evlist, &trace->opts.target);
3977	if (err < 0) {
3978		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
3979		goto out_delete_evlist;
3980	}
3981
3982	err = trace__symbols_init(trace, evlist);
3983	if (err < 0) {
3984		fprintf(trace->output, "Problems initializing symbol libraries!\n");
3985		goto out_delete_evlist;
3986	}
3987
3988	evlist__config(evlist, &trace->opts, &callchain_param);
3989
3990	if (forks) {
3991		err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
3992		if (err < 0) {
3993			fprintf(trace->output, "Couldn't run the workload!\n");
3994			goto out_delete_evlist;
3995		}
3996	}
3997
3998	err = evlist__open(evlist);
3999	if (err < 0)
4000		goto out_error_open;
4001
4002	err = bpf__apply_obj_config();
4003	if (err) {
4004		char errbuf[BUFSIZ];
4005
4006		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
4007		pr_err("ERROR: Apply config to BPF failed: %s\n",
4008			 errbuf);
4009		goto out_error_open;
4010	}
4011
4012	err = trace__set_filter_pids(trace);
4013	if (err < 0)
4014		goto out_error_mem;
4015
4016	if (trace->syscalls.map)
4017		trace__init_syscalls_bpf_map(trace);
4018
4019	if (trace->syscalls.prog_array.sys_enter)
4020		trace__init_syscalls_bpf_prog_array_maps(trace);
4021
4022	if (trace->ev_qualifier_ids.nr > 0) {
4023		err = trace__set_ev_qualifier_filter(trace);
4024		if (err < 0)
4025			goto out_errno;
4026
4027		if (trace->syscalls.events.sys_exit) {
4028			pr_debug("event qualifier tracepoint filter: %s\n",
4029				 trace->syscalls.events.sys_exit->filter);
4030		}
4031	}
4032
4033	/*
4034	 * If the "close" syscall is not traced, then we will not have the
4035	 * opportunity, in syscall_arg__scnprintf_close_fd(), to invalidate the
4036	 * fd->pathname table, and we'd end up showing the last value set by
4037	 * syscalls opening a pathname and associating it with a descriptor, or
4038	 * reading it from /proc/pid/fd/ in cases where that doesn't make
4039	 * sense.
4040	 *
4041	 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
4042	 * not being traced.
4043	 */
4044	trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
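	/*
	 * E.g. 'perf trace -e openat,read' doesn't trace 'close', so
	 * fd_path_disabled ends up true and the SCA_FD/SCA_FDAT
	 * beautifiers skip the fd->pathname lookup (an illustrative
	 * example of the check above).
	 */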
4045
4046	err = trace__expand_filters(trace, &evsel);
4047	if (err)
4048		goto out_delete_evlist;
4049	err = evlist__apply_filters(evlist, &evsel);
4050	if (err < 0)
4051		goto out_error_apply_filters;
4052
4053	if (trace->dump.map)
4054		bpf_map__fprintf(trace->dump.map, trace->output);
4055
4056	err = evlist__mmap(evlist, trace->opts.mmap_pages);
4057	if (err < 0)
4058		goto out_error_mmap;
4059
4060	if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
4061		evlist__enable(evlist);
4062
4063	if (forks)
4064		evlist__start_workload(evlist);
4065
4066	if (trace->opts.initial_delay) {
4067		usleep(trace->opts.initial_delay * 1000);
4068		evlist__enable(evlist);
4069	}
4070
4071	trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
4072				  evlist->core.threads->nr > 1 ||
4073				  evlist__first(evlist)->core.attr.inherit;
4074
4075	/*
4076	 * Now that we have already used evsel->core.attr to ask the kernel to set up the
4077	 * events, let's reuse evsel->core.attr.sample_max_stack as the limit in
4078	 * trace__resolve_callchain(), allowing per-event max-stack settings
4079	 * to override an explicitly set --max-stack global setting.
4080	 */
4081	evlist__for_each_entry(evlist, evsel) {
4082		if (evsel__has_callchain(evsel) &&
4083		    evsel->core.attr.sample_max_stack == 0)
4084			evsel->core.attr.sample_max_stack = trace->max_stack;
4085	}
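	/*
	 * Example of the per-event override mentioned above, using a
	 * hypothetical probe name:
	 *
	 *	perf trace --max-stack 4 -e 'probe:vfs_read/max-stack=16/'
	 *
	 * keeps sample_max_stack = 16 for the probe while the other
	 * callchain-enabled events fall back to the global 4.
	 */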
4086again:
4087	before = trace->nr_events;
4088
4089	for (i = 0; i < evlist->core.nr_mmaps; i++) {
4090		union perf_event *event;
4091		struct mmap *md;
4092
4093		md = &evlist->mmap[i];
4094		if (perf_mmap__read_init(&md->core) < 0)
4095			continue;
4096
4097		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
4098			++trace->nr_events;
4099
4100			err = trace__deliver_event(trace, event);
4101			if (err)
4102				goto out_disable;
4103
4104			perf_mmap__consume(&md->core);
4105
4106			if (interrupted)
4107				goto out_disable;
4108
4109			if (done && !draining) {
4110				evlist__disable(evlist);
4111				draining = true;
4112			}
4113		}
4114		perf_mmap__read_done(&md->core);
4115	}
4116
4117	if (trace->nr_events == before) {
4118		int timeout = done ? 100 : -1;
4119
4120		if (!draining && evlist__poll(evlist, timeout) > 0) {
4121			if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
4122				draining = true;
4123
4124			goto again;
4125		} else {
4126			if (trace__flush_events(trace))
4127				goto out_disable;
4128		}
4129	} else {
4130		goto again;
4131	}
4132
4133out_disable:
4134	thread__zput(trace->current);
4135
4136	evlist__disable(evlist);
4137
4138	if (trace->sort_events)
4139		ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
4140
4141	if (!err) {
4142		if (trace->summary)
4143			trace__fprintf_thread_summary(trace, trace->output);
4144
4145		if (trace->show_tool_stats) {
4146			fprintf(trace->output, "Stats:\n "
4147					       " vfs_getname : %" PRIu64 "\n"
4148					       " proc_getname: %" PRIu64 "\n",
4149				trace->stats.vfs_getname,
4150				trace->stats.proc_getname);
4151		}
4152	}
4153
4154out_delete_evlist:
4155	trace__symbols__exit(trace);
4156	evlist__free_syscall_tp_fields(evlist);
4157	evlist__delete(evlist);
4158	cgroup__put(trace->cgroup);
4159	trace->evlist = NULL;
4160	trace->live = false;
4161	return err;
4162{
4163	char errbuf[BUFSIZ];
4164
4165out_error_sched_stat_runtime:
4166	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
4167	goto out_error;
4168
4169out_error_raw_syscalls:
4170	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
4171	goto out_error;
4172
4173out_error_mmap:
4174	evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
4175	goto out_error;
4176
4177out_error_open:
4178	evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
4179
4180out_error:
4181	fprintf(trace->output, "%s\n", errbuf);
4182	goto out_delete_evlist;
4183
4184out_error_apply_filters:
4185	fprintf(trace->output,
4186		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
4187		evsel->filter, evsel__name(evsel), errno,
4188		str_error_r(errno, errbuf, sizeof(errbuf)));
4189	goto out_delete_evlist;
4190}
4191out_error_mem:
4192	fprintf(trace->output, "Not enough memory to run!\n");
4193	goto out_delete_evlist;
4194
4195out_errno:
4196	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
4197	goto out_delete_evlist;
4198}
4199
4200static int trace__replay(struct trace *trace)
4201{
4202	const struct evsel_str_handler handlers[] = {
4203		{ "probe:vfs_getname",	     trace__vfs_getname, },
4204	};
4205	struct perf_data data = {
4206		.path  = input_name,
4207		.mode  = PERF_DATA_MODE_READ,
4208		.force = trace->force,
4209	};
4210	struct perf_session *session;
4211	struct evsel *evsel;
4212	int err = -1;
4213
4214	trace->tool.sample	  = trace__process_sample;
4215	trace->tool.mmap	  = perf_event__process_mmap;
4216	trace->tool.mmap2	  = perf_event__process_mmap2;
4217	trace->tool.comm	  = perf_event__process_comm;
4218	trace->tool.exit	  = perf_event__process_exit;
4219	trace->tool.fork	  = perf_event__process_fork;
4220	trace->tool.attr	  = perf_event__process_attr;
4221	trace->tool.tracing_data  = perf_event__process_tracing_data;
4222	trace->tool.build_id	  = perf_event__process_build_id;
4223	trace->tool.namespaces	  = perf_event__process_namespaces;
4224
4225	trace->tool.ordered_events = true;
4226	trace->tool.ordering_requires_timestamps = true;
4227
4228	/* add tid to output */
4229	trace->multiple_threads = true;
4230
4231	session = perf_session__new(&data, false, &trace->tool);
4232	if (IS_ERR(session))
4233		return PTR_ERR(session);
4234
4235	if (trace->opts.target.pid)
4236		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
4237
4238	if (trace->opts.target.tid)
4239		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
4240
4241	if (symbol__init(&session->header.env) < 0)
4242		goto out;
4243
4244	trace->host = &session->machines.host;
4245
4246	err = perf_session__set_tracepoints_handlers(session, handlers);
4247	if (err)
4248		goto out;
4249
4250	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
4251	/* older kernels have the syscalls:* tracepoints instead of raw_syscalls:* */
4252	if (evsel == NULL)
4253		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");
4254
4255	if (evsel &&
4256	    (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
4257	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
4258		pr_err("Error initializing raw_syscalls:sys_enter event\n");
4259		goto out;
4260	}
4261
4262	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
4263	if (evsel == NULL)
4264		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
4265	if (evsel &&
4266	    (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
4267	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
4268		pr_err("Error initializing raw_syscalls:sys_exit event\n");
4269		goto out;
4270	}
4271
4272	evlist__for_each_entry(session->evlist, evsel) {
4273		if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
4274		    (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
4275		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
4276		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
4277			evsel->handler = trace__pgfault;
4278	}
4279
4280	setup_pager();
4281
4282	err = perf_session__process_events(session);
4283	if (err)
4284		pr_err("Failed to process events, error %d", err);
4285
4286	else if (trace->summary)
4287		trace__fprintf_thread_summary(trace, trace->output);
4288
4289out:
4290	perf_session__delete(session);
4291
4292	return err;
4293}
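
/*
 * A sketch of the replay flow that lands here (illustrative command
 * lines):
 *
 *	perf trace record -p 1234	# writes perf.data
 *	perf trace -i perf.data -s	# replays it, summary only
 *
 * cmd_trace() calls trace__replay() instead of trace__run() whenever
 * input_name was set via -i/--input.
 */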
4294
4295static size_t trace__fprintf_threads_header(FILE *fp)
4296{
4297	size_t printed;
4298
4299	printed  = fprintf(fp, "\n Summary of events:\n\n");
4300
4301	return printed;
4302}
4303
4304DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
4305	struct syscall_stats *stats;
4306	double		     msecs;
4307	int		     syscall;
4308)
4309{
4310	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
4311	struct syscall_stats *stats = source->priv;
4312
4313	entry->syscall = source->i;
4314	entry->stats   = stats;
4315	entry->msecs   = stats ? (u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0;
4316}
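
/*
 * A note on the rb_resort.h machinery used above: DEFINE_RESORT_RB()
 * generates helpers that copy entries into a second rb-tree sorted by
 * the given comparison, here by total msecs, descending, and the block
 * right after the macro call fills each new entry from the original
 * intlist node. thread__dump_stats() below instantiates it with
 * DECLARE_RESORT_RB_INTLIST() and walks it with
 * resort_rb__for_each_entry().
 */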
4317
4318static size_t thread__dump_stats(struct thread_trace *ttrace,
4319				 struct trace *trace, FILE *fp)
4320{
4321	size_t printed = 0;
4322	struct syscall *sc;
4323	struct rb_node *nd;
4324	DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
4325
4326	if (syscall_stats == NULL)
4327		return 0;
4328
4329	printed += fprintf(fp, "\n");
4330
4331	printed += fprintf(fp, "   syscall            calls  errors  total       min       avg       max       stddev\n");
4332	printed += fprintf(fp, "                                     (msec)    (msec)    (msec)    (msec)        (%%)\n");
4333	printed += fprintf(fp, "   --------------- --------  ------ -------- --------- --------- ---------     ------\n");
4334
4335	resort_rb__for_each_entry(nd, syscall_stats) {
4336		struct syscall_stats *stats = syscall_stats_entry->stats;
4337		if (stats) {
4338			double min = (double)(stats->stats.min) / NSEC_PER_MSEC;
4339			double max = (double)(stats->stats.max) / NSEC_PER_MSEC;
4340			double avg = avg_stats(&stats->stats);
4341			double pct;
4342			u64 n = (u64)stats->stats.n;
4343
4344			pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0;
4345			avg /= NSEC_PER_MSEC;
4346
4347			sc = &trace->syscalls.table[syscall_stats_entry->syscall];
4348			printed += fprintf(fp, "   %-15s", sc->name);
4349			printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f",
4350					   n, stats->nr_failures, syscall_stats_entry->msecs, min, avg);
4351			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
4352
4353			if (trace->errno_summary && stats->nr_failures) {
4354				const char *arch_name = perf_env__arch(trace->host->env);
4355				int e;
4356
4357				for (e = 0; e < stats->max_errno; ++e) {
4358					if (stats->errnos[e] != 0)
4359						fprintf(fp, "\t\t\t\t%s: %d\n", arch_syscalls__strerrno(arch_name, e + 1), stats->errnos[e]);
4360				}
4361			}
4362		}
4363	}
4364
4365	resort_rb__delete(syscall_stats);
4366	printed += fprintf(fp, "\n\n");
4367
4368	return printed;
4369}
4370
4371static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
4372{
4373	size_t printed = 0;
4374	struct thread_trace *ttrace = thread__priv(thread);
4375	double ratio;
4376
4377	if (ttrace == NULL)
4378		return 0;
4379
4380	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
4381
4382	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
4383	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
4384	printed += fprintf(fp, "%.1f%%", ratio);
4385	if (ttrace->pfmaj)
4386		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
4387	if (ttrace->pfmin)
4388		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
4389	if (trace->sched)
4390		printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
4391	else if (fputc('\n', fp) != EOF)
4392		++printed;
4393
4394	printed += thread__dump_stats(ttrace, trace, fp);
4395
4396	return printed;
4397}
4398
4399static unsigned long thread__nr_events(struct thread_trace *ttrace)
4400{
4401	return ttrace ? ttrace->nr_events : 0;
4402}
4403
4404DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
4405	struct thread *thread;
4406)
4407{
4408	entry->thread = rb_entry(nd, struct thread, rb_node);
4409}
4410
4411static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
4412{
4413	size_t printed = trace__fprintf_threads_header(fp);
4414	struct rb_node *nd;
4415	int i;
4416
4417	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
4418		DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
4419
4420		if (threads == NULL) {
4421			fprintf(fp, "%s", "Error sorting output by nr_events!\n");
4422			return 0;
4423		}
4424
4425		resort_rb__for_each_entry(nd, threads)
4426			printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
4427
4428		resort_rb__delete(threads);
4429	}
4430	return printed;
4431}
4432
4433static int trace__set_duration(const struct option *opt, const char *str,
4434			       int unset __maybe_unused)
4435{
4436	struct trace *trace = opt->value;
4437
4438	trace->duration_filter = atof(str);
4439	return 0;
4440}
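
/*
 * E.g. 'perf trace --duration 2.5' keeps only the events that took
 * longer than 2.5 ms, atof() doing the parsing of the "float" option
 * argument above.
 */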
4441
4442static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
4443					      int unset __maybe_unused)
4444{
4445	int ret = -1;
4446	size_t i;
4447	struct trace *trace = opt->value;
4448	/*
4449	 * FIXME: introduce an intarray class, plainly parse the csv and create a
4450	 * { int nr, int entries[] } struct...
4451	 */
4452	struct intlist *list = intlist__new(str);
4453
4454	if (list == NULL)
4455		return -1;
4456
4457	i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
4458	trace->filter_pids.entries = calloc(i, sizeof(pid_t));
4459
4460	if (trace->filter_pids.entries == NULL)
4461		goto out;
4462
4463	trace->filter_pids.entries[0] = getpid();
4464
4465	for (i = 1; i < trace->filter_pids.nr; ++i)
4466		trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
4467
4468	intlist__delete(list);
4469	ret = 0;
4470out:
4471	return ret;
4472}
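
/*
 * E.g. 'perf trace --filter-pids 1234,5678' ends up with
 * filter_pids.entries = { getpid(), 1234, 5678 }, i.e. perf itself is
 * always the first pid handed to the kernel-side filter.
 */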
4473
4474static int trace__open_output(struct trace *trace, const char *filename)
4475{
4476	struct stat st;
4477
4478	if (!stat(filename, &st) && st.st_size) {
4479		char oldname[PATH_MAX];
4480
4481		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
4482		unlink(oldname);
4483		rename(filename, oldname);
4484	}
4485
4486	trace->output = fopen(filename, "w");
4487
4488	return trace->output == NULL ? -errno : 0;
4489}
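
/*
 * E.g. with 'perf trace -o out.txt', a pre-existing, non-empty
 * out.txt is first renamed to out.txt.old before the new output file
 * is created.
 */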
4490
4491static int parse_pagefaults(const struct option *opt, const char *str,
4492			    int unset __maybe_unused)
4493{
4494	int *trace_pgfaults = opt->value;
4495
4496	if (strcmp(str, "all") == 0)
4497		*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
4498	else if (strcmp(str, "maj") == 0)
4499		*trace_pgfaults |= TRACE_PFMAJ;
4500	else if (strcmp(str, "min") == 0)
4501		*trace_pgfaults |= TRACE_PFMIN;
4502	else
4503		return -1;
4504
4505	return 0;
4506}
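
/*
 * E.g. 'perf trace -F' traces just major faults (the "maj" default in
 * OPT_CALLBACK_DEFAULT below), while 'perf trace -F all' sets both
 * TRACE_PFMAJ and TRACE_PFMIN.
 */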
4507
4508static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler)
4509{
4510	struct evsel *evsel;
4511
4512	evlist__for_each_entry(evlist, evsel) {
4513		if (evsel->handler == NULL)
4514			evsel->handler = handler;
4515	}
4516}
4517
4518static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name)
4519{
4520	struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
4521
4522	if (fmt) {
4523		struct syscall_fmt *scfmt = syscall_fmt__find(name);
4524
4525		if (scfmt) {
4526			int skip = 0;
4527
4528			if (evsel->tp_format->format.fields != NULL &&
			    (strcmp(evsel->tp_format->format.fields->name, "__syscall_nr") == 0 ||
4529			     strcmp(evsel->tp_format->format.fields->name, "nr") == 0))
4530				++skip;
4531
4532			memcpy(fmt + skip, scfmt->arg, (evsel->tp_format->format.nr_fields - skip) * sizeof(*fmt));
4533		}
4534	}
4535}
4536
4537static int evlist__set_syscall_tp_fields(struct evlist *evlist)
4538{
4539	struct evsel *evsel;
4540
4541	evlist__for_each_entry(evlist, evsel) {
4542		if (evsel->priv || !evsel->tp_format)
4543			continue;
4544
4545		if (strcmp(evsel->tp_format->system, "syscalls")) {
4546			evsel__init_tp_arg_scnprintf(evsel);
4547			continue;
4548		}
4549
4550		if (evsel__init_syscall_tp(evsel))
4551			return -1;
4552
4553		if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
4554			struct syscall_tp *sc = __evsel__syscall_tp(evsel);
4555
4556			if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
4557				return -1;
4558
4559			evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_enter_") - 1);
4560		} else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
4561			struct syscall_tp *sc = __evsel__syscall_tp(evsel);
4562
4563			if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
4564				return -1;
4565
4566			evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_exit_") - 1);
4567		}
4568	}
4569
4570	return 0;
4571}
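
/*
 * E.g. with 'perf trace -e syscalls:sys_enter_openat' the loop above
 * strips the "sys_enter_" prefix and evsel__set_syscall_arg_fmt()
 * copies the "openat" beautifiers over this evsel's arg fmt array.
 */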
4572
4573/*
4574 * XXX: Hackish, just splitting the combined -e+--event (syscalls
4575 * (raw_syscalls:{sys_{enter,exit}} + events (tracepoints, HW, SW, etc) to use
4576 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
4577 *
4578 * It'd be better to introduce a parse_options() variant that would return a
4579 * list with the terms it didn't match to an event...
4580 */
4581static int trace__parse_events_option(const struct option *opt, const char *str,
4582				      int unset __maybe_unused)
4583{
4584	struct trace *trace = (struct trace *)opt->value;
4585	const char *s = str;
4586	char *sep = NULL, *lists[2] = { NULL, NULL, };
4587	int len = strlen(str) + 1, err = -1, list, idx;
4588	char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
4589	char group_name[PATH_MAX];
4590	struct syscall_fmt *fmt;
4591
4592	if (strace_groups_dir == NULL)
4593		return -1;
4594
4595	if (*s == '!') {
4596		++s;
4597		trace->not_ev_qualifier = true;
4598	}
4599
4600	while (1) {
4601		if ((sep = strchr(s, ',')) != NULL)
4602			*sep = '\0';
4603
4604		list = 0;
4605		if (syscalltbl__id(trace->sctbl, s) >= 0 ||
4606		    syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
4607			list = 1;
4608			goto do_concat;
4609		}
4610
4611		fmt = syscall_fmt__find_by_alias(s);
4612		if (fmt != NULL) {
4613			list = 1;
4614			s = fmt->name;
4615		} else {
4616			path__join(group_name, sizeof(group_name), strace_groups_dir, s);
4617			if (access(group_name, R_OK) == 0)
4618				list = 1;
4619		}
4620do_concat:
4621		if (lists[list]) {
4622			sprintf(lists[list] + strlen(lists[list]), ",%s", s);
4623		} else {
4624			lists[list] = malloc(len);
4625			if (lists[list] == NULL)
4626				goto out;
4627			strcpy(lists[list], s);
4628		}
4629
4630		if (!sep)
4631			break;
4632
4633		*sep = ',';
4634		s = sep + 1;
4635	}
4636
4637	if (lists[1] != NULL) {
4638		struct strlist_config slist_config = {
4639			.dirname = strace_groups_dir,
4640		};
4641
4642		trace->ev_qualifier = strlist__new(lists[1], &slist_config);
4643		if (trace->ev_qualifier == NULL) {
4644			fputs("Not enough memory to parse event qualifier", trace->output);
4645			goto out;
4646		}
4647
4648		if (trace__validate_ev_qualifier(trace))
4649			goto out;
4650		trace->trace_syscalls = true;
4651	}
4652
4653	err = 0;
4654
4655	if (lists[0]) {
4656		struct option o = {
4657			.value = &trace->evlist,
4658		};
4659		err = parse_events_option(&o, lists[0], 0);
4660	}
4661out:
4662	free(strace_groups_dir);
4663	free(lists[0]);
4664	free(lists[1]);
4665	if (sep)
4666		*sep = ',';
4667
4668	return err;
4669}
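
/*
 * E.g. 'perf trace -e open*,sched:sched_switch' gets split as
 * described above: "open*" matches syscall table entries and lands in
 * lists[1], becoming the ev_qualifier, while "sched:sched_switch"
 * stays in lists[0] and is handed to parse_events_option().
 */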
4670
4671static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
4672{
4673	struct trace *trace = opt->value;
4674
4675	if (!list_empty(&trace->evlist->core.entries)) {
4676		struct option o = {
4677			.value = &trace->evlist,
4678		};
4679		return parse_cgroups(&o, str, unset);
4680	}
4681	trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
4682
4683	return 0;
4684}
4685
4686static int trace__config(const char *var, const char *value, void *arg)
4687{
4688	struct trace *trace = arg;
4689	int err = 0;
4690
4691	if (!strcmp(var, "trace.add_events")) {
4692		trace->perfconfig_events = strdup(value);
4693		if (trace->perfconfig_events == NULL) {
4694			pr_err("Not enough memory for %s\n", "trace.add_events");
4695			return -1;
4696		}
4697	} else if (!strcmp(var, "trace.show_timestamp")) {
4698		trace->show_tstamp = perf_config_bool(var, value);
4699	} else if (!strcmp(var, "trace.show_duration")) {
4700		trace->show_duration = perf_config_bool(var, value);
4701	} else if (!strcmp(var, "trace.show_arg_names")) {
4702		trace->show_arg_names = perf_config_bool(var, value);
4703		if (!trace->show_arg_names)
4704			trace->show_zeros = true;
4705	} else if (!strcmp(var, "trace.show_zeros")) {
4706		bool new_show_zeros = perf_config_bool(var, value);
4707		if (!trace->show_arg_names && !new_show_zeros) {
4708			pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
4709			goto out;
4710		}
4711		trace->show_zeros = new_show_zeros;
4712	} else if (!strcmp(var, "trace.show_prefix")) {
4713		trace->show_string_prefix = perf_config_bool(var, value);
4714	} else if (!strcmp(var, "trace.no_inherit")) {
4715		trace->opts.no_inherit = perf_config_bool(var, value);
4716	} else if (!strcmp(var, "trace.args_alignment")) {
4717		int args_alignment = 0;
4718		if (perf_config_int(&args_alignment, var, value) == 0)
4719			trace->args_alignment = args_alignment;
4720	} else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
4721		if (strcasecmp(value, "libtraceevent") == 0)
4722			trace->libtraceevent_print = true;
4723		else if (strcasecmp(value, "libbeauty") == 0)
4724			trace->libtraceevent_print = false;
4725	}
4726out:
4727	return err;
4728}
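
/*
 * A minimal ~/.perfconfig snippet exercising some of the knobs
 * handled above (illustrative values):
 *
 *	[trace]
 *		show_timestamp = no
 *		args_alignment = 40
 *		tracepoint_beautifiers = libtraceevent
 */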
4729
4730static void trace__exit(struct trace *trace)
4731{
4732	int i;
4733
4734	strlist__delete(trace->ev_qualifier);
4735	free(trace->ev_qualifier_ids.entries);
4736	if (trace->syscalls.table) {
4737		for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
4738			syscall__exit(&trace->syscalls.table[i]);
4739		free(trace->syscalls.table);
4740	}
4741	syscalltbl__delete(trace->sctbl);
4742	zfree(&trace->perfconfig_events);
4743}
4744
4745int cmd_trace(int argc, const char **argv)
4746{
4747	const char *trace_usage[] = {
4748		"perf trace [<options>] [<command>]",
4749		"perf trace [<options>] -- <command> [<options>]",
4750		"perf trace record [<options>] [<command>]",
4751		"perf trace record [<options>] -- <command> [<options>]",
4752		NULL
4753	};
4754	struct trace trace = {
4755		.opts = {
4756			.target = {
4757				.uid	   = UINT_MAX,
4758				.uses_mmap = true,
4759			},
4760			.user_freq     = UINT_MAX,
4761			.user_interval = ULLONG_MAX,
4762			.no_buffering  = true,
4763			.mmap_pages    = UINT_MAX,
4764		},
4765		.output = stderr,
4766		.show_comm = true,
4767		.show_tstamp = true,
4768		.show_duration = true,
4769		.show_arg_names = true,
4770		.args_alignment = 70,
4771		.trace_syscalls = false,
4772		.kernel_syscallchains = false,
4773		.max_stack = UINT_MAX,
4774		.max_events = ULONG_MAX,
4775	};
4776	const char *map_dump_str = NULL;
4777	const char *output_name = NULL;
4778	const struct option trace_options[] = {
4779	OPT_CALLBACK('e', "event", &trace, "event",
4780		     "event/syscall selector. use 'perf list' to list available events",
4781		     trace__parse_events_option),
4782	OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
4783		     "event filter", parse_filter),
4784	OPT_BOOLEAN(0, "comm", &trace.show_comm,
4785		    "show the thread COMM next to its id"),
4786	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
4787	OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
4788		     trace__parse_events_option),
4789	OPT_STRING('o', "output", &output_name, "file", "output file name"),
4790	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
4791	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
4792		    "trace events on existing process id"),
4793	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
4794		    "trace events on existing thread id"),
4795	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
4796		     "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
4797	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
4798		    "system-wide collection from all CPUs"),
4799	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
4800		    "list of cpus to monitor"),
4801	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
4802		    "child tasks do not inherit counters"),
4803	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
4804		     "number of mmap data pages", evlist__parse_mmap_pages),
4805	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
4806		   "user to profile"),
4807	OPT_CALLBACK(0, "duration", &trace, "float",
4808		     "show only events with duration > N.M ms",
4809		     trace__set_duration),
4810#ifdef HAVE_LIBBPF_SUPPORT
4811	OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
4812#endif
4813	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
4814	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
4815	OPT_BOOLEAN('T', "time", &trace.full_time,
4816		    "Show full timestamp, not time relative to first start"),
4817	OPT_BOOLEAN(0, "failure", &trace.failure_only,
4818		    "Show only syscalls that failed"),
4819	OPT_BOOLEAN('s', "summary", &trace.summary_only,
4820		    "Show only syscall summary with statistics"),
4821	OPT_BOOLEAN('S', "with-summary", &trace.summary,
4822		    "Show all syscalls and summary with statistics"),
4823	OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
4824		    "Show errno stats per syscall, use with -s or -S"),
4825	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
4826		     "Trace pagefaults", parse_pagefaults, "maj"),
4827	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
4828	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
4829	OPT_CALLBACK(0, "call-graph", &trace.opts,
4830		     "record_mode[,record_size]", record_callchain_help,
4831		     &record_parse_callchain_opt),
4832	OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
4833		    "Use libtraceevent to print the tracepoint arguments."),
4834	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
4835		    "Show the kernel callchains on the syscall exit path"),
4836	OPT_ULONG(0, "max-events", &trace.max_events,
4837		"Set the maximum number of events to print, exiting after that is reached."),
4838	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
4839		     "Set the minimum stack depth when parsing the callchain, "
4840		     "anything below the specified depth will be ignored."),
4841	OPT_UINTEGER(0, "max-stack", &trace.max_stack,
4842		     "Set the maximum stack depth when parsing the callchain, "
4843		     "anything beyond the specified depth will be ignored. "
4844		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
4845	OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
4846			"Sort batch of events before processing, use if getting out of order events"),
4847	OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
4848			"print the PERF_RECORD_SAMPLE PERF_SAMPLE_* info, for debugging"),
4849	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
4850			"per thread proc mmap processing timeout in ms"),
4851	OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
4852		     trace__parse_cgroups),
4853	OPT_INTEGER('D', "delay", &trace.opts.initial_delay,
4854		     "ms to wait before starting measurement after program "
4855		     "start"),
4856	OPTS_EVSWITCH(&trace.evswitch),
4857	OPT_END()
4858	};
4859	bool __maybe_unused max_stack_user_set = true;
4860	bool mmap_pages_user_set = true;
4861	struct evsel *evsel;
4862	const char * const trace_subcommands[] = { "record", NULL };
4863	int err = -1;
4864	char bf[BUFSIZ];
4865
4866	signal(SIGSEGV, sighandler_dump_stack);
4867	signal(SIGFPE, sighandler_dump_stack);
4868	signal(SIGCHLD, sig_handler);
4869	signal(SIGINT, sig_handler);
4870
4871	trace.evlist = evlist__new();
4872	trace.sctbl = syscalltbl__new();
4873
4874	if (trace.evlist == NULL || trace.sctbl == NULL) {
4875		pr_err("Not enough memory to run!\n");
4876		err = -ENOMEM;
4877		goto out;
4878	}
4879
4880	/*
4881	 * Parsing .perfconfig may entail creating a BPF event, that may need
4882	 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
4883	 * is too small. This affects just this process, not touching the
4884	 * global setting. If it fails we'll get something in 'perf trace -v'
4885	 * to help diagnose the problem.
4886	 */
4887	rlimit__bump_memlock();
4888
4889	err = perf_config(trace__config, &trace);
4890	if (err)
4891		goto out;
4892
4893	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
4894				 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
4895
4896	/*
4897	 * At this point we have already passed through trace__parse_events_option()
4898	 * and it has figured out whether -e syscall_name was used; if not, but
4899	 * --event foo:bar was used, the user is interested _just_ in those, say,
4900	 * tracepoint events, not in the strace-like syscall-name-based mode.
4901	 *
4902	 * This is important because we need to know if strace-like mode is
4903	 * needed to decide whether we should filter out the eBPF
4904	 * __augmented_syscalls__ code, if it is in the mix, say, via
4905	 * .perfconfig trace.add_events.
4906	 */
4907	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
4908	    trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
4909		trace.trace_syscalls = true;
4910	}
4911	/*
4912	 * Now that we have --verbose figured out, lets see if we need to parse
4913	 * events from .perfconfig, so that if those events fail parsing, say some
4914	 * BPF program fails, then we'll be able to use --verbose to see what went
4915	 * wrong in more detail.
4916	 */
4917	if (trace.perfconfig_events != NULL) {
4918		struct parse_events_error parse_err;
4919
4920		bzero(&parse_err, sizeof(parse_err));
4921		err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
4922		if (err) {
4923			parse_events_print_error(&parse_err, trace.perfconfig_events);
4924			goto out;
4925		}
4926	}
4927
4928	if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
4929		usage_with_options_msg(trace_usage, trace_options,
4930				       "cgroup monitoring only available in system-wide mode");
4931	}
4932
4933	evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
4934	if (IS_ERR(evsel)) {
4935		bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
4936		pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
4937		goto out;
4938	}
4939
4940	if (evsel) {
4941		trace.syscalls.events.augmented = evsel;
4942
4943		evsel = evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
4944		if (evsel == NULL) {
4945			pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
4946			goto out;
4947		}
4948
4949		if (evsel->bpf_obj == NULL) {
4950			pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
4951			goto out;
4952		}
4953
4954		trace.bpf_obj = evsel->bpf_obj;
4955
4956		/*
4957		 * If we have _just_ the augmenter event but don't have a
4958		 * explicit --syscalls, then assume we want all strace-like
4959		 * syscalls:
4960		 */
4961		if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace))
4962			trace.trace_syscalls = true;
4963		/*
4964		 * So, if we have a syscall augmenter, but trace_syscalls (aka
4965		 * strace-like syscall tracing) is not set, then we need to throw
4966		 * away the augmenter, i.e. all the events that were created
4967		 * from that BPF object file.
4968		 *
4969		 * This is more to fix the current .perfconfig trace.add_events
4970		 * style of setting up the strace-like eBPF based syscall point
4971		 * payload augmenter.
4972		 *
4973		 * All this complexity will be avoided by adding an alternative
4974		 * to trace.add_events in the form of
4975		 * trace.bpf_augmented_syscalls, that will be only parsed if we
4976		 * need it.
4977		 *
4978		 * .perfconfig trace.add_events is still useful if we want to have,
4979		 * for instance, msr_write.msr in some .perfconfig profile based
4980		 * 'perf trace --config determinism.profile' mode, where for some
4981		 * particular goal/workload type we want a set of events and
4982		 * output mode (with timings, etc) instead of having to add
4983		 * all via the command line.
4984		 *
4985		 * Also --config to specify an alternate .perfconfig file needs
4986		 * to be implemented.
4987		 */
4988		if (!trace.trace_syscalls) {
4989			trace__delete_augmented_syscalls(&trace);
4990		} else {
4991			trace__set_bpf_map_filtered_pids(&trace);
4992			trace__set_bpf_map_syscalls(&trace);
4993			trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
4994		}
4995	}
4996
4997	err = bpf__setup_stdout(trace.evlist);
4998	if (err) {
4999		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
5000		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
5001		goto out;
5002	}
5003
5004	err = -1;
5005
5006	if (map_dump_str) {
5007		trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
5008		if (trace.dump.map == NULL) {
5009			pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
5010			goto out;
5011		}
5012	}
5013
5014	if (trace.trace_pgfaults) {
5015		trace.opts.sample_address = true;
5016		trace.opts.sample_time = true;
5017	}
5018
5019	if (trace.opts.mmap_pages == UINT_MAX)
5020		mmap_pages_user_set = false;
5021
5022	if (trace.max_stack == UINT_MAX) {
5023		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
5024		max_stack_user_set = false;
5025	}
5026
5027#ifdef HAVE_DWARF_UNWIND_SUPPORT
5028	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
5029		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
5030	}
5031#endif
5032
5033	if (callchain_param.enabled) {
5034		if (!mmap_pages_user_set && geteuid() == 0)
5035			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
5036
5037		symbol_conf.use_callchain = true;
5038	}
5039
5040	if (trace.evlist->core.nr_entries > 0) {
5041		evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
5042		if (evlist__set_syscall_tp_fields(trace.evlist)) {
5043			perror("failed to set syscalls:* tracepoint fields");
5044			goto out;
5045		}
5046	}
5047
5048	if (trace.sort_events) {
5049		ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
5050		ordered_events__set_copy_on_queue(&trace.oe.data, true);
5051	}
5052
5053	/*
5054	 * If we are augmenting syscalls, then combine what we put in the
5055	 * __augmented_syscalls__ BPF map with what is in the
5056	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
5057	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
5058	 *
5059	 * We'll switch to look at two BPF maps, one for sys_enter and the
5060	 * other for sys_exit when we start augmenting the sys_exit paths with
5061	 * buffers that are being copied from kernel to userspace, think 'read'
5062	 * syscall.
5063	 */
5064	if (trace.syscalls.events.augmented) {
5065		evlist__for_each_entry(trace.evlist, evsel) {
5066			bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
5067
5068			if (raw_syscalls_sys_exit) {
5069				trace.raw_augmented_syscalls = true;
5070				goto init_augmented_syscall_tp;
5071			}
5072
5073			if (trace.syscalls.events.augmented->priv == NULL &&
5074			    strstr(evsel__name(evsel), "syscalls:sys_enter")) {
5075				struct evsel *augmented = trace.syscalls.events.augmented;
5076				if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
5077				    evsel__init_augmented_syscall_tp_args(augmented))
5078					goto out;
5079				/*
5080				 * Augmented is __augmented_syscalls__ BPF_OUTPUT event
5081				 * Above we made sure we can get from the payload the tp fields
5082				 * that we get from syscalls:sys_enter tracefs format file.
5083				 */
5084				augmented->handler = trace__sys_enter;
5085				/*
5086				 * Now we do the same for the *syscalls:sys_enter event so that
5087				 * if we handle it directly, i.e. if the BPF prog returns 0 so
5088				 * as not to filter it, then we'll handle it just like we would
5089				 * for the BPF_OUTPUT one:
5090				 */
5091				if (evsel__init_augmented_syscall_tp(evsel, evsel) ||
5092				    evsel__init_augmented_syscall_tp_args(evsel))
5093					goto out;
5094				evsel->handler = trace__sys_enter;
5095			}
5096
5097			if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
5098				struct syscall_tp *sc;
5099init_augmented_syscall_tp:
5100				if (evsel__init_augmented_syscall_tp(evsel, evsel))
5101					goto out;
5102				sc = __evsel__syscall_tp(evsel);
5103				/*
5104				 * For now with BPF raw_augmented we hook into
5105				 * raw_syscalls:sys_enter and there we get all
5106				 * 6 syscall args plus the tracepoint common
5107				 * fields and the syscall_nr (another long).
5108		 * So we check if that is the case and, if so,
5109		 * don't use sc->args_size as the payload size
5110		 * but always the full raw_syscalls:sys_enter
5111		 * payload size, which is fixed.
5112		 *
5113		 * We'll revisit this later to pass
5114		 * sc->args_size to the BPF augmenter (now
5115				 * tools/perf/examples/bpf/augmented_raw_syscalls.c,
5116				 * so that it copies only what we need for each
5117				 * syscall, like what happens when we use
5118				 * syscalls:sys_enter_NAME, so that we reduce
5119				 * the kernel/userspace traffic to just what is
5120				 * needed for each syscall.
5121				 */
5122				if (trace.raw_augmented_syscalls)
5123					trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
5124				evsel__init_augmented_syscall_tp_ret(evsel);
5125				evsel->handler = trace__sys_exit;
5126			}
5127		}
5128	}
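	/*
	 * E.g. on a 64-bit arch the fixed payload size computed above is
	 * (6 + 1) * 8 = 56 bytes, for the six syscall args plus the
	 * syscall_nr long, on top of sc->id.offset for the tracepoint
	 * common fields.
	 */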
5129
5130	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
5131		return trace__record(&trace, argc-1, &argv[1]);
5132
5133	/* Using just --errno-summary will trigger --summary */
5134	if (trace.errno_summary && !trace.summary && !trace.summary_only)
5135		trace.summary_only = true;
5136
5137	/* summary_only implies summary option, but don't overwrite summary if set */
5138	if (trace.summary_only)
5139		trace.summary = trace.summary_only;
5140
5141	if (output_name != NULL) {
5142		err = trace__open_output(&trace, output_name);
5143		if (err < 0) {
5144			perror("failed to create output file");
5145			goto out;
5146		}
5147	}
5148
5149	err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
5150	if (err)
5151		goto out_close;
5152
5153	err = target__validate(&trace.opts.target);
5154	if (err) {
5155		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5156		fprintf(trace.output, "%s", bf);
5157		goto out_close;
5158	}
5159
5160	err = target__parse_uid(&trace.opts.target);
5161	if (err) {
5162		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5163		fprintf(trace.output, "%s", bf);
5164		goto out_close;
5165	}
5166
5167	if (!argc && target__none(&trace.opts.target))
5168		trace.opts.target.system_wide = true;
5169
5170	if (input_name)
5171		err = trace__replay(&trace);
5172	else
5173		err = trace__run(&trace, argc, argv);
5174
5175out_close:
5176	if (output_name != NULL)
5177		fclose(trace.output);
5178out:
5179	trace__exit(&trace);
5180	return err;
5181}