Linux Audio

Check our new training course

Loading...
v4.6
   1/*
   2 * builtin-trace.c
   3 *
   4 * Builtin 'trace' command:
   5 *
   6 * Display a continuously updated trace of any workload, CPU, specific PID,
   7 * system wide, etc.  Default format is loosely strace like, but any other
   8 * event may be specified using --event.
   9 *
  10 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
  11 *
  12 * Initially based on the 'trace' prototype by Thomas Gleixner:
  13 *
  14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
  15 *
  16 * Released under the GPL v2. (and only v2, not any later version)
  17 */
  18
  19#include <traceevent/event-parse.h>
  20#include <api/fs/tracing_path.h>
 
 
 
 
 
  21#include "builtin.h"
 
  22#include "util/color.h"
 
  23#include "util/debug.h"
 
 
 
 
 
 
  24#include "util/evlist.h"
 
 
 
  25#include <subcmd/exec-cmd.h>
  26#include "util/machine.h"
 
 
 
  27#include "util/session.h"
  28#include "util/thread.h"
  29#include <subcmd/parse-options.h>
  30#include "util/strlist.h"
  31#include "util/intlist.h"
  32#include "util/thread_map.h"
  33#include "util/stat.h"
 
 
 
  34#include "trace-event.h"
  35#include "util/parse-events.h"
  36#include "util/bpf-loader.h"
  37
  38#include <libaudit.h>
 
 
 
 
 
 
 
 
 
 
  39#include <stdlib.h>
  40#include <sys/mman.h>
  41#include <linux/futex.h>
  42#include <linux/err.h>
 
 
 
 
 
 
 
 
  43
  44/* For older distros: */
  45#ifndef MAP_STACK
  46# define MAP_STACK		0x20000
  47#endif
  48
  49#ifndef MADV_HWPOISON
  50# define MADV_HWPOISON		100
  51
  52#endif
  53
  54#ifndef MADV_MERGEABLE
  55# define MADV_MERGEABLE		12
  56#endif
  57
  58#ifndef MADV_UNMERGEABLE
  59# define MADV_UNMERGEABLE	13
  60#endif
  61
  62#ifndef EFD_SEMAPHORE
  63# define EFD_SEMAPHORE		1
  64#endif
  65
  66#ifndef EFD_NONBLOCK
  67# define EFD_NONBLOCK		00004000
  68#endif
  69
  70#ifndef EFD_CLOEXEC
  71# define EFD_CLOEXEC		02000000
  72#endif
  73
  74#ifndef O_CLOEXEC
  75# define O_CLOEXEC		02000000
  76#endif
  77
  78#ifndef SOCK_DCCP
  79# define SOCK_DCCP		6
  80#endif
  81
  82#ifndef SOCK_CLOEXEC
  83# define SOCK_CLOEXEC		02000000
  84#endif
  85
  86#ifndef SOCK_NONBLOCK
  87# define SOCK_NONBLOCK		00004000
  88#endif
  89
  90#ifndef MSG_CMSG_CLOEXEC
  91# define MSG_CMSG_CLOEXEC	0x40000000
  92#endif
  93
  94#ifndef PERF_FLAG_FD_NO_GROUP
  95# define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
  96#endif
  97
  98#ifndef PERF_FLAG_FD_OUTPUT
  99# define PERF_FLAG_FD_OUTPUT		(1UL << 1)
 100#endif
 101
 102#ifndef PERF_FLAG_PID_CGROUP
 103# define PERF_FLAG_PID_CGROUP		(1UL << 2) /* pid=cgroup id, per-cpu mode only */
 104#endif
 
 
 
 
 
 
 
 
 
 105
 106#ifndef PERF_FLAG_FD_CLOEXEC
 107# define PERF_FLAG_FD_CLOEXEC		(1UL << 3) /* O_CLOEXEC */
 108#endif
 
 
 
 
 
 
 
 
 
 
 109
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 110
/*
 * Accessor for one field inside a tracepoint sample's raw_data payload:
 * 'offset' is the byte offset of the field, and the union selects how it
 * is read back — as a fixed-width unsigned integer or as a raw pointer
 * into the payload.
 */
struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};
 118
/*
 * Generate tp_field__u{8,16,32,64}(): read an unsigned integer of the
 * given width from sample->raw_data at field->offset.  memcpy() is used
 * because the payload offset may not be aligned for a direct load.
 */
#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value;  \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);
 131
/*
 * Same as TP_UINT_FIELD(), but byte-swap the value after reading it —
 * used when the recorded data's endianness differs from the host's.
 */
#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);
 143
 144static int tp_field__init_uint(struct tp_field *field,
 145			       struct format_field *format_field,
 146			       bool needs_swap)
 147{
 148	field->offset = format_field->offset;
 149
 150	switch (format_field->size) {
 151	case 1:
 152		field->integer = tp_field__u8;
 153		break;
 154	case 2:
 155		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
 156		break;
 157	case 4:
 158		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
 159		break;
 160	case 8:
 161		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
 162		break;
 163	default:
 164		return -1;
 165	}
 166
 167	return 0;
 168}
 169
 
 
 
 
 
 170static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
 171{
 172	return sample->raw_data + field->offset;
 173}
 174
 175static int tp_field__init_ptr(struct tp_field *field, struct format_field *format_field)
 176{
 177	field->offset = format_field->offset;
 178	field->pointer = tp_field__ptr;
 179	return 0;
 180}
 181
 
 
 
 
 
/*
 * Per-evsel state for the raw_syscalls:sys_{enter,exit} tracepoints:
 * the 'id' field plus either the enter-side 'args' or the exit-side
 * 'ret' field — never both, hence the union.
 */
struct syscall_tp {
	struct tp_field id;
	union {
		struct tp_field args, ret;
	};
};
 188
 189static int perf_evsel__init_tp_uint_field(struct perf_evsel *evsel,
 190					  struct tp_field *field,
 191					  const char *name)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 192{
 193	struct format_field *format_field = perf_evsel__field(evsel, name);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 194
 195	if (format_field == NULL)
 196		return -1;
 197
 198	return tp_field__init_uint(field, format_field, evsel->needs_swap);
 199}
 200
/*
 * Convenience wrapper: initialize the syscall_tp member called 'name'
 * (e.g. id, args) from the tracepoint field of the same name.
 */
#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
 204
 205static int perf_evsel__init_tp_ptr_field(struct perf_evsel *evsel,
 206					 struct tp_field *field,
 207					 const char *name)
 208{
 209	struct format_field *format_field = perf_evsel__field(evsel, name);
 210
 211	if (format_field == NULL)
 212		return -1;
 213
 214	return tp_field__init_ptr(field, format_field);
 215}
 216
/*
 * Convenience wrapper: initialize the syscall_tp member called 'name'
 * as a pointer accessor into the tracepoint payload.
 */
#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = evsel->priv;\
	   perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
 220
/*
 * Release an evsel together with its private syscall_tp state; the priv
 * area is freed (and NULLed by zfree) before the evsel itself.
 */
static void perf_evsel__delete_priv(struct perf_evsel *evsel)
{
	zfree(&evsel->priv);
	perf_evsel__delete(evsel);
}
 226
 227static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel, void *handler)
 228{
 229	evsel->priv = malloc(sizeof(struct syscall_tp));
 230	if (evsel->priv != NULL) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 231		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
 232			goto out_delete;
 233
 234		evsel->handler = handler;
 235		return 0;
 236	}
 237
 238	return -ENOMEM;
 239
 240out_delete:
 241	zfree(&evsel->priv);
 242	return -ENOENT;
 243}
 244
 245static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void *handler)
 246{
 247	struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
 248
 249	/* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
 250	if (IS_ERR(evsel))
 251		evsel = perf_evsel__newtp("syscalls", direction);
 252
 253	if (IS_ERR(evsel))
 254		return NULL;
 255
 256	if (perf_evsel__init_syscall_tp(evsel, handler))
 257		goto out_delete;
 258
 259	return evsel;
 260
 261out_delete:
 262	perf_evsel__delete_priv(evsel);
 263	return NULL;
 264}
 265
/* Read the syscall_tp member 'name' from @sample as an integer. */
#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.integer(&fields->name, sample); })

/* Read the syscall_tp member 'name' from @sample as a raw pointer. */
#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.pointer(&fields->name, sample); })
 273
/*
 * Context handed to the per-argument pretty-printers (SCA_* callbacks):
 * the raw value, the thread/trace it came from, an optional parameter
 * (e.g. a strarray), the argument index, and a mask of argument bits a
 * printer may set to suppress later, unused arguments.
 */
struct syscall_arg {
	unsigned long val;
	struct thread *thread;
	struct trace  *trace;
	void	      *parm;
	u8	      idx;
	u8	      mask;
};
 282
/*
 * Value -> string lookup table: entries[val - offset] names the value,
 * values outside [offset, offset + nr_entries) fall back to numeric.
 */
struct strarray {
	int	    offset;
	int	    nr_entries;
	const char **entries;
};
 
 288
/* Define strarray__<array> over an existing table, values starting at 0. */
#define DEFINE_STRARRAY(array) struct strarray strarray__##array = { \
	.nr_entries = ARRAY_SIZE(array), \
	.entries = array, \
}

/* Same, but the first entry corresponds to value 'off' instead of 0. */
#define DEFINE_STRARRAY_OFFSET(array, off) struct strarray strarray__##array = { \
	.offset	    = off, \
	.nr_entries = ARRAY_SIZE(array), \
	.entries = array, \
}
 299
 300static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
 301						const char *intfmt,
 302					        struct syscall_arg *arg)
 303{
 304	struct strarray *sa = arg->parm;
 305	int idx = arg->val - sa->offset;
 306
 307	if (idx < 0 || idx >= sa->nr_entries)
 308		return scnprintf(bf, size, intfmt, arg->val);
 309
 310	return scnprintf(bf, size, "%s", sa->entries[idx]);
 311}
 312
 313static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
 314					      struct syscall_arg *arg)
 315{
 316	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
 317}
 318
 319#define SCA_STRARRAY syscall_arg__scnprintf_strarray
 320
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches as soon as the ioctl beautifier
 * 	  gets rewritten to support all arches.
 */
/* strarray printer with hex fallback — used by the ioctl beautifier. */
static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%#x", arg);
}

#define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
#endif /* defined(__i386__) || defined(__x86_64__) */
 334
 335static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
 336					struct syscall_arg *arg);
 337
 338#define SCA_FD syscall_arg__scnprintf_fd
 339
 340static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
 341					   struct syscall_arg *arg)
 342{
 343	int fd = arg->val;
 344
 345	if (fd == AT_FDCWD)
 346		return scnprintf(bf, size, "CWD");
 347
 348	return syscall_arg__scnprintf_fd(bf, size, arg);
 349}
 350
 351#define SCA_FDAT syscall_arg__scnprintf_fd_at
 352
 353static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
 354					      struct syscall_arg *arg);
 355
 356#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
 357
 358static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
 359					 struct syscall_arg *arg)
 360{
 361	return scnprintf(bf, size, "%#lx", arg->val);
 362}
 363
 364#define SCA_HEX syscall_arg__scnprintf_hex
 365
 366static size_t syscall_arg__scnprintf_int(char *bf, size_t size,
 367					 struct syscall_arg *arg)
 368{
 369	return scnprintf(bf, size, "%d", arg->val);
 370}
 371
 372#define SCA_INT syscall_arg__scnprintf_int
 373
/*
 * Decode an mmap() prot bitmask into "EXEC|READ|..." form; PROT_NONE
 * (0) prints as "NONE" and any leftover unknown bits are appended in hex.
 */
static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
					       struct syscall_arg *arg)
{
	int printed = 0, prot = arg->val;

	if (prot == PROT_NONE)
		return scnprintf(bf, size, "NONE");
/* Print PROT_<n> by name and clear it so leftovers can be detected. */
#define	P_MMAP_PROT(n) \
	if (prot & PROT_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		prot &= ~PROT_##n; \
	}

	P_MMAP_PROT(EXEC);
	P_MMAP_PROT(READ);
	P_MMAP_PROT(WRITE);
#ifdef PROT_SEM
	P_MMAP_PROT(SEM);
#endif
	P_MMAP_PROT(GROWSDOWN);
	P_MMAP_PROT(GROWSUP);
#undef P_MMAP_PROT

	if (prot)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", prot);

	return printed;
}
 402
 403#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
 404
/*
 * Decode an mmap() flags bitmask into "SHARED|ANONYMOUS|..." form;
 * unknown leftover bits are appended in hex.
 */
static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

/* Print MAP_<n> by name and clear it so leftovers can be detected. */
#define	P_MMAP_FLAG(n) \
	if (flags & MAP_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~MAP_##n; \
	}

	P_MMAP_FLAG(SHARED);
	P_MMAP_FLAG(PRIVATE);
#ifdef MAP_32BIT
	P_MMAP_FLAG(32BIT);
#endif
	P_MMAP_FLAG(ANONYMOUS);
	P_MMAP_FLAG(DENYWRITE);
	P_MMAP_FLAG(EXECUTABLE);
	P_MMAP_FLAG(FILE);
	P_MMAP_FLAG(FIXED);
	P_MMAP_FLAG(GROWSDOWN);
#ifdef MAP_HUGETLB
	P_MMAP_FLAG(HUGETLB);
#endif
	P_MMAP_FLAG(LOCKED);
	P_MMAP_FLAG(NONBLOCK);
	P_MMAP_FLAG(NORESERVE);
	P_MMAP_FLAG(POPULATE);
	P_MMAP_FLAG(STACK);
#ifdef MAP_UNINITIALIZED
	P_MMAP_FLAG(UNINITIALIZED);
#endif
#undef P_MMAP_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}
 445
 446#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
 
 447
/*
 * Decode an mremap() flags bitmask (MAYMOVE, FIXED); unknown leftover
 * bits are appended in hex.
 */
static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
						  struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

/* Print MREMAP_<n> by name and clear it so leftovers can be detected. */
#define P_MREMAP_FLAG(n) \
	if (flags & MREMAP_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~MREMAP_##n; \
	}

	P_MREMAP_FLAG(MAYMOVE);
#ifdef MREMAP_FIXED
	P_MREMAP_FLAG(FIXED);
#endif
#undef P_MREMAP_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}
 
 
 
 
 
 
 
 
 
 
 470
 471#define SCA_MREMAP_FLAGS syscall_arg__scnprintf_mremap_flags
 
 472
 473static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
 474						      struct syscall_arg *arg)
 475{
 476	int behavior = arg->val;
 477
 478	switch (behavior) {
 479#define	P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)
 480	P_MADV_BHV(NORMAL);
 481	P_MADV_BHV(RANDOM);
 482	P_MADV_BHV(SEQUENTIAL);
 483	P_MADV_BHV(WILLNEED);
 484	P_MADV_BHV(DONTNEED);
 485	P_MADV_BHV(REMOVE);
 486	P_MADV_BHV(DONTFORK);
 487	P_MADV_BHV(DOFORK);
 488	P_MADV_BHV(HWPOISON);
 489#ifdef MADV_SOFT_OFFLINE
 490	P_MADV_BHV(SOFT_OFFLINE);
 491#endif
 492	P_MADV_BHV(MERGEABLE);
 493	P_MADV_BHV(UNMERGEABLE);
 494#ifdef MADV_HUGEPAGE
 495	P_MADV_BHV(HUGEPAGE);
 496#endif
 497#ifdef MADV_NOHUGEPAGE
 498	P_MADV_BHV(NOHUGEPAGE);
 499#endif
 500#ifdef MADV_DONTDUMP
 501	P_MADV_BHV(DONTDUMP);
 502#endif
 503#ifdef MADV_DODUMP
 504	P_MADV_BHV(DODUMP);
 505#endif
 506#undef P_MADV_PHV
 507	default: break;
 508	}
 509
 510	return scnprintf(bf, size, "%#x", behavior);
 511}
 512
 513#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
 
 
 
 
 514
 515static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
 
 
 
 
 516					   struct syscall_arg *arg)
 517{
 518	int printed = 0, op = arg->val;
 519
 520	if (op == 0)
 521		return scnprintf(bf, size, "NONE");
 522#define	P_CMD(cmd) \
 523	if ((op & LOCK_##cmd) == LOCK_##cmd) { \
 524		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #cmd); \
 525		op &= ~LOCK_##cmd; \
 526	}
 527
 528	P_CMD(SH);
 529	P_CMD(EX);
 530	P_CMD(NB);
 531	P_CMD(UN);
 532	P_CMD(MAND);
 533	P_CMD(RW);
 534	P_CMD(READ);
 535	P_CMD(WRITE);
 536#undef P_OP
 537
 538	if (op)
 539		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", op);
 540
 541	return printed;
 542}
 543
 544#define SCA_FLOCK syscall_arg__scnprintf_flock
 545
/*
 * Decode the futex() op argument: print the command (low bits under
 * FUTEX_CMD_MASK) by name and append |PRIV / |CLKRT for the modifier
 * flags.  Side effect: each command ORs bits into arg->mask so that
 * the futex arguments it does not use are suppressed from the output.
 */
static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg)
{
	/* One bit per futex() argument position, for arg->mask. */
	enum syscall_futex_args {
		SCF_UADDR   = (1 << 0),
		SCF_OP	    = (1 << 1),
		SCF_VAL	    = (1 << 2),
		SCF_TIMEOUT = (1 << 3),
		SCF_UADDR2  = (1 << 4),
		SCF_VAL3    = (1 << 5),
	};
	int op = arg->val;
	int cmd = op & FUTEX_CMD_MASK;
	size_t printed = 0;

	switch (cmd) {
#define	P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n);
	P_FUTEX_OP(WAIT);	    arg->mask |= SCF_VAL3|SCF_UADDR2;		  break;
	P_FUTEX_OP(WAKE);	    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(FD);		    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(REQUEUE);	    arg->mask |= SCF_VAL3|SCF_TIMEOUT;	          break;
	P_FUTEX_OP(CMP_REQUEUE);    arg->mask |= SCF_TIMEOUT;			  break;
	P_FUTEX_OP(CMP_REQUEUE_PI); arg->mask |= SCF_TIMEOUT;			  break;
	P_FUTEX_OP(WAKE_OP);							  break;
	P_FUTEX_OP(LOCK_PI);	    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(UNLOCK_PI);	    arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
	P_FUTEX_OP(TRYLOCK_PI);	    arg->mask |= SCF_VAL3|SCF_UADDR2;		  break;
	P_FUTEX_OP(WAIT_BITSET);    arg->mask |= SCF_UADDR2;			  break;
	P_FUTEX_OP(WAKE_BITSET);    arg->mask |= SCF_UADDR2;			  break;
	P_FUTEX_OP(WAIT_REQUEUE_PI);						  break;
	default: printed = scnprintf(bf, size, "%#x", cmd);			  break;
	}

	if (op & FUTEX_PRIVATE_FLAG)
		printed += scnprintf(bf + printed, size - printed, "|PRIV");

	if (op & FUTEX_CLOCK_REALTIME)
		printed += scnprintf(bf + printed, size - printed, "|CLKRT");

	return printed;
}
 586
 587#define SCA_FUTEX_OP  syscall_arg__scnprintf_futex_op
 588
/*
 * Names for the bpf() syscall cmd argument; the table index equals the
 * command value (no offset).
 */
static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD",
};
static DEFINE_STRARRAY(bpf_cmd);
 
 
 
 
 594
/* epoll_ctl() op names; ops start at 1, hence the offset. */
static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1);

/* {get,set}itimer() which-timer names. */
static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers);

/* keyctl() operation names, indexed by command value. */
static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options);

/* lseek() whence names; DATA/HOLE only where the headers define them. */
static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences);

/* fcntl() command names, indexed by command value. */
static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "F_GETLK64",
	"F_SETLK64", "F_SETLKW64", "F_SETOWN_EX", "F_GETOWN_EX",
	"F_GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 627
/* {get,set}rlimit() resource names, indexed by RLIMIT_* value. */
static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources);

/* sigprocmask() how names. */
static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow);

/* clock_*() clockid names, indexed by CLOCK_* value. */
static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid);

/* socket() address-family names, indexed by AF_* value. */
static const char *socket_families[] = {
	"UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
	"BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
	"SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
	"RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
	"BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
	"ALG", "NFC", "VSOCK",
};
static DEFINE_STRARRAY(socket_families);
 654
 655#ifndef SOCK_TYPE_MASK
 656#define SOCK_TYPE_MASK 0xf
 657#endif
 658
/*
 * Decode a socket() type argument: the low SOCK_TYPE_MASK bits name the
 * socket type, the remaining bits are flags (CLOEXEC, NONBLOCK) which
 * are appended with '|'; unknown leftovers print in hex.
 */
static size_t syscall_arg__scnprintf_socket_type(char *bf, size_t size,
						      struct syscall_arg *arg)
{
	size_t printed;
	int type = arg->val,
	    flags = type & ~SOCK_TYPE_MASK;

	type &= SOCK_TYPE_MASK;
	/*
	 * Can't use a strarray, MIPS may override for ABI reasons.
	 */
	switch (type) {
#define	P_SK_TYPE(n) case SOCK_##n: printed = scnprintf(bf, size, #n); break;
	P_SK_TYPE(STREAM);
	P_SK_TYPE(DGRAM);
	P_SK_TYPE(RAW);
	P_SK_TYPE(RDM);
	P_SK_TYPE(SEQPACKET);
	P_SK_TYPE(DCCP);
	P_SK_TYPE(PACKET);
#undef P_SK_TYPE
	default:
		printed = scnprintf(bf, size, "%#x", type);
	}

/* Append |<n> for each SOCK_<n> flag bit and clear it. */
#define	P_SK_FLAG(n) \
	if (flags & SOCK_##n) { \
		printed += scnprintf(bf + printed, size - printed, "|%s", #n); \
		flags &= ~SOCK_##n; \
	}

	P_SK_FLAG(CLOEXEC);
	P_SK_FLAG(NONBLOCK);
#undef P_SK_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "|%#x", flags);

	return printed;
}
 699
 700#define SCA_SK_TYPE syscall_arg__scnprintf_socket_type
 701
 702#ifndef MSG_PROBE
 703#define MSG_PROBE	     0x10
 704#endif
 705#ifndef MSG_WAITFORONE
 706#define MSG_WAITFORONE	0x10000
 707#endif
 708#ifndef MSG_SENDPAGE_NOTLAST
 709#define MSG_SENDPAGE_NOTLAST 0x20000
 710#endif
 711#ifndef MSG_FASTOPEN
 712#define MSG_FASTOPEN	     0x20000000
 713#endif
 714
/*
 * Decode a send/recv msg flags bitmask into "OOB|PEEK|..." form; 0
 * prints as "NONE" and unknown leftover bits are appended in hex.
 */
static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
					       struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

	if (flags == 0)
		return scnprintf(bf, size, "NONE");
/* Print MSG_<n> by name and clear it so leftovers can be detected. */
#define	P_MSG_FLAG(n) \
	if (flags & MSG_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~MSG_##n; \
	}

	P_MSG_FLAG(OOB);
	P_MSG_FLAG(PEEK);
	P_MSG_FLAG(DONTROUTE);
	P_MSG_FLAG(TRYHARD);
	P_MSG_FLAG(CTRUNC);
	P_MSG_FLAG(PROBE);
	P_MSG_FLAG(TRUNC);
	P_MSG_FLAG(DONTWAIT);
	P_MSG_FLAG(EOR);
	P_MSG_FLAG(WAITALL);
	P_MSG_FLAG(FIN);
	P_MSG_FLAG(SYN);
	P_MSG_FLAG(CONFIRM);
	P_MSG_FLAG(RST);
	P_MSG_FLAG(ERRQUEUE);
	P_MSG_FLAG(NOSIGNAL);
	P_MSG_FLAG(MORE);
	P_MSG_FLAG(WAITFORONE);
	P_MSG_FLAG(SENDPAGE_NOTLAST);
	P_MSG_FLAG(FASTOPEN);
	P_MSG_FLAG(CMSG_CLOEXEC);
#undef P_MSG_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}
 756
 757#define SCA_MSG_FLAGS syscall_arg__scnprintf_msg_flags
 758
/*
 * Decode an access() mode argument: F_OK (0) prints as "F", otherwise
 * the R/W/X bits are concatenated ("RWX"); unknown leftover bits are
 * appended in hex.
 */
static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	size_t printed = 0;
	int mode = arg->val;

	if (mode == F_OK) /* 0 */
		return scnprintf(bf, size, "F");
/* Print the letter for <n>_OK and clear the bit to detect leftovers. */
#define	P_MODE(n) \
	if (mode & n##_OK) { \
		printed += scnprintf(bf + printed, size - printed, "%s", #n); \
		mode &= ~n##_OK; \
	}

	P_MODE(R);
	P_MODE(W);
	P_MODE(X);
#undef P_MODE

	if (mode)
		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);

	return printed;
}
 783
 784#define SCA_ACCMODE syscall_arg__scnprintf_access_mode
 785
 786static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
 787					      struct syscall_arg *arg);
 788
 789#define SCA_FILENAME syscall_arg__scnprintf_filename
 790
/*
 * Decode an open() flags bitmask; 0 prints as "RDONLY" (O_RDONLY == 0)
 * and unknown leftover bits are appended in hex.  Side effect: when
 * O_CREAT is absent, the following 'mode' argument is masked out of the
 * output via arg->mask.
 */
static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
					       struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

	if (!(flags & O_CREAT))
		arg->mask |= 1 << (arg->idx + 1); /* Mask the mode parm */

	if (flags == 0)
		return scnprintf(bf, size, "RDONLY");
/* Print O_<n> by name and clear it so leftovers can be detected. */
#define	P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(APPEND);
	P_FLAG(ASYNC);
	P_FLAG(CLOEXEC);
	P_FLAG(CREAT);
	P_FLAG(DIRECT);
	P_FLAG(DIRECTORY);
	P_FLAG(EXCL);
	P_FLAG(LARGEFILE);
	P_FLAG(NOATIME);
	P_FLAG(NOCTTY);
#ifdef O_NONBLOCK
	P_FLAG(NONBLOCK);
#elif O_NDELAY
	P_FLAG(NDELAY);
#endif
#ifdef O_PATH
	P_FLAG(PATH);
#endif
	P_FLAG(RDWR);
#ifdef O_DSYNC
	/* O_SYNC may include the O_DSYNC bit; report the stronger name. */
	if ((flags & O_SYNC) == O_SYNC)
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC");
	else {
		P_FLAG(DSYNC);
	}
#else
	P_FLAG(SYNC);
#endif
	P_FLAG(TRUNC);
	P_FLAG(WRONLY);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}
 844
 845#define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
 846
/*
 * Decode a perf_event_open() flags bitmask; 0 prints nothing at all
 * and unknown leftover bits are appended in hex.
 */
static size_t syscall_arg__scnprintf_perf_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

	if (flags == 0)
		return 0;

/* Print PERF_FLAG_<n> by name and clear it to detect leftovers. */
#define	P_FLAG(n) \
	if (flags & PERF_FLAG_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~PERF_FLAG_##n; \
	}

	P_FLAG(FD_NO_GROUP);
	P_FLAG(FD_OUTPUT);
	P_FLAG(PID_CGROUP);
	P_FLAG(FD_CLOEXEC);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}
 872
 873#define SCA_PERF_FLAGS syscall_arg__scnprintf_perf_flags
 
 
 
 
 
 874
/*
 * Decode an eventfd2() flags bitmask; 0 prints as "NONE" and unknown
 * leftover bits are appended in hex.
 */
static size_t syscall_arg__scnprintf_eventfd_flags(char *bf, size_t size,
						   struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

	if (flags == 0)
		return scnprintf(bf, size, "NONE");
/* Print EFD_<n> by name and clear it so leftovers can be detected. */
#define	P_FLAG(n) \
	if (flags & EFD_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~EFD_##n; \
	}

	P_FLAG(SEMAPHORE);
	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}
 898
 899#define SCA_EFD_FLAGS syscall_arg__scnprintf_eventfd_flags
 900
/*
 * Decode a pipe2() flags bitmask (CLOEXEC, NONBLOCK); unknown leftover
 * bits are appended in hex.
 */
static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	int printed = 0, flags = arg->val;

/* Print O_<n> by name and clear it so leftovers can be detected. */
#define	P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}
 921
 922#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
 923
/*
 * Print a signal-number argument by name (without the SIG prefix);
 * values without a case — e.g. real-time signals — fall back to hex.
 */
static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg)
{
	int sig = arg->val;

	switch (sig) {
#define	P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, #n)
	P_SIGNUM(HUP);
	P_SIGNUM(INT);
	P_SIGNUM(QUIT);
	P_SIGNUM(ILL);
	P_SIGNUM(TRAP);
	P_SIGNUM(ABRT);
	P_SIGNUM(BUS);
	P_SIGNUM(FPE);
	P_SIGNUM(KILL);
	P_SIGNUM(USR1);
	P_SIGNUM(SEGV);
	P_SIGNUM(USR2);
	P_SIGNUM(PIPE);
	P_SIGNUM(ALRM);
	P_SIGNUM(TERM);
	P_SIGNUM(CHLD);
	P_SIGNUM(CONT);
	P_SIGNUM(STOP);
	P_SIGNUM(TSTP);
	P_SIGNUM(TTIN);
	P_SIGNUM(TTOU);
	P_SIGNUM(URG);
	P_SIGNUM(XCPU);
	P_SIGNUM(XFSZ);
	P_SIGNUM(VTALRM);
	P_SIGNUM(PROF);
	P_SIGNUM(WINCH);
	P_SIGNUM(IO);
	P_SIGNUM(PWR);
	P_SIGNUM(SYS);
#ifdef SIGEMT
	P_SIGNUM(EMT);
#endif
#ifdef SIGSTKFLT
	P_SIGNUM(STKFLT);
#endif
#ifdef SIGSWI
	P_SIGNUM(SWI);
#endif
	default: break;
	}

	return scnprintf(bf, size, "%#x", sig);
}
 974
 975#define SCA_SIGNUM syscall_arg__scnprintf_signum
 976
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
 */
#define TCGETS		0x5401

/*
 * tty ioctl request names, indexed from TCGETS (0x5401); the designated
 * initializers ([0x27], [0x50], [0x60]) skip gaps in the request-number
 * space.
 */
static const char *tioctls[] = {
	"TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
	"TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL",
	"TIOCSCTTY", "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI",
	"TIOCGWINSZ", "TIOCSWINSZ", "TIOCMGET", "TIOCMBIS", "TIOCMBIC",
	"TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR", "FIONREAD", "TIOCLINUX",
	"TIOCCONS", "TIOCGSERIAL", "TIOCSSERIAL", "TIOCPKT", "FIONBIO",
	"TIOCNOTTY", "TIOCSETD", "TIOCGETD", "TCSBRKP", [0x27] = "TIOCSBRK",
	"TIOCCBRK", "TIOCGSID", "TCGETS2", "TCSETS2", "TCSETSW2", "TCSETSF2",
	"TIOCGRS485", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK",
	"TIOCGDEV||TCGETX", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG",
	"TIOCVHANGUP", "TIOCGPKT", "TIOCGPTLCK", "TIOCGEXCL",
	[0x50] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG",
	"TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS",
	"TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI",
	"TIOCMIWAIT", "TIOCGICOUNT", [0x60] = "FIOQSIZE",
};

static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
#endif /* defined(__i386__) || defined(__x86_64__) */
1003
/*
 * Shorthand for syscall_fmt entries: print argument slot 'arg' via the
 * SCA_STRARRAY beautifier using strarray__<array> as its parameter.
 */
#define STRARRAY(arg, name, array) \
	  .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
	  .arg_parm	 = { [arg] = &strarray__##array, }
1007
1008static struct syscall_fmt {
1009	const char *name;
1010	const char *alias;
1011	size_t	   (*arg_scnprintf[6])(char *bf, size_t size, struct syscall_arg *arg);
1012	void	   *arg_parm[6];
1013	bool	   errmsg;
1014	bool	   timeout;
1015	bool	   hexret;
1016} syscall_fmts[] = {
1017	{ .name	    = "access",	    .errmsg = true,
1018	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */
1019			     [1] = SCA_ACCMODE,  /* mode */ }, },
1020	{ .name	    = "arch_prctl", .errmsg = true, .alias = "prctl", },
1021	{ .name	    = "bpf",	    .errmsg = true, STRARRAY(0, cmd, bpf_cmd), },
1022	{ .name	    = "brk",	    .hexret = true,
1023	  .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
1024	{ .name	    = "chdir",	    .errmsg = true,
1025	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1026	{ .name	    = "chmod",	    .errmsg = true,
1027	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1028	{ .name	    = "chroot",	    .errmsg = true,
1029	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1030	{ .name     = "clock_gettime",  .errmsg = true, STRARRAY(0, clk_id, clockid), },
1031	{ .name	    = "close",	    .errmsg = true,
1032	  .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
1033	{ .name	    = "connect",    .errmsg = true, },
1034	{ .name	    = "creat",	    .errmsg = true,
1035	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1036	{ .name	    = "dup",	    .errmsg = true,
1037	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1038	{ .name	    = "dup2",	    .errmsg = true,
1039	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1040	{ .name	    = "dup3",	    .errmsg = true,
1041	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1042	{ .name	    = "epoll_ctl",  .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
1043	{ .name	    = "eventfd2",   .errmsg = true,
1044	  .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
1045	{ .name	    = "faccessat",  .errmsg = true,
1046	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
1047			     [1] = SCA_FILENAME, /* filename */ }, },
1048	{ .name	    = "fadvise64",  .errmsg = true,
1049	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1050	{ .name	    = "fallocate",  .errmsg = true,
1051	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1052	{ .name	    = "fchdir",	    .errmsg = true,
1053	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1054	{ .name	    = "fchmod",	    .errmsg = true,
1055	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1056	{ .name	    = "fchmodat",   .errmsg = true,
1057	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
1058			     [1] = SCA_FILENAME, /* filename */ }, },
1059	{ .name	    = "fchown",	    .errmsg = true,
1060	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1061	{ .name	    = "fchownat",   .errmsg = true,
1062	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
1063			     [1] = SCA_FILENAME, /* filename */ }, },
1064	{ .name	    = "fcntl",	    .errmsg = true,
1065	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
1066			     [1] = SCA_STRARRAY, /* cmd */ },
1067	  .arg_parm	 = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
1068	{ .name	    = "fdatasync",  .errmsg = true,
1069	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1070	{ .name	    = "flock",	    .errmsg = true,
1071	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
1072			     [1] = SCA_FLOCK, /* cmd */ }, },
1073	{ .name	    = "fsetxattr",  .errmsg = true,
1074	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1075	{ .name	    = "fstat",	    .errmsg = true, .alias = "newfstat",
1076	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1077	{ .name	    = "fstatat",    .errmsg = true, .alias = "newfstatat",
1078	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
1079			     [1] = SCA_FILENAME, /* filename */ }, },
1080	{ .name	    = "fstatfs",    .errmsg = true,
1081	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1082	{ .name	    = "fsync",    .errmsg = true,
1083	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1084	{ .name	    = "ftruncate", .errmsg = true,
1085	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1086	{ .name	    = "futex",	    .errmsg = true,
1087	  .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
1088	{ .name	    = "futimesat", .errmsg = true,
1089	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
1090			     [1] = SCA_FILENAME, /* filename */ }, },
1091	{ .name	    = "getdents",   .errmsg = true,
1092	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1093	{ .name	    = "getdents64", .errmsg = true,
1094	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1095	{ .name	    = "getitimer",  .errmsg = true, STRARRAY(0, which, itimers), },
1096	{ .name	    = "getrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
1097	{ .name	    = "getxattr",    .errmsg = true,
1098	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1099	{ .name	    = "inotify_add_watch",	    .errmsg = true,
1100	  .arg_scnprintf = { [1] = SCA_FILENAME, /* pathname */ }, },
1101	{ .name	    = "ioctl",	    .errmsg = true,
1102	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
1103#if defined(__i386__) || defined(__x86_64__)
1104/*
1105 * FIXME: Make this available to all arches.
1106 */
1107			     [1] = SCA_STRHEXARRAY, /* cmd */
1108			     [2] = SCA_HEX, /* arg */ },
1109	  .arg_parm	 = { [1] = &strarray__tioctls, /* cmd */ }, },
1110#else
1111			     [2] = SCA_HEX, /* arg */ }, },
1112#endif
1113	{ .name	    = "keyctl",	    .errmsg = true, STRARRAY(0, option, keyctl_options), },
1114	{ .name	    = "kill",	    .errmsg = true,
1115	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
1116	{ .name	    = "lchown",    .errmsg = true,
1117	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1118	{ .name	    = "lgetxattr",  .errmsg = true,
1119	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1120	{ .name	    = "linkat",	    .errmsg = true,
1121	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
1122	{ .name	    = "listxattr",  .errmsg = true,
1123	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1124	{ .name	    = "llistxattr", .errmsg = true,
1125	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1126	{ .name	    = "lremovexattr",  .errmsg = true,
1127	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1128	{ .name	    = "lseek",	    .errmsg = true,
1129	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
1130			     [2] = SCA_STRARRAY, /* whence */ },
1131	  .arg_parm	 = { [2] = &strarray__whences, /* whence */ }, },
1132	{ .name	    = "lsetxattr",  .errmsg = true,
1133	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1134	{ .name	    = "lstat",	    .errmsg = true, .alias = "newlstat",
1135	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1136	{ .name	    = "lsxattr",    .errmsg = true,
1137	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1138	{ .name     = "madvise",    .errmsg = true,
1139	  .arg_scnprintf = { [0] = SCA_HEX,	 /* start */
1140			     [2] = SCA_MADV_BHV, /* behavior */ }, },
1141	{ .name	    = "mkdir",    .errmsg = true,
1142	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1143	{ .name	    = "mkdirat",    .errmsg = true,
1144	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
1145			     [1] = SCA_FILENAME, /* pathname */ }, },
1146	{ .name	    = "mknod",      .errmsg = true,
1147	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1148	{ .name	    = "mknodat",    .errmsg = true,
1149	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
1150			     [1] = SCA_FILENAME, /* filename */ }, },
1151	{ .name	    = "mlock",	    .errmsg = true,
1152	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
1153	{ .name	    = "mlockall",   .errmsg = true,
1154	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
1155	{ .name	    = "mmap",	    .hexret = true,
1156	  .arg_scnprintf = { [0] = SCA_HEX,	  /* addr */
1157			     [2] = SCA_MMAP_PROT, /* prot */
1158			     [3] = SCA_MMAP_FLAGS, /* flags */
1159			     [4] = SCA_FD, 	  /* fd */ }, },
1160	{ .name	    = "mprotect",   .errmsg = true,
1161	  .arg_scnprintf = { [0] = SCA_HEX, /* start */
1162			     [2] = SCA_MMAP_PROT, /* prot */ }, },
1163	{ .name	    = "mq_unlink", .errmsg = true,
1164	  .arg_scnprintf = { [0] = SCA_FILENAME, /* u_name */ }, },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1165	{ .name	    = "mremap",	    .hexret = true,
1166	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */
1167			     [3] = SCA_MREMAP_FLAGS, /* flags */
1168			     [4] = SCA_HEX, /* new_addr */ }, },
1169	{ .name	    = "munlock",    .errmsg = true,
1170	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
1171	{ .name	    = "munmap",	    .errmsg = true,
1172	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
1173	{ .name	    = "name_to_handle_at", .errmsg = true,
1174	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
1175	{ .name	    = "newfstatat", .errmsg = true,
1176	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
1177			     [1] = SCA_FILENAME, /* filename */ }, },
1178	{ .name	    = "open",	    .errmsg = true,
1179	  .arg_scnprintf = { [0] = SCA_FILENAME,   /* filename */
1180			     [1] = SCA_OPEN_FLAGS, /* flags */ }, },
1181	{ .name	    = "open_by_handle_at", .errmsg = true,
1182	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
1183			     [2] = SCA_OPEN_FLAGS, /* flags */ }, },
1184	{ .name	    = "openat",	    .errmsg = true,
1185	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
1186			     [1] = SCA_FILENAME, /* filename */
1187			     [2] = SCA_OPEN_FLAGS, /* flags */ }, },
1188	{ .name	    = "perf_event_open", .errmsg = true,
1189	  .arg_scnprintf = { [1] = SCA_INT, /* pid */
1190			     [2] = SCA_INT, /* cpu */
1191			     [3] = SCA_FD,  /* group_fd */
1192			     [4] = SCA_PERF_FLAGS,  /* flags */ }, },
1193	{ .name	    = "pipe2",	    .errmsg = true,
1194	  .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, },
1195	{ .name	    = "poll",	    .errmsg = true, .timeout = true, },
1196	{ .name	    = "ppoll",	    .errmsg = true, .timeout = true, },
1197	{ .name	    = "pread",	    .errmsg = true, .alias = "pread64",
1198	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1199	{ .name	    = "preadv",	    .errmsg = true, .alias = "pread",
1200	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1201	{ .name	    = "prlimit64",  .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
1202	{ .name	    = "pwrite",	    .errmsg = true, .alias = "pwrite64",
1203	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1204	{ .name	    = "pwritev",    .errmsg = true,
1205	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1206	{ .name	    = "read",	    .errmsg = true,
1207	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1208	{ .name	    = "readlink",   .errmsg = true,
1209	  .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
1210	{ .name	    = "readlinkat", .errmsg = true,
1211	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
1212			     [1] = SCA_FILENAME, /* pathname */ }, },
1213	{ .name	    = "readv",	    .errmsg = true,
1214	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1215	{ .name	    = "recvfrom",   .errmsg = true,
1216	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
1217			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
1218	{ .name	    = "recvmmsg",   .errmsg = true,
1219	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
1220			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
1221	{ .name	    = "recvmsg",    .errmsg = true,
1222	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
1223			     [2] = SCA_MSG_FLAGS, /* flags */ }, },
1224	{ .name	    = "removexattr", .errmsg = true,
1225	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1226	{ .name	    = "renameat",   .errmsg = true,
1227	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
1228	{ .name	    = "rmdir",    .errmsg = true,
1229	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1230	{ .name	    = "rt_sigaction", .errmsg = true,
1231	  .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
1232	{ .name	    = "rt_sigprocmask",  .errmsg = true, STRARRAY(0, how, sighow), },
1233	{ .name	    = "rt_sigqueueinfo", .errmsg = true,
1234	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
1235	{ .name	    = "rt_tgsigqueueinfo", .errmsg = true,
1236	  .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
1237	{ .name	    = "select",	    .errmsg = true, .timeout = true, },
1238	{ .name	    = "sendmmsg",    .errmsg = true,
1239	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
1240			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
1241	{ .name	    = "sendmsg",    .errmsg = true,
1242	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
1243			     [2] = SCA_MSG_FLAGS, /* flags */ }, },
1244	{ .name	    = "sendto",	    .errmsg = true,
1245	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
1246			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
1247	{ .name	    = "setitimer",  .errmsg = true, STRARRAY(0, which, itimers), },
1248	{ .name	    = "setrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
1249	{ .name	    = "setxattr",   .errmsg = true,
1250	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1251	{ .name	    = "shutdown",   .errmsg = true,
1252	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1253	{ .name	    = "socket",	    .errmsg = true,
1254	  .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
1255			     [1] = SCA_SK_TYPE, /* type */ },
1256	  .arg_parm	 = { [0] = &strarray__socket_families, /* family */ }, },
1257	{ .name	    = "socketpair", .errmsg = true,
1258	  .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
1259			     [1] = SCA_SK_TYPE, /* type */ },
1260	  .arg_parm	 = { [0] = &strarray__socket_families, /* family */ }, },
1261	{ .name	    = "stat",	    .errmsg = true, .alias = "newstat",
1262	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1263	{ .name	    = "statfs",	    .errmsg = true,
1264	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
1265	{ .name	    = "swapoff",    .errmsg = true,
1266	  .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
1267	{ .name	    = "swapon",	    .errmsg = true,
1268	  .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
1269	{ .name	    = "symlinkat",  .errmsg = true,
1270	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
1271	{ .name	    = "tgkill",	    .errmsg = true,
1272	  .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
1273	{ .name	    = "tkill",	    .errmsg = true,
1274	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
1275	{ .name	    = "truncate",   .errmsg = true,
1276	  .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
1277	{ .name	    = "uname",	    .errmsg = true, .alias = "newuname", },
1278	{ .name	    = "unlinkat",   .errmsg = true,
1279	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
1280			     [1] = SCA_FILENAME, /* pathname */ }, },
1281	{ .name	    = "utime",  .errmsg = true,
1282	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1283	{ .name	    = "utimensat",  .errmsg = true,
1284	  .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */
1285			     [1] = SCA_FILENAME, /* filename */ }, },
1286	{ .name	    = "utimes",  .errmsg = true,
1287	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
1288	{ .name	    = "vmsplice",  .errmsg = true,
1289	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1290	{ .name	    = "write",	    .errmsg = true,
1291	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1292	{ .name	    = "writev",	    .errmsg = true,
1293	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
1294};
1295
1296static int syscall_fmt__cmp(const void *name, const void *fmtp)
1297{
1298	const struct syscall_fmt *fmt = fmtp;
1299	return strcmp(name, fmt->name);
1300}
1301
 
 
 
 
 
1302static struct syscall_fmt *syscall_fmt__find(const char *name)
1303{
1304	const int nmemb = ARRAY_SIZE(syscall_fmts);
1305	return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1306}
1307
 
 
 
 
 
 
/*
 * Per-syscall state built lazily by trace__read_syscall_info() and cached
 * in trace->syscalls.table[], indexed by syscall id.
 */
struct syscall {
	struct event_format *tp_format;	/* syscalls:sys_enter_<name> tracepoint format */
	int		    nr_args;	/* number of entries in 'args' (syscall nr field dropped) */
	struct format_field *args;	/* first argument field of tp_format */
	const char	    *name;	/* from audit_syscall_to_name() */
	bool		    is_exit;	/* true for exit/exit_group: they never return */
	struct syscall_fmt  *fmt;	/* matching syscall_fmts[] entry, may be NULL */
	size_t		    (**arg_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);	/* per-arg pretty-printers */
	void		    **arg_parm;	/* per-arg opaque parm (e.g. strarray) passed to the printer */
};
1318
1319static size_t fprintf_duration(unsigned long t, FILE *fp)
 
 
 
 
 
 
 
1320{
1321	double duration = (double)t / NSEC_PER_MSEC;
1322	size_t printed = fprintf(fp, "(");
1323
1324	if (duration >= 1.0)
 
 
1325		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
1326	else if (duration >= 0.01)
1327		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
1328	else
1329		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
1330	return printed + fprintf(fp, "): ");
1331}
1332
1333/**
1334 * filename.ptr: The filename char pointer that will be vfs_getname'd
1335 * filename.entry_str_pos: Where to insert the string translated from
1336 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 
 
1337 */
1338struct thread_trace {
1339	u64		  entry_time;
1340	u64		  exit_time;
1341	bool		  entry_pending;
1342	unsigned long	  nr_events;
1343	unsigned long	  pfmaj, pfmin;
1344	char		  *entry_str;
1345	double		  runtime_ms;
 
1346        struct {
1347		unsigned long ptr;
1348		short int     entry_str_pos;
1349		bool	      pending_open;
1350		unsigned int  namelen;
1351		char	      *name;
1352	} filename;
1353	struct {
1354		int	  max;
1355		char	  **table;
1356	} paths;
1357
1358	struct intlist *syscall_stats;
1359};
1360
1361static struct thread_trace *thread_trace__new(void)
1362{
1363	struct thread_trace *ttrace =  zalloc(sizeof(struct thread_trace));
1364
1365	if (ttrace)
1366		ttrace->paths.max = -1;
1367
1368	ttrace->syscall_stats = intlist__new(NULL);
1369
1370	return ttrace;
1371}
1372
1373static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
1374{
1375	struct thread_trace *ttrace;
1376
1377	if (thread == NULL)
1378		goto fail;
1379
1380	if (thread__priv(thread) == NULL)
1381		thread__set_priv(thread, thread_trace__new());
1382
1383	if (thread__priv(thread) == NULL)
1384		goto fail;
1385
1386	ttrace = thread__priv(thread);
1387	++ttrace->nr_events;
1388
1389	return ttrace;
1390fail:
1391	color_fprintf(fp, PERF_COLOR_RED,
1392		      "WARNING: not enough memory, dropping samples!\n");
1393	return NULL;
1394}
1395
 
 
 
 
 
 
 
 
 
/* bits for trace->trace_pgfaults: which page-fault flavors to trace */
#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

/* size of the per-thread buffer holding the formatted sys_enter line */
static const size_t trace__entry_str_size = 2048;
1400
/* Global state for one 'perf trace' session. */
struct trace {
	struct perf_tool	tool;
	struct {
		int		machine;	/* audit machine id, for audit_syscall_to_name() et al */
		int		open_id;
	}			audit;
	struct {
		int		max;		/* highest syscall id in 'table', -1 while unallocated */
		struct syscall  *table;		/* id -> struct syscall, grown lazily */
		struct {
			struct perf_evsel *sys_enter,
					  *sys_exit;
		}		events;
	} syscalls;
	struct record_opts	opts;
	struct perf_evlist	*evlist;
	struct machine		*host;
	struct thread		*current;	/* thread of the last sys_enter seen */
	u64			base_time;	/* first timestamp, subtracted when printing */
	FILE			*output;
	unsigned long		nr_events;
	struct strlist		*ev_qualifier;	/* syscall names from -e, validated into the ids below */
	struct {
		size_t		nr;
		int		*entries;
	}			ev_qualifier_ids;
	struct intlist		*tid_list;
	struct intlist		*pid_list;
	struct {
		size_t		nr;
		pid_t		*entries;
	}			filter_pids;
	double			duration_filter;	/* ms; see trace__filter_duration() */
	double			runtime_ms;
	struct {
		u64		vfs_getname,	/* filenames resolved via the vfs_getname probe */
				proc_getname;	/* filenames resolved by reading /proc */
	} stats;
	bool			not_ev_qualifier;	/* ev_qualifier is an exclude list */
	bool			live;
	bool			full_time;
	bool			sched;
	bool			multiple_threads;
	bool			summary;
	bool			summary_only;
	bool			show_comm;
	bool			show_tool_stats;
	bool			trace_syscalls;
	bool			force;
	bool			vfs_getname;	/* vfs_getname probe is in place */
	int			trace_pgfaults;	/* TRACE_PFMAJ | TRACE_PFMIN */
};
1453
1454static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
1455{
1456	struct thread_trace *ttrace = thread__priv(thread);
 
1457
1458	if (fd > ttrace->paths.max) {
1459		char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *));
1460
1461		if (npath == NULL)
1462			return -1;
1463
1464		if (ttrace->paths.max != -1) {
1465			memset(npath + ttrace->paths.max + 1, 0,
1466			       (fd - ttrace->paths.max) * sizeof(char *));
1467		} else {
1468			memset(npath, 0, (fd + 1) * sizeof(char *));
1469		}
1470
1471		ttrace->paths.table = npath;
1472		ttrace->paths.max   = fd;
1473	}
1474
1475	ttrace->paths.table[fd] = strdup(pathname);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1476
1477	return ttrace->paths.table[fd] != NULL ? 0 : -1;
1478}
1479
1480static int thread__read_fd_path(struct thread *thread, int fd)
1481{
1482	char linkname[PATH_MAX], pathname[PATH_MAX];
1483	struct stat st;
1484	int ret;
1485
1486	if (thread->pid_ == thread->tid) {
1487		scnprintf(linkname, sizeof(linkname),
1488			  "/proc/%d/fd/%d", thread->pid_, fd);
1489	} else {
1490		scnprintf(linkname, sizeof(linkname),
1491			  "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
1492	}
1493
1494	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
1495		return -1;
1496
1497	ret = readlink(linkname, pathname, sizeof(pathname));
1498
1499	if (ret < 0 || ret > st.st_size)
1500		return -1;
1501
1502	pathname[ret] = '\0';
1503	return trace__set_fd_pathname(thread, fd, pathname);
1504}
1505
1506static const char *thread__fd_path(struct thread *thread, int fd,
1507				   struct trace *trace)
1508{
1509	struct thread_trace *ttrace = thread__priv(thread);
1510
1511	if (ttrace == NULL)
1512		return NULL;
1513
1514	if (fd < 0)
1515		return NULL;
1516
1517	if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) {
1518		if (!trace->live)
1519			return NULL;
1520		++trace->stats.proc_getname;
1521		if (thread__read_fd_path(thread, fd))
1522			return NULL;
1523	}
1524
1525	return ttrace->paths.table[fd];
1526}
1527
1528static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
1529					struct syscall_arg *arg)
1530{
1531	int fd = arg->val;
1532	size_t printed = scnprintf(bf, size, "%d", fd);
1533	const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1534
1535	if (path)
1536		printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1537
1538	return printed;
1539}
1540
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1541static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
1542					      struct syscall_arg *arg)
1543{
1544	int fd = arg->val;
1545	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
1546	struct thread_trace *ttrace = thread__priv(arg->thread);
1547
1548	if (ttrace && fd >= 0 && fd <= ttrace->paths.max)
1549		zfree(&ttrace->paths.table[fd]);
1550
1551	return printed;
1552}
1553
1554static void thread__set_filename_pos(struct thread *thread, const char *bf,
1555				     unsigned long ptr)
1556{
1557	struct thread_trace *ttrace = thread__priv(thread);
1558
1559	ttrace->filename.ptr = ptr;
1560	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1561}
1562
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1563static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1564					      struct syscall_arg *arg)
1565{
1566	unsigned long ptr = arg->val;
1567
 
 
 
1568	if (!arg->trace->vfs_getname)
1569		return scnprintf(bf, size, "%#x", ptr);
1570
1571	thread__set_filename_pos(arg->thread, bf, ptr);
1572	return 0;
1573}
1574
1575static bool trace__filter_duration(struct trace *trace, double t)
1576{
1577	return t < (trace->duration_filter * NSEC_PER_MSEC);
1578}
1579
1580static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1581{
1582	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1583
1584	return fprintf(fp, "%10.3f ", ts);
1585}
1586
/*
 * Termination flags set from the signal handler and polled by the main
 * event loop.  Fix: objects accessed from an asynchronous signal handler
 * must be volatile sig_atomic_t (C11 7.14.1, CERT SIG31-C); a plain
 * non-volatile bool read may legally be hoisted out of the wait loop.
 */
static volatile sig_atomic_t done = false;
static volatile sig_atomic_t interrupted = false;

/* SIGINT/SIGTERM handler: request shutdown, noting whether it was ^C. */
static void sig_handler(int sig)
{
	done = true;
	interrupted = sig == SIGINT;
}
1595
1596static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1597					u64 duration, u64 tstamp, FILE *fp)
1598{
1599	size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
1600	printed += fprintf_duration(duration, fp);
 
 
 
 
 
1601
1602	if (trace->multiple_threads) {
1603		if (trace->show_comm)
1604			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1605		printed += fprintf(fp, "%d ", thread->tid);
1606	}
1607
1608	return printed;
1609}
1610
 
 
 
 
 
 
 
 
 
 
 
 
1611static int trace__process_event(struct trace *trace, struct machine *machine,
1612				union perf_event *event, struct perf_sample *sample)
1613{
1614	int ret = 0;
1615
1616	switch (event->header.type) {
1617	case PERF_RECORD_LOST:
1618		color_fprintf(trace->output, PERF_COLOR_RED,
1619			      "LOST %" PRIu64 " events!\n", event->lost.lost);
1620		ret = machine__process_lost_event(machine, event, sample);
 
1621	default:
1622		ret = machine__process_event(machine, event, sample);
1623		break;
1624	}
1625
1626	return ret;
1627}
1628
1629static int trace__tool_process(struct perf_tool *tool,
1630			       union perf_event *event,
1631			       struct perf_sample *sample,
1632			       struct machine *machine)
1633{
1634	struct trace *trace = container_of(tool, struct trace, tool);
1635	return trace__process_event(trace, machine, event, sample);
1636}
1637
1638static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1639{
1640	int err = symbol__init(NULL);
1641
1642	if (err)
1643		return err;
1644
1645	trace->host = machine__new_host();
1646	if (trace->host == NULL)
1647		return -ENOMEM;
1648
1649	if (trace_event__register_resolver(trace->host, machine__resolve_kernel_addr) < 0)
1650		return -errno;
 
1651
1652	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1653					    evlist->threads, trace__tool_process, false,
1654					    trace->opts.proc_map_timeout);
 
1655	if (err)
1656		symbol__exit();
1657
1658	return err;
1659}
1660
1661static int syscall__set_arg_fmts(struct syscall *sc)
1662{
1663	struct format_field *field;
1664	int idx = 0;
1665
1666	sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *));
1667	if (sc->arg_scnprintf == NULL)
1668		return -1;
1669
1670	if (sc->fmt)
1671		sc->arg_parm = sc->fmt->arg_parm;
 
1672
1673	for (field = sc->args; field; field = field->next) {
1674		if (sc->fmt && sc->fmt->arg_scnprintf[idx])
1675			sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
1676		else if (field->flags & FIELD_IS_POINTER)
1677			sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
1678		++idx;
 
 
 
 
1679	}
1680
 
1681	return 0;
1682}
1683
/*
 * Lazily fill in trace->syscalls.table[id]: resolve the syscall name via
 * audit, look up its syscall_fmts[] entry, attach the matching
 * syscalls:sys_enter_* tracepoint format and pick the per-argument
 * pretty-printers.  Grows the table as needed.
 *
 * Returns 0 on success, -1 when the id is unknown, allocation fails or no
 * tracepoint format can be found (neither under the canonical name nor
 * the alias).
 */
static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = audit_syscall_to_name(id, trace->audit.machine);

	if (name == NULL)
		return -1;

	/* grow the id-indexed table so that 'id' fits */
	if (id > trace->syscalls.max) {
		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (nsyscalls == NULL)
			return -1;

		/* zero the newly added entries (or everything on first alloc) */
		if (trace->syscalls.max != -1) {
			memset(nsyscalls + trace->syscalls.max + 1, 0,
			       (id - trace->syscalls.max) * sizeof(*sc));
		} else {
			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
		}

		trace->syscalls.table = nsyscalls;
		trace->syscalls.max   = id;
	}

	sc = trace->syscalls.table + id;
	sc->name = name;

	sc->fmt  = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	/* some syscalls have differently-named tracepoints, retry via alias */
	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	if (IS_ERR(sc->tp_format))
		return -1;

	sc->args = sc->tp_format->format.fields;
	sc->nr_args = sc->tp_format->format.nr_fields;
	/*
	 * The first field may be '__syscall_nr' or 'nr' (older kernels),
	 * which holds the syscall number and is not a real argument, so
	 * skip it.  Kernels that don't emit it need no adjustment.
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;
		--sc->nr_args;
	}

	/* exit/exit_group never return, the exit side needs special casing */
	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");

	return syscall__set_arg_fmts(sc);
}
1742
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1743static int trace__validate_ev_qualifier(struct trace *trace)
1744{
1745	int err = 0, i;
 
1746	struct str_node *pos;
 
1747
1748	trace->ev_qualifier_ids.nr = strlist__nr_entries(trace->ev_qualifier);
1749	trace->ev_qualifier_ids.entries = malloc(trace->ev_qualifier_ids.nr *
1750						 sizeof(trace->ev_qualifier_ids.entries[0]));
1751
1752	if (trace->ev_qualifier_ids.entries == NULL) {
1753		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
1754		       trace->output);
1755		err = -EINVAL;
1756		goto out;
1757	}
1758
1759	i = 0;
1760
1761	strlist__for_each(pos, trace->ev_qualifier) {
1762		const char *sc = pos->s;
1763		int id = audit_name_to_syscall(sc, trace->audit.machine);
1764
1765		if (id < 0) {
1766			if (err == 0) {
1767				fputs("Error:\tInvalid syscall ", trace->output);
1768				err = -EINVAL;
 
 
 
 
1769			} else {
1770				fputs(", ", trace->output);
1771			}
1772
1773			fputs(sc, trace->output);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1774		}
1775
1776		trace->ev_qualifier_ids.entries[i++] = id;
1777	}
1778
1779	if (err < 0) {
1780		fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
1781		      "\nHint:\tand: 'man syscalls'\n", trace->output);
1782		zfree(&trace->ev_qualifier_ids.entries);
1783		trace->ev_qualifier_ids.nr = 0;
1784	}
1785out:
 
 
1786	return err;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1787}
1788
1789/*
1790 * args is to be interpreted as a series of longs but we need to handle
1791 * 8-byte unaligned accesses. args points to raw_data within the event
1792 * and raw_data is guaranteed to be 8-byte unaligned because it is
1793 * preceded by raw_size which is a u32. So we need to copy args to a temp
1794 * variable to read it. Most notably this avoids extended load instructions
1795 * on unaligned addresses
1796 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1797
1798static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
1799				      unsigned char *args, struct trace *trace,
1800				      struct thread *thread)
1801{
1802	size_t printed = 0;
1803	unsigned char *p;
1804	unsigned long val;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1805
1806	if (sc->args != NULL) {
1807		struct format_field *field;
1808		u8 bit = 1;
1809		struct syscall_arg arg = {
1810			.idx	= 0,
1811			.mask	= 0,
1812			.trace  = trace,
1813			.thread = thread,
1814		};
1815
1816		for (field = sc->args; field;
1817		     field = field->next, ++arg.idx, bit <<= 1) {
1818			if (arg.mask & bit)
1819				continue;
1820
1821			/* special care for unaligned accesses */
1822			p = args + sizeof(unsigned long) * arg.idx;
1823			memcpy(&val, p, sizeof(val));
 
 
 
 
1824
1825			/*
1826 			 * Suppress this argument if its value is zero and
1827 			 * and we don't have a string associated in an
1828 			 * strarray for it.
1829 			 */
1830			if (val == 0 &&
1831			    !(sc->arg_scnprintf &&
1832			      sc->arg_scnprintf[arg.idx] == SCA_STRARRAY &&
1833			      sc->arg_parm[arg.idx]))
 
 
 
1834				continue;
1835
1836			printed += scnprintf(bf + printed, size - printed,
1837					     "%s%s: ", printed ? ", " : "", field->name);
1838			if (sc->arg_scnprintf && sc->arg_scnprintf[arg.idx]) {
1839				arg.val = val;
1840				if (sc->arg_parm)
1841					arg.parm = sc->arg_parm[arg.idx];
1842				printed += sc->arg_scnprintf[arg.idx](bf + printed,
1843								      size - printed, &arg);
1844			} else {
1845				printed += scnprintf(bf + printed, size - printed,
1846						     "%ld", val);
1847			}
1848		}
1849	} else {
1850		int i = 0;
1851
1852		while (i < 6) {
1853			/* special care for unaligned accesses */
1854			p = args + sizeof(unsigned long) * i;
1855			memcpy(&val, p, sizeof(val));
1856			printed += scnprintf(bf + printed, size - printed,
1857					     "%sarg%d: %ld",
1858					     printed ? ", " : "", i, val);
1859			++i;
 
 
 
 
 
 
 
 
 
 
 
 
1860		}
1861	}
1862
1863	return printed;
1864}
1865
/* Signature of the per-tracepoint sample handlers (sys_enter, sys_exit, ...). */
typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
				  union perf_event *event,
				  struct perf_sample *sample);
1869
/*
 * Return the cached struct syscall for 'id', reading it in on first use
 * via trace__read_syscall_info().  Returns NULL for invalid ids or when
 * the syscall's information can't be read (reported under -v).
 */
static struct syscall *trace__syscall_info(struct trace *trace,
					   struct perf_evsel *evsel, int id)
{

	if (id < 0) {

		/*
		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
		 * before that, leaving at a higher verbosity level till that is
		 * explained. Reproduced with plain ftrace with:
		 *
		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
		 * grep "NR -1 " /t/trace_pipe
		 *
		 * After generating some load on the machine.
 		 */
		if (verbose > 1) {
			static u64 n;	/* running count of skipped invalid-id samples */
			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
				id, perf_evsel__name(evsel), ++n);
		}
		return NULL;
	}

	/* fill the table slot on first sight of this syscall */
	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
	    trace__read_syscall_info(trace, id))
		goto out_cant_read;

	/* re-check: trace__read_syscall_info() may have failed to grow the table */
	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
		goto out_cant_read;

	return &trace->syscalls.table[id];

out_cant_read:
	if (verbose) {
		fprintf(trace->output, "Problems reading syscall %d", id);
		if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
		fputs(" information\n", trace->output);
	}
	return NULL;
}
1912
1913static void thread__update_stats(struct thread_trace *ttrace,
1914				 int id, struct perf_sample *sample)
 
 
 
 
 
 
 
1915{
1916	struct int_node *inode;
1917	struct stats *stats;
1918	u64 duration = 0;
1919
1920	inode = intlist__findnew(ttrace->syscall_stats, id);
1921	if (inode == NULL)
1922		return;
1923
1924	stats = inode->priv;
1925	if (stats == NULL) {
1926		stats = malloc(sizeof(struct stats));
1927		if (stats == NULL)
1928			return;
1929		init_stats(stats);
 
1930		inode->priv = stats;
1931	}
1932
1933	if (ttrace->entry_time && sample->time > ttrace->entry_time)
1934		duration = sample->time - ttrace->entry_time;
1935
1936	update_stats(stats, duration);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1937}
1938
1939static int trace__printf_interrupted_entry(struct trace *trace, struct perf_sample *sample)
1940{
1941	struct thread_trace *ttrace;
1942	u64 duration;
1943	size_t printed;
 
1944
1945	if (trace->current == NULL)
1946		return 0;
1947
1948	ttrace = thread__priv(trace->current);
1949
1950	if (!ttrace->entry_pending)
1951		return 0;
1952
1953	duration = sample->time - ttrace->entry_time;
 
 
 
 
 
 
1954
1955	printed  = trace__fprintf_entry_head(trace, trace->current, duration, sample->time, trace->output);
1956	printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
1957	ttrace->entry_pending = false;
 
1958
1959	return printed;
1960}
1961
/*
 * Handler for the raw_syscalls:sys_enter tracepoint.
 *
 * Formats "<name>(<args>" into the per-thread entry_str buffer.  For
 * syscalls that never return (sc->is_exit) the line is printed right away;
 * otherwise it is kept pending until trace__sys_exit() completes it.
 * Returns 0 on success, -1 on failure.
 */
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	size_t printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	/* findnew grabs a reference; released via thread__put() at out_put. */
	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	/* Lazily allocate the per-thread entry line buffer. */
	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	/* Flush any still-pending entry of the previously current thread. */
	if (!trace->summary_only)
		trace__printf_interrupted_entry(trace, sample);

	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, trace, thread);

	if (sc->is_exit) {
		/* No matching sys_exit will come: emit the line now. */
		if (!trace->duration_filter && !trace->summary_only) {
			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
			fprintf(trace->output, "%-70s\n", ttrace->entry_str);
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	/* Track the most recently seen thread, moving the held reference. */
	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
2020
/*
 * Handler for the raw_syscalls:sys_exit tracepoint.
 *
 * Completes the line started by trace__sys_enter() (or prints a
 * "continued" marker when the entry wasn't seen), formats the return
 * value according to sc->fmt, and updates per-thread stats/fd paths.
 * Returns 0 on success, -1 on failure.
 */
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
			   union perf_event *event __maybe_unused,
			   struct perf_sample *sample)
{
	long ret;
	u64 duration = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	if (trace->summary)
		thread__update_stats(ttrace, id, sample);

	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);

	/*
	 * A successful open(): associate the pathname captured by
	 * probe:vfs_getname with the returned fd.  See trace__vfs_getname.
	 */
	if (id == trace->audit.open_id && ret >= 0 && ttrace->filename.pending_open) {
		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
		ttrace->filename.pending_open = false;
		++trace->stats.vfs_getname;
	}

	ttrace->exit_time = sample->time;

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
	} else if (trace->duration_filter)
		goto out;

	if (trace->summary_only)
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);

	if (ttrace->entry_pending) {
		fprintf(trace->output, "%-70s", ttrace->entry_str);
	} else {
		/* Entry was printed earlier as interrupted, or never seen. */
		fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		fprintf(trace->output, "]: %s()", sc->name);
	}

	if (sc->fmt == NULL) {
signed_print:
		fprintf(trace->output, ") = %ld", ret);
	} else if (ret < 0 && sc->fmt->errmsg) {
		char bf[STRERR_BUFSIZE];
		/*
		 * NOTE(review): assumes the GNU strerror_r() variant that
		 * returns a char * — confirm against the build flags.
		 */
		const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
			   *e = audit_errno_to_name(-ret);

		fprintf(trace->output, ") = -1 %s %s", e, emsg);
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, ") = 0 Timeout");
	else if (sc->fmt->hexret)
		fprintf(trace->output, ") = %#lx", ret);
	else
		goto signed_print;	/* default: plain signed decimal */

	fputc('\n', trace->output);
out:
	ttrace->entry_pending = false;
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
2097
/*
 * Handler for the probe:vfs_getname tracepoint.
 *
 * Copies the captured pathname into ttrace->filename.name (growing the
 * buffer as needed) and marks it pending so trace__sys_exit() can bind it
 * to the fd a successful open() returns.  When an entry line is being
 * assembled (filename.ptr set), the pathname is also spliced into
 * entry_str at entry_str_pos, truncating its head if space runs out.
 * Always returns 0 (best effort).
 */
static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	struct thread_trace *ttrace;
	size_t filename_len, entry_str_len, to_move;
	ssize_t remaining_space;
	char *pos;
	const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");

	if (!thread)
		goto out;

	ttrace = thread__priv(thread);
	if (!ttrace)
		goto out;

	filename_len = strlen(filename);

	/* Grow the per-thread pathname buffer if this name doesn't fit. */
	if (ttrace->filename.namelen < filename_len) {
		char *f = realloc(ttrace->filename.name, filename_len + 1);

		if (f == NULL)
				goto out;

		ttrace->filename.namelen = filename_len;
		ttrace->filename.name = f;
	}

	strcpy(ttrace->filename.name, filename);
	ttrace->filename.pending_open = true;

	/* No entry line waiting for this pathname: done. */
	if (!ttrace->filename.ptr)
		goto out;

	entry_str_len = strlen(ttrace->entry_str);
	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
	if (remaining_space <= 0)
		goto out;

	/* Keep the tail of the path, which is the most informative part. */
	if (filename_len > (size_t)remaining_space) {
		filename += filename_len - remaining_space;
		filename_len = remaining_space;
	}

	/* Open a gap at entry_str_pos and splice the pathname in. */
	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
	memmove(pos + filename_len, pos, to_move);
	memcpy(pos, filename, filename_len);

	ttrace->filename.ptr = 0;
	ttrace->filename.entry_str_pos = 0;
out:
	return 0;
}
2154
2155static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
2156				     union perf_event *event __maybe_unused,
2157				     struct perf_sample *sample)
2158{
2159        u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
2160	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
2161	struct thread *thread = machine__findnew_thread(trace->host,
2162							sample->pid,
2163							sample->tid);
2164	struct thread_trace *ttrace = thread__trace(thread, trace->output);
2165
2166	if (ttrace == NULL)
2167		goto out_dump;
2168
2169	ttrace->runtime_ms += runtime_ms;
2170	trace->runtime_ms += runtime_ms;
 
2171	thread__put(thread);
2172	return 0;
2173
2174out_dump:
2175	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
2176	       evsel->name,
2177	       perf_evsel__strval(evsel, sample, "comm"),
2178	       (pid_t)perf_evsel__intval(evsel, sample, "pid"),
2179	       runtime,
2180	       perf_evsel__intval(evsel, sample, "vruntime"));
2181	thread__put(thread);
2182	return 0;
2183}
2184
2185static void bpf_output__printer(enum binary_printer_ops op,
2186				unsigned int val, void *extra)
2187{
2188	FILE *output = extra;
2189	unsigned char ch = (unsigned char)val;
2190
2191	switch (op) {
2192	case BINARY_PRINT_CHAR_DATA:
2193		fprintf(output, "%c", isprint(ch) ? ch : '.');
2194		break;
2195	case BINARY_PRINT_DATA_BEGIN:
2196	case BINARY_PRINT_LINE_BEGIN:
2197	case BINARY_PRINT_ADDR:
2198	case BINARY_PRINT_NUM_DATA:
2199	case BINARY_PRINT_NUM_PAD:
2200	case BINARY_PRINT_SEP:
2201	case BINARY_PRINT_CHAR_PAD:
2202	case BINARY_PRINT_LINE_END:
2203	case BINARY_PRINT_DATA_END:
2204	default:
2205		break;
2206	}
 
 
2207}
2208
/*
 * Dump the raw payload of a bpf-output sample to trace->output as a
 * character stream, 8 bytes per print_binary() line, non-printables as '.'
 * (see bpf_output__printer).
 */
static void bpf_output__fprintf(struct trace *trace,
				struct perf_sample *sample)
{
	print_binary(sample->raw_data, sample->raw_size, 8,
		     bpf_output__printer, trace->output);
}
2215
/*
 * Generic handler for non-syscall events added via --event.
 *
 * Flushes any pending syscall entry line first, then prints a timestamped
 * line with the event name followed by either the bpf-output payload or
 * the libtraceevent-formatted tracepoint fields.  Always returns 0.
 */
static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample)
{
	trace__printf_interrupted_entry(trace, sample);
	trace__fprintf_tstamp(trace, sample->time, trace->output);

	/* Placeholder keeping columns aligned with syscall duration output. */
	if (trace->trace_syscalls)
		fprintf(trace->output, "(         ): ");

	fprintf(trace->output, "%s:", evsel->name);

	if (perf_evsel__is_bpf_output(evsel)) {
		bpf_output__fprintf(trace, sample);
	} else if (evsel->tp_format) {
		event_format__fprintf(evsel->tp_format, sample->cpu,
				      sample->raw_data, sample->raw_size,
				      trace->output);
	}

	fprintf(trace->output, ")\n");
	return 0;
}
2239
2240static void print_location(FILE *f, struct perf_sample *sample,
2241			   struct addr_location *al,
2242			   bool print_dso, bool print_sym)
2243{
2244
2245	if ((verbose || print_dso) && al->map)
2246		fprintf(f, "%s@", al->map->dso->long_name);
2247
2248	if ((verbose || print_sym) && al->sym)
2249		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
2250			al->addr - al->sym->start);
2251	else if (al->map)
2252		fprintf(f, "0x%" PRIx64, al->addr);
2253	else
2254		fprintf(f, "0x%" PRIx64, sample->addr);
2255}
2256
2257static int trace__pgfault(struct trace *trace,
2258			  struct perf_evsel *evsel,
2259			  union perf_event *event __maybe_unused,
2260			  struct perf_sample *sample)
2261{
2262	struct thread *thread;
2263	struct addr_location al;
2264	char map_type = 'd';
2265	struct thread_trace *ttrace;
2266	int err = -1;
 
2267
2268	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
 
 
 
 
 
 
 
 
 
 
2269	ttrace = thread__trace(thread, trace->output);
2270	if (ttrace == NULL)
2271		goto out_put;
2272
2273	if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
2274		ttrace->pfmaj++;
2275	else
2276		ttrace->pfmin++;
2277
2278	if (trace->summary_only)
2279		goto out;
2280
2281	thread__find_addr_location(thread, sample->cpumode, MAP__FUNCTION,
2282			      sample->ip, &al);
2283
2284	trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output);
2285
2286	fprintf(trace->output, "%sfault [",
2287		evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
2288		"maj" : "min");
2289
2290	print_location(trace->output, sample, &al, false, true);
2291
2292	fprintf(trace->output, "] => ");
2293
2294	thread__find_addr_location(thread, sample->cpumode, MAP__VARIABLE,
2295				   sample->addr, &al);
2296
2297	if (!al.map) {
2298		thread__find_addr_location(thread, sample->cpumode,
2299					   MAP__FUNCTION, sample->addr, &al);
2300
2301		if (al.map)
2302			map_type = 'x';
2303		else
2304			map_type = '?';
2305	}
2306
2307	print_location(trace->output, sample, &al, true, false);
2308
2309	fprintf(trace->output, " (%c%c)\n", map_type, al.level);
 
 
 
 
 
 
 
2310out:
2311	err = 0;
2312out_put:
2313	thread__put(thread);
2314	return err;
2315}
2316
2317static bool skip_sample(struct trace *trace, struct perf_sample *sample)
 
 
2318{
2319	if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) ||
2320	    (trace->tid_list && intlist__find(trace->tid_list, sample->tid)))
2321		return false;
2322
2323	if (trace->pid_list || trace->tid_list)
2324		return true;
2325
2326	return false;
 
 
 
2327}
2328
2329static int trace__process_sample(struct perf_tool *tool,
2330				 union perf_event *event,
2331				 struct perf_sample *sample,
2332				 struct perf_evsel *evsel,
2333				 struct machine *machine __maybe_unused)
2334{
2335	struct trace *trace = container_of(tool, struct trace, tool);
 
2336	int err = 0;
2337
2338	tracepoint_handler handler = evsel->handler;
2339
2340	if (skip_sample(trace, sample))
2341		return 0;
 
2342
2343	if (!trace->full_time && trace->base_time == 0)
2344		trace->base_time = sample->time;
2345
2346	if (handler) {
2347		++trace->nr_events;
2348		handler(trace, evsel, event, sample);
2349	}
2350
 
2351	return err;
2352}
2353
2354static int parse_target_str(struct trace *trace)
2355{
2356	if (trace->opts.target.pid) {
2357		trace->pid_list = intlist__new(trace->opts.target.pid);
2358		if (trace->pid_list == NULL) {
2359			pr_err("Error parsing process id string\n");
2360			return -EINVAL;
2361		}
2362	}
2363
2364	if (trace->opts.target.tid) {
2365		trace->tid_list = intlist__new(trace->opts.target.tid);
2366		if (trace->tid_list == NULL) {
2367			pr_err("Error parsing thread id string\n");
2368			return -EINVAL;
2369		}
2370	}
2371
2372	return 0;
2373}
2374
2375static int trace__record(struct trace *trace, int argc, const char **argv)
2376{
2377	unsigned int rec_argc, i, j;
2378	const char **rec_argv;
2379	const char * const record_args[] = {
2380		"record",
2381		"-R",
2382		"-m", "1024",
2383		"-c", "1",
2384	};
2385
 
2386	const char * const sc_args[] = { "-e", };
2387	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
2388	const char * const majpf_args[] = { "-e", "major-faults" };
2389	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
2390	const char * const minpf_args[] = { "-e", "minor-faults" };
2391	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
 
2392
2393	/* +1 is for the event string below */
2394	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
2395		majpf_args_nr + minpf_args_nr + argc;
2396	rec_argv = calloc(rec_argc + 1, sizeof(char *));
2397
2398	if (rec_argv == NULL)
2399		return -ENOMEM;
2400
2401	j = 0;
2402	for (i = 0; i < ARRAY_SIZE(record_args); i++)
2403		rec_argv[j++] = record_args[i];
2404
2405	if (trace->trace_syscalls) {
2406		for (i = 0; i < sc_args_nr; i++)
2407			rec_argv[j++] = sc_args[i];
2408
2409		/* event string may be different for older kernels - e.g., RHEL6 */
2410		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
2411			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
2412		else if (is_valid_tracepoint("syscalls:sys_enter"))
2413			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
2414		else {
2415			pr_err("Neither raw_syscalls nor syscalls events exist.\n");
2416			return -1;
2417		}
2418	}
2419
 
 
 
2420	if (trace->trace_pgfaults & TRACE_PFMAJ)
2421		for (i = 0; i < majpf_args_nr; i++)
2422			rec_argv[j++] = majpf_args[i];
2423
2424	if (trace->trace_pgfaults & TRACE_PFMIN)
2425		for (i = 0; i < minpf_args_nr; i++)
2426			rec_argv[j++] = minpf_args[i];
2427
2428	for (i = 0; i < (unsigned int)argc; i++)
2429		rec_argv[j++] = argv[i];
2430
2431	return cmd_record(j, rec_argv, NULL);
 
 
 
 
2432}
2433
/* Forward declaration: defined below, after the per-thread stats helpers. */
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
2435
2436static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
2437{
2438	struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
 
 
 
2439
2440	if (IS_ERR(evsel))
 
 
 
2441		return false;
2442
2443	if (perf_evsel__field(evsel, "pathname") == NULL) {
2444		perf_evsel__delete(evsel);
2445		return false;
 
 
 
 
 
 
 
 
 
 
2446	}
2447
2448	evsel->handler = trace__vfs_getname;
2449	perf_evlist__add(evlist, evsel);
2450	return true;
2451}
2452
2453static int perf_evlist__add_pgfault(struct perf_evlist *evlist,
2454				    u64 config)
2455{
2456	struct perf_evsel *evsel;
2457	struct perf_event_attr attr = {
2458		.type = PERF_TYPE_SOFTWARE,
2459		.mmap_data = 1,
2460	};
2461
2462	attr.config = config;
2463	attr.sample_period = 1;
2464
2465	event_attr_init(&attr);
2466
2467	evsel = perf_evsel__new(&attr);
2468	if (!evsel)
2469		return -ENOMEM;
2470
2471	evsel->handler = trace__pgfault;
2472	perf_evlist__add(evlist, evsel);
2473
2474	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
2475}
2476
2477static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
2478{
2479	const u32 type = event->header.type;
2480	struct perf_evsel *evsel;
2481
2482	if (!trace->full_time && trace->base_time == 0)
2483		trace->base_time = sample->time;
2484
2485	if (type != PERF_RECORD_SAMPLE) {
2486		trace__process_event(trace, trace->host, event, sample);
2487		return;
2488	}
2489
2490	evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
2491	if (evsel == NULL) {
2492		fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
2493		return;
2494	}
2495
2496	if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
 
 
 
 
 
2497	    sample->raw_data == NULL) {
2498		fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
2499		       perf_evsel__name(evsel), sample->tid,
2500		       sample->cpu, sample->raw_size);
2501	} else {
2502		tracepoint_handler handler = evsel->handler;
2503		handler(trace, evsel, event, sample);
2504	}
 
 
 
2505}
2506
/*
 * Create the raw_syscalls:sys_enter/sys_exit tracepoint evsels, wire up
 * their handlers and field accessors, and add both to trace->evlist.
 *
 * Returns 0 on success, -1 on failure; on failure any already-created
 * evsel is deleted via the goto cleanup chain below.
 */
static int trace__add_syscall_newtp(struct trace *trace)
{
	int ret = -1;
	struct perf_evlist *evlist = trace->evlist;
	struct perf_evsel *sys_enter, *sys_exit;

	sys_enter = perf_evsel__syscall_newtp("sys_enter", trace__sys_enter);
	if (sys_enter == NULL)
		goto out;

	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
		goto out_delete_sys_enter;

	sys_exit = perf_evsel__syscall_newtp("sys_exit", trace__sys_exit);
	if (sys_exit == NULL)
		goto out_delete_sys_enter;

	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
		goto out_delete_sys_exit;

	perf_evlist__add(evlist, sys_enter);
	perf_evlist__add(evlist, sys_exit);

	trace->syscalls.events.sys_enter = sys_enter;
	trace->syscalls.events.sys_exit  = sys_exit;

	ret = 0;
out:
	return ret;

out_delete_sys_exit:
	perf_evsel__delete_priv(sys_exit);
out_delete_sys_enter:
	perf_evsel__delete_priv(sys_enter);
	goto out;
}
2543
/*
 * Restrict the sys_enter/sys_exit tracepoints to the syscall ids collected
 * in trace->ev_qualifier_ids by appending an "id" filter expression
 * (inverted when trace->not_ev_qualifier is set) to both evsels.
 *
 * The sys_exit append only runs when the sys_enter append returned zero;
 * err carries the result of that second append.  Returns 0 on success,
 * -1 otherwise (errno set to ENOMEM when the filter string allocation
 * failed).
 */
static int trace__set_ev_qualifier_filter(struct trace *trace)
{
	int err = -1;
	char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
						trace->ev_qualifier_ids.nr,
						trace->ev_qualifier_ids.entries);

	if (filter == NULL)
		goto out_enomem;

	if (!perf_evsel__append_filter(trace->syscalls.events.sys_enter, "&&", filter))
		err = perf_evsel__append_filter(trace->syscalls.events.sys_exit, "&&", filter);

	free(filter);
out:
	return err;
out_enomem:
	errno = ENOMEM;
	goto out;
}
2564
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Live tracing main loop for 'perf trace'.
 *
 * Sets up the requested events (syscall tracepoints, vfs_getname probe,
 * page faults, sched_stat_runtime), creates the target maps, opens and
 * mmaps the evlist, optionally forks and starts a workload, then consumes
 * events from the ring buffers until interrupted or drained.
 *
 * Returns 0 on success or a negative error; all failure paths funnel
 * through goto labels that print a diagnostic and fall into
 * out_delete_evlist.
 */
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct perf_evlist *evlist = trace->evlist;
	struct perf_evsel *evsel;
	int err = -1, i;
	unsigned long before;
	const bool forks = argc > 0;
	bool draining = false;

	trace->live = true;

	if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
		goto out_error_raw_syscalls;

	if (trace->trace_syscalls)
		trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);

	if ((trace->trace_pgfaults & TRACE_PFMAJ) &&
	    perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ)) {
		goto out_error_mem;
	}

	if ((trace->trace_pgfaults & TRACE_PFMIN) &&
	    perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MIN))
		goto out_error_mem;

	if (trace->sched &&
	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
				   trace__sched_stat_runtime))
		goto out_error_sched_stat_runtime;

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_evlist;
	}

	perf_evlist__config(evlist, &trace->opts);

	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	if (forks) {
		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
						    argv, false, NULL);
		if (err < 0) {
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_evlist;
		}
	}

	err = perf_evlist__open(evlist);
	if (err < 0)
		goto out_error_open;

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_error_open;
	}

	/*
	 * Better not use !target__has_task() here because we need to cover the
	 * case where no threads were specified in the command line, but a
	 * workload was, and in that case we will fill in the thread_map when
	 * we fork the workload in perf_evlist__prepare_workload.
	 */
	if (trace->filter_pids.nr > 0)
		err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries);
	else if (thread_map__pid(evlist->threads, 0) == -1)
		err = perf_evlist__set_filter_pid(evlist, getpid());

	if (err < 0)
		goto out_error_mem;

	/* -e syscall list: install the id filter built from the qualifier. */
	if (trace->ev_qualifier_ids.nr > 0) {
		err = trace__set_ev_qualifier_filter(trace);
		if (err < 0)
			goto out_errno;

		pr_debug("event qualifier tracepoint filter: %s\n",
			 trace->syscalls.events.sys_exit->filter);
	}

	err = perf_evlist__apply_filters(evlist, &evsel);
	if (err < 0)
		goto out_error_apply_filters;

	err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
	if (err < 0)
		goto out_error_mmap;

	if (!target__none(&trace->opts.target))
		perf_evlist__enable(evlist);

	if (forks)
		perf_evlist__start_workload(evlist);

	/* Show tids in the output when more than one thread may be traced. */
	trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
				  evlist->threads->nr > 1 ||
				  perf_evlist__first(evlist)->attr.inherit;
again:
	before = trace->nr_events;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			struct perf_sample sample;

			++trace->nr_events;

			err = perf_evlist__parse_sample(evlist, event, &sample);
			if (err) {
				fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
				goto next_event;
			}

			trace__handle_event(trace, event, &sample);
next_event:
			perf_evlist__mmap_consume(evlist, i);

			if (interrupted)
				goto out_disable;

			/* On SIGCHLD/SIGINT: stop producing, keep draining. */
			if (done && !draining) {
				perf_evlist__disable(evlist);
				draining = true;
			}
		}
	}

	/* Nothing new this pass: poll for more unless we are draining. */
	if (trace->nr_events == before) {
		int timeout = done ? 100 : -1;

		if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
			if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
				draining = true;

			goto again;
		}
	} else {
		goto again;
	}

out_disable:
	thread__zput(trace->current);

	perf_evlist__disable(evlist);

	if (!err) {
		if (trace->summary)
			trace__fprintf_thread_summary(trace, trace->output);

		if (trace->show_tool_stats) {
			fprintf(trace->output, "Stats:\n "
					       " vfs_getname : %" PRIu64 "\n"
					       " proc_getname: %" PRIu64 "\n",
				trace->stats.vfs_getname,
				trace->stats.proc_getname);
		}
	}

out_delete_evlist:
	perf_evlist__delete(evlist);
	trace->evlist = NULL;
	trace->live = false;
	return err;
/*
 * Unreachable fall-through: this brace block only provides a scope for
 * errbuf shared by the error labels below, all entered via goto from
 * above.
 */
{
	char errbuf[BUFSIZ];

out_error_sched_stat_runtime:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
	goto out_error;

out_error_raw_syscalls:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
	goto out_error;

out_error_mmap:
	perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
	goto out_error;

out_error_open:
	perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));

out_error:
	fprintf(trace->output, "%s\n", errbuf);
	goto out_delete_evlist;

out_error_apply_filters:
	fprintf(trace->output,
		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
		evsel->filter, perf_evsel__name(evsel), errno,
		strerror_r(errno, errbuf, sizeof(errbuf)));
	goto out_delete_evlist;
}
out_error_mem:
	fprintf(trace->output, "Not enough memory to run!\n");
	goto out_delete_evlist;

out_errno:
	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
	goto out_delete_evlist;
}
2780
2781static int trace__replay(struct trace *trace)
2782{
2783	const struct perf_evsel_str_handler handlers[] = {
2784		{ "probe:vfs_getname",	     trace__vfs_getname, },
2785	};
2786	struct perf_data_file file = {
2787		.path  = input_name,
2788		.mode  = PERF_DATA_MODE_READ,
2789		.force = trace->force,
2790	};
2791	struct perf_session *session;
2792	struct perf_evsel *evsel;
2793	int err = -1;
2794
2795	trace->tool.sample	  = trace__process_sample;
2796	trace->tool.mmap	  = perf_event__process_mmap;
2797	trace->tool.mmap2	  = perf_event__process_mmap2;
2798	trace->tool.comm	  = perf_event__process_comm;
2799	trace->tool.exit	  = perf_event__process_exit;
2800	trace->tool.fork	  = perf_event__process_fork;
2801	trace->tool.attr	  = perf_event__process_attr;
2802	trace->tool.tracing_data = perf_event__process_tracing_data;
2803	trace->tool.build_id	  = perf_event__process_build_id;
 
2804
2805	trace->tool.ordered_events = true;
2806	trace->tool.ordering_requires_timestamps = true;
2807
2808	/* add tid to output */
2809	trace->multiple_threads = true;
2810
2811	session = perf_session__new(&file, false, &trace->tool);
2812	if (session == NULL)
2813		return -1;
 
 
 
 
 
 
2814
2815	if (symbol__init(&session->header.env) < 0)
2816		goto out;
2817
2818	trace->host = &session->machines.host;
2819
2820	err = perf_session__set_tracepoints_handlers(session, handlers);
2821	if (err)
2822		goto out;
2823
2824	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2825						     "raw_syscalls:sys_enter");
2826	/* older kernels have syscalls tp versus raw_syscalls */
2827	if (evsel == NULL)
2828		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2829							     "syscalls:sys_enter");
2830
2831	if (evsel &&
2832	    (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
2833	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
2834		pr_err("Error during initialize raw_syscalls:sys_enter event\n");
2835		goto out;
2836	}
2837
2838	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2839						     "raw_syscalls:sys_exit");
2840	if (evsel == NULL)
2841		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2842							     "syscalls:sys_exit");
2843	if (evsel &&
2844	    (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
2845	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
2846		pr_err("Error during initialize raw_syscalls:sys_exit event\n");
2847		goto out;
2848	}
2849
2850	evlist__for_each(session->evlist, evsel) {
2851		if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
2852		    (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
2853		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
2854		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS))
2855			evsel->handler = trace__pgfault;
2856	}
2857
2858	err = parse_target_str(trace);
2859	if (err != 0)
2860		goto out;
2861
2862	setup_pager();
2863
2864	err = perf_session__process_events(session);
2865	if (err)
2866		pr_err("Failed to process events, error %d", err);
2867
2868	else if (trace->summary)
2869		trace__fprintf_thread_summary(trace, trace->output);
2870
2871out:
2872	perf_session__delete(session);
2873
2874	return err;
2875}
2876
/*
 * Print the banner that precedes the per-thread summaries; returns the
 * number of characters written.
 */
static size_t trace__fprintf_threads_header(FILE *fp)
{
	return fprintf(fp, "\n Summary of events:\n\n");
}
2885
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Print the per-syscall statistics table (calls, total/min/avg/max in
 * msec, stddev as a percentage of the average) for one thread.  Returns
 * the number of characters printed; 0 when the thread has no stats.
 */
static size_t thread__dump_stats(struct thread_trace *ttrace,
				 struct trace *trace, FILE *fp)
{
	struct stats *stats;
	size_t printed = 0;
	struct syscall *sc;
	struct int_node *inode = intlist__first(ttrace->syscall_stats);

	if (inode == NULL)
		return 0;

	printed += fprintf(fp, "\n");

	printed += fprintf(fp, "   syscall            calls    total       min       avg       max      stddev\n");
	printed += fprintf(fp, "                               (msec)    (msec)    (msec)    (msec)        (%%)\n");
	printed += fprintf(fp, "   --------------- -------- --------- --------- --------- ---------     ------\n");

	/* each int_node is a syscall */
	while (inode) {
		stats = inode->priv;
		if (stats) {
			/* Durations were accumulated in nanoseconds. */
			double min = (double)(stats->min) / NSEC_PER_MSEC;
			double max = (double)(stats->max) / NSEC_PER_MSEC;
			double avg = avg_stats(stats);
			double pct;
			u64 n = (u64) stats->n;

			/* stddev as a percentage of the average duration */
			pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
			avg /= NSEC_PER_MSEC;

			/* inode->i is the syscall id, indexing the table. */
			sc = &trace->syscalls.table[inode->i];
			printed += fprintf(fp, "   %-15s", sc->name);
			printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
					   n, avg * n, min, avg);
			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
		}

		inode = intlist__next(inode);
	}

	printed += fprintf(fp, "\n\n");

	return printed;
}
2930
2931/* struct used to pass data to per-thread function */
2932struct summary_data {
	/* destination stream for the summary output */
2933	FILE *fp;
	/* global trace state (event totals, syscall table) */
2934	struct trace *trace;
	/* running count of characters written so far */
2935	size_t printed;
2936};
2937
2938static int trace__fprintf_one_thread(struct thread *thread, void *priv)
2939{
2940	struct summary_data *data = priv;
2941	FILE *fp = data->fp;
2942	size_t printed = data->printed;
2943	struct trace *trace = data->trace;
2944	struct thread_trace *ttrace = thread__priv(thread);
2945	double ratio;
2946
2947	if (ttrace == NULL)
2948		return 0;
2949
2950	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
2951
2952	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
2953	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
2954	printed += fprintf(fp, "%.1f%%", ratio);
2955	if (ttrace->pfmaj)
2956		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
2957	if (ttrace->pfmin)
2958		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
2959	printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
 
 
 
 
2960	printed += thread__dump_stats(ttrace, trace, fp);
2961
2962	data->printed += printed;
 
2963
2964	return 0;
 
 
 
 
 
 
 
 
 
2965}
2966
2967static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
2968{
2969	struct summary_data data = {
2970		.fp = fp,
2971		.trace = trace
2972	};
2973	data.printed = trace__fprintf_threads_header(fp);
 
 
 
 
 
 
2974
2975	machine__for_each_thread(trace->host, trace__fprintf_one_thread, &data);
 
2976
2977	return data.printed;
 
 
2978}
2979
2980static int trace__set_duration(const struct option *opt, const char *str,
2981			       int unset __maybe_unused)
2982{
2983	struct trace *trace = opt->value;
2984
2985	trace->duration_filter = atof(str);
2986	return 0;
2987}
2988
2989static int trace__set_filter_pids(const struct option *opt, const char *str,
2990				  int unset __maybe_unused)
2991{
2992	int ret = -1;
2993	size_t i;
2994	struct trace *trace = opt->value;
2995	/*
2996	 * FIXME: introduce a intarray class, plain parse csv and create a
2997	 * { int nr, int entries[] } struct...
2998	 */
2999	struct intlist *list = intlist__new(str);
3000
3001	if (list == NULL)
3002		return -1;
3003
3004	i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
3005	trace->filter_pids.entries = calloc(i, sizeof(pid_t));
3006
3007	if (trace->filter_pids.entries == NULL)
3008		goto out;
3009
3010	trace->filter_pids.entries[0] = getpid();
3011
3012	for (i = 1; i < trace->filter_pids.nr; ++i)
3013		trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
3014
3015	intlist__delete(list);
3016	ret = 0;
3017out:
3018	return ret;
3019}
3020
3021static int trace__open_output(struct trace *trace, const char *filename)
3022{
3023	struct stat st;
3024
3025	if (!stat(filename, &st) && st.st_size) {
3026		char oldname[PATH_MAX];
3027
3028		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
3029		unlink(oldname);
3030		rename(filename, oldname);
3031	}
3032
3033	trace->output = fopen(filename, "w");
3034
3035	return trace->output == NULL ? -errno : 0;
3036}
3037
3038static int parse_pagefaults(const struct option *opt, const char *str,
3039			    int unset __maybe_unused)
3040{
3041	int *trace_pgfaults = opt->value;
3042
3043	if (strcmp(str, "all") == 0)
3044		*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
3045	else if (strcmp(str, "maj") == 0)
3046		*trace_pgfaults |= TRACE_PFMAJ;
3047	else if (strcmp(str, "min") == 0)
3048		*trace_pgfaults |= TRACE_PFMIN;
3049	else
3050		return -1;
3051
3052	return 0;
3053}
3054
/*
 * Install the same sample handler on every event in the evlist; used to
 * route all --event tracepoints through trace__event_handler.
 */
3055static void evlist__set_evsel_handler(struct perf_evlist *evlist, void *handler)
3056{
3057	struct perf_evsel *evsel;
3058
3059	evlist__for_each(evlist, evsel)
3060		evsel->handler = handler;
3061}
3062
/*
 * Entry point for 'perf trace': parse options, set up the trace state
 * and dispatch to record/replay/live-run modes.
 */
3063int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
3064{
3065	const char *trace_usage[] = {
3066		"perf trace [<options>] [<command>]",
3067		"perf trace [<options>] -- <command> [<options>]",
3068		"perf trace record [<options>] [<command>]",
3069		"perf trace record [<options>] -- <command> [<options>]",
3070		NULL
3071	};
3072	struct trace trace = {
3073		.audit = {
3074			.machine = audit_detect_machine(),
			/*
			 * NOTE(review): this reads trace.audit.machine while
			 * 'trace' is still being initialized; C leaves the
			 * evaluation order of initializer expressions
			 * unspecified, so .machine is not guaranteed to be
			 * set yet — confirm / compute machine beforehand.
			 */
3075			.open_id = audit_name_to_syscall("open", trace.audit.machine),
3076		},
3077		.syscalls = {
3078			. max = -1,
3079		},
3080		.opts = {
3081			.target = {
3082				.uid	   = UINT_MAX,
3083				.uses_mmap = true,
3084			},
3085			.user_freq     = UINT_MAX,
3086			.user_interval = ULLONG_MAX,
3087			.no_buffering  = true,
3088			.mmap_pages    = UINT_MAX,
3089			.proc_map_timeout  = 500,
3090		},
3091		.output = stderr,
3092		.show_comm = true,
3093		.trace_syscalls = true,
3094	};
3095	const char *output_name = NULL;
3096	const char *ev_qualifier_str = NULL;
3097	const struct option trace_options[] = {
3098	OPT_CALLBACK(0, "event", &trace.evlist, "event",
3099		     "event selector. use 'perf list' to list available events",
3100		     parse_events_option),
3101	OPT_BOOLEAN(0, "comm", &trace.show_comm,
3102		    "show the thread COMM next to its id"),
3103	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
3104	OPT_STRING('e', "expr", &ev_qualifier_str, "expr", "list of syscalls to trace"),
3105	OPT_STRING('o', "output", &output_name, "file", "output file name"),
3106	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
3107	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
3108		    "trace events on existing process id"),
3109	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
3110		    "trace events on existing thread id"),
3111	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
3112		     "pids to filter (by the kernel)", trace__set_filter_pids),
3113	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
3114		    "system-wide collection from all CPUs"),
3115	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
3116		    "list of cpus to monitor"),
3117	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
3118		    "child tasks do not inherit counters"),
3119	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
3120		     "number of mmap data pages",
3121		     perf_evlist__parse_mmap_pages),
3122	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
3123		   "user to profile"),
3124	OPT_CALLBACK(0, "duration", &trace, "float",
3125		     "show only events with duration > N.M ms",
3126		     trace__set_duration),
3127	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
3128	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
3129	OPT_BOOLEAN('T', "time", &trace.full_time,
3130		    "Show full timestamp, not time relative to first start"),
3131	OPT_BOOLEAN('s', "summary", &trace.summary_only,
3132		    "Show only syscall summary with statistics"),
3133	OPT_BOOLEAN('S', "with-summary", &trace.summary,
3134		    "Show all syscalls and summary with statistics"),
3135	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
3136		     "Trace pagefaults", parse_pagefaults, "maj"),
3137	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
3138	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
3139	OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
3140			"per thread proc mmap processing timeout in ms"),
3141	OPT_END()
3142	};
3143	const char * const trace_subcommands[] = { "record", NULL };
3144	int err;
3145	char bf[BUFSIZ];
3146
	/* Dump a stack trace instead of silently dying on SEGV/FPE. */
3147	signal(SIGSEGV, sighandler_dump_stack);
3148	signal(SIGFPE, sighandler_dump_stack);
3149
3150	trace.evlist = perf_evlist__new();
3151
3152	if (trace.evlist == NULL) {
3153		pr_err("Not enough memory to run!\n");
3154		err = -ENOMEM;
3155		goto out;
3156	}
3157
3158	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
3159				 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
3160
	/* Page fault samples need the faulting address and a timestamp. */
3161	if (trace.trace_pgfaults) {
3162		trace.opts.sample_address = true;
3163		trace.opts.sample_time = true;
3164	}
3165
3166	if (trace.evlist->nr_entries > 0)
3167		evlist__set_evsel_handler(trace.evlist, trace__event_handler);
3168
	/* 'perf trace record ...' delegates to perf record. */
3169	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
3170		return trace__record(&trace, argc-1, &argv[1]);
3171
3172	/* summary_only implies summary option, but don't overwrite summary if set */
3173	if (trace.summary_only)
3174		trace.summary = trace.summary_only;
3175
3176	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
3177	    trace.evlist->nr_entries == 0 /* Was --events used? */) {
3178		pr_err("Please specify something to trace.\n");
3179		return -1;
3180	}
3181
3182	if (output_name != NULL) {
3183		err = trace__open_output(&trace, output_name);
3184		if (err < 0) {
3185			perror("failed to create output file");
3186			goto out;
3187		}
3188	}
3189
	/* -e/--expr syscall list; a leading '!' negates the filter. */
3190	if (ev_qualifier_str != NULL) {
3191		const char *s = ev_qualifier_str;
3192		struct strlist_config slist_config = {
3193			.dirname = system_path(STRACE_GROUPS_DIR),
3194		};
3195
3196		trace.not_ev_qualifier = *s == '!';
3197		if (trace.not_ev_qualifier)
3198			++s;
3199		trace.ev_qualifier = strlist__new(s, &slist_config);
3200		if (trace.ev_qualifier == NULL) {
3201			fputs("Not enough memory to parse event qualifier",
3202			      trace.output);
3203			err = -ENOMEM;
3204			goto out_close;
3205		}
3206
3207		err = trace__validate_ev_qualifier(&trace);
3208		if (err)
3209			goto out_close;
3210	}
3211
3212	err = target__validate(&trace.opts.target);
3213	if (err) {
3214		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
3215		fprintf(trace.output, "%s", bf);
3216		goto out_close;
3217	}
3218
3219	err = target__parse_uid(&trace.opts.target);
3220	if (err) {
3221		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
3222		fprintf(trace.output, "%s", bf);
3223		goto out_close;
3224	}
3225
	/* No workload and no target specified: trace the whole system. */
3226	if (!argc && target__none(&trace.opts.target))
3227		trace.opts.target.system_wide = true;
3228
	/* -i replays a perf.data file; otherwise trace live. */
3229	if (input_name)
3230		err = trace__replay(&trace);
3231	else
3232		err = trace__run(&trace, argc, argv);
3233
3234out_close:
3235	if (output_name != NULL)
3236		fclose(trace.output);
3237out:
3238	return err;
3239}
v6.2
   1/*
   2 * builtin-trace.c
   3 *
   4 * Builtin 'trace' command:
   5 *
   6 * Display a continuously updated trace of any workload, CPU, specific PID,
   7 * system wide, etc.  Default format is loosely strace like, but any other
   8 * event may be specified using --event.
   9 *
  10 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
  11 *
  12 * Initially based on the 'trace' prototype by Thomas Gleixner:
  13 *
  14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 
 
  15 */
  16
  17#include "util/record.h"
  18#include <api/fs/tracing_path.h>
  19#ifdef HAVE_LIBBPF_SUPPORT
  20#include <bpf/bpf.h>
  21#endif
  22#include "util/bpf_map.h"
  23#include "util/rlimit.h"
  24#include "builtin.h"
  25#include "util/cgroup.h"
  26#include "util/color.h"
  27#include "util/config.h"
  28#include "util/debug.h"
  29#include "util/dso.h"
  30#include "util/env.h"
  31#include "util/event.h"
  32#include "util/evsel.h"
  33#include "util/evsel_fprintf.h"
  34#include "util/synthetic-events.h"
  35#include "util/evlist.h"
  36#include "util/evswitch.h"
  37#include "util/mmap.h"
  38#include <subcmd/pager.h>
  39#include <subcmd/exec-cmd.h>
  40#include "util/machine.h"
  41#include "util/map.h"
  42#include "util/symbol.h"
  43#include "util/path.h"
  44#include "util/session.h"
  45#include "util/thread.h"
  46#include <subcmd/parse-options.h>
  47#include "util/strlist.h"
  48#include "util/intlist.h"
  49#include "util/thread_map.h"
  50#include "util/stat.h"
  51#include "util/tool.h"
  52#include "util/util.h"
  53#include "trace/beauty/beauty.h"
  54#include "trace-event.h"
  55#include "util/parse-events.h"
  56#include "util/bpf-loader.h"
  57#include "util/tracepoint.h"
  58#include "callchain.h"
  59#include "print_binary.h"
  60#include "string2.h"
  61#include "syscalltbl.h"
  62#include "rb_resort.h"
  63#include "../perf.h"
  64
  65#include <errno.h>
  66#include <inttypes.h>
  67#include <poll.h>
  68#include <signal.h>
  69#include <stdlib.h>
  70#include <string.h>
 
  71#include <linux/err.h>
  72#include <linux/filter.h>
  73#include <linux/kernel.h>
  74#include <linux/random.h>
  75#include <linux/stringify.h>
  76#include <linux/time64.h>
  77#include <linux/zalloc.h>
  78#include <fcntl.h>
  79#include <sys/sysmacros.h>
  80
  81#include <linux/ctype.h>
  82#include <perf/mmap.h>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  83
  84#ifdef HAVE_LIBTRACEEVENT
  85#include <traceevent/event-parse.h>
 
 
 
 
  86#endif
  87
  88#ifndef O_CLOEXEC
  89# define O_CLOEXEC		02000000
  90#endif
  91
  92#ifndef F_LINUX_SPECIFIC_BASE
  93# define F_LINUX_SPECIFIC_BASE	1024
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  94#endif
  95
  96#define RAW_SYSCALL_ARGS_NUM	6
 
 
  97
  98/*
  99 * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
 100 */
/*
 * Per-argument pretty-printing hooks: scnprintf formats a value for
 * display, strtoul parses a symbolic name back into a value (for the
 * BPF filter expressions), mask_val hides parts of the raw value.
 */
 101struct syscall_arg_fmt {
 102	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
 103	bool	   (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
 104	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
 105	void	   *parm;
 106	const char *name;
 107	u16	   nr_entries; // for arrays
 108	bool	   show_zero;
 109};
 110
/*
 * Per-syscall formatting description: optional alias name, the BPF
 * augmenter program names and one syscall_arg_fmt per argument.
 */
 111struct syscall_fmt {
 112	const char *name;
 113	const char *alias;
 114	struct {
 115		const char *sys_enter,
 116			   *sys_exit;
 117	}	   bpf_prog_name;
 118	struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
 119	u8	   nr_args;
 120	bool	   errpid;
 121	bool	   timeout;
 122	bool	   hexret;
 123};
 124
/*
 * Global state for one 'perf trace' session: the syscall table, the BPF
 * augmentation maps/programs, record options, the evlist being traced,
 * output stream, filters and the many display toggles set from options.
 */
 125struct trace {
 126	struct perf_tool	tool;
 127	struct syscalltbl	*sctbl;
 128	struct {
 129		struct syscall  *table;
 130		struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
 131			struct bpf_map  *sys_enter,
 132					*sys_exit;
 133		}		prog_array;
 134		struct {
 135			struct evsel *sys_enter,
 136					  *sys_exit,
 137					  *augmented;
 138		}		events;
 139		struct bpf_program *unaugmented_prog;
 140	} syscalls;
 141	struct {
 142		struct bpf_map *map;
 143	} dump;
 144	struct record_opts	opts;
 145	struct evlist	*evlist;
 146	struct machine		*host;
 147	struct thread		*current;
 148	struct bpf_object	*bpf_obj;
 149	struct cgroup		*cgroup;
 150	u64			base_time;
 151	FILE			*output;
 152	unsigned long		nr_events;
 153	unsigned long		nr_events_printed;
 154	unsigned long		max_events;
 155	struct evswitch		evswitch;
	/* -e/--expr syscall name filter; see not_ev_qualifier below */
 156	struct strlist		*ev_qualifier;
 157	struct {
 158		size_t		nr;
 159		int		*entries;
 160	}			ev_qualifier_ids;
	/* pids excluded from tracing (includes perf's own pid) */
 161	struct {
 162		size_t		nr;
 163		pid_t		*entries;
 164		struct bpf_map  *map;
 165	}			filter_pids;
 166	double			duration_filter;
 167	double			runtime_ms;
 168	struct {
 169		u64		vfs_getname,
 170				proc_getname;
 171	} stats;
 172	unsigned int		max_stack;
 173	unsigned int		min_stack;
 174	int			raw_augmented_syscalls_args_size;
 175	bool			raw_augmented_syscalls;
 176	bool			fd_path_disabled;
 177	bool			sort_events;
 178	bool			not_ev_qualifier;
 179	bool			live;
 180	bool			full_time;
 181	bool			sched;
 182	bool			multiple_threads;
 183	bool			summary;
 184	bool			summary_only;
 185	bool			errno_summary;
 186	bool			failure_only;
 187	bool			show_comm;
 188	bool			print_sample;
 189	bool			show_tool_stats;
 190	bool			trace_syscalls;
 191	bool			libtraceevent_print;
 192	bool			kernel_syscallchains;
 193	s16			args_alignment;
 194	bool			show_tstamp;
 195	bool			show_duration;
 196	bool			show_zeros;
 197	bool			show_arg_names;
 198	bool			show_string_prefix;
 199	bool			force;
 200	bool			vfs_getname;
 201	int			trace_pgfaults;
 202	char			*perfconfig_events;
	/* ordered-events queue used when --sort-events is in effect */
 203	struct {
 204		struct ordered_events	data;
 205		u64			last;
 206	} oe;
 207};
 208
/*
 * Accessor for one tracepoint payload field: 'offset' into the raw
 * sample data plus a reader returning it as an integer or as a pointer.
 */
 209struct tp_field {
 210	int offset;
 211	union {
 212		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
 213		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
 214	};
 215};
 216
/* Generate tp_field__u{8,16,32,64}(): read a same-endian uint field. */
 217#define TP_UINT_FIELD(bits) \
 218static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
 219{ \
 220	u##bits value; \
 221	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
 222	return value;  \
 223}
 224
 225TP_UINT_FIELD(8);
 226TP_UINT_FIELD(16);
 227TP_UINT_FIELD(32);
 228TP_UINT_FIELD(64);
 229
/* Same, but byte-swapping for cross-endian perf.data files. */
 230#define TP_UINT_FIELD__SWAPPED(bits) \
 231static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
 232{ \
 233	u##bits value; \
 234	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
 235	return bswap_##bits(value);\
 236}
 237
 238TP_UINT_FIELD__SWAPPED(16);
 239TP_UINT_FIELD__SWAPPED(32);
 240TP_UINT_FIELD__SWAPPED(64);
 241
 242static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
 
 
 243{
 244	field->offset = offset;
 245
 246	switch (size) {
 247	case 1:
 248		field->integer = tp_field__u8;
 249		break;
 250	case 2:
 251		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
 252		break;
 253	case 4:
 254		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
 255		break;
 256	case 8:
 257		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
 258		break;
 259	default:
 260		return -1;
 261	}
 262
 263	return 0;
 264}
 265
/* Convenience wrapper taking the size/offset from a libtraceevent field. */
 266static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
 267{
 268	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
 269}
 270
/* Pointer accessor: address of the field inside the raw sample data. */
 271static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
 272{
 273	return sample->raw_data + field->offset;
 274}
 275
 276static int __tp_field__init_ptr(struct tp_field *field, int offset)
 277{
 278	field->offset = offset;
 279	field->pointer = tp_field__ptr;
 280	return 0;
 281}
 282
 283static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
 284{
 285	return __tp_field__init_ptr(field, format_field->offset);
 286}
 287
/*
 * Field accessors for a syscall tracepoint: the syscall id plus either
 * the argument block (sys_enter) or the return value (sys_exit).
 */
 288struct syscall_tp {
 289	struct tp_field id;
 290	union {
 291		struct tp_field args, ret;
 292	};
 293};
 294
 295/*
 296 * The evsel->priv as used by 'perf trace'
 297 * sc:	for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
 298 * fmt: for all the other tracepoints
 299 */
 300struct evsel_trace {
 301	struct syscall_tp	sc;
 302	struct syscall_arg_fmt  *fmt;
 303};
 304
/* Allocate a zeroed evsel_trace; NULL on allocation failure. */
 305static struct evsel_trace *evsel_trace__new(void)
 306{
 307	return zalloc(sizeof(struct evsel_trace));
 308}
 309
/* Free an evsel_trace and its per-argument format array; NULL-safe. */
 310static void evsel_trace__delete(struct evsel_trace *et)
 311{
 312	if (et == NULL)
 313		return;
 314
 315	zfree(&et->fmt);
 316	free(et);
 317}
 318
 319/*
 320 * Used with raw_syscalls:sys_{enter,exit} and with the
 321 * syscalls:sys_{enter,exit}_SYSCALL tracepoints
 322 */
 323static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel)
 324{
 325	struct evsel_trace *et = evsel->priv;
 326
 327	return &et->sc;
 328}
 329
/* Same as above, but lazily allocating evsel->priv on first use. */
 330static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel)
 331{
 332	if (evsel->priv == NULL) {
 333		evsel->priv = evsel_trace__new();
 334		if (evsel->priv == NULL)
 335			return NULL;
 336	}
 337
 338	return __evsel__syscall_tp(evsel);
 339}
 350
/*
 * Lazily allocate the per-argument format array sized by the number of
 * fields in the tracepoint's format.  Returns NULL on allocation
 * failure.
 */
 351static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
 352{
 353	struct evsel_trace *et = evsel->priv;
 354
 355	if (evsel->priv == NULL) {
 356		et = evsel->priv = evsel_trace__new();
 357
 358		if (et == NULL)
 359			return NULL;
 360	}
 361
 362	if (et->fmt == NULL) {
 363		et->fmt = calloc(evsel->tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
 364		if (et->fmt == NULL)
 365			goto out_delete;
 366	}
 367
 368	return __evsel__syscall_arg_fmt(evsel);
 369
 370out_delete:
	/*
	 * NOTE(review): this frees evsel->priv even when it existed before
	 * this call, discarding any syscall_tp ('sc') state stored there —
	 * confirm no caller relies on priv surviving a failed calloc.
	 */
 371	evsel_trace__delete(evsel->priv);
 372	evsel->priv = NULL;
 373	return NULL;
 374}
 375
/*
 * Resolve field 'name' in the tracepoint format and set up an integer
 * accessor for it; -1 when the field doesn't exist.
 */
 376static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
 377{
 378	struct tep_format_field *format_field = evsel__field(evsel, name);
 379
 380	if (format_field == NULL)
 381		return -1;
 382
 383	return tp_field__init_uint(field, format_field, evsel->needs_swap);
 384}
 385
/* Init sc->NAME from the tracepoint field of the same name. */
 386#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
 387	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
 388	   evsel__init_tp_uint_field(evsel, &sc->name, #name); })
 389
/* As above, but for a pointer (raw payload) accessor. */
 390static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
 391{
 392	struct tep_format_field *format_field = evsel__field(evsel, name);
 393
 394	if (format_field == NULL)
 395		return -1;
 396
 397	return tp_field__init_ptr(field, format_field);
 398}
 399
 400#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
 401	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
 402	   evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
 403
/* Free the evsel's perf-trace private data, then the evsel itself. */
 404static void evsel__delete_priv(struct evsel *evsel)
 405{
 406	zfree(&evsel->priv);
 407	evsel__delete(evsel);
 408}
 409
 410static int evsel__init_syscall_tp(struct evsel *evsel)
 411{
 412	struct syscall_tp *sc = evsel__syscall_tp(evsel);
 413
 414	if (sc != NULL) {
 415		if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
 416		    evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
 417			return -ENOENT;
 418		return 0;
 419	}
 420
 421	return -ENOMEM;
 422}
 423
/*
 * Set up the syscall id accessor for an augmented_raw_syscalls event,
 * borrowing the field layout from tracepoint 'tp' ("id", or
 * "__syscall_nr" as a fallback).
 */
 424static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
 425{
 426	struct syscall_tp *sc = evsel__syscall_tp(evsel);
 427
 428	if (sc != NULL) {
 429		struct tep_format_field *syscall_id = evsel__field(tp, "id");
 430		if (syscall_id == NULL)
 431			syscall_id = evsel__field(tp, "__syscall_nr");
 432		if (syscall_id == NULL ||
 433		    __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
 434			return -EINVAL;
 435
 436		return 0;
 437	}
 438
 439	return -ENOMEM;
 440}
 441
/* The args block sits one u64 (the id) past the id field's offset. */
 442static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
 443{
 444	struct syscall_tp *sc = __evsel__syscall_tp(evsel);
 445
 446	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
 447}
 448
/* Likewise for the u64 return value of the exit event. */
 449static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
 450{
 451	struct syscall_tp *sc = __evsel__syscall_tp(evsel);
 452
 453	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
 454}
 455
/* Hook 'handler' on a raw_syscalls:* evsel after resolving its id field. */
 456static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
 457{
 458	if (evsel__syscall_tp(evsel) != NULL) {
 459		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
 460			return -ENOENT;
 461
 462		evsel->handler = handler;
 463		return 0;
 464	}
 465
 466	return -ENOMEM;
 467}
 468
/*
 * Create the raw_syscalls:sys_{enter,exit} tracepoint evsel (falling
 * back to the old "syscalls" subsystem name) and install 'handler'.
 * Returns NULL on any failure.
 */
 469static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
 470{
 471	struct evsel *evsel = evsel__newtp("raw_syscalls", direction);
 472
 473	/* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
 474	if (IS_ERR(evsel))
 475		evsel = evsel__newtp("syscalls", direction);
 476
 477	if (IS_ERR(evsel))
 478		return NULL;
 479
 480	if (evsel__init_raw_syscall_tp(evsel, handler))
 481		goto out_delete;
 482
 483	return evsel;
 484
 485out_delete:
 486	evsel__delete_priv(evsel);
 487	return NULL;
 488}
 489
/* Read the syscall id (or ret) field of a sample as an integer. */
 490#define perf_evsel__sc_tp_uint(evsel, name, sample) \
 491	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
 492	   fields->name.integer(&fields->name, sample); })
 493
/* Get a pointer to the args block inside a sample's raw data. */
 494#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
 495	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
 496	   fields->name.pointer(&fields->name, sample); })
 497
 498size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
 499{
 500	int idx = val - sa->offset;
 
 
 
 
 
 501
 502	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
 503		size_t printed = scnprintf(bf, size, intfmt, val);
 504		if (show_suffix)
 505			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
 506		return printed;
 507	}
 508
 509	return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
 
 
 510}
 511
 512size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
 513{
 514	int idx = val - sa->offset;
 515
 516	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
 517		size_t printed = scnprintf(bf, size, intfmt, val);
 518		if (show_prefix)
 519			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
 520		return printed;
 521	}
 522
 523	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
 524}
 525
/*
 * Thin adapters binding the strarray printers/parsers to the
 * syscall_arg_fmt hook signatures; arg->parm carries the table.
 */
 526static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
 527						const char *intfmt,
 528					        struct syscall_arg *arg)
 529{
 530	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
 531}
 532
 533static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
 534					      struct syscall_arg *arg)
 535{
 536	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
 537}
 538
 539#define SCA_STRARRAY syscall_arg__scnprintf_strarray
 540
/* Parse a single symbolic name back into its numeric value. */
 541bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
 542{
 543	return strarray__strtoul(arg->parm, bf, size, ret);
 544}
 545
/* Parse a '|'-separated list of flag names into a bitmask. */
 546bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
 547{
 548	return strarray__strtoul_flags(arg->parm, bf, size, ret);
 549}
 550
/* Parse a name against a set of strarrays (first match wins). */
 551bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
 552{
 553	return strarrays__strtoul(arg->parm, bf, size, ret);
 554}
 555
 556size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
 557{
 558	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
 559}
 560
 561size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
 
 
 
 562{
 563	size_t printed;
 564	int i;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 565
 566	for (i = 0; i < sas->nr_entries; ++i) {
 567		struct strarray *sa = sas->entries[i];
 568		int idx = val - sa->offset;
 569
 570		if (idx >= 0 && idx < sa->nr_entries) {
 571			if (sa->entries[idx] == NULL)
 572				break;
 573			return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
 574		}
 575	}
 576
 577	printed = scnprintf(bf, size, intfmt, val);
 578	if (show_prefix)
 579		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
 580	return printed;
 581}
 582
 583bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret)
 
 
 
 584{
 585	int i;
 586
 587	for (i = 0; i < sa->nr_entries; ++i) {
 588		if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') {
 589			*ret = sa->offset + i;
 590			return true;
 591		}
 592	}
 593
 594	return false;
 595}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 596
 597bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret)
 598{
 599	u64 val = 0;
 600	char *tok = bf, *sep, *end;
 601
 602	*ret = 0;
 
 603
 604	while (size != 0) {
 605		int toklen = size;
 606
 607		sep = memchr(tok, '|', size);
 608		if (sep != NULL) {
 609			size -= sep - tok + 1;
 
 610
 611			end = sep - 1;
 612			while (end > tok && isspace(*end))
 613				--end;
 
 
 614
 615			toklen = end - tok + 1;
 616		}
 
 
 
 617
 618		while (isspace(*tok))
 619			++tok;
 620
 621		if (isalpha(*tok) || *tok == '_') {
 622			if (!strarray__strtoul(sa, tok, toklen, &val))
 623				return false;
 624		} else
 625			val = strtoul(tok, NULL, 0);
 626
 627		*ret |= (1 << (val - 1));
 628
 629		if (sep == NULL)
 630			break;
 631		tok = sep + 1;
 632	}
 633
 634	return true;
 635}
 636
/*
 * Try each strarray in turn until one resolves the name; *ret is only
 * meaningful when true is returned.
 */
 637bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret)
 638{
 639	int i;
 640
 641	for (i = 0; i < sas->nr_entries; ++i) {
 642		struct strarray *sa = sas->entries[i];
 643
 644		if (strarray__strtoul(sa, bf, size, ret))
 645			return true;
 646	}
 647
 648	return false;
 649}
 650
/* Format an argument using a set of strarrays, decimal fallback. */
 651size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
 652					struct syscall_arg *arg)
 653{
 654	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
 655}
 656
 657#ifndef AT_FDCWD
 658#define AT_FDCWD	-100
 659#endif
 660
/* Like the fd printer, but recognizing the special AT_FDCWD value. */
 661static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
 662					   struct syscall_arg *arg)
 663{
 664	int fd = arg->val;
 665	const char *prefix = "AT_FD";
 666
 667	if (fd == AT_FDCWD)
 668		return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");
 669
 670	return syscall_arg__scnprintf_fd(bf, size, arg);
 671}
 672
 673#define SCA_FDAT syscall_arg__scnprintf_fd_at
 674
/* Forward declaration: defined later, needs the fd->path machinery. */
 675static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
 676					      struct syscall_arg *arg);
 677
 678#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
 679
/* Generic value printers used as scnprintf hooks in the fmt tables. */
 680size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
 681{
 682	return scnprintf(bf, size, "%#lx", arg->val);
 683}
 684
 685size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
 686{
 687	if (arg->val == 0)
 688		return scnprintf(bf, size, "NULL");
 689	return syscall_arg__scnprintf_hex(bf, size, arg);
 690}
 691
 692size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
 693{
 694	return scnprintf(bf, size, "%d", arg->val);
 695}
 696
 697size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
 698{
 699	return scnprintf(bf, size, "%ld", arg->val);
 700}
 701
/* Print a fixed-size char array (e.g. comm fields) as a quoted string. */
 702static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
 703{
 704	// XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
 705	//     fill missing comms using thread__set_comm()...
 706	//     here or in a special syscall_arg__scnprintf_pid_sched_tp...
 707	return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
 708}
 709
 710#define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array
 711
/* bpf(2) 'cmd' argument names, indexed by their BPF_* command value. */
static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD", "OBJ_PIN", "OBJ_GET", "PROG_ATTACH",
	"PROG_DETACH", "PROG_TEST_RUN", "PROG_GET_NEXT_ID", "MAP_GET_NEXT_ID",
	"PROG_GET_FD_BY_ID", "MAP_GET_FD_BY_ID", "OBJ_GET_INFO_BY_FD",
	"PROG_QUERY", "RAW_TRACEPOINT_OPEN", "BTF_LOAD", "BTF_GET_FD_BY_ID",
	"TASK_FD_QUERY", "MAP_LOOKUP_AND_DELETE_ELEM", "MAP_FREEZE",
	"BTF_GET_NEXT_ID", "MAP_LOOKUP_BATCH", "MAP_LOOKUP_AND_DELETE_BATCH",
	"MAP_UPDATE_BATCH", "MAP_DELETE_BATCH", "LINK_CREATE", "LINK_UPDATE",
	"LINK_GET_FD_BY_ID", "LINK_GET_NEXT_ID", "ENABLE_STATS", "ITER_CREATE",
	"LINK_DETACH", "PROG_BIND_MAP",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");

/* fsmount(2) flags; only bit 1 (CLOEXEC) has a name here. */
static const char *fsmount_flags[] = {
	[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");

/* fsconfig_cmds[] is generated from the uapi headers at build time. */
#include "trace/beauty/generated/fsconfig_arrays.c"

static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");

/* epoll_ctl(2) 'op' values start at 1 (EPOLL_CTL_ADD), hence the offset. */
static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");

/* lseek(2) 'whence'; DATA/HOLE depend on the build host's headers. */
static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences, "SEEK_");

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
	"GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");

/* Linux specific fcntl(2) commands, offset by F_LINUX_SPECIFIC_BASE. */
static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", [5] =	"CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};

static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);

/* Both fcntl tables, tried in order by the STUL_STRARRAYS machinery. */
static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);

static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");

static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow, "SIG_");

static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid, "CLOCK_");
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 799
/*
 * Pretty print the access(2) 'mode' argument: "F_OK" when zero, otherwise
 * each of the R/W/X_OK bits present, with leftover bits shown in hex.
 */
static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *suffix = "_OK";
	size_t printed = 0;
	int mode = arg->val;

	if (mode == F_OK) /* 0 */
		return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
	/* print and clear each known bit, so 'mode' ends up with the unknown ones */
#define	P_MODE(n) \
	if (mode & n##_OK) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
		mode &= ~n##_OK; \
	}

	P_MODE(R);
	P_MODE(W);
	P_MODE(X);
#undef P_MODE

	/* any bits not covered above are shown raw, in hex */
	if (mode)
		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);

	return printed;
}
 826
 827#define SCA_ACCMODE syscall_arg__scnprintf_access_mode
 828
 829static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
 830					      struct syscall_arg *arg);
 831
 832#define SCA_FILENAME syscall_arg__scnprintf_filename
 833
/*
 * Pretty print the pipe2(2) 'flags' argument: O_CLOEXEC and O_NONBLOCK
 * are named, anything left over is shown in hex.
 */
static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "O_";
	int printed = 0, flags = arg->val;

	/* print and clear each known flag; 'printed' doubles as the '|' separator gate */
#define	P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	/* unknown leftover bits, in hex */
	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}
 856
 857#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 858
 859#ifndef GRND_NONBLOCK
 860#define GRND_NONBLOCK	0x0001
 861#endif
 862#ifndef GRND_RANDOM
 863#define GRND_RANDOM	0x0002
 864#endif
 865
/*
 * Pretty print the getrandom(2) 'flags' argument (GRND_RANDOM,
 * GRND_NONBLOCK); leftover unknown bits are shown in hex.
 */
static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
						   struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "GRND_";
	int printed = 0, flags = arg->val;

	/* print and clear each known flag; 'printed' gates the '|' separator */
#define	P_FLAG(n) \
	if (flags & GRND_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~GRND_##n; \
	}

	P_FLAG(RANDOM);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}
 888
 889#define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
 890
 891#define STRARRAY(name, array) \
 892	  { .scnprintf	= SCA_STRARRAY, \
 893	    .strtoul	= STUL_STRARRAY, \
 894	    .parm	= &strarray__##array, }
 895
 896#define STRARRAY_FLAGS(name, array) \
 897	  { .scnprintf	= SCA_STRARRAY_FLAGS, \
 898	    .strtoul	= STUL_STRARRAY_FLAGS, \
 899	    .parm	= &strarray__##array, }
 900
 901#include "trace/beauty/arch_errno_names.c"
 902#include "trace/beauty/eventfd.c"
 903#include "trace/beauty/futex_op.c"
 904#include "trace/beauty/futex_val3.c"
 905#include "trace/beauty/mmap.c"
 906#include "trace/beauty/mode_t.c"
 907#include "trace/beauty/msg_flags.c"
 908#include "trace/beauty/open_flags.c"
 909#include "trace/beauty/perf_event_open.c"
 910#include "trace/beauty/pid.c"
 911#include "trace/beauty/sched_policy.c"
 912#include "trace/beauty/seccomp.c"
 913#include "trace/beauty/signum.c"
 914#include "trace/beauty/socket_type.c"
 915#include "trace/beauty/waitid_options.c"
 916
/*
 * Per-syscall pretty-printing overrides. This table is searched with
 * bsearch() (see syscall_fmt__find()), so entries MUST be kept sorted
 * by ->name.
 */
static struct syscall_fmt syscall_fmts[] = {
	{ .name	    = "access",
	  .arg = { [1] = { .scnprintf = SCA_ACCMODE,  /* mode */ }, }, },
	{ .name	    = "arch_prctl",
	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
		   [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
	{ .name	    = "bind",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name	    = "bpf",
	  .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
	{ .name	    = "brk",	    .hexret = true,
	  .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
	{ .name     = "clock_gettime",
	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
	{ .name	    = "clock_nanosleep",
	  .arg = { [2] = { .scnprintf = SCA_TIMESPEC,  /* rqtp */ }, }, },
	{ .name	    = "clone",	    .errpid = true, .nr_args = 5,
	  .arg = { [0] = { .name = "flags",	    .scnprintf = SCA_CLONE_FLAGS, },
		   [1] = { .name = "child_stack",   .scnprintf = SCA_HEX, },
		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
		   [3] = { .name = "child_tidptr",  .scnprintf = SCA_HEX, },
		   [4] = { .name = "tls",	    .scnprintf = SCA_HEX, }, }, },
	{ .name	    = "close",
	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
	{ .name	    = "connect",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name	    = "epoll_ctl",
	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
	{ .name	    = "eventfd2",
	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
	{ .name	    = "fchmodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "fchownat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "fcntl",
	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD,  /* cmd */
			   .strtoul   = STUL_STRARRAYS,
			   .parm      = &strarrays__fcntl_cmds_arrays,
			   .show_zero = true, },
		   [2] = { .scnprintf =  SCA_FCNTL_ARG, /* arg */ }, }, },
	{ .name	    = "flock",
	  .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
	{ .name     = "fsconfig",
	  .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
	{ .name     = "fsmount",
	  .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
		   [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
	{ .name     = "fspick",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	  /* dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME,	  /* path */ },
		   [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
	{ .name	    = "fstat", .alias = "newfstat", },
	{ .name	    = "fstatat", .alias = "newfstatat", },
	{ .name	    = "futex",
	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
	{ .name	    = "futimesat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "getitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "getpid",	    .errpid = true, },
	{ .name	    = "getpgid",    .errpid = true, },
	{ .name	    = "getppid",    .errpid = true, },
	{ .name	    = "getrandom",
	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
	{ .name	    = "getrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "getsockopt",
	  .arg = { [1] = STRARRAY(level, socket_level), }, },
	{ .name	    = "gettid",	    .errpid = true, },
	{ .name	    = "ioctl",
	  .arg = {
#if defined(__i386__) || defined(__x86_64__)
/*
 * FIXME: Make this available to all arches.
 */
		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#else
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#endif
	{ .name	    = "kcmp",	    .nr_args = 5,
	  .arg = { [0] = { .name = "pid1",	.scnprintf = SCA_PID, },
		   [1] = { .name = "pid2",	.scnprintf = SCA_PID, },
		   [2] = { .name = "type",	.scnprintf = SCA_KCMP_TYPE, },
		   [3] = { .name = "idx1",	.scnprintf = SCA_KCMP_IDX, },
		   [4] = { .name = "idx2",	.scnprintf = SCA_KCMP_IDX, }, }, },
	{ .name	    = "keyctl",
	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
	{ .name	    = "kill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "linkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "lseek",
	  .arg = { [2] = STRARRAY(whence, whences), }, },
	{ .name	    = "lstat", .alias = "newlstat", },
	{ .name     = "madvise",
	  .arg = { [0] = { .scnprintf = SCA_HEX,      /* start */ },
		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
	{ .name	    = "mkdirat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mknodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name	    = "mmap",	    .hexret = true,
/* The standard mmap maps to old_mmap on s390x */
#if defined(__s390x__)
	.alias = "old_mmap",
#endif
	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
		   [3] = { .scnprintf = SCA_MMAP_FLAGS,	/* flags */
			   .strtoul   = STUL_STRARRAY_FLAGS,
			   .parm      = &strarray__mmap_flags, },
		   [5] = { .scnprintf = SCA_HEX,	/* offset */ }, }, },
	{ .name	    = "mount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
			   .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "move_mount",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* from_dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
		   [2] = { .scnprintf = SCA_FDAT,	/* to_dfd */ },
		   [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
		   [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name	    = "mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ }, }, },
	{ .name	    = "mq_unlink",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
	{ .name	    = "mremap",	    .hexret = true,
	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
	{ .name	    = "name_to_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "newfstatat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "open",
	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "open_by_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "openat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	/* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name	    = "perf_event_open",
	  .arg = { [0] = { .scnprintf = SCA_PERF_ATTR,  /* attr */ },
		   [2] = { .scnprintf = SCA_INT,	/* cpu */ },
		   [3] = { .scnprintf = SCA_FD,		/* group_fd */ },
		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
	{ .name	    = "pipe2",
	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
	{ .name	    = "pkey_alloc",
	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS,	/* access_rights */ }, }, },
	{ .name	    = "pkey_free",
	  .arg = { [0] = { .scnprintf = SCA_INT,	/* key */ }, }, },
	{ .name	    = "pkey_mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX,	/* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT,	/* prot */ },
		   [3] = { .scnprintf = SCA_INT,	/* pkey */ }, }, },
	{ .name	    = "poll", .timeout = true, },
	{ .name	    = "ppoll", .timeout = true, },
	{ .name	    = "prctl",
	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
			   .strtoul   = STUL_STRARRAY,
			   .parm      = &strarray__prctl_options, },
		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
	{ .name	    = "pread", .alias = "pread64", },
	{ .name	    = "preadv", .alias = "pread", },
	{ .name	    = "prlimit64",
	  .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "pwrite", .alias = "pwrite64", },
	{ .name	    = "readlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "recvfrom",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "recvmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "renameat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
	{ .name	    = "renameat2",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
	{ .name	    = "rt_sigaction",
	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_sigprocmask",
	  .arg = { [0] = STRARRAY(how, sighow), }, },
	{ .name	    = "rt_sigqueueinfo",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "rt_tgsigqueueinfo",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "sched_setscheduler",
	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
	{ .name	    = "seccomp",
	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP,	   /* op */ },
		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
	{ .name	    = "select", .timeout = true, },
	{ .name	    = "sendfile", .alias = "sendfile64", },
	{ .name	    = "sendmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name	    = "sendto",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ },
		   [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
	{ .name	    = "set_tid_address", .errpid = true, },
	{ .name	    = "setitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "setrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "setsockopt",
	  .arg = { [1] = STRARRAY(level, socket_level), }, },
	{ .name	    = "socket",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "socketpair",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "stat", .alias = "newstat", },
	{ .name	    = "statx",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	 /* fdat */ },
		   [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } ,
		   [3] = { .scnprintf = SCA_STATX_MASK,	 /* mask */ }, }, },
	{ .name	    = "swapoff",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name	    = "swapon",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name	    = "symlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "sync_file_range",
	  .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
	{ .name	    = "tgkill",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "tkill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name     = "umount2", .alias = "umount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
	{ .name	    = "uname", .alias = "newuname", },
	{ .name	    = "unlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "utimensat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
	{ .name	    = "wait4",	    .errpid = true,
	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
	{ .name	    = "waitid",	    .errpid = true,
	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
};
1172
1173static int syscall_fmt__cmp(const void *name, const void *fmtp)
1174{
1175	const struct syscall_fmt *fmt = fmtp;
1176	return strcmp(name, fmt->name);
1177}
1178
/*
 * Binary search 'fmts' for an entry named 'name'; 'fmts' must be sorted
 * by ->name. Returns NULL when not found.
 */
static struct syscall_fmt *__syscall_fmt__find(struct syscall_fmt *fmts, const int nmemb, const char *name)
{
	return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}
1183
1184static struct syscall_fmt *syscall_fmt__find(const char *name)
1185{
1186	const int nmemb = ARRAY_SIZE(syscall_fmts);
1187	return __syscall_fmt__find(syscall_fmts, nmemb, name);
1188}
1189
1190static struct syscall_fmt *__syscall_fmt__find_by_alias(struct syscall_fmt *fmts, const int nmemb, const char *alias)
1191{
1192	int i;
1193
1194	for (i = 0; i < nmemb; ++i) {
1195		if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
1196			return &fmts[i];
1197	}
1198
1199	return NULL;
1200}
1201
1202static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
1203{
1204	const int nmemb = ARRAY_SIZE(syscall_fmts);
1205	return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
1206}
1207
1208/*
1209 * is_exit: is this "exit" or "exit_group"?
1210 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
1211 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
1212 * nonexistent: Just a hole in the syscall table, syscall id not allocated
1213 */
1214struct syscall {
1215	struct tep_event    *tp_format;
1216	int		    nr_args;
1217	int		    args_size;
1218	struct {
1219		struct bpf_program *sys_enter,
1220				   *sys_exit;
1221	}		    bpf_prog;
1222	bool		    is_exit;
1223	bool		    is_open;
1224	bool		    nonexistent;
1225	struct tep_format_field *args;
1226	const char	    *name;
1227	struct syscall_fmt  *fmt;
1228	struct syscall_arg_fmt *arg_fmt;
 
1229};
1230
1231/*
1232 * We need to have this 'calculated' boolean because in some cases we really
1233 * don't know what is the duration of a syscall, for instance, when we start
1234 * a session and some threads are waiting for a syscall to finish, say 'poll',
1235 * in which case all we can do is to print "( ? ) for duration and for the
1236 * start timestamp.
1237 */
1238static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
1239{
1240	double duration = (double)t / NSEC_PER_MSEC;
1241	size_t printed = fprintf(fp, "(");
1242
1243	if (!calculated)
1244		printed += fprintf(fp, "         ");
1245	else if (duration >= 1.0)
1246		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
1247	else if (duration >= 0.01)
1248		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
1249	else
1250		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
1251	return printed + fprintf(fp, "): ");
1252}
1253
1254/**
1255 * filename.ptr: The filename char pointer that will be vfs_getname'd
1256 * filename.entry_str_pos: Where to insert the string translated from
1257 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
1258 * ret_scnprintf: syscall args may set this to a different syscall return
1259 *                formatter, for instance, fcntl may return fds, file flags, etc.
1260 */
1261struct thread_trace {
1262	u64		  entry_time;
 
1263	bool		  entry_pending;
1264	unsigned long	  nr_events;
1265	unsigned long	  pfmaj, pfmin;
1266	char		  *entry_str;
1267	double		  runtime_ms;
1268	size_t		  (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
1269        struct {
1270		unsigned long ptr;
1271		short int     entry_str_pos;
1272		bool	      pending_open;
1273		unsigned int  namelen;
1274		char	      *name;
1275	} filename;
1276	struct {
1277		int	      max;
1278		struct file   *table;
1279	} files;
1280
1281	struct intlist *syscall_stats;
1282};
1283
1284static struct thread_trace *thread_trace__new(void)
1285{
1286	struct thread_trace *ttrace =  zalloc(sizeof(struct thread_trace));
1287
1288	if (ttrace) {
1289		ttrace->files.max = -1;
1290		ttrace->syscall_stats = intlist__new(NULL);
1291	}
1292
1293	return ttrace;
1294}
1295
/*
 * Return the thread's thread_trace private area, allocating it on first
 * use and bumping its event count. On a NULL thread or allocation failure
 * a warning is printed to 'fp' and NULL returned (sample gets dropped).
 */
static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	/* lazily allocate on first event for this thread */
	if (thread__priv(thread) == NULL)
		thread__set_priv(thread, thread_trace__new());

	/* still NULL means thread_trace__new() failed */
	if (thread__priv(thread) == NULL)
		goto fail;

	ttrace = thread__priv(thread);
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(fp, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}
1318
1319
1320void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
1321				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
1322{
1323	struct thread_trace *ttrace = thread__priv(arg->thread);
1324
1325	ttrace->ret_scnprintf = ret_scnprintf;
1326}
1327
1328#define TRACE_PFMAJ		(1 << 0)
1329#define TRACE_PFMIN		(1 << 1)
1330
1331static const size_t trace__entry_str_size = 2048;
1332
/*
 * Return the cached 'struct file' slot for 'fd', growing the table with
 * realloc() as needed. Returns NULL for negative fds or when growing the
 * table fails (in which case the old table stays valid).
 */
static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
	if (fd < 0)
		return NULL;

	if (fd > ttrace->files.max) {
		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));

		if (nfiles == NULL)
			return NULL;

		if (ttrace->files.max != -1) {
			/* zero only the newly appended slots */
			memset(nfiles + ttrace->files.max + 1, 0,
			       (fd - ttrace->files.max) * sizeof(struct file));
		} else {
			/* first allocation: zero the whole table */
			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
		}

		ttrace->files.table = nfiles;
		ttrace->files.max   = fd;
	}

	return ttrace->files.table + fd;
}
1357
/* Convenience wrapper: fd table slot lookup via the thread's private area. */
struct file *thread__files_entry(struct thread *thread, int fd)
{
	struct thread_trace *ttrace = thread__priv(thread);

	return thread_trace__files_entry(ttrace, fd);
}
1362
1363static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
1364{
1365	struct thread_trace *ttrace = thread__priv(thread);
1366	struct file *file = thread_trace__files_entry(ttrace, fd);
1367
1368	if (file != NULL) {
1369		struct stat st;
1370		if (stat(pathname, &st) == 0)
1371			file->dev_maj = major(st.st_rdev);
1372		file->pathname = strdup(pathname);
1373		if (file->pathname)
1374			return 0;
1375	}
1376
1377	return -1;
1378}
1379
/*
 * Resolve 'fd' to a pathname via /proc/<pid>/fd (or /proc/<pid>/task/<tid>/fd
 * for non-leader threads) and cache it with trace__set_fd_pathname().
 * Returns 0 on success, -1 on any failure.
 */
static int thread__read_fd_path(struct thread *thread, int fd)
{
	char linkname[PATH_MAX], pathname[PATH_MAX];
	struct stat st;
	int ret;

	if (thread->pid_ == thread->tid) {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/fd/%d", thread->pid_, fd);
	} else {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
	}

	/* link target must fit in pathname, including the NUL we add below */
	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
		return -1;

	ret = readlink(linkname, pathname, sizeof(pathname));

	/* ret > st.st_size means the link changed between lstat() and readlink() */
	if (ret < 0 || ret > st.st_size)
		return -1;

	pathname[ret] = '\0';	/* readlink() does not NUL-terminate */
	return trace__set_fd_pathname(thread, fd, pathname);
}
1405
/*
 * Return the cached pathname for 'fd' in 'thread', reading it from /proc
 * on demand — but only in live mode, since /proc is not usable when
 * processing a recorded session.
 */
static const char *thread__fd_path(struct thread *thread, int fd,
				   struct trace *trace)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (ttrace == NULL || trace->fd_path_disabled)
		return NULL;

	if (fd < 0)
		return NULL;

	/* not cached yet: try to resolve it, accounting the /proc lookup */
	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
		if (!trace->live)
			return NULL;
		++trace->stats.proc_getname;
		if (thread__read_fd_path(thread, fd))
			return NULL;
	}

	return ttrace->files.table[fd].pathname;
}
1427
1428size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
 
1429{
1430	int fd = arg->val;
1431	size_t printed = scnprintf(bf, size, "%d", fd);
1432	const char *path = thread__fd_path(arg->thread, fd, arg->trace);
1433
1434	if (path)
1435		printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1436
1437	return printed;
1438}
1439
1440size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
1441{
1442        size_t printed = scnprintf(bf, size, "%d", fd);
1443	struct thread *thread = machine__find_thread(trace->host, pid, pid);
1444
1445	if (thread) {
1446		const char *path = thread__fd_path(thread, fd, trace);
1447
1448		if (path)
1449			printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1450
1451		thread__put(thread);
1452	}
1453
1454        return printed;
1455}
1456
/*
 * Print the fd being close(2)d and drop its cached pathname, since the
 * fd number may be reused for a different file afterwards.
 */
static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
	struct thread_trace *ttrace = thread__priv(arg->thread);

	if (ttrace && fd >= 0 && fd <= ttrace->files.max)
		zfree(&ttrace->files.table[fd].pathname);

	return printed;
}
1469
1470static void thread__set_filename_pos(struct thread *thread, const char *bf,
1471				     unsigned long ptr)
1472{
1473	struct thread_trace *ttrace = thread__priv(thread);
1474
1475	ttrace->filename.ptr = ptr;
1476	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1477}
1478
/* Print a string payload carried in arg->augmented (size-prefixed) and advance past it. */
static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
	/*
	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
	 * we would have two strings, each prefixed by its size.
	 */
	int consumed = sizeof(*augmented_arg) + augmented_arg->size;

	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	return printed;
}
1494
1495static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1496					      struct syscall_arg *arg)
1497{
1498	unsigned long ptr = arg->val;
1499
1500	if (arg->augmented.args)
1501		return syscall_arg__scnprintf_augmented_string(arg, bf, size);
1502
1503	if (!arg->trace->vfs_getname)
1504		return scnprintf(bf, size, "%#x", ptr);
1505
1506	thread__set_filename_pos(arg->thread, bf, ptr);
1507	return 0;
1508}
1509
1510static bool trace__filter_duration(struct trace *trace, double t)
1511{
1512	return t < (trace->duration_filter * NSEC_PER_MSEC);
1513}
1514
1515static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1516{
1517	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1518
1519	return fprintf(fp, "%10.3f ", ts);
1520}
1521
1522/*
1523 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
1524 * using ttrace->entry_time for a thread that receives a sys_exit without
1525 * first having received a sys_enter ("poll" issued before tracing session
1526 * starts, lost sys_enter exit due to ring buffer overflow).
1527 */
1528static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1529{
1530	if (tstamp > 0)
1531		return __trace__fprintf_tstamp(trace, tstamp, fp);
1532
1533	return fprintf(fp, "         ? ");
1534}
1535
/* pid of the workload launched after "--", matched in the SIGCHLD handler. */
static pid_t workload_pid = -1;
/* Main-loop termination flag; only ever set from signal handlers. */
static volatile sig_atomic_t done = false;
/* Set only on SIGINT, to distinguish user interruption from workload exit. */
static volatile sig_atomic_t interrupted = false;
1539
1540static void sighandler_interrupt(int sig __maybe_unused)
1541{
1542	done = interrupted = true;
 
1543}
1544
/*
 * SIGCHLD handler (SA_SIGINFO): stop the main loop only when the child
 * that exited is the traced workload, not some unrelated child.
 */
static void sighandler_chld(int sig __maybe_unused, siginfo_t *info,
			    void *context __maybe_unused)
{
	if (info->si_pid == workload_pid)
		done = true;
}
1551
/*
 * Print "comm/tid " (or just "tid " without --comm) as an event prefix,
 * but only when tracing more than one thread — with a single thread the
 * identification would be redundant noise.
 */
static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
{
	size_t printed = 0;

	if (trace->multiple_threads) {
		if (trace->show_comm)
			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
		printed += fprintf(fp, "%d ", thread->tid);
	}

	return printed;
}
1564
/*
 * Print the common line prefix for a syscall entry: optional timestamp,
 * optional duration column, then comm/tid. Returns the number of
 * characters written.
 */
static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
	size_t printed = 0;

	if (trace->show_tstamp)
		printed = trace__fprintf_tstamp(trace, tstamp, fp);
	if (trace->show_duration)
		printed += fprintf_duration(duration, duration_calculated, fp);
	return printed + trace__fprintf_comm_tid(trace, thread, fp);
}
1576
1577static int trace__process_event(struct trace *trace, struct machine *machine,
1578				union perf_event *event, struct perf_sample *sample)
1579{
1580	int ret = 0;
1581
1582	switch (event->header.type) {
1583	case PERF_RECORD_LOST:
1584		color_fprintf(trace->output, PERF_COLOR_RED,
1585			      "LOST %" PRIu64 " events!\n", event->lost.lost);
1586		ret = machine__process_lost_event(machine, event, sample);
1587		break;
1588	default:
1589		ret = machine__process_event(machine, event, sample);
1590		break;
1591	}
1592
1593	return ret;
1594}
1595
1596static int trace__tool_process(struct perf_tool *tool,
1597			       union perf_event *event,
1598			       struct perf_sample *sample,
1599			       struct machine *machine)
1600{
1601	struct trace *trace = container_of(tool, struct trace, tool);
1602	return trace__process_event(trace, machine, event, sample);
1603}
1604
/*
 * Kernel address resolver wrapper: refuse resolution (returning NULL) when
 * kptr_restrict hides kernel addresses, warning the user exactly once per
 * machine; otherwise defer to the regular machine resolver.
 */
static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;

	/* Only warn once; subsequent calls just fail silently. */
	if (machine->kptr_restrict_warned)
		return NULL;

	if (symbol_conf.kptr_restrict) {
		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
			   "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
			   "Kernel samples will not be resolved.\n");
		machine->kptr_restrict_warned = true;
		return NULL;
	}

	return machine__resolve_kernel_addr(vmachine, addrp, modp);
}
1622
/*
 * Set up symbol resolution for the session: init the symbol subsystem,
 * create the host machine, install the kptr_restrict-aware resolver and
 * synthesize existing threads so their maps are known.
 *
 * Returns 0 on success or a negative error; on any failure after
 * symbol__init() the symbol subsystem is torn down again.
 * NOTE(review): trace->host is not freed on the error paths here —
 * presumably trace__symbols__exit() handles it; confirm caller behavior.
 */
static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
{
	int err = symbol__init(NULL);

	if (err)
		return err;

	trace->host = machine__new_host();
	if (trace->host == NULL)
		return -ENOMEM;

	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
	if (err < 0)
		goto out;

	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
					    evlist->core.threads, trace__tool_process,
					    true, false, 1);
out:
	if (err)
		symbol__exit();

	return err;
}
1647
1648static void trace__symbols__exit(struct trace *trace)
1649{
1650	machine__exit(trace->host);
1651	trace->host = NULL;
1652
1653	symbol__exit();
1654}
 
1655
1656static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
1657{
1658	int idx;
1659
1660	if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
1661		nr_args = sc->fmt->nr_args;
1662
1663	sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
1664	if (sc->arg_fmt == NULL)
1665		return -1;
1666
1667	for (idx = 0; idx < nr_args; ++idx) {
1668		if (sc->fmt)
1669			sc->arg_fmt[idx] = sc->fmt->arg[idx];
1670	}
1671
1672	sc->nr_args = nr_args;
1673	return 0;
1674}
1675
/*
 * Beautifiers looked up by field *name* (not syscall) for non-syscall
 * tracepoints. MUST be kept sorted by .name: it is searched with bsearch()
 * in __syscall_arg_fmt__find_by_name().
 */
static struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
	{ .name = "msr",	.scnprintf = SCA_X86_MSR,	  .strtoul = STUL_X86_MSR,	   },
	{ .name = "vector",	.scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, },
};
1680
1681static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
1682{
1683       const struct syscall_arg_fmt *fmt = fmtp;
1684       return strcmp(name, fmt->name);
1685}
1686
1687static struct syscall_arg_fmt *
1688__syscall_arg_fmt__find_by_name(struct syscall_arg_fmt *fmts, const int nmemb, const char *name)
1689{
1690       return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
1691}
1692
1693static struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
1694{
1695       const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
1696       return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
1697}
1698
/*
 * Walk the tracepoint format fields and pick a default beautifier for each
 * arg slot that doesn't already have one, based on field type/name
 * heuristics (the else-if order below encodes their priority).
 * Returns the last field seen, so the caller can compute the total
 * payload size of the args.
 */
static struct tep_format_field *
syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
{
	struct tep_format_field *last_field = NULL;
	int len;

	for (; field; field = field->next, ++arg) {
		last_field = field;

		/* Hand-picked beautifiers (from the fmt tables) win. */
		if (arg->scnprintf)
			continue;

		len = strlen(field->name);

		/* "const char *" named *name or *path* => filename string */
		if (strcmp(field->type, "const char *") == 0 &&
		    ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
		     strstr(field->name, "path") != NULL))
			arg->scnprintf = SCA_FILENAME;
		else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
			arg->scnprintf = SCA_PTR;
		else if (strcmp(field->type, "pid_t") == 0)
			arg->scnprintf = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			arg->scnprintf = SCA_MODE_T;
		else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
			arg->scnprintf = SCA_CHAR_ARRAY;
			arg->nr_entries = field->arraylen;
		} else if ((strcmp(field->type, "int") == 0 ||
			  strcmp(field->type, "unsigned int") == 0 ||
			  strcmp(field->type, "long") == 0) &&
			 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 * 65 int
			 * 23 unsigned int
			 * 7 unsigned long
			 */
			arg->scnprintf = SCA_FD;
		} else {
			/* Last resort: a beautifier keyed purely on field name. */
			struct syscall_arg_fmt *fmt = syscall_arg_fmt__find_by_name(field->name);

			if (fmt) {
				arg->scnprintf = fmt->scnprintf;
				arg->strtoul   = fmt->strtoul;
			}
		}
	}

	return last_field;
}
1750
1751static int syscall__set_arg_fmts(struct syscall *sc)
1752{
1753	struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);
1754
1755	if (last_field)
1756		sc->args_size = last_field->offset + last_field->size;
1757
1758	return 0;
1759}
1760
1761static int trace__read_syscall_info(struct trace *trace, int id)
1762{
1763	char tp_name[128];
1764	struct syscall *sc;
1765	const char *name = syscalltbl__name(trace->sctbl, id);
1766
1767#ifdef HAVE_SYSCALL_TABLE_SUPPORT
1768	if (trace->syscalls.table == NULL) {
1769		trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
1770		if (trace->syscalls.table == NULL)
1771			return -ENOMEM;
1772	}
1773#else
1774	if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
1775		// When using libaudit we don't know beforehand what is the max syscall id
1776		struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
1777
1778		if (table == NULL)
1779			return -ENOMEM;
1780
1781		// Need to memset from offset 0 and +1 members if brand new
1782		if (trace->syscalls.table == NULL)
1783			memset(table, 0, (id + 1) * sizeof(*sc));
1784		else
1785			memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
1786
1787		trace->syscalls.table	      = table;
1788		trace->sctbl->syscalls.max_id = id;
1789	}
1790#endif
1791	sc = trace->syscalls.table + id;
1792	if (sc->nonexistent)
1793		return -EEXIST;
1794
1795	if (name == NULL) {
1796		sc->nonexistent = true;
1797		return -EEXIST;
1798	}
1799
1800	sc->name = name;
1801	sc->fmt  = syscall_fmt__find(sc->name);
1802
1803	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
1804	sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1805
1806	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
1807		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
1808		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1809	}
1810
1811	/*
1812	 * Fails to read trace point format via sysfs node, so the trace point
1813	 * doesn't exist.  Set the 'nonexistent' flag as true.
1814	 */
1815	if (IS_ERR(sc->tp_format)) {
1816		sc->nonexistent = true;
1817		return PTR_ERR(sc->tp_format);
1818	}
1819
1820	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
1821					RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
1822		return -ENOMEM;
1823
1824	sc->args = sc->tp_format->format.fields;
 
1825	/*
1826	 * We need to check and discard the first variable '__syscall_nr'
1827	 * or 'nr' that mean the syscall number. It is needless here.
1828	 * So drop '__syscall_nr' or 'nr' field but does not exist on older kernels.
1829	 */
1830	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
1831		sc->args = sc->args->next;
1832		--sc->nr_args;
1833	}
1834
1835	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
1836	sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
1837
1838	return syscall__set_arg_fmts(sc);
1839}
1840
/*
 * For a non-syscall tracepoint evsel: allocate its per-arg fmt array and
 * seed beautifiers from the tracepoint's format fields.
 * Returns 0 on success, -ENOMEM when the fmt array can't be allocated.
 *
 * NOTE(review): dereferences evsel->tp_format unconditionally — presumably
 * callers only pass tracepoint evsels with a parsed format; confirm.
 */
static int evsel__init_tp_arg_scnprintf(struct evsel *evsel)
{
	struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);

	if (fmt != NULL) {
		syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields);
		return 0;
	}

	return -ENOMEM;
}
1852
/*
 * qsort()/bsearch() comparator for arrays of int (syscall ids).
 *
 * Uses the canonical (a > b) - (a < b) form instead of "*a - *b":
 * subtracting two ints overflows (undefined behavior) when the operands
 * have opposite signs and large magnitude, and can return the wrong sign.
 */
static int intcmp(const void *a, const void *b)
{
	const int *one = a, *another = b;

	return (*one > *another) - (*one < *another);
}
1859
/*
 * Resolve the -e/--expr qualifier strings into a sorted array of syscall
 * ids in trace->ev_qualifier_ids. Each entry may be an exact name or a
 * glob; globs can match several syscalls, growing the array as needed.
 * Unknown names are only reported at debug verbosity and skipped.
 *
 * Returns 0 on success (even if some names were skipped), -EINVAL/-ENOMEM
 * on allocation failure, with the ids array freed on the error path.
 */
static int trace__validate_ev_qualifier(struct trace *trace)
{
	int err = 0;
	bool printed_invalid_prefix = false;
	struct str_node *pos;
	/* Start with one slot per qualifier entry; globs may need more. */
	size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier);

	trace->ev_qualifier_ids.entries = malloc(nr_allocated *
						 sizeof(trace->ev_qualifier_ids.entries[0]));

	if (trace->ev_qualifier_ids.entries == NULL) {
		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
		       trace->output);
		err = -EINVAL;
		goto out;
	}

	strlist__for_each_entry(pos, trace->ev_qualifier) {
		const char *sc = pos->s;
		int id = syscalltbl__id(trace->sctbl, sc), match_next = -1;

		if (id < 0) {
			/* Not an exact name: try it as a glob. */
			id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next);
			if (id >= 0)
				goto matches;

			/* Unknown syscall: report once, comma-separate the rest. */
			if (!printed_invalid_prefix) {
				pr_debug("Skipping unknown syscalls: ");
				printed_invalid_prefix = true;
			} else {
				pr_debug(", ");
			}

			pr_debug("%s", sc);
			continue;
		}
matches:
		trace->ev_qualifier_ids.entries[nr_used++] = id;
		if (match_next == -1)
			continue;

		/* Glob: collect every further match, growing in chunks of 8. */
		while (1) {
			id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next);
			if (id < 0)
				break;
			if (nr_allocated == nr_used) {
				void *entries;

				nr_allocated += 8;
				entries = realloc(trace->ev_qualifier_ids.entries,
						  nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0]));
				if (entries == NULL) {
					err = -ENOMEM;
					fputs("\nError:\t Not enough memory for parsing\n", trace->output);
					goto out_free;
				}
				trace->ev_qualifier_ids.entries = entries;
			}
			trace->ev_qualifier_ids.entries[nr_used++] = id;
		}
	}

	trace->ev_qualifier_ids.nr = nr_used;
	/* Sorted so trace__syscall_enabled() can bsearch() it. */
	qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp);
out:
	if (printed_invalid_prefix)
		pr_debug("\n");
	return err;
out_free:
	zfree(&trace->ev_qualifier_ids.entries);
	trace->ev_qualifier_ids.nr = 0;
	goto out;
}
1933
/*
 * Should syscall 'id' be traced, given the -e qualifier list?
 * No qualifier means trace everything; otherwise bsearch() the sorted id
 * array and honor not_ev_qualifier (the "all but these" negation).
 */
static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id)
{
	bool in_ev_qualifier;

	if (trace->ev_qualifier_ids.nr == 0)
		return true;

	in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries,
				  trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL;

	if (in_ev_qualifier)
	       return !trace->not_ev_qualifier;

	return trace->not_ev_qualifier;
}
1949
1950/*
1951 * args is to be interpreted as a series of longs but we need to handle
1952 * 8-byte unaligned accesses. args points to raw_data within the event
1953 * and raw_data is guaranteed to be 8-byte unaligned because it is
1954 * preceded by raw_size which is a u32. So we need to copy args to a temp
1955 * variable to read it. Most notably this avoids extended load instructions
1956 * on unaligned addresses
1957 */
1958unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx)
1959{
1960	unsigned long val;
1961	unsigned char *p = arg->args + sizeof(unsigned long) * idx;
1962
1963	memcpy(&val, p, sizeof(val));
1964	return val;
1965}
1966
/*
 * Print the "name: " prefix for a raw (format-less) syscall argument,
 * falling back to a positional "argN: " label when no name is known.
 */
static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
				      struct syscall_arg *arg)
{
	if (sc->arg_fmt && sc->arg_fmt[arg->idx].name)
		return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name);

	return scnprintf(bf, size, "arg%d: ", arg->idx);
}
1975
1976/*
1977 * Check if the value is in fact zero, i.e. mask whatever needs masking, such
1978 * as mount 'flags' argument that needs ignoring some magic flag, see comment
1979 * in tools/perf/trace/beauty/mount_flags.c
1980 */
1981static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val)
1982{
1983	if (fmt && fmt->mask_val)
1984		return fmt->mask_val(arg, val);
1985
1986	return val;
1987}
1988
1989static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size,
1990					     struct syscall_arg *arg, unsigned long val)
1991{
1992	if (fmt && fmt->scnprintf) {
1993		arg->val = val;
1994		if (fmt->parm)
1995			arg->parm = fmt->parm;
1996		return fmt->scnprintf(bf, size, arg);
1997	}
1998	return scnprintf(bf, size, "%ld", val);
1999}
2000
/*
 * Format all arguments of one syscall invocation into 'bf'.
 * Two paths: when the tracepoint format was parsed, walk its fields; when
 * it wasn't (tp_format is an error pointer), fall back to printing the raw
 * values positionally. arg.mask lets beautifiers suppress args they
 * already consumed (bit i <-> arg i).
 */
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned char *args, void *augmented_args, int augmented_args_size,
				      struct trace *trace, struct thread *thread)
{
	size_t printed = 0;
	unsigned long val;
	u8 bit = 1;
	struct syscall_arg arg = {
		.args	= args,
		.augmented = {
			.size = augmented_args_size,
			.args = augmented_args,
		},
		.idx	= 0,
		.mask	= 0,
		.trace  = trace,
		.thread = thread,
		.show_string_prefix = trace->show_string_prefix,
	};
	struct thread_trace *ttrace = thread__priv(thread);

	/*
	 * Things like fcntl will set this in its 'cmd' formatter to pick the
	 * right formatter for the return value (an fd? file flags?), which is
	 * not needed for syscalls that always return a given type, say an fd.
	 */
	ttrace->ret_scnprintf = NULL;

	if (sc->args != NULL) {
		struct tep_format_field *field;

		for (field = sc->args; field;
		     field = field->next, ++arg.idx, bit <<= 1) {
			if (arg.mask & bit)
				continue;

			arg.fmt = &sc->arg_fmt[arg.idx];
			val = syscall_arg__val(&arg, arg.idx);
			/*
			 * Some syscall args need some mask, most don't and
			 * return val untouched.
			 */
			val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val);

			/*
			 * Suppress this argument if its value is zero and
			 * and we don't have a string associated in an
			 * strarray for it.
			 */
			if (val == 0 &&
			    !trace->show_zeros &&
			    !(sc->arg_fmt &&
			      (sc->arg_fmt[arg.idx].show_zero ||
			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
			       sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) &&
			      sc->arg_fmt[arg.idx].parm))
				continue;

			/* Comma-separate after the first printed arg. */
			printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");

			if (trace->show_arg_names)
				printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);

			printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx],
								  bf + printed, size - printed, &arg, val);
		}
	} else if (IS_ERR(sc->tp_format)) {
		/*
		 * If we managed to read the tracepoint /format file, then we
		 * may end up not having any args, like with gettid(), so only
		 * print the raw args when we didn't manage to read it.
		 */
		while (arg.idx < sc->nr_args) {
			if (arg.mask & bit)
				goto next_arg;
			val = syscall_arg__val(&arg, arg.idx);
			if (printed)
				printed += scnprintf(bf + printed, size - printed, ", ");
			printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg);
			printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val);
next_arg:
			++arg.idx;
			bit <<= 1;
		}
	}

	return printed;
}
2089
/* Signature of the per-tracepoint sample handlers (trace__sys_enter etc.). */
typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
				  union perf_event *event,
				  struct perf_sample *sample);
2093
/*
 * Return the (lazily populated) syscall table entry for 'id', or NULL when
 * the id is invalid, out of range, or its info couldn't be read.
 *
 * Careful: the two #ifdef branches below share one closing brace — with
 * HAVE_SYSCALL_TABLE_SUPPORT an out-of-range id goes straight to
 * out_cant_read, while with libaudit trace__read_syscall_info() is asked to
 * grow the table first.
 */
static struct syscall *trace__syscall_info(struct trace *trace,
					   struct evsel *evsel, int id)
{
	int err = 0;

	if (id < 0) {

		/*
		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
		 * before that, leaving at a higher verbosity level till that is
		 * explained. Reproduced with plain ftrace with:
		 *
		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
		 * grep "NR -1 " /t/trace_pipe
		 *
		 * After generating some load on the machine.
 		 */
		if (verbose > 1) {
			static u64 n;
			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
				id, evsel__name(evsel), ++n);
		}
		return NULL;
	}

	err = -EINVAL;

#ifdef HAVE_SYSCALL_TABLE_SUPPORT
	if (id > trace->sctbl->syscalls.max_id) {
#else
	if (id >= trace->sctbl->syscalls.max_id) {
		/*
		 * With libaudit we don't know beforehand what is the max_id,
		 * so we let trace__read_syscall_info() figure that out as we
		 * go on reading syscalls.
		 */
		err = trace__read_syscall_info(trace, id);
		if (err)
#endif
		goto out_cant_read;
	}

	/* In-range but not yet populated: read the info now. */
	if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
	    (err = trace__read_syscall_info(trace, id)) != 0)
		goto out_cant_read;

	if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
		goto out_cant_read;

	return &trace->syscalls.table[id];

out_cant_read:
	if (verbose > 0) {
		char sbuf[STRERR_BUFSIZE];
		fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf)));
		if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
			fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
		fputs(" information\n", trace->output);
	}
	return NULL;
}
2155
/* Per-thread, per-syscall summary data for --summary/--errno-summary. */
struct syscall_stats {
	struct stats stats;	/* duration statistics (min/max/avg/stddev) */
	u64	     nr_failures; /* calls that returned a negative value */
	int	     max_errno;	/* highest errno seen, sizes the array below */
	u32	     *errnos;	/* per-errno counters, index errno - 1 */
};
2162
/*
 * Account one completed syscall for --summary: record its duration and,
 * when it failed and --errno-summary is on, bump the per-errno counter,
 * growing the errnos array on demand. Allocation failures are silently
 * tolerated (the summary just ends up incomplete).
 */
static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace,
				 int id, struct perf_sample *sample, long err, bool errno_summary)
{
	struct int_node *inode;
	struct syscall_stats *stats;
	u64 duration = 0;

	/* One stats node per syscall id, created on first use. */
	inode = intlist__findnew(ttrace->syscall_stats, id);
	if (inode == NULL)
		return;

	stats = inode->priv;
	if (stats == NULL) {
		stats = zalloc(sizeof(*stats));
		if (stats == NULL)
			return;

		init_stats(&stats->stats);
		inode->priv = stats;
	}

	/* entry_time == 0 means we never saw the sys_enter; skip duration. */
	if (ttrace->entry_time && sample->time > ttrace->entry_time)
		duration = sample->time - ttrace->entry_time;

	update_stats(&stats->stats, duration);

	if (err < 0) {
		++stats->nr_failures;

		if (!errno_summary)
			return;

		err = -err;
		/* Grow the counters array to cover this errno, zeroing the new tail. */
		if (err > stats->max_errno) {
			u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32));

			if (new_errnos) {
				memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32));
			} else {
				pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n",
					 thread__comm_str(thread), thread->pid_, thread->tid);
				return;
			}

			stats->errnos = new_errnos;
			stats->max_errno = err;
		}

		/* errnos[] is indexed by errno - 1 (errno values start at 1). */
		++stats->errnos[err - 1];
	}
}
2214
/*
 * A new event arrived while the current thread still has an unfinished
 * syscall entry line pending: flush that line, terminated with " ...",
 * so output from the interrupting thread/event isn't interleaved into it.
 * Returns the number of characters printed (0 if nothing was pending).
 */
static int trace__printf_interrupted_entry(struct trace *trace)
{
	struct thread_trace *ttrace;
	size_t printed;
	int len;

	if (trace->failure_only || trace->current == NULL)
		return 0;

	ttrace = thread__priv(trace->current);

	if (!ttrace->entry_pending)
		return 0;

	printed  = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
	/* 'len' is just the args part, used below to pad to args_alignment. */
	printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);

	if (len < trace->args_alignment - 4)
		printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");

	printed += fprintf(trace->output, " ...\n");

	ttrace->entry_pending = false;
	++trace->nr_events_printed;

	return printed;
}
2242
2243static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
2244				 struct perf_sample *sample, struct thread *thread)
2245{
2246	int printed = 0;
2247
2248	if (trace->print_sample) {
2249		double ts = (double)sample->time / NSEC_PER_MSEC;
2250
2251		printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2252				   evsel__name(evsel), ts,
2253				   thread__comm_str(thread),
2254				   sample->pid, sample->tid, sample->cpu);
2255	}
2256
2257	return printed;
2258}
2259
/*
 * Locate the augmented payload (strings etc. copied by the BPF augmenter)
 * that follows the raw syscall args in the sample, returning NULL when
 * there is none. *augmented_args_size receives the payload size.
 */
static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
{
	void *augmented_args = NULL;
	/*
	 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
	 * and there we get all 6 syscall args plus the tracepoint common fields
	 * that gets calculated at the start and the syscall_nr (another long).
	 * So we check if that is the case and if so don't look after the
	 * sc->args_size but always after the full raw_syscalls:sys_enter payload,
	 * which is fixed.
	 *
	 * We'll revisit this later to pass s->args_size to the BPF augmenter
	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c, so that it
	 * copies only what we need for each syscall, like what happens when we
	 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
	 * traffic to just what is needed for each syscall.
	 */
	int args_size = raw_augmented_args_size ?: sc->args_size;

	*augmented_args_size = sample->raw_size - args_size;
	if (*augmented_args_size > 0)
		augmented_args = sample->raw_data + args_size;

	return augmented_args;
}
2285
2286static void syscall__exit(struct syscall *sc)
2287{
2288	if (!sc)
2289		return;
2290
2291	free(sc->arg_fmt);
2292}
2293
/*
 * raw_syscalls:sys_enter (or syscalls:sys_enter_*) handler: format the
 * syscall name and args into the per-thread entry_str. The line is only
 * emitted here for syscalls that never return (exit/exit_group); otherwise
 * it stays pending until the matching sys_exit (or until interrupted).
 * Returns 0 on success, -1 when the syscall or thread state can't be set up.
 */
static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
{
	char *msg;
	void *args;
	int printed = 0;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	int augmented_args_size = 0;
	void *augmented_args = NULL;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	/* Lazily allocate the buffer the pending entry line is built in. */
	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)
			goto out_put;
	}

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
		trace__printf_interrupted_entry(trace);
	/*
	 * If this is raw_syscalls.sys_enter, then it always comes with the 6 possible
	 * arguments, even if the syscall being handled, say "openat", uses only 4 arguments
	 * this breaks syscall__augmented_args() check for augmented args, as we calculate
	 * syscall->args_size using each syscalls:sys_enter_NAME tracefs format file,
	 * so when handling, say the openat syscall, we end up getting 6 args for the
	 * raw_syscalls:sys_enter event, when we expected just 4, we end up mistakenly
	 * thinking that the extra 2 u64 args are the augmented filename, so just check
	 * here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
	 */
	if (evsel != trace->syscalls.events.sys_enter)
		augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, augmented_args, augmented_args_size, trace, thread);

	if (sc->is_exit) {
		/* exit/exit_group never return: print "= ?" right away. */
		if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
			int alignment = 0;

			trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
			printed = fprintf(trace->output, "%s)", ttrace->entry_str);
			if (trace->args_alignment > printed)
				alignment = trace->args_alignment - printed;
			fprintf(trace->output, "%*s= ?\n", alignment, " ");
		}
	} else {
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;
	}

	/* Track the current thread so an interruption can flush its entry. */
	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);
	}
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
2372
/*
 * Like trace__sys_enter() but prints the beautified args immediately into
 * trace->output instead of buffering them — used when showing syscall
 * args embedded in other events (e.g. with callchains).
 * Returns 0 on success, -1 on failure.
 */
static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample)
{
	struct thread_trace *ttrace;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	char msg[1024];
	void *args, *augmented_args = NULL;
	int augmented_args_size;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	/*
	 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
	 * and the rest of the beautifiers accessing it via struct syscall_arg touches it.
	 */
	if (ttrace == NULL)
		goto out_put;

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
	augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
	syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
	fprintf(trace->output, "%s", msg);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
2405
/*
 * Resolve the sample's callchain into 'cursor'. The per-event
 * sample_max_stack, when set, overrides the global --max-stack.
 * Returns 0 on success, negative on resolution failure.
 */
static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel,
				    struct perf_sample *sample,
				    struct callchain_cursor *cursor)
{
	struct addr_location al;
	int max_stack = evsel->core.attr.sample_max_stack ?
			evsel->core.attr.sample_max_stack :
			trace->max_stack;
	int err;

	if (machine__resolve(trace->host, &al, sample) < 0)
		return -1;

	err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
	/* Drop the reference machine__resolve() took on al's thread/map. */
	addr_location__put(&al);
	return err;
}
2423
/*
 * Print the previously-resolved callchain (in the global callchain_cursor)
 * with symbol + DSO per frame, indented 38 columns to line up under the
 * syscall output.
 */
static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
{
	/* TODO: user-configurable print_opts */
	const unsigned int print_opts = EVSEL__PRINT_SYM |
				        EVSEL__PRINT_DSO |
				        EVSEL__PRINT_UNKNOWN_AS_ADDR;

	return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output);
}
2433
/*
 * Map a (positive) errno value to its symbolic name, using the errno
 * table for the architecture the perf.data file was recorded on —
 * errno numbering differs across architectures.
 */
static const char *errno_to_name(struct evsel *evsel, int err)
{
	const char *arch = perf_env__arch(evsel__env(evsel));

	return arch_syscalls__strerrno(arch, err);
}
2441
/*
 * raw_syscalls:sys_exit handler: completes the strace-like line begun by
 * trace__sys_enter ("name(args"), printing the duration computed from the
 * entry timestamp stashed in the per-thread state and a beautified return
 * value (errno name, hex, child pid+comm, etc., per the syscall's format).
 *
 * Returns 0 on success, -1 when the syscall or per-thread state can't be
 * resolved.
 */
static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
			   union perf_event *event __maybe_unused,
			   struct perf_sample *sample)
{
	long ret;
	u64 duration = 0;
	bool duration_calculated = false;
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
	int alignment = trace->args_alignment;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	if (sc == NULL)
		return -1;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	trace__fprintf_sample(trace, evsel, sample, thread);

	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);

	if (trace->summary)
		thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);

	/*
	 * Successful open-like syscall with a pathname pending from the
	 * vfs_getname probe: remember fd -> path for later beautification.
	 */
	if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
		ttrace->filename.pending_open = false;
		++trace->stats.vfs_getname;
	}

	/* --duration filtering: suppress lines for too-short syscalls. */
	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
			goto out;
		duration_calculated = true;
	} else if (trace->duration_filter)
		goto out;

	/* --min-stack filtering: drop events with too-shallow callchains. */
	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	if (trace->summary_only || (ret >= 0 && trace->failure_only))
		goto out;

	trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);

	if (ttrace->entry_pending) {
		printed = fprintf(trace->output, "%s", ttrace->entry_str);
	} else {
		/* Entry was already flushed (e.g. interleaved events): mark continuation. */
		printed += fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		printed += 9;
		printed += fprintf(trace->output, "]: %s()", sc->name);
	}

	printed++; /* the closing ')' */

	if (alignment > printed)
		alignment -= printed;
	else
		alignment = 0;

	fprintf(trace->output, ")%*s= ", alignment, " ");

	/*
	 * Return value beautification. Note the cross-branch gotos: syscalls
	 * without a format that fail jump into the errno branch, and formatted
	 * syscalls with no special case jump back to the plain signed print.
	 */
	if (sc->fmt == NULL) {
		if (ret < 0)
			goto errno_print;
signed_print:
		fprintf(trace->output, "%ld", ret);
	} else if (ret < 0) {
errno_print: {
		char bf[STRERR_BUFSIZE];
		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
			   *e = errno_to_name(evsel, -ret);

		fprintf(trace->output, "-1 %s (%s)", e, emsg);
	}
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, "0 (Timeout)");
	else if (ttrace->ret_scnprintf) {
		/* One-shot custom formatter installed at sys_enter time. */
		char bf[1024];
		struct syscall_arg arg = {
			.val	= ret,
			.thread	= thread,
			.trace	= trace,
		};
		ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
		ttrace->ret_scnprintf = NULL;
		fprintf(trace->output, "%s", bf);
	} else if (sc->fmt->hexret)
		fprintf(trace->output, "%#lx", ret);
	else if (sc->fmt->errpid) {
		/* Return value is a pid (fork/clone/wait-like): show its comm too. */
		struct thread *child = machine__find_thread(trace->host, ret, ret);

		if (child != NULL) {
			fprintf(trace->output, "%ld", ret);
			if (child->comm_set)
				fprintf(trace->output, " (%s)", thread__comm_str(child));
			thread__put(child);
		}
	} else
		goto signed_print;

	fputc('\n', trace->output);

	/*
	 * We only consider an 'event' for the sake of --max-events a non-filtered
	 * sys_enter + sys_exit and other tracepoint events.
	 */
	if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
		interrupted = true;

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
out:
	ttrace->entry_pending = false;
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
2575
/*
 * probe:vfs_getname handler: records the pathname being resolved so that
 * (a) trace__sys_exit can associate it with the fd returned by an
 * open-like syscall and (b) it can be spliced into the already-formatted
 * sys_enter string in place of the raw pointer argument.
 */
static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	struct thread_trace *ttrace;
	size_t filename_len, entry_str_len, to_move;
	ssize_t remaining_space;
	char *pos;
	const char *filename = evsel__rawptr(evsel, sample, "pathname");

	if (!thread)
		goto out;

	ttrace = thread__priv(thread);
	if (!ttrace)
		goto out_put;

	filename_len = strlen(filename);
	if (filename_len == 0)
		goto out_put;

	/* Grow the per-thread filename buffer on demand, never shrink it. */
	if (ttrace->filename.namelen < filename_len) {
		char *f = realloc(ttrace->filename.name, filename_len + 1);

		if (f == NULL)
			goto out_put;

		ttrace->filename.namelen = filename_len;
		ttrace->filename.name = f;
	}

	strcpy(ttrace->filename.name, filename);
	ttrace->filename.pending_open = true;

	/* No pointer argument waiting to be patched in the entry string. */
	if (!ttrace->filename.ptr)
		goto out_put;

	entry_str_len = strlen(ttrace->entry_str);
	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
	if (remaining_space <= 0)
		goto out_put;

	/* If the path doesn't fit, keep its tail (the most specific part). */
	if (filename_len > (size_t)remaining_space) {
		filename += filename_len - remaining_space;
		filename_len = remaining_space;
	}

	/* Open a gap at the recorded position and splice the filename in. */
	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
	memmove(pos + filename_len, pos, to_move);
	memcpy(pos, filename, filename_len);

	ttrace->filename.ptr = 0;
	ttrace->filename.entry_str_pos = 0;
out_put:
	thread__put(thread);
out:
	return 0;
}
2636
2637static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
2638				     union perf_event *event __maybe_unused,
2639				     struct perf_sample *sample)
2640{
2641        u64 runtime = evsel__intval(evsel, sample, "runtime");
2642	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
2643	struct thread *thread = machine__findnew_thread(trace->host,
2644							sample->pid,
2645							sample->tid);
2646	struct thread_trace *ttrace = thread__trace(thread, trace->output);
2647
2648	if (ttrace == NULL)
2649		goto out_dump;
2650
2651	ttrace->runtime_ms += runtime_ms;
2652	trace->runtime_ms += runtime_ms;
2653out_put:
2654	thread__put(thread);
2655	return 0;
2656
2657out_dump:
2658	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
2659	       evsel->name,
2660	       evsel__strval(evsel, sample, "comm"),
2661	       (pid_t)evsel__intval(evsel, sample, "pid"),
2662	       runtime,
2663	       evsel__intval(evsel, sample, "vruntime"));
2664	goto out_put;
 
2665}
2666
2667static int bpf_output__printer(enum binary_printer_ops op,
2668			       unsigned int val, void *extra __maybe_unused, FILE *fp)
2669{
 
2670	unsigned char ch = (unsigned char)val;
2671
2672	switch (op) {
2673	case BINARY_PRINT_CHAR_DATA:
2674		return fprintf(fp, "%c", isprint(ch) ? ch : '.');
 
2675	case BINARY_PRINT_DATA_BEGIN:
2676	case BINARY_PRINT_LINE_BEGIN:
2677	case BINARY_PRINT_ADDR:
2678	case BINARY_PRINT_NUM_DATA:
2679	case BINARY_PRINT_NUM_PAD:
2680	case BINARY_PRINT_SEP:
2681	case BINARY_PRINT_CHAR_PAD:
2682	case BINARY_PRINT_LINE_END:
2683	case BINARY_PRINT_DATA_END:
2684	default:
2685		break;
2686	}
2687
2688	return 0;
2689}
2690
2691static void bpf_output__fprintf(struct trace *trace,
2692				struct perf_sample *sample)
2693{
2694	binary__fprintf(sample->raw_data, sample->raw_size, 8,
2695			bpf_output__printer, NULL, trace->output);
2696	++trace->nr_events_printed;
2697}
2698
/*
 * Print a tracepoint's payload as "name: value, ...", reusing the syscall
 * argument beautifiers (struct syscall_arg_fmt) attached to the evsel.
 * The line is formatted into a local buffer and emitted with one fprintf.
 *
 * Returns the number of characters produced.
 */
static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample,
				       struct thread *thread, void *augmented_args, int augmented_args_size)
{
	char bf[2048];
	size_t size = sizeof(bf);
	struct tep_format_field *field = evsel->tp_format->format.fields;
	struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel);
	size_t printed = 0;
	unsigned long val;
	u8 bit = 1;
	struct syscall_arg syscall_arg = {
		.augmented = {
			.size = augmented_args_size,
			.args = augmented_args,
		},
		.idx	= 0,
		.mask	= 0,
		.trace  = trace,
		.thread = thread,
		.show_string_prefix = trace->show_string_prefix,
	};

	for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) {
		/* Fields flagged in .mask were already consumed by a beautifier. */
		if (syscall_arg.mask & bit)
			continue;

		syscall_arg.len = 0;
		syscall_arg.fmt = arg;
		if (field->flags & TEP_FIELD_IS_ARRAY) {
			int offset = field->offset;

			/*
			 * Dynamic arrays store a 32-bit descriptor at ->offset:
			 * length in the high 16 bits, payload offset in the low 16.
			 */
			if (field->flags & TEP_FIELD_IS_DYNAMIC) {
				offset = format_field__intval(field, sample, evsel->needs_swap);
				syscall_arg.len = offset >> 16;
				offset &= 0xffff;
#ifdef HAVE_LIBTRACEEVENT_TEP_FIELD_IS_RELATIVE
				if (field->flags & TEP_FIELD_IS_RELATIVE)
					offset += field->offset + field->size;
#endif
			}

			val = (uintptr_t)(sample->raw_data + offset);
		} else
			val = format_field__intval(field, sample, evsel->needs_swap);
		/*
		 * Some syscall args need some mask, most don't and
		 * return val untouched.
		 */
		val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val);

		/*
		 * Suppress this argument if its value is zero and
		 * we don't have a string associated in an
		 * strarray for it.
		 */
		if (val == 0 &&
		    !trace->show_zeros &&
		    !((arg->show_zero ||
		       arg->scnprintf == SCA_STRARRAY ||
		       arg->scnprintf == SCA_STRARRAYS) &&
		      arg->parm))
			continue;

		printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");

		if (trace->show_arg_names)
			printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);

		printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
	}

	return printed + fprintf(trace->output, "%s", bf);
}
2772
/*
 * Generic handler for non-syscall events (--event, augmented payloads):
 * prints a timestamped "evname(fields)" line, choosing between the BPF
 * augmented sys_enter formatter, the bpf-output hexdump, libtraceevent's
 * printer or the tp-fields printer, plus an optional callchain.
 */
static int trace__event_handler(struct trace *trace, struct evsel *evsel,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample)
{
	struct thread *thread;
	int callchain_ret = 0;
	/*
	 * Check if we called perf_evsel__disable(evsel) due to, for instance,
	 * this event's max_events having been hit and this is an entry coming
	 * from the ring buffer that we should discard, since the max events
	 * have already been considered/printed.
	 */
	if (evsel->disabled)
		return 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	/* --min-stack filtering: drop events with too-shallow callchains. */
	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out;
			callchain_ret = 1;
		}
	}

	trace__printf_interrupted_entry(trace);
	trace__fprintf_tstamp(trace, sample->time, trace->output);

	if (trace->trace_syscalls && trace->show_duration)
		fprintf(trace->output, "(         ): ");

	if (thread)
		trace__fprintf_comm_tid(trace, thread, trace->output);

	/* Augmented raw_syscalls payload: print it like a regular syscall entry. */
	if (evsel == trace->syscalls.events.augmented) {
		int id = perf_evsel__sc_tp_uint(evsel, id, sample);
		struct syscall *sc = trace__syscall_info(trace, evsel, id);

		if (sc) {
			fprintf(trace->output, "%s(", sc->name);
			trace__fprintf_sys_enter(trace, evsel, sample);
			fputc(')', trace->output);
			goto newline;
		}

		/*
		 * XXX: Not having the associated syscall info or not finding/adding
		 * 	the thread should never happen, but if it does...
		 * 	fall thru and print it as a bpf_output event.
		 */
	}

	fprintf(trace->output, "%s(", evsel->name);

	if (evsel__is_bpf_output(evsel)) {
		bpf_output__fprintf(trace, sample);
	} else if (evsel->tp_format) {
		/* sys_enter_* tracepoints get the syscall formatter first. */
		if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
		    trace__fprintf_sys_enter(trace, evsel, sample)) {
			if (trace->libtraceevent_print) {
				event_format__fprintf(evsel->tp_format, sample->cpu,
						      sample->raw_data, sample->raw_size,
						      trace->output);
			} else {
				trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
			}
		}
	}

newline:
	fprintf(trace->output, ")\n");

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));

	++trace->nr_events_printed;

	/* Per-evsel --max-events: stop and drain this event source. */
	if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
		evsel__disable(evsel);
		evsel__close(evsel);
	}
out:
	thread__put(thread);
	return 0;
}
2861
2862static void print_location(FILE *f, struct perf_sample *sample,
2863			   struct addr_location *al,
2864			   bool print_dso, bool print_sym)
2865{
2866
2867	if ((verbose > 0 || print_dso) && al->map)
2868		fprintf(f, "%s@", al->map->dso->long_name);
2869
2870	if ((verbose > 0 || print_sym) && al->sym)
2871		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
2872			al->addr - al->sym->start);
2873	else if (al->map)
2874		fprintf(f, "0x%" PRIx64, al->addr);
2875	else
2876		fprintf(f, "0x%" PRIx64, sample->addr);
2877}
2878
/*
 * Page fault (software event) handler: bumps the per-thread major/minor
 * fault counters and, unless --summary-only, prints a line with the
 * faulting IP location and the faulted data address.
 */
static int trace__pgfault(struct trace *trace,
			  struct evsel *evsel,
			  union perf_event *event __maybe_unused,
			  struct perf_sample *sample)
{
	struct thread *thread;
	struct addr_location al;
	char map_type = 'd';	/* 'd'ata; becomes 'x' (exec) or '?' (unknown) below */
	struct thread_trace *ttrace;
	int err = -1;
	int callchain_ret = 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	/* --min-stack filtering: drop events with too-shallow callchains. */
	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)
				goto out_put;
			callchain_ret = 1;
		}
	}

	ttrace = thread__trace(thread, trace->output);
	if (ttrace == NULL)
		goto out_put;

	if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
		ttrace->pfmaj++;
	else
		ttrace->pfmin++;

	if (trace->summary_only)
		goto out;

	/* Resolve the faulting instruction pointer. */
	thread__find_symbol(thread, sample->cpumode, sample->ip, &al);

	trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);

	fprintf(trace->output, "%sfault [",
		evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
		"maj" : "min");

	print_location(trace->output, sample, &al, false, true);

	fprintf(trace->output, "] => ");

	/* Resolve the faulted data address. */
	thread__find_symbol(thread, sample->cpumode, sample->addr, &al);

	if (!al.map) {
		/*
		 * NOTE(review): this repeats the exact same lookup as above, so
		 * al.map stays NULL and map_type always ends up '?' here. It
		 * looks like a fallback lookup in a different map type (e.g.
		 * executable mappings, yielding 'x') was intended — confirm
		 * against upstream builtin-trace.c.
		 */
		thread__find_symbol(thread, sample->cpumode, sample->addr, &al);

		if (al.map)
			map_type = 'x';
		else
			map_type = '?';
	}

	print_location(trace->output, sample, &al, true, false);

	fprintf(trace->output, " (%c%c)\n", map_type, al.level);

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));

	++trace->nr_events_printed;
out:
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
2953
2954static void trace__set_base_time(struct trace *trace,
2955				 struct evsel *evsel,
2956				 struct perf_sample *sample)
2957{
2958	/*
2959	 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
2960	 * and don't use sample->time unconditionally, we may end up having
2961	 * some other event in the future without PERF_SAMPLE_TIME for good
2962	 * reason, i.e. we may not be interested in its timestamps, just in
2963	 * it taking place, picking some piece of information when it
2964	 * appears in our event stream (vfs_getname comes to mind).
2965	 */
2966	if (trace->base_time == 0 && !trace->full_time &&
2967	    (evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
2968		trace->base_time = sample->time;
2969}
2970
2971static int trace__process_sample(struct perf_tool *tool,
2972				 union perf_event *event,
2973				 struct perf_sample *sample,
2974				 struct evsel *evsel,
2975				 struct machine *machine __maybe_unused)
2976{
2977	struct trace *trace = container_of(tool, struct trace, tool);
2978	struct thread *thread;
2979	int err = 0;
2980
2981	tracepoint_handler handler = evsel->handler;
2982
2983	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2984	if (thread && thread__is_filtered(thread))
2985		goto out;
2986
2987	trace__set_base_time(trace, evsel, sample);
 
2988
2989	if (handler) {
2990		++trace->nr_events;
2991		handler(trace, evsel, event, sample);
2992	}
2993out:
2994	thread__put(thread);
2995	return err;
2996}
2997
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2998static int trace__record(struct trace *trace, int argc, const char **argv)
2999{
3000	unsigned int rec_argc, i, j;
3001	const char **rec_argv;
3002	const char * const record_args[] = {
3003		"record",
3004		"-R",
3005		"-m", "1024",
3006		"-c", "1",
3007	};
3008	pid_t pid = getpid();
3009	char *filter = asprintf__tp_filter_pids(1, &pid);
3010	const char * const sc_args[] = { "-e", };
3011	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
3012	const char * const majpf_args[] = { "-e", "major-faults" };
3013	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
3014	const char * const minpf_args[] = { "-e", "minor-faults" };
3015	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
3016	int err = -1;
3017
3018	/* +3 is for the event string below and the pid filter */
3019	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 +
3020		majpf_args_nr + minpf_args_nr + argc;
3021	rec_argv = calloc(rec_argc + 1, sizeof(char *));
3022
3023	if (rec_argv == NULL || filter == NULL)
3024		goto out_free;
3025
3026	j = 0;
3027	for (i = 0; i < ARRAY_SIZE(record_args); i++)
3028		rec_argv[j++] = record_args[i];
3029
3030	if (trace->trace_syscalls) {
3031		for (i = 0; i < sc_args_nr; i++)
3032			rec_argv[j++] = sc_args[i];
3033
3034		/* event string may be different for older kernels - e.g., RHEL6 */
3035		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
3036			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
3037		else if (is_valid_tracepoint("syscalls:sys_enter"))
3038			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
3039		else {
3040			pr_err("Neither raw_syscalls nor syscalls events exist.\n");
3041			goto out_free;
3042		}
3043	}
3044
3045	rec_argv[j++] = "--filter";
3046	rec_argv[j++] = filter;
3047
3048	if (trace->trace_pgfaults & TRACE_PFMAJ)
3049		for (i = 0; i < majpf_args_nr; i++)
3050			rec_argv[j++] = majpf_args[i];
3051
3052	if (trace->trace_pgfaults & TRACE_PFMIN)
3053		for (i = 0; i < minpf_args_nr; i++)
3054			rec_argv[j++] = minpf_args[i];
3055
3056	for (i = 0; i < (unsigned int)argc; i++)
3057		rec_argv[j++] = argv[i];
3058
3059	err = cmd_record(j, rec_argv);
3060out_free:
3061	free(filter);
3062	free(rec_argv);
3063	return err;
3064}
3065
3066static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
3067
3068static bool evlist__add_vfs_getname(struct evlist *evlist)
3069{
3070	bool found = false;
3071	struct evsel *evsel, *tmp;
3072	struct parse_events_error err;
3073	int ret;
3074
3075	parse_events_error__init(&err);
3076	ret = parse_events(evlist, "probe:vfs_getname*", &err);
3077	parse_events_error__exit(&err);
3078	if (ret)
3079		return false;
3080
3081	evlist__for_each_entry_safe(evlist, evsel, tmp) {
3082		if (!strstarts(evsel__name(evsel), "probe:vfs_getname"))
3083			continue;
3084
3085		if (evsel__field(evsel, "pathname")) {
3086			evsel->handler = trace__vfs_getname;
3087			found = true;
3088			continue;
3089		}
3090
3091		list_del_init(&evsel->core.node);
3092		evsel->evlist = NULL;
3093		evsel__delete(evsel);
3094	}
3095
3096	return found;
 
 
3097}
3098
3099static struct evsel *evsel__new_pgfault(u64 config)
 
3100{
3101	struct evsel *evsel;
3102	struct perf_event_attr attr = {
3103		.type = PERF_TYPE_SOFTWARE,
3104		.mmap_data = 1,
3105	};
3106
3107	attr.config = config;
3108	attr.sample_period = 1;
3109
3110	event_attr_init(&attr);
3111
3112	evsel = evsel__new(&attr);
3113	if (evsel)
3114		evsel->handler = trace__pgfault;
3115
3116	return evsel;
3117}
3118
3119static void evlist__free_syscall_tp_fields(struct evlist *evlist)
3120{
3121	struct evsel *evsel;
3122
3123	evlist__for_each_entry(evlist, evsel) {
3124		struct evsel_trace *et = evsel->priv;
3125
3126		if (!et || !evsel->tp_format || strcmp(evsel->tp_format->system, "syscalls"))
3127			continue;
3128
3129		free(et->fmt);
3130		free(et);
3131	}
3132}
3133
/*
 * Demultiplex one event from the ring buffer: non-sample records (mmap,
 * comm, fork, exit, ...) go to trace__process_event() to keep the machine
 * and thread state in sync; samples are routed to the handler hooked on
 * the evsel they belong to.
 */
static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
{
	const u32 type = event->header.type;
	struct evsel *evsel;

	if (type != PERF_RECORD_SAMPLE) {
		trace__process_event(trace, trace->host, event, sample);
		return;
	}

	evsel = evlist__id2evsel(trace->evlist, sample->id);
	if (evsel == NULL) {
		fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
		return;
	}

	/* --switch-on/--switch-off event gating. */
	if (evswitch__discard(&trace->evswitch, evsel))
		return;

	trace__set_base_time(trace, evsel, sample);

	/* Tracepoint samples must have a raw payload to be parseable. */
	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
	    sample->raw_data == NULL) {
		fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
		       evsel__name(evsel), sample->tid,
		       sample->cpu, sample->raw_size);
	} else {
		tracepoint_handler handler = evsel->handler;
		handler(trace, evsel, event, sample);
	}

	/* Global --max-events: signal the main loop to stop. */
	if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
		interrupted = true;
}
3168
/*
 * Create the raw_syscalls:sys_enter/sys_exit tracepoint evsels, hook up
 * their handlers, cache the offsets of the tracepoint fields we read, and
 * add both to the evlist. On success the evsels are also stashed in
 * trace->syscalls.events for direct access.
 *
 * Returns 0 on success, -1 on failure (partially created evsels are
 * cleaned up via the goto chain at the bottom).
 */
static int trace__add_syscall_newtp(struct trace *trace)
{
	int ret = -1;
	struct evlist *evlist = trace->evlist;
	struct evsel *sys_enter, *sys_exit;

	sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
	if (sys_enter == NULL)
		goto out;

	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
		goto out_delete_sys_enter;

	sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
	if (sys_exit == NULL)
		goto out_delete_sys_enter;

	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
		goto out_delete_sys_exit;

	evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
	evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);

	evlist__add(evlist, sys_enter);
	evlist__add(evlist, sys_exit);

	if (callchain_param.enabled && !trace->kernel_syscallchains) {
		/*
		 * We're interested only in the user space callchain
		 * leading to the syscall, allow overriding that for
		 * debugging reasons using --kernel_syscall_callchains
		 */
		sys_exit->core.attr.exclude_callchain_kernel = 1;
	}

	trace->syscalls.events.sys_enter = sys_enter;
	trace->syscalls.events.sys_exit  = sys_exit;

	ret = 0;
out:
	return ret;

out_delete_sys_exit:
	evsel__delete_priv(sys_exit);
out_delete_sys_enter:
	evsel__delete_priv(sys_enter);
	goto out;
}
3217
3218static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
3219{
3220	int err = -1;
3221	struct evsel *sys_exit;
3222	char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
3223						trace->ev_qualifier_ids.nr,
3224						trace->ev_qualifier_ids.entries);
3225
3226	if (filter == NULL)
3227		goto out_enomem;
3228
3229	if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3230		sys_exit = trace->syscalls.events.sys_exit;
3231		err = evsel__append_tp_filter(sys_exit, filter);
3232	}
3233
3234	free(filter);
3235out:
3236	return err;
3237out_enomem:
3238	errno = ENOMEM;
3239	goto out;
3240}
3241
3242#ifdef HAVE_LIBBPF_SUPPORT
3243static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
3244{
3245	if (trace->bpf_obj == NULL)
3246		return NULL;
3247
3248	return bpf_object__find_map_by_name(trace->bpf_obj, name);
3249}
3250
/* Look up and cache the "pids_filtered" BPF map used for in-kernel pid filtering. */
static void trace__set_bpf_map_filtered_pids(struct trace *trace)
{
	trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
}
3255
/*
 * Look up and cache the per-syscall BPF program-array maps that the
 * raw_syscalls:sys_{enter,exit} BPF entry points tail-call through.
 */
static void trace__set_bpf_map_syscalls(struct trace *trace)
{
	trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
	trace->syscalls.prog_array.sys_exit  = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
}
3261
3262static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
3263{
3264	struct bpf_program *pos, *prog = NULL;
3265	const char *sec_name;
3266
3267	if (trace->bpf_obj == NULL)
3268		return NULL;
3269
3270	bpf_object__for_each_program(pos, trace->bpf_obj) {
3271		sec_name = bpf_program__section_name(pos);
3272		if (sec_name && !strcmp(sec_name, name)) {
3273			prog = pos;
3274			break;
3275		}
3276	}
3277
3278	return prog;
3279}
3280
/*
 * Pick the BPF augmenter program for one side ("enter"/"exit") of a
 * syscall: an explicitly named program when the format table provides one,
 * otherwise a program whose section is "!syscalls:sys_<type>_<name>"
 * (also trying the syscall's alias), finally falling back to the
 * catch-all unaugmented program.
 */
static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
							const char *prog_name, const char *type)
{
	struct bpf_program *prog;

	if (prog_name == NULL) {
		char default_prog_name[256];
		scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name);
		prog = trace__find_bpf_program_by_title(trace, default_prog_name);
		if (prog != NULL)
			goto out_found;
		if (sc->fmt && sc->fmt->alias) {
			scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias);
			prog = trace__find_bpf_program_by_title(trace, default_prog_name);
			if (prog != NULL)
				goto out_found;
		}
		goto out_unaugmented;
	}

	prog = trace__find_bpf_program_by_title(trace, prog_name);

	if (prog != NULL) {
out_found:
		return prog;
	}

	/* Explicitly named program not found: warn, then fall back. */
	pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
		 prog_name, type, sc->name);
out_unaugmented:
	return trace->syscalls.unaugmented_prog;
}
3313
3314static void trace__init_syscall_bpf_progs(struct trace *trace, int id)
3315{
3316	struct syscall *sc = trace__syscall_info(trace, NULL, id);
3317
3318	if (sc == NULL)
3319		return;
3320
3321	sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter");
3322	sc->bpf_prog.sys_exit  = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit  : NULL,  "exit");
3323}
3324
3325static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id)
3326{
3327	struct syscall *sc = trace__syscall_info(trace, NULL, id);
3328	return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog);
3329}
3330
3331static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
3332{
3333	struct syscall *sc = trace__syscall_info(trace, NULL, id);
3334	return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
3335}
3336
/*
 * Find another syscall whose sys_enter BPF augmenter can be reused for
 * @sc: the two must have the same pointer arguments (same types, same
 * positions), and the candidate must not copy any extra pointers beyond
 * what the two syscalls have in common. Returns the reusable program or
 * NULL when no compatible candidate exists.
 */
static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
{
	struct tep_format_field *field, *candidate_field;
	int id;

	/*
	 * We're only interested in syscalls that have a pointer:
	 */
	for (field = sc->args; field; field = field->next) {
		if (field->flags & TEP_FIELD_IS_POINTER)
			goto try_to_find_pair;
	}

	return NULL;

try_to_find_pair:
	for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) {
		struct syscall *pair = trace__syscall_info(trace, NULL, id);
		struct bpf_program *pair_prog;
		bool is_candidate = false;

		/* Skip ourselves and syscalls with only the unaugmented prog. */
		if (pair == NULL || pair == sc ||
		    pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog)
			continue;

		/* Walk both argument lists in lockstep, comparing pointer args. */
		for (field = sc->args, candidate_field = pair->args;
		     field && candidate_field; field = field->next, candidate_field = candidate_field->next) {
			bool is_pointer = field->flags & TEP_FIELD_IS_POINTER,
			     candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER;

			if (is_pointer) {
			       if (!candidate_is_pointer) {
					// The candidate just doesn't copy our pointer arg, but might copy other pointers we want.
					continue;
			       }
			} else {
				if (candidate_is_pointer) {
					// The candidate might copy a pointer we don't have, skip it.
					goto next_candidate;
				}
				continue;
			}

			if (strcmp(field->type, candidate_field->type))
				goto next_candidate;

			is_candidate = true;
		}

		if (!is_candidate)
			goto next_candidate;

		/*
		 * Check if the tentative pair syscall augmenter has more pointers, if it has,
		 * then it may be collecting that and we then can't use it, as it would collect
		 * more than what is common to the two syscalls.
		 */
		if (candidate_field) {
			for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next)
				if (candidate_field->flags & TEP_FIELD_IS_POINTER)
					goto next_candidate;
		}

		pair_prog = pair->bpf_prog.sys_enter;
		/*
		 * If the pair isn't enabled, then its bpf_prog.sys_enter will not
		 * have been searched for, so search it here and if it returns the
		 * unaugmented one, then ignore it, otherwise we'll reuse that BPF
		 * program for a filtered syscall on a non-filtered one.
		 *
		 * For instance, we have "!syscalls:sys_enter_renameat" and that is
		 * useful for "renameat2".
		 */
		if (pair_prog == NULL) {
			pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
			if (pair_prog == trace->syscalls.unaugmented_prog)
				goto next_candidate;
		}

		pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name);
		return pair_prog;
	next_candidate:
		continue;
	}

	return NULL;
}
3424
/*
 * Populate the sys_enter/sys_exit BPF program-array maps: for every
 * enabled syscall, install at key == syscall id the fd of its augmenter
 * (or of the unaugmented catch-all), so the raw_syscalls BPF entry
 * points can tail-call the right program. A second pass tries to reuse
 * augmenters across syscalls with compatible signatures (see below).
 *
 * Returns 0 on success or the first bpf_map_update_elem() error.
 */
static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace)
{
	int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter),
	    map_exit_fd  = bpf_map__fd(trace->syscalls.prog_array.sys_exit);
	int err = 0, key;

	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		int prog_fd;

		if (!trace__syscall_enabled(trace, key))
			continue;

		trace__init_syscall_bpf_progs(trace, key);

		// It'll get at least the "!raw_syscalls:unaugmented"
		prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
		prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
		err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
	}

	/*
	 * Now lets do a second pass looking for enabled syscalls without
	 * an augmenter that have a signature that is a superset of another
	 * syscall with an augmenter so that we can auto-reuse it.
	 *
	 * I.e. if we have an augmenter for the "open" syscall that has
	 * this signature:
	 *
	 *   int open(const char *pathname, int flags, mode_t mode);
	 *
	 * I.e. that will collect just the first string argument, then we
	 * can reuse it for the 'creat' syscall, that has this signature:
	 *
	 *   int creat(const char *pathname, mode_t mode);
	 *
	 * and for:
	 *
	 *   int stat(const char *pathname, struct stat *statbuf);
	 *   int lstat(const char *pathname, struct stat *statbuf);
	 *
	 * Because the 'open' augmenter will collect the first arg as a string,
	 * and leave alone all the other args, which already helps with
	 * beautifying 'stat' and 'lstat''s pathname arg.
	 *
	 * Then, in time, when 'stat' gets an augmenter that collects both
	 * first and second arg (this one on the raw_syscalls:sys_exit prog
	 * array tail call, then that one will be used.
	 */
	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
		struct syscall *sc = trace__syscall_info(trace, NULL, key);
		struct bpf_program *pair_prog;
		int prog_fd;

		if (sc == NULL || sc->bpf_prog.sys_enter == NULL)
			continue;

		/*
		 * For now we're just reusing the sys_enter prog, and if it
		 * already has an augmenter, we don't need to find one.
		 */
		if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog)
			continue;

		/*
		 * Look at all the other syscalls for one that has a signature
		 * that is close enough that we can share:
		 */
		pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
		if (pair_prog == NULL)
			continue;

		sc->bpf_prog.sys_enter = pair_prog;

		/*
		 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter
		 * with the fd for the program we're reusing:
		 */
		prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter);
		err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY);
		if (err)
			break;
	}


	return err;
}
3516
/*
 * Tear down the augmented syscalls machinery: remove and delete the
 * augmented sys_enter evsel, then every other evsel backed by our BPF
 * object, and finally close the BPF object itself — presumably so no
 * evsel still references the object when it is closed (TODO confirm).
 */
static void trace__delete_augmented_syscalls(struct trace *trace)
{
	struct evsel *evsel, *tmp;

	evlist__remove(trace->evlist, trace->syscalls.events.augmented);
	evsel__delete(trace->syscalls.events.augmented);
	trace->syscalls.events.augmented = NULL;

	/* _safe variant: entries are removed while walking the list */
	evlist__for_each_entry_safe(trace->evlist, tmp, evsel) {
		if (evsel->bpf_obj == trace->bpf_obj) {
			evlist__remove(trace->evlist, evsel);
			evsel__delete(evsel);
		}

	}

	bpf_object__close(trace->bpf_obj);
	trace->bpf_obj = NULL;
}
3536#else // HAVE_LIBBPF_SUPPORT
/*
 * Fallbacks used when perf is built without libbpf support: all the
 * BPF-related facilities degrade to no-ops (NULL lookups, successful
 * empty initialization) so the rest of 'perf trace' keeps working.
 */
static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused,
						   const char *name __maybe_unused)
{
	return NULL;
}

static void trace__set_bpf_map_filtered_pids(struct trace *trace __maybe_unused)
{
}

static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused)
{
}

static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
							    const char *name __maybe_unused)
{
	return NULL;
}

static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused)
{
	return 0;
}

static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused)
{
}
3565#endif // HAVE_LIBBPF_SUPPORT
3566
3567static bool trace__only_augmented_syscalls_evsels(struct trace *trace)
3568{
3569	struct evsel *evsel;
3570
3571	evlist__for_each_entry(trace->evlist, evsel) {
3572		if (evsel == trace->syscalls.events.augmented ||
3573		    evsel->bpf_obj == trace->bpf_obj)
3574			continue;
3575
3576		return false;
3577	}
3578
3579	return true;
3580}
3581
3582static int trace__set_ev_qualifier_filter(struct trace *trace)
3583{
3584	if (trace->syscalls.events.sys_enter)
3585		return trace__set_ev_qualifier_tp_filter(trace);
3586	return 0;
3587}
3588
/*
 * Mark each pid in 'pids' as filtered (pid -> true) in the given BPF
 * map so the BPF programs can skip events from those pids.  Compiles
 * to a successful no-op when built without libbpf.
 *
 * Returns 0 or the first bpf_map_update_elem() error.
 */
static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
				    size_t npids __maybe_unused, pid_t *pids __maybe_unused)
{
	int err = 0;
#ifdef HAVE_LIBBPF_SUPPORT
	bool value = true;
	int map_fd = bpf_map__fd(map);
	size_t i;

	for (i = 0; i < npids; ++i) {
		err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
		if (err)
			break;
	}
#endif
	return err;
}
3606
3607static int trace__set_filter_loop_pids(struct trace *trace)
3608{
3609	unsigned int nr = 1, err;
3610	pid_t pids[32] = {
3611		getpid(),
3612	};
3613	struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3614
3615	while (thread && nr < ARRAY_SIZE(pids)) {
3616		struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid);
3617
3618		if (parent == NULL)
3619			break;
3620
3621		if (!strcmp(thread__comm_str(parent), "sshd") ||
3622		    strstarts(thread__comm_str(parent), "gnome-terminal")) {
3623			pids[nr++] = parent->tid;
3624			break;
3625		}
3626		thread = parent;
3627	}
3628
3629	err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
3630	if (!err && trace->filter_pids.map)
3631		err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
3632
3633	return err;
3634}
3635
3636static int trace__set_filter_pids(struct trace *trace)
3637{
3638	int err = 0;
3639	/*
3640	 * Better not use !target__has_task() here because we need to cover the
3641	 * case where no threads were specified in the command line, but a
3642	 * workload was, and in that case we will fill in the thread_map when
3643	 * we fork the workload in evlist__prepare_workload.
3644	 */
3645	if (trace->filter_pids.nr > 0) {
3646		err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
3647						    trace->filter_pids.entries);
3648		if (!err && trace->filter_pids.map) {
3649			err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
3650						       trace->filter_pids.entries);
3651		}
3652	} else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
3653		err = trace__set_filter_loop_pids(trace);
3654	}
3655
3656	return err;
3657}
3658
3659static int __trace__deliver_event(struct trace *trace, union perf_event *event)
3660{
3661	struct evlist *evlist = trace->evlist;
3662	struct perf_sample sample;
3663	int err = evlist__parse_sample(evlist, event, &sample);
3664
3665	if (err)
3666		fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
3667	else
3668		trace__handle_event(trace, event, &sample);
3669
3670	return 0;
3671}
3672
3673static int __trace__flush_events(struct trace *trace)
3674{
3675	u64 first = ordered_events__first_time(&trace->oe.data);
3676	u64 flush = trace->oe.last - NSEC_PER_SEC;
3677
3678	/* Is there some thing to flush.. */
3679	if (first && first < flush)
3680		return ordered_events__flush_time(&trace->oe.data, flush);
3681
3682	return 0;
3683}
3684
3685static int trace__flush_events(struct trace *trace)
3686{
3687	return !trace->sort_events ? 0 : __trace__flush_events(trace);
3688}
3689
3690static int trace__deliver_event(struct trace *trace, union perf_event *event)
3691{
3692	int err;
3693
3694	if (!trace->sort_events)
3695		return __trace__deliver_event(trace, event);
3696
3697	err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
3698	if (err && err != -1)
3699		return err;
3700
3701	err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL);
3702	if (err)
3703		return err;
3704
3705	return trace__flush_events(trace);
3706}
3707
3708static int ordered_events__deliver_event(struct ordered_events *oe,
3709					 struct ordered_event *event)
3710{
3711	struct trace *trace = container_of(oe, struct trace, oe.data);
3712
3713	return __trace__deliver_event(trace, event->event);
3714}
3715
3716static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg)
3717{
3718	struct tep_format_field *field;
3719	struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);
3720
3721	if (evsel->tp_format == NULL || fmt == NULL)
3722		return NULL;
3723
3724	for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt)
3725		if (strcmp(field->name, arg) == 0)
3726			return fmt;
3727
3728	return NULL;
3729}
3730
3731static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
3732{
3733	char *tok, *left = evsel->filter, *new_filter = evsel->filter;
3734
3735	while ((tok = strpbrk(left, "=<>!")) != NULL) {
3736		char *right = tok + 1, *right_end;
3737
3738		if (*right == '=')
3739			++right;
3740
3741		while (isspace(*right))
3742			++right;
3743
3744		if (*right == '\0')
3745			break;
3746
3747		while (!isalpha(*left))
3748			if (++left == tok) {
3749				/*
3750				 * Bail out, can't find the name of the argument that is being
3751				 * used in the filter, let it try to set this filter, will fail later.
3752				 */
3753				return 0;
3754			}
3755
3756		right_end = right + 1;
3757		while (isalnum(*right_end) || *right_end == '_' || *right_end == '|')
3758			++right_end;
3759
3760		if (isalpha(*right)) {
3761			struct syscall_arg_fmt *fmt;
3762			int left_size = tok - left,
3763			    right_size = right_end - right;
3764			char arg[128];
3765
3766			while (isspace(left[left_size - 1]))
3767				--left_size;
3768
3769			scnprintf(arg, sizeof(arg), "%.*s", left_size, left);
3770
3771			fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg);
3772			if (fmt == NULL) {
3773				pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
3774				       arg, evsel->name, evsel->filter);
3775				return -1;
3776			}
3777
3778			pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ",
3779				 arg, (int)(right - tok), tok, right_size, right);
3780
3781			if (fmt->strtoul) {
3782				u64 val;
3783				struct syscall_arg syscall_arg = {
3784					.parm = fmt->parm,
3785				};
3786
3787				if (fmt->strtoul(right, right_size, &syscall_arg, &val)) {
3788					char *n, expansion[19];
3789					int expansion_lenght = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val);
3790					int expansion_offset = right - new_filter;
3791
3792					pr_debug("%s", expansion);
3793
3794					if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) {
3795						pr_debug(" out of memory!\n");
3796						free(new_filter);
3797						return -1;
3798					}
3799					if (new_filter != evsel->filter)
3800						free(new_filter);
3801					left = n + expansion_offset + expansion_lenght;
3802					new_filter = n;
3803				} else {
3804					pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n",
3805					       right_size, right, arg, evsel->name, evsel->filter);
3806					return -1;
3807				}
3808			} else {
3809				pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n",
3810				       arg, evsel->name, evsel->filter);
3811				return -1;
3812			}
3813
3814			pr_debug("\n");
3815		} else {
3816			left = right_end;
3817		}
3818	}
3819
3820	if (new_filter != evsel->filter) {
3821		pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
3822		evsel__set_filter(evsel, new_filter);
3823		free(new_filter);
3824	}
3825
3826	return 0;
3827}
3828
3829static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
3830{
3831	struct evlist *evlist = trace->evlist;
3832	struct evsel *evsel;
3833
3834	evlist__for_each_entry(evlist, evsel) {
3835		if (evsel->filter == NULL)
3836			continue;
3837
3838		if (trace__expand_filter(trace, evsel)) {
3839			*err_evsel = evsel;
3840			return -1;
3841		}
3842	}
3843
3844	return 0;
3845}
3846
/*
 * The live-tracing main loop: add the syscall/pagefault/sched events,
 * optionally fork the workload, create maps, open/configure/mmap the
 * evlist, apply pid and event-qualifier filters, then consume ring
 * buffer events until interrupted or the workload finishes, printing
 * the per-thread summary at the end when requested.
 *
 * Returns 0 on success, negative errno-style value otherwise.
 */
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct evlist *evlist = trace->evlist;
	struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
	int err = -1, i;
	unsigned long before;
	const bool forks = argc > 0;
	bool draining = false;

	trace->live = true;

	if (!trace->raw_augmented_syscalls) {
		if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
			goto out_error_raw_syscalls;

		if (trace->trace_syscalls)
			trace->vfs_getname = evlist__add_vfs_getname(evlist);
	}

	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
		pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
		if (pgfault_maj == NULL)
			goto out_error_mem;
		evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
		evlist__add(evlist, pgfault_maj);
	}

	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
		pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
		if (pgfault_min == NULL)
			goto out_error_mem;
		evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
		evlist__add(evlist, pgfault_min);
	}

	/* Enable ignoring missing threads when -u/-p option is defined. */
	trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid;

	if (trace->sched &&
	    evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime))
		goto out_error_sched_stat_runtime;
	/*
	 * If a global cgroup was set, apply it to all the events without an
	 * explicit cgroup. I.e.:
	 *
	 * 	trace -G A -e sched:*switch
	 *
	 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc
	 * _and_ sched:sched_switch to the 'A' cgroup, while:
	 *
	 * trace -e sched:*switch -G A
	 *
	 * will only set the sched:sched_switch event to the 'A' cgroup, all the
	 * other events (raw_syscalls:sys_{enter,exit}, etc are left "without"
	 * a cgroup (on the root cgroup, sys wide, etc).
	 *
	 * Multiple cgroups:
	 *
	 * trace -G A -e sched:*switch -G B
	 *
	 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
	 * to the 'B' cgroup.
	 *
	 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
	 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
	 */
	if (trace->cgroup)
		evlist__set_default_cgroup(trace->evlist, trace->cgroup);

	err = evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_evlist;
	}

	evlist__config(evlist, &trace->opts, &callchain_param);

	if (forks) {
		/* Fork the workload now; it is started only after everything is set up. */
		err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
		if (err < 0) {
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_evlist;
		}
		workload_pid = evlist->workload.pid;
	}

	err = evlist__open(evlist);
	if (err < 0)
		goto out_error_open;

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_error_open;
	}

	err = trace__set_filter_pids(trace);
	if (err < 0)
		goto out_error_mem;

	if (trace->syscalls.prog_array.sys_enter)
		trace__init_syscalls_bpf_prog_array_maps(trace);

	if (trace->ev_qualifier_ids.nr > 0) {
		err = trace__set_ev_qualifier_filter(trace);
		if (err < 0)
			goto out_errno;

		if (trace->syscalls.events.sys_exit) {
			pr_debug("event qualifier tracepoint filter: %s\n",
				 trace->syscalls.events.sys_exit->filter);
		}
	}

	/*
	 * If the "close" syscall is not traced, then we will not have the
	 * opportunity to, in syscall_arg__scnprintf_close_fd() invalidate the
	 * fd->pathname table and were ending up showing the last value set by
	 * syscalls opening a pathname and associating it with a descriptor or
	 * reading it from /proc/pid/fd/ in cases where that doesn't make
	 * sense.
	 *
	 *  So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
	 *  not in use.
	 */
	trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));

	err = trace__expand_filters(trace, &evsel);
	if (err)
		goto out_delete_evlist;
	err = evlist__apply_filters(evlist, &evsel);
	if (err < 0)
		goto out_error_apply_filters;

	if (trace->dump.map)
		bpf_map__fprintf(trace->dump.map, trace->output);

	err = evlist__mmap(evlist, trace->opts.mmap_pages);
	if (err < 0)
		goto out_error_mmap;

	if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
		evlist__enable(evlist);

	if (forks)
		evlist__start_workload(evlist);

	if (trace->opts.initial_delay) {
		usleep(trace->opts.initial_delay * 1000);
		evlist__enable(evlist);
	}

	trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
		perf_thread_map__nr(evlist->core.threads) > 1 ||
		evlist__first(evlist)->core.attr.inherit;

	/*
	 * Now that we already used evsel->core.attr to ask the kernel to setup the
	 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in
	 * trace__resolve_callchain(), allowing per-event max-stack settings
	 * to override an explicitly set --max-stack global setting.
	 */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__has_callchain(evsel) &&
		    evsel->core.attr.sample_max_stack == 0)
			evsel->core.attr.sample_max_stack = trace->max_stack;
	}
/* Ring buffer consumption loop: come back here while events keep flowing. */
again:
	before = trace->nr_events;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		union perf_event *event;
		struct mmap *md;

		md = &evlist->mmap[i];
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			++trace->nr_events;

			err = trace__deliver_event(trace, event);
			if (err)
				goto out_disable;

			perf_mmap__consume(&md->core);

			if (interrupted)
				goto out_disable;

			/* On the first 'done' notification, stop the events and drain. */
			if (done && !draining) {
				evlist__disable(evlist);
				draining = true;
			}
		}
		perf_mmap__read_done(&md->core);
	}

	if (trace->nr_events == before) {
		/* Nothing new arrived: poll (bounded to 100ms once 'done'). */
		int timeout = done ? 100 : -1;

		if (!draining && evlist__poll(evlist, timeout) > 0) {
			if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
				draining = true;

			goto again;
		} else {
			if (trace__flush_events(trace))
				goto out_disable;
		}
	} else {
		goto again;
	}

out_disable:
	thread__zput(trace->current);

	evlist__disable(evlist);

	if (trace->sort_events)
		ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);

	if (!err) {
		if (trace->summary)
			trace__fprintf_thread_summary(trace, trace->output);

		if (trace->show_tool_stats) {
			fprintf(trace->output, "Stats:\n "
					       " vfs_getname : %" PRIu64 "\n"
					       " proc_getname: %" PRIu64 "\n",
				trace->stats.vfs_getname,
				trace->stats.proc_getname);
		}
	}

out_delete_evlist:
	trace__symbols__exit(trace);
	evlist__free_syscall_tp_fields(evlist);
	evlist__delete(evlist);
	cgroup__put(trace->cgroup);
	trace->evlist = NULL;
	trace->live = false;
	return err;
/* Error exits below: the braced block shares one errbuf for the strerror helpers. */
{
	char errbuf[BUFSIZ];

out_error_sched_stat_runtime:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
	goto out_error;

out_error_raw_syscalls:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
	goto out_error;

out_error_mmap:
	evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
	goto out_error;

out_error_open:
	evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));

out_error:
	fprintf(trace->output, "%s\n", errbuf);
	goto out_delete_evlist;

out_error_apply_filters:
	fprintf(trace->output,
		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
		evsel->filter, evsel__name(evsel), errno,
		str_error_r(errno, errbuf, sizeof(errbuf)));
	goto out_delete_evlist;
}
out_error_mem:
	fprintf(trace->output, "Not enough memory to run!\n");
	goto out_delete_evlist;

out_errno:
	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
	goto out_delete_evlist;
}
4137
/*
 * Replay mode (-i perf.data): process a previously recorded session,
 * wiring the raw_syscalls (or, on older kernels, syscalls) tracepoints
 * and page fault software events to the same handlers used live.
 *
 * Returns 0 on success, negative error otherwise.
 */
static int trace__replay(struct trace *trace)
{
	const struct evsel_str_handler handlers[] = {
		{ "probe:vfs_getname",	     trace__vfs_getname, },
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = trace->force,
	};
	struct perf_session *session;
	struct evsel *evsel;
	int err = -1;

	trace->tool.sample	  = trace__process_sample;
	trace->tool.mmap	  = perf_event__process_mmap;
	trace->tool.mmap2	  = perf_event__process_mmap2;
	trace->tool.comm	  = perf_event__process_comm;
	trace->tool.exit	  = perf_event__process_exit;
	trace->tool.fork	  = perf_event__process_fork;
	trace->tool.attr	  = perf_event__process_attr;
	trace->tool.tracing_data  = perf_event__process_tracing_data;
	trace->tool.build_id	  = perf_event__process_build_id;
	trace->tool.namespaces	  = perf_event__process_namespaces;

	trace->tool.ordered_events = true;
	trace->tool.ordering_requires_timestamps = true;

	/* add tid to output */
	trace->multiple_threads = true;

	session = perf_session__new(&data, &trace->tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	if (trace->opts.target.pid)
		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);

	if (trace->opts.target.tid)
		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);

	if (symbol__init(&session->header.env) < 0)
		goto out;

	trace->host = &session->machines.host;

	err = perf_session__set_tracepoints_handlers(session, handlers);
	if (err)
		goto out;

	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
	trace->syscalls.events.sys_enter = evsel;
	/* older kernels have syscalls tp versus raw_syscalls */
	if (evsel == NULL)
		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");

	if (evsel &&
	    (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
		pr_err("Error during initialize raw_syscalls:sys_enter event\n");
		goto out;
	}

	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
	trace->syscalls.events.sys_exit = evsel;
	/* same older-kernel fallback for the exit tracepoint */
	if (evsel == NULL)
		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
	if (evsel &&
	    (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
		pr_err("Error during initialize raw_syscalls:sys_exit event\n");
		goto out;
	}

	/* route page fault software events to the pagefault handler */
	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
		    (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
			evsel->handler = trace__pgfault;
	}

	setup_pager();

	err = perf_session__process_events(session);
	if (err)
		pr_err("Failed to process events, error %d", err);

	else if (trace->summary)
		trace__fprintf_thread_summary(trace, trace->output);

out:
	perf_session__delete(session);

	return err;
}
4234
/* Print the section banner for the per-thread summary; returns chars written. */
static size_t trace__fprintf_threads_header(FILE *fp)
{
	return fprintf(fp, "\n Summary of events:\n\n");
}
4243
/*
 * Resort the per-thread syscall stats (an intlist keyed by syscall id)
 * by total time spent in msecs, for the summary table.  The body below
 * is the per-node entry initializer the macro expects.
 */
DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
	struct syscall_stats *stats;
	double		     msecs;
	int		     syscall;
)
{
	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
	struct syscall_stats *stats = source->priv;

	entry->syscall = source->i;
	entry->stats   = stats;
	/* total time = nr_samples * average, converted from ns to ms; 0 when never hit */
	entry->msecs   = stats ? (u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0;
}
4257
/*
 * Print one thread's syscall statistics table (calls, errors, total,
 * min/avg/max and relative stddev, all in msecs), sorted by total time,
 * optionally followed by a per-errno breakdown of the failures.
 *
 * Returns the number of characters printed.
 */
static size_t thread__dump_stats(struct thread_trace *ttrace,
				 struct trace *trace, FILE *fp)
{
	size_t printed = 0;
	struct syscall *sc;
	struct rb_node *nd;
	DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);

	if (syscall_stats == NULL)
		return 0;

	printed += fprintf(fp, "\n");

	printed += fprintf(fp, "   syscall            calls  errors  total       min       avg       max       stddev\n");
	printed += fprintf(fp, "                                     (msec)    (msec)    (msec)    (msec)        (%%)\n");
	printed += fprintf(fp, "   --------------- --------  ------ -------- --------- --------- ---------     ------\n");

	resort_rb__for_each_entry(nd, syscall_stats) {
		struct syscall_stats *stats = syscall_stats_entry->stats;
		if (stats) {
			/* stats accumulate in nanoseconds; convert for display */
			double min = (double)(stats->stats.min) / NSEC_PER_MSEC;
			double max = (double)(stats->stats.max) / NSEC_PER_MSEC;
			double avg = avg_stats(&stats->stats);
			double pct;
			u64 n = (u64)stats->stats.n;

			/* relative stddev, as a percentage of the average */
			pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0;
			avg /= NSEC_PER_MSEC;

			sc = &trace->syscalls.table[syscall_stats_entry->syscall];
			printed += fprintf(fp, "   %-15s", sc->name);
			printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f",
					   n, stats->nr_failures, syscall_stats_entry->msecs, min, avg);
			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);

			/* errno breakdown; errnos[] is indexed by errno - 1 */
			if (trace->errno_summary && stats->nr_failures) {
				const char *arch_name = perf_env__arch(trace->host->env);
				int e;

				for (e = 0; e < stats->max_errno; ++e) {
					if (stats->errnos[e] != 0)
						fprintf(fp, "\t\t\t\t%s: %d\n", arch_syscalls__strerrno(arch_name, e + 1), stats->errnos[e]);
				}
			}
		}
	}

	resort_rb__delete(syscall_stats);
	printed += fprintf(fp, "\n\n");

	return printed;
}
4310
4311static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
 
 
 
 
 
 
 
4312{
4313	size_t printed = 0;
 
 
 
4314	struct thread_trace *ttrace = thread__priv(thread);
4315	double ratio;
4316
4317	if (ttrace == NULL)
4318		return 0;
4319
4320	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
4321
4322	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
4323	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
4324	printed += fprintf(fp, "%.1f%%", ratio);
4325	if (ttrace->pfmaj)
4326		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
4327	if (ttrace->pfmin)
4328		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
4329	if (trace->sched)
4330		printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
4331	else if (fputc('\n', fp) != EOF)
4332		++printed;
4333
4334	printed += thread__dump_stats(ttrace, trace, fp);
4335
4336	return printed;
4337}
4338
4339static unsigned long thread__nr_events(struct thread_trace *ttrace)
4340{
4341	return ttrace ? ttrace->nr_events : 0;
4342}
4343
/*
 * Resort a machine's threads rb-tree by number of traced events for the
 * summary output; the body below is the per-node entry initializer.
 */
DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
	struct thread *thread;
)
{
	entry->thread = rb_entry(nd, struct thread, rb_node);
}
4350
/*
 * Print the end-of-run summary: for each bucket of the host machine's
 * thread hash table, resort its threads by event count and print one
 * summary block per thread.  Returns characters printed, or 0 when the
 * resort allocation fails.
 */
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
	size_t printed = trace__fprintf_threads_header(fp);
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);

		if (threads == NULL) {
			fprintf(fp, "%s", "Error sorting output by nr_events!\n");
			return 0;
		}

		resort_rb__for_each_entry(nd, threads)
			printed += trace__fprintf_thread(fp, threads_entry->thread, trace);

		resort_rb__delete(threads);
	}
	return printed;
}
4372
4373static int trace__set_duration(const struct option *opt, const char *str,
4374			       int unset __maybe_unused)
4375{
4376	struct trace *trace = opt->value;
4377
4378	trace->duration_filter = atof(str);
4379	return 0;
4380}
4381
4382static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
4383					      int unset __maybe_unused)
4384{
4385	int ret = -1;
4386	size_t i;
4387	struct trace *trace = opt->value;
4388	/*
4389	 * FIXME: introduce a intarray class, plain parse csv and create a
4390	 * { int nr, int entries[] } struct...
4391	 */
4392	struct intlist *list = intlist__new(str);
4393
4394	if (list == NULL)
4395		return -1;
4396
4397	i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
4398	trace->filter_pids.entries = calloc(i, sizeof(pid_t));
4399
4400	if (trace->filter_pids.entries == NULL)
4401		goto out;
4402
4403	trace->filter_pids.entries[0] = getpid();
4404
4405	for (i = 1; i < trace->filter_pids.nr; ++i)
4406		trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
4407
4408	intlist__delete(list);
4409	ret = 0;
4410out:
4411	return ret;
4412}
4413
4414static int trace__open_output(struct trace *trace, const char *filename)
4415{
4416	struct stat st;
4417
4418	if (!stat(filename, &st) && st.st_size) {
4419		char oldname[PATH_MAX];
4420
4421		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
4422		unlink(oldname);
4423		rename(filename, oldname);
4424	}
4425
4426	trace->output = fopen(filename, "w");
4427
4428	return trace->output == NULL ? -errno : 0;
4429}
4430
4431static int parse_pagefaults(const struct option *opt, const char *str,
4432			    int unset __maybe_unused)
4433{
4434	int *trace_pgfaults = opt->value;
4435
4436	if (strcmp(str, "all") == 0)
4437		*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
4438	else if (strcmp(str, "maj") == 0)
4439		*trace_pgfaults |= TRACE_PFMAJ;
4440	else if (strcmp(str, "min") == 0)
4441		*trace_pgfaults |= TRACE_PFMIN;
4442	else
4443		return -1;
4444
4445	return 0;
4446}
4447
4448static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler)
4449{
4450	struct evsel *evsel;
4451
4452	evlist__for_each_entry(evlist, evsel) {
4453		if (evsel->handler == NULL)
4454			evsel->handler = handler;
4455	}
4456}
4457
/*
 * Copy the strace-like beautifier table for syscall 'name' over this
 * evsel's per-argument formatters, so sys_enter_NAME/sys_exit_NAME
 * tracepoints print their args like the raw_syscalls path does.
 *
 * The first tracepoint field holds the syscall number ("__syscall_nr",
 * or "nr" on older kernels) and has no beautifier, so it is skipped.
 *
 * NOTE(review): dereferences evsel->tp_format unconditionally — callers
 * appear to guarantee it is set; confirm before reusing elsewhere.
 */
static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name)
{
	struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);

	if (fmt) {
		struct syscall_fmt *scfmt = syscall_fmt__find(name);

		if (scfmt) {
			int skip = 0;

			if (strcmp(evsel->tp_format->format.fields->name, "__syscall_nr") == 0 ||
			    strcmp(evsel->tp_format->format.fields->name, "nr") == 0)
				++skip;

			memcpy(fmt + skip, scfmt->arg, (evsel->tp_format->format.nr_fields - skip) * sizeof(*fmt));
		}
	}
}
4476
/*
 * For each tracepoint evsel not yet initialized (no ->priv), set up its
 * field accessors: tracepoints outside the "syscalls" subsystem just get
 * generic per-arg scnprintf beautifiers, while syscalls:sys_enter_* and
 * syscalls:sys_exit_* get id/args (or id/ret) accessors plus the
 * beautifier table for that syscall name.
 *
 * Returns 0 on success, -1 on initialization failure.
 */
static int evlist__set_syscall_tp_fields(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		/* already set up, or no tracepoint format to work with */
		if (evsel->priv || !evsel->tp_format)
			continue;

		if (strcmp(evsel->tp_format->system, "syscalls")) {
			evsel__init_tp_arg_scnprintf(evsel);
			continue;
		}

		if (evsel__init_syscall_tp(evsel))
			return -1;

		if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
			struct syscall_tp *sc = __evsel__syscall_tp(evsel);

			/* args payload starts right after the u64 syscall id */
			if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
				return -1;

			evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_enter_") - 1);
		} else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
			struct syscall_tp *sc = __evsel__syscall_tp(evsel);

			/* return value is the u64 right after the syscall id */
			if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
				return -1;

			evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_exit_") - 1);
		}
	}

	return 0;
}
4512
4513/*
4514 * XXX: Hackish, just splitting the combined -e+--event (syscalls
4515 * (raw_syscalls:{sys_{enter,exit}} + events (tracepoints, HW, SW, etc) to use
4516 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
4517 *
4518 * It'd be better to introduce a parse_options() variant that would return a
4519 * list with the terms it didn't match to an event...
4520 */
4521static int trace__parse_events_option(const struct option *opt, const char *str,
4522				      int unset __maybe_unused)
4523{
4524	struct trace *trace = (struct trace *)opt->value;
4525	const char *s = str;
4526	char *sep = NULL, *lists[2] = { NULL, NULL, };
4527	int len = strlen(str) + 1, err = -1, list, idx;
4528	char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
4529	char group_name[PATH_MAX];
4530	struct syscall_fmt *fmt;
4531
4532	if (strace_groups_dir == NULL)
4533		return -1;
4534
4535	if (*s == '!') {
4536		++s;
4537		trace->not_ev_qualifier = true;
4538	}
4539
4540	while (1) {
4541		if ((sep = strchr(s, ',')) != NULL)
4542			*sep = '\0';
4543
4544		list = 0;
4545		if (syscalltbl__id(trace->sctbl, s) >= 0 ||
4546		    syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
4547			list = 1;
4548			goto do_concat;
4549		}
4550
4551		fmt = syscall_fmt__find_by_alias(s);
4552		if (fmt != NULL) {
4553			list = 1;
4554			s = fmt->name;
4555		} else {
4556			path__join(group_name, sizeof(group_name), strace_groups_dir, s);
4557			if (access(group_name, R_OK) == 0)
4558				list = 1;
4559		}
4560do_concat:
4561		if (lists[list]) {
4562			sprintf(lists[list] + strlen(lists[list]), ",%s", s);
4563		} else {
4564			lists[list] = malloc(len);
4565			if (lists[list] == NULL)
4566				goto out;
4567			strcpy(lists[list], s);
4568		}
4569
4570		if (!sep)
4571			break;
4572
4573		*sep = ',';
4574		s = sep + 1;
4575	}
4576
4577	if (lists[1] != NULL) {
4578		struct strlist_config slist_config = {
4579			.dirname = strace_groups_dir,
4580		};
4581
4582		trace->ev_qualifier = strlist__new(lists[1], &slist_config);
4583		if (trace->ev_qualifier == NULL) {
4584			fputs("Not enough memory to parse event qualifier", trace->output);
4585			goto out;
4586		}
4587
4588		if (trace__validate_ev_qualifier(trace))
4589			goto out;
4590		trace->trace_syscalls = true;
4591	}
4592
4593	err = 0;
4594
4595	if (lists[0]) {
4596		struct option o = {
4597			.value = &trace->evlist,
4598		};
4599		err = parse_events_option(&o, lists[0], 0);
4600	}
4601out:
4602	free(strace_groups_dir);
4603	free(lists[0]);
4604	free(lists[1]);
4605	if (sep)
4606		*sep = ',';
4607
4608	return err;
4609}
4610
4611static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
4612{
4613	struct trace *trace = opt->value;
4614
4615	if (!list_empty(&trace->evlist->core.entries)) {
4616		struct option o = {
4617			.value = &trace->evlist,
4618		};
4619		return parse_cgroups(&o, str, unset);
4620	}
4621	trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
4622
4623	return 0;
4624}
4625
4626static int trace__config(const char *var, const char *value, void *arg)
4627{
4628	struct trace *trace = arg;
4629	int err = 0;
4630
4631	if (!strcmp(var, "trace.add_events")) {
4632		trace->perfconfig_events = strdup(value);
4633		if (trace->perfconfig_events == NULL) {
4634			pr_err("Not enough memory for %s\n", "trace.add_events");
4635			return -1;
4636		}
4637	} else if (!strcmp(var, "trace.show_timestamp")) {
4638		trace->show_tstamp = perf_config_bool(var, value);
4639	} else if (!strcmp(var, "trace.show_duration")) {
4640		trace->show_duration = perf_config_bool(var, value);
4641	} else if (!strcmp(var, "trace.show_arg_names")) {
4642		trace->show_arg_names = perf_config_bool(var, value);
4643		if (!trace->show_arg_names)
4644			trace->show_zeros = true;
4645	} else if (!strcmp(var, "trace.show_zeros")) {
4646		bool new_show_zeros = perf_config_bool(var, value);
4647		if (!trace->show_arg_names && !new_show_zeros) {
4648			pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
4649			goto out;
4650		}
4651		trace->show_zeros = new_show_zeros;
4652	} else if (!strcmp(var, "trace.show_prefix")) {
4653		trace->show_string_prefix = perf_config_bool(var, value);
4654	} else if (!strcmp(var, "trace.no_inherit")) {
4655		trace->opts.no_inherit = perf_config_bool(var, value);
4656	} else if (!strcmp(var, "trace.args_alignment")) {
4657		int args_alignment = 0;
4658		if (perf_config_int(&args_alignment, var, value) == 0)
4659			trace->args_alignment = args_alignment;
4660	} else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
4661		if (strcasecmp(value, "libtraceevent") == 0)
4662			trace->libtraceevent_print = true;
4663		else if (strcasecmp(value, "libbeauty") == 0)
4664			trace->libtraceevent_print = false;
4665	}
4666out:
4667	return err;
4668}
4669
4670static void trace__exit(struct trace *trace)
4671{
4672	int i;
4673
4674	strlist__delete(trace->ev_qualifier);
4675	free(trace->ev_qualifier_ids.entries);
4676	if (trace->syscalls.table) {
4677		for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
4678			syscall__exit(&trace->syscalls.table[i]);
4679		free(trace->syscalls.table);
4680	}
4681	syscalltbl__delete(trace->sctbl);
4682	zfree(&trace->perfconfig_events);
4683}
4684
/*
 * Entry point for 'perf trace': parse options/config, set up the (optional)
 * BPF syscall-augmentation machinery, then either record, replay a perf.data
 * file, or trace live. Cleanup flows through the out_close/out goto chain.
 */
int cmd_trace(int argc, const char **argv)
{
	const char *trace_usage[] = {
		"perf trace [<options>] [<command>]",
		"perf trace [<options>] -- <command> [<options>]",
		"perf trace record [<options>] [<command>]",
		"perf trace record [<options>] -- <command> [<options>]",
		NULL
	};
	struct trace trace = {
		.opts = {
			.target = {
				.uid	   = UINT_MAX,
				.uses_mmap = true,
			},
			.user_freq     = UINT_MAX,
			.user_interval = ULLONG_MAX,
			.no_buffering  = true,
			.mmap_pages    = UINT_MAX,	/* UINT_MAX == "user did not set it" */
		},
		.output = stderr,
		.show_comm = true,
		.show_tstamp = true,
		.show_duration = true,
		.show_arg_names = true,
		.args_alignment = 70,
		.trace_syscalls = false,
		.kernel_syscallchains = false,
		.max_stack = UINT_MAX,
		.max_events = ULONG_MAX,
	};
	const char *map_dump_str = NULL;
	const char *output_name = NULL;
	const struct option trace_options[] = {
	OPT_CALLBACK('e', "event", &trace, "event",
		     "event/syscall selector. use 'perf list' to list available events",
		     trace__parse_events_option),
	OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN(0, "comm", &trace.show_comm,
		    "show the thread COMM next to its id"),
	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
	OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
		     trace__parse_events_option),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		    "trace events on existing process id"),
	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
		    "trace events on existing thread id"),
	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
		     "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
		     "number of mmap data pages", evlist__parse_mmap_pages),
	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK(0, "duration", &trace, "float",
		     "show only events with duration > N.M ms",
		     trace__set_duration),
#ifdef HAVE_LIBBPF_SUPPORT
	OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
#endif
	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
	OPT_BOOLEAN('T', "time", &trace.full_time,
		    "Show full timestamp, not time relative to first start"),
	OPT_BOOLEAN(0, "failure", &trace.failure_only,
		    "Show only syscalls that failed"),
	OPT_BOOLEAN('s', "summary", &trace.summary_only,
		    "Show only syscall summary with statistics"),
	OPT_BOOLEAN('S', "with-summary", &trace.summary,
		    "Show all syscalls and summary with statistics"),
	OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
		    "Show errno stats per syscall, use with -s or -S"),
	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
		     "Trace pagefaults", parse_pagefaults, "maj"),
	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
	OPT_CALLBACK(0, "call-graph", &trace.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
		    "Use libtraceevent to print the tracepoint arguments."),
	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
		    "Show the kernel callchains on the syscall exit path"),
	OPT_ULONG(0, "max-events", &trace.max_events,
		"Set the maximum number of events to print, exit after that is reached. "),
	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
		     "Set the minimum stack depth when parsing the callchain, "
		     "anything below the specified depth will be ignored."),
	OPT_UINTEGER(0, "max-stack", &trace.max_stack,
		     "Set the maximum stack depth when parsing the callchain, "
		     "anything beyond the specified depth will be ignored. "
		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
			"Sort batch of events before processing, use if getting out of order events"),
	OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
			"print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
			"per thread proc mmap processing timeout in ms"),
	OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
		     trace__parse_cgroups),
	OPT_INTEGER('D', "delay", &trace.opts.initial_delay,
		     "ms to wait before starting measurement after program "
		     "start"),
	OPTS_EVSWITCH(&trace.evswitch),
	OPT_END()
	};
	bool __maybe_unused max_stack_user_set = true;
	bool mmap_pages_user_set = true;
	struct evsel *evsel;
	const char * const trace_subcommands[] = { "record", NULL };
	int err = -1;
	char bf[BUFSIZ];
	struct sigaction sigchld_act;

	/* Dump a stack on crashes, and use SA_SIGINFO for SIGCHLD so the
	 * handler can tell which child exited. */
	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);
	signal(SIGINT, sighandler_interrupt);

	memset(&sigchld_act, 0, sizeof(sigchld_act));
	sigchld_act.sa_flags = SA_SIGINFO;
	sigchld_act.sa_sigaction = sighandler_chld;
	sigaction(SIGCHLD, &sigchld_act, NULL);

	trace.evlist = evlist__new();
	trace.sctbl = syscalltbl__new();

	if (trace.evlist == NULL || trace.sctbl == NULL) {
		pr_err("Not enough memory to run!\n");
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Parsing .perfconfig may entail creating a BPF event, that may need
	 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting
	 * is too small. This affects just this process, not touching the
	 * global setting. If it fails we'll get something in 'perf trace -v'
	 * to help diagnose the problem.
	 */
	rlimit__bump_memlock();

	err = perf_config(trace__config, &trace);
	if (err)
		goto out;

	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
				 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);

	/*
	 * Here we already passed thru trace__parse_events_option() and it has
	 * already figured out if -e syscall_name, if not but if --event
	 * foo:bar was used, the user is interested _just_ in those, say,
	 * tracepoint events, not in the strace-like syscall-name-based mode.
	 *
	 * This is important because we need to check if strace-like mode is
	 * needed to decided if we should filter out the eBPF
	 * __augmented_syscalls__ code, if it is in the mix, say, via
	 * .perfconfig trace.add_events, and filter those out.
	 */
	if (!trace.trace_syscalls && !trace.trace_pgfaults &&
	    trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
		trace.trace_syscalls = true;
	}
	/*
	 * Now that we have --verbose figured out, lets see if we need to parse
	 * events from .perfconfig, so that if those events fail parsing, say some
	 * BPF program fails, then we'll be able to use --verbose to see what went
	 * wrong in more detail.
	 */
	if (trace.perfconfig_events != NULL) {
		struct parse_events_error parse_err;

		parse_events_error__init(&parse_err);
		err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
		if (err)
			parse_events_error__print(&parse_err, trace.perfconfig_events);
		parse_events_error__exit(&parse_err);
		if (err)
			goto out;
	}

	if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
		usage_with_options_msg(trace_usage, trace_options,
				       "cgroup monitoring only available in system-wide mode");
	}

	/* Look for a BPF_OUTPUT event named __augmented_syscalls__, created
	 * from a BPF object (e.g. via trace.add_events in .perfconfig). */
	evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
	if (IS_ERR(evsel)) {
		bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
		pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
		goto out;
	}

	if (evsel) {
		trace.syscalls.events.augmented = evsel;

		evsel = evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
		if (evsel == NULL) {
			pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
			goto out;
		}

		if (evsel->bpf_obj == NULL) {
			pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
			goto out;
		}

		trace.bpf_obj = evsel->bpf_obj;

		/*
		 * If we have _just_ the augmenter event but don't have a
		 * explicit --syscalls, then assume we want all strace-like
		 * syscalls:
		 */
		if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace))
			trace.trace_syscalls = true;
		/*
		 * So, if we have a syscall augmenter, but trace_syscalls, aka
		 * strace-like syscall tracing is not set, then we need to trow
		 * away the augmenter, i.e. all the events that were created
		 * from that BPF object file.
		 *
		 * This is more to fix the current .perfconfig trace.add_events
		 * style of setting up the strace-like eBPF based syscall point
		 * payload augmenter.
		 *
		 * All this complexity will be avoided by adding an alternative
		 * to trace.add_events in the form of
		 * trace.bpf_augmented_syscalls, that will be only parsed if we
		 * need it.
		 *
		 * .perfconfig trace.add_events is still useful if we want, for
		 * instance, have msr_write.msr in some .perfconfig profile based
		 * 'perf trace --config determinism.profile' mode, where for some
		 * particular goal/workload type we want a set of events and
		 * output mode (with timings, etc) instead of having to add
		 * all via the command line.
		 *
		 * Also --config to specify an alternate .perfconfig file needs
		 * to be implemented.
		 */
		if (!trace.trace_syscalls) {
			trace__delete_augmented_syscalls(&trace);
		} else {
			trace__set_bpf_map_filtered_pids(&trace);
			trace__set_bpf_map_syscalls(&trace);
			trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
		}
	}

	err = bpf__setup_stdout(trace.evlist);
	if (err) {
		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
		goto out;
	}

	err = -1;

	if (map_dump_str) {
		trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
		if (trace.dump.map == NULL) {
			pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
			goto out;
		}
	}

	/* Page faults need addresses and timestamps in each sample. */
	if (trace.trace_pgfaults) {
		trace.opts.sample_address = true;
		trace.opts.sample_time = true;
	}

	if (trace.opts.mmap_pages == UINT_MAX)
		mmap_pages_user_set = false;

	if (trace.max_stack == UINT_MAX) {
		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
		max_stack_user_set = false;
	}

#ifdef HAVE_DWARF_UNWIND_SUPPORT
	/* --min/--max-stack without an explicit --call-graph implies DWARF unwinding. */
	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
	}
#endif

	if (callchain_param.enabled) {
		if (!mmap_pages_user_set && geteuid() == 0)
			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;

		symbol_conf.use_callchain = true;
	}

	if (trace.evlist->core.nr_entries > 0) {
		evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
		if (evlist__set_syscall_tp_fields(trace.evlist)) {
			perror("failed to set syscalls:* tracepoint fields");
			goto out;
		}
	}

	if (trace.sort_events) {
		ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
		ordered_events__set_copy_on_queue(&trace.oe.data, true);
	}

	/*
	 * If we are augmenting syscalls, then combine what we put in the
	 * __augmented_syscalls__ BPF map with what is in the
	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
	 *
	 * We'll switch to look at two BPF maps, one for sys_enter and the
	 * other for sys_exit when we start augmenting the sys_exit paths with
	 * buffers that are being copied from kernel to userspace, think 'read'
	 * syscall.
	 */
	if (trace.syscalls.events.augmented) {
		evlist__for_each_entry(trace.evlist, evsel) {
			bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0;

			if (raw_syscalls_sys_exit) {
				trace.raw_augmented_syscalls = true;
				goto init_augmented_syscall_tp;
			}

			if (trace.syscalls.events.augmented->priv == NULL &&
			    strstr(evsel__name(evsel), "syscalls:sys_enter")) {
				struct evsel *augmented = trace.syscalls.events.augmented;
				if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
				    evsel__init_augmented_syscall_tp_args(augmented))
					goto out;
				/*
				 * Augmented is __augmented_syscalls__ BPF_OUTPUT event
				 * Above we made sure we can get from the payload the tp fields
				 * that we get from syscalls:sys_enter tracefs format file.
				 */
				augmented->handler = trace__sys_enter;
				/*
				 * Now we do the same for the *syscalls:sys_enter event so that
				 * if we handle it directly, i.e. if the BPF prog returns 0 so
				 * as not to filter it, then we'll handle it just like we would
				 * for the BPF_OUTPUT one:
				 */
				if (evsel__init_augmented_syscall_tp(evsel, evsel) ||
				    evsel__init_augmented_syscall_tp_args(evsel))
					goto out;
				evsel->handler = trace__sys_enter;
			}

			if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
				struct syscall_tp *sc;
init_augmented_syscall_tp:
				if (evsel__init_augmented_syscall_tp(evsel, evsel))
					goto out;
				sc = __evsel__syscall_tp(evsel);
				/*
				 * For now with BPF raw_augmented we hook into
				 * raw_syscalls:sys_enter and there we get all
				 * 6 syscall args plus the tracepoint common
				 * fields and the syscall_nr (another long).
				 * So we check if that is the case and if so
				 * don't look after the sc->args_size but
				 * always after the full raw_syscalls:sys_enter
				 * payload, which is fixed.
				 *
				 * We'll revisit this later to pass
				 * s->args_size to the BPF augmenter (now
				 * tools/perf/examples/bpf/augmented_raw_syscalls.c,
				 * so that it copies only what we need for each
				 * syscall, like what happens when we use
				 * syscalls:sys_enter_NAME, so that we reduce
				 * the kernel/userspace traffic to just what is
				 * needed for each syscall.
				 */
				if (trace.raw_augmented_syscalls)
					trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
				evsel__init_augmented_syscall_tp_ret(evsel);
				evsel->handler = trace__sys_exit;
			}
		}
	}

	/* 'perf trace record ...' delegates to perf record with a syscall preset. */
	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
		return trace__record(&trace, argc-1, &argv[1]);

	/* Using just --errno-summary will trigger --summary */
	if (trace.errno_summary && !trace.summary && !trace.summary_only)
		trace.summary_only = true;

	/* summary_only implies summary option, but don't overwrite summary if set */
	if (trace.summary_only)
		trace.summary = trace.summary_only;

	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
			perror("failed to create output file");
			goto out;
		}
	}

	err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
	if (err)
		goto out_close;

	err = target__validate(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	err = target__parse_uid(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	/* No workload and no target specified: trace the whole system. */
	if (!argc && target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

	if (input_name)
		err = trace__replay(&trace);
	else
		err = trace__run(&trace, argc, argv);

out_close:
	if (output_name != NULL)
		fclose(trace.output);
out:
	trace__exit(&trace);
	return err;
}