Linux v5.4: tools/perf/builtin-record.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * builtin-record.c
   4 *
   5 * Builtin record command: Record the profile of a workload
   6 * (or a CPU, or a PID) into the perf.data output file - for
   7 * later analysis via perf report.
   8 */
   9#include "builtin.h"
  10
  11#include "util/build-id.h"
  12#include <subcmd/parse-options.h>
  13#include "util/parse-events.h"
  14#include "util/config.h"
  15
  16#include "util/callchain.h"
  17#include "util/cgroup.h"
  18#include "util/header.h"
  19#include "util/event.h"
  20#include "util/evlist.h"
  21#include "util/evsel.h"
  22#include "util/debug.h"
  23#include "util/mmap.h"
  24#include "util/target.h"
  25#include "util/session.h"
  26#include "util/tool.h"
  27#include "util/symbol.h"
  28#include "util/record.h"
  29#include "util/cpumap.h"
  30#include "util/thread_map.h"
  31#include "util/data.h"
  32#include "util/perf_regs.h"
  33#include "util/auxtrace.h"
  34#include "util/tsc.h"
  35#include "util/parse-branch-options.h"
  36#include "util/parse-regs-options.h"
  37#include "util/llvm-utils.h"
  38#include "util/bpf-loader.h"
  39#include "util/trigger.h"
  40#include "util/perf-hooks.h"
  41#include "util/cpu-set-sched.h"
  42#include "util/synthetic-events.h"
  43#include "util/time-utils.h"
  44#include "util/units.h"
  45#include "util/bpf-event.h"
  46#include "asm/bug.h"
  47#include "perf.h"
  48
  49#include <errno.h>
  50#include <inttypes.h>
  51#include <locale.h>
  52#include <poll.h>
  53#include <unistd.h>
  54#include <sched.h>
  55#include <signal.h>
  56#include <sys/mman.h>
  57#include <sys/wait.h>
  58#include <linux/err.h>
  59#include <linux/string.h>
  60#include <linux/time64.h>
  61#include <linux/zalloc.h>
  62
  63struct switch_output {
  64	bool		 enabled;
  65	bool		 signal;
  66	unsigned long	 size;
  67	unsigned long	 time;
  68	const char	*str;
  69	bool		 set;
  70	char		 **filenames;
  71	int		 num_files;
  72	int		 cur_file;
  73};
  74
  75struct record {
  76	struct perf_tool	tool;
  77	struct record_opts	opts;
  78	u64			bytes_written;
  79	struct perf_data	data;
  80	struct auxtrace_record	*itr;
  81	struct evlist	*evlist;
  82	struct perf_session	*session;
  83	int			realtime_prio;
  84	bool			no_buildid;
  85	bool			no_buildid_set;
  86	bool			no_buildid_cache;
  87	bool			no_buildid_cache_set;
  88	bool			buildid_all;
  89	bool			timestamp_filename;
  90	bool			timestamp_boundary;
  91	struct switch_output	switch_output;
  92	unsigned long long	samples;
  93	cpu_set_t		affinity_mask;
  94};
  95
  96static volatile int auxtrace_record__snapshot_started;
  97static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
  98static DEFINE_TRIGGER(switch_output_trigger);
  99
 100static const char *affinity_tags[PERF_AFFINITY_MAX] = {
 101	"SYS", "NODE", "CPU"
 102};
 103
 104static bool switch_output_signal(struct record *rec)
 105{
 106	return rec->switch_output.signal &&
 107	       trigger_is_ready(&switch_output_trigger);
 108}
 109
 110static bool switch_output_size(struct record *rec)
 111{
 112	return rec->switch_output.size &&
 113	       trigger_is_ready(&switch_output_trigger) &&
 114	       (rec->bytes_written >= rec->switch_output.size);
 115}
 116
 117static bool switch_output_time(struct record *rec)
 118{
 119	return rec->switch_output.time &&
 120	       trigger_is_ready(&switch_output_trigger);
 121}
 122
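/*
 * A minimal, self-contained sketch of the size-based rotation test above;
 * struct rotate_policy and should_rotate() are illustrative names, not
 * perf API.
 */
#include <stdbool.h>
#include <stddef.h>

struct rotate_policy {
	size_t threshold;	/* 0 disables size-based rotation */
	size_t written;		/* bytes written so far */
};

static bool should_rotate(const struct rotate_policy *p)
{
	/* mirrors switch_output_size(): feature armed and threshold reached */
	return p->threshold && p->written >= p->threshold;
}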
 123static int record__write(struct record *rec, struct mmap *map __maybe_unused,
 124			 void *bf, size_t size)
 125{
 126	struct perf_data_file *file = &rec->session->data->file;
 127
 128	if (perf_data_file__write(file, bf, size) < 0) {
 129		pr_err("failed to write perf data, error: %m\n");
 130		return -1;
 131	}
 132
 133	rec->bytes_written += size;
 134
 135	if (switch_output_size(rec))
 136		trigger_hit(&switch_output_trigger);
 137
 138	return 0;
 139}
 140
 141static int record__aio_enabled(struct record *rec);
 142static int record__comp_enabled(struct record *rec);
 143static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
 144			    void *src, size_t src_size);
 145
 146#ifdef HAVE_AIO_SUPPORT
 147static int record__aio_write(struct aiocb *cblock, int trace_fd,
 148		void *buf, size_t size, off_t off)
 149{
 150	int rc;
 151
 152	cblock->aio_fildes = trace_fd;
 153	cblock->aio_buf    = buf;
 154	cblock->aio_nbytes = size;
 155	cblock->aio_offset = off;
 156	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;
 157
 158	do {
 159		rc = aio_write(cblock);
 160		if (rc == 0) {
 161			break;
 162		} else if (errno != EAGAIN) {
 163			cblock->aio_fildes = -1;
 164			pr_err("failed to queue perf data, error: %m\n");
 165			break;
 166		}
 167	} while (1);
 168
 169	return rc;
 170}
 171
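/*
 * A minimal, self-contained sketch of the POSIX AIO calls used above
 * (aio_write/aio_error/aio_return from <aio.h>, link with -lrt);
 * aio_write_once() and its sleep-based polling are illustrative, the real
 * code polls with aio_suspend() in record__aio_sync() below.
 */
#include <aio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>

static ssize_t aio_write_once(int fd, void *buf, size_t len, off_t off)
{
	struct aiocb cb;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_buf    = buf;
	cb.aio_nbytes = len;
	cb.aio_offset = off;
	cb.aio_sigevent.sigev_notify = SIGEV_NONE;	/* no completion signal */

	if (aio_write(&cb) < 0)
		return -1;			/* could not queue the request */
	while (aio_error(&cb) == EINPROGRESS)
		usleep(1000);			/* busy-wait, for the sketch only */
	return aio_return(&cb);			/* bytes written, or -1 */
}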
 172static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
 173{
 174	void *rem_buf;
 175	off_t rem_off;
 176	size_t rem_size;
 177	int rc, aio_errno;
 178	ssize_t aio_ret, written;
 179
 180	aio_errno = aio_error(cblock);
 181	if (aio_errno == EINPROGRESS)
 182		return 0;
 183
 184	written = aio_ret = aio_return(cblock);
 185	if (aio_ret < 0) {
 186		if (aio_errno != EINTR)
 187			pr_err("failed to write perf data, error: %m\n");
 188		written = 0;
 189	}
 190
 191	rem_size = cblock->aio_nbytes - written;
 192
 193	if (rem_size == 0) {
 194		cblock->aio_fildes = -1;
 195		/*
 196		 * md->refcount is incremented in record__aio_pushfn() for
 197		 * every aio write request started in record__aio_push() so
 198		 * decrement it because the request is now complete.
 199		 */
 200		perf_mmap__put(md);
 201		rc = 1;
 202	} else {
 203		/*
  204		 * The aio write request may require a restart with the
  205		 * remainder if the kernel didn't write the whole
  206		 * chunk at once.
 207		 */
 208		rem_off = cblock->aio_offset + written;
 209		rem_buf = (void *)(cblock->aio_buf + written);
 210		record__aio_write(cblock, cblock->aio_fildes,
 211				rem_buf, rem_size, rem_off);
 212		rc = 0;
 213	}
 214
 215	return rc;
 216}
 217
 218static int record__aio_sync(struct mmap *md, bool sync_all)
 219{
 220	struct aiocb **aiocb = md->aio.aiocb;
 221	struct aiocb *cblocks = md->aio.cblocks;
 222	struct timespec timeout = { 0, 1000 * 1000  * 1 }; /* 1ms */
 223	int i, do_suspend;
 224
 225	do {
 226		do_suspend = 0;
 227		for (i = 0; i < md->aio.nr_cblocks; ++i) {
 228			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
 229				if (sync_all)
 230					aiocb[i] = NULL;
 231				else
 232					return i;
 233			} else {
 234				/*
  235				 * A started aio write is not complete yet,
  236				 * so it has to be waited on before the
  237				 * next allocation.
 238				 */
 239				aiocb[i] = &cblocks[i];
 240				do_suspend = 1;
 241			}
 242		}
 243		if (!do_suspend)
 244			return -1;
 245
 246		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
 247			if (!(errno == EAGAIN || errno == EINTR))
 248				pr_err("failed to sync perf data, error: %m\n");
 249		}
 250	} while (1);
 251}
 252
 253struct record_aio {
 254	struct record	*rec;
 255	void		*data;
 256	size_t		size;
 257};
 258
 259static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
 260{
 261	struct record_aio *aio = to;
 262
 263	/*
  264	 * The map->core.base data pointed to by buf is copied into a free
  265	 * map->aio.data[] buffer to release space in the kernel buffer as fast
  266	 * as possible, via perf_mmap__consume() called from perf_mmap__push().
  267	 *
  268	 * That lets the kernel proceed with storing more profiling data into
  269	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
  270	 *
  271	 * Copying can be done in two steps in case the chunk of profiling data
  272	 * crosses the upper bound of the kernel buffer. In this case we first move
  273	 * part of the data from map->start till the upper bound and then the
  274	 * remainder from the beginning of the kernel buffer till the end of the
 275	 */
 276
 277	if (record__comp_enabled(aio->rec)) {
 278		size = zstd_compress(aio->rec->session, aio->data + aio->size,
 279				     perf_mmap__mmap_len(map) - aio->size,
 280				     buf, size);
 281	} else {
 282		memcpy(aio->data + aio->size, buf, size);
 283	}
 284
 285	if (!aio->size) {
 286		/*
 287		 * Increment map->refcount to guard map->aio.data[] buffer
  288		 * from premature deallocation, because the map object can be
  289		 * released before the aio write request started on the
  290		 * map->aio.data[] buffer completes.
 291		 *
 292		 * perf_mmap__put() is done at record__aio_complete()
 293		 * after started aio request completion or at record__aio_push()
 294		 * if the request failed to start.
 295		 */
 296		perf_mmap__get(map);
 297	}
 298
 299	aio->size += size;
 300
 301	return size;
 302}
 303
 304static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
 305{
 306	int ret, idx;
 307	int trace_fd = rec->session->data->file.fd;
 308	struct record_aio aio = { .rec = rec, .size = 0 };
 309
 310	/*
 311	 * Call record__aio_sync() to wait till map->aio.data[] buffer
 312	 * becomes available after previous aio write operation.
 313	 */
 314
 315	idx = record__aio_sync(map, false);
 316	aio.data = map->aio.data[idx];
 317	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
 318	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
 319		return ret;
 320
 321	rec->samples++;
 322	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
 323	if (!ret) {
 324		*off += aio.size;
 325		rec->bytes_written += aio.size;
 326		if (switch_output_size(rec))
 327			trigger_hit(&switch_output_trigger);
 328	} else {
 329		/*
  330		 * Decrement the map->refcount taken in record__aio_pushfn()
  331		 * if the record__aio_write() operation failed to start; otherwise
  332		 * map->refcount is decremented in record__aio_complete() after
  333		 * the aio write operation finishes successfully.
 334		 */
 335		perf_mmap__put(map);
 336	}
 337
 338	return ret;
 339}
 340
 341static off_t record__aio_get_pos(int trace_fd)
 342{
 343	return lseek(trace_fd, 0, SEEK_CUR);
 344}
 345
 346static void record__aio_set_pos(int trace_fd, off_t pos)
 347{
 348	lseek(trace_fd, pos, SEEK_SET);
 349}
 350
 351static void record__aio_mmap_read_sync(struct record *rec)
 352{
 353	int i;
 354	struct evlist *evlist = rec->evlist;
 355	struct mmap *maps = evlist->mmap;
 356
 357	if (!record__aio_enabled(rec))
 358		return;
 359
 360	for (i = 0; i < evlist->core.nr_mmaps; i++) {
 361		struct mmap *map = &maps[i];
 362
 363		if (map->core.base)
 364			record__aio_sync(map, true);
 365	}
 366}
 367
 368static int nr_cblocks_default = 1;
 369static int nr_cblocks_max = 4;
 370
 371static int record__aio_parse(const struct option *opt,
 372			     const char *str,
 373			     int unset)
 374{
 375	struct record_opts *opts = (struct record_opts *)opt->value;
 376
 377	if (unset) {
 378		opts->nr_cblocks = 0;
 379	} else {
 380		if (str)
 381			opts->nr_cblocks = strtol(str, NULL, 0);
 382		if (!opts->nr_cblocks)
 383			opts->nr_cblocks = nr_cblocks_default;
 384	}
 385
 386	return 0;
 387}
 388#else /* HAVE_AIO_SUPPORT */
 389static int nr_cblocks_max = 0;
 390
 391static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
 392			    off_t *off __maybe_unused)
 393{
 394	return -1;
 395}
 396
 397static off_t record__aio_get_pos(int trace_fd __maybe_unused)
 398{
 399	return -1;
 400}
 401
 402static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
 403{
 404}
 405
 406static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
 407{
 408}
 409#endif
 410
 411static int record__aio_enabled(struct record *rec)
 412{
 413	return rec->opts.nr_cblocks > 0;
 414}
 415
 416#define MMAP_FLUSH_DEFAULT 1
 417static int record__mmap_flush_parse(const struct option *opt,
 418				    const char *str,
 419				    int unset)
 420{
 421	int flush_max;
 422	struct record_opts *opts = (struct record_opts *)opt->value;
 423	static struct parse_tag tags[] = {
 424			{ .tag  = 'B', .mult = 1       },
 425			{ .tag  = 'K', .mult = 1 << 10 },
 426			{ .tag  = 'M', .mult = 1 << 20 },
 427			{ .tag  = 'G', .mult = 1 << 30 },
 428			{ .tag  = 0 },
 429	};
 430
 431	if (unset)
 432		return 0;
 433
 434	if (str) {
 435		opts->mmap_flush = parse_tag_value(str, tags);
 436		if (opts->mmap_flush == (int)-1)
 437			opts->mmap_flush = strtol(str, NULL, 0);
 438	}
 439
 440	if (!opts->mmap_flush)
 441		opts->mmap_flush = MMAP_FLUSH_DEFAULT;
 442
 443	flush_max = evlist__mmap_size(opts->mmap_pages);
 444	flush_max /= 4;
 445	if (opts->mmap_flush > flush_max)
 446		opts->mmap_flush = flush_max;
 447
 448	return 0;
 449}
 450
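/*
 * A minimal sketch of what the B/K/M/G tag table above amounts to for a
 * value like "512K"; parse_size_suffix() is an illustrative stand-in for
 * parse_tag_value(), not the perf helper itself.
 */
#include <stdlib.h>

static long parse_size_suffix(const char *str)
{
	char *end;
	long val = strtol(str, &end, 0);

	switch (*end) {
	case 'B': return val;
	case 'K': return val << 10;
	case 'M': return val << 20;
	case 'G': return val << 30;
	default:  return val;	/* no suffix: plain number */
	}
}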
 451#ifdef HAVE_ZSTD_SUPPORT
 452static unsigned int comp_level_default = 1;
 453
 454static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
 455{
 456	struct record_opts *opts = opt->value;
 457
 458	if (unset) {
 459		opts->comp_level = 0;
 460	} else {
 461		if (str)
 462			opts->comp_level = strtol(str, NULL, 0);
 463		if (!opts->comp_level)
 464			opts->comp_level = comp_level_default;
 465	}
 466
 467	return 0;
 468}
 469#endif
 470static unsigned int comp_level_max = 22;
 471
 472static int record__comp_enabled(struct record *rec)
 473{
 474	return rec->opts.comp_level > 0;
 475}
 476
 477static int process_synthesized_event(struct perf_tool *tool,
 478				     union perf_event *event,
 479				     struct perf_sample *sample __maybe_unused,
 480				     struct machine *machine __maybe_unused)
 481{
 482	struct record *rec = container_of(tool, struct record, tool);
 483	return record__write(rec, NULL, event, event->header.size);
 484}
 485
 486static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
 487{
 488	struct record *rec = to;
 489
 490	if (record__comp_enabled(rec)) {
 491		size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size);
 492		bf   = map->data;
 493	}
 494
 495	rec->samples++;
 496	return record__write(rec, map, bf, size);
 497}
 498
 499static volatile int done;
 500static volatile int signr = -1;
 501static volatile int child_finished;
 502
 503static void sig_handler(int sig)
 504{
 505	if (sig == SIGCHLD)
 506		child_finished = 1;
 507	else
 508		signr = sig;
 509
 510	done = 1;
 511}
 512
 513static void sigsegv_handler(int sig)
 514{
 515	perf_hooks__recover();
 516	sighandler_dump_stack(sig);
 517}
 518
 519static void record__sig_exit(void)
 520{
 521	if (signr == -1)
 522		return;
 523
 524	signal(signr, SIG_DFL);
 525	raise(signr);
 526}
 527
 528#ifdef HAVE_AUXTRACE_SUPPORT
 529
 530static int record__process_auxtrace(struct perf_tool *tool,
 531				    struct mmap *map,
 532				    union perf_event *event, void *data1,
 533				    size_t len1, void *data2, size_t len2)
 534{
 535	struct record *rec = container_of(tool, struct record, tool);
 536	struct perf_data *data = &rec->data;
 537	size_t padding;
 538	u8 pad[8] = {0};
 539
 540	if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
 541		off_t file_offset;
 542		int fd = perf_data__fd(data);
 543		int err;
 544
 545		file_offset = lseek(fd, 0, SEEK_CUR);
 546		if (file_offset == -1)
 547			return -1;
 548		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
 549						     event, file_offset);
 550		if (err)
 551			return err;
 552	}
 553
 554	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
 555	padding = (len1 + len2) & 7;
 556	if (padding)
 557		padding = 8 - padding;
 558
 559	record__write(rec, map, event, event->header.size);
 560	record__write(rec, map, data1, len1);
 561	if (len2)
 562		record__write(rec, map, data2, len2);
 563	record__write(rec, map, &pad, padding);
 564
 565	return 0;
 566}
 567
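/*
 * A small worked version of the pad-to-8 computation above: for
 * len1 + len2 == 13 the padding is 3, keeping the AUX data that follows
 * the event 8-byte aligned (13 + 3 = 16). pad_to_8() is an illustrative
 * helper, not perf API.
 */
#include <stddef.h>

static size_t pad_to_8(size_t len)
{
	size_t rem = len & 7;

	return rem ? 8 - rem : 0;
}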
 568static int record__auxtrace_mmap_read(struct record *rec,
 569				      struct mmap *map)
 570{
 571	int ret;
 572
 573	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
 574				  record__process_auxtrace);
 575	if (ret < 0)
 576		return ret;
 577
 578	if (ret)
 579		rec->samples++;
 580
 581	return 0;
 582}
 583
 584static int record__auxtrace_mmap_read_snapshot(struct record *rec,
 585					       struct mmap *map)
 586{
 587	int ret;
 588
 589	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
 590					   record__process_auxtrace,
 591					   rec->opts.auxtrace_snapshot_size);
 592	if (ret < 0)
 593		return ret;
 594
 595	if (ret)
 596		rec->samples++;
 597
 598	return 0;
 599}
 600
 601static int record__auxtrace_read_snapshot_all(struct record *rec)
 602{
 603	int i;
 604	int rc = 0;
 605
 606	for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
 607		struct mmap *map = &rec->evlist->mmap[i];
 608
 609		if (!map->auxtrace_mmap.base)
 610			continue;
 611
 612		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
 613			rc = -1;
 614			goto out;
 615		}
 616	}
 617out:
 618	return rc;
 619}
 620
 621static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
 622{
 623	pr_debug("Recording AUX area tracing snapshot\n");
 624	if (record__auxtrace_read_snapshot_all(rec) < 0) {
 625		trigger_error(&auxtrace_snapshot_trigger);
 626	} else {
 627		if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
 628			trigger_error(&auxtrace_snapshot_trigger);
 629		else
 630			trigger_ready(&auxtrace_snapshot_trigger);
 631	}
 632}
 633
 634static int record__auxtrace_snapshot_exit(struct record *rec)
 635{
 636	if (trigger_is_error(&auxtrace_snapshot_trigger))
 637		return 0;
 638
 639	if (!auxtrace_record__snapshot_started &&
 640	    auxtrace_record__snapshot_start(rec->itr))
 641		return -1;
 642
 643	record__read_auxtrace_snapshot(rec, true);
 644	if (trigger_is_error(&auxtrace_snapshot_trigger))
 645		return -1;
 646
 647	return 0;
 648}
 649
 650static int record__auxtrace_init(struct record *rec)
 651{
 652	int err;
 653
 654	if (!rec->itr) {
 655		rec->itr = auxtrace_record__init(rec->evlist, &err);
 656		if (err)
 657			return err;
 658	}
 659
 660	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
 661					      rec->opts.auxtrace_snapshot_opts);
 662	if (err)
 663		return err;
 664
 665	return auxtrace_parse_filters(rec->evlist);
 666}
 667
 668#else
 669
 670static inline
 671int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
 672			       struct mmap *map __maybe_unused)
 673{
 674	return 0;
 675}
 676
 677static inline
 678void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
 679				    bool on_exit __maybe_unused)
 680{
 681}
 682
 683static inline
 684int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
 685{
 686	return 0;
 687}
 688
 689static inline
 690int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
 691{
 692	return 0;
 693}
 694
 695static int record__auxtrace_init(struct record *rec __maybe_unused)
 696{
 697	return 0;
 698}
 699
 700#endif
 701
 702static int record__mmap_evlist(struct record *rec,
 703			       struct evlist *evlist)
 704{
 705	struct record_opts *opts = &rec->opts;
 706	char msg[512];
 707
 708	if (opts->affinity != PERF_AFFINITY_SYS)
 709		cpu__setup_cpunode_map();
 710
 711	if (evlist__mmap_ex(evlist, opts->mmap_pages,
 712				 opts->auxtrace_mmap_pages,
 713				 opts->auxtrace_snapshot_mode,
 714				 opts->nr_cblocks, opts->affinity,
 715				 opts->mmap_flush, opts->comp_level) < 0) {
 716		if (errno == EPERM) {
 717			pr_err("Permission error mapping pages.\n"
 718			       "Consider increasing "
 719			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
 720			       "or try again with a smaller value of -m/--mmap_pages.\n"
 721			       "(current value: %u,%u)\n",
 722			       opts->mmap_pages, opts->auxtrace_mmap_pages);
 723			return -errno;
 724		} else {
 725			pr_err("failed to mmap with %d (%s)\n", errno,
 726				str_error_r(errno, msg, sizeof(msg)));
 727			if (errno)
 728				return -errno;
 729			else
 730				return -EINVAL;
 731		}
 732	}
 733	return 0;
 734}
 735
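/*
 * Sizing note: each per-CPU ring buffer is 2^n data pages plus one
 * header page, so -m 128 maps (128 + 1) * 4096 bytes per CPU, assuming a
 * 4 KiB page size; going past /proc/sys/kernel/perf_event_mlock_kb needs
 * privilege or a sysctl bump, which is what the EPERM message above is
 * pointing at.
 */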
 736static int record__mmap(struct record *rec)
 737{
 738	return record__mmap_evlist(rec, rec->evlist);
 739}
 740
 741static int record__open(struct record *rec)
 742{
 743	char msg[BUFSIZ];
 744	struct evsel *pos;
 745	struct evlist *evlist = rec->evlist;
 746	struct perf_session *session = rec->session;
 747	struct record_opts *opts = &rec->opts;
 748	int rc = 0;
 749
 750	/*
 751	 * For initial_delay we need to add a dummy event so that we can track
 752	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
  753	 * real events, the ones requested by the user.
 754	 */
 755	if (opts->initial_delay) {
 756		if (perf_evlist__add_dummy(evlist))
 757			return -ENOMEM;
 758
 759		pos = evlist__first(evlist);
 760		pos->tracking = 0;
 761		pos = evlist__last(evlist);
 762		pos->tracking = 1;
 763		pos->core.attr.enable_on_exec = 1;
 764	}
 765
 766	perf_evlist__config(evlist, opts, &callchain_param);
 767
 768	evlist__for_each_entry(evlist, pos) {
 769try_again:
 770		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
 771			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
 772				if (verbose > 0)
 773					ui__warning("%s\n", msg);
 774				goto try_again;
 775			}
 776			if ((errno == EINVAL || errno == EBADF) &&
 777			    pos->leader != pos &&
 778			    pos->weak_group) {
  779				pos = perf_evlist__reset_weak_group(evlist, pos);
 780				goto try_again;
 781			}
 782			rc = -errno;
 783			perf_evsel__open_strerror(pos, &opts->target,
 784						  errno, msg, sizeof(msg));
 785			ui__error("%s\n", msg);
 786			goto out;
 787		}
 788
 789		pos->supported = true;
 790	}
 791
 792	if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(evlist)) {
 793		pr_warning(
 794"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
 795"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
 796"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
 797"file is not found in the buildid cache or in the vmlinux path.\n\n"
 798"Samples in kernel modules won't be resolved at all.\n\n"
 799"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
 800"even with a suitable vmlinux or kallsyms file.\n\n");
 801	}
 802
 803	if (perf_evlist__apply_filters(evlist, &pos)) {
 804		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
 805			pos->filter, perf_evsel__name(pos), errno,
 806			str_error_r(errno, msg, sizeof(msg)));
 807		rc = -1;
 808		goto out;
 809	}
 810
 811	rc = record__mmap(rec);
 812	if (rc)
 813		goto out;
 814
 815	session->evlist = evlist;
 816	perf_session__set_id_hdr_size(session);
 817out:
 818	return rc;
 819}
 820
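/*
 * A sketch of the attribute bits behind the initial_delay setup in
 * record__open() above, assuming the perf_event_attr layout from
 * <linux/perf_event.h>; init_delayed_dummy() is an illustrative helper,
 * not perf code. A PERF_COUNT_SW_DUMMY event counts nothing but can still
 * carry mmap/comm side-band records while the real events wait.
 */
#include <string.h>
#include <linux/perf_event.h>

static void init_delayed_dummy(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_SOFTWARE;
	attr->config = PERF_COUNT_SW_DUMMY;
	attr->disabled = 1;		/* start off ... */
	attr->enable_on_exec = 1;	/* ... and arm at exec() time */
	attr->mmap = 1;			/* request mmap side-band records */
	attr->comm = 1;			/* request comm side-band records */
}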
 821static int process_sample_event(struct perf_tool *tool,
 822				union perf_event *event,
 823				struct perf_sample *sample,
 824				struct evsel *evsel,
 825				struct machine *machine)
 826{
 827	struct record *rec = container_of(tool, struct record, tool);
 828
 829	if (rec->evlist->first_sample_time == 0)
 830		rec->evlist->first_sample_time = sample->time;
 831
 832	rec->evlist->last_sample_time = sample->time;
 833
 834	if (rec->buildid_all)
 835		return 0;
 836
 837	rec->samples++;
 838	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
 839}
 840
 841static int process_buildids(struct record *rec)
 842{
 843	struct perf_session *session = rec->session;
 844
 845	if (perf_data__size(&rec->data) == 0)
 846		return 0;
 847
 848	/*
  849	 * During this process, it'll load the kernel map and replace
  850	 * dso->long_name with the real pathname it found.  In this case
  851	 * we prefer the vmlinux path like
  852	 *   /lib/modules/3.16.4/build/vmlinux
  853	 *
  854	 * rather than the build-id path (in the debug directory).
 855	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
 856	 */
 857	symbol_conf.ignore_vmlinux_buildid = true;
 858
 859	/*
  860	 * If --buildid-all is given, it marks all DSOs regardless of hits,
  861	 * so there is no need to process samples. But if timestamp_boundary is
  862	 * enabled, it still needs to walk all samples to get the timestamps of
  863	 * the first/last samples.
 864	 */
 865	if (rec->buildid_all && !rec->timestamp_boundary)
 866		rec->tool.sample = NULL;
 867
 868	return perf_session__process_events(session);
 869}
 870
 871static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
 872{
 873	int err;
 874	struct perf_tool *tool = data;
 875	/*
  876	 * For the guest kernel, when processing the record & report
  877	 * subcommands, we arrange the module mmaps prior to the guest kernel
  878	 * mmap and trigger a DSO preload, because guest module symbols are
  879	 * loaded from guest kallsyms by default instead of
  880	 * /lib/modules/XXX/XXX. This avoids missing symbols when the first
  881	 * address is in a module instead of in the guest kernel.
 882	 */
 883	err = perf_event__synthesize_modules(tool, process_synthesized_event,
 884					     machine);
 885	if (err < 0)
 886		pr_err("Couldn't record guest kernel [%d]'s reference"
 887		       " relocation symbol.\n", machine->pid);
 888
 889	/*
  890	 * We use _stext for the guest kernel because the guest kernel's
  891	 * /proc/kallsyms sometimes has no _text.
 892	 */
 893	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
 894						 machine);
 895	if (err < 0)
 896		pr_err("Couldn't record guest kernel [%d]'s reference"
 897		       " relocation symbol.\n", machine->pid);
 898}
 899
 900static struct perf_event_header finished_round_event = {
 901	.size = sizeof(struct perf_event_header),
 902	.type = PERF_RECORD_FINISHED_ROUND,
 903};
 904
 905static void record__adjust_affinity(struct record *rec, struct mmap *map)
 906{
 907	if (rec->opts.affinity != PERF_AFFINITY_SYS &&
 908	    !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
 909		CPU_ZERO(&rec->affinity_mask);
 910		CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
 911		sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
 912	}
 913}
 914
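/*
 * A minimal sketch of the cpu_set_t affinity API used above; pin_self()
 * is an illustrative helper that pins the calling thread to one CPU
 * (pid 0 means the current thread for sched_setaffinity()).
 */
#define _GNU_SOURCE
#include <sched.h>

static int pin_self(int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	return sched_setaffinity(0, sizeof(mask), &mask);
}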
 915static size_t process_comp_header(void *record, size_t increment)
 916{
 917	struct perf_record_compressed *event = record;
 918	size_t size = sizeof(*event);
 919
 920	if (increment) {
 921		event->header.size += increment;
 922		return increment;
 923	}
 924
 925	event->header.type = PERF_RECORD_COMPRESSED;
 926	event->header.size = size;
 927
 928	return size;
 929}
 930
 931static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
 932			    void *src, size_t src_size)
 933{
 934	size_t compressed;
 935	size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
 936
 937	compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
 938						     max_record_size, process_comp_header);
 939
 940	session->bytes_transferred += src_size;
 941	session->bytes_compressed  += compressed;
 942
 943	return compressed;
 944}
 945
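/*
 * A minimal sketch of the plain libzstd streaming calls that perf's
 * zstd_data wrappers build on: a one-shot ZSTD_compressStream2() with
 * ZSTD_e_end, assuming dst is big enough for the whole frame (the return
 * is 0 once the frame has been fully flushed). Level 1 matches
 * comp_level_default above; zstd_compress_once() is an illustrative name.
 */
#include <zstd.h>

static size_t zstd_compress_once(void *dst, size_t dst_size,
				 const void *src, size_t src_size)
{
	ZSTD_CCtx *cctx = ZSTD_createCCtx();
	ZSTD_outBuffer out = { .dst = dst, .size = dst_size, .pos = 0 };
	ZSTD_inBuffer  in  = { .src = src, .size = src_size, .pos = 0 };
	size_t ret;

	ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 1);
	ret = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
	ZSTD_freeCCtx(cctx);

	/* error or truncated frame: report no output */
	return (ZSTD_isError(ret) || ret != 0) ? 0 : out.pos;
}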
 946static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
 947				    bool overwrite, bool synch)
 948{
 949	u64 bytes_written = rec->bytes_written;
 950	int i;
 951	int rc = 0;
 952	struct mmap *maps;
 953	int trace_fd = rec->data.file.fd;
 954	off_t off = 0;
 955
 956	if (!evlist)
 957		return 0;
 958
 959	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
 960	if (!maps)
 961		return 0;
 962
 963	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
 964		return 0;
 965
 966	if (record__aio_enabled(rec))
 967		off = record__aio_get_pos(trace_fd);
 968
 969	for (i = 0; i < evlist->core.nr_mmaps; i++) {
 970		u64 flush = 0;
 971		struct mmap *map = &maps[i];
 972
 973		if (map->core.base) {
 974			record__adjust_affinity(rec, map);
 975			if (synch) {
 976				flush = map->core.flush;
 977				map->core.flush = 1;
 978			}
 979			if (!record__aio_enabled(rec)) {
 980				if (perf_mmap__push(map, rec, record__pushfn) < 0) {
 981					if (synch)
 982						map->core.flush = flush;
 983					rc = -1;
 984					goto out;
 985				}
 986			} else {
 987				if (record__aio_push(rec, map, &off) < 0) {
 988					record__aio_set_pos(trace_fd, off);
 989					if (synch)
 990						map->core.flush = flush;
 991					rc = -1;
 992					goto out;
 993				}
 994			}
 995			if (synch)
 996				map->core.flush = flush;
 997		}
 998
 999		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
1000		    record__auxtrace_mmap_read(rec, map) != 0) {
1001			rc = -1;
1002			goto out;
1003		}
1004	}
1005
1006	if (record__aio_enabled(rec))
1007		record__aio_set_pos(trace_fd, off);
1008
1009	/*
1010	 * Mark the round finished in case we wrote
1011	 * at least one event.
1012	 */
1013	if (bytes_written != rec->bytes_written)
1014		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));
1015
1016	if (overwrite)
1017		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
1018out:
1019	return rc;
1020}
1021
1022static int record__mmap_read_all(struct record *rec, bool synch)
1023{
1024	int err;
1025
1026	err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
1027	if (err)
1028		return err;
1029
1030	return record__mmap_read_evlist(rec, rec->evlist, true, synch);
1031}
1032
1033static void record__init_features(struct record *rec)
1034{
1035	struct perf_session *session = rec->session;
1036	int feat;
1037
1038	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1039		perf_header__set_feat(&session->header, feat);
1040
1041	if (rec->no_buildid)
1042		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1043
1044	if (!have_tracepoints(&rec->evlist->core.entries))
1045		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1046
1047	if (!rec->opts.branch_stack)
1048		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
1049
1050	if (!rec->opts.full_auxtrace)
1051		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
1052
1053	if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
1054		perf_header__clear_feat(&session->header, HEADER_CLOCKID);
1055
1056	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
1057	if (!record__comp_enabled(rec))
1058		perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
1059
1060	perf_header__clear_feat(&session->header, HEADER_STAT);
1061}
1062
1063static void
1064record__finish_output(struct record *rec)
1065{
1066	struct perf_data *data = &rec->data;
1067	int fd = perf_data__fd(data);
1068
1069	if (data->is_pipe)
1070		return;
1071
1072	rec->session->header.data_size += rec->bytes_written;
1073	data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
1074
1075	if (!rec->no_buildid) {
1076		process_buildids(rec);
1077
1078		if (rec->buildid_all)
1079			dsos__hit_all(rec->session);
1080	}
1081	perf_session__write_header(rec->session, rec->evlist, fd, true);
1082
1083	return;
1084}
1085
1086static int record__synthesize_workload(struct record *rec, bool tail)
1087{
1088	int err;
1089	struct perf_thread_map *thread_map;
1090
1091	if (rec->opts.tail_synthesize != tail)
1092		return 0;
1093
1094	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
1095	if (thread_map == NULL)
1096		return -1;
1097
1098	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
1099						 process_synthesized_event,
1100						 &rec->session->machines.host,
1101						 rec->opts.sample_address);
1102	perf_thread_map__put(thread_map);
1103	return err;
1104}
1105
1106static int record__synthesize(struct record *rec, bool tail);
1107
1108static int
1109record__switch_output(struct record *rec, bool at_exit)
1110{
1111	struct perf_data *data = &rec->data;
1112	int fd, err;
1113	char *new_filename;
1114
 1115	/* Same Size:      "2015122520103046" */
1116	char timestamp[] = "InvalidTimestamp";
1117
1118	record__aio_mmap_read_sync(rec);
1119
1120	record__synthesize(rec, true);
1121	if (target__none(&rec->opts.target))
1122		record__synthesize_workload(rec, true);
1123
1124	rec->samples = 0;
1125	record__finish_output(rec);
1126	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
1127	if (err) {
1128		pr_err("Failed to get current timestamp\n");
1129		return -EINVAL;
1130	}
1131
1132	fd = perf_data__switch(data, timestamp,
1133				    rec->session->header.data_offset,
1134				    at_exit, &new_filename);
1135	if (fd >= 0 && !at_exit) {
1136		rec->bytes_written = 0;
1137		rec->session->header.data_size = 0;
1138	}
1139
1140	if (!quiet)
1141		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
1142			data->path, timestamp);
1143
1144	if (rec->switch_output.num_files) {
1145		int n = rec->switch_output.cur_file + 1;
1146
1147		if (n >= rec->switch_output.num_files)
1148			n = 0;
1149		rec->switch_output.cur_file = n;
1150		if (rec->switch_output.filenames[n]) {
1151			remove(rec->switch_output.filenames[n]);
1152			zfree(&rec->switch_output.filenames[n]);
1153		}
1154		rec->switch_output.filenames[n] = new_filename;
1155	} else {
1156		free(new_filename);
1157	}
1158
1159	/* Output tracking events */
1160	if (!at_exit) {
1161		record__synthesize(rec, false);
1162
1163		/*
1164		 * In 'perf record --switch-output' without -a,
1165		 * record__synthesize() in record__switch_output() won't
1166		 * generate tracking events because there's no thread_map
 1167		 * in the evlist, which causes the newly created perf.data
 1168		 * to lack map and comm information.
1169		 * Create a fake thread_map and directly call
1170		 * perf_event__synthesize_thread_map() for those events.
1171		 */
1172		if (target__none(&rec->opts.target))
1173			record__synthesize_workload(rec, false);
1174	}
1175	return fd;
1176}
1177
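/*
 * The bounded ring of rotated output files above, reduced to its index
 * arithmetic; next_output_slot() is an illustrative helper, not perf API.
 */
static int next_output_slot(int cur_file, int num_files)
{
	int n = cur_file + 1;

	return n >= num_files ? 0 : n;	/* wrap: the oldest file is replaced */
}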
1178static volatile int workload_exec_errno;
1179
1180/*
1181 * perf_evlist__prepare_workload will send a SIGUSR1
 1182 * if the fork fails, since we asked for it by setting its
1183 * want_signal to true.
1184 */
1185static void workload_exec_failed_signal(int signo __maybe_unused,
1186					siginfo_t *info,
1187					void *ucontext __maybe_unused)
1188{
1189	workload_exec_errno = info->si_value.sival_int;
1190	done = 1;
1191	child_finished = 1;
1192}
1193
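/*
 * A minimal sketch of the SA_SIGINFO plumbing the handler above relies
 * on: a handler installed with sa_sigaction can read the integer queued
 * by sigqueue() from info->si_value. Names here are illustrative.
 */
#include <signal.h>
#include <string.h>

static volatile sig_atomic_t queued_errno;

static void usr1_handler(int sig, siginfo_t *info, void *uctx)
{
	(void)sig;
	(void)uctx;
	queued_errno = info->si_value.sival_int;
}

static int install_usr1_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = usr1_handler;
	sa.sa_flags = SA_SIGINFO;
	return sigaction(SIGUSR1, &sa, NULL);
}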
1194static void snapshot_sig_handler(int sig);
1195static void alarm_sig_handler(int sig);
1196
1197static const struct perf_event_mmap_page *
1198perf_evlist__pick_pc(struct evlist *evlist)
1199{
1200	if (evlist) {
1201		if (evlist->mmap && evlist->mmap[0].core.base)
1202			return evlist->mmap[0].core.base;
1203		if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
1204			return evlist->overwrite_mmap[0].core.base;
1205	}
1206	return NULL;
1207}
1208
1209static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1210{
1211	const struct perf_event_mmap_page *pc;
1212
1213	pc = perf_evlist__pick_pc(rec->evlist);
1214	if (pc)
1215		return pc;
1216	return NULL;
1217}
1218
1219static int record__synthesize(struct record *rec, bool tail)
1220{
1221	struct perf_session *session = rec->session;
1222	struct machine *machine = &session->machines.host;
1223	struct perf_data *data = &rec->data;
1224	struct record_opts *opts = &rec->opts;
1225	struct perf_tool *tool = &rec->tool;
1226	int fd = perf_data__fd(data);
1227	int err = 0;
1228
1229	if (rec->opts.tail_synthesize != tail)
1230		return 0;
1231
1232	if (data->is_pipe) {
1233		/*
1234		 * We need to synthesize events first, because some
 1235		 * features work on top of them (on the report side).
1236		 */
1237		err = perf_event__synthesize_attrs(tool, rec->evlist,
1238						   process_synthesized_event);
1239		if (err < 0) {
1240			pr_err("Couldn't synthesize attrs.\n");
1241			goto out;
1242		}
1243
1244		err = perf_event__synthesize_features(tool, session, rec->evlist,
1245						      process_synthesized_event);
1246		if (err < 0) {
1247			pr_err("Couldn't synthesize features.\n");
1248			return err;
1249		}
1250
1251		if (have_tracepoints(&rec->evlist->core.entries)) {
1252			/*
1253			 * FIXME err <= 0 here actually means that
 1254			 * there were no tracepoints, so it's not really
1255			 * an error, just that we don't need to
1256			 * synthesize anything.  We really have to
1257			 * return this more properly and also
1258			 * propagate errors that now are calling die()
1259			 */
1260			err = perf_event__synthesize_tracing_data(tool,	fd, rec->evlist,
1261								  process_synthesized_event);
1262			if (err <= 0) {
1263				pr_err("Couldn't record tracing data.\n");
1264				goto out;
1265			}
1266			rec->bytes_written += err;
1267		}
1268	}
1269
1270	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
1271					  process_synthesized_event, machine);
1272	if (err)
1273		goto out;
1274
1275	if (rec->opts.full_auxtrace) {
1276		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
1277					session, process_synthesized_event);
1278		if (err)
1279			goto out;
1280	}
1281
1282	if (!perf_evlist__exclude_kernel(rec->evlist)) {
1283		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
1284							 machine);
1285		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
1286				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1287				   "Check /proc/kallsyms permission or run as root.\n");
1288
1289		err = perf_event__synthesize_modules(tool, process_synthesized_event,
1290						     machine);
1291		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
1292				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
1293				   "Check /proc/modules permission or run as root.\n");
1294	}
1295
1296	if (perf_guest) {
1297		machines__process_guests(&session->machines,
1298					 perf_event__synthesize_guest_os, tool);
1299	}
1300
1301	err = perf_event__synthesize_extra_attr(&rec->tool,
1302						rec->evlist,
1303						process_synthesized_event,
1304						data->is_pipe);
1305	if (err)
1306		goto out;
1307
1308	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
1309						 process_synthesized_event,
1310						NULL);
1311	if (err < 0) {
1312		pr_err("Couldn't synthesize thread map.\n");
1313		return err;
1314	}
1315
1316	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
1317					     process_synthesized_event, NULL);
1318	if (err < 0) {
1319		pr_err("Couldn't synthesize cpu map.\n");
1320		return err;
1321	}
1322
1323	err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
1324						machine, opts);
1325	if (err < 0)
1326		pr_warning("Couldn't synthesize bpf events.\n");
1327
1328	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
1329					    process_synthesized_event, opts->sample_address,
1330					    1);
1331out:
1332	return err;
1333}
1334
1335static int __cmd_record(struct record *rec, int argc, const char **argv)
1336{
1337	int err;
1338	int status = 0;
1339	unsigned long waking = 0;
1340	const bool forks = argc > 0;
1341	struct perf_tool *tool = &rec->tool;
1342	struct record_opts *opts = &rec->opts;
1343	struct perf_data *data = &rec->data;
1344	struct perf_session *session;
1345	bool disabled = false, draining = false;
1346	struct evlist *sb_evlist = NULL;
1347	int fd;
1348	float ratio = 0;
1349
1350	atexit(record__sig_exit);
1351	signal(SIGCHLD, sig_handler);
1352	signal(SIGINT, sig_handler);
1353	signal(SIGTERM, sig_handler);
1354	signal(SIGSEGV, sigsegv_handler);
1355
1356	if (rec->opts.record_namespaces)
1357		tool->namespace_events = true;
1358
1359	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
1360		signal(SIGUSR2, snapshot_sig_handler);
1361		if (rec->opts.auxtrace_snapshot_mode)
1362			trigger_on(&auxtrace_snapshot_trigger);
1363		if (rec->switch_output.enabled)
1364			trigger_on(&switch_output_trigger);
1365	} else {
1366		signal(SIGUSR2, SIG_IGN);
1367	}
1368
1369	session = perf_session__new(data, false, tool);
1370	if (IS_ERR(session)) {
1371		pr_err("Perf session creation failed.\n");
1372		return PTR_ERR(session);
1373	}
1374
1375	fd = perf_data__fd(data);
1376	rec->session = session;
1377
1378	if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
1379		pr_err("Compression initialization failed.\n");
1380		return -1;
1381	}
1382
1383	session->header.env.comp_type  = PERF_COMP_ZSTD;
1384	session->header.env.comp_level = rec->opts.comp_level;
1385
1386	record__init_features(rec);
1387
1388	if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1389		session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
1390
1391	if (forks) {
1392		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
1393						    argv, data->is_pipe,
1394						    workload_exec_failed_signal);
1395		if (err < 0) {
1396			pr_err("Couldn't run the workload!\n");
1397			status = err;
1398			goto out_delete_session;
1399		}
1400	}
1401
1402	/*
 1403	 * If we have just a single event and are sending data
 1404	 * through a pipe, we need to force ID allocation,
 1405	 * because we synthesize the event name through the pipe
 1406	 * and need the ID for that.
1407	 */
1408	if (data->is_pipe && rec->evlist->core.nr_entries == 1)
1409		rec->opts.sample_id = true;
1410
1411	if (record__open(rec) != 0) {
1412		err = -1;
1413		goto out_child;
1414	}
1415	session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
1416
1417	err = bpf__apply_obj_config();
1418	if (err) {
1419		char errbuf[BUFSIZ];
1420
1421		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
1422		pr_err("ERROR: Apply config to BPF failed: %s\n",
1423			 errbuf);
1424		goto out_child;
1425	}
1426
1427	/*
1428	 * Normally perf_session__new would do this, but it doesn't have the
1429	 * evlist.
1430	 */
1431	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
1432		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
1433		rec->tool.ordered_events = false;
1434	}
1435
1436	if (!rec->evlist->nr_groups)
1437		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
1438
1439	if (data->is_pipe) {
1440		err = perf_header__write_pipe(fd);
1441		if (err < 0)
1442			goto out_child;
1443	} else {
1444		err = perf_session__write_header(session, rec->evlist, fd, false);
1445		if (err < 0)
1446			goto out_child;
1447	}
1448
1449	if (!rec->no_buildid
1450	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
1451		pr_err("Couldn't generate buildids. "
1452		       "Use --no-buildid to profile anyway.\n");
1453		err = -1;
1454		goto out_child;
1455	}
1456
1457	if (!opts->no_bpf_event)
1458		bpf_event__add_sb_event(&sb_evlist, &session->header.env);
1459
1460	if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
1461		pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1462		opts->no_bpf_event = true;
1463	}
1464
1465	err = record__synthesize(rec, false);
1466	if (err < 0)
1467		goto out_child;
1468
1469	if (rec->realtime_prio) {
1470		struct sched_param param;
1471
1472		param.sched_priority = rec->realtime_prio;
1473		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
1474			pr_err("Could not set realtime priority.\n");
1475			err = -1;
1476			goto out_child;
1477		}
1478	}
1479
1480	/*
1481	 * When perf is starting the traced process, all the events
1482	 * (apart from group members) have enable_on_exec=1 set,
1483	 * so don't spoil it by prematurely enabling them.
1484	 */
1485	if (!target__none(&opts->target) && !opts->initial_delay)
1486		evlist__enable(rec->evlist);
1487
1488	/*
1489	 * Let the child rip
1490	 */
1491	if (forks) {
1492		struct machine *machine = &session->machines.host;
1493		union perf_event *event;
1494		pid_t tgid;
1495
1496		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
1497		if (event == NULL) {
1498			err = -ENOMEM;
1499			goto out_child;
1500		}
1501
1502		/*
 1503		 * Some H/W events are generated before the COMM event,
 1504		 * which is emitted during exec(), so perf script
 1505		 * cannot see a correct process name for those events.
 1506		 * Synthesize a COMM event to prevent it.
1507		 */
1508		tgid = perf_event__synthesize_comm(tool, event,
1509						   rec->evlist->workload.pid,
1510						   process_synthesized_event,
1511						   machine);
1512		free(event);
1513
1514		if (tgid == -1)
1515			goto out_child;
1516
1517		event = malloc(sizeof(event->namespaces) +
1518			       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1519			       machine->id_hdr_size);
1520		if (event == NULL) {
1521			err = -ENOMEM;
1522			goto out_child;
1523		}
1524
1525		/*
1526		 * Synthesize NAMESPACES event for the command specified.
1527		 */
1528		perf_event__synthesize_namespaces(tool, event,
1529						  rec->evlist->workload.pid,
1530						  tgid, process_synthesized_event,
1531						  machine);
1532		free(event);
1533
1534		perf_evlist__start_workload(rec->evlist);
1535	}
1536
1537	if (opts->initial_delay) {
1538		usleep(opts->initial_delay * USEC_PER_MSEC);
1539		evlist__enable(rec->evlist);
1540	}
1541
1542	trigger_ready(&auxtrace_snapshot_trigger);
1543	trigger_ready(&switch_output_trigger);
1544	perf_hooks__invoke_record_start();
1545	for (;;) {
1546		unsigned long long hits = rec->samples;
1547
1548		/*
 1549		 * rec->evlist->bkw_mmap_state may be
 1550		 * BKW_MMAP_EMPTY here: when done == true and
 1551		 * hits != rec->samples in the previous round.
 1552		 *
 1553		 * perf_evlist__toggle_bkw_mmap ensures we never
1554		 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1555		 */
1556		if (trigger_is_hit(&switch_output_trigger) || done || draining)
1557			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1558
1559		if (record__mmap_read_all(rec, false) < 0) {
1560			trigger_error(&auxtrace_snapshot_trigger);
1561			trigger_error(&switch_output_trigger);
1562			err = -1;
1563			goto out_child;
1564		}
1565
1566		if (auxtrace_record__snapshot_started) {
1567			auxtrace_record__snapshot_started = 0;
1568			if (!trigger_is_error(&auxtrace_snapshot_trigger))
1569				record__read_auxtrace_snapshot(rec, false);
1570			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
1571				pr_err("AUX area tracing snapshot failed\n");
1572				err = -1;
1573				goto out_child;
1574			}
1575		}
1576
1577		if (trigger_is_hit(&switch_output_trigger)) {
1578			/*
1579			 * If switch_output_trigger is hit, the data in
1580			 * overwritable ring buffer should have been collected,
1581			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1582			 *
1583			 * If SIGUSR2 raise after or during record__mmap_read_all(),
1584			 * record__mmap_read_all() didn't collect data from
1585			 * overwritable ring buffer. Read again.
1586			 */
1587			if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1588				continue;
1589			trigger_ready(&switch_output_trigger);
1590
1591			/*
1592			 * Reenable events in overwrite ring buffer after
1593			 * record__mmap_read_all(): we should have collected
1594			 * data from it.
1595			 */
1596			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1597
1598			if (!quiet)
1599				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1600					waking);
1601			waking = 0;
1602			fd = record__switch_output(rec, false);
1603			if (fd < 0) {
1604				pr_err("Failed to switch to new file\n");
1605				trigger_error(&switch_output_trigger);
1606				err = fd;
1607				goto out_child;
1608			}
1609
1610			/* re-arm the alarm */
1611			if (rec->switch_output.time)
1612				alarm(rec->switch_output.time);
1613		}
1614
1615		if (hits == rec->samples) {
1616			if (done || draining)
1617				break;
1618			err = evlist__poll(rec->evlist, -1);
1619			/*
 1620			 * Propagate the error only if there is one. Ignore a positive
 1621			 * number of returned events and interrupt errors.
1622			 */
1623			if (err > 0 || (err < 0 && errno == EINTR))
1624				err = 0;
1625			waking++;
1626
1627			if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1628				draining = true;
1629		}
1630
1631		/*
1632		 * When perf is starting the traced process, at the end events
1633		 * die with the process and we wait for that. Thus no need to
1634		 * disable events in this case.
1635		 */
1636		if (done && !disabled && !target__none(&opts->target)) {
1637			trigger_off(&auxtrace_snapshot_trigger);
1638			evlist__disable(rec->evlist);
1639			disabled = true;
1640		}
1641	}
1642
1643	trigger_off(&auxtrace_snapshot_trigger);
1644	trigger_off(&switch_output_trigger);
1645
1646	if (opts->auxtrace_snapshot_on_exit)
1647		record__auxtrace_snapshot_exit(rec);
1648
1649	if (forks && workload_exec_errno) {
1650		char msg[STRERR_BUFSIZE];
1651		const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
1652		pr_err("Workload failed: %s\n", emsg);
1653		err = -1;
1654		goto out_child;
1655	}
1656
1657	if (!quiet)
1658		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
1659
1660	if (target__none(&rec->opts.target))
1661		record__synthesize_workload(rec, true);
1662
1663out_child:
1664	record__mmap_read_all(rec, true);
1665	record__aio_mmap_read_sync(rec);
1666
1667	if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
1668		ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
1669		session->header.env.comp_ratio = ratio + 0.5;
1670	}
1671
1672	if (forks) {
1673		int exit_status;
1674
1675		if (!child_finished)
1676			kill(rec->evlist->workload.pid, SIGTERM);
1677
1678		wait(&exit_status);
1679
1680		if (err < 0)
1681			status = err;
1682		else if (WIFEXITED(exit_status))
1683			status = WEXITSTATUS(exit_status);
1684		else if (WIFSIGNALED(exit_status))
1685			signr = WTERMSIG(exit_status);
1686	} else
1687		status = err;
1688
1689	record__synthesize(rec, true);
1690	/* this will be recalculated during process_buildids() */
1691	rec->samples = 0;
1692
1693	if (!err) {
1694		if (!rec->timestamp_filename) {
1695			record__finish_output(rec);
1696		} else {
1697			fd = record__switch_output(rec, true);
1698			if (fd < 0) {
1699				status = fd;
1700				goto out_delete_session;
1701			}
1702		}
1703	}
1704
1705	perf_hooks__invoke_record_end();
1706
1707	if (!err && !quiet) {
1708		char samples[128];
1709		const char *postfix = rec->timestamp_filename ?
1710					".<timestamp>" : "";
1711
1712		if (rec->samples && !rec->opts.full_auxtrace)
1713			scnprintf(samples, sizeof(samples),
1714				  " (%" PRIu64 " samples)", rec->samples);
1715		else
1716			samples[0] = '\0';
1717
1718		fprintf(stderr,	"[ perf record: Captured and wrote %.3f MB %s%s%s",
1719			perf_data__size(data) / 1024.0 / 1024.0,
1720			data->path, postfix, samples);
1721		if (ratio) {
1722			fprintf(stderr,	", compressed (original %.3f MB, ratio is %.3f)",
1723					rec->session->bytes_transferred / 1024.0 / 1024.0,
1724					ratio);
1725		}
1726		fprintf(stderr, " ]\n");
1727	}
1728
1729out_delete_session:
1730	zstd_fini(&session->zstd_data);
1731	perf_session__delete(session);
1732
1733	if (!opts->no_bpf_event)
1734		perf_evlist__stop_sb_thread(sb_evlist);
1735	return status;
1736}
1737
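/*
 * Example invocations exercising the main paths above, using documented
 * perf-record options:
 *
 *   perf record -e cycles -- ./workload   # fork + enable_on_exec path
 *   perf record -a --switch-output=1s     # time-based output rotation
 *   perf record -z -- ./workload          # zstd-compressed perf.data
 *   perf record -D 100 -- ./workload      # initial_delay of 100 ms
 */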
1738static void callchain_debug(struct callchain_param *callchain)
1739{
1740	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
1741
1742	pr_debug("callchain: type %s\n", str[callchain->record_mode]);
1743
1744	if (callchain->record_mode == CALLCHAIN_DWARF)
1745		pr_debug("callchain: stack dump size %d\n",
1746			 callchain->dump_size);
1747}
1748
1749int record_opts__parse_callchain(struct record_opts *record,
1750				 struct callchain_param *callchain,
1751				 const char *arg, bool unset)
1752{
1753	int ret;
1754	callchain->enabled = !unset;
1755
1756	/* --no-call-graph */
1757	if (unset) {
1758		callchain->record_mode = CALLCHAIN_NONE;
1759		pr_debug("callchain: disabled\n");
1760		return 0;
1761	}
1762
1763	ret = parse_callchain_record_opt(arg, callchain);
1764	if (!ret) {
1765		/* Enable data address sampling for DWARF unwind. */
1766		if (callchain->record_mode == CALLCHAIN_DWARF)
1767			record->sample_address = true;
1768		callchain_debug(callchain);
1769	}
1770
1771	return ret;
1772}
1773
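/*
 * Example values for the option parsed above, per the documented
 * --call-graph syntax:
 *
 *   perf record --call-graph fp -- ./workload
 *   perf record --call-graph dwarf,8192 -- ./workload   # sets dump_size
 *   perf record --call-graph lbr -- ./workload
 */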
1774int record_parse_callchain_opt(const struct option *opt,
1775			       const char *arg,
1776			       int unset)
1777{
1778	return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
1779}
1780
1781int record_callchain_opt(const struct option *opt,
1782			 const char *arg __maybe_unused,
1783			 int unset __maybe_unused)
1784{
1785	struct callchain_param *callchain = opt->value;
1786
1787	callchain->enabled = true;
1788
1789	if (callchain->record_mode == CALLCHAIN_NONE)
1790		callchain->record_mode = CALLCHAIN_FP;
1791
1792	callchain_debug(callchain);
1793	return 0;
1794}
1795
1796static int perf_record_config(const char *var, const char *value, void *cb)
1797{
1798	struct record *rec = cb;
1799
1800	if (!strcmp(var, "record.build-id")) {
1801		if (!strcmp(value, "cache"))
1802			rec->no_buildid_cache = false;
1803		else if (!strcmp(value, "no-cache"))
1804			rec->no_buildid_cache = true;
1805		else if (!strcmp(value, "skip"))
1806			rec->no_buildid = true;
1807		else
1808			return -1;
1809		return 0;
1810	}
1811	if (!strcmp(var, "record.call-graph")) {
1812		var = "call-graph.record-mode";
1813		return perf_default_config(var, value, cb);
1814	}
1815#ifdef HAVE_AIO_SUPPORT
1816	if (!strcmp(var, "record.aio")) {
1817		rec->opts.nr_cblocks = strtol(value, NULL, 0);
1818		if (!rec->opts.nr_cblocks)
1819			rec->opts.nr_cblocks = nr_cblocks_default;
1820	}
1821#endif
1822
1823	return 0;
1824}
1825
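/*
 * A ~/.perfconfig fragment exercising the keys handled above:
 *
 *   [record]
 *       build-id = cache      # or no-cache / skip
 *       call-graph = dwarf    # forwarded as call-graph.record-mode
 *       aio = 4               # nr_cblocks, needs HAVE_AIO_SUPPORT
 */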
1826struct clockid_map {
1827	const char *name;
1828	int clockid;
1829};
1830
1831#define CLOCKID_MAP(n, c)	\
1832	{ .name = n, .clockid = (c), }
1833
1834#define CLOCKID_END	{ .name = NULL, }
1835
1836
1837/*
1838 * Add the missing ones, we need to build on many distros...
1839 */
1840#ifndef CLOCK_MONOTONIC_RAW
1841#define CLOCK_MONOTONIC_RAW 4
1842#endif
1843#ifndef CLOCK_BOOTTIME
1844#define CLOCK_BOOTTIME 7
1845#endif
1846#ifndef CLOCK_TAI
1847#define CLOCK_TAI 11
1848#endif
1849
1850static const struct clockid_map clockids[] = {
1851	/* available for all events, NMI safe */
1852	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1853	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1854
1855	/* available for some events */
1856	CLOCKID_MAP("realtime", CLOCK_REALTIME),
1857	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1858	CLOCKID_MAP("tai", CLOCK_TAI),
1859
1860	/* available for the lazy */
1861	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1862	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1863	CLOCKID_MAP("real", CLOCK_REALTIME),
1864	CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1865
1866	CLOCKID_END,
1867};
1868
1869static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
1870{
1871	struct timespec res;
1872
1873	*res_ns = 0;
1874	if (!clock_getres(clk_id, &res))
1875		*res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
1876	else
1877		pr_warning("WARNING: Failed to determine specified clock resolution.\n");
1878
1879	return 0;
1880}
1881
1882static int parse_clockid(const struct option *opt, const char *str, int unset)
1883{
1884	struct record_opts *opts = (struct record_opts *)opt->value;
1885	const struct clockid_map *cm;
1886	const char *ostr = str;
1887
1888	if (unset) {
1889		opts->use_clockid = 0;
1890		return 0;
1891	}
1892
1893	/* no arg passed */
1894	if (!str)
1895		return 0;
1896
1897	/* no setting it twice */
1898	if (opts->use_clockid)
1899		return -1;
1900
1901	opts->use_clockid = true;
1902
1903	/* if it's a number, we're done */
1904	if (sscanf(str, "%d", &opts->clockid) == 1)
1905		return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
1906
1907	/* allow a "CLOCK_" prefix to the name */
1908	if (!strncasecmp(str, "CLOCK_", 6))
1909		str += 6;
1910
1911	for (cm = clockids; cm->name; cm++) {
1912		if (!strcasecmp(str, cm->name)) {
1913			opts->clockid = cm->clockid;
1914			return get_clockid_res(opts->clockid,
1915					       &opts->clockid_res_ns);
1916		}
1917	}
1918
1919	opts->use_clockid = false;
1920	ui__warning("unknown clockid %s, check man page\n", ostr);
1921	return -1;
1922}
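/*
 * Example invocations accepted by parse_clockid() above (a sketch): a raw
 * clockid number, a name from clockids[], or the same name with a "CLOCK_"
 * prefix all resolve to CLOCK_MONOTONIC_RAW here:
 *
 *   perf record -k 4 -- ./workload
 *   perf record -k monotonic_raw -- ./workload
 *   perf record -k CLOCK_MONOTONIC_RAW -- ./workload
 */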
1923
1924static int record__parse_affinity(const struct option *opt, const char *str, int unset)
1925{
1926	struct record_opts *opts = (struct record_opts *)opt->value;
1927
1928	if (unset || !str)
1929		return 0;
1930
1931	if (!strcasecmp(str, "node"))
1932		opts->affinity = PERF_AFFINITY_NODE;
1933	else if (!strcasecmp(str, "cpu"))
1934		opts->affinity = PERF_AFFINITY_CPU;
1935
1936	return 0;
1937}
1938
1939static int record__parse_mmap_pages(const struct option *opt,
1940				    const char *str,
1941				    int unset __maybe_unused)
1942{
1943	struct record_opts *opts = opt->value;
1944	char *s, *p;
1945	unsigned int mmap_pages;
1946	int ret;
1947
1948	if (!str)
1949		return -EINVAL;
1950
1951	s = strdup(str);
1952	if (!s)
1953		return -ENOMEM;
1954
1955	p = strchr(s, ',');
1956	if (p)
1957		*p = '\0';
1958
1959	if (*s) {
1960		ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1961		if (ret)
1962			goto out_free;
1963		opts->mmap_pages = mmap_pages;
1964	}
1965
1966	if (!p) {
1967		ret = 0;
1968		goto out_free;
1969	}
1970
1971	ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1972	if (ret)
1973		goto out_free;
1974
1975	opts->auxtrace_mmap_pages = mmap_pages;
1976
1977out_free:
1978	free(s);
1979	return ret;
1980}
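/*
 * Example of the "pages[,pages]" form parsed above, where the second value
 * sizes the AUX area tracing mmap (a sketch; the intel_pt// event assumes
 * suitable hardware):
 *
 *   perf record -m 512 -- ./workload
 *   perf record -e intel_pt// -m 512,128 -- ./workload
 */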
1981
1982static void switch_output_size_warn(struct record *rec)
1983{
1984	u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
1985	struct switch_output *s = &rec->switch_output;
1986
1987	wakeup_size /= 2;
1988
1989	if (s->size < wakeup_size) {
1990		char buf[100];
1991
1992		unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1993		pr_warning("WARNING: switch-output data size lower than "
1994			   "wakeup kernel buffer size (%s), "
1995			   "expect bigger perf.data sizes\n", buf);
1996	}
1997}
1998
1999static int switch_output_setup(struct record *rec)
2000{
2001	struct switch_output *s = &rec->switch_output;
2002	static struct parse_tag tags_size[] = {
2003		{ .tag  = 'B', .mult = 1       },
2004		{ .tag  = 'K', .mult = 1 << 10 },
2005		{ .tag  = 'M', .mult = 1 << 20 },
2006		{ .tag  = 'G', .mult = 1 << 30 },
2007		{ .tag  = 0 },
2008	};
2009	static struct parse_tag tags_time[] = {
2010		{ .tag  = 's', .mult = 1        },
2011		{ .tag  = 'm', .mult = 60       },
2012		{ .tag  = 'h', .mult = 60*60    },
2013		{ .tag  = 'd', .mult = 60*60*24 },
2014		{ .tag  = 0 },
2015	};
2016	unsigned long val;
2017
2018	if (!s->set)
2019		return 0;
2020
2021	if (!strcmp(s->str, "signal")) {
2022		s->signal = true;
2023		pr_debug("switch-output with SIGUSR2 signal\n");
2024		goto enabled;
2025	}
2026
2027	val = parse_tag_value(s->str, tags_size);
2028	if (val != (unsigned long) -1) {
2029		s->size = val;
2030		pr_debug("switch-output with %s size threshold\n", s->str);
2031		goto enabled;
2032	}
2033
2034	val = parse_tag_value(s->str, tags_time);
2035	if (val != (unsigned long) -1) {
2036		s->time = val;
2037		pr_debug("switch-output with %s time threshold (%lu seconds)\n",
2038			 s->str, s->time);
2039		goto enabled;
2040	}
2041
2042	return -1;
2043
2044enabled:
2045	rec->timestamp_filename = true;
2046	s->enabled              = true;
2047
2048	if (s->size && !rec->opts.no_buffering)
2049		switch_output_size_warn(rec);
2050
2051	return 0;
2052}
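/*
 * The three --switch-output forms handled by switch_output_setup() above
 * (a sketch; the unit characters come from tags_size[]/tags_time[]):
 *
 *   perf record --switch-output -- ./workload        (rotate on SIGUSR2)
 *   perf record --switch-output=100M -- ./workload   (rotate per 100MB)
 *   perf record --switch-output=30s -- ./workload    (rotate per 30s)
 */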
2053
2054static const char * const __record_usage[] = {
2055	"perf record [<options>] [<command>]",
2056	"perf record [<options>] -- <command> [<options>]",
2057	NULL
2058};
2059const char * const *record_usage = __record_usage;
2060
2061/*
2062 * XXX Ideally would be local to cmd_record() and passed to a record__new
2063 * because we need to have access to it in record__exit, that is called
2064 * after cmd_record() exits, but since record_options need to be accessible to
2065 * builtin-script, leave it here.
2066 *
2067 * At least we don't touch it in all the other functions here directly.
2068 *
2069 * Just say no to tons of global variables, sigh.
2070 */
2071static struct record record = {
2072	.opts = {
2073		.sample_time	     = true,
2074		.mmap_pages	     = UINT_MAX,
2075		.user_freq	     = UINT_MAX,
2076		.user_interval	     = ULLONG_MAX,
2077		.freq		     = 4000,
2078		.target		     = {
2079			.uses_mmap   = true,
2080			.default_per_cpu = true,
2081		},
2082		.mmap_flush          = MMAP_FLUSH_DEFAULT,
2083	},
2084	.tool = {
2085		.sample		= process_sample_event,
2086		.fork		= perf_event__process_fork,
2087		.exit		= perf_event__process_exit,
2088		.comm		= perf_event__process_comm,
2089		.namespaces	= perf_event__process_namespaces,
2090		.mmap		= perf_event__process_mmap,
2091		.mmap2		= perf_event__process_mmap2,
2092		.ordered_events	= true,
2093	},
2094};
2095
2096const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
2097	"\n\t\t\t\tDefault: fp";
2098
2099static bool dry_run;
2100
2101/*
2102 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
2103 * with it and switch to use the library functions in perf_evlist that came
2104 * from builtin-record.c, i.e. use record_opts,
2105 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
2106 * using pipes, etc.
2107 */
2108static struct option __record_options[] = {
2109	OPT_CALLBACK('e', "event", &record.evlist, "event",
2110		     "event selector. use 'perf list' to list available events",
2111		     parse_events_option),
2112	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
2113		     "event filter", parse_filter),
2114	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
2115			   NULL, "don't record events from perf itself",
2116			   exclude_perf),
2117	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
2118		    "record events on existing process id"),
2119	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
2120		    "record events on existing thread id"),
2121	OPT_INTEGER('r', "realtime", &record.realtime_prio,
2122		    "collect data with this RT SCHED_FIFO priority"),
2123	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
2124		    "collect data without buffering"),
2125	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
2126		    "collect raw sample records from all opened counters"),
2127	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
2128			    "system-wide collection from all CPUs"),
2129	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
2130		    "list of cpus to monitor"),
2131	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
2132	OPT_STRING('o', "output", &record.data.path, "file",
2133		    "output file name"),
2134	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
2135			&record.opts.no_inherit_set,
2136			"child tasks do not inherit counters"),
2137	OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
2138		    "synthesize non-sample events at the end of output"),
2139	OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
2140	OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
2141	OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
2142		    "Fail if the specified frequency can't be used"),
2143	OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
2144		     "profile at this frequency",
2145		      record__parse_freq),
2146	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
2147		     "number of mmap data pages and AUX area tracing mmap pages",
2148		     record__parse_mmap_pages),
2149	OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
2150		     "Minimum number of bytes that is extracted from mmap data pages (default: 1)",
2151		     record__mmap_flush_parse),
2152	OPT_BOOLEAN(0, "group", &record.opts.group,
2153		    "put the counters into a counter group"),
2154	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
2155			   NULL, "enables call-graph recording" ,
2156			   &record_callchain_opt),
2157	OPT_CALLBACK(0, "call-graph", &record.opts,
2158		     "record_mode[,record_size]", record_callchain_help,
2159		     &record_parse_callchain_opt),
2160	OPT_INCR('v', "verbose", &verbose,
2161		    "be more verbose (show counter open errors, etc)"),
2162	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
2163	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
2164		    "per thread counts"),
2165	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
2166	OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
2167		    "Record the sample physical addresses"),
2168	OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
2169	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
2170			&record.opts.sample_time_set,
2171			"Record the sample timestamps"),
2172	OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
2173			"Record the sample period"),
2174	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
2175		    "don't sample"),
2176	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
2177			&record.no_buildid_cache_set,
2178			"do not update the buildid cache"),
2179	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
2180			&record.no_buildid_set,
2181			"do not collect buildids in perf.data"),
2182	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
2183		     "monitor event in cgroup name only",
2184		     parse_cgroups),
2185	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
2186		  "ms to wait before starting measurement after program start"),
2187	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
2188		   "user to profile"),
2189
2190	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
2191		     "branch any", "sample any taken branches",
2192		     parse_branch_stack),
2193
2194	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
2195		     "branch filter mask", "branch stack filter modes",
2196		     parse_branch_stack),
2197	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
2198		    "sample by weight (on special events only)"),
2199	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
2200		    "sample transaction flags (special events only)"),
2201	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
2202		    "use per-thread mmaps"),
2203	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
2204		    "sample selected machine registers on interrupt,"
2205		    " use '-I?' to list register names", parse_intr_regs),
2206	OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
2207		    "sample selected machine registers on interrupt,"
2208		    " use '--user-regs=?' to list register names", parse_user_regs),
2209	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
2210		    "Record running/enabled time of read (:S) events"),
2211	OPT_CALLBACK('k', "clockid", &record.opts,
2212	"clockid", "clockid to use for events, see clock_gettime()",
2213	parse_clockid),
2214	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
2215			  "opts", "AUX area tracing Snapshot Mode", ""),
2216	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
2217			"per thread proc mmap processing timeout in ms"),
2218	OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
2219		    "Record namespaces events"),
2220	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
2221		    "Record context switch events"),
2222	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
2223			 "Configure all used events to run in kernel space.",
2224			 PARSE_OPT_EXCLUSIVE),
2225	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
2226			 "Configure all used events to run in user space.",
2227			 PARSE_OPT_EXCLUSIVE),
2228	OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
2229		    "collect kernel callchains"),
2230	OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
2231		    "collect user callchains"),
2232	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
2233		   "clang binary to use for compiling BPF scriptlets"),
2234	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
2235		   "options passed to clang when compiling BPF scriptlets"),
2236	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2237		   "file", "vmlinux pathname"),
2238	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
2239		    "Record build-id of all DSOs regardless of hits"),
2240	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
2241		    "append timestamp to output filename"),
2242	OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
2243		    "Record timestamp boundary (time of first/last samples)"),
2244	OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
2245			  &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
2246			  "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
2247			  "signal"),
2248	OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2249		   "Limit number of switch output generated files"),
2250	OPT_BOOLEAN(0, "dry-run", &dry_run,
2251		    "Parse options then exit"),
2252#ifdef HAVE_AIO_SUPPORT
2253	OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
2254		     &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
2255		     record__aio_parse),
2256#endif
2257	OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
2258		     "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
2259		     record__parse_affinity),
2260#ifdef HAVE_ZSTD_SUPPORT
2261	OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
2262			    "n", "Compressed records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
2263			    record__parse_comp_level),
2264#endif
2265	OPT_END()
2266};
2267
2268struct option *record_options = __record_options;
2269
2270int cmd_record(int argc, const char **argv)
2271{
2272	int err;
2273	struct record *rec = &record;
2274	char errbuf[BUFSIZ];
2275
2276	setlocale(LC_ALL, "");
2277
2278#ifndef HAVE_LIBBPF_SUPPORT
2279# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
2280	set_nobuild('\0', "clang-path", true);
2281	set_nobuild('\0', "clang-opt", true);
2282# undef set_nobuild
2283#endif
2284
2285#ifndef HAVE_BPF_PROLOGUE
2286# if !defined (HAVE_DWARF_SUPPORT)
2287#  define REASON  "NO_DWARF=1"
2288# elif !defined (HAVE_LIBBPF_SUPPORT)
2289#  define REASON  "NO_LIBBPF=1"
2290# else
2291#  define REASON  "this architecture doesn't support BPF prologue"
2292# endif
2293# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
2294	set_nobuild('\0', "vmlinux", true);
2295# undef set_nobuild
2296# undef REASON
2297#endif
2298
2299	CPU_ZERO(&rec->affinity_mask);
2300	rec->opts.affinity = PERF_AFFINITY_SYS;
2301
2302	rec->evlist = evlist__new();
2303	if (rec->evlist == NULL)
2304		return -ENOMEM;
2305
2306	err = perf_config(perf_record_config, rec);
2307	if (err)
2308		return err;
2309
2310	argc = parse_options(argc, argv, record_options, record_usage,
2311			    PARSE_OPT_STOP_AT_NON_OPTION);
2312	if (quiet)
2313		perf_quiet_option();
2314
2315	/* Make system wide (-a) the default target. */
2316	if (!argc && target__none(&rec->opts.target))
2317		rec->opts.target.system_wide = true;
2318
2319	if (nr_cgroups && !rec->opts.target.system_wide) {
2320		usage_with_options_msg(record_usage, record_options,
2321			"cgroup monitoring only available in system-wide mode");
2322
2323	}
2324
2325	if (rec->opts.comp_level != 0) {
2326		pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
2327		rec->no_buildid = true;
2328	}
2329
2330	if (rec->opts.record_switch_events &&
2331	    !perf_can_record_switch_events()) {
2332		ui__error("kernel does not support recording context switch events\n");
2333		parse_options_usage(record_usage, record_options, "switch-events", 0);
2334		return -EINVAL;
2335	}
2336
2337	if (switch_output_setup(rec)) {
2338		parse_options_usage(record_usage, record_options, "switch-output", 0);
2339		return -EINVAL;
2340	}
2341
2342	if (rec->switch_output.time) {
2343		signal(SIGALRM, alarm_sig_handler);
2344		alarm(rec->switch_output.time);
2345	}
2346
2347	if (rec->switch_output.num_files) {
2348		rec->switch_output.filenames = calloc(rec->switch_output.num_files,
2349						      sizeof(char *));
2350		if (!rec->switch_output.filenames)
2351			return -EINVAL;
2352	}
2353
2354	/*
2355	 * Allow aliases to facilitate the lookup of symbols for address
2356	 * filters. Refer to auxtrace_parse_filters().
2357	 */
2358	symbol_conf.allow_aliases = true;
2359
2360	symbol__init(NULL);
2361
2362	err = record__auxtrace_init(rec);
2363	if (err)
2364		goto out;
2365
2366	if (dry_run)
2367		goto out;
2368
2369	err = bpf__setup_stdout(rec->evlist);
2370	if (err) {
2371		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2372		pr_err("ERROR: Setup BPF stdout failed: %s\n",
2373			 errbuf);
2374		goto out;
2375	}
2376
2377	err = -ENOMEM;
2378
2379	if (rec->no_buildid_cache || rec->no_buildid) {
2380		disable_buildid_cache();
2381	} else if (rec->switch_output.enabled) {
2382		/*
2383		 * In 'perf record --switch-output', disable buildid
2384		 * generation by default to reduce data file switching
2385		 * overhead. Still generate buildid if they are required
2386		 * explicitly using
2387		 *
2388		 *  perf record --switch-output --no-no-buildid \
2389		 *              --no-no-buildid-cache
2390		 *
2391		 * Following code equals to:
2392		 *
2393		 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2394		 *     (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2395		 *         disable_buildid_cache();
2396		 */
2397		bool disable = true;
2398
2399		if (rec->no_buildid_set && !rec->no_buildid)
2400			disable = false;
2401		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2402			disable = false;
2403		if (disable) {
2404			rec->no_buildid = true;
2405			rec->no_buildid_cache = true;
2406			disable_buildid_cache();
2407		}
2408	}
2409
2410	if (record.opts.overwrite)
2411		record.opts.tail_synthesize = true;
2412
2413	if (rec->evlist->core.nr_entries == 0 &&
2414	    __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
2415		pr_err("Not enough memory for event selector list\n");
2416		goto out;
2417	}
2418
2419	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2420		rec->opts.no_inherit = true;
2421
2422	err = target__validate(&rec->opts.target);
2423	if (err) {
2424		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
2425		ui__warning("%s\n", errbuf);
2426	}
2427
2428	err = target__parse_uid(&rec->opts.target);
2429	if (err) {
2430		int saved_errno = errno;
2431
2432		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
2433		ui__error("%s", errbuf);
2434
2435		err = -saved_errno;
2436		goto out;
2437	}
2438
2439	/* Enable ignoring missing threads when -u/-p option is defined. */
2440	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
2441
2442	err = -ENOMEM;
2443	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
2444		usage_with_options(record_usage, record_options);
2445
2446	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2447	if (err)
2448		goto out;
2449
2450	/*
2451	 * We take all buildids when the file contains
2452	 * AUX area tracing data, because we do not decode the
2453	 * trace - decoding would take too long.
2454	 */
2455	if (rec->opts.full_auxtrace)
2456		rec->buildid_all = true;
2457
2458	if (record_opts__config(&rec->opts)) {
2459		err = -EINVAL;
2460		goto out;
2461	}
2462
2463	if (rec->opts.nr_cblocks > nr_cblocks_max)
2464		rec->opts.nr_cblocks = nr_cblocks_max;
2465	pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
2466
2467	pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
2468	pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
2469
2470	if (rec->opts.comp_level > comp_level_max)
2471		rec->opts.comp_level = comp_level_max;
2472	pr_debug("comp level: %d\n", rec->opts.comp_level);
2473
2474	err = __cmd_record(&record, argc, argv);
2475out:
2476	evlist__delete(rec->evlist);
2477	symbol__exit();
2478	auxtrace_record__free(rec->itr);
2479	return err;
2480}
2481
2482static void snapshot_sig_handler(int sig __maybe_unused)
2483{
2484	struct record *rec = &record;
2485
2486	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2487		trigger_hit(&auxtrace_snapshot_trigger);
2488		auxtrace_record__snapshot_started = 1;
2489		if (auxtrace_record__snapshot_start(record.itr))
2490			trigger_error(&auxtrace_snapshot_trigger);
2491	}
2492
2493	if (switch_output_signal(rec))
2494		trigger_hit(&switch_output_trigger);
2495}
2496
2497static void alarm_sig_handler(int sig __maybe_unused)
2498{
2499	struct record *rec = &record;
2500
2501	if (switch_output_time(rec))
2502		trigger_hit(&switch_output_trigger);
2503}
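/*
 * With --switch-output[=signal], snapshot_sig_handler() above reacts to
 * SIGUSR2 and rotates the output file, e.g. from another shell (a sketch,
 * assuming a single running perf process):
 *
 *   kill -USR2 $(pgrep -x perf)
 */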
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * builtin-record.c
   4 *
   5 * Builtin record command: Record the profile of a workload
   6 * (or a CPU, or a PID) into the perf.data output file - for
   7 * later analysis via perf report.
   8 */
   9#include "builtin.h"
  10
  11#include "util/build-id.h"
  12#include <subcmd/parse-options.h>
  13#include <internal/xyarray.h>
  14#include "util/parse-events.h"
  15#include "util/config.h"
  16
  17#include "util/callchain.h"
  18#include "util/cgroup.h"
  19#include "util/header.h"
  20#include "util/event.h"
  21#include "util/evlist.h"
  22#include "util/evsel.h"
  23#include "util/debug.h"
  24#include "util/mmap.h"
  25#include "util/mutex.h"
  26#include "util/target.h"
  27#include "util/session.h"
  28#include "util/tool.h"
  29#include "util/symbol.h"
  30#include "util/record.h"
  31#include "util/cpumap.h"
  32#include "util/thread_map.h"
  33#include "util/data.h"
  34#include "util/perf_regs.h"
  35#include "util/auxtrace.h"
  36#include "util/tsc.h"
  37#include "util/parse-branch-options.h"
  38#include "util/parse-regs-options.h"
  39#include "util/perf_api_probe.h"
  40#include "util/trigger.h"
  41#include "util/perf-hooks.h"
  42#include "util/cpu-set-sched.h"
  43#include "util/synthetic-events.h"
  44#include "util/time-utils.h"
  45#include "util/units.h"
  46#include "util/bpf-event.h"
  47#include "util/util.h"
  48#include "util/pfm.h"
  49#include "util/pmu.h"
  50#include "util/pmus.h"
  51#include "util/clockid.h"
  52#include "util/off_cpu.h"
  53#include "util/bpf-filter.h"
  54#include "asm/bug.h"
  55#include "perf.h"
  56#include "cputopo.h"
  57
  58#include <errno.h>
  59#include <inttypes.h>
  60#include <locale.h>
  61#include <poll.h>
  62#include <pthread.h>
  63#include <unistd.h>
  64#ifndef HAVE_GETTID
  65#include <syscall.h>
  66#endif
  67#include <sched.h>
  68#include <signal.h>
  69#ifdef HAVE_EVENTFD_SUPPORT
  70#include <sys/eventfd.h>
  71#endif
  72#include <sys/mman.h>
  73#include <sys/wait.h>
  74#include <sys/types.h>
  75#include <sys/stat.h>
  76#include <fcntl.h>
  77#include <linux/err.h>
  78#include <linux/string.h>
  79#include <linux/time64.h>
  80#include <linux/zalloc.h>
  81#include <linux/bitmap.h>
  82#include <sys/time.h>
  83
  84struct switch_output {
  85	bool		 enabled;
  86	bool		 signal;
  87	unsigned long	 size;
  88	unsigned long	 time;
  89	const char	*str;
  90	bool		 set;
  91	char		 **filenames;
  92	int		 num_files;
  93	int		 cur_file;
  94};
  95
  96struct thread_mask {
  97	struct mmap_cpu_mask	maps;
  98	struct mmap_cpu_mask	affinity;
  99};
 100
 101struct record_thread {
 102	pid_t			tid;
 103	struct thread_mask	*mask;
 104	struct {
 105		int		msg[2];
 106		int		ack[2];
 107	} pipes;
 108	struct fdarray		pollfd;
 109	int			ctlfd_pos;
 110	int			nr_mmaps;
 111	struct mmap		**maps;
 112	struct mmap		**overwrite_maps;
 113	struct record		*rec;
 114	unsigned long long	samples;
 115	unsigned long		waking;
 116	u64			bytes_written;
 117	u64			bytes_transferred;
 118	u64			bytes_compressed;
 119};
 120
 121static __thread struct record_thread *thread;
 122
 123enum thread_msg {
 124	THREAD_MSG__UNDEFINED = 0,
 125	THREAD_MSG__READY,
 126	THREAD_MSG__MAX,
 127};
 128
 129static const char *thread_msg_tags[THREAD_MSG__MAX] = {
 130	"UNDEFINED", "READY"
 131};
 132
 133enum thread_spec {
 134	THREAD_SPEC__UNDEFINED = 0,
 135	THREAD_SPEC__CPU,
 136	THREAD_SPEC__CORE,
 137	THREAD_SPEC__PACKAGE,
 138	THREAD_SPEC__NUMA,
 139	THREAD_SPEC__USER,
 140	THREAD_SPEC__MAX,
 141};
 142
 143static const char *thread_spec_tags[THREAD_SPEC__MAX] = {
 144	"undefined", "cpu", "core", "package", "numa", "user"
 145};
 146
 147struct pollfd_index_map {
 148	int evlist_pollfd_index;
 149	int thread_pollfd_index;
 150};
 151
 152struct record {
 153	struct perf_tool	tool;
 154	struct record_opts	opts;
 155	u64			bytes_written;
 156	u64			thread_bytes_written;
 157	struct perf_data	data;
 158	struct auxtrace_record	*itr;
 159	struct evlist	*evlist;
 160	struct perf_session	*session;
 161	struct evlist		*sb_evlist;
 162	pthread_t		thread_id;
 163	int			realtime_prio;
 164	bool			switch_output_event_set;
 165	bool			no_buildid;
 166	bool			no_buildid_set;
 167	bool			no_buildid_cache;
 168	bool			no_buildid_cache_set;
 169	bool			buildid_all;
 170	bool			buildid_mmap;
 171	bool			timestamp_filename;
 172	bool			timestamp_boundary;
 173	bool			off_cpu;
 174	struct switch_output	switch_output;
 175	unsigned long long	samples;
 176	unsigned long		output_max_size;	/* = 0: unlimited */
 177	struct perf_debuginfod	debuginfod;
 178	int			nr_threads;
 179	struct thread_mask	*thread_masks;
 180	struct record_thread	*thread_data;
 181	struct pollfd_index_map	*index_map;
 182	size_t			index_map_sz;
 183	size_t			index_map_cnt;
 184};
 185
 186static volatile int done;
 187
 188static volatile int auxtrace_record__snapshot_started;
 189static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
 190static DEFINE_TRIGGER(switch_output_trigger);
 191
 192static const char *affinity_tags[PERF_AFFINITY_MAX] = {
 193	"SYS", "NODE", "CPU"
 194};
 195
 196#ifndef HAVE_GETTID
 197static inline pid_t gettid(void)
 198{
 199	return (pid_t)syscall(__NR_gettid);
 200}
 201#endif
 202
 203static int record__threads_enabled(struct record *rec)
 204{
 205	return rec->opts.threads_spec;
 206}
 207
 208static bool switch_output_signal(struct record *rec)
 209{
 210	return rec->switch_output.signal &&
 211	       trigger_is_ready(&switch_output_trigger);
 212}
 213
 214static bool switch_output_size(struct record *rec)
 215{
 216	return rec->switch_output.size &&
 217	       trigger_is_ready(&switch_output_trigger) &&
 218	       (rec->bytes_written >= rec->switch_output.size);
 219}
 220
 221static bool switch_output_time(struct record *rec)
 222{
 223	return rec->switch_output.time &&
 224	       trigger_is_ready(&switch_output_trigger);
 225}
 226
 227static u64 record__bytes_written(struct record *rec)
 228{
 229	return rec->bytes_written + rec->thread_bytes_written;
 230}
 231
 232static bool record__output_max_size_exceeded(struct record *rec)
 233{
 234	return rec->output_max_size &&
 235	       (record__bytes_written(rec) >= rec->output_max_size);
 236}
 237
 238static int record__write(struct record *rec, struct mmap *map __maybe_unused,
 239			 void *bf, size_t size)
 240{
 241	struct perf_data_file *file = &rec->session->data->file;
 242
 243	if (map && map->file)
 244		file = map->file;
 245
 246	if (perf_data_file__write(file, bf, size) < 0) {
 247		pr_err("failed to write perf data, error: %m\n");
 248		return -1;
 249	}
 250
 251	if (map && map->file) {
 252		thread->bytes_written += size;
 253		rec->thread_bytes_written += size;
 254	} else {
 255		rec->bytes_written += size;
 256	}
 257
 258	if (record__output_max_size_exceeded(rec) && !done) {
 259		fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
 260				" stopping session ]\n",
 261				record__bytes_written(rec) >> 10);
 262		done = 1;
 263	}
 264
 265	if (switch_output_size(rec))
 266		trigger_hit(&switch_output_trigger);
 267
 268	return 0;
 269}
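/*
 * record__output_max_size_exceeded() above enforces the output size cap
 * stored in rec->output_max_size (a sketch; assuming the --max-size
 * record option is what wires it up):
 *
 *   perf record --max-size=1G -- ./workload
 */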
 270
 271static int record__aio_enabled(struct record *rec);
 272static int record__comp_enabled(struct record *rec);
 273static ssize_t zstd_compress(struct perf_session *session, struct mmap *map,
 274			    void *dst, size_t dst_size, void *src, size_t src_size);
 275
 276#ifdef HAVE_AIO_SUPPORT
 277static int record__aio_write(struct aiocb *cblock, int trace_fd,
 278		void *buf, size_t size, off_t off)
 279{
 280	int rc;
 281
 282	cblock->aio_fildes = trace_fd;
 283	cblock->aio_buf    = buf;
 284	cblock->aio_nbytes = size;
 285	cblock->aio_offset = off;
 286	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;
 287
 288	do {
 289		rc = aio_write(cblock);
 290		if (rc == 0) {
 291			break;
 292		} else if (errno != EAGAIN) {
 293			cblock->aio_fildes = -1;
 294			pr_err("failed to queue perf data, error: %m\n");
 295			break;
 296		}
 297	} while (1);
 298
 299	return rc;
 300}
 301
 302static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
 303{
 304	void *rem_buf;
 305	off_t rem_off;
 306	size_t rem_size;
 307	int rc, aio_errno;
 308	ssize_t aio_ret, written;
 309
 310	aio_errno = aio_error(cblock);
 311	if (aio_errno == EINPROGRESS)
 312		return 0;
 313
 314	written = aio_ret = aio_return(cblock);
 315	if (aio_ret < 0) {
 316		if (aio_errno != EINTR)
 317			pr_err("failed to write perf data, error: %m\n");
 318		written = 0;
 319	}
 320
 321	rem_size = cblock->aio_nbytes - written;
 322
 323	if (rem_size == 0) {
 324		cblock->aio_fildes = -1;
 325		/*
 326		 * md->refcount is incremented in record__aio_pushfn() for
 327		 * every aio write request started in record__aio_push() so
 328		 * decrement it because the request is now complete.
 329		 */
 330		perf_mmap__put(&md->core);
 331		rc = 1;
 332	} else {
 333		/*
 334		 * The aio write request may require a restart with the
 335		 * remainder if the kernel didn't write the whole
 336		 * chunk at once.
 337		 */
 338		rem_off = cblock->aio_offset + written;
 339		rem_buf = (void *)(cblock->aio_buf + written);
 340		record__aio_write(cblock, cblock->aio_fildes,
 341				rem_buf, rem_size, rem_off);
 342		rc = 0;
 343	}
 344
 345	return rc;
 346}
 347
 348static int record__aio_sync(struct mmap *md, bool sync_all)
 349{
 350	struct aiocb **aiocb = md->aio.aiocb;
 351	struct aiocb *cblocks = md->aio.cblocks;
 352	struct timespec timeout = { 0, 1000 * 1000  * 1 }; /* 1ms */
 353	int i, do_suspend;
 354
 355	do {
 356		do_suspend = 0;
 357		for (i = 0; i < md->aio.nr_cblocks; ++i) {
 358			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
 359				if (sync_all)
 360					aiocb[i] = NULL;
 361				else
 362					return i;
 363			} else {
 364				 * The started aio write is not complete yet,
 365				 * so it has to be waited on before the
 366				 * next allocation.
 367				 * next allocation.
 368				 */
 369				aiocb[i] = &cblocks[i];
 370				do_suspend = 1;
 371			}
 372		}
 373		if (!do_suspend)
 374			return -1;
 375
 376		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
 377			if (!(errno == EAGAIN || errno == EINTR))
 378				pr_err("failed to sync perf data, error: %m\n");
 379		}
 380	} while (1);
 381}
 382
 383struct record_aio {
 384	struct record	*rec;
 385	void		*data;
 386	size_t		size;
 387};
 388
 389static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
 390{
 391	struct record_aio *aio = to;
 392
 393	/*
 394	 * The map->core.base data pointed to by buf is copied into a free map->aio.data[]
 395	 * buffer to release space in the kernel buffer as fast as possible, calling
 396	 * perf_mmap__consume() from the perf_mmap__push() function.
 397	 *
 398	 * That lets the kernel proceed with storing more profiling data into
 399	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
 400	 *
 401	 * Copying can be done in two steps in case the chunk of profiling data
 402	 * crosses the upper bound of the kernel buffer. In this case we first move
 403	 * part of the data from map->start till the upper bound and then the remainder
 404	 * from the beginning of the kernel buffer till the end of the data chunk.
 405	 */
 406
 407	if (record__comp_enabled(aio->rec)) {
 408		ssize_t compressed = zstd_compress(aio->rec->session, NULL, aio->data + aio->size,
 409						   mmap__mmap_len(map) - aio->size,
 410						   buf, size);
 411		if (compressed < 0)
 412			return (int)compressed;
 413
 414		size = compressed;
 415	} else {
 416		memcpy(aio->data + aio->size, buf, size);
 417	}
 418
 419	if (!aio->size) {
 420		/*
 421		 * Increment map->refcount to guard map->aio.data[] buffer
 422		 * from premature deallocation because the map object can be
 423		 * released earlier than the aio write request started on the
 424		 * map->aio.data[] buffer completes.
 425		 *
 426		 * perf_mmap__put() is done at record__aio_complete()
 427		 * after started aio request completion or at record__aio_push()
 428		 * if the request failed to start.
 429		 */
 430		perf_mmap__get(&map->core);
 431	}
 432
 433	aio->size += size;
 434
 435	return size;
 436}
 437
 438static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
 439{
 440	int ret, idx;
 441	int trace_fd = rec->session->data->file.fd;
 442	struct record_aio aio = { .rec = rec, .size = 0 };
 443
 444	/*
 445	 * Call record__aio_sync() to wait till map->aio.data[] buffer
 446	 * becomes available after previous aio write operation.
 447	 */
 448
 449	idx = record__aio_sync(map, false);
 450	aio.data = map->aio.data[idx];
 451	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
 452	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
 453		return ret;
 454
 455	rec->samples++;
 456	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
 457	if (!ret) {
 458		*off += aio.size;
 459		rec->bytes_written += aio.size;
 460		if (switch_output_size(rec))
 461			trigger_hit(&switch_output_trigger);
 462	} else {
 463		/*
 464		 * Decrement map->refcount incremented in record__aio_pushfn()
 465		 * back if record__aio_write() operation failed to start, otherwise
 466		 * map->refcount is decremented in record__aio_complete() after
 467		 * aio write operation finishes successfully.
 468		 */
 469		perf_mmap__put(&map->core);
 470	}
 471
 472	return ret;
 473}
 474
 475static off_t record__aio_get_pos(int trace_fd)
 476{
 477	return lseek(trace_fd, 0, SEEK_CUR);
 478}
 479
 480static void record__aio_set_pos(int trace_fd, off_t pos)
 481{
 482	lseek(trace_fd, pos, SEEK_SET);
 483}
 484
 485static void record__aio_mmap_read_sync(struct record *rec)
 486{
 487	int i;
 488	struct evlist *evlist = rec->evlist;
 489	struct mmap *maps = evlist->mmap;
 490
 491	if (!record__aio_enabled(rec))
 492		return;
 493
 494	for (i = 0; i < evlist->core.nr_mmaps; i++) {
 495		struct mmap *map = &maps[i];
 496
 497		if (map->core.base)
 498			record__aio_sync(map, true);
 499	}
 500}
 501
 502static int nr_cblocks_default = 1;
 503static int nr_cblocks_max = 4;
 504
 505static int record__aio_parse(const struct option *opt,
 506			     const char *str,
 507			     int unset)
 508{
 509	struct record_opts *opts = (struct record_opts *)opt->value;
 510
 511	if (unset) {
 512		opts->nr_cblocks = 0;
 513	} else {
 514		if (str)
 515			opts->nr_cblocks = strtol(str, NULL, 0);
 516		if (!opts->nr_cblocks)
 517			opts->nr_cblocks = nr_cblocks_default;
 518	}
 519
 520	return 0;
 521}
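/*
 * Example: enable asynchronous (AIO) trace writing with up to four
 * in-flight control blocks (a sketch; the bounds follow
 * nr_cblocks_default and nr_cblocks_max):
 *
 *   perf record --aio=4 -- ./workload
 */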
 522#else /* HAVE_AIO_SUPPORT */
 523static int nr_cblocks_max = 0;
 524
 525static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
 526			    off_t *off __maybe_unused)
 527{
 528	return -1;
 529}
 530
 531static off_t record__aio_get_pos(int trace_fd __maybe_unused)
 532{
 533	return -1;
 534}
 535
 536static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
 537{
 538}
 539
 540static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
 541{
 542}
 543#endif
 544
 545static int record__aio_enabled(struct record *rec)
 546{
 547	return rec->opts.nr_cblocks > 0;
 548}
 549
 550#define MMAP_FLUSH_DEFAULT 1
 551static int record__mmap_flush_parse(const struct option *opt,
 552				    const char *str,
 553				    int unset)
 554{
 555	int flush_max;
 556	struct record_opts *opts = (struct record_opts *)opt->value;
 557	static struct parse_tag tags[] = {
 558			{ .tag  = 'B', .mult = 1       },
 559			{ .tag  = 'K', .mult = 1 << 10 },
 560			{ .tag  = 'M', .mult = 1 << 20 },
 561			{ .tag  = 'G', .mult = 1 << 30 },
 562			{ .tag  = 0 },
 563	};
 564
 565	if (unset)
 566		return 0;
 567
 568	if (str) {
 569		opts->mmap_flush = parse_tag_value(str, tags);
 570		if (opts->mmap_flush == (int)-1)
 571			opts->mmap_flush = strtol(str, NULL, 0);
 572	}
 573
 574	if (!opts->mmap_flush)
 575		opts->mmap_flush = MMAP_FLUSH_DEFAULT;
 576
 577	flush_max = evlist__mmap_size(opts->mmap_pages);
 578	flush_max /= 4;
 579	if (opts->mmap_flush > flush_max)
 580		opts->mmap_flush = flush_max;
 581
 582	return 0;
 583}
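/*
 * Example: batch larger chunks per write by raising the minimal
 * extraction size (a sketch; the B/K/M/G suffixes come from tags[], and
 * the value is capped to a quarter of the mmap size above):
 *
 *   perf record --mmap-flush=48K -- ./workload
 */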
 584
 585#ifdef HAVE_ZSTD_SUPPORT
 586static unsigned int comp_level_default = 1;
 587
 588static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
 589{
 590	struct record_opts *opts = opt->value;
 591
 592	if (unset) {
 593		opts->comp_level = 0;
 594	} else {
 595		if (str)
 596			opts->comp_level = strtol(str, NULL, 0);
 597		if (!opts->comp_level)
 598			opts->comp_level = comp_level_default;
 599	}
 600
 601	return 0;
 602}
 603#endif
 604static unsigned int comp_level_max = 22;
 605
 606static int record__comp_enabled(struct record *rec)
 607{
 608	return rec->opts.comp_level > 0;
 609}
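/*
 * Example: Zstd-compress records while writing (a sketch; levels range
 * from comp_level_default (1) to comp_level_max (22)):
 *
 *   perf record -z 3 -- ./workload
 */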
 610
 611static int process_synthesized_event(struct perf_tool *tool,
 612				     union perf_event *event,
 613				     struct perf_sample *sample __maybe_unused,
 614				     struct machine *machine __maybe_unused)
 615{
 616	struct record *rec = container_of(tool, struct record, tool);
 617	return record__write(rec, NULL, event, event->header.size);
 618}
 619
 620static struct mutex synth_lock;
 621
 622static int process_locked_synthesized_event(struct perf_tool *tool,
 623				     union perf_event *event,
 624				     struct perf_sample *sample __maybe_unused,
 625				     struct machine *machine __maybe_unused)
 626{
 627	int ret;
 628
 629	mutex_lock(&synth_lock);
 630	ret = process_synthesized_event(tool, event, sample, machine);
 631	mutex_unlock(&synth_lock);
 632	return ret;
 633}
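/*
 * The locked variant above serializes record__write() behind synth_lock
 * so synthesized events coming from concurrent synthesizer threads are
 * written one at a time (a descriptive note; the callers that select the
 * locked vs. plain variant are outside this excerpt).
 */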
 634
 635static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
 636{
 637	struct record *rec = to;
 638
 639	if (record__comp_enabled(rec)) {
 640		ssize_t compressed = zstd_compress(rec->session, map, map->data,
 641						   mmap__mmap_len(map), bf, size);
 642
 643		if (compressed < 0)
 644			return (int)compressed;
 645
 646		size = compressed;
 647		bf   = map->data;
 648	}
 649
 650	thread->samples++;
 651	return record__write(rec, map, bf, size);
 652}
 653
 654static volatile sig_atomic_t signr = -1;
 655static volatile sig_atomic_t child_finished;
 656#ifdef HAVE_EVENTFD_SUPPORT
 657static volatile sig_atomic_t done_fd = -1;
 658#endif
 659
 660static void sig_handler(int sig)
 661{
 662	if (sig == SIGCHLD)
 663		child_finished = 1;
 664	else
 665		signr = sig;
 666
 667	done = 1;
 668#ifdef HAVE_EVENTFD_SUPPORT
 669	if (done_fd >= 0) {
 670		u64 tmp = 1;
 671		int orig_errno = errno;
 672
 673		/*
 674		 * It is possible for this signal handler to run after done is
 675		 * checked in the main loop, but before the perf counter fds are
 676		 * polled. If this happens, the poll() will continue to wait
 677		 * even though done is set, and will only break out if either
 678		 * another signal is received, or the counters are ready for
 679		 * read. To ensure the poll() doesn't sleep when done is set,
 680		 * use an eventfd (done_fd) to wake up the poll().
 681		 */
 682		if (write(done_fd, &tmp, sizeof(tmp)) < 0)
 683			pr_err("failed to signal wakeup fd, error: %m\n");
 684
 685		errno = orig_errno;
 686	}
 687#endif // HAVE_EVENTFD_SUPPORT
 688}
 689
 690static void sigsegv_handler(int sig)
 691{
 692	perf_hooks__recover();
 693	sighandler_dump_stack(sig);
 694}
 695
 696static void record__sig_exit(void)
 697{
 698	if (signr == -1)
 699		return;
 700
 701	signal(signr, SIG_DFL);
 702	raise(signr);
 703}
 704
 705#ifdef HAVE_AUXTRACE_SUPPORT
 706
 707static int record__process_auxtrace(struct perf_tool *tool,
 708				    struct mmap *map,
 709				    union perf_event *event, void *data1,
 710				    size_t len1, void *data2, size_t len2)
 711{
 712	struct record *rec = container_of(tool, struct record, tool);
 713	struct perf_data *data = &rec->data;
 714	size_t padding;
 715	u8 pad[8] = {0};
 716
 717	if (!perf_data__is_pipe(data) && perf_data__is_single_file(data)) {
 718		off_t file_offset;
 719		int fd = perf_data__fd(data);
 720		int err;
 721
 722		file_offset = lseek(fd, 0, SEEK_CUR);
 723		if (file_offset == -1)
 724			return -1;
 725		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
 726						     event, file_offset);
 727		if (err)
 728			return err;
 729	}
 730
 731	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
 732	padding = (len1 + len2) & 7;
 733	if (padding)
 734		padding = 8 - padding;
 735
 736	record__write(rec, map, event, event->header.size);
 737	record__write(rec, map, data1, len1);
 738	if (len2)
 739		record__write(rec, map, data2, len2);
 740	record__write(rec, map, &pad, padding);
 741
 742	return 0;
 743}
 744
 745static int record__auxtrace_mmap_read(struct record *rec,
 746				      struct mmap *map)
 747{
 748	int ret;
 749
 750	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
 751				  record__process_auxtrace);
 752	if (ret < 0)
 753		return ret;
 754
 755	if (ret)
 756		rec->samples++;
 757
 758	return 0;
 759}
 760
 761static int record__auxtrace_mmap_read_snapshot(struct record *rec,
 762					       struct mmap *map)
 763{
 764	int ret;
 765
 766	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
 767					   record__process_auxtrace,
 768					   rec->opts.auxtrace_snapshot_size);
 769	if (ret < 0)
 770		return ret;
 771
 772	if (ret)
 773		rec->samples++;
 774
 775	return 0;
 776}
 777
 778static int record__auxtrace_read_snapshot_all(struct record *rec)
 779{
 780	int i;
 781	int rc = 0;
 782
 783	for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
 784		struct mmap *map = &rec->evlist->mmap[i];
 785
 786		if (!map->auxtrace_mmap.base)
 787			continue;
 788
 789		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
 790			rc = -1;
 791			goto out;
 792		}
 793	}
 794out:
 795	return rc;
 796}
 797
 798static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
 799{
 800	pr_debug("Recording AUX area tracing snapshot\n");
 801	if (record__auxtrace_read_snapshot_all(rec) < 0) {
 802		trigger_error(&auxtrace_snapshot_trigger);
 803	} else {
 804		if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
 805			trigger_error(&auxtrace_snapshot_trigger);
 806		else
 807			trigger_ready(&auxtrace_snapshot_trigger);
 808	}
 809}
 810
 811static int record__auxtrace_snapshot_exit(struct record *rec)
 812{
 813	if (trigger_is_error(&auxtrace_snapshot_trigger))
 814		return 0;
 815
 816	if (!auxtrace_record__snapshot_started &&
 817	    auxtrace_record__snapshot_start(rec->itr))
 818		return -1;
 819
 820	record__read_auxtrace_snapshot(rec, true);
 821	if (trigger_is_error(&auxtrace_snapshot_trigger))
 822		return -1;
 823
 824	return 0;
 825}
 826
 827static int record__auxtrace_init(struct record *rec)
 828{
 829	int err;
 830
 831	if ((rec->opts.auxtrace_snapshot_opts || rec->opts.auxtrace_sample_opts)
 832	    && record__threads_enabled(rec)) {
 833		pr_err("AUX area tracing options are not available in parallel streaming mode.\n");
 834		return -EINVAL;
 835	}
 836
 837	if (!rec->itr) {
 838		rec->itr = auxtrace_record__init(rec->evlist, &err);
 839		if (err)
 840			return err;
 841	}
 842
 843	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
 844					      rec->opts.auxtrace_snapshot_opts);
 845	if (err)
 846		return err;
 847
 848	err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts,
 849					    rec->opts.auxtrace_sample_opts);
 850	if (err)
 851		return err;
 852
 853	auxtrace_regroup_aux_output(rec->evlist);
 854
 855	return auxtrace_parse_filters(rec->evlist);
 856}
 857
 858#else
 859
 860static inline
 861int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
 862			       struct mmap *map __maybe_unused)
 863{
 864	return 0;
 865}
 866
 867static inline
 868void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
 869				    bool on_exit __maybe_unused)
 870{
 871}
 872
 873static inline
 874int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
 875{
 876	return 0;
 877}
 878
 879static inline
 880int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
 881{
 882	return 0;
 883}
 884
 885static int record__auxtrace_init(struct record *rec __maybe_unused)
 886{
 887	return 0;
 888}
 889
 890#endif
 891
 892static int record__config_text_poke(struct evlist *evlist)
 893{
 894	struct evsel *evsel;
 895
 896	/* Nothing to do if text poke is already configured */
 897	evlist__for_each_entry(evlist, evsel) {
 898		if (evsel->core.attr.text_poke)
 899			return 0;
 900	}
 901
 902	evsel = evlist__add_dummy_on_all_cpus(evlist);
 903	if (!evsel)
 904		return -ENOMEM;
 905
 906	evsel->core.attr.text_poke = 1;
 907	evsel->core.attr.ksymbol = 1;
 908	evsel->immediate = true;
 909	evsel__set_sample_bit(evsel, TIME);
 910
 911	return 0;
 912}
 913
 914static int record__config_off_cpu(struct record *rec)
 915{
 916	return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts);
 917}
 918
 919static bool record__tracking_system_wide(struct record *rec)
 920{
 921	struct evlist *evlist = rec->evlist;
 922	struct evsel *evsel;
 923
 924	/*
 925	 * If a non-dummy evsel exists, system_wide sideband is needed to
 926	 * help parse sample information.
 927	 * For example, a PERF_RECORD_MMAP event helps parse symbols,
 928	 * and a PERF_RECORD_COMM event helps parse the task executable name.
 929	 */
 930	evlist__for_each_entry(evlist, evsel) {
 931		if (!evsel__is_dummy_event(evsel))
 932			return true;
 933	}
 934
 935	return false;
 936}
 937
 938static int record__config_tracking_events(struct record *rec)
 939{
 940	struct record_opts *opts = &rec->opts;
 941	struct evlist *evlist = rec->evlist;
 942	bool system_wide = false;
 943	struct evsel *evsel;
 944
 945	/*
 946	 * For initial_delay, system wide or a hybrid system, we need to add a
 947	 * tracking event so that we can track PERF_RECORD_MMAP to cover the
 948	 * delay of waiting or event synthesis.
 949	 */
 950	if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
 951	    perf_pmus__num_core_pmus() > 1) {
 952
 953		/*
 954		 * User space tasks can migrate between CPUs, so when tracing
 955		 * selected CPUs, sideband for all CPUs is still needed.
 956		 */
 957		if (!!opts->target.cpu_list && record__tracking_system_wide(rec))
 958			system_wide = true;
 959
 960		evsel = evlist__findnew_tracking_event(evlist, system_wide);
 961		if (!evsel)
 962			return -ENOMEM;
 963
 964		/*
 965		 * Enable the tracking event when the process is forked for
 966		 * initial_delay, immediately for system wide.
 967		 */
 968		if (opts->target.initial_delay && !evsel->immediate &&
 969		    !target__has_cpu(&opts->target))
 970			evsel->core.attr.enable_on_exec = 1;
 971		else
 972			evsel->immediate = 1;
 973	}
 974
 975	return 0;
 976}
 977
 978static bool record__kcore_readable(struct machine *machine)
 979{
 980	char kcore[PATH_MAX];
 981	int fd;
 982
 983	scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir);
 984
 985	fd = open(kcore, O_RDONLY);
 986	if (fd < 0)
 987		return false;
 988
 989	close(fd);
 990
 991	return true;
 992}
 993
 994static int record__kcore_copy(struct machine *machine, struct perf_data *data)
 995{
 996	char from_dir[PATH_MAX];
 997	char kcore_dir[PATH_MAX];
 998	int ret;
 999
1000	snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir);
1001
1002	ret = perf_data__make_kcore_dir(data, kcore_dir, sizeof(kcore_dir));
1003	if (ret)
1004		return ret;
1005
1006	return kcore_copy(from_dir, kcore_dir);
1007}
1008
1009static void record__thread_data_init_pipes(struct record_thread *thread_data)
1010{
1011	thread_data->pipes.msg[0] = -1;
1012	thread_data->pipes.msg[1] = -1;
1013	thread_data->pipes.ack[0] = -1;
1014	thread_data->pipes.ack[1] = -1;
1015}
1016
1017static int record__thread_data_open_pipes(struct record_thread *thread_data)
1018{
1019	if (pipe(thread_data->pipes.msg))
1020		return -EINVAL;
1021
1022	if (pipe(thread_data->pipes.ack)) {
1023		close(thread_data->pipes.msg[0]);
1024		thread_data->pipes.msg[0] = -1;
1025		close(thread_data->pipes.msg[1]);
1026		thread_data->pipes.msg[1] = -1;
1027		return -EINVAL;
1028	}
1029
1030	pr_debug2("thread_data[%p]: msg=[%d,%d], ack=[%d,%d]\n", thread_data,
1031		 thread_data->pipes.msg[0], thread_data->pipes.msg[1],
1032		 thread_data->pipes.ack[0], thread_data->pipes.ack[1]);
1033
1034	return 0;
1035}
1036
1037static void record__thread_data_close_pipes(struct record_thread *thread_data)
1038{
1039	if (thread_data->pipes.msg[0] != -1) {
1040		close(thread_data->pipes.msg[0]);
1041		thread_data->pipes.msg[0] = -1;
1042	}
1043	if (thread_data->pipes.msg[1] != -1) {
1044		close(thread_data->pipes.msg[1]);
1045		thread_data->pipes.msg[1] = -1;
1046	}
1047	if (thread_data->pipes.ack[0] != -1) {
1048		close(thread_data->pipes.ack[0]);
1049		thread_data->pipes.ack[0] = -1;
1050	}
1051	if (thread_data->pipes.ack[1] != -1) {
1052		close(thread_data->pipes.ack[1]);
1053		thread_data->pipes.ack[1] = -1;
1054	}
1055}
1056
1057static bool evlist__per_thread(struct evlist *evlist)
1058{
1059	return cpu_map__is_dummy(evlist->core.user_requested_cpus);
1060}
1061
1062static int record__thread_data_init_maps(struct record_thread *thread_data, struct evlist *evlist)
1063{
1064	int m, tm, nr_mmaps = evlist->core.nr_mmaps;
1065	struct mmap *mmap = evlist->mmap;
1066	struct mmap *overwrite_mmap = evlist->overwrite_mmap;
1067	struct perf_cpu_map *cpus = evlist->core.all_cpus;
1068	bool per_thread = evlist__per_thread(evlist);
1069
1070	if (per_thread)
1071		thread_data->nr_mmaps = nr_mmaps;
1072	else
1073		thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
1074						      thread_data->mask->maps.nbits);
1075	if (mmap) {
1076		thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
1077		if (!thread_data->maps)
1078			return -ENOMEM;
1079	}
1080	if (overwrite_mmap) {
1081		thread_data->overwrite_maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
1082		if (!thread_data->overwrite_maps) {
1083			zfree(&thread_data->maps);
1084			return -ENOMEM;
1085		}
1086	}
1087	pr_debug2("thread_data[%p]: nr_mmaps=%d, maps=%p, ow_maps=%p\n", thread_data,
1088		 thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps);
1089
1090	for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
1091		if (per_thread ||
1092		    test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) {
1093			if (thread_data->maps) {
1094				thread_data->maps[tm] = &mmap[m];
1095				pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n",
1096					  thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
1097			}
1098			if (thread_data->overwrite_maps) {
1099				thread_data->overwrite_maps[tm] = &overwrite_mmap[m];
1100				pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n",
1101					  thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
1102			}
1103			tm++;
1104		}
1105	}
1106
1107	return 0;
1108}
1109
1110static int record__thread_data_init_pollfd(struct record_thread *thread_data, struct evlist *evlist)
1111{
1112	int f, tm, pos;
1113	struct mmap *map, *overwrite_map;
1114
1115	fdarray__init(&thread_data->pollfd, 64);
1116
1117	for (tm = 0; tm < thread_data->nr_mmaps; tm++) {
1118		map = thread_data->maps ? thread_data->maps[tm] : NULL;
1119		overwrite_map = thread_data->overwrite_maps ?
1120				thread_data->overwrite_maps[tm] : NULL;
1121
1122		for (f = 0; f < evlist->core.pollfd.nr; f++) {
1123			void *ptr = evlist->core.pollfd.priv[f].ptr;
1124
1125			if ((map && ptr == map) || (overwrite_map && ptr == overwrite_map)) {
1126				pos = fdarray__dup_entry_from(&thread_data->pollfd, f,
1127							      &evlist->core.pollfd);
1128				if (pos < 0)
1129					return pos;
1130				pr_debug2("thread_data[%p]: pollfd[%d] <- event_fd=%d\n",
1131					 thread_data, pos, evlist->core.pollfd.entries[f].fd);
1132			}
1133		}
1134	}
1135
1136	return 0;
1137}
1138
1139static void record__free_thread_data(struct record *rec)
1140{
1141	int t;
1142	struct record_thread *thread_data = rec->thread_data;
1143
1144	if (thread_data == NULL)
1145		return;
1146
1147	for (t = 0; t < rec->nr_threads; t++) {
1148		record__thread_data_close_pipes(&thread_data[t]);
1149		zfree(&thread_data[t].maps);
1150		zfree(&thread_data[t].overwrite_maps);
1151		fdarray__exit(&thread_data[t].pollfd);
1152	}
1153
1154	zfree(&rec->thread_data);
1155}
1156
1157static int record__map_thread_evlist_pollfd_indexes(struct record *rec,
1158						    int evlist_pollfd_index,
1159						    int thread_pollfd_index)
1160{
1161	size_t x = rec->index_map_cnt;
1162
1163	if (realloc_array_as_needed(rec->index_map, rec->index_map_sz, x, NULL))
1164		return -ENOMEM;
1165	rec->index_map[x].evlist_pollfd_index = evlist_pollfd_index;
1166	rec->index_map[x].thread_pollfd_index = thread_pollfd_index;
1167	rec->index_map_cnt += 1;
1168	return 0;
1169}
1170
1171static int record__update_evlist_pollfd_from_thread(struct record *rec,
1172						    struct evlist *evlist,
1173						    struct record_thread *thread_data)
1174{
1175	struct pollfd *e_entries = evlist->core.pollfd.entries;
1176	struct pollfd *t_entries = thread_data->pollfd.entries;
1177	int err = 0;
1178	size_t i;
1179
1180	for (i = 0; i < rec->index_map_cnt; i++) {
1181		int e_pos = rec->index_map[i].evlist_pollfd_index;
1182		int t_pos = rec->index_map[i].thread_pollfd_index;
1183
1184		if (e_entries[e_pos].fd != t_entries[t_pos].fd ||
1185		    e_entries[e_pos].events != t_entries[t_pos].events) {
1186			pr_err("Thread and evlist pollfd index mismatch\n");
1187			err = -EINVAL;
1188			continue;
1189		}
1190		e_entries[e_pos].revents = t_entries[t_pos].revents;
1191	}
1192	return err;
1193}
1194
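/*
 * Duplicate the descriptors flagged fdarray_flag__non_perf_event (i.e. not
 * backed by a perf event mmap) from the evlist pollfd array into the main
 * thread's pollfd array, recording the index mapping of each entry.
 */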
1195static int record__dup_non_perf_events(struct record *rec,
1196				       struct evlist *evlist,
1197				       struct record_thread *thread_data)
1198{
1199	struct fdarray *fda = &evlist->core.pollfd;
1200	int i, ret;
1201
1202	for (i = 0; i < fda->nr; i++) {
1203		if (!(fda->priv[i].flags & fdarray_flag__non_perf_event))
1204			continue;
1205		ret = fdarray__dup_entry_from(&thread_data->pollfd, i, fda);
1206		if (ret < 0) {
1207			pr_err("Failed to duplicate descriptor in main thread pollfd\n");
1208			return ret;
1209		}
1210		pr_debug2("thread_data[%p]: pollfd[%d] <- non_perf_event fd=%d\n",
1211			  thread_data, ret, fda->entries[i].fd);
1212		ret = record__map_thread_evlist_pollfd_indexes(rec, i, ret);
1213		if (ret < 0) {
1214			pr_err("Failed to map thread and evlist pollfd indexes\n");
1215			return ret;
1216		}
1217	}
1218	return 0;
1219}
1220
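/*
 * Allocate and initialize one record_thread per streaming thread: entry 0
 * stands for the main thread and inherits the evlist's non-perf-event
 * descriptors, while the others get message/ack pipes for the start and
 * termination handshakes with the main thread.
 */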
1221static int record__alloc_thread_data(struct record *rec, struct evlist *evlist)
1222{
1223	int t, ret;
1224	struct record_thread *thread_data;
1225
1226	rec->thread_data = zalloc(rec->nr_threads * sizeof(*(rec->thread_data)));
1227	if (!rec->thread_data) {
1228		pr_err("Failed to allocate thread data\n");
1229		return -ENOMEM;
1230	}
1231	thread_data = rec->thread_data;
1232
1233	for (t = 0; t < rec->nr_threads; t++)
1234		record__thread_data_init_pipes(&thread_data[t]);
1235
1236	for (t = 0; t < rec->nr_threads; t++) {
1237		thread_data[t].rec = rec;
1238		thread_data[t].mask = &rec->thread_masks[t];
1239		ret = record__thread_data_init_maps(&thread_data[t], evlist);
1240		if (ret) {
1241			pr_err("Failed to initialize thread[%d] maps\n", t);
1242			goto out_free;
1243		}
1244		ret = record__thread_data_init_pollfd(&thread_data[t], evlist);
1245		if (ret) {
1246			pr_err("Failed to initialize thread[%d] pollfd\n", t);
1247			goto out_free;
1248		}
1249		if (t) {
1250			thread_data[t].tid = -1;
1251			ret = record__thread_data_open_pipes(&thread_data[t]);
1252			if (ret) {
1253				pr_err("Failed to open thread[%d] communication pipes\n", t);
1254				goto out_free;
1255			}
1256			ret = fdarray__add(&thread_data[t].pollfd, thread_data[t].pipes.msg[0],
1257					   POLLIN | POLLERR | POLLHUP, fdarray_flag__nonfilterable);
1258			if (ret < 0) {
1259				pr_err("Failed to add descriptor to thread[%d] pollfd\n", t);
1260				goto out_free;
1261			}
1262			thread_data[t].ctlfd_pos = ret;
1263			pr_debug2("thread_data[%p]: pollfd[%d] <- ctl_fd=%d\n",
1264				 thread_data, thread_data[t].ctlfd_pos,
1265				 thread_data[t].pipes.msg[0]);
1266		} else {
1267			thread_data[t].tid = gettid();
1268
1269			ret = record__dup_non_perf_events(rec, evlist, &thread_data[t]);
1270			if (ret < 0)
1271				goto out_free;
1272
1273			thread_data[t].ctlfd_pos = -1; /* Not used */
1274		}
1275	}
1276
1277	return 0;
1278
1279out_free:
1280	record__free_thread_data(rec);
1281
1282	return ret;
1283}
1284
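/*
 * mmap the ring buffers for the whole evlist, allocate the per-thread
 * data and, in threaded (directory) mode, create one output file per
 * mmap so every stream can be written independently.
 */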
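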
1285static int record__mmap_evlist(struct record *rec,
1286			       struct evlist *evlist)
1287{
1288	int i, ret;
1289	struct record_opts *opts = &rec->opts;
1290	bool auxtrace_overwrite = opts->auxtrace_snapshot_mode ||
1291				  opts->auxtrace_sample_mode;
1292	char msg[512];
1293
1294	if (opts->affinity != PERF_AFFINITY_SYS)
1295		cpu__setup_cpunode_map();
1296
1297	if (evlist__mmap_ex(evlist, opts->mmap_pages,
1298				 opts->auxtrace_mmap_pages,
1299				 auxtrace_overwrite,
1300				 opts->nr_cblocks, opts->affinity,
1301				 opts->mmap_flush, opts->comp_level) < 0) {
1302		if (errno == EPERM) {
1303			pr_err("Permission error mapping pages.\n"
1304			       "Consider increasing "
1305			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
1306			       "or try again with a smaller value of -m/--mmap_pages.\n"
1307			       "(current value: %u,%u)\n",
1308			       opts->mmap_pages, opts->auxtrace_mmap_pages);
1309			return -errno;
1310		} else {
1311			pr_err("failed to mmap with %d (%s)\n", errno,
1312				str_error_r(errno, msg, sizeof(msg)));
1313			if (errno)
1314				return -errno;
1315			else
1316				return -EINVAL;
1317		}
1318	}
1319
1320	if (evlist__initialize_ctlfd(evlist, opts->ctl_fd, opts->ctl_fd_ack))
1321		return -1;
1322
1323	ret = record__alloc_thread_data(rec, evlist);
1324	if (ret)
1325		return ret;
1326
1327	if (record__threads_enabled(rec)) {
1328		ret = perf_data__create_dir(&rec->data, evlist->core.nr_mmaps);
1329		if (ret) {
1330			pr_err("Failed to create data directory: %s\n", strerror(-ret));
1331			return ret;
1332		}
1333		for (i = 0; i < evlist->core.nr_mmaps; i++) {
1334			if (evlist->mmap)
1335				evlist->mmap[i].file = &rec->data.dir.files[i];
1336			if (evlist->overwrite_mmap)
1337				evlist->overwrite_mmap[i].file = &rec->data.dir.files[i];
1338		}
1339	}
1340
1341	return 0;
1342}
1343
1344static int record__mmap(struct record *rec)
1345{
1346	return record__mmap_evlist(rec, rec->evlist);
1347}
1348
1349static int record__open(struct record *rec)
1350{
1351	char msg[BUFSIZ];
1352	struct evsel *pos;
1353	struct evlist *evlist = rec->evlist;
1354	struct perf_session *session = rec->session;
1355	struct record_opts *opts = &rec->opts;
1356	int rc = 0;
1357
1358	evlist__config(evlist, opts, &callchain_param);
1359
1360	evlist__for_each_entry(evlist, pos) {
1361try_again:
1362		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
1363			if (evsel__fallback(pos, &opts->target, errno, msg, sizeof(msg))) {
1364				if (verbose > 0)
1365					ui__warning("%s\n", msg);
1366				goto try_again;
1367			}
1368			if ((errno == EINVAL || errno == EBADF) &&
1369			    pos->core.leader != &pos->core &&
1370			    pos->weak_group) {
1371				pos = evlist__reset_weak_group(evlist, pos, true);
1372				goto try_again;
1373			}
1374			rc = -errno;
1375			evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg));
1376			ui__error("%s\n", msg);
1377			goto out;
1378		}
1379
1380		pos->supported = true;
1381	}
1382
1383	if (symbol_conf.kptr_restrict && !evlist__exclude_kernel(evlist)) {
1384		pr_warning(
1385"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1386"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
1387"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1388"file is not found in the buildid cache or in the vmlinux path.\n\n"
1389"Samples in kernel modules won't be resolved at all.\n\n"
1390"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1391"even with a suitable vmlinux or kallsyms file.\n\n");
1392	}
1393
1394	if (evlist__apply_filters(evlist, &pos)) {
1395		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
1396			pos->filter ?: "BPF", evsel__name(pos), errno,
1397			str_error_r(errno, msg, sizeof(msg)));
1398		rc = -1;
1399		goto out;
1400	}
1401
1402	rc = record__mmap(rec);
1403	if (rc)
1404		goto out;
1405
1406	session->evlist = evlist;
1407	perf_session__set_id_hdr_size(session);
1408out:
1409	return rc;
1410}
1411
1412static void set_timestamp_boundary(struct record *rec, u64 sample_time)
1413{
1414	if (rec->evlist->first_sample_time == 0)
1415		rec->evlist->first_sample_time = sample_time;
1416
1417	if (sample_time)
1418		rec->evlist->last_sample_time = sample_time;
1419}
1420
1421static int process_sample_event(struct perf_tool *tool,
1422				union perf_event *event,
1423				struct perf_sample *sample,
1424				struct evsel *evsel,
1425				struct machine *machine)
1426{
1427	struct record *rec = container_of(tool, struct record, tool);
1428
1429	set_timestamp_boundary(rec, sample->time);
1430
1431	if (rec->buildid_all)
1432		return 0;
1433
1434	rec->samples++;
1435	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
1436}
1437
1438static int process_buildids(struct record *rec)
1439{
1440	struct perf_session *session = rec->session;
1441
1442	if (perf_data__size(&rec->data) == 0)
1443		return 0;
1444
1445	/*
1446	 * During this process, it'll load the kernel map and replace the
1447	 * dso->long_name with a real pathname it found.  In this case
1448	 * we prefer the vmlinux path like
1449	 *   /lib/modules/3.16.4/build/vmlinux
1450	 *
1451	 * rather than build-id path (in debug directory).
1452	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
1453	 */
1454	symbol_conf.ignore_vmlinux_buildid = true;
1455
1456	/*
1457	 * If --buildid-all is given, it marks all DSOs regardless of hits,
1458	 * so there is no need to process samples. But if timestamp_boundary
1459	 * is enabled, it still needs to walk all samples to get the
1460	 * timestamps of the first/last samples.
1461	 */
1462	if (rec->buildid_all && !rec->timestamp_boundary)
1463		rec->tool.sample = NULL;
1464
1465	return perf_session__process_events(session);
1466}
1467
1468static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
1469{
1470	int err;
1471	struct perf_tool *tool = data;
1472	/*
1473	 * As for the guest kernel, when processing the record & report
1474	 * subcommands we arrange the module mmap prior to the guest kernel
1475	 * mmap and trigger a DSO preload, because by default guest module
1476	 * symbols are loaded from guest kallsyms instead of
1477	 * /lib/modules/XXX/XXX.  This avoids missing symbols when the first
1478	 * address is in a module rather than in the guest kernel.
1479	 */
1480	err = perf_event__synthesize_modules(tool, process_synthesized_event,
1481					     machine);
1482	if (err < 0)
1483		pr_err("Couldn't record guest kernel [%d]'s reference"
1484		       " relocation symbol.\n", machine->pid);
1485
1486	/*
1487	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
1488	 * have no _text sometimes.
1489	 */
1490	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
1491						 machine);
1492	if (err < 0)
1493		pr_err("Couldn't record guest kernel [%d]'s reference"
1494		       " relocation symbol.\n", machine->pid);
1495}
1496
1497static struct perf_event_header finished_round_event = {
1498	.size = sizeof(struct perf_event_header),
1499	.type = PERF_RECORD_FINISHED_ROUND,
1500};
1501
1502static struct perf_event_header finished_init_event = {
1503	.size = sizeof(struct perf_event_header),
1504	.type = PERF_RECORD_FINISHED_INIT,
1505};
1506
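/*
 * With --affinity=node|cpu, migrate the reading thread to the NUMA node
 * or CPU mask of the mmap it is about to drain, so buffers are read close
 * to where they were allocated.
 */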
1507static void record__adjust_affinity(struct record *rec, struct mmap *map)
1508{
1509	if (rec->opts.affinity != PERF_AFFINITY_SYS &&
1510	    !bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits,
1511			  thread->mask->affinity.nbits)) {
1512		bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits);
1513		bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits,
1514			  map->affinity_mask.bits, thread->mask->affinity.nbits);
1515		sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),
1516					(cpu_set_t *)thread->mask->affinity.bits);
1517		if (verbose == 2) {
1518			pr_debug("threads[%d]: running on cpu%d: ", thread->tid, sched_getcpu());
1519			mmap_cpu_mask__scnprintf(&thread->mask->affinity, "affinity");
1520		}
1521	}
1522}
1523
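/*
 * Callback for zstd_compress_stream_to_records(): the first call
 * (increment == 0) lays down a PERF_RECORD_COMPRESSED header, subsequent
 * calls grow header.size by the number of bytes just compressed.
 */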
1524static size_t process_comp_header(void *record, size_t increment)
1525{
1526	struct perf_record_compressed *event = record;
1527	size_t size = sizeof(*event);
1528
1529	if (increment) {
1530		event->header.size += increment;
1531		return increment;
1532	}
1533
1534	event->header.type = PERF_RECORD_COMPRESSED;
1535	event->header.size = size;
1536
1537	return size;
1538}
1539
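/*
 * Compress a chunk of ring buffer data into one or more
 * PERF_RECORD_COMPRESSED records, accounting transferred vs compressed
 * byte counts per thread in threaded mode and per session otherwise.
 */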
1540static ssize_t zstd_compress(struct perf_session *session, struct mmap *map,
1541			    void *dst, size_t dst_size, void *src, size_t src_size)
1542{
1543	ssize_t compressed;
1544	size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
1545	struct zstd_data *zstd_data = &session->zstd_data;
1546
1547	if (map && map->file)
1548		zstd_data = &map->zstd_data;
1549
1550	compressed = zstd_compress_stream_to_records(zstd_data, dst, dst_size, src, src_size,
1551						     max_record_size, process_comp_header);
1552	if (compressed < 0)
1553		return compressed;
1554
1555	if (map && map->file) {
1556		thread->bytes_transferred += src_size;
1557		thread->bytes_compressed  += compressed;
1558	} else {
1559		session->bytes_transferred += src_size;
1560		session->bytes_compressed  += compressed;
1561	}
1562
1563	return compressed;
1564}
1565
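/*
 * Drain the regular or overwrite mmaps owned by the current thread,
 * pushing their data to the output (via AIO when enabled), read the AUX
 * area mmaps, and mark the round finished once at least one event was
 * written.
 */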
1566static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
1567				    bool overwrite, bool synch)
1568{
1569	u64 bytes_written = rec->bytes_written;
1570	int i;
1571	int rc = 0;
1572	int nr_mmaps;
1573	struct mmap **maps;
1574	int trace_fd = rec->data.file.fd;
1575	off_t off = 0;
1576
1577	if (!evlist)
1578		return 0;
1579
1580	nr_mmaps = thread->nr_mmaps;
1581	maps = overwrite ? thread->overwrite_maps : thread->maps;
1582
1583	if (!maps)
1584		return 0;
1585
1586	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
1587		return 0;
1588
1589	if (record__aio_enabled(rec))
1590		off = record__aio_get_pos(trace_fd);
1591
1592	for (i = 0; i < nr_mmaps; i++) {
1593		u64 flush = 0;
1594		struct mmap *map = maps[i];
1595
1596		if (map->core.base) {
1597			record__adjust_affinity(rec, map);
1598			if (synch) {
1599				flush = map->core.flush;
1600				map->core.flush = 1;
1601			}
1602			if (!record__aio_enabled(rec)) {
1603				if (perf_mmap__push(map, rec, record__pushfn) < 0) {
1604					if (synch)
1605						map->core.flush = flush;
1606					rc = -1;
1607					goto out;
1608				}
1609			} else {
1610				if (record__aio_push(rec, map, &off) < 0) {
1611					record__aio_set_pos(trace_fd, off);
1612					if (synch)
1613						map->core.flush = flush;
1614					rc = -1;
1615					goto out;
1616				}
1617			}
1618			if (synch)
1619				map->core.flush = flush;
1620		}
1621
1622		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
1623		    !rec->opts.auxtrace_sample_mode &&
1624		    record__auxtrace_mmap_read(rec, map) != 0) {
1625			rc = -1;
1626			goto out;
1627		}
1628	}
1629
1630	if (record__aio_enabled(rec))
1631		record__aio_set_pos(trace_fd, off);
1632
1633	/*
1634	 * Mark the round finished in case we wrote
1635	 * at least one event.
1636	 *
1637	 * No need for round events in directory mode,
1638	 * because per-cpu maps and files have data
1639	 * sorted by kernel.
1640	 */
1641	if (!record__threads_enabled(rec) && bytes_written != rec->bytes_written)
1642		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));
1643
1644	if (overwrite)
1645		evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
1646out:
1647	return rc;
1648}
1649
1650static int record__mmap_read_all(struct record *rec, bool synch)
1651{
1652	int err;
1653
1654	err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
1655	if (err)
1656		return err;
1657
1658	return record__mmap_read_evlist(rec, rec->evlist, true, synch);
1659}
1660
1661static void record__thread_munmap_filtered(struct fdarray *fda, int fd,
1662					   void *arg __maybe_unused)
1663{
1664	struct perf_mmap *map = fda->priv[fd].ptr;
1665
1666	if (map)
1667		perf_mmap__put(map);
1668}
1669
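/*
 * Body of a streaming thread: acknowledge start over the ack pipe, then
 * alternate between draining its mmaps and polling their descriptors
 * until the message pipe is closed (POLLHUP), finish with a synchronous
 * drain and acknowledge termination.
 */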
1670static void *record__thread(void *arg)
1671{
1672	enum thread_msg msg = THREAD_MSG__READY;
1673	bool terminate = false;
1674	struct fdarray *pollfd;
1675	int err, ctlfd_pos;
1676
1677	thread = arg;
1678	thread->tid = gettid();
1679
1680	err = write(thread->pipes.ack[1], &msg, sizeof(msg));
1681	if (err == -1)
1682		pr_warning("threads[%d]: failed to notify on start: %s\n",
1683			   thread->tid, strerror(errno));
1684
1685	pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
1686
1687	pollfd = &thread->pollfd;
1688	ctlfd_pos = thread->ctlfd_pos;
1689
1690	for (;;) {
1691		unsigned long long hits = thread->samples;
1692
1693		if (record__mmap_read_all(thread->rec, false) < 0 || terminate)
1694			break;
1695
1696		if (hits == thread->samples) {
1697
1698			err = fdarray__poll(pollfd, -1);
1699			/*
1700			 * Propagate the error, if any. Ignore a positive
1701			 * number of returned events and interrupt errors.
1702			 */
1703			if (err > 0 || (err < 0 && errno == EINTR))
1704				err = 0;
1705			thread->waking++;
1706
1707			if (fdarray__filter(pollfd, POLLERR | POLLHUP,
1708					    record__thread_munmap_filtered, NULL) == 0)
1709				break;
1710		}
1711
1712		if (pollfd->entries[ctlfd_pos].revents & POLLHUP) {
1713			terminate = true;
1714			close(thread->pipes.msg[0]);
1715			thread->pipes.msg[0] = -1;
1716			pollfd->entries[ctlfd_pos].fd = -1;
1717			pollfd->entries[ctlfd_pos].events = 0;
1718		}
1719
1720		pollfd->entries[ctlfd_pos].revents = 0;
1721	}
1722	record__mmap_read_all(thread->rec, true);
1723
1724	err = write(thread->pipes.ack[1], &msg, sizeof(msg));
1725	if (err == -1)
1726		pr_warning("threads[%d]: failed to notify on termination: %s\n",
1727			   thread->tid, strerror(errno));
1728
1729	return NULL;
1730}
1731
1732static void record__init_features(struct record *rec)
1733{
1734	struct perf_session *session = rec->session;
1735	int feat;
1736
1737	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1738		perf_header__set_feat(&session->header, feat);
1739
1740	if (rec->no_buildid)
1741		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1742
1743#ifdef HAVE_LIBTRACEEVENT
1744	if (!have_tracepoints(&rec->evlist->core.entries))
1745		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1746#endif
1747
1748	if (!rec->opts.branch_stack)
1749		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
1750
1751	if (!rec->opts.full_auxtrace)
1752		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
1753
1754	if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
1755		perf_header__clear_feat(&session->header, HEADER_CLOCKID);
1756
1757	if (!rec->opts.use_clockid)
1758		perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA);
1759
1760	if (!record__threads_enabled(rec))
1761		perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
1762
1763	if (!record__comp_enabled(rec))
1764		perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
1765
1766	perf_header__clear_feat(&session->header, HEADER_STAT);
1767}
1768
1769static void
1770record__finish_output(struct record *rec)
1771{
1772	int i;
1773	struct perf_data *data = &rec->data;
1774	int fd = perf_data__fd(data);
1775
1776	if (data->is_pipe)
1777		return;
1778
1779	rec->session->header.data_size += rec->bytes_written;
1780	data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
1781	if (record__threads_enabled(rec)) {
1782		for (i = 0; i < data->dir.nr; i++)
1783			data->dir.files[i].size = lseek(data->dir.files[i].fd, 0, SEEK_CUR);
1784	}
1785
1786	if (!rec->no_buildid) {
1787		process_buildids(rec);
1788
1789		if (rec->buildid_all)
1790			dsos__hit_all(rec->session);
1791	}
1792	perf_session__write_header(rec->session, rec->evlist, fd, true);
1793
1794	return;
1795}
1796
1797static int record__synthesize_workload(struct record *rec, bool tail)
1798{
1799	int err;
1800	struct perf_thread_map *thread_map;
1801	bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
1802
1803	if (rec->opts.tail_synthesize != tail)
1804		return 0;
1805
1806	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
1807	if (thread_map == NULL)
1808		return -1;
1809
1810	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
1811						 process_synthesized_event,
1812						 &rec->session->machines.host,
1813						 needs_mmap,
1814						 rec->opts.sample_address);
1815	perf_thread_map__put(thread_map);
1816	return err;
1817}
1818
1819static int write_finished_init(struct record *rec, bool tail)
1820{
1821	if (rec->opts.tail_synthesize != tail)
1822		return 0;
1823
1824	return record__write(rec, NULL, &finished_init_event, sizeof(finished_init_event));
1825}
1826
1827static int record__synthesize(struct record *rec, bool tail);
1828
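/*
 * Rotate the output: finish the current perf.data and switch to a new
 * timestamped file. When --switch-output is given a file limit, the
 * filenames are kept in a ring and the oldest file is removed.
 */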
1829static int
1830record__switch_output(struct record *rec, bool at_exit)
1831{
1832	struct perf_data *data = &rec->data;
1833	int fd, err;
1834	char *new_filename;
1835
1836	/* "InvalidTimestamp" is the same size as a timestamp like "2015122520103046" */
1837	char timestamp[] = "InvalidTimestamp";
1838
1839	record__aio_mmap_read_sync(rec);
1840
1841	write_finished_init(rec, true);
1842
1843	record__synthesize(rec, true);
1844	if (target__none(&rec->opts.target))
1845		record__synthesize_workload(rec, true);
1846
1847	rec->samples = 0;
1848	record__finish_output(rec);
1849	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
1850	if (err) {
1851		pr_err("Failed to get current timestamp\n");
1852		return -EINVAL;
1853	}
1854
1855	fd = perf_data__switch(data, timestamp,
1856				    rec->session->header.data_offset,
1857				    at_exit, &new_filename);
1858	if (fd >= 0 && !at_exit) {
1859		rec->bytes_written = 0;
1860		rec->session->header.data_size = 0;
1861	}
1862
1863	if (!quiet)
1864		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
1865			data->path, timestamp);
1866
1867	if (rec->switch_output.num_files) {
1868		int n = rec->switch_output.cur_file + 1;
1869
1870		if (n >= rec->switch_output.num_files)
1871			n = 0;
1872		rec->switch_output.cur_file = n;
1873		if (rec->switch_output.filenames[n]) {
1874			remove(rec->switch_output.filenames[n]);
1875			zfree(&rec->switch_output.filenames[n]);
1876		}
1877		rec->switch_output.filenames[n] = new_filename;
1878	} else {
1879		free(new_filename);
1880	}
1881
1882	/* Output tracking events */
1883	if (!at_exit) {
1884		record__synthesize(rec, false);
1885
1886		/*
1887		 * In 'perf record --switch-output' without -a,
1888		 * record__synthesize() in record__switch_output() won't
1889		 * generate tracking events because there's no thread_map
1890		 * in evlist. As a result the newly created perf.data lacks
1891		 * map and comm information.
1892		 * Create a fake thread_map and directly call
1893		 * perf_event__synthesize_thread_map() for those events.
1894		 */
1895		if (target__none(&rec->opts.target))
1896			record__synthesize_workload(rec, false);
1897		write_finished_init(rec, false);
1898	}
1899	return fd;
1900}
1901
1902static void __record__save_lost_samples(struct record *rec, struct evsel *evsel,
1903					struct perf_record_lost_samples *lost,
1904					int cpu_idx, int thread_idx, u64 lost_count,
1905					u16 misc_flag)
1906{
1907	struct perf_sample_id *sid;
1908	struct perf_sample sample = {};
1909	int id_hdr_size;
1910
1911	lost->lost = lost_count;
1912	if (evsel->core.ids) {
1913		sid = xyarray__entry(evsel->core.sample_id, cpu_idx, thread_idx);
1914		sample.id = sid->id;
1915	}
1916
1917	id_hdr_size = perf_event__synthesize_id_sample((void *)(lost + 1),
1918						       evsel->core.attr.sample_type, &sample);
1919	lost->header.size = sizeof(*lost) + id_hdr_size;
1920	lost->header.misc = misc_flag;
1921	record__write(rec, NULL, lost, lost->header.size);
1922}
1923
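/*
 * Read the lost-sample counts accumulated by the kernel (and by BPF
 * filters) for each event/CPU/thread and emit matching
 * PERF_RECORD_LOST_SAMPLES records into the output.
 */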
1924static void record__read_lost_samples(struct record *rec)
1925{
1926	struct perf_session *session = rec->session;
1927	struct perf_record_lost_samples *lost = NULL;
1928	struct evsel *evsel;
1929
1930	/* there was an error during record__open */
1931	if (session->evlist == NULL)
1932		return;
1933
1934	evlist__for_each_entry(session->evlist, evsel) {
1935		struct xyarray *xy = evsel->core.sample_id;
1936		u64 lost_count;
1937
1938		if (xy == NULL || evsel->core.fd == NULL)
1939			continue;
1940		if (xyarray__max_x(evsel->core.fd) != xyarray__max_x(xy) ||
1941		    xyarray__max_y(evsel->core.fd) != xyarray__max_y(xy)) {
1942			pr_debug("Unmatched FD vs. sample ID: skip reading LOST count\n");
1943			continue;
1944		}
1945
1946		for (int x = 0; x < xyarray__max_x(xy); x++) {
1947			for (int y = 0; y < xyarray__max_y(xy); y++) {
1948				struct perf_counts_values count;
1949
1950				if (perf_evsel__read(&evsel->core, x, y, &count) < 0) {
1951					pr_debug("read LOST count failed\n");
1952					goto out;
1953				}
1954
1955				if (count.lost) {
1956					if (!lost) {
1957						lost = zalloc(sizeof(*lost) +
1958							      session->machines.host.id_hdr_size);
1959						if (!lost) {
1960							pr_debug("Memory allocation failed\n");
1961							return;
1962						}
1963						lost->header.type = PERF_RECORD_LOST_SAMPLES;
1964					}
1965					__record__save_lost_samples(rec, evsel, lost,
1966								    x, y, count.lost, 0);
1967				}
1968			}
1969		}
1970
1971		lost_count = perf_bpf_filter__lost_count(evsel);
1972		if (lost_count) {
1973			if (!lost) {
1974				lost = zalloc(sizeof(*lost) +
1975					      session->machines.host.id_hdr_size);
1976				if (!lost) {
1977					pr_debug("Memory allocation failed\n");
1978					return;
1979				}
1980				lost->header.type = PERF_RECORD_LOST_SAMPLES;
1981			}
1982			__record__save_lost_samples(rec, evsel, lost, 0, 0, lost_count,
1983						    PERF_RECORD_MISC_LOST_SAMPLES_BPF);
1984		}
1985	}
1986out:
1987	free(lost);
1988}
1989
1990static volatile sig_atomic_t workload_exec_errno;
1991
1992/*
1993 * evlist__prepare_workload will send a SIGUSR1
1994 * if the fork fails, since we asked for it by setting its
1995 * want_signal to true.
1996 */
1997static void workload_exec_failed_signal(int signo __maybe_unused,
1998					siginfo_t *info,
1999					void *ucontext __maybe_unused)
2000{
2001	workload_exec_errno = info->si_value.sival_int;
2002	done = 1;
2003	child_finished = 1;
2004}
2005
2006static void snapshot_sig_handler(int sig);
2007static void alarm_sig_handler(int sig);
2008
2009static const struct perf_event_mmap_page *evlist__pick_pc(struct evlist *evlist)
2010{
2011	if (evlist) {
2012		if (evlist->mmap && evlist->mmap[0].core.base)
2013			return evlist->mmap[0].core.base;
2014		if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
2015			return evlist->overwrite_mmap[0].core.base;
2016	}
2017	return NULL;
2018}
2019
2020static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
2021{
2022	const struct perf_event_mmap_page *pc = evlist__pick_pc(rec->evlist);
2023	if (pc)
2024		return pc;
2025	return NULL;
2026}
2027
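/*
 * Synthesize the side-band records (time conversion, id index, auxtrace
 * info, kernel and module mmaps, thread and CPU maps, BPF and cgroup
 * events, existing threads, ...) that perf report needs to make sense of
 * the samples.
 */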
2028static int record__synthesize(struct record *rec, bool tail)
2029{
2030	struct perf_session *session = rec->session;
2031	struct machine *machine = &session->machines.host;
2032	struct perf_data *data = &rec->data;
2033	struct record_opts *opts = &rec->opts;
2034	struct perf_tool *tool = &rec->tool;
2035	int err = 0;
2036	event_op f = process_synthesized_event;
2037
2038	if (rec->opts.tail_synthesize != tail)
2039		return 0;
2040
2041	if (data->is_pipe) {
2042		err = perf_event__synthesize_for_pipe(tool, session, data,
2043						      process_synthesized_event);
2044		if (err < 0)
2045			goto out;
2046
2047		rec->bytes_written += err;
2048	}
2049
2050	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
2051					  process_synthesized_event, machine);
2052	if (err)
2053		goto out;
2054
2055	/* Synthesize id_index before auxtrace_info */
2056	err = perf_event__synthesize_id_index(tool,
2057					      process_synthesized_event,
2058					      session->evlist, machine);
2059	if (err)
2060		goto out;
2061
2062	if (rec->opts.full_auxtrace) {
2063		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
2064					session, process_synthesized_event);
2065		if (err)
2066			goto out;
2067	}
2068
2069	if (!evlist__exclude_kernel(rec->evlist)) {
2070		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
2071							 machine);
2072		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
2073				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
2074				   "Check /proc/kallsyms permission or run as root.\n");
2075
2076		err = perf_event__synthesize_modules(tool, process_synthesized_event,
2077						     machine);
2078		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
2079				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
2080				   "Check /proc/modules permission or run as root.\n");
2081	}
2082
2083	if (perf_guest) {
2084		machines__process_guests(&session->machines,
2085					 perf_event__synthesize_guest_os, tool);
2086	}
2087
2088	err = perf_event__synthesize_extra_attr(&rec->tool,
2089						rec->evlist,
2090						process_synthesized_event,
2091						data->is_pipe);
2092	if (err)
2093		goto out;
2094
2095	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
2096						 process_synthesized_event,
2097						NULL);
2098	if (err < 0) {
2099		pr_err("Couldn't synthesize thread map.\n");
2100		return err;
2101	}
2102
2103	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus,
2104					     process_synthesized_event, NULL);
2105	if (err < 0) {
2106		pr_err("Couldn't synthesize cpu map.\n");
2107		return err;
2108	}
2109
2110	err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
2111						machine, opts);
2112	if (err < 0) {
2113		pr_warning("Couldn't synthesize bpf events.\n");
2114		err = 0;
2115	}
2116
2117	if (rec->opts.synth & PERF_SYNTH_CGROUP) {
2118		err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
2119						     machine);
2120		if (err < 0) {
2121			pr_warning("Couldn't synthesize cgroup events.\n");
2122			err = 0;
2123		}
2124	}
2125
2126	if (rec->opts.nr_threads_synthesize > 1) {
2127		mutex_init(&synth_lock);
2128		perf_set_multithreaded();
2129		f = process_locked_synthesized_event;
2130	}
2131
2132	if (rec->opts.synth & PERF_SYNTH_TASK) {
2133		bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
2134
2135		err = __machine__synthesize_threads(machine, tool, &opts->target,
2136						    rec->evlist->core.threads,
2137						    f, needs_mmap, opts->sample_address,
2138						    rec->opts.nr_threads_synthesize);
2139	}
2140
2141	if (rec->opts.nr_threads_synthesize > 1) {
2142		perf_set_singlethreaded();
2143		mutex_destroy(&synth_lock);
2144	}
2145
2146out:
2147	return err;
2148}
2149
2150static int record__process_signal_event(union perf_event *event __maybe_unused, void *data)
2151{
2152	struct record *rec = data;
2153	pthread_kill(rec->thread_id, SIGUSR2);
2154	return 0;
2155}
2156
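/*
 * Set up the side-band evlist: hook a --switch-output-event evlist up to
 * a SIGUSR2-sending callback and, when built with libbpf support, add the
 * PERF_RECORD_BPF_EVENT source used to annotate BPF programs.
 */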
2157static int record__setup_sb_evlist(struct record *rec)
2158{
2159	struct record_opts *opts = &rec->opts;
2160
2161	if (rec->sb_evlist != NULL) {
2162		/*
2163		 * We get here if --switch-output-event populated the
2164		 * sb_evlist, so associate a callback that will send a SIGUSR2
2165		 * to the main thread.
2166		 */
2167		evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
2168		rec->thread_id = pthread_self();
2169	}
2170#ifdef HAVE_LIBBPF_SUPPORT
2171	if (!opts->no_bpf_event) {
2172		if (rec->sb_evlist == NULL) {
2173			rec->sb_evlist = evlist__new();
2174
2175			if (rec->sb_evlist == NULL) {
2176				pr_err("Couldn't create side band evlist.\n");
2177				return -1;
2178			}
2179		}
2180
2181		if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
2182			pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n");
2183			return -1;
2184		}
2185	}
2186#endif
2187	if (evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
2188		pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
2189		opts->no_bpf_event = true;
2190	}
2191
2192	return 0;
2193}
2194
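/*
 * For --clockid, record the clockid and a pair of reference timestamps
 * (gettimeofday() wall clock vs the selected clock) in the header, so
 * sample times can later be related to wall-clock time.
 */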
2195static int record__init_clock(struct record *rec)
2196{
2197	struct perf_session *session = rec->session;
2198	struct timespec ref_clockid;
2199	struct timeval ref_tod;
2200	u64 ref;
2201
2202	if (!rec->opts.use_clockid)
2203		return 0;
2204
2205	if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
2206		session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;
2207
2208	session->header.env.clock.clockid = rec->opts.clockid;
2209
2210	if (gettimeofday(&ref_tod, NULL) != 0) {
2211		pr_err("gettimeofday failed, cannot set reference time.\n");
2212		return -1;
2213	}
2214
2215	if (clock_gettime(rec->opts.clockid, &ref_clockid)) {
2216		pr_err("clock_gettime failed, cannot set reference time.\n");
2217		return -1;
2218	}
2219
2220	ref = (u64) ref_tod.tv_sec * NSEC_PER_SEC +
2221	      (u64) ref_tod.tv_usec * NSEC_PER_USEC;
2222
2223	session->header.env.clock.tod_ns = ref;
2224
2225	ref = (u64) ref_clockid.tv_sec * NSEC_PER_SEC +
2226	      (u64) ref_clockid.tv_nsec;
2227
2228	session->header.env.clock.clockid_ns = ref;
2229	return 0;
2230}
2231
2232static void hit_auxtrace_snapshot_trigger(struct record *rec)
2233{
2234	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2235		trigger_hit(&auxtrace_snapshot_trigger);
2236		auxtrace_record__snapshot_started = 1;
2237		if (auxtrace_record__snapshot_start(rec->itr))
2238			trigger_error(&auxtrace_snapshot_trigger);
2239	}
2240}
2241
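/*
 * Ask a streaming thread to terminate by closing the write end of its
 * message pipe, then wait for the acknowledgement on the ack pipe.
 */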
2242static int record__terminate_thread(struct record_thread *thread_data)
2243{
2244	int err;
2245	enum thread_msg ack = THREAD_MSG__UNDEFINED;
2246	pid_t tid = thread_data->tid;
2247
2248	close(thread_data->pipes.msg[1]);
2249	thread_data->pipes.msg[1] = -1;
2250	err = read(thread_data->pipes.ack[0], &ack, sizeof(ack));
2251	if (err > 0)
2252		pr_debug2("threads[%d]: sent %s\n", tid, thread_msg_tags[ack]);
2253	else
2254		pr_warning("threads[%d]: failed to receive termination notification from %d\n",
2255			   thread->tid, tid);
2256
2257	return 0;
2258}
2259
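/*
 * Start the streaming threads with all signals blocked (signals are
 * handled by the main thread), pin each one to its configured affinity
 * mask where supported, and wait for its ready handshake on the ack pipe.
 */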
2260static int record__start_threads(struct record *rec)
2261{
2262	int t, tt, err, ret = 0, nr_threads = rec->nr_threads;
2263	struct record_thread *thread_data = rec->thread_data;
2264	sigset_t full, mask;
2265	pthread_t handle;
2266	pthread_attr_t attrs;
2267
2268	thread = &thread_data[0];
2269
2270	if (!record__threads_enabled(rec))
2271		return 0;
2272
2273	sigfillset(&full);
2274	if (sigprocmask(SIG_SETMASK, &full, &mask)) {
2275		pr_err("Failed to block signals on threads start: %s\n", strerror(errno));
2276		return -1;
2277	}
2278
2279	pthread_attr_init(&attrs);
2280	pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
2281
2282	for (t = 1; t < nr_threads; t++) {
2283		enum thread_msg msg = THREAD_MSG__UNDEFINED;
2284
2285#ifdef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
2286		pthread_attr_setaffinity_np(&attrs,
2287					    MMAP_CPU_MASK_BYTES(&(thread_data[t].mask->affinity)),
2288					    (cpu_set_t *)(thread_data[t].mask->affinity.bits));
2289#endif
2290		if (pthread_create(&handle, &attrs, record__thread, &thread_data[t])) {
2291			for (tt = 1; tt < t; tt++)
2292				record__terminate_thread(&thread_data[tt]);
2293			pr_err("Failed to start threads: %s\n", strerror(errno));
2294			ret = -1;
2295			goto out_err;
2296		}
2297
2298		err = read(thread_data[t].pipes.ack[0], &msg, sizeof(msg));
2299		if (err > 0)
2300			pr_debug2("threads[%d]: sent %s\n", rec->thread_data[t].tid,
2301				  thread_msg_tags[msg]);
2302		else
2303			pr_warning("threads[%d]: failed to receive start notification from %d\n",
2304				   thread->tid, rec->thread_data[t].tid);
2305	}
2306
2307	sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),
2308			(cpu_set_t *)thread->mask->affinity.bits);
2309
2310	pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
2311
2312out_err:
2313	pthread_attr_destroy(&attrs);
2314
2315	if (sigprocmask(SIG_SETMASK, &mask, NULL)) {
2316		pr_err("Failed to unblock signals on threads start: %s\n", strerror(errno));
2317		ret = -1;
2318	}
2319
2320	return ret;
2321}
2322
2323static int record__stop_threads(struct record *rec)
2324{
2325	int t;
2326	struct record_thread *thread_data = rec->thread_data;
2327
2328	for (t = 1; t < rec->nr_threads; t++)
2329		record__terminate_thread(&thread_data[t]);
2330
2331	for (t = 0; t < rec->nr_threads; t++) {
2332		rec->samples += thread_data[t].samples;
2333		if (!record__threads_enabled(rec))
2334			continue;
2335		rec->session->bytes_transferred += thread_data[t].bytes_transferred;
2336		rec->session->bytes_compressed += thread_data[t].bytes_compressed;
2337		pr_debug("threads[%d]: samples=%lld, wakes=%ld, ", thread_data[t].tid,
2338			 thread_data[t].samples, thread_data[t].waking);
2339		if (thread_data[t].bytes_transferred && thread_data[t].bytes_compressed)
2340			pr_debug("transferred=%" PRIu64 ", compressed=%" PRIu64 "\n",
2341				 thread_data[t].bytes_transferred, thread_data[t].bytes_compressed);
2342		else
2343			pr_debug("written=%" PRIu64 "\n", thread_data[t].bytes_written);
2344	}
2345
2346	return 0;
2347}
2348
2349static unsigned long record__waking(struct record *rec)
2350{
2351	int t;
2352	unsigned long waking = 0;
2353	struct record_thread *thread_data = rec->thread_data;
2354
2355	for (t = 0; t < rec->nr_threads; t++)
2356		waking += thread_data[t].waking;
2357
2358	return waking;
2359}
2360
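/*
 * The main body of 'perf record': open and mmap the events, fork or
 * attach to the workload, synthesize the side-band data, then loop
 * draining the ring buffers until done, handling output switching,
 * auxtrace snapshots and control-fd commands along the way.
 */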
2361static int __cmd_record(struct record *rec, int argc, const char **argv)
2362{
2363	int err;
2364	int status = 0;
2365	const bool forks = argc > 0;
2366	struct perf_tool *tool = &rec->tool;
2367	struct record_opts *opts = &rec->opts;
2368	struct perf_data *data = &rec->data;
2369	struct perf_session *session;
2370	bool disabled = false, draining = false;
2371	int fd;
2372	float ratio = 0;
2373	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
2374
2375	atexit(record__sig_exit);
2376	signal(SIGCHLD, sig_handler);
2377	signal(SIGINT, sig_handler);
2378	signal(SIGTERM, sig_handler);
2379	signal(SIGSEGV, sigsegv_handler);
2380
2381	if (rec->opts.record_namespaces)
2382		tool->namespace_events = true;
2383
2384	if (rec->opts.record_cgroup) {
2385#ifdef HAVE_FILE_HANDLE
2386		tool->cgroup_events = true;
2387#else
2388		pr_err("cgroup tracking is not supported\n");
2389		return -1;
2390#endif
2391	}
2392
2393	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
2394		signal(SIGUSR2, snapshot_sig_handler);
2395		if (rec->opts.auxtrace_snapshot_mode)
2396			trigger_on(&auxtrace_snapshot_trigger);
2397		if (rec->switch_output.enabled)
2398			trigger_on(&switch_output_trigger);
2399	} else {
2400		signal(SIGUSR2, SIG_IGN);
2401	}
2402
2403	session = perf_session__new(data, tool);
2404	if (IS_ERR(session)) {
2405		pr_err("Perf session creation failed.\n");
2406		return PTR_ERR(session);
2407	}
2408
2409	if (record__threads_enabled(rec)) {
2410		if (perf_data__is_pipe(&rec->data)) {
2411			pr_err("Parallel trace streaming is not available in pipe mode.\n");
2412			return -1;
2413		}
2414		if (rec->opts.full_auxtrace) {
2415			pr_err("Parallel trace streaming is not available in AUX area tracing mode.\n");
2416			return -1;
2417		}
2418	}
2419
2420	fd = perf_data__fd(data);
2421	rec->session = session;
2422
2423	if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
2424		pr_err("Compression initialization failed.\n");
2425		return -1;
2426	}
2427#ifdef HAVE_EVENTFD_SUPPORT
2428	done_fd = eventfd(0, EFD_NONBLOCK);
2429	if (done_fd < 0) {
2430		pr_err("Failed to create wakeup eventfd, error: %m\n");
2431		status = -1;
2432		goto out_delete_session;
2433	}
2434	err = evlist__add_wakeup_eventfd(rec->evlist, done_fd);
2435	if (err < 0) {
2436		pr_err("Failed to add wakeup eventfd to poll list\n");
2437		status = err;
2438		goto out_delete_session;
2439	}
2440#endif // HAVE_EVENTFD_SUPPORT
2441
2442	session->header.env.comp_type  = PERF_COMP_ZSTD;
2443	session->header.env.comp_level = rec->opts.comp_level;
2444
2445	if (rec->opts.kcore &&
2446	    !record__kcore_readable(&session->machines.host)) {
2447		pr_err("ERROR: kcore is not readable.\n");
2448		return -1;
2449	}
2450
2451	if (record__init_clock(rec))
2452		return -1;
2453
2454	record__init_features(rec);
2455
2456	if (forks) {
2457		err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe,
2458					       workload_exec_failed_signal);
2459		if (err < 0) {
2460			pr_err("Couldn't run the workload!\n");
2461			status = err;
2462			goto out_delete_session;
2463		}
2464	}
2465
2466	/*
2467	 * If we have just a single event and are sending data
2468	 * through a pipe, we need to force the id allocation,
2469	 * because we synthesize the event name through the pipe
2470	 * and need the id for that.
2471	 */
2472	if (data->is_pipe && rec->evlist->core.nr_entries == 1)
2473		rec->opts.sample_id = true;
2474
2475	evlist__uniquify_name(rec->evlist);
2476
2477	/* Debug message used by test scripts */
2478	pr_debug3("perf record opening and mmapping events\n");
2479	if (record__open(rec) != 0) {
2480		err = -1;
2481		goto out_free_threads;
2482	}
2483	/* Debug message used by test scripts */
2484	pr_debug3("perf record done opening and mmapping events\n");
2485	session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
2486
2487	if (rec->opts.kcore) {
2488		err = record__kcore_copy(&session->machines.host, data);
2489		if (err) {
2490			pr_err("ERROR: Failed to copy kcore\n");
2491			goto out_free_threads;
2492		}
2493	}
2494
2495	/*
2496	 * Normally perf_session__new would do this, but it doesn't have the
2497	 * evlist.
2498	 */
2499	if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) {
2500		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
2501		rec->tool.ordered_events = false;
2502	}
2503
2504	if (evlist__nr_groups(rec->evlist) == 0)
2505		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
2506
2507	if (data->is_pipe) {
2508		err = perf_header__write_pipe(fd);
2509		if (err < 0)
2510			goto out_free_threads;
2511	} else {
2512		err = perf_session__write_header(session, rec->evlist, fd, false);
2513		if (err < 0)
2514			goto out_free_threads;
2515	}
2516
2517	err = -1;
2518	if (!rec->no_buildid
2519	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
2520		pr_err("Couldn't generate buildids. "
2521		       "Use --no-buildid to profile anyway.\n");
2522		goto out_free_threads;
2523	}
2524
2525	err = record__setup_sb_evlist(rec);
2526	if (err)
2527		goto out_free_threads;
2528
2529	err = record__synthesize(rec, false);
2530	if (err < 0)
2531		goto out_free_threads;
2532
2533	if (rec->realtime_prio) {
2534		struct sched_param param;
2535
2536		param.sched_priority = rec->realtime_prio;
2537		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
2538			pr_err("Could not set realtime priority.\n");
2539			err = -1;
2540			goto out_free_threads;
2541		}
2542	}
2543
2544	if (record__start_threads(rec))
2545		goto out_free_threads;
2546
2547	/*
2548	 * When perf is starting the traced process, all the events
2549	 * (apart from group members) have enable_on_exec=1 set,
2550	 * so don't spoil it by prematurely enabling them.
2551	 */
2552	if (!target__none(&opts->target) && !opts->target.initial_delay)
2553		evlist__enable(rec->evlist);
2554
2555	/*
2556	 * Let the child rip
2557	 */
2558	if (forks) {
2559		struct machine *machine = &session->machines.host;
2560		union perf_event *event;
2561		pid_t tgid;
2562
2563		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
2564		if (event == NULL) {
2565			err = -ENOMEM;
2566			goto out_child;
2567		}
2568
2569		/*
2570		 * Some H/W events are generated before the COMM event,
2571		 * which is emitted during exec(), so perf script
2572		 * cannot see a correct process name for those events.
2573		 * Synthesize a COMM event to prevent it.
2574		 */
2575		tgid = perf_event__synthesize_comm(tool, event,
2576						   rec->evlist->workload.pid,
2577						   process_synthesized_event,
2578						   machine);
2579		free(event);
2580
2581		if (tgid == -1)
2582			goto out_child;
2583
2584		event = malloc(sizeof(event->namespaces) +
2585			       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
2586			       machine->id_hdr_size);
2587		if (event == NULL) {
2588			err = -ENOMEM;
2589			goto out_child;
2590		}
2591
2592		/*
2593		 * Synthesize NAMESPACES event for the command specified.
2594		 */
2595		perf_event__synthesize_namespaces(tool, event,
2596						  rec->evlist->workload.pid,
2597						  tgid, process_synthesized_event,
2598						  machine);
2599		free(event);
2600
2601		evlist__start_workload(rec->evlist);
2602	}
2603
2604	if (opts->target.initial_delay) {
2605		pr_info(EVLIST_DISABLED_MSG);
2606		if (opts->target.initial_delay > 0) {
2607			usleep(opts->target.initial_delay * USEC_PER_MSEC);
2608			evlist__enable(rec->evlist);
2609			pr_info(EVLIST_ENABLED_MSG);
2610		}
2611	}
2612
2613	err = event_enable_timer__start(rec->evlist->eet);
2614	if (err)
2615		goto out_child;
2616
2617	/* Debug message used by test scripts */
2618	pr_debug3("perf record has started\n");
2619	fflush(stderr);
2620
2621	trigger_ready(&auxtrace_snapshot_trigger);
2622	trigger_ready(&switch_output_trigger);
2623	perf_hooks__invoke_record_start();
2624
2625	/*
2626	 * Must write FINISHED_INIT so it will be seen after all other
2627	 * synthesized user events, but before any regular events.
2628	 */
2629	err = write_finished_init(rec, false);
2630	if (err < 0)
2631		goto out_child;
2632
2633	for (;;) {
2634		unsigned long long hits = thread->samples;
2635
2636		/*
2637		 * rec->evlist->bkw_mmap_state may be BKW_MMAP_EMPTY
2638		 * here: when done == true and hits != rec->samples in
2639		 * the previous round.
2640		 *
2641		 * evlist__toggle_bkw_mmap() ensures we never
2642		 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
2643		 */
2644		if (trigger_is_hit(&switch_output_trigger) || done || draining)
2645			evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
2646
2647		if (record__mmap_read_all(rec, false) < 0) {
2648			trigger_error(&auxtrace_snapshot_trigger);
2649			trigger_error(&switch_output_trigger);
2650			err = -1;
2651			goto out_child;
2652		}
2653
2654		if (auxtrace_record__snapshot_started) {
2655			auxtrace_record__snapshot_started = 0;
2656			if (!trigger_is_error(&auxtrace_snapshot_trigger))
2657				record__read_auxtrace_snapshot(rec, false);
2658			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
2659				pr_err("AUX area tracing snapshot failed\n");
2660				err = -1;
2661				goto out_child;
2662			}
2663		}
2664
2665		if (trigger_is_hit(&switch_output_trigger)) {
2666			/*
2667			 * If switch_output_trigger is hit, the data in
2668			 * overwritable ring buffer should have been collected,
2669			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
2670			 *
2671			 * If SIGUSR2 is raised after or during record__mmap_read_all(),
2672			 * record__mmap_read_all() didn't collect data from the
2673			 * overwritable ring buffer. Read again.
2674			 */
2675			if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
2676				continue;
2677			trigger_ready(&switch_output_trigger);
2678
2679			/*
2680			 * Reenable events in overwrite ring buffer after
2681			 * record__mmap_read_all(): we should have collected
2682			 * data from it.
2683			 */
2684			evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
2685
2686			if (!quiet)
2687				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
2688					record__waking(rec));
2689			thread->waking = 0;
2690			fd = record__switch_output(rec, false);
2691			if (fd < 0) {
2692				pr_err("Failed to switch to new file\n");
2693				trigger_error(&switch_output_trigger);
2694				err = fd;
2695				goto out_child;
2696			}
2697
2698			/* re-arm the alarm */
2699			if (rec->switch_output.time)
2700				alarm(rec->switch_output.time);
2701		}
2702
2703		if (hits == thread->samples) {
2704			if (done || draining)
2705				break;
2706			err = fdarray__poll(&thread->pollfd, -1);
2707			/*
2708			 * Propagate the error, if any. Ignore a positive
2709			 * number of returned events and interrupt errors.
2710			 */
2711			if (err > 0 || (err < 0 && errno == EINTR))
2712				err = 0;
2713			thread->waking++;
2714
2715			if (fdarray__filter(&thread->pollfd, POLLERR | POLLHUP,
2716					    record__thread_munmap_filtered, NULL) == 0)
2717				draining = true;
2718
2719			err = record__update_evlist_pollfd_from_thread(rec, rec->evlist, thread);
2720			if (err)
2721				goto out_child;
2722		}
2723
2724		if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) {
2725			switch (cmd) {
2726			case EVLIST_CTL_CMD_SNAPSHOT:
2727				hit_auxtrace_snapshot_trigger(rec);
2728				evlist__ctlfd_ack(rec->evlist);
2729				break;
2730			case EVLIST_CTL_CMD_STOP:
2731				done = 1;
2732				break;
2733			case EVLIST_CTL_CMD_ACK:
2734			case EVLIST_CTL_CMD_UNSUPPORTED:
2735			case EVLIST_CTL_CMD_ENABLE:
2736			case EVLIST_CTL_CMD_DISABLE:
2737			case EVLIST_CTL_CMD_EVLIST:
2738			case EVLIST_CTL_CMD_PING:
2739			default:
2740				break;
2741			}
2742		}
2743
2744		err = event_enable_timer__process(rec->evlist->eet);
2745		if (err < 0)
2746			goto out_child;
2747		if (err) {
2748			err = 0;
2749			done = 1;
2750		}
2751
2752		/*
2753		 * When perf is starting the traced process, at the end events
2754		 * die with the process and we wait for that. Thus no need to
2755		 * disable events in this case.
2756		 */
2757		if (done && !disabled && !target__none(&opts->target)) {
2758			trigger_off(&auxtrace_snapshot_trigger);
2759			evlist__disable(rec->evlist);
2760			disabled = true;
2761		}
2762	}
2763
2764	trigger_off(&auxtrace_snapshot_trigger);
2765	trigger_off(&switch_output_trigger);
2766
2767	if (opts->auxtrace_snapshot_on_exit)
2768		record__auxtrace_snapshot_exit(rec);
2769
2770	if (forks && workload_exec_errno) {
2771		char msg[STRERR_BUFSIZE], strevsels[2048];
2772		const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
2773
2774		evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels);
2775
2776		pr_err("Failed to collect '%s' for the '%s' workload: %s\n",
2777			strevsels, argv[0], emsg);
2778		err = -1;
2779		goto out_child;
2780	}
2781
2782	if (!quiet)
2783		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n",
2784			record__waking(rec));
2785
2786	write_finished_init(rec, true);
2787
2788	if (target__none(&rec->opts.target))
2789		record__synthesize_workload(rec, true);
2790
2791out_child:
2792	record__stop_threads(rec);
2793	record__mmap_read_all(rec, true);
2794out_free_threads:
2795	record__free_thread_data(rec);
2796	evlist__finalize_ctlfd(rec->evlist);
2797	record__aio_mmap_read_sync(rec);
2798
2799	if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
2800		ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
2801		session->header.env.comp_ratio = ratio + 0.5;
2802	}
2803
2804	if (forks) {
2805		int exit_status;
2806
2807		if (!child_finished)
2808			kill(rec->evlist->workload.pid, SIGTERM);
2809
2810		wait(&exit_status);
2811
2812		if (err < 0)
2813			status = err;
2814		else if (WIFEXITED(exit_status))
2815			status = WEXITSTATUS(exit_status);
2816		else if (WIFSIGNALED(exit_status))
2817			signr = WTERMSIG(exit_status);
2818	} else
2819		status = err;
2820
2821	if (rec->off_cpu)
2822		rec->bytes_written += off_cpu_write(rec->session);
2823
2824	record__read_lost_samples(rec);
2825	record__synthesize(rec, true);
2826	/* this will be recalculated during process_buildids() */
2827	rec->samples = 0;
2828
2829	if (!err) {
2830		if (!rec->timestamp_filename) {
2831			record__finish_output(rec);
2832		} else {
2833			fd = record__switch_output(rec, true);
2834			if (fd < 0) {
2835				status = fd;
2836				goto out_delete_session;
2837			}
2838		}
2839	}
2840
2841	perf_hooks__invoke_record_end();
2842
2843	if (!err && !quiet) {
2844		char samples[128];
2845		const char *postfix = rec->timestamp_filename ?
2846					".<timestamp>" : "";
2847
2848		if (rec->samples && !rec->opts.full_auxtrace)
2849			scnprintf(samples, sizeof(samples),
2850				  " (%" PRIu64 " samples)", rec->samples);
2851		else
2852			samples[0] = '\0';
2853
2854		fprintf(stderr,	"[ perf record: Captured and wrote %.3f MB %s%s%s",
2855			perf_data__size(data) / 1024.0 / 1024.0,
2856			data->path, postfix, samples);
2857		if (ratio) {
2858			fprintf(stderr,	", compressed (original %.3f MB, ratio is %.3f)",
2859					rec->session->bytes_transferred / 1024.0 / 1024.0,
2860					ratio);
2861		}
2862		fprintf(stderr, " ]\n");
2863	}
2864
2865out_delete_session:
2866#ifdef HAVE_EVENTFD_SUPPORT
2867	if (done_fd >= 0) {
2868		fd = done_fd;
2869		done_fd = -1;
2870
2871		close(fd);
2872	}
2873#endif
2874	zstd_fini(&session->zstd_data);
2875	perf_session__delete(session);
2876
2877	if (!opts->no_bpf_event)
2878		evlist__stop_sb_thread(rec->sb_evlist);
2879	return status;
2880}
2881
2882static void callchain_debug(struct callchain_param *callchain)
2883{
2884	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
2885
2886	pr_debug("callchain: type %s\n", str[callchain->record_mode]);
2887
2888	if (callchain->record_mode == CALLCHAIN_DWARF)
2889		pr_debug("callchain: stack dump size %d\n",
2890			 callchain->dump_size);
2891}
2892
2893int record_opts__parse_callchain(struct record_opts *record,
2894				 struct callchain_param *callchain,
2895				 const char *arg, bool unset)
2896{
2897	int ret;
2898	callchain->enabled = !unset;
2899
2900	/* --no-call-graph */
2901	if (unset) {
2902		callchain->record_mode = CALLCHAIN_NONE;
2903		pr_debug("callchain: disabled\n");
2904		return 0;
2905	}
2906
2907	ret = parse_callchain_record_opt(arg, callchain);
2908	if (!ret) {
2909		/* Enable data address sampling for DWARF unwind. */
2910		if (callchain->record_mode == CALLCHAIN_DWARF)
2911			record->sample_address = true;
2912		callchain_debug(callchain);
2913	}
2914
2915	return ret;
2916}
2917
2918int record_parse_callchain_opt(const struct option *opt,
2919			       const char *arg,
2920			       int unset)
2921{
2922	return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
2923}
2924
2925int record_callchain_opt(const struct option *opt,
2926			 const char *arg __maybe_unused,
2927			 int unset __maybe_unused)
2928{
2929	struct callchain_param *callchain = opt->value;
2930
2931	callchain->enabled = true;
2932
2933	if (callchain->record_mode == CALLCHAIN_NONE)
2934		callchain->record_mode = CALLCHAIN_FP;
2935
2936	callchain_debug(callchain);
2937	return 0;
2938}
2939
2940static int perf_record_config(const char *var, const char *value, void *cb)
2941{
2942	struct record *rec = cb;
2943
2944	if (!strcmp(var, "record.build-id")) {
2945		if (!strcmp(value, "cache"))
2946			rec->no_buildid_cache = false;
2947		else if (!strcmp(value, "no-cache"))
2948			rec->no_buildid_cache = true;
2949		else if (!strcmp(value, "skip"))
2950			rec->no_buildid = true;
2951		else if (!strcmp(value, "mmap"))
2952			rec->buildid_mmap = true;
2953		else
2954			return -1;
2955		return 0;
2956	}
2957	if (!strcmp(var, "record.call-graph")) {
2958		var = "call-graph.record-mode";
2959		return perf_default_config(var, value, cb);
2960	}
2961#ifdef HAVE_AIO_SUPPORT
2962	if (!strcmp(var, "record.aio")) {
2963		rec->opts.nr_cblocks = strtol(value, NULL, 0);
2964		if (!rec->opts.nr_cblocks)
2965			rec->opts.nr_cblocks = nr_cblocks_default;
2966	}
2967#endif
2968	if (!strcmp(var, "record.debuginfod")) {
2969		rec->debuginfod.urls = strdup(value);
2970		if (!rec->debuginfod.urls)
2971			return -ENOMEM;
2972		rec->debuginfod.set = true;
2973	}
2974
2975	return 0;
2976}
2977
2978static int record__parse_event_enable_time(const struct option *opt, const char *str, int unset)
2979{
2980	struct record *rec = (struct record *)opt->value;
2981
2982	return evlist__parse_event_enable_time(rec->evlist, &rec->opts, str, unset);
2983}
2984
2985static int record__parse_affinity(const struct option *opt, const char *str, int unset)
2986{
2987	struct record_opts *opts = (struct record_opts *)opt->value;
2988
2989	if (unset || !str)
2990		return 0;
2991
2992	if (!strcasecmp(str, "node"))
2993		opts->affinity = PERF_AFFINITY_NODE;
2994	else if (!strcasecmp(str, "cpu"))
2995		opts->affinity = PERF_AFFINITY_CPU;
2996
2997	return 0;
2998}
2999
3000static int record__mmap_cpu_mask_alloc(struct mmap_cpu_mask *mask, int nr_bits)
3001{
3002	mask->nbits = nr_bits;
3003	mask->bits = bitmap_zalloc(mask->nbits);
3004	if (!mask->bits)
3005		return -ENOMEM;
3006
3007	return 0;
3008}
3009
3010static void record__mmap_cpu_mask_free(struct mmap_cpu_mask *mask)
3011{
3012	bitmap_free(mask->bits);
3013	mask->nbits = 0;
3014}
3015
3016static int record__thread_mask_alloc(struct thread_mask *mask, int nr_bits)
3017{
3018	int ret;
3019
3020	ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);
3021	if (ret) {
3022		mask->affinity.bits = NULL;
3023		return ret;
3024	}
3025
3026	ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits);
3027	if (ret) {
3028		record__mmap_cpu_mask_free(&mask->maps);
3029		mask->maps.bits = NULL;
3030	}
3031
3032	return ret;
3033}
3034
3035static void record__thread_mask_free(struct thread_mask *mask)
3036{
3037	record__mmap_cpu_mask_free(&mask->maps);
3038	record__mmap_cpu_mask_free(&mask->affinity);
3039}
3040
3041static int record__parse_threads(const struct option *opt, const char *str, int unset)
3042{
3043	int s;
3044	struct record_opts *opts = opt->value;
3045
3046	if (unset || !str || !strlen(str)) {
3047		opts->threads_spec = THREAD_SPEC__CPU;
3048	} else {
3049		for (s = 1; s < THREAD_SPEC__MAX; s++) {
3050			if (s == THREAD_SPEC__USER) {
3051				opts->threads_user_spec = strdup(str);
3052				if (!opts->threads_user_spec)
3053					return -ENOMEM;
3054				opts->threads_spec = THREAD_SPEC__USER;
3055				break;
3056			}
3057			if (!strncasecmp(str, thread_spec_tags[s], strlen(thread_spec_tags[s]))) {
3058				opts->threads_spec = s;
3059				break;
3060			}
3061		}
3062	}
3063
3064	if (opts->threads_spec == THREAD_SPEC__USER)
3065		pr_debug("threads_spec: %s\n", opts->threads_user_spec);
3066	else
3067		pr_debug("threads_spec: %s\n", thread_spec_tags[opts->threads_spec]);
3068
3069	return 0;
3070}
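
/*
 * Illustrative --threads specs accepted by the parser above (tags are
 * matched case-insensitively against thread_spec_tags[]):
 *
 *   perf record --threads ...             # default: one thread per CPU
 *   perf record --threads=numa ...        # one thread per NUMA node
 *   perf record --threads=0-3/0:4-7/4 ... # anything else is kept as a user
 *                                         # spec for record__init_thread_user_masks()
 */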
3071
3072static int parse_output_max_size(const struct option *opt,
3073				 const char *str, int unset)
3074{
3075	unsigned long *s = (unsigned long *)opt->value;
3076	static struct parse_tag tags_size[] = {
3077		{ .tag  = 'B', .mult = 1       },
3078		{ .tag  = 'K', .mult = 1 << 10 },
3079		{ .tag  = 'M', .mult = 1 << 20 },
3080		{ .tag  = 'G', .mult = 1 << 30 },
3081		{ .tag  = 0 },
3082	};
3083	unsigned long val;
3084
3085	if (unset) {
3086		*s = 0;
3087		return 0;
3088	}
3089
3090	val = parse_tag_value(str, tags_size);
3091	if (val != (unsigned long) -1) {
3092		*s = val;
3093		return 0;
3094	}
3095
3096	return -1;
3097}
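
/*
 * For example (hypothetical size): --max-size=200M stops recording once
 * 200 * 2^20 bytes were written; a B/K/M/G suffix is required, since
 * parse_tag_value() returns (unsigned long)-1 for untagged values.
 */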
3098
3099static int record__parse_mmap_pages(const struct option *opt,
3100				    const char *str,
3101				    int unset __maybe_unused)
3102{
3103	struct record_opts *opts = opt->value;
3104	char *s, *p;
3105	unsigned int mmap_pages;
3106	int ret;
3107
3108	if (!str)
3109		return -EINVAL;
3110
3111	s = strdup(str);
3112	if (!s)
3113		return -ENOMEM;
3114
3115	p = strchr(s, ',');
3116	if (p)
3117		*p = '\0';
3118
3119	if (*s) {
3120		ret = __evlist__parse_mmap_pages(&mmap_pages, s);
3121		if (ret)
3122			goto out_free;
3123		opts->mmap_pages = mmap_pages;
3124	}
3125
3126	if (!p) {
3127		ret = 0;
3128		goto out_free;
3129	}
3130
3131	ret = __evlist__parse_mmap_pages(&mmap_pages, p + 1);
3132	if (ret)
3133		goto out_free;
3134
3135	opts->auxtrace_mmap_pages = mmap_pages;
3136
3137out_free:
3138	free(s);
3139	return ret;
3140}
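
/*
 * Illustrative -m/--mmap-pages values for the parser above: the part
 * before the comma sizes the data mmaps, the part after it the AUX area
 * tracing mmaps (page counts are hypothetical):
 *
 *   perf record -m 512 ...      # 512 data pages
 *   perf record -m 512,128 ...  # plus 128 AUX area pages
 *   perf record -m ,64 ...      # set only the AUX area size
 */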
3141
3142void __weak arch__add_leaf_frame_record_opts(struct record_opts *opts __maybe_unused)
3143{
3144}
3145
3146static int parse_control_option(const struct option *opt,
3147				const char *str,
3148				int unset __maybe_unused)
3149{
3150	struct record_opts *opts = opt->value;
3151
3152	return evlist__parse_control(str, &opts->ctl_fd, &opts->ctl_fd_ack, &opts->ctl_fd_close);
3153}
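
/*
 * A sketch of --control usage (fd numbers and fifo paths are
 * hypothetical); the descriptors must already be open in the parent:
 *
 *   perf record --control fd:10,11 ...          # commands on 10, acks on 11
 *   mkfifo ctl ack; perf record --control fifo:ctl,ack ...
 *
 * Writing 'enable' or 'disable' to the control side then toggles the
 * events, per the option help below.
 */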
3154
3155static void switch_output_size_warn(struct record *rec)
3156{
3157	u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
3158	struct switch_output *s = &rec->switch_output;
3159
3160	wakeup_size /= 2;
3161
3162	if (s->size < wakeup_size) {
3163		char buf[100];
3164
3165		unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
3166		pr_warning("WARNING: switch-output data size is lower than the "
3167			   "wakeup kernel buffer size (%s); "
3168			   "expect bigger perf.data sizes\n", buf);
3169	}
3170}
3171
3172static int switch_output_setup(struct record *rec)
3173{
3174	struct switch_output *s = &rec->switch_output;
3175	static struct parse_tag tags_size[] = {
3176		{ .tag  = 'B', .mult = 1       },
3177		{ .tag  = 'K', .mult = 1 << 10 },
3178		{ .tag  = 'M', .mult = 1 << 20 },
3179		{ .tag  = 'G', .mult = 1 << 30 },
3180		{ .tag  = 0 },
3181	};
3182	static struct parse_tag tags_time[] = {
3183		{ .tag  = 's', .mult = 1        },
3184		{ .tag  = 'm', .mult = 60       },
3185		{ .tag  = 'h', .mult = 60*60    },
3186		{ .tag  = 'd', .mult = 60*60*24 },
3187		{ .tag  = 0 },
3188	};
3189	unsigned long val;
3190
3191	/*
3192	 * If we're using --switch-output-events, then we imply
3193	 * --switch-output=signal, as we'll send a SIGUSR2 from the side band
3194	 * thread to its parent.
3195	 */
3196	if (rec->switch_output_event_set) {
3197		if (record__threads_enabled(rec)) {
3198			pr_warning("WARNING: --switch-output-event option is not available in parallel streaming mode.\n");
3199			return 0;
3200		}
3201		goto do_signal;
3202	}
3203
3204	if (!s->set)
3205		return 0;
3206
3207	if (record__threads_enabled(rec)) {
3208		pr_warning("WARNING: --switch-output option is not available in parallel streaming mode.\n");
3209		return 0;
3210	}
3211
3212	if (!strcmp(s->str, "signal")) {
3213do_signal:
3214		s->signal = true;
3215		pr_debug("switch-output with SIGUSR2 signal\n");
3216		goto enabled;
3217	}
3218
3219	val = parse_tag_value(s->str, tags_size);
3220	if (val != (unsigned long) -1) {
3221		s->size = val;
3222		pr_debug("switch-output with %s size threshold\n", s->str);
3223		goto enabled;
3224	}
3225
3226	val = parse_tag_value(s->str, tags_time);
3227	if (val != (unsigned long) -1) {
3228		s->time = val;
3229		pr_debug("switch-output with %s time threshold (%lu seconds)\n",
3230			 s->str, s->time);
3231		goto enabled;
3232	}
3233
3234	return -1;
3235
3236enabled:
3237	rec->timestamp_filename = true;
3238	s->enabled              = true;
3239
3240	if (s->size && !rec->opts.no_buffering)
3241		switch_output_size_warn(rec);
3242
3243	return 0;
3244}
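
/*
 * Illustrative --switch-output forms handled above (thresholds are
 * hypothetical); any of them also implies --timestamp-filename:
 *
 *   perf record --switch-output ...       # same as =signal, rotate on SIGUSR2
 *   perf record --switch-output=100M ...  # rotate after ~100M of data
 *   perf record --switch-output=30m ...   # rotate every 30 minutes
 */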
3245
3246static const char * const __record_usage[] = {
3247	"perf record [<options>] [<command>]",
3248	"perf record [<options>] -- <command> [<options>]",
3249	NULL
3250};
3251const char * const *record_usage = __record_usage;
3252
3253static int build_id__process_mmap(struct perf_tool *tool, union perf_event *event,
3254				  struct perf_sample *sample, struct machine *machine)
3255{
3256	/*
3257	 * We already have the kernel maps, put in place via perf_session__create_kernel_maps();
3258	 * no need to add them twice.
3259	 */
3260	if (!(event->header.misc & PERF_RECORD_MISC_USER))
3261		return 0;
3262	return perf_event__process_mmap(tool, event, sample, machine);
3263}
3264
3265static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *event,
3266				   struct perf_sample *sample, struct machine *machine)
3267{
3268	/*
3269	 * We already have the kernel maps, put in place via perf_session__create_kernel_maps();
3270	 * no need to add them twice.
3271	 */
3272	if (!(event->header.misc & PERF_RECORD_MISC_USER))
3273		return 0;
3274
3275	return perf_event__process_mmap2(tool, event, sample, machine);
3276}
3277
3278static int process_timestamp_boundary(struct perf_tool *tool,
3279				      union perf_event *event __maybe_unused,
3280				      struct perf_sample *sample,
3281				      struct machine *machine __maybe_unused)
3282{
3283	struct record *rec = container_of(tool, struct record, tool);
3284
3285	set_timestamp_boundary(rec, sample->time);
3286	return 0;
3287}
3288
3289static int parse_record_synth_option(const struct option *opt,
3290				     const char *str,
3291				     int unset __maybe_unused)
3292{
3293	struct record_opts *opts = opt->value;
3294	char *p = strdup(str);
3295
3296	if (p == NULL)
3297		return -1;
3298
3299	opts->synth = parse_synth_opt(p);
3300	free(p);
3301
3302	if (opts->synth < 0) {
3303		pr_err("Invalid synth option: %s\n", str);
3304		return -1;
3305	}
3306	return 0;
3307}
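
/*
 * For example (illustrative): --synth=no skips synthesizing all
 * non-sample events, while something like --synth=task,mmap (assuming
 * parse_synth_opt() accepts comma-separated lists) restricts synthesis
 * to task and mmap events.
 */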
3308
3309/*
3310 * XXX Ideally this would be local to cmd_record() and passed to a record__new(),
3311 * because we need to have access to it in record__exit(), which is called
3312 * after cmd_record() exits, but since record_options needs to be accessible to
3313 * builtin-script, leave it here.
3314 *
3315 * At least we don't touch it in all the other functions here directly.
3316 *
3317 * Just say no to tons of global variables, sigh.
3318 */
3319static struct record record = {
3320	.opts = {
3321		.sample_time	     = true,
3322		.mmap_pages	     = UINT_MAX,
3323		.user_freq	     = UINT_MAX,
3324		.user_interval	     = ULLONG_MAX,
3325		.freq		     = 4000,
3326		.target		     = {
3327			.uses_mmap   = true,
3328			.default_per_cpu = true,
3329		},
3330		.mmap_flush          = MMAP_FLUSH_DEFAULT,
3331		.nr_threads_synthesize = 1,
3332		.ctl_fd              = -1,
3333		.ctl_fd_ack          = -1,
3334		.synth               = PERF_SYNTH_ALL,
3335	},
3336	.tool = {
3337		.sample		= process_sample_event,
3338		.fork		= perf_event__process_fork,
3339		.exit		= perf_event__process_exit,
3340		.comm		= perf_event__process_comm,
3341		.namespaces	= perf_event__process_namespaces,
3342		.mmap		= build_id__process_mmap,
3343		.mmap2		= build_id__process_mmap2,
3344		.itrace_start	= process_timestamp_boundary,
3345		.aux		= process_timestamp_boundary,
3346		.ordered_events	= true,
3347	},
3348};
3349
3350const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
3351	"\n\t\t\t\tDefault: fp";
3352
3353static bool dry_run;
3354
3355static struct parse_events_option_args parse_events_option_args = {
3356	.evlistp = &record.evlist,
3357};
3358
3359static struct parse_events_option_args switch_output_parse_events_option_args = {
3360	.evlistp = &record.sb_evlist,
3361};
3362
3363/*
3364 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
3365 * with it and switch to using the library functions in perf_evlist that came
3366 * from builtin-record.c, i.e. use record_opts,
3367 * evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record',
3368 * using pipes, etc.
3369 */
3370static struct option __record_options[] = {
3371	OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
3372		     "event selector. use 'perf list' to list available events",
3373		     parse_events_option),
3374	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
3375		     "event filter", parse_filter),
3376	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
3377			   NULL, "don't record events from perf itself",
3378			   exclude_perf),
3379	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
3380		    "record events on existing process id"),
3381	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
3382		    "record events on existing thread id"),
3383	OPT_INTEGER('r', "realtime", &record.realtime_prio,
3384		    "collect data with this RT SCHED_FIFO priority"),
3385	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
3386		    "collect data without buffering"),
3387	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
3388		    "collect raw sample records from all opened counters"),
3389	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
3390			    "system-wide collection from all CPUs"),
3391	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
3392		    "list of cpus to monitor"),
3393	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
3394	OPT_STRING('o', "output", &record.data.path, "file",
3395		    "output file name"),
3396	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
3397			&record.opts.no_inherit_set,
3398			"child tasks do not inherit counters"),
3399	OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
3400		    "synthesize non-sample events at the end of output"),
3401	OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
3402	OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
3403	OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
3404		    "Fail if the specified frequency can't be used"),
3405	OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
3406		     "profile at this frequency",
3407		      record__parse_freq),
3408	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
3409		     "number of mmap data pages and AUX area tracing mmap pages",
3410		     record__parse_mmap_pages),
3411	OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
3412		     "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
3413		     record__mmap_flush_parse),
3414	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
3415			   NULL, "enables call-graph recording" ,
3416			   &record_callchain_opt),
3417	OPT_CALLBACK(0, "call-graph", &record.opts,
3418		     "record_mode[,record_size]", record_callchain_help,
3419		     &record_parse_callchain_opt),
3420	OPT_INCR('v', "verbose", &verbose,
3421		    "be more verbose (show counter open errors, etc)"),
3422	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any warnings or messages"),
3423	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
3424		    "per thread counts"),
3425	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
3426	OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
3427		    "Record the sample physical addresses"),
3428	OPT_BOOLEAN(0, "data-page-size", &record.opts.sample_data_page_size,
3429		    "Record the sampled data address data page size"),
3430	OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
3431		    "Record the sampled code address (ip) page size"),
3432	OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
3433	OPT_BOOLEAN(0, "sample-identifier", &record.opts.sample_identifier,
3434		    "Record the sample identifier"),
3435	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
3436			&record.opts.sample_time_set,
3437			"Record the sample timestamps"),
3438	OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
3439			"Record the sample period"),
3440	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
3441		    "don't sample"),
3442	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
3443			&record.no_buildid_cache_set,
3444			"do not update the buildid cache"),
3445	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
3446			&record.no_buildid_set,
3447			"do not collect buildids in perf.data"),
3448	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
3449		     "monitor event in cgroup name only",
3450		     parse_cgroups),
3451	OPT_CALLBACK('D', "delay", &record, "ms",
3452		     "ms to wait before starting measurement after program start (-1: start with events disabled), "
3453		     "or ranges of time to enable events e.g. '-D 10-20,30-40'",
3454		     record__parse_event_enable_time),
3455	OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
3456	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
3457		   "user to profile"),
3458
3459	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
3460		     "branch any", "sample any taken branches",
3461		     parse_branch_stack),
3462
3463	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
3464		     "branch filter mask", "branch stack filter modes",
3465		     parse_branch_stack),
3466	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
3467		    "sample by weight (on special events only)"),
3468	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
3469		    "sample transaction flags (special events only)"),
3470	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
3471		    "use per-thread mmaps"),
3472	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
3473		    "sample selected machine registers on interrupt,"
3474		    " use '-I?' to list register names", parse_intr_regs),
3475	OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
3476		    "sample selected machine registers on interrupt,"
3477		    " use '--user-regs=?' to list register names", parse_user_regs),
3478	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
3479		    "Record running/enabled time of read (:S) events"),
3480	OPT_CALLBACK('k', "clockid", &record.opts,
3481	"clockid", "clockid to use for events, see clock_gettime()",
3482	parse_clockid),
3483	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
3484			  "opts", "AUX area tracing Snapshot Mode", ""),
3485	OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
3486			  "opts", "sample AUX area", ""),
3487	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
3488			"per thread proc mmap processing timeout in ms"),
3489	OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
3490		    "Record namespaces events"),
3491	OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
3492		    "Record cgroup events"),
3493	OPT_BOOLEAN_SET(0, "switch-events", &record.opts.record_switch_events,
3494			&record.opts.record_switch_events_set,
3495			"Record context switch events"),
3496	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
3497			 "Configure all used events to run in kernel space.",
3498			 PARSE_OPT_EXCLUSIVE),
3499	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
3500			 "Configure all used events to run in user space.",
3501			 PARSE_OPT_EXCLUSIVE),
3502	OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
3503		    "collect kernel callchains"),
3504	OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
3505		    "collect user callchains"),
3506	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
3507		   "file", "vmlinux pathname"),
3508	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
3509		    "Record build-id of all DSOs regardless of hits"),
3510	OPT_BOOLEAN(0, "buildid-mmap", &record.buildid_mmap,
3511		    "Record build-id in map events"),
3512	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
3513		    "append timestamp to output filename"),
3514	OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
3515		    "Record timestamp boundary (time of first/last samples)"),
3516	OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
3517			  &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
3518			  "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
3519			  "signal"),
3520	OPT_CALLBACK_SET(0, "switch-output-event", &switch_output_parse_events_option_args,
3521			 &record.switch_output_event_set, "switch output event",
3522			 "switch output event selector. use 'perf list' to list available events",
3523			 parse_events_option_new_evlist),
3524	OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
3525		   "Limit number of switch output generated files"),
3526	OPT_BOOLEAN(0, "dry-run", &dry_run,
3527		    "Parse options then exit"),
3528#ifdef HAVE_AIO_SUPPORT
3529	OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
3530		     &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
3531		     record__aio_parse),
3532#endif
3533	OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
3534		     "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
3535		     record__parse_affinity),
3536#ifdef HAVE_ZSTD_SUPPORT
3537	OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default, "n",
3538			    "Compress records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
3539			    record__parse_comp_level),
3540#endif
3541	OPT_CALLBACK(0, "max-size", &record.output_max_size,
3542		     "size", "Limit the maximum size of the output file", parse_output_max_size),
3543	OPT_UINTEGER(0, "num-thread-synthesize",
3544		     &record.opts.nr_threads_synthesize,
3545		     "number of threads to run for event synthesis"),
3546#ifdef HAVE_LIBPFM
3547	OPT_CALLBACK(0, "pfm-events", &record.evlist, "event",
3548		"libpfm4 event selector. use 'perf list' to list available events",
3549		parse_libpfm_events_option),
3550#endif
3551	OPT_CALLBACK(0, "control", &record.opts, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
3552		     "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events,\n"
3553		     "\t\t\t  'snapshot': AUX area tracing snapshot).\n"
3554		     "\t\t\t  Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
3555		     "\t\t\t  Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
3556		      parse_control_option),
3557	OPT_CALLBACK(0, "synth", &record.opts, "no|all|task|mmap|cgroup",
3558		     "Fine-tune event synthesis: default=all", parse_record_synth_option),
3559	OPT_STRING_OPTARG_SET(0, "debuginfod", &record.debuginfod.urls,
3560			  &record.debuginfod.set, "debuginfod urls",
3561			  "Enable debuginfod data retrieval from DEBUGINFOD_URLS or specified urls",
3562			  "system"),
3563	OPT_CALLBACK_OPTARG(0, "threads", &record.opts, NULL, "spec",
3564			    "write collected trace data into several data files using parallel threads",
3565			    record__parse_threads),
3566	OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
3567	OPT_END()
3568};
3569
3570struct option *record_options = __record_options;
3571
3572static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
3573{
3574	struct perf_cpu cpu;
3575	int idx;
3576
3577	if (cpu_map__is_dummy(cpus))
3578		return 0;
3579
3580	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, cpus) {
3581		/* Return -ENODEV if the input cpu is greater than max cpu */
3582		if ((unsigned long)cpu.cpu > mask->nbits)
3583			return -ENODEV;
3584		__set_bit(cpu.cpu, mask->bits);
3585	}
3586
3587	return 0;
3588}
3589
3590static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec)
3591{
3592	struct perf_cpu_map *cpus;
3593
3594	cpus = perf_cpu_map__new(mask_spec);
3595	if (!cpus)
3596		return -ENOMEM;
3597
3598	bitmap_zero(mask->bits, mask->nbits);
3599	if (record__mmap_cpu_mask_init(mask, cpus)) {
		/* Also put the map on the error path, so it is not leaked. */
		perf_cpu_map__put(cpus);
3600		return -ENODEV;
	}
3601
3602	perf_cpu_map__put(cpus);
3603
3604	return 0;
3605}
3606
3607static void record__free_thread_masks(struct record *rec, int nr_threads)
3608{
3609	int t;
3610
3611	if (rec->thread_masks)
3612		for (t = 0; t < nr_threads; t++)
3613			record__thread_mask_free(&rec->thread_masks[t]);
3614
3615	zfree(&rec->thread_masks);
3616}
3617
3618static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
3619{
3620	int t, ret;
3621
3622	rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
3623	if (!rec->thread_masks) {
3624		pr_err("Failed to allocate thread masks\n");
3625		return -ENOMEM;
3626	}
3627
3628	for (t = 0; t < nr_threads; t++) {
3629		ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
3630		if (ret) {
3631			pr_err("Failed to allocate thread masks[%d]\n", t);
3632			goto out_free;
3633		}
3634	}
3635
3636	return 0;
3637
3638out_free:
3639	record__free_thread_masks(rec, nr_threads);
3640
3641	return ret;
3642}
3643
3644static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map *cpus)
3645{
3646	int t, ret, nr_cpus = perf_cpu_map__nr(cpus);
3647
3648	ret = record__alloc_thread_masks(rec, nr_cpus, cpu__max_cpu().cpu);
3649	if (ret)
3650		return ret;
3651
3652	rec->nr_threads = nr_cpus;
3653	pr_debug("nr_threads: %d\n", rec->nr_threads);
3654
3655	for (t = 0; t < rec->nr_threads; t++) {
3656		__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
3657		__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
3658		if (verbose > 0) {
3659			pr_debug("thread_masks[%d]: ", t);
3660			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
3661			pr_debug("thread_masks[%d]: ", t);
3662			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
3663		}
3664	}
3665
3666	return 0;
3667}
3668
3669static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_map *cpus,
3670					  const char **maps_spec, const char **affinity_spec,
3671					  u32 nr_spec)
3672{
3673	u32 s;
3674	int ret = 0, t = 0;
3675	struct mmap_cpu_mask cpus_mask;
3676	struct thread_mask thread_mask, full_mask, *thread_masks;
3677
3678	ret = record__mmap_cpu_mask_alloc(&cpus_mask, cpu__max_cpu().cpu);
3679	if (ret) {
3680		pr_err("Failed to allocate CPUs mask\n");
3681		return ret;
3682	}
3683
3684	ret = record__mmap_cpu_mask_init(&cpus_mask, cpus);
3685	if (ret) {
3686		pr_err("Failed to init cpu mask\n");
3687		goto out_free_cpu_mask;
3688	}
3689
3690	ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu().cpu);
3691	if (ret) {
3692		pr_err("Failed to allocate full mask\n");
3693		goto out_free_cpu_mask;
3694	}
3695
3696	ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
3697	if (ret) {
3698		pr_err("Failed to allocate thread mask\n");
3699		goto out_free_full_and_cpu_masks;
3700	}
3701
3702	for (s = 0; s < nr_spec; s++) {
3703		ret = record__mmap_cpu_mask_init_spec(&thread_mask.maps, maps_spec[s]);
3704		if (ret) {
3705			pr_err("Failed to initialize maps thread mask\n");
3706			goto out_free;
3707		}
3708		ret = record__mmap_cpu_mask_init_spec(&thread_mask.affinity, affinity_spec[s]);
3709		if (ret) {
3710			pr_err("Failed to initialize affinity thread mask\n");
3711			goto out_free;
3712		}
3713
3714		/* ignore invalid CPUs but do not allow empty masks */
3715		if (!bitmap_and(thread_mask.maps.bits, thread_mask.maps.bits,
3716				cpus_mask.bits, thread_mask.maps.nbits)) {
3717			pr_err("Empty maps mask: %s\n", maps_spec[s]);
3718			ret = -EINVAL;
3719			goto out_free;
3720		}
3721		if (!bitmap_and(thread_mask.affinity.bits, thread_mask.affinity.bits,
3722				cpus_mask.bits, thread_mask.affinity.nbits)) {
3723			pr_err("Empty affinity mask: %s\n", affinity_spec[s]);
3724			ret = -EINVAL;
3725			goto out_free;
3726		}
3727
3728		/* do not allow intersection with other masks (full_mask) */
3729		if (bitmap_intersects(thread_mask.maps.bits, full_mask.maps.bits,
3730				      thread_mask.maps.nbits)) {
3731			pr_err("Intersecting maps mask: %s\n", maps_spec[s]);
3732			ret = -EINVAL;
3733			goto out_free;
3734		}
3735		if (bitmap_intersects(thread_mask.affinity.bits, full_mask.affinity.bits,
3736				      thread_mask.affinity.nbits)) {
3737			pr_err("Intersecting affinity mask: %s\n", affinity_spec[s]);
3738			ret = -EINVAL;
3739			goto out_free;
3740		}
3741
3742		bitmap_or(full_mask.maps.bits, full_mask.maps.bits,
3743			  thread_mask.maps.bits, full_mask.maps.nbits);
3744		bitmap_or(full_mask.affinity.bits, full_mask.affinity.bits,
3745			  thread_mask.affinity.bits, full_mask.affinity.nbits);
3746
3747		thread_masks = realloc(rec->thread_masks, (t + 1) * sizeof(struct thread_mask));
3748		if (!thread_masks) {
3749			pr_err("Failed to reallocate thread masks\n");
3750			ret = -ENOMEM;
3751			goto out_free;
3752		}
3753		rec->thread_masks = thread_masks;
3754		rec->thread_masks[t] = thread_mask;
3755		if (verbose > 0) {
3756			pr_debug("thread_masks[%d]: ", t);
3757			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
3758			pr_debug("thread_masks[%d]: ", t);
3759			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
3760		}
3761		t++;
3762		ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
3763		if (ret) {
3764			pr_err("Failed to allocate thread mask\n");
3765			goto out_free_full_and_cpu_masks;
3766		}
3767	}
3768	rec->nr_threads = t;
3769	pr_debug("nr_threads: %d\n", rec->nr_threads);
3770	if (!rec->nr_threads)
3771		ret = -EINVAL;
3772
3773out_free:
3774	record__thread_mask_free(&thread_mask);
3775out_free_full_and_cpu_masks:
3776	record__thread_mask_free(&full_mask);
3777out_free_cpu_mask:
3778	record__mmap_cpu_mask_free(&cpus_mask);
3779
3780	return ret;
3781}
3782
3783static int record__init_thread_core_masks(struct record *rec, struct perf_cpu_map *cpus)
3784{
3785	int ret;
3786	struct cpu_topology *topo;
3787
3788	topo = cpu_topology__new();
3789	if (!topo) {
3790		pr_err("Failed to allocate CPU topology\n");
3791		return -ENOMEM;
3792	}
3793
3794	ret = record__init_thread_masks_spec(rec, cpus, topo->core_cpus_list,
3795					     topo->core_cpus_list, topo->core_cpus_lists);
3796	cpu_topology__delete(topo);
3797
3798	return ret;
3799}
3800
3801static int record__init_thread_package_masks(struct record *rec, struct perf_cpu_map *cpus)
3802{
3803	int ret;
3804	struct cpu_topology *topo;
3805
3806	topo = cpu_topology__new();
3807	if (!topo) {
3808		pr_err("Failed to allocate CPU topology\n");
3809		return -ENOMEM;
3810	}
3811
3812	ret = record__init_thread_masks_spec(rec, cpus, topo->package_cpus_list,
3813					     topo->package_cpus_list, topo->package_cpus_lists);
3814	cpu_topology__delete(topo);
3815
3816	return ret;
3817}
3818
3819static int record__init_thread_numa_masks(struct record *rec, struct perf_cpu_map *cpus)
3820{
3821	u32 s;
3822	int ret;
3823	const char **spec;
3824	struct numa_topology *topo;
3825
3826	topo = numa_topology__new();
3827	if (!topo) {
3828		pr_err("Failed to allocate NUMA topology\n");
3829		return -ENOMEM;
3830	}
3831
3832	spec = zalloc(topo->nr * sizeof(char *));
3833	if (!spec) {
3834		pr_err("Failed to allocate NUMA spec\n");
3835		ret = -ENOMEM;
3836		goto out_delete_topo;
3837	}
3838	for (s = 0; s < topo->nr; s++)
3839		spec[s] = topo->nodes[s].cpus;
3840
3841	ret = record__init_thread_masks_spec(rec, cpus, spec, spec, topo->nr);
3842
3843	zfree(&spec);
3844
3845out_delete_topo:
3846	numa_topology__delete(topo);
3847
3848	return ret;
3849}
3850
3851static int record__init_thread_user_masks(struct record *rec, struct perf_cpu_map *cpus)
3852{
3853	int t, ret;
3854	u32 s, nr_spec = 0;
3855	char **maps_spec = NULL, **affinity_spec = NULL, **tmp_spec;
3856	char *user_spec, *spec, *spec_ptr, *mask, *mask_ptr, *dup_mask = NULL;
3857
3858	for (t = 0, user_spec = (char *)rec->opts.threads_user_spec; ; t++, user_spec = NULL) {
3859		spec = strtok_r(user_spec, ":", &spec_ptr);
3860		if (spec == NULL)
3861			break;
3862		pr_debug2("threads_spec[%d]: %s\n", t, spec);
3863		mask = strtok_r(spec, "/", &mask_ptr);
3864		if (mask == NULL)
3865			break;
3866		pr_debug2("  maps mask: %s\n", mask);
3867		tmp_spec = realloc(maps_spec, (nr_spec + 1) * sizeof(char *));
3868		if (!tmp_spec) {
3869			pr_err("Failed to reallocate maps spec\n");
3870			ret = -ENOMEM;
3871			goto out_free;
3872		}
3873		maps_spec = tmp_spec;
3874		maps_spec[nr_spec] = dup_mask = strdup(mask);
3875		if (!maps_spec[nr_spec]) {
3876			pr_err("Failed to allocate maps spec[%d]\n", nr_spec);
3877			ret = -ENOMEM;
3878			goto out_free;
3879		}
3880		mask = strtok_r(NULL, "/", &mask_ptr);
3881		if (mask == NULL) {
3882			pr_err("Invalid thread maps or affinity specs\n");
3883			ret = -EINVAL;
3884			goto out_free;
3885		}
3886		pr_debug2("  affinity mask: %s\n", mask);
3887		tmp_spec = realloc(affinity_spec, (nr_spec + 1) * sizeof(char *));
3888		if (!tmp_spec) {
3889			pr_err("Failed to reallocate affinity spec\n");
3890			ret = -ENOMEM;
3891			goto out_free;
3892		}
3893		affinity_spec = tmp_spec;
3894		affinity_spec[nr_spec] = strdup(mask);
3895		if (!affinity_spec[nr_spec]) {
3896			pr_err("Failed to allocate affinity spec[%d]\n", nr_spec);
3897			ret = -ENOMEM;
3898			goto out_free;
3899		}
3900		dup_mask = NULL;
3901		nr_spec++;
3902	}
3903
3904	ret = record__init_thread_masks_spec(rec, cpus, (const char **)maps_spec,
3905					     (const char **)affinity_spec, nr_spec);
3906
3907out_free:
3908	free(dup_mask);
3909	for (s = 0; s < nr_spec; s++) {
3910		if (maps_spec)
3911			free(maps_spec[s]);
3912		if (affinity_spec)
3913			free(affinity_spec[s]);
3914	}
3915	free(affinity_spec);
3916	free(maps_spec);
3917
3918	return ret;
3919}
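
/*
 * A sketch of the user spec consumed above: ':'-separated
 * <maps cpus>/<affinity cpus> pairs (CPU lists are hypothetical):
 *
 *   perf record --threads=0-3/0:4-7/4 ...
 *
 * creates two streaming threads, one reading the mmaps of CPUs 0-3 while
 * pinned to CPU 0 and one reading CPUs 4-7 while pinned to CPU 4.
 */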
3920
3921static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
3922{
3923	int ret;
3924
3925	ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu().cpu);
3926	if (ret)
3927		return ret;
3928
3929	if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus))
3930		return -ENODEV;
3931
3932	rec->nr_threads = 1;
3933
3934	return 0;
3935}
3936
3937static int record__init_thread_masks(struct record *rec)
3938{
3939	int ret = 0;
3940	struct perf_cpu_map *cpus = rec->evlist->core.all_cpus;
3941
3942	if (!record__threads_enabled(rec))
3943		return record__init_thread_default_masks(rec, cpus);
3944
3945	if (evlist__per_thread(rec->evlist)) {
3946		pr_err("--per-thread option is mutually exclusive with parallel streaming mode.\n");
3947		return -EINVAL;
3948	}
3949
3950	switch (rec->opts.threads_spec) {
3951	case THREAD_SPEC__CPU:
3952		ret = record__init_thread_cpu_masks(rec, cpus);
3953		break;
3954	case THREAD_SPEC__CORE:
3955		ret = record__init_thread_core_masks(rec, cpus);
3956		break;
3957	case THREAD_SPEC__PACKAGE:
3958		ret = record__init_thread_package_masks(rec, cpus);
3959		break;
3960	case THREAD_SPEC__NUMA:
3961		ret = record__init_thread_numa_masks(rec, cpus);
3962		break;
3963	case THREAD_SPEC__USER:
3964		ret = record__init_thread_user_masks(rec, cpus);
3965		break;
3966	default:
3967		break;
3968	}
3969
3970	return ret;
3971}
3972
3973int cmd_record(int argc, const char **argv)
3974{
3975	int err;
3976	struct record *rec = &record;
3977	char errbuf[BUFSIZ];
3978
3979	setlocale(LC_ALL, "");
3980
3981#ifndef HAVE_BPF_SKEL
3982# define set_nobuild(s, l, m, c) set_option_nobuild(record_options, s, l, m, c)
3983	set_nobuild('\0', "off-cpu", "no BUILD_BPF_SKEL=1", true);
3984# undef set_nobuild
3985#endif
3986
3987	/* Disable eager loading of kernel symbols that adds overhead to perf record. */
3988	symbol_conf.lazy_load_kernel_maps = true;
3989	rec->opts.affinity = PERF_AFFINITY_SYS;
3990
3991	rec->evlist = evlist__new();
3992	if (rec->evlist == NULL)
3993		return -ENOMEM;
3994
3995	err = perf_config(perf_record_config, rec);
3996	if (err)
3997		return err;
3998
3999	argc = parse_options(argc, argv, record_options, record_usage,
4000			    PARSE_OPT_STOP_AT_NON_OPTION);
4001	if (quiet)
4002		perf_quiet_option();
4003
4004	err = symbol__validate_sym_arguments();
4005	if (err)
4006		return err;
4007
4008	perf_debuginfod_setup(&record.debuginfod);
4009
4010	/* Make system wide (-a) the default target. */
4011	if (!argc && target__none(&rec->opts.target))
4012		rec->opts.target.system_wide = true;
4013
4014	if (nr_cgroups && !rec->opts.target.system_wide) {
4015		usage_with_options_msg(record_usage, record_options,
4016			"cgroup monitoring only available in system-wide mode");
4018	}
4019
4020	if (rec->buildid_mmap) {
4021		if (!perf_can_record_build_id()) {
4022			pr_err("Failed: recording build ids in mmap events is not supported, update your kernel.\n");
4023			err = -EINVAL;
4024			goto out_opts;
4025		}
4026		pr_debug("Enabling build id in mmap2 events.\n");
4027		/* Enable mmap build id synthesizing. */
4028		symbol_conf.buildid_mmap2 = true;
4029		/* Enable perf_event_attr::build_id bit. */
4030		rec->opts.build_id = true;
4031		/* Disable build id cache. */
4032		rec->no_buildid = true;
4033	}
4034
4035	if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
4036		pr_err("Kernel has no cgroup sampling support.\n");
4037		err = -EINVAL;
4038		goto out_opts;
4039	}
4040
4041	if (rec->opts.kcore)
4042		rec->opts.text_poke = true;
4043
4044	if (rec->opts.kcore || record__threads_enabled(rec))
4045		rec->data.is_dir = true;
4046
4047	if (record__threads_enabled(rec)) {
4048		if (rec->opts.affinity != PERF_AFFINITY_SYS) {
4049			pr_err("--affinity option is mutually exclusive with parallel streaming mode.\n");
			err = -EINVAL;	/* don't return success on this error path */
4050			goto out_opts;
4051		}
4052		if (record__aio_enabled(rec)) {
4053			pr_err("Asynchronous streaming mode (--aio) is mutually exclusive with parallel streaming mode.\n");
			err = -EINVAL;
4054			goto out_opts;
4055		}
4056	}
4057
4058	if (rec->opts.comp_level != 0) {
4059		pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
4060		rec->no_buildid = true;
4061	}
4062
4063	if (rec->opts.record_switch_events &&
4064	    !perf_can_record_switch_events()) {
4065		ui__error("kernel does not support recording context switch events\n");
4066		parse_options_usage(record_usage, record_options, "switch-events", 0);
4067		err = -EINVAL;
4068		goto out_opts;
4069	}
4070
4071	if (switch_output_setup(rec)) {
4072		parse_options_usage(record_usage, record_options, "switch-output", 0);
4073		err = -EINVAL;
4074		goto out_opts;
4075	}
4076
4077	if (rec->switch_output.time) {
4078		signal(SIGALRM, alarm_sig_handler);
4079		alarm(rec->switch_output.time);
4080	}
4081
4082	if (rec->switch_output.num_files) {
4083		rec->switch_output.filenames = calloc(rec->switch_output.num_files,
4084						      sizeof(char *));
4085		if (!rec->switch_output.filenames) {
4086			err = -EINVAL;
4087			goto out_opts;
4088		}
4089	}
4090
4091	if (rec->timestamp_filename && record__threads_enabled(rec)) {
4092		rec->timestamp_filename = false;
4093		pr_warning("WARNING: --timestamp-filename option is not available in parallel streaming mode.\n");
4094	}
4095
4096	/*
4097	 * Allow aliases to facilitate the lookup of symbols for address
4098	 * filters. Refer to auxtrace_parse_filters().
4099	 */
4100	symbol_conf.allow_aliases = true;
4101
4102	symbol__init(NULL);
4103
4104	err = record__auxtrace_init(rec);
4105	if (err)
4106		goto out;
4107
4108	if (dry_run)
4109		goto out;
4110
4111	err = -ENOMEM;
4112
4113	if (rec->no_buildid_cache || rec->no_buildid) {
4114		disable_buildid_cache();
4115	} else if (rec->switch_output.enabled) {
4116		/*
4117		 * In 'perf record --switch-output', disable buildid
4118		 * generation by default to reduce data file switching
4119		 * overhead. Still generate buildid if they are required
4120		 * explicitly using
4121		 *
4122		 *  perf record --switch-output --no-no-buildid \
4123		 *              --no-no-buildid-cache
4124		 *
4125		 * Following code equals to:
4126		 *
4127		 * if ((rec->no_buildid || !rec->no_buildid_set) &&
4128		 *     (rec->no_buildid_cache || !rec->no_buildid_cache_set))
4129		 *         disable_buildid_cache();
4130		 */
4131		bool disable = true;
4132
4133		if (rec->no_buildid_set && !rec->no_buildid)
4134			disable = false;
4135		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
4136			disable = false;
4137		if (disable) {
4138			rec->no_buildid = true;
4139			rec->no_buildid_cache = true;
4140			disable_buildid_cache();
4141		}
4142	}
4143
4144	if (record.opts.overwrite)
4145		record.opts.tail_synthesize = true;
4146
4147	if (rec->evlist->core.nr_entries == 0) {
4148		bool can_profile_kernel = perf_event_paranoid_check(1);
4149
4150		err = parse_event(rec->evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
4151		if (err)
4152			goto out;
4153	}
4154
4155	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
4156		rec->opts.no_inherit = true;
4157
4158	err = target__validate(&rec->opts.target);
4159	if (err) {
4160		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
4161		ui__warning("%s\n", errbuf);
4162	}
4163
4164	err = target__parse_uid(&rec->opts.target);
4165	if (err) {
4166		int saved_errno = errno;
4167
4168		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
4169		ui__error("%s", errbuf);
4170
4171		err = -saved_errno;
4172		goto out;
4173	}
4174
4175	/* Enable ignoring missing threads when -u/-p option is defined. */
4176	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
4177
4178	evlist__warn_user_requested_cpus(rec->evlist, rec->opts.target.cpu_list);
4179
4180	if (callchain_param.enabled && callchain_param.record_mode == CALLCHAIN_FP)
4181		arch__add_leaf_frame_record_opts(&rec->opts);
4182
4183	err = -ENOMEM;
4184	if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) {
4185		if (rec->opts.target.pid != NULL) {
4186			pr_err("Couldn't create thread/CPU maps: %s\n",
4187				errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
4188			goto out;
4189		} else {
4190			usage_with_options(record_usage, record_options);
4191		}
4192	}
4193
4194	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
4195	if (err)
4196		goto out;
4197
4198	/*
4199	 * We take all buildids when the file contains
4200	 * AUX area tracing data, because we do not decode the
4201	 * trace; decoding it would take too long.
4202	 */
4203	if (rec->opts.full_auxtrace)
4204		rec->buildid_all = true;
4205
4206	if (rec->opts.text_poke) {
4207		err = record__config_text_poke(rec->evlist);
4208		if (err) {
4209			pr_err("record__config_text_poke failed, error %d\n", err);
4210			goto out;
4211		}
4212	}
4213
4214	if (rec->off_cpu) {
4215		err = record__config_off_cpu(rec);
4216		if (err) {
4217			pr_err("record__config_off_cpu failed, error %d\n", err);
4218			goto out;
4219		}
4220	}
4221
4222	if (record_opts__config(&rec->opts)) {
4223		err = -EINVAL;
4224		goto out;
4225	}
4226
4227	err = record__config_tracking_events(rec);
4228	if (err) {
4229		pr_err("record__config_tracking_events failed, error %d\n", err);
4230		goto out;
4231	}
4232
4233	err = record__init_thread_masks(rec);
4234	if (err) {
4235		pr_err("Failed to initialize parallel data streaming masks\n");
4236		goto out;
4237	}
4238
4239	if (rec->opts.nr_cblocks > nr_cblocks_max)
4240		rec->opts.nr_cblocks = nr_cblocks_max;
4241	pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
4242
4243	pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
4244	pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
4245
4246	if (rec->opts.comp_level > comp_level_max)
4247		rec->opts.comp_level = comp_level_max;
4248	pr_debug("comp level: %d\n", rec->opts.comp_level);
4249
4250	err = __cmd_record(&record, argc, argv);
4251out:
4252	evlist__delete(rec->evlist);
4253	symbol__exit();
4254	auxtrace_record__free(rec->itr);
4255out_opts:
4256	record__free_thread_masks(rec, rec->nr_threads);
4257	rec->nr_threads = 0;
4258	evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
4259	return err;
4260}
4261
4262static void snapshot_sig_handler(int sig __maybe_unused)
4263{
4264	struct record *rec = &record;
4265
4266	hit_auxtrace_snapshot_trigger(rec);
4267
4268	if (switch_output_signal(rec))
4269		trigger_hit(&switch_output_trigger);
4270}
4271
4272static void alarm_sig_handler(int sig __maybe_unused)
4273{
4274	struct record *rec = &record;
4275
4276	if (switch_output_time(rec))
4277		trigger_hit(&switch_output_trigger);
4278}