v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * builtin-record.c
   4 *
   5 * Builtin record command: Record the profile of a workload
   6 * (or a CPU, or a PID) into the perf.data output file - for
   7 * later analysis via perf report.
   8 */
   9#include "builtin.h"
  10
  11#include "perf.h"
  12
  13#include "util/build-id.h"
  14#include "util/util.h"
  15#include <subcmd/parse-options.h>
  16#include "util/parse-events.h"
  17#include "util/config.h"
  18
  19#include "util/callchain.h"
  20#include "util/cgroup.h"
  21#include "util/header.h"
  22#include "util/event.h"
  23#include "util/evlist.h"
  24#include "util/evsel.h"
  25#include "util/debug.h"
  26#include "util/drv_configs.h"
  27#include "util/session.h"
  28#include "util/tool.h"
  29#include "util/symbol.h"
  30#include "util/cpumap.h"
  31#include "util/thread_map.h"
  32#include "util/data.h"
  33#include "util/perf_regs.h"
  34#include "util/auxtrace.h"
  35#include "util/tsc.h"
  36#include "util/parse-branch-options.h"
  37#include "util/parse-regs-options.h"
  38#include "util/llvm-utils.h"
  39#include "util/bpf-loader.h"
  40#include "util/trigger.h"
  41#include "util/perf-hooks.h"
  42#include "util/time-utils.h"
  43#include "util/units.h"
  44#include "asm/bug.h"
  45
  46#include <errno.h>
  47#include <inttypes.h>
  48#include <locale.h>
  49#include <poll.h>
  50#include <unistd.h>
  51#include <sched.h>
  52#include <signal.h>
  53#include <sys/mman.h>
  54#include <sys/wait.h>
  55#include <linux/time64.h>
  56
  57struct switch_output {
  58	bool		 enabled;
  59	bool		 signal;
  60	unsigned long	 size;
  61	unsigned long	 time;
  62	const char	*str;
  63	bool		 set;
  64};
  65
  66struct record {
  67	struct perf_tool	tool;
  68	struct record_opts	opts;
  69	u64			bytes_written;
  70	struct perf_data	data;
  71	struct auxtrace_record	*itr;
  72	struct perf_evlist	*evlist;
  73	struct perf_session	*session;
  74	int			realtime_prio;
  75	bool			no_buildid;
  76	bool			no_buildid_set;
  77	bool			no_buildid_cache;
  78	bool			no_buildid_cache_set;
  79	bool			buildid_all;
  80	bool			timestamp_filename;
  81	bool			timestamp_boundary;
  82	struct switch_output	switch_output;
  83	unsigned long long	samples;
  84};
  85
  86static volatile int auxtrace_record__snapshot_started;
  87static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
  88static DEFINE_TRIGGER(switch_output_trigger);
  89
  90static bool switch_output_signal(struct record *rec)
  91{
  92	return rec->switch_output.signal &&
  93	       trigger_is_ready(&switch_output_trigger);
  94}
  95
  96static bool switch_output_size(struct record *rec)
  97{
  98	return rec->switch_output.size &&
  99	       trigger_is_ready(&switch_output_trigger) &&
 100	       (rec->bytes_written >= rec->switch_output.size);
 101}
 102
 103static bool switch_output_time(struct record *rec)
 104{
 105	return rec->switch_output.time &&
 106	       trigger_is_ready(&switch_output_trigger);
 107}
 108
 109static int record__write(struct record *rec, void *bf, size_t size)
 110{
 111	if (perf_data__write(rec->session->data, bf, size) < 0) {
 112		pr_err("failed to write perf data, error: %m\n");
 113		return -1;
 114	}
 115
 116	rec->bytes_written += size;
 117
 118	if (switch_output_size(rec))
 119		trigger_hit(&switch_output_trigger);
 120
 121	return 0;
 122}
 123
 124static int process_synthesized_event(struct perf_tool *tool,
 125				     union perf_event *event,
 126				     struct perf_sample *sample __maybe_unused,
 127				     struct machine *machine __maybe_unused)
 128{
 129	struct record *rec = container_of(tool, struct record, tool);
 130	return record__write(rec, event, event->header.size);
 131}
 132
 133static int record__pushfn(void *to, void *bf, size_t size)
 134{
 135	struct record *rec = to;
 136
 137	rec->samples++;
 138	return record__write(rec, bf, size);
 139}
 140
 141static volatile int done;
 142static volatile int signr = -1;
 143static volatile int child_finished;
 144
 145static void sig_handler(int sig)
 146{
 147	if (sig == SIGCHLD)
 148		child_finished = 1;
 149	else
 150		signr = sig;
 151
 152	done = 1;
 153}
 154
 155static void sigsegv_handler(int sig)
 156{
 157	perf_hooks__recover();
 158	sighandler_dump_stack(sig);
 159}
 160
 161static void record__sig_exit(void)
 162{
 163	if (signr == -1)
 164		return;
 165
 166	signal(signr, SIG_DFL);
 167	raise(signr);
 168}
 169
 170#ifdef HAVE_AUXTRACE_SUPPORT
 171
 172static int record__process_auxtrace(struct perf_tool *tool,
 173				    union perf_event *event, void *data1,
 174				    size_t len1, void *data2, size_t len2)
 175{
 176	struct record *rec = container_of(tool, struct record, tool);
 177	struct perf_data *data = &rec->data;
 178	size_t padding;
 179	u8 pad[8] = {0};
 180
 181	if (!perf_data__is_pipe(data)) {
 182		off_t file_offset;
 183		int fd = perf_data__fd(data);
 184		int err;
 185
 186		file_offset = lseek(fd, 0, SEEK_CUR);
 187		if (file_offset == -1)
 188			return -1;
 189		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
 190						     event, file_offset);
 191		if (err)
 192			return err;
 193	}
 194
 195	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
 196	padding = (len1 + len2) & 7;
 197	if (padding)
 198		padding = 8 - padding;
 199
 200	record__write(rec, event, event->header.size);
 201	record__write(rec, data1, len1);
 202	if (len2)
 203		record__write(rec, data2, len2);
 204	record__write(rec, &pad, padding);
 205
 206	return 0;
 207}
 208
 209static int record__auxtrace_mmap_read(struct record *rec,
 210				      struct auxtrace_mmap *mm)
 211{
 212	int ret;
 213
 214	ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
 215				  record__process_auxtrace);
 216	if (ret < 0)
 217		return ret;
 218
 219	if (ret)
 220		rec->samples++;
 221
 222	return 0;
 223}
 224
 225static int record__auxtrace_mmap_read_snapshot(struct record *rec,
 226					       struct auxtrace_mmap *mm)
 227{
 228	int ret;
 229
 230	ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
 231					   record__process_auxtrace,
 232					   rec->opts.auxtrace_snapshot_size);
 233	if (ret < 0)
 234		return ret;
 235
 236	if (ret)
 237		rec->samples++;
 238
 239	return 0;
 240}
 241
 242static int record__auxtrace_read_snapshot_all(struct record *rec)
 243{
 244	int i;
 245	int rc = 0;
 246
 247	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
 248		struct auxtrace_mmap *mm =
 249				&rec->evlist->mmap[i].auxtrace_mmap;
 250
 251		if (!mm->base)
 252			continue;
 253
 254		if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
 255			rc = -1;
 256			goto out;
 257		}
 258	}
 259out:
 260	return rc;
 261}
 262
 263static void record__read_auxtrace_snapshot(struct record *rec)
 264{
 265	pr_debug("Recording AUX area tracing snapshot\n");
 266	if (record__auxtrace_read_snapshot_all(rec) < 0) {
 267		trigger_error(&auxtrace_snapshot_trigger);
 268	} else {
 269		if (auxtrace_record__snapshot_finish(rec->itr))
 270			trigger_error(&auxtrace_snapshot_trigger);
 271		else
 272			trigger_ready(&auxtrace_snapshot_trigger);
 273	}
 274}
 275
 276static int record__auxtrace_init(struct record *rec)
 277{
 278	int err;
 279
 280	if (!rec->itr) {
 281		rec->itr = auxtrace_record__init(rec->evlist, &err);
 282		if (err)
 283			return err;
 284	}
 285
 286	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
 287					      rec->opts.auxtrace_snapshot_opts);
 288	if (err)
 289		return err;
 290
 291	return auxtrace_parse_filters(rec->evlist);
 292}
 293
 294#else
 295
 296static inline
 297int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
 298			       struct auxtrace_mmap *mm __maybe_unused)
 299{
 300	return 0;
 301}
 302
 303static inline
 304void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
 305{
 306}
 307
 308static inline
 309int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
 310{
 311	return 0;
 312}
 313
 314static int record__auxtrace_init(struct record *rec __maybe_unused)
 315{
 316	return 0;
 317}
 318
 319#endif
 320
 321static int record__mmap_evlist(struct record *rec,
 322			       struct perf_evlist *evlist)
 323{
 324	struct record_opts *opts = &rec->opts;
 325	char msg[512];
 326
 327	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
 328				 opts->auxtrace_mmap_pages,
 329				 opts->auxtrace_snapshot_mode) < 0) {
 330		if (errno == EPERM) {
 331			pr_err("Permission error mapping pages.\n"
 332			       "Consider increasing "
 333			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
   334			       "or try again with a smaller value of -m/--mmap-pages.\n"
 335			       "(current value: %u,%u)\n",
 336			       opts->mmap_pages, opts->auxtrace_mmap_pages);
 337			return -errno;
 338		} else {
 339			pr_err("failed to mmap with %d (%s)\n", errno,
 340				str_error_r(errno, msg, sizeof(msg)));
 341			if (errno)
 342				return -errno;
 343			else
 344				return -EINVAL;
 345		}
 346	}
 347	return 0;
 348}
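/*
 * Illustrative workaround for the EPERM case above (the value below is an
 * example, not taken from this file): raise the mlock budget that the
 * message refers to, e.g.
 *
 *   # echo 2048 > /proc/sys/kernel/perf_event_mlock_kb
 *
 * or retry with a smaller -m/--mmap-pages value.
 */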
 349
 350static int record__mmap(struct record *rec)
 351{
 352	return record__mmap_evlist(rec, rec->evlist);
 353}
 354
 355static int record__open(struct record *rec)
 356{
 357	char msg[BUFSIZ];
 358	struct perf_evsel *pos;
 359	struct perf_evlist *evlist = rec->evlist;
 360	struct perf_session *session = rec->session;
 361	struct record_opts *opts = &rec->opts;
 362	struct perf_evsel_config_term *err_term;
 363	int rc = 0;
 364
 365	/*
 366	 * For initial_delay we need to add a dummy event so that we can track
 367	 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
 368	 * real events, the ones asked by the user.
 369	 */
 370	if (opts->initial_delay) {
 371		if (perf_evlist__add_dummy(evlist))
 372			return -ENOMEM;
 373
 374		pos = perf_evlist__first(evlist);
 375		pos->tracking = 0;
 376		pos = perf_evlist__last(evlist);
 377		pos->tracking = 1;
 378		pos->attr.enable_on_exec = 1;
 379	}
 380
 381	perf_evlist__config(evlist, opts, &callchain_param);
 382
 383	evlist__for_each_entry(evlist, pos) {
 384try_again:
 385		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
 386			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
 387				if (verbose > 0)
 388					ui__warning("%s\n", msg);
 389				goto try_again;
 390			}
 391
 392			rc = -errno;
 393			perf_evsel__open_strerror(pos, &opts->target,
 394						  errno, msg, sizeof(msg));
 395			ui__error("%s\n", msg);
 396			goto out;
 397		}
 398
 399		pos->supported = true;
 400	}
 401
 402	if (perf_evlist__apply_filters(evlist, &pos)) {
 403		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
 404			pos->filter, perf_evsel__name(pos), errno,
 405			str_error_r(errno, msg, sizeof(msg)));
 406		rc = -1;
 407		goto out;
 408	}
 409
 410	if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
 411		pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
 412		      err_term->val.drv_cfg, perf_evsel__name(pos), errno,
 413		      str_error_r(errno, msg, sizeof(msg)));
 414		rc = -1;
 415		goto out;
 416	}
 417
 418	rc = record__mmap(rec);
 419	if (rc)
 420		goto out;
 421
 422	session->evlist = evlist;
 423	perf_session__set_id_hdr_size(session);
 424out:
 425	return rc;
 426}
 427
 428static int process_sample_event(struct perf_tool *tool,
 429				union perf_event *event,
 430				struct perf_sample *sample,
 431				struct perf_evsel *evsel,
 432				struct machine *machine)
 433{
 434	struct record *rec = container_of(tool, struct record, tool);
 435
 436	if (rec->evlist->first_sample_time == 0)
 437		rec->evlist->first_sample_time = sample->time;
 438
 439	rec->evlist->last_sample_time = sample->time;
 440
 441	if (rec->buildid_all)
 442		return 0;
 443
 444	rec->samples++;
 445	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
 446}
 447
 448static int process_buildids(struct record *rec)
 449{
 450	struct perf_data *data = &rec->data;
 451	struct perf_session *session = rec->session;
 452
 453	if (data->size == 0)
 454		return 0;
 455
 456	/*
  457	 * During this process, it'll load the kernel map and replace
  458	 * dso->long_name with the real pathname it found.  In this case
  459	 * we prefer a vmlinux path like
  460	 *   /lib/modules/3.16.4/build/vmlinux
  461	 *
  462	 * rather than the build-id path (in the debug directory), e.g.
 463	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
 464	 */
 465	symbol_conf.ignore_vmlinux_buildid = true;
 466
 467	/*
  468	 * If --buildid-all is given, it marks all DSOs regardless of hits,
  469	 * so there is no need to process samples. But if timestamp_boundary is
  470	 * enabled, it still needs to walk all samples to get the timestamps of
  471	 * the first/last samples.
 472	 */
 473	if (rec->buildid_all && !rec->timestamp_boundary)
 474		rec->tool.sample = NULL;
 475
 476	return perf_session__process_events(session);
 477}
 478
 479static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
 480{
 481	int err;
 482	struct perf_tool *tool = data;
 483	/*
  484	 * For guest kernels, when processing the record & report subcommands,
  485	 * we arrange the module mmap prior to the guest kernel mmap and trigger
  486	 * a DSO preload, because by default guest module symbols are loaded
  487	 * from guest kallsyms instead of /lib/modules/XXX/XXX. This is done
  488	 * to avoid missing symbols when the first address falls in a module
  489	 * instead of in the guest kernel.
 490	 */
 491	err = perf_event__synthesize_modules(tool, process_synthesized_event,
 492					     machine);
 493	if (err < 0)
 494		pr_err("Couldn't record guest kernel [%d]'s reference"
 495		       " relocation symbol.\n", machine->pid);
 496
 497	/*
  498	 * We use _stext for the guest kernel because the guest kernel's
  499	 * /proc/kallsyms sometimes has no _text.
 500	 */
 501	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
 502						 machine);
 503	if (err < 0)
 504		pr_err("Couldn't record guest kernel [%d]'s reference"
 505		       " relocation symbol.\n", machine->pid);
 506}
 507
 508static struct perf_event_header finished_round_event = {
 509	.size = sizeof(struct perf_event_header),
 510	.type = PERF_RECORD_FINISHED_ROUND,
 511};
 512
 513static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
 514				    bool overwrite)
 515{
 516	u64 bytes_written = rec->bytes_written;
 517	int i;
 518	int rc = 0;
 519	struct perf_mmap *maps;
 520
 521	if (!evlist)
 522		return 0;
 523
 524	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
 525	if (!maps)
 526		return 0;
 527
 528	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
 529		return 0;
 530
 531	for (i = 0; i < evlist->nr_mmaps; i++) {
 532		struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
 533
 534		if (maps[i].base) {
 535			if (perf_mmap__push(&maps[i], rec, record__pushfn) != 0) {
 536				rc = -1;
 537				goto out;
 538			}
 539		}
 540
 541		if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
 542		    record__auxtrace_mmap_read(rec, mm) != 0) {
 543			rc = -1;
 544			goto out;
 545		}
 546	}
 547
 548	/*
 549	 * Mark the round finished in case we wrote
 550	 * at least one event.
 551	 */
 552	if (bytes_written != rec->bytes_written)
 553		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
 554
 555	if (overwrite)
 556		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
 557out:
 558	return rc;
 559}
 560
 561static int record__mmap_read_all(struct record *rec)
 562{
 563	int err;
 564
 565	err = record__mmap_read_evlist(rec, rec->evlist, false);
 566	if (err)
 567		return err;
 568
 569	return record__mmap_read_evlist(rec, rec->evlist, true);
 570}
 571
 572static void record__init_features(struct record *rec)
 573{
 574	struct perf_session *session = rec->session;
 575	int feat;
 576
 577	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
 578		perf_header__set_feat(&session->header, feat);
 579
 580	if (rec->no_buildid)
 581		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
 582
 583	if (!have_tracepoints(&rec->evlist->entries))
 584		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
 585
 586	if (!rec->opts.branch_stack)
 587		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
 588
 589	if (!rec->opts.full_auxtrace)
 590		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
 591
 592	perf_header__clear_feat(&session->header, HEADER_STAT);
 593}
 594
 595static void
 596record__finish_output(struct record *rec)
 597{
 598	struct perf_data *data = &rec->data;
 599	int fd = perf_data__fd(data);
 600
 601	if (data->is_pipe)
 602		return;
 603
 604	rec->session->header.data_size += rec->bytes_written;
 605	data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);
 606
 607	if (!rec->no_buildid) {
 608		process_buildids(rec);
 609
 610		if (rec->buildid_all)
 611			dsos__hit_all(rec->session);
 612	}
 613	perf_session__write_header(rec->session, rec->evlist, fd, true);
 614
 615	return;
 616}
 617
 618static int record__synthesize_workload(struct record *rec, bool tail)
 619{
 620	int err;
 621	struct thread_map *thread_map;
 622
 623	if (rec->opts.tail_synthesize != tail)
 624		return 0;
 625
 626	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
 627	if (thread_map == NULL)
 628		return -1;
 629
 630	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
 631						 process_synthesized_event,
 632						 &rec->session->machines.host,
 633						 rec->opts.sample_address,
 634						 rec->opts.proc_map_timeout);
 635	thread_map__put(thread_map);
 636	return err;
 637}
 638
 639static int record__synthesize(struct record *rec, bool tail);
 640
 641static int
 642record__switch_output(struct record *rec, bool at_exit)
 643{
 644	struct perf_data *data = &rec->data;
 645	int fd, err;
 646
  647	/* Same size as a real timestamp, e.g. "2015122520103046" */
 648	char timestamp[] = "InvalidTimestamp";
 649
 650	record__synthesize(rec, true);
 651	if (target__none(&rec->opts.target))
 652		record__synthesize_workload(rec, true);
 653
 654	rec->samples = 0;
 655	record__finish_output(rec);
 656	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
 657	if (err) {
 658		pr_err("Failed to get current timestamp\n");
 659		return -EINVAL;
 660	}
 661
 662	fd = perf_data__switch(data, timestamp,
 663				    rec->session->header.data_offset,
 664				    at_exit);
 665	if (fd >= 0 && !at_exit) {
 666		rec->bytes_written = 0;
 667		rec->session->header.data_size = 0;
 668	}
 669
 670	if (!quiet)
 671		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
 672			data->file.path, timestamp);
 673
 674	/* Output tracking events */
 675	if (!at_exit) {
 676		record__synthesize(rec, false);
 677
 678		/*
 679		 * In 'perf record --switch-output' without -a,
 680		 * record__synthesize() in record__switch_output() won't
 681		 * generate tracking events because there's no thread_map
  682		 * in evlist. As a result the newly created perf.data lacks
  683		 * map and comm information.
 684		 * Create a fake thread_map and directly call
 685		 * perf_event__synthesize_thread_map() for those events.
 686		 */
 687		if (target__none(&rec->opts.target))
 688			record__synthesize_workload(rec, false);
 689	}
 690	return fd;
 691}
 692
 693static volatile int workload_exec_errno;
 694
 695/*
 696 * perf_evlist__prepare_workload will send a SIGUSR1
  697 * if the fork fails, since we asked for it by setting its
 698 * want_signal to true.
 699 */
 700static void workload_exec_failed_signal(int signo __maybe_unused,
 701					siginfo_t *info,
 702					void *ucontext __maybe_unused)
 703{
 704	workload_exec_errno = info->si_value.sival_int;
 705	done = 1;
 706	child_finished = 1;
 707}
 708
 709static void snapshot_sig_handler(int sig);
 710static void alarm_sig_handler(int sig);
 711
 712int __weak
 713perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
 714			    struct perf_tool *tool __maybe_unused,
 715			    perf_event__handler_t process __maybe_unused,
 716			    struct machine *machine __maybe_unused)
 717{
 718	return 0;
 719}
 720
 721static const struct perf_event_mmap_page *
 722perf_evlist__pick_pc(struct perf_evlist *evlist)
 723{
 724	if (evlist) {
 725		if (evlist->mmap && evlist->mmap[0].base)
 726			return evlist->mmap[0].base;
 727		if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
 728			return evlist->overwrite_mmap[0].base;
 729	}
 730	return NULL;
 731}
 732
 733static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
 734{
 735	const struct perf_event_mmap_page *pc;
 736
 737	pc = perf_evlist__pick_pc(rec->evlist);
 738	if (pc)
 739		return pc;
 740	return NULL;
 741}
 742
 743static int record__synthesize(struct record *rec, bool tail)
 744{
 745	struct perf_session *session = rec->session;
 746	struct machine *machine = &session->machines.host;
 747	struct perf_data *data = &rec->data;
 748	struct record_opts *opts = &rec->opts;
 749	struct perf_tool *tool = &rec->tool;
 750	int fd = perf_data__fd(data);
 751	int err = 0;
 752
 753	if (rec->opts.tail_synthesize != tail)
 754		return 0;
 755
 756	if (data->is_pipe) {
 757		/*
 758		 * We need to synthesize events first, because some
  759		 * features work on top of them (on the report side).
 760		 */
 761		err = perf_event__synthesize_attrs(tool, session,
 762						   process_synthesized_event);
 763		if (err < 0) {
 764			pr_err("Couldn't synthesize attrs.\n");
 765			goto out;
 766		}
 767
 768		err = perf_event__synthesize_features(tool, session, rec->evlist,
 769						      process_synthesized_event);
 770		if (err < 0) {
 771			pr_err("Couldn't synthesize features.\n");
 772			return err;
 773		}
 774
 775		if (have_tracepoints(&rec->evlist->entries)) {
 776			/*
 777			 * FIXME err <= 0 here actually means that
 778			 * there were no tracepoints so its not really
 779			 * an error, just that we don't need to
 780			 * synthesize anything.  We really have to
 781			 * return this more properly and also
 782			 * propagate errors that now are calling die()
 783			 */
 784			err = perf_event__synthesize_tracing_data(tool,	fd, rec->evlist,
 785								  process_synthesized_event);
 786			if (err <= 0) {
 787				pr_err("Couldn't record tracing data.\n");
 788				goto out;
 789			}
 790			rec->bytes_written += err;
 791		}
 792	}
 793
 794	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
 795					  process_synthesized_event, machine);
 796	if (err)
 797		goto out;
 798
 799	if (rec->opts.full_auxtrace) {
 800		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
 801					session, process_synthesized_event);
 802		if (err)
 803			goto out;
 804	}
 805
 806	if (!perf_evlist__exclude_kernel(rec->evlist)) {
 807		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
 808							 machine);
 809		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
 810				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
 811				   "Check /proc/kallsyms permission or run as root.\n");
 812
 813		err = perf_event__synthesize_modules(tool, process_synthesized_event,
 814						     machine);
 815		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
 816				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
 817				   "Check /proc/modules permission or run as root.\n");
 818	}
 819
 820	if (perf_guest) {
 821		machines__process_guests(&session->machines,
 822					 perf_event__synthesize_guest_os, tool);
 823	}
 824
 825	err = perf_event__synthesize_extra_attr(&rec->tool,
 826						rec->evlist,
 827						process_synthesized_event,
 828						data->is_pipe);
 829	if (err)
 830		goto out;
 831
 832	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
 833						 process_synthesized_event,
 834						NULL);
 835	if (err < 0) {
 836		pr_err("Couldn't synthesize thread map.\n");
 837		return err;
 838	}
 839
 840	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
 841					     process_synthesized_event, NULL);
 842	if (err < 0) {
 843		pr_err("Couldn't synthesize cpu map.\n");
 844		return err;
 845	}
 846
 847	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
 848					    process_synthesized_event, opts->sample_address,
 849					    opts->proc_map_timeout, 1);
 850out:
 851	return err;
 852}
 853
 854static int __cmd_record(struct record *rec, int argc, const char **argv)
 855{
 856	int err;
 857	int status = 0;
 858	unsigned long waking = 0;
 859	const bool forks = argc > 0;
 860	struct perf_tool *tool = &rec->tool;
 861	struct record_opts *opts = &rec->opts;
 862	struct perf_data *data = &rec->data;
 863	struct perf_session *session;
 864	bool disabled = false, draining = false;
 865	int fd;
 866
 867	atexit(record__sig_exit);
 868	signal(SIGCHLD, sig_handler);
 869	signal(SIGINT, sig_handler);
 870	signal(SIGTERM, sig_handler);
 871	signal(SIGSEGV, sigsegv_handler);
 872
 873	if (rec->opts.record_namespaces)
 874		tool->namespace_events = true;
 875
 876	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
 877		signal(SIGUSR2, snapshot_sig_handler);
 878		if (rec->opts.auxtrace_snapshot_mode)
 879			trigger_on(&auxtrace_snapshot_trigger);
 880		if (rec->switch_output.enabled)
 881			trigger_on(&switch_output_trigger);
 882	} else {
 883		signal(SIGUSR2, SIG_IGN);
 884	}
 885
 886	session = perf_session__new(data, false, tool);
 887	if (session == NULL) {
 888		pr_err("Perf session creation failed.\n");
 889		return -1;
 890	}
 891
 892	fd = perf_data__fd(data);
 893	rec->session = session;
 894
 895	record__init_features(rec);
 896
 897	if (forks) {
 898		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
 899						    argv, data->is_pipe,
 900						    workload_exec_failed_signal);
 901		if (err < 0) {
 902			pr_err("Couldn't run the workload!\n");
 903			status = err;
 904			goto out_delete_session;
 905		}
 906	}
 907
 908	/*
  909	 * If we have just a single event and are sending data
  910	 * through a pipe, we need to force sample id allocation,
  911	 * because we synthesize the event name through the pipe
 912	 * and need the id for that.
 913	 */
 914	if (data->is_pipe && rec->evlist->nr_entries == 1)
 915		rec->opts.sample_id = true;
 916
 917	if (record__open(rec) != 0) {
 918		err = -1;
 919		goto out_child;
 920	}
 921
 922	err = bpf__apply_obj_config();
 923	if (err) {
 924		char errbuf[BUFSIZ];
 925
 926		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
 927		pr_err("ERROR: Apply config to BPF failed: %s\n",
 928			 errbuf);
 929		goto out_child;
 930	}
 931
 932	/*
 933	 * Normally perf_session__new would do this, but it doesn't have the
 934	 * evlist.
 935	 */
 936	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
 937		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
 938		rec->tool.ordered_events = false;
 939	}
 940
 941	if (!rec->evlist->nr_groups)
 942		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
 943
 944	if (data->is_pipe) {
 945		err = perf_header__write_pipe(fd);
 946		if (err < 0)
 947			goto out_child;
 948	} else {
 949		err = perf_session__write_header(session, rec->evlist, fd, false);
 950		if (err < 0)
 951			goto out_child;
 952	}
 953
 954	if (!rec->no_buildid
 955	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
 956		pr_err("Couldn't generate buildids. "
 957		       "Use --no-buildid to profile anyway.\n");
 958		err = -1;
 959		goto out_child;
 960	}
 961
 962	err = record__synthesize(rec, false);
 963	if (err < 0)
 964		goto out_child;
 965
 966	if (rec->realtime_prio) {
 967		struct sched_param param;
 968
 969		param.sched_priority = rec->realtime_prio;
 970		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
 971			pr_err("Could not set realtime priority.\n");
 972			err = -1;
 973			goto out_child;
 974		}
 975	}
 976
 977	/*
 978	 * When perf is starting the traced process, all the events
 979	 * (apart from group members) have enable_on_exec=1 set,
 980	 * so don't spoil it by prematurely enabling them.
 981	 */
 982	if (!target__none(&opts->target) && !opts->initial_delay)
 983		perf_evlist__enable(rec->evlist);
 984
 985	/*
 986	 * Let the child rip
 987	 */
 988	if (forks) {
 989		struct machine *machine = &session->machines.host;
 990		union perf_event *event;
 991		pid_t tgid;
 992
 993		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
 994		if (event == NULL) {
 995			err = -ENOMEM;
 996			goto out_child;
 997		}
 998
 999		/*
 1000		 * Some H/W events are generated before the COMM event,
 1001		 * which is emitted during exec(), so perf script
 1002		 * cannot see a correct process name for those events.
 1003		 * Synthesize a COMM event to prevent that.
1004		 */
1005		tgid = perf_event__synthesize_comm(tool, event,
1006						   rec->evlist->workload.pid,
1007						   process_synthesized_event,
1008						   machine);
1009		free(event);
1010
1011		if (tgid == -1)
1012			goto out_child;
1013
1014		event = malloc(sizeof(event->namespaces) +
1015			       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1016			       machine->id_hdr_size);
1017		if (event == NULL) {
1018			err = -ENOMEM;
1019			goto out_child;
1020		}
1021
1022		/*
1023		 * Synthesize NAMESPACES event for the command specified.
1024		 */
1025		perf_event__synthesize_namespaces(tool, event,
1026						  rec->evlist->workload.pid,
1027						  tgid, process_synthesized_event,
1028						  machine);
1029		free(event);
1030
1031		perf_evlist__start_workload(rec->evlist);
1032	}
1033
1034	if (opts->initial_delay) {
1035		usleep(opts->initial_delay * USEC_PER_MSEC);
1036		perf_evlist__enable(rec->evlist);
1037	}
1038
1039	trigger_ready(&auxtrace_snapshot_trigger);
1040	trigger_ready(&switch_output_trigger);
1041	perf_hooks__invoke_record_start();
1042	for (;;) {
1043		unsigned long long hits = rec->samples;
1044
1045		/*
 1046		 * rec->evlist->bkw_mmap_state can be
 1047		 * BKW_MMAP_EMPTY here: when done == true and
 1048		 * hits != rec->samples in the previous round.
 1049		 *
 1050		 * perf_evlist__toggle_bkw_mmap() ensures we never
 1051		 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1052		 */
1053		if (trigger_is_hit(&switch_output_trigger) || done || draining)
1054			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1055
1056		if (record__mmap_read_all(rec) < 0) {
1057			trigger_error(&auxtrace_snapshot_trigger);
1058			trigger_error(&switch_output_trigger);
1059			err = -1;
1060			goto out_child;
1061		}
1062
1063		if (auxtrace_record__snapshot_started) {
1064			auxtrace_record__snapshot_started = 0;
1065			if (!trigger_is_error(&auxtrace_snapshot_trigger))
1066				record__read_auxtrace_snapshot(rec);
1067			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
1068				pr_err("AUX area tracing snapshot failed\n");
1069				err = -1;
1070				goto out_child;
1071			}
1072		}
1073
1074		if (trigger_is_hit(&switch_output_trigger)) {
1075			/*
1076			 * If switch_output_trigger is hit, the data in
1077			 * overwritable ring buffer should have been collected,
1078			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1079			 *
 1080			 * If SIGUSR2 was raised after or during record__mmap_read_all(),
 1081			 * record__mmap_read_all() didn't collect data from the
 1082			 * overwritable ring buffer. Read again.
1083			 */
1084			if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1085				continue;
1086			trigger_ready(&switch_output_trigger);
1087
1088			/*
1089			 * Reenable events in overwrite ring buffer after
1090			 * record__mmap_read_all(): we should have collected
1091			 * data from it.
1092			 */
1093			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1094
1095			if (!quiet)
1096				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1097					waking);
1098			waking = 0;
1099			fd = record__switch_output(rec, false);
1100			if (fd < 0) {
1101				pr_err("Failed to switch to new file\n");
1102				trigger_error(&switch_output_trigger);
1103				err = fd;
1104				goto out_child;
1105			}
1106
1107			/* re-arm the alarm */
1108			if (rec->switch_output.time)
1109				alarm(rec->switch_output.time);
1110		}
1111
1112		if (hits == rec->samples) {
1113			if (done || draining)
1114				break;
1115			err = perf_evlist__poll(rec->evlist, -1);
1116			/*
1117			 * Propagate error, only if there's any. Ignore positive
1118			 * number of returned events and interrupt error.
1119			 */
1120			if (err > 0 || (err < 0 && errno == EINTR))
1121				err = 0;
1122			waking++;
1123
1124			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1125				draining = true;
1126		}
1127
1128		/*
1129		 * When perf is starting the traced process, at the end events
1130		 * die with the process and we wait for that. Thus no need to
1131		 * disable events in this case.
1132		 */
1133		if (done && !disabled && !target__none(&opts->target)) {
1134			trigger_off(&auxtrace_snapshot_trigger);
1135			perf_evlist__disable(rec->evlist);
1136			disabled = true;
1137		}
1138	}
1139	trigger_off(&auxtrace_snapshot_trigger);
1140	trigger_off(&switch_output_trigger);
1141
1142	if (forks && workload_exec_errno) {
1143		char msg[STRERR_BUFSIZE];
1144		const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
1145		pr_err("Workload failed: %s\n", emsg);
1146		err = -1;
1147		goto out_child;
1148	}
1149
1150	if (!quiet)
1151		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
1152
1153	if (target__none(&rec->opts.target))
1154		record__synthesize_workload(rec, true);
1155
1156out_child:
1157	if (forks) {
1158		int exit_status;
1159
1160		if (!child_finished)
1161			kill(rec->evlist->workload.pid, SIGTERM);
1162
1163		wait(&exit_status);
1164
1165		if (err < 0)
1166			status = err;
1167		else if (WIFEXITED(exit_status))
1168			status = WEXITSTATUS(exit_status);
1169		else if (WIFSIGNALED(exit_status))
1170			signr = WTERMSIG(exit_status);
1171	} else
1172		status = err;
1173
1174	record__synthesize(rec, true);
1175	/* this will be recalculated during process_buildids() */
1176	rec->samples = 0;
1177
1178	if (!err) {
1179		if (!rec->timestamp_filename) {
1180			record__finish_output(rec);
1181		} else {
1182			fd = record__switch_output(rec, true);
1183			if (fd < 0) {
1184				status = fd;
1185				goto out_delete_session;
1186			}
1187		}
1188	}
1189
1190	perf_hooks__invoke_record_end();
1191
1192	if (!err && !quiet) {
1193		char samples[128];
1194		const char *postfix = rec->timestamp_filename ?
1195					".<timestamp>" : "";
1196
1197		if (rec->samples && !rec->opts.full_auxtrace)
1198			scnprintf(samples, sizeof(samples),
1199				  " (%" PRIu64 " samples)", rec->samples);
1200		else
1201			samples[0] = '\0';
1202
1203		fprintf(stderr,	"[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
1204			perf_data__size(data) / 1024.0 / 1024.0,
1205			data->file.path, postfix, samples);
1206	}
1207
1208out_delete_session:
1209	perf_session__delete(session);
1210	return status;
1211}
1212
1213static void callchain_debug(struct callchain_param *callchain)
1214{
1215	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
1216
1217	pr_debug("callchain: type %s\n", str[callchain->record_mode]);
1218
1219	if (callchain->record_mode == CALLCHAIN_DWARF)
1220		pr_debug("callchain: stack dump size %d\n",
1221			 callchain->dump_size);
1222}
1223
1224int record_opts__parse_callchain(struct record_opts *record,
1225				 struct callchain_param *callchain,
1226				 const char *arg, bool unset)
1227{
1228	int ret;
1229	callchain->enabled = !unset;
1230
1231	/* --no-call-graph */
1232	if (unset) {
1233		callchain->record_mode = CALLCHAIN_NONE;
1234		pr_debug("callchain: disabled\n");
1235		return 0;
1236	}
1237
1238	ret = parse_callchain_record_opt(arg, callchain);
1239	if (!ret) {
1240		/* Enable data address sampling for DWARF unwind. */
1241		if (callchain->record_mode == CALLCHAIN_DWARF)
1242			record->sample_address = true;
1243		callchain_debug(callchain);
1244	}
1245
1246	return ret;
1247}
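/*
 * Illustrative --call-graph arguments parsed above (record_mode[,record_size],
 * examples only):
 *
 *   perf record --call-graph fp ...
 *   perf record --call-graph dwarf,8192 ...   (DWARF mode with a stack dump size)
 *   perf record --call-graph lbr ...
 */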
1248
1249int record_parse_callchain_opt(const struct option *opt,
1250			       const char *arg,
1251			       int unset)
1252{
1253	return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
1254}
1255
1256int record_callchain_opt(const struct option *opt,
1257			 const char *arg __maybe_unused,
1258			 int unset __maybe_unused)
1259{
1260	struct callchain_param *callchain = opt->value;
1261
1262	callchain->enabled = true;
1263
1264	if (callchain->record_mode == CALLCHAIN_NONE)
1265		callchain->record_mode = CALLCHAIN_FP;
1266
1267	callchain_debug(callchain);
1268	return 0;
1269}
1270
1271static int perf_record_config(const char *var, const char *value, void *cb)
1272{
1273	struct record *rec = cb;
1274
1275	if (!strcmp(var, "record.build-id")) {
1276		if (!strcmp(value, "cache"))
1277			rec->no_buildid_cache = false;
1278		else if (!strcmp(value, "no-cache"))
1279			rec->no_buildid_cache = true;
1280		else if (!strcmp(value, "skip"))
1281			rec->no_buildid = true;
1282		else
1283			return -1;
1284		return 0;
1285	}
1286	if (!strcmp(var, "record.call-graph")) {
1287		var = "call-graph.record-mode";
1288		return perf_default_config(var, value, cb);
1289	}
1290
1291	return 0;
1292}
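/*
 * A minimal sketch of the perfconfig keys handled above (the file contents
 * are illustrative, not taken from this source):
 *
 *   [record]
 *       build-id = no-cache      # or "cache" / "skip"
 *       call-graph = dwarf       # forwarded as "call-graph.record-mode"
 */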
1293
1294struct clockid_map {
1295	const char *name;
1296	int clockid;
1297};
1298
1299#define CLOCKID_MAP(n, c)	\
1300	{ .name = n, .clockid = (c), }
1301
1302#define CLOCKID_END	{ .name = NULL, }
1303
1304
1305/*
1306 * Add the missing ones, we need to build on many distros...
1307 */
1308#ifndef CLOCK_MONOTONIC_RAW
1309#define CLOCK_MONOTONIC_RAW 4
1310#endif
1311#ifndef CLOCK_BOOTTIME
1312#define CLOCK_BOOTTIME 7
1313#endif
1314#ifndef CLOCK_TAI
1315#define CLOCK_TAI 11
1316#endif
1317
1318static const struct clockid_map clockids[] = {
1319	/* available for all events, NMI safe */
1320	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1321	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1322
1323	/* available for some events */
1324	CLOCKID_MAP("realtime", CLOCK_REALTIME),
1325	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1326	CLOCKID_MAP("tai", CLOCK_TAI),
1327
1328	/* available for the lazy */
1329	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1330	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1331	CLOCKID_MAP("real", CLOCK_REALTIME),
1332	CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1333
1334	CLOCKID_END,
1335};
1336
1337static int parse_clockid(const struct option *opt, const char *str, int unset)
1338{
1339	struct record_opts *opts = (struct record_opts *)opt->value;
1340	const struct clockid_map *cm;
1341	const char *ostr = str;
1342
1343	if (unset) {
1344		opts->use_clockid = 0;
1345		return 0;
1346	}
1347
1348	/* no arg passed */
1349	if (!str)
1350		return 0;
1351
1352	/* no setting it twice */
1353	if (opts->use_clockid)
1354		return -1;
1355
1356	opts->use_clockid = true;
1357
1358	/* if its a number, we're done */
1359	if (sscanf(str, "%d", &opts->clockid) == 1)
1360		return 0;
1361
1362	/* allow a "CLOCK_" prefix to the name */
1363	if (!strncasecmp(str, "CLOCK_", 6))
1364		str += 6;
1365
1366	for (cm = clockids; cm->name; cm++) {
1367		if (!strcasecmp(str, cm->name)) {
1368			opts->clockid = cm->clockid;
1369			return 0;
1370		}
1371	}
1372
1373	opts->use_clockid = false;
1374	ui__warning("unknown clockid %s, check man page\n", ostr);
1375	return -1;
1376}
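/*
 * Illustrative clockid arguments accepted by the parser above (examples only):
 *
 *   perf record -k monotonic_raw ...
 *   perf record -k CLOCK_BOOTTIME ...     ("CLOCK_" prefix is stripped)
 *   perf record --clockid=4 ...           (a raw clockid number also works)
 */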
1377
1378static int record__parse_mmap_pages(const struct option *opt,
1379				    const char *str,
1380				    int unset __maybe_unused)
1381{
1382	struct record_opts *opts = opt->value;
1383	char *s, *p;
1384	unsigned int mmap_pages;
1385	int ret;
1386
1387	if (!str)
1388		return -EINVAL;
1389
1390	s = strdup(str);
1391	if (!s)
1392		return -ENOMEM;
1393
1394	p = strchr(s, ',');
1395	if (p)
1396		*p = '\0';
1397
1398	if (*s) {
1399		ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1400		if (ret)
1401			goto out_free;
1402		opts->mmap_pages = mmap_pages;
1403	}
1404
1405	if (!p) {
1406		ret = 0;
1407		goto out_free;
1408	}
1409
1410	ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1411	if (ret)
1412		goto out_free;
1413
1414	opts->auxtrace_mmap_pages = mmap_pages;
1415
1416out_free:
1417	free(s);
1418	return ret;
1419}
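/*
 * Illustrative -m/--mmap-pages values handled above (examples only):
 *
 *   perf record -m 512 ...        (512 data mmap pages)
 *   perf record -m 512,128 ...    (plus 128 AUX area tracing mmap pages)
 */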
1420
1421static void switch_output_size_warn(struct record *rec)
1422{
1423	u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1424	struct switch_output *s = &rec->switch_output;
1425
1426	wakeup_size /= 2;
1427
1428	if (s->size < wakeup_size) {
1429		char buf[100];
1430
1431		unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
 1432		pr_warning("WARNING: switch-output data size is lower than the "
 1433			   "wakeup kernel buffer size (%s); "
 1434			   "expect bigger perf.data sizes\n", buf);
1435	}
1436}
1437
1438static int switch_output_setup(struct record *rec)
1439{
1440	struct switch_output *s = &rec->switch_output;
1441	static struct parse_tag tags_size[] = {
1442		{ .tag  = 'B', .mult = 1       },
1443		{ .tag  = 'K', .mult = 1 << 10 },
1444		{ .tag  = 'M', .mult = 1 << 20 },
1445		{ .tag  = 'G', .mult = 1 << 30 },
1446		{ .tag  = 0 },
1447	};
1448	static struct parse_tag tags_time[] = {
1449		{ .tag  = 's', .mult = 1        },
1450		{ .tag  = 'm', .mult = 60       },
1451		{ .tag  = 'h', .mult = 60*60    },
1452		{ .tag  = 'd', .mult = 60*60*24 },
1453		{ .tag  = 0 },
1454	};
1455	unsigned long val;
1456
1457	if (!s->set)
1458		return 0;
1459
1460	if (!strcmp(s->str, "signal")) {
1461		s->signal = true;
1462		pr_debug("switch-output with SIGUSR2 signal\n");
1463		goto enabled;
1464	}
1465
1466	val = parse_tag_value(s->str, tags_size);
1467	if (val != (unsigned long) -1) {
1468		s->size = val;
1469		pr_debug("switch-output with %s size threshold\n", s->str);
1470		goto enabled;
1471	}
1472
1473	val = parse_tag_value(s->str, tags_time);
1474	if (val != (unsigned long) -1) {
1475		s->time = val;
1476		pr_debug("switch-output with %s time threshold (%lu seconds)\n",
1477			 s->str, s->time);
1478		goto enabled;
1479	}
1480
1481	return -1;
1482
1483enabled:
1484	rec->timestamp_filename = true;
1485	s->enabled              = true;
1486
1487	if (s->size && !rec->opts.no_buffering)
1488		switch_output_size_warn(rec);
1489
1490	return 0;
1491}
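/*
 * Illustrative --switch-output arguments accepted above (examples only):
 *
 *   --switch-output           (defaults to "signal": rotate output on SIGUSR2)
 *   --switch-output=2G        (rotate once 2G of data has been written)
 *   --switch-output=10m       (rotate every 10 minutes, alarm based)
 */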
1492
1493static const char * const __record_usage[] = {
1494	"perf record [<options>] [<command>]",
1495	"perf record [<options>] -- <command> [<options>]",
1496	NULL
1497};
1498const char * const *record_usage = __record_usage;
1499
1500/*
1501 * XXX Ideally would be local to cmd_record() and passed to a record__new
1502 * because we need to have access to it in record__exit, that is called
1503 * after cmd_record() exits, but since record_options need to be accessible to
1504 * builtin-script, leave it here.
1505 *
1506 * At least we don't ouch it in all the other functions here directly.
1507 *
1508 * Just say no to tons of global variables, sigh.
1509 */
1510static struct record record = {
1511	.opts = {
1512		.sample_time	     = true,
1513		.mmap_pages	     = UINT_MAX,
1514		.user_freq	     = UINT_MAX,
1515		.user_interval	     = ULLONG_MAX,
1516		.freq		     = 4000,
1517		.target		     = {
1518			.uses_mmap   = true,
1519			.default_per_cpu = true,
1520		},
1521		.proc_map_timeout     = 500,
1522	},
1523	.tool = {
1524		.sample		= process_sample_event,
1525		.fork		= perf_event__process_fork,
1526		.exit		= perf_event__process_exit,
1527		.comm		= perf_event__process_comm,
1528		.namespaces	= perf_event__process_namespaces,
1529		.mmap		= perf_event__process_mmap,
1530		.mmap2		= perf_event__process_mmap2,
1531		.ordered_events	= true,
1532	},
1533};
1534
1535const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
1536	"\n\t\t\t\tDefault: fp";
1537
1538static bool dry_run;
1539
1540/*
1541 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1542 * with it and switch to use the library functions in perf_evlist that came
1543 * from builtin-record.c, i.e. use record_opts,
1544 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
1545 * using pipes, etc.
1546 */
1547static struct option __record_options[] = {
1548	OPT_CALLBACK('e', "event", &record.evlist, "event",
1549		     "event selector. use 'perf list' to list available events",
1550		     parse_events_option),
1551	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
1552		     "event filter", parse_filter),
1553	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1554			   NULL, "don't record events from perf itself",
1555			   exclude_perf),
1556	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
1557		    "record events on existing process id"),
1558	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
1559		    "record events on existing thread id"),
1560	OPT_INTEGER('r', "realtime", &record.realtime_prio,
1561		    "collect data with this RT SCHED_FIFO priority"),
1562	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
1563		    "collect data without buffering"),
1564	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
1565		    "collect raw sample records from all opened counters"),
1566	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
1567			    "system-wide collection from all CPUs"),
1568	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
1569		    "list of cpus to monitor"),
1570	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
1571	OPT_STRING('o', "output", &record.data.file.path, "file",
1572		    "output file name"),
1573	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1574			&record.opts.no_inherit_set,
1575			"child tasks do not inherit counters"),
1576	OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1577		    "synthesize non-sample events at the end of output"),
1578	OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
1579	OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
1580		    "Fail if the specified frequency can't be used"),
1581	OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
1582		     "profile at this frequency",
1583		      record__parse_freq),
1584	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1585		     "number of mmap data pages and AUX area tracing mmap pages",
1586		     record__parse_mmap_pages),
1587	OPT_BOOLEAN(0, "group", &record.opts.group,
1588		    "put the counters into a counter group"),
1589	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
1590			   NULL, "enables call-graph recording" ,
1591			   &record_callchain_opt),
1592	OPT_CALLBACK(0, "call-graph", &record.opts,
1593		     "record_mode[,record_size]", record_callchain_help,
1594		     &record_parse_callchain_opt),
1595	OPT_INCR('v', "verbose", &verbose,
1596		    "be more verbose (show counter open errors, etc)"),
1597	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
1598	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
1599		    "per thread counts"),
1600	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
1601	OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
1602		    "Record the sample physical addresses"),
1603	OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
1604	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
1605			&record.opts.sample_time_set,
1606			"Record the sample timestamps"),
1607	OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
1608			"Record the sample period"),
1609	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
1610		    "don't sample"),
1611	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
1612			&record.no_buildid_cache_set,
1613			"do not update the buildid cache"),
1614	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
1615			&record.no_buildid_set,
1616			"do not collect buildids in perf.data"),
1617	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
1618		     "monitor event in cgroup name only",
1619		     parse_cgroups),
1620	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
1621		  "ms to wait before starting measurement after program start"),
1622	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
1623		   "user to profile"),
1624
1625	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
1626		     "branch any", "sample any taken branches",
1627		     parse_branch_stack),
1628
1629	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
1630		     "branch filter mask", "branch stack filter modes",
1631		     parse_branch_stack),
1632	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
1633		    "sample by weight (on special events only)"),
1634	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
1635		    "sample transaction flags (special events only)"),
1636	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
1637		    "use per-thread mmaps"),
1638	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
1639		    "sample selected machine registers on interrupt,"
1640		    " use -I ? to list register names", parse_regs),
1641	OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
1642		    "sample selected machine registers on interrupt,"
1643		    " use -I ? to list register names", parse_regs),
1644	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
1645		    "Record running/enabled time of read (:S) events"),
1646	OPT_CALLBACK('k', "clockid", &record.opts,
1647	"clockid", "clockid to use for events, see clock_gettime()",
1648	parse_clockid),
1649	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
1650			  "opts", "AUX area tracing Snapshot Mode", ""),
1651	OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
1652			"per thread proc mmap processing timeout in ms"),
1653	OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
1654		    "Record namespaces events"),
1655	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
1656		    "Record context switch events"),
1657	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
1658			 "Configure all used events to run in kernel space.",
1659			 PARSE_OPT_EXCLUSIVE),
1660	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
1661			 "Configure all used events to run in user space.",
1662			 PARSE_OPT_EXCLUSIVE),
1663	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
1664		   "clang binary to use for compiling BPF scriptlets"),
1665	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
1666		   "options passed to clang when compiling BPF scriptlets"),
1667	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
1668		   "file", "vmlinux pathname"),
1669	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
1670		    "Record build-id of all DSOs regardless of hits"),
1671	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
1672		    "append timestamp to output filename"),
1673	OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
1674		    "Record timestamp boundary (time of first/last samples)"),
1675	OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
1676			  &record.switch_output.set, "signal,size,time",
 1677			  "Switch output when receiving SIGUSR2 or crossing a size/time threshold",
1678			  "signal"),
1679	OPT_BOOLEAN(0, "dry-run", &dry_run,
1680		    "Parse options then exit"),
1681	OPT_END()
1682};
1683
1684struct option *record_options = __record_options;
1685
1686int cmd_record(int argc, const char **argv)
1687{
1688	int err;
1689	struct record *rec = &record;
1690	char errbuf[BUFSIZ];
1691
1692	setlocale(LC_ALL, "");
1693
1694#ifndef HAVE_LIBBPF_SUPPORT
1695# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1696	set_nobuild('\0', "clang-path", true);
1697	set_nobuild('\0', "clang-opt", true);
1698# undef set_nobuild
1699#endif
1700
1701#ifndef HAVE_BPF_PROLOGUE
1702# if !defined (HAVE_DWARF_SUPPORT)
1703#  define REASON  "NO_DWARF=1"
1704# elif !defined (HAVE_LIBBPF_SUPPORT)
1705#  define REASON  "NO_LIBBPF=1"
1706# else
1707#  define REASON  "this architecture doesn't support BPF prologue"
1708# endif
1709# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1710	set_nobuild('\0', "vmlinux", true);
1711# undef set_nobuild
1712# undef REASON
1713#endif
1714
1715	rec->evlist = perf_evlist__new();
1716	if (rec->evlist == NULL)
1717		return -ENOMEM;
1718
1719	err = perf_config(perf_record_config, rec);
1720	if (err)
1721		return err;
1722
1723	argc = parse_options(argc, argv, record_options, record_usage,
1724			    PARSE_OPT_STOP_AT_NON_OPTION);
1725	if (quiet)
1726		perf_quiet_option();
1727
1728	/* Make system wide (-a) the default target. */
1729	if (!argc && target__none(&rec->opts.target))
1730		rec->opts.target.system_wide = true;
1731
1732	if (nr_cgroups && !rec->opts.target.system_wide) {
1733		usage_with_options_msg(record_usage, record_options,
1734			"cgroup monitoring only available in system-wide mode");
1735
1736	}
1737	if (rec->opts.record_switch_events &&
1738	    !perf_can_record_switch_events()) {
1739		ui__error("kernel does not support recording context switch events\n");
1740		parse_options_usage(record_usage, record_options, "switch-events", 0);
1741		return -EINVAL;
1742	}
1743
1744	if (switch_output_setup(rec)) {
1745		parse_options_usage(record_usage, record_options, "switch-output", 0);
1746		return -EINVAL;
1747	}
1748
1749	if (rec->switch_output.time) {
1750		signal(SIGALRM, alarm_sig_handler);
1751		alarm(rec->switch_output.time);
1752	}
1753
1754	/*
1755	 * Allow aliases to facilitate the lookup of symbols for address
1756	 * filters. Refer to auxtrace_parse_filters().
1757	 */
1758	symbol_conf.allow_aliases = true;
1759
1760	symbol__init(NULL);
1761
1762	err = record__auxtrace_init(rec);
1763	if (err)
1764		goto out;
1765
1766	if (dry_run)
1767		goto out;
1768
1769	err = bpf__setup_stdout(rec->evlist);
1770	if (err) {
1771		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
1772		pr_err("ERROR: Setup BPF stdout failed: %s\n",
1773			 errbuf);
1774		goto out;
1775	}
1776
1777	err = -ENOMEM;
1778
1779	if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
1780		pr_warning(
1781"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1782"check /proc/sys/kernel/kptr_restrict.\n\n"
1783"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1784"file is not found in the buildid cache or in the vmlinux path.\n\n"
1785"Samples in kernel modules won't be resolved at all.\n\n"
1786"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1787"even with a suitable vmlinux or kallsyms file.\n\n");
1788
1789	if (rec->no_buildid_cache || rec->no_buildid) {
1790		disable_buildid_cache();
1791	} else if (rec->switch_output.enabled) {
1792		/*
1793		 * In 'perf record --switch-output', disable buildid
1794		 * generation by default to reduce data file switching
 1795		 * overhead. Still generate buildids if they are explicitly
 1796		 * required using
1797		 *
1798		 *  perf record --switch-output --no-no-buildid \
1799		 *              --no-no-buildid-cache
1800		 *
 1801		 * The following code is equivalent to:
1802		 *
1803		 * if ((rec->no_buildid || !rec->no_buildid_set) &&
1804		 *     (rec->no_buildid_cache || !rec->no_buildid_cache_set))
1805		 *         disable_buildid_cache();
1806		 */
1807		bool disable = true;
1808
1809		if (rec->no_buildid_set && !rec->no_buildid)
1810			disable = false;
1811		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
1812			disable = false;
1813		if (disable) {
1814			rec->no_buildid = true;
1815			rec->no_buildid_cache = true;
1816			disable_buildid_cache();
1817		}
1818	}
1819
1820	if (record.opts.overwrite)
1821		record.opts.tail_synthesize = true;
1822
1823	if (rec->evlist->nr_entries == 0 &&
1824	    __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
1825		pr_err("Not enough memory for event selector list\n");
1826		goto out;
1827	}
1828
1829	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
1830		rec->opts.no_inherit = true;
1831
1832	err = target__validate(&rec->opts.target);
1833	if (err) {
1834		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
1835		ui__warning("%s\n", errbuf);
1836	}
1837
1838	err = target__parse_uid(&rec->opts.target);
1839	if (err) {
1840		int saved_errno = errno;
1841
1842		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
1843		ui__error("%s", errbuf);
1844
1845		err = -saved_errno;
1846		goto out;
1847	}
1848
1849	/* Enable ignoring missing threads when -u/-p option is defined. */
1850	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
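	/*
	 * Threads of an existing uid/pid target may exit between the /proc
	 * scan and perf_event_open(); with ignore_missing_thread set, the
	 * open path drops such threads instead of failing the whole record.
	 */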
1851
1852	err = -ENOMEM;
1853	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
1854		usage_with_options(record_usage, record_options);
1855
1856	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
1857	if (err)
1858		goto out;
1859
1860	/*
1861	 * We take all buildids when the file contains
1862	 * AUX area tracing data, because we do not decode the
1863	 * trace - decoding it would take too long.
1864	 */
1865	if (rec->opts.full_auxtrace)
1866		rec->buildid_all = true;
1867
1868	if (record_opts__config(&rec->opts)) {
1869		err = -EINVAL;
1870		goto out;
1871	}
1872
1873	err = __cmd_record(&record, argc, argv);
1874out:
1875	perf_evlist__delete(rec->evlist);
1876	symbol__exit();
1877	auxtrace_record__free(rec->itr);
1878	return err;
1879}
1880
1881static void snapshot_sig_handler(int sig __maybe_unused)
1882{
1883	struct record *rec = &record;
1884
1885	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
1886		trigger_hit(&auxtrace_snapshot_trigger);
1887		auxtrace_record__snapshot_started = 1;
1888		if (auxtrace_record__snapshot_start(record.itr))
1889			trigger_error(&auxtrace_snapshot_trigger);
1890	}
1891
1892	if (switch_output_signal(rec))
1893		trigger_hit(&switch_output_trigger);
1894}
1895
1896static void alarm_sig_handler(int sig __maybe_unused)
1897{
1898	struct record *rec = &record;
1899
1900	if (switch_output_time(rec))
1901		trigger_hit(&switch_output_trigger);
1902}
v4.10.11
 
   1/*
   2 * builtin-record.c
   3 *
   4 * Builtin record command: Record the profile of a workload
   5 * (or a CPU, or a PID) into the perf.data output file - for
   6 * later analysis via perf report.
   7 */
   8#include "builtin.h"
   9
  10#include "perf.h"
  11
  12#include "util/build-id.h"
  13#include "util/util.h"
  14#include <subcmd/parse-options.h>
  15#include "util/parse-events.h"
  16#include "util/config.h"
  17
  18#include "util/callchain.h"
  19#include "util/cgroup.h"
  20#include "util/header.h"
  21#include "util/event.h"
  22#include "util/evlist.h"
  23#include "util/evsel.h"
  24#include "util/debug.h"
  25#include "util/drv_configs.h"
  26#include "util/session.h"
  27#include "util/tool.h"
  28#include "util/symbol.h"
  29#include "util/cpumap.h"
  30#include "util/thread_map.h"
  31#include "util/data.h"
  32#include "util/perf_regs.h"
  33#include "util/auxtrace.h"
  34#include "util/tsc.h"
  35#include "util/parse-branch-options.h"
  36#include "util/parse-regs-options.h"
  37#include "util/llvm-utils.h"
  38#include "util/bpf-loader.h"
  39#include "util/trigger.h"
  40#include "util/perf-hooks.h"
  41#include "asm/bug.h"
  42
  43#include <unistd.h>
  44#include <sched.h>
  45#include <sys/mman.h>
  46#include <asm/bug.h>
  47#include <linux/time64.h>
  48
  49struct record {
  50	struct perf_tool	tool;
  51	struct record_opts	opts;
  52	u64			bytes_written;
  53	struct perf_data_file	file;
  54	struct auxtrace_record	*itr;
  55	struct perf_evlist	*evlist;
  56	struct perf_session	*session;
  57	const char		*progname;
  58	int			realtime_prio;
  59	bool			no_buildid;
  60	bool			no_buildid_set;
  61	bool			no_buildid_cache;
  62	bool			no_buildid_cache_set;
  63	bool			buildid_all;
  64	bool			timestamp_filename;
  65	bool			switch_output;
  66	unsigned long long	samples;
  67};
  68
  69static int record__write(struct record *rec, void *bf, size_t size)
  70{
  71	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
  72		pr_err("failed to write perf data, error: %m\n");
  73		return -1;
  74	}
  75
  76	rec->bytes_written += size;
  77	return 0;
  78}
  79
  80static int process_synthesized_event(struct perf_tool *tool,
  81				     union perf_event *event,
  82				     struct perf_sample *sample __maybe_unused,
  83				     struct machine *machine __maybe_unused)
  84{
  85	struct record *rec = container_of(tool, struct record, tool);
  86	return record__write(rec, event, event->header.size);
  87}
  88
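/*
 * Find the end of valid data in a backward (overwrite) ring buffer: with
 * *start = head, walk forward one record header at a time until the walk
 * has covered the whole buffer or hits a zero-sized header, and report
 * that position as *end.
 */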
  89static int
  90backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
  91{
  92	struct perf_event_header *pheader;
  93	u64 evt_head = head;
  94	int size = mask + 1;
  95
  96	pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
  97	pheader = (struct perf_event_header *)(buf + (head & mask));
  98	*start = head;
  99	while (true) {
 100		if (evt_head - head >= (unsigned int)size) {
 101			pr_debug("Finished reading backward ring buffer: rewind\n");
 102			if (evt_head - head > (unsigned int)size)
 103				evt_head -= pheader->size;
 104			*end = evt_head;
 105			return 0;
 106		}
 107
 108		pheader = (struct perf_event_header *)(buf + (evt_head & mask));
 109
 110		if (pheader->size == 0) {
 111			pr_debug("Finished reading backward ring buffer: get start\n");
 112			*end = evt_head;
 113			return 0;
 114		}
 115
 116		evt_head += pheader->size;
 117		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
 118	}
 119	WARN_ONCE(1, "Shouldn't get here\n");
 120	return -1;
 121}
 122
 123static int
 124rb_find_range(void *data, int mask, u64 head, u64 old,
 125	      u64 *start, u64 *end, bool backward)
 126{
 127	if (!backward) {
 128		*start = old;
 129		*end = head;
 130		return 0;
 131	}
 132
 133	return backward_rb_find_range(data, mask, head, start, end);
 134}
 135
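/*
 * Copy the [start, end) region of one mmap'd ring buffer into the output
 * file.  A region that wraps past the end of the buffer is written in two
 * chunks; an oversized region (the writer lapped the reader) is dropped
 * with a one-time warning.
 */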
 136static int
 137record__mmap_read(struct record *rec, struct perf_mmap *md,
 138		  bool overwrite, bool backward)
 139{
 140	u64 head = perf_mmap__read_head(md);
 141	u64 old = md->prev;
 142	u64 end = head, start = old;
 143	unsigned char *data = md->base + page_size;
 144	unsigned long size;
 145	void *buf;
 146	int rc = 0;
 147
 148	if (rb_find_range(data, md->mask, head,
 149			  old, &start, &end, backward))
 150		return -1;
 151
 152	if (start == end)
 153		return 0;
 154
 155	rec->samples++;
 156
 157	size = end - start;
 158	if (size > (unsigned long)(md->mask) + 1) {
 159		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
 160
 161		md->prev = head;
 162		perf_mmap__consume(md, overwrite || backward);
 163		return 0;
 164	}
 165
 166	if ((start & md->mask) + size != (end & md->mask)) {
 167		buf = &data[start & md->mask];
 168		size = md->mask + 1 - (start & md->mask);
 169		start += size;
 170
 171		if (record__write(rec, buf, size) < 0) {
 172			rc = -1;
 173			goto out;
 174		}
 175	}
 176
 177	buf = &data[start & md->mask];
 178	size = end - start;
 179	start += size;
 180
 181	if (record__write(rec, buf, size) < 0) {
 182		rc = -1;
 183		goto out;
 184	}
 185
 186	md->prev = head;
 187	perf_mmap__consume(md, overwrite || backward);
 188out:
 189	return rc;
 190}
 191
 192static volatile int done;
 193static volatile int signr = -1;
 194static volatile int child_finished;
 195
 196static volatile int auxtrace_record__snapshot_started;
 197static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
 198static DEFINE_TRIGGER(switch_output_trigger);
 199
 200static void sig_handler(int sig)
 201{
 202	if (sig == SIGCHLD)
 203		child_finished = 1;
 204	else
 205		signr = sig;
 206
 207	done = 1;
 208}
 209
 210static void sigsegv_handler(int sig)
 211{
 212	perf_hooks__recover();
 213	sighandler_dump_stack(sig);
 214}
 215
 216static void record__sig_exit(void)
 217{
 218	if (signr == -1)
 219		return;
 220
 221	signal(signr, SIG_DFL);
 222	raise(signr);
 223}
 224
 225#ifdef HAVE_AUXTRACE_SUPPORT
 226
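/*
 * Write one AUX area trace chunk: the PERF_RECORD_AUXTRACE event, the raw
 * data (in one or two pieces when the AUX buffer wrapped) and zero padding
 * up to an 8-byte boundary.  For file (non-pipe) output the chunk's file
 * offset is also added to the auxtrace index.
 */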
 227static int record__process_auxtrace(struct perf_tool *tool,
 228				    union perf_event *event, void *data1,
 229				    size_t len1, void *data2, size_t len2)
 230{
 231	struct record *rec = container_of(tool, struct record, tool);
 232	struct perf_data_file *file = &rec->file;
 233	size_t padding;
 234	u8 pad[8] = {0};
 235
 236	if (!perf_data_file__is_pipe(file)) {
 237		off_t file_offset;
 238		int fd = perf_data_file__fd(file);
 239		int err;
 240
 241		file_offset = lseek(fd, 0, SEEK_CUR);
 242		if (file_offset == -1)
 243			return -1;
 244		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
 245						     event, file_offset);
 246		if (err)
 247			return err;
 248	}
 249
 250	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
 251	padding = (len1 + len2) & 7;
 252	if (padding)
 253		padding = 8 - padding;
 254
 255	record__write(rec, event, event->header.size);
 256	record__write(rec, data1, len1);
 257	if (len2)
 258		record__write(rec, data2, len2);
 259	record__write(rec, &pad, padding);
 260
 261	return 0;
 262}
 263
 264static int record__auxtrace_mmap_read(struct record *rec,
 265				      struct auxtrace_mmap *mm)
 266{
 267	int ret;
 268
 269	ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
 270				  record__process_auxtrace);
 271	if (ret < 0)
 272		return ret;
 273
 274	if (ret)
 275		rec->samples++;
 276
 277	return 0;
 278}
 279
 280static int record__auxtrace_mmap_read_snapshot(struct record *rec,
 281					       struct auxtrace_mmap *mm)
 282{
 283	int ret;
 284
 285	ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
 286					   record__process_auxtrace,
 287					   rec->opts.auxtrace_snapshot_size);
 288	if (ret < 0)
 289		return ret;
 290
 291	if (ret)
 292		rec->samples++;
 293
 294	return 0;
 295}
 296
 297static int record__auxtrace_read_snapshot_all(struct record *rec)
 298{
 299	int i;
 300	int rc = 0;
 301
 302	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
 303		struct auxtrace_mmap *mm =
 304				&rec->evlist->mmap[i].auxtrace_mmap;
 305
 306		if (!mm->base)
 307			continue;
 308
 309		if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
 310			rc = -1;
 311			goto out;
 312		}
 313	}
 314out:
 315	return rc;
 316}
 317
 318static void record__read_auxtrace_snapshot(struct record *rec)
 319{
 320	pr_debug("Recording AUX area tracing snapshot\n");
 321	if (record__auxtrace_read_snapshot_all(rec) < 0) {
 322		trigger_error(&auxtrace_snapshot_trigger);
 323	} else {
 324		if (auxtrace_record__snapshot_finish(rec->itr))
 325			trigger_error(&auxtrace_snapshot_trigger);
 326		else
 327			trigger_ready(&auxtrace_snapshot_trigger);
 328	}
 329}
 330
 331#else
 332
 333static inline
 334int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
 335			       struct auxtrace_mmap *mm __maybe_unused)
 336{
 337	return 0;
 338}
 339
 340static inline
 341void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
 342{
 343}
 344
 345static inline
 346int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
 347{
 348	return 0;
 349}
 350
 351#endif
 352
 353static int record__mmap_evlist(struct record *rec,
 354			       struct perf_evlist *evlist)
 355{
 356	struct record_opts *opts = &rec->opts;
 357	char msg[512];
 358
 359	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
 360				 opts->auxtrace_mmap_pages,
 361				 opts->auxtrace_snapshot_mode) < 0) {
 362		if (errno == EPERM) {
 363			pr_err("Permission error mapping pages.\n"
 364			       "Consider increasing "
 365			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
 366			       "or try again with a smaller value of -m/--mmap_pages.\n"
 367			       "(current value: %u,%u)\n",
 368			       opts->mmap_pages, opts->auxtrace_mmap_pages);
 369			return -errno;
 370		} else {
 371			pr_err("failed to mmap with %d (%s)\n", errno,
 372				str_error_r(errno, msg, sizeof(msg)));
 373			if (errno)
 374				return -errno;
 375			else
 376				return -EINVAL;
 377		}
 378	}
 379	return 0;
 380}
 381
 382static int record__mmap(struct record *rec)
 383{
 384	return record__mmap_evlist(rec, rec->evlist);
 385}
 386
 387static int record__open(struct record *rec)
 388{
 389	char msg[512];
 390	struct perf_evsel *pos;
 391	struct perf_evlist *evlist = rec->evlist;
 392	struct perf_session *session = rec->session;
 393	struct record_opts *opts = &rec->opts;
 394	struct perf_evsel_config_term *err_term;
 395	int rc = 0;
 396
 397	perf_evlist__config(evlist, opts, &callchain_param);
 398
 399	evlist__for_each_entry(evlist, pos) {
 400try_again:
 401		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
 402			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
 403				if (verbose)
 404					ui__warning("%s\n", msg);
 405				goto try_again;
 406			}
 407
 408			rc = -errno;
 409			perf_evsel__open_strerror(pos, &opts->target,
 410						  errno, msg, sizeof(msg));
 411			ui__error("%s\n", msg);
 412			goto out;
 413		}
 414	}
 415
 416	if (perf_evlist__apply_filters(evlist, &pos)) {
 417		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
 418			pos->filter, perf_evsel__name(pos), errno,
 419			str_error_r(errno, msg, sizeof(msg)));
 420		rc = -1;
 421		goto out;
 422	}
 423
 424	if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
 425		error("failed to set config \"%s\" on event %s with %d (%s)\n",
 426		      err_term->val.drv_cfg, perf_evsel__name(pos), errno,
 427		      str_error_r(errno, msg, sizeof(msg)));
 428		rc = -1;
 429		goto out;
 430	}
 431
 432	rc = record__mmap(rec);
 433	if (rc)
 434		goto out;
 435
 436	session->evlist = evlist;
 437	perf_session__set_id_hdr_size(session);
 438out:
 439	return rc;
 440}
 441
 442static int process_sample_event(struct perf_tool *tool,
 443				union perf_event *event,
 444				struct perf_sample *sample,
 445				struct perf_evsel *evsel,
 446				struct machine *machine)
 447{
 448	struct record *rec = container_of(tool, struct record, tool);
 449
 450	rec->samples++;
 451
 452	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
 453}
 454
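/*
 * Build-ids are collected after recording by reading back the file that was
 * just written: process_sample_event() marks the DSOs that were actually
 * hit, unless --buildid-all drops the sample handler and marks every DSO.
 */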
 455static int process_buildids(struct record *rec)
 456{
 457	struct perf_data_file *file  = &rec->file;
 458	struct perf_session *session = rec->session;
 459
 460	if (file->size == 0)
 461		return 0;
 462
 463	/*
  464	 * During this process, it'll load the kernel map and replace each
  465	 * dso->long_name with the real pathname it found.  In this case
 466	 * we prefer the vmlinux path like
 467	 *   /lib/modules/3.16.4/build/vmlinux
 468	 *
 469	 * rather than build-id path (in debug directory).
 470	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
 471	 */
 472	symbol_conf.ignore_vmlinux_buildid = true;
 473
 474	/*
  475	 * If --buildid-all is given, it marks all DSOs regardless of hits,
  476	 * so there is no need to process samples.
 477	 */
 478	if (rec->buildid_all)
 479		rec->tool.sample = NULL;
 480
 481	return perf_session__process_events(session);
 482}
 483
 484static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
 485{
 486	int err;
 487	struct perf_tool *tool = data;
 488	/*
  489	 * For the guest kernel, when processing the record & report
  490	 * subcommands, we synthesize module mmaps before the guest kernel
  491	 * mmap to trigger a dso preload, because default guest module
  492	 * symbols are loaded from guest kallsyms instead of
  493	 * /lib/modules/XXX/XXX.  This avoids missing symbols when the first
  494	 * address is in a module rather than in the guest kernel.
 495	 */
 496	err = perf_event__synthesize_modules(tool, process_synthesized_event,
 497					     machine);
 498	if (err < 0)
 499		pr_err("Couldn't record guest kernel [%d]'s reference"
 500		       " relocation symbol.\n", machine->pid);
 501
 502	/*
  503	 * We use _stext for the guest kernel because the guest's /proc/kallsyms
  504	 * sometimes has no _text symbol.
 505	 */
 506	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
 507						 machine);
 508	if (err < 0)
 509		pr_err("Couldn't record guest kernel [%d]'s reference"
 510		       " relocation symbol.\n", machine->pid);
 511}
 512
 513static struct perf_event_header finished_round_event = {
 514	.size = sizeof(struct perf_event_header),
 515	.type = PERF_RECORD_FINISHED_ROUND,
 516};
 517
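/*
 * Drain every mmap'd ring buffer (and its AUX area, unless in snapshot
 * mode) in one pass.  If anything was written, append a
 * PERF_RECORD_FINISHED_ROUND so the ordered-events processing on the
 * report side has a safe flush point.
 */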
 518static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
 519				    bool backward)
 520{
 521	u64 bytes_written = rec->bytes_written;
 522	int i;
 523	int rc = 0;
 524	struct perf_mmap *maps;
 525
 526	if (!evlist)
 527		return 0;
 528
 529	maps = backward ? evlist->backward_mmap : evlist->mmap;
 530	if (!maps)
 531		return 0;
 532
 533	if (backward && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
 534		return 0;
 535
 536	for (i = 0; i < evlist->nr_mmaps; i++) {
 537		struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
 538
 539		if (maps[i].base) {
 540			if (record__mmap_read(rec, &maps[i],
 541					      evlist->overwrite, backward) != 0) {
 542				rc = -1;
 543				goto out;
 544			}
 545		}
 546
 547		if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
 548		    record__auxtrace_mmap_read(rec, mm) != 0) {
 549			rc = -1;
 550			goto out;
 551		}
 552	}
 553
 554	/*
 555	 * Mark the round finished in case we wrote
 556	 * at least one event.
 557	 */
 558	if (bytes_written != rec->bytes_written)
 559		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
 560
 561	if (backward)
 562		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
 563out:
 564	return rc;
 565}
 566
 567static int record__mmap_read_all(struct record *rec)
 568{
 569	int err;
 570
 571	err = record__mmap_read_evlist(rec, rec->evlist, false);
 572	if (err)
 573		return err;
 574
 575	return record__mmap_read_evlist(rec, rec->evlist, true);
 576}
 577
 578static void record__init_features(struct record *rec)
 579{
 580	struct perf_session *session = rec->session;
 581	int feat;
 582
 583	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
 584		perf_header__set_feat(&session->header, feat);
 585
 586	if (rec->no_buildid)
 587		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
 588
 589	if (!have_tracepoints(&rec->evlist->entries))
 590		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
 591
 592	if (!rec->opts.branch_stack)
 593		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
 594
 595	if (!rec->opts.full_auxtrace)
 596		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
 597
 598	perf_header__clear_feat(&session->header, HEADER_STAT);
 599}
 600
 601static void
 602record__finish_output(struct record *rec)
 603{
 604	struct perf_data_file *file = &rec->file;
 605	int fd = perf_data_file__fd(file);
 606
 607	if (file->is_pipe)
 608		return;
 609
 610	rec->session->header.data_size += rec->bytes_written;
 611	file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
 612
 613	if (!rec->no_buildid) {
 614		process_buildids(rec);
 615
 616		if (rec->buildid_all)
 617			dsos__hit_all(rec->session);
 618	}
 619	perf_session__write_header(rec->session, rec->evlist, fd, true);
 620
 621	return;
 622}
 623
 624static int record__synthesize_workload(struct record *rec, bool tail)
 625{
 626	struct {
 627		struct thread_map map;
 628		struct thread_map_data map_data;
 629	} thread_map;
 630
 631	if (rec->opts.tail_synthesize != tail)
 632		return 0;
 633
 634	thread_map.map.nr = 1;
 635	thread_map.map.map[0].pid = rec->evlist->workload.pid;
 636	thread_map.map.map[0].comm = NULL;
 637	return perf_event__synthesize_thread_map(&rec->tool, &thread_map.map,
 638						 process_synthesized_event,
 639						 &rec->session->machines.host,
 640						 rec->opts.sample_address,
 641						 rec->opts.proc_map_timeout);
 642}
 643
 644static int record__synthesize(struct record *rec, bool tail);
 645
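/*
 * Rotate the output: synthesize the tail events for the current file,
 * finalize it, then have perf_data_file__switch() move it to a
 * "<output>.<timestamp>" name and reopen a fresh output file.  Unless we
 * are exiting, the byte counters are reset and tracking events are
 * synthesized again for the new file.
 */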
 646static int
 647record__switch_output(struct record *rec, bool at_exit)
 648{
 649	struct perf_data_file *file = &rec->file;
 650	int fd, err;
 651
 652	/* Same Size:      "2015122520103046"*/
 653	char timestamp[] = "InvalidTimestamp";
 654
 655	record__synthesize(rec, true);
 656	if (target__none(&rec->opts.target))
 657		record__synthesize_workload(rec, true);
 658
 659	rec->samples = 0;
 660	record__finish_output(rec);
 661	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
 662	if (err) {
 663		pr_err("Failed to get current timestamp\n");
 664		return -EINVAL;
 665	}
 666
 667	fd = perf_data_file__switch(file, timestamp,
 668				    rec->session->header.data_offset,
 669				    at_exit);
 670	if (fd >= 0 && !at_exit) {
 671		rec->bytes_written = 0;
 672		rec->session->header.data_size = 0;
 673	}
 674
 675	if (!quiet)
 676		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
 677			file->path, timestamp);
 678
 679	/* Output tracking events */
 680	if (!at_exit) {
 681		record__synthesize(rec, false);
 682
 683		/*
 684		 * In 'perf record --switch-output' without -a,
 685		 * record__synthesize() in record__switch_output() won't
 686		 * generate tracking events because there's no thread_map
  687			 * in the evlist, so the newly created perf.data wouldn't
  688			 * contain map and comm information.
 689		 * Create a fake thread_map and directly call
 690		 * perf_event__synthesize_thread_map() for those events.
 691		 */
 692		if (target__none(&rec->opts.target))
 693			record__synthesize_workload(rec, false);
 694	}
 695	return fd;
 696}
 697
 698static volatile int workload_exec_errno;
 699
 700/*
 701 * perf_evlist__prepare_workload will send a SIGUSR1
 702 * if the fork fails, since we asked by setting its
 703 * want_signal to true.
 704 */
 705static void workload_exec_failed_signal(int signo __maybe_unused,
 706					siginfo_t *info,
 707					void *ucontext __maybe_unused)
 708{
 709	workload_exec_errno = info->si_value.sival_int;
 710	done = 1;
 711	child_finished = 1;
 712}
 713
 714static void snapshot_sig_handler(int sig);
 715
 716int __weak
 717perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
 718			    struct perf_tool *tool __maybe_unused,
 719			    perf_event__handler_t process __maybe_unused,
 720			    struct machine *machine __maybe_unused)
 721{
 722	return 0;
 723}
 724
 725static const struct perf_event_mmap_page *
 726perf_evlist__pick_pc(struct perf_evlist *evlist)
 727{
 728	if (evlist) {
 729		if (evlist->mmap && evlist->mmap[0].base)
 730			return evlist->mmap[0].base;
 731		if (evlist->backward_mmap && evlist->backward_mmap[0].base)
 732			return evlist->backward_mmap[0].base;
 733	}
 734	return NULL;
 735}
 736
 737static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
 738{
 739	const struct perf_event_mmap_page *pc;
 740
 741	pc = perf_evlist__pick_pc(rec->evlist);
 742	if (pc)
 743		return pc;
 744	return NULL;
 745}
 746
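/*
 * Emit the synthesized (non-sample) events the report side needs to make
 * sense of the samples: attrs and tracing data for pipe output, then the
 * time conversion and auxtrace info records, kernel and module mmaps,
 * guest machines, and the mmap/comm state of already-running target
 * threads.
 */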
 747static int record__synthesize(struct record *rec, bool tail)
 748{
 749	struct perf_session *session = rec->session;
 750	struct machine *machine = &session->machines.host;
 751	struct perf_data_file *file = &rec->file;
 752	struct record_opts *opts = &rec->opts;
 753	struct perf_tool *tool = &rec->tool;
 754	int fd = perf_data_file__fd(file);
 755	int err = 0;
 756
 757	if (rec->opts.tail_synthesize != tail)
 758		return 0;
 759
 760	if (file->is_pipe) {
 761		err = perf_event__synthesize_attrs(tool, session,
 762						   process_synthesized_event);
 763		if (err < 0) {
 764			pr_err("Couldn't synthesize attrs.\n");
 765			goto out;
 766		}
 767
 768		if (have_tracepoints(&rec->evlist->entries)) {
 769			/*
 770			 * FIXME err <= 0 here actually means that
  771			 * there were no tracepoints so it's not really
 772			 * an error, just that we don't need to
 773			 * synthesize anything.  We really have to
 774			 * return this more properly and also
 775			 * propagate errors that now are calling die()
 776			 */
 777			err = perf_event__synthesize_tracing_data(tool,	fd, rec->evlist,
 778								  process_synthesized_event);
 779			if (err <= 0) {
 780				pr_err("Couldn't record tracing data.\n");
 781				goto out;
 782			}
 783			rec->bytes_written += err;
 784		}
 785	}
 786
 787	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
 788					  process_synthesized_event, machine);
 789	if (err)
 790		goto out;
 791
 792	if (rec->opts.full_auxtrace) {
 793		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
 794					session, process_synthesized_event);
 795		if (err)
 796			goto out;
 797	}
 798
 799	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
 800						 machine);
 801	WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
 802			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
 803			   "Check /proc/kallsyms permission or run as root.\n");
 804
 805	err = perf_event__synthesize_modules(tool, process_synthesized_event,
 806					     machine);
 807	WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
 808			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
 809			   "Check /proc/modules permission or run as root.\n");
 810
 811	if (perf_guest) {
 812		machines__process_guests(&session->machines,
 813					 perf_event__synthesize_guest_os, tool);
 814	}
 815
 816	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
 817					    process_synthesized_event, opts->sample_address,
 818					    opts->proc_map_timeout);
 819out:
 820	return err;
 821}
 822
 823static int __cmd_record(struct record *rec, int argc, const char **argv)
 824{
 825	int err;
 826	int status = 0;
 827	unsigned long waking = 0;
 828	const bool forks = argc > 0;
 829	struct machine *machine;
 830	struct perf_tool *tool = &rec->tool;
 831	struct record_opts *opts = &rec->opts;
 832	struct perf_data_file *file = &rec->file;
 833	struct perf_session *session;
 834	bool disabled = false, draining = false;
 835	int fd;
 836
 837	rec->progname = argv[0];
 838
 839	atexit(record__sig_exit);
 840	signal(SIGCHLD, sig_handler);
 841	signal(SIGINT, sig_handler);
 842	signal(SIGTERM, sig_handler);
 843	signal(SIGSEGV, sigsegv_handler);
 844
 845	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output) {
 846		signal(SIGUSR2, snapshot_sig_handler);
 847		if (rec->opts.auxtrace_snapshot_mode)
 848			trigger_on(&auxtrace_snapshot_trigger);
 849		if (rec->switch_output)
 850			trigger_on(&switch_output_trigger);
 851	} else {
 852		signal(SIGUSR2, SIG_IGN);
 853	}
 854
 855	session = perf_session__new(file, false, tool);
 856	if (session == NULL) {
 857		pr_err("Perf session creation failed.\n");
 858		return -1;
 859	}
 860
 861	fd = perf_data_file__fd(file);
 862	rec->session = session;
 863
 864	record__init_features(rec);
 865
 866	if (forks) {
 867		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
 868						    argv, file->is_pipe,
 869						    workload_exec_failed_signal);
 870		if (err < 0) {
 871			pr_err("Couldn't run the workload!\n");
 872			status = err;
 873			goto out_delete_session;
 874		}
 875	}
 876
 877	if (record__open(rec) != 0) {
 878		err = -1;
 879		goto out_child;
 880	}
 881
 882	err = bpf__apply_obj_config();
 883	if (err) {
 884		char errbuf[BUFSIZ];
 885
 886		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
 887		pr_err("ERROR: Apply config to BPF failed: %s\n",
 888			 errbuf);
 889		goto out_child;
 890	}
 891
 892	/*
 893	 * Normally perf_session__new would do this, but it doesn't have the
 894	 * evlist.
 895	 */
 896	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
 897		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
 898		rec->tool.ordered_events = false;
 899	}
 900
 901	if (!rec->evlist->nr_groups)
 902		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
 903
 904	if (file->is_pipe) {
 905		err = perf_header__write_pipe(fd);
 906		if (err < 0)
 907			goto out_child;
 908	} else {
 909		err = perf_session__write_header(session, rec->evlist, fd, false);
 910		if (err < 0)
 911			goto out_child;
 912	}
 913
 914	if (!rec->no_buildid
 915	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
 916		pr_err("Couldn't generate buildids. "
 917		       "Use --no-buildid to profile anyway.\n");
 918		err = -1;
 919		goto out_child;
 920	}
 921
 922	machine = &session->machines.host;
 923
 924	err = record__synthesize(rec, false);
 925	if (err < 0)
 926		goto out_child;
 927
 928	if (rec->realtime_prio) {
 929		struct sched_param param;
 930
 931		param.sched_priority = rec->realtime_prio;
 932		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
 933			pr_err("Could not set realtime priority.\n");
 934			err = -1;
 935			goto out_child;
 936		}
 937	}
 938
 939	/*
 940	 * When perf is starting the traced process, all the events
 941	 * (apart from group members) have enable_on_exec=1 set,
 942	 * so don't spoil it by prematurely enabling them.
 943	 */
 944	if (!target__none(&opts->target) && !opts->initial_delay)
 945		perf_evlist__enable(rec->evlist);
 946
 947	/*
 948	 * Let the child rip
 949	 */
 950	if (forks) {
 951		union perf_event *event;
 952
 953		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
 954		if (event == NULL) {
 955			err = -ENOMEM;
 956			goto out_child;
 957		}
 958
 959		/*
 960		 * Some H/W events are generated before COMM event
 961		 * which is emitted during exec(), so perf script
 962		 * cannot see a correct process name for those events.
 963		 * Synthesize COMM event to prevent it.
 964		 */
 965		perf_event__synthesize_comm(tool, event,
 966					    rec->evlist->workload.pid,
 967					    process_synthesized_event,
 968					    machine);
 969		free(event);
 970
 971		perf_evlist__start_workload(rec->evlist);
 972	}
 973
 974	if (opts->initial_delay) {
 975		usleep(opts->initial_delay * USEC_PER_MSEC);
 976		perf_evlist__enable(rec->evlist);
 977	}
 978
 979	trigger_ready(&auxtrace_snapshot_trigger);
 980	trigger_ready(&switch_output_trigger);
 981	perf_hooks__invoke_record_start();
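	/*
	 * Main record loop: each pass drains all ring buffers, services the
	 * auxtrace snapshot and switch-output triggers, then polls until no
	 * new samples arrive and 'done' (or 'draining') ends the loop.
	 */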
 982	for (;;) {
 983		unsigned long long hits = rec->samples;
 984
 985		/*
  986		 * rec->evlist->bkw_mmap_state may be BKW_MMAP_EMPTY here,
  987		 * when done == true and hits != rec->samples in the
  988		 * previous round.
  989		 *
  990		 * perf_evlist__toggle_bkw_mmap ensures we never convert
  991		 * BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
 992		 */
 993		if (trigger_is_hit(&switch_output_trigger) || done || draining)
 994			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
 995
 996		if (record__mmap_read_all(rec) < 0) {
 997			trigger_error(&auxtrace_snapshot_trigger);
 998			trigger_error(&switch_output_trigger);
 999			err = -1;
1000			goto out_child;
1001		}
1002
1003		if (auxtrace_record__snapshot_started) {
1004			auxtrace_record__snapshot_started = 0;
1005			if (!trigger_is_error(&auxtrace_snapshot_trigger))
1006				record__read_auxtrace_snapshot(rec);
1007			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
1008				pr_err("AUX area tracing snapshot failed\n");
1009				err = -1;
1010				goto out_child;
1011			}
1012		}
1013
1014		if (trigger_is_hit(&switch_output_trigger)) {
1015			/*
1016			 * If switch_output_trigger is hit, the data in
1017			 * overwritable ring buffer should have been collected,
1018			 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1019			 *
 1020			 * If SIGUSR2 is raised after or during record__mmap_read_all(),
 1021			 * record__mmap_read_all() didn't collect data from the
 1022			 * overwritable ring buffer. Read again.
1023			 */
1024			if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1025				continue;
1026			trigger_ready(&switch_output_trigger);
1027
1028			/*
1029			 * Reenable events in overwrite ring buffer after
1030			 * record__mmap_read_all(): we should have collected
1031			 * data from it.
1032			 */
1033			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1034
1035			if (!quiet)
1036				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1037					waking);
1038			waking = 0;
1039			fd = record__switch_output(rec, false);
1040			if (fd < 0) {
1041				pr_err("Failed to switch to new file\n");
1042				trigger_error(&switch_output_trigger);
1043				err = fd;
1044				goto out_child;
1045			}
1046		}
1047
1048		if (hits == rec->samples) {
1049			if (done || draining)
1050				break;
1051			err = perf_evlist__poll(rec->evlist, -1);
1052			/*
1053			 * Propagate error, only if there's any. Ignore positive
1054			 * number of returned events and interrupt error.
1055			 */
1056			if (err > 0 || (err < 0 && errno == EINTR))
1057				err = 0;
1058			waking++;
1059
1060			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1061				draining = true;
1062		}
1063
1064		/*
1065		 * When perf is starting the traced process, at the end events
1066		 * die with the process and we wait for that. Thus no need to
1067		 * disable events in this case.
1068		 */
1069		if (done && !disabled && !target__none(&opts->target)) {
1070			trigger_off(&auxtrace_snapshot_trigger);
1071			perf_evlist__disable(rec->evlist);
1072			disabled = true;
1073		}
1074	}
1075	trigger_off(&auxtrace_snapshot_trigger);
1076	trigger_off(&switch_output_trigger);
1077
1078	if (forks && workload_exec_errno) {
1079		char msg[STRERR_BUFSIZE];
1080		const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
1081		pr_err("Workload failed: %s\n", emsg);
1082		err = -1;
1083		goto out_child;
1084	}
1085
1086	if (!quiet)
1087		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
1088
1089	if (target__none(&rec->opts.target))
1090		record__synthesize_workload(rec, true);
1091
1092out_child:
1093	if (forks) {
1094		int exit_status;
1095
1096		if (!child_finished)
1097			kill(rec->evlist->workload.pid, SIGTERM);
1098
1099		wait(&exit_status);
1100
1101		if (err < 0)
1102			status = err;
1103		else if (WIFEXITED(exit_status))
1104			status = WEXITSTATUS(exit_status);
1105		else if (WIFSIGNALED(exit_status))
1106			signr = WTERMSIG(exit_status);
1107	} else
1108		status = err;
1109
1110	record__synthesize(rec, true);
1111	/* this will be recalculated during process_buildids() */
1112	rec->samples = 0;
1113
1114	if (!err) {
1115		if (!rec->timestamp_filename) {
1116			record__finish_output(rec);
1117		} else {
1118			fd = record__switch_output(rec, true);
1119			if (fd < 0) {
1120				status = fd;
1121				goto out_delete_session;
1122			}
1123		}
1124	}
1125
1126	perf_hooks__invoke_record_end();
1127
1128	if (!err && !quiet) {
1129		char samples[128];
1130		const char *postfix = rec->timestamp_filename ?
1131					".<timestamp>" : "";
1132
1133		if (rec->samples && !rec->opts.full_auxtrace)
1134			scnprintf(samples, sizeof(samples),
1135				  " (%" PRIu64 " samples)", rec->samples);
1136		else
1137			samples[0] = '\0';
1138
1139		fprintf(stderr,	"[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
1140			perf_data_file__size(file) / 1024.0 / 1024.0,
1141			file->path, postfix, samples);
1142	}
1143
1144out_delete_session:
1145	perf_session__delete(session);
1146	return status;
1147}
1148
1149static void callchain_debug(struct callchain_param *callchain)
1150{
1151	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
1152
1153	pr_debug("callchain: type %s\n", str[callchain->record_mode]);
1154
1155	if (callchain->record_mode == CALLCHAIN_DWARF)
1156		pr_debug("callchain: stack dump size %d\n",
1157			 callchain->dump_size);
1158}
1159
1160int record_opts__parse_callchain(struct record_opts *record,
1161				 struct callchain_param *callchain,
1162				 const char *arg, bool unset)
1163{
1164	int ret;
1165	callchain->enabled = !unset;
1166
1167	/* --no-call-graph */
1168	if (unset) {
1169		callchain->record_mode = CALLCHAIN_NONE;
1170		pr_debug("callchain: disabled\n");
1171		return 0;
1172	}
1173
1174	ret = parse_callchain_record_opt(arg, callchain);
1175	if (!ret) {
1176		/* Enable data address sampling for DWARF unwind. */
1177		if (callchain->record_mode == CALLCHAIN_DWARF)
1178			record->sample_address = true;
1179		callchain_debug(callchain);
1180	}
1181
1182	return ret;
1183}
1184
1185int record_parse_callchain_opt(const struct option *opt,
1186			       const char *arg,
1187			       int unset)
1188{
1189	return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
1190}
1191
1192int record_callchain_opt(const struct option *opt,
1193			 const char *arg __maybe_unused,
1194			 int unset __maybe_unused)
1195{
1196	struct callchain_param *callchain = opt->value;
1197
1198	callchain->enabled = true;
1199
1200	if (callchain->record_mode == CALLCHAIN_NONE)
1201		callchain->record_mode = CALLCHAIN_FP;
1202
1203	callchain_debug(callchain);
1204	return 0;
1205}
1206
1207static int perf_record_config(const char *var, const char *value, void *cb)
1208{
1209	struct record *rec = cb;
1210
1211	if (!strcmp(var, "record.build-id")) {
1212		if (!strcmp(value, "cache"))
1213			rec->no_buildid_cache = false;
1214		else if (!strcmp(value, "no-cache"))
1215			rec->no_buildid_cache = true;
1216		else if (!strcmp(value, "skip"))
1217			rec->no_buildid = true;
1218		else
1219			return -1;
1220		return 0;
1221	}
1222	if (!strcmp(var, "record.call-graph"))
1223		var = "call-graph.record-mode"; /* fall-through */
1224
1225	return perf_default_config(var, value, cb);
1226}
1227
1228struct clockid_map {
1229	const char *name;
1230	int clockid;
1231};
1232
1233#define CLOCKID_MAP(n, c)	\
1234	{ .name = n, .clockid = (c), }
1235
1236#define CLOCKID_END	{ .name = NULL, }
1237
1238
1239/*
1240 * Add the missing ones, we need to build on many distros...
1241 */
1242#ifndef CLOCK_MONOTONIC_RAW
1243#define CLOCK_MONOTONIC_RAW 4
1244#endif
1245#ifndef CLOCK_BOOTTIME
1246#define CLOCK_BOOTTIME 7
1247#endif
1248#ifndef CLOCK_TAI
1249#define CLOCK_TAI 11
1250#endif
1251
1252static const struct clockid_map clockids[] = {
1253	/* available for all events, NMI safe */
1254	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
1255	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
1256
1257	/* available for some events */
1258	CLOCKID_MAP("realtime", CLOCK_REALTIME),
1259	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
1260	CLOCKID_MAP("tai", CLOCK_TAI),
1261
1262	/* available for the lazy */
1263	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
1264	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
1265	CLOCKID_MAP("real", CLOCK_REALTIME),
1266	CLOCKID_MAP("boot", CLOCK_BOOTTIME),
1267
1268	CLOCKID_END,
1269};
1270
1271static int parse_clockid(const struct option *opt, const char *str, int unset)
1272{
1273	struct record_opts *opts = (struct record_opts *)opt->value;
1274	const struct clockid_map *cm;
1275	const char *ostr = str;
1276
1277	if (unset) {
1278		opts->use_clockid = 0;
1279		return 0;
1280	}
1281
1282	/* no arg passed */
1283	if (!str)
1284		return 0;
1285
1286	/* no setting it twice */
1287	if (opts->use_clockid)
1288		return -1;
1289
1290	opts->use_clockid = true;
1291
 1292	/* if it's a number, we're done */
1293	if (sscanf(str, "%d", &opts->clockid) == 1)
1294		return 0;
1295
1296	/* allow a "CLOCK_" prefix to the name */
1297	if (!strncasecmp(str, "CLOCK_", 6))
1298		str += 6;
1299
1300	for (cm = clockids; cm->name; cm++) {
1301		if (!strcasecmp(str, cm->name)) {
1302			opts->clockid = cm->clockid;
1303			return 0;
1304		}
1305	}
1306
1307	opts->use_clockid = false;
1308	ui__warning("unknown clockid %s, check man page\n", ostr);
1309	return -1;
1310}
1311
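/*
 * -m/--mmap-pages takes "pages[,pages]": the value before the comma sizes
 * the regular data mmaps, the optional value after it sizes the AUX area
 * tracing mmaps.
 */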
1312static int record__parse_mmap_pages(const struct option *opt,
1313				    const char *str,
1314				    int unset __maybe_unused)
1315{
1316	struct record_opts *opts = opt->value;
1317	char *s, *p;
1318	unsigned int mmap_pages;
1319	int ret;
1320
1321	if (!str)
1322		return -EINVAL;
1323
1324	s = strdup(str);
1325	if (!s)
1326		return -ENOMEM;
1327
1328	p = strchr(s, ',');
1329	if (p)
1330		*p = '\0';
1331
1332	if (*s) {
1333		ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1334		if (ret)
1335			goto out_free;
1336		opts->mmap_pages = mmap_pages;
1337	}
1338
1339	if (!p) {
1340		ret = 0;
1341		goto out_free;
1342	}
1343
1344	ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1345	if (ret)
1346		goto out_free;
1347
1348	opts->auxtrace_mmap_pages = mmap_pages;
1349
1350out_free:
1351	free(s);
1352	return ret;
1353}
1354
1355static const char * const __record_usage[] = {
1356	"perf record [<options>] [<command>]",
1357	"perf record [<options>] -- <command> [<options>]",
1358	NULL
1359};
1360const char * const *record_usage = __record_usage;
1361
1362/*
1363 * XXX Ideally would be local to cmd_record() and passed to a record__new
1364 * because we need to have access to it in record__exit, that is called
1365 * after cmd_record() exits, but since record_options need to be accessible to
1366 * builtin-script, leave it here.
1367 *
 1368	 * At least we don't touch it in all the other functions here directly.
1369 *
1370 * Just say no to tons of global variables, sigh.
1371 */
1372static struct record record = {
1373	.opts = {
1374		.sample_time	     = true,
1375		.mmap_pages	     = UINT_MAX,
1376		.user_freq	     = UINT_MAX,
1377		.user_interval	     = ULLONG_MAX,
1378		.freq		     = 4000,
1379		.target		     = {
1380			.uses_mmap   = true,
1381			.default_per_cpu = true,
1382		},
1383		.proc_map_timeout     = 500,
1384	},
1385	.tool = {
1386		.sample		= process_sample_event,
1387		.fork		= perf_event__process_fork,
1388		.exit		= perf_event__process_exit,
1389		.comm		= perf_event__process_comm,
1390		.mmap		= perf_event__process_mmap,
1391		.mmap2		= perf_event__process_mmap2,
1392		.ordered_events	= true,
1393	},
1394};
1395
1396const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
1397	"\n\t\t\t\tDefault: fp";
1398
1399static bool dry_run;
1400
1401/*
1402 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1403 * with it and switch to use the library functions in perf_evlist that came
1404 * from builtin-record.c, i.e. use record_opts,
1405 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
1406 * using pipes, etc.
1407 */
1408static struct option __record_options[] = {
1409	OPT_CALLBACK('e', "event", &record.evlist, "event",
1410		     "event selector. use 'perf list' to list available events",
1411		     parse_events_option),
1412	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
1413		     "event filter", parse_filter),
1414	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1415			   NULL, "don't record events from perf itself",
1416			   exclude_perf),
1417	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
1418		    "record events on existing process id"),
1419	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
1420		    "record events on existing thread id"),
1421	OPT_INTEGER('r', "realtime", &record.realtime_prio,
1422		    "collect data with this RT SCHED_FIFO priority"),
1423	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
1424		    "collect data without buffering"),
1425	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
1426		    "collect raw sample records from all opened counters"),
1427	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
1428			    "system-wide collection from all CPUs"),
1429	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
1430		    "list of cpus to monitor"),
1431	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
1432	OPT_STRING('o', "output", &record.file.path, "file",
1433		    "output file name"),
1434	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1435			&record.opts.no_inherit_set,
1436			"child tasks do not inherit counters"),
1437	OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1438		    "synthesize non-sample events at the end of output"),
1439	OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
1440	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
1441	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1442		     "number of mmap data pages and AUX area tracing mmap pages",
1443		     record__parse_mmap_pages),
1444	OPT_BOOLEAN(0, "group", &record.opts.group,
1445		    "put the counters into a counter group"),
1446	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
1447			   NULL, "enables call-graph recording" ,
1448			   &record_callchain_opt),
1449	OPT_CALLBACK(0, "call-graph", &record.opts,
1450		     "record_mode[,record_size]", record_callchain_help,
1451		     &record_parse_callchain_opt),
1452	OPT_INCR('v', "verbose", &verbose,
1453		    "be more verbose (show counter open errors, etc)"),
1454	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
1455	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
1456		    "per thread counts"),
1457	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
1458	OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
1459	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
1460			&record.opts.sample_time_set,
1461			"Record the sample timestamps"),
1462	OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
1463	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
1464		    "don't sample"),
1465	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
1466			&record.no_buildid_cache_set,
1467			"do not update the buildid cache"),
1468	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
1469			&record.no_buildid_set,
1470			"do not collect buildids in perf.data"),
1471	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
1472		     "monitor event in cgroup name only",
1473		     parse_cgroups),
1474	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
1475		  "ms to wait before starting measurement after program start"),
1476	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
1477		   "user to profile"),
1478
1479	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
1480		     "branch any", "sample any taken branches",
1481		     parse_branch_stack),
1482
1483	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
1484		     "branch filter mask", "branch stack filter modes",
1485		     parse_branch_stack),
1486	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
1487		    "sample by weight (on special events only)"),
1488	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
1489		    "sample transaction flags (special events only)"),
1490	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
1491		    "use per-thread mmaps"),
1492	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
1493		    "sample selected machine registers on interrupt,"
1494		    " use -I ? to list register names", parse_regs),
1495	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
1496		    "Record running/enabled time of read (:S) events"),
1497	OPT_CALLBACK('k', "clockid", &record.opts,
1498	"clockid", "clockid to use for events, see clock_gettime()",
1499	parse_clockid),
1500	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
1501			  "opts", "AUX area tracing Snapshot Mode", ""),
1502	OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
1503			"per thread proc mmap processing timeout in ms"),
1504	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
1505		    "Record context switch events"),
1506	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
1507			 "Configure all used events to run in kernel space.",
1508			 PARSE_OPT_EXCLUSIVE),
1509	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
1510			 "Configure all used events to run in user space.",
1511			 PARSE_OPT_EXCLUSIVE),
1512	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
1513		   "clang binary to use for compiling BPF scriptlets"),
1514	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
1515		   "options passed to clang when compiling BPF scriptlets"),
1516	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
1517		   "file", "vmlinux pathname"),
1518	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
1519		    "Record build-id of all DSOs regardless of hits"),
1520	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
1521		    "append timestamp to output filename"),
1522	OPT_BOOLEAN(0, "switch-output", &record.switch_output,
1523		    "Switch output when receive SIGUSR2"),
1524	OPT_BOOLEAN(0, "dry-run", &dry_run,
1525		    "Parse options then exit"),
1526	OPT_END()
1527};
1528
1529struct option *record_options = __record_options;
1530
1531int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
1532{
1533	int err;
1534	struct record *rec = &record;
1535	char errbuf[BUFSIZ];
1536
1537#ifndef HAVE_LIBBPF_SUPPORT
1538# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1539	set_nobuild('\0', "clang-path", true);
1540	set_nobuild('\0', "clang-opt", true);
1541# undef set_nobuild
1542#endif
1543
1544#ifndef HAVE_BPF_PROLOGUE
1545# if !defined (HAVE_DWARF_SUPPORT)
1546#  define REASON  "NO_DWARF=1"
1547# elif !defined (HAVE_LIBBPF_SUPPORT)
1548#  define REASON  "NO_LIBBPF=1"
1549# else
1550#  define REASON  "this architecture doesn't support BPF prologue"
1551# endif
1552# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1553	set_nobuild('\0', "vmlinux", true);
1554# undef set_nobuild
1555# undef REASON
1556#endif
1557
1558	rec->evlist = perf_evlist__new();
1559	if (rec->evlist == NULL)
1560		return -ENOMEM;
1561
1562	perf_config(perf_record_config, rec);
1563
1564	argc = parse_options(argc, argv, record_options, record_usage,
1565			    PARSE_OPT_STOP_AT_NON_OPTION);
1566	if (!argc && target__none(&rec->opts.target))
1567		usage_with_options(record_usage, record_options);
1568
1569	if (nr_cgroups && !rec->opts.target.system_wide) {
1570		usage_with_options_msg(record_usage, record_options,
1571			"cgroup monitoring only available in system-wide mode");
1572
1573	}
1574	if (rec->opts.record_switch_events &&
1575	    !perf_can_record_switch_events()) {
1576		ui__error("kernel does not support recording context switch events\n");
1577		parse_options_usage(record_usage, record_options, "switch-events", 0);
1578		return -EINVAL;
1579	}
1580
1581	if (rec->switch_output)
1582		rec->timestamp_filename = true;
1583
1584	if (!rec->itr) {
1585		rec->itr = auxtrace_record__init(rec->evlist, &err);
1586		if (err)
1587			goto out;
1588	}
1589
1590	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
1591					      rec->opts.auxtrace_snapshot_opts);
1592	if (err)
1593		goto out;
1594
1595	/*
1596	 * Allow aliases to facilitate the lookup of symbols for address
1597	 * filters. Refer to auxtrace_parse_filters().
1598	 */
1599	symbol_conf.allow_aliases = true;
1600
1601	symbol__init(NULL);
1602
1603	err = auxtrace_parse_filters(rec->evlist);
1604	if (err)
1605		goto out;
1606
1607	if (dry_run)
1608		goto out;
1609
1610	err = bpf__setup_stdout(rec->evlist);
1611	if (err) {
1612		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
1613		pr_err("ERROR: Setup BPF stdout failed: %s\n",
1614			 errbuf);
1615		goto out;
1616	}
1617
1618	err = -ENOMEM;
1619
1620	if (symbol_conf.kptr_restrict)
1621		pr_warning(
1622"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1623"check /proc/sys/kernel/kptr_restrict.\n\n"
1624"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1625"file is not found in the buildid cache or in the vmlinux path.\n\n"
1626"Samples in kernel modules won't be resolved at all.\n\n"
1627"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1628"even with a suitable vmlinux or kallsyms file.\n\n");
1629
1630	if (rec->no_buildid_cache || rec->no_buildid) {
1631		disable_buildid_cache();
1632	} else if (rec->switch_output) {
1633		/*
1634		 * In 'perf record --switch-output', disable buildid
1635		 * generation by default to reduce data file switching
 1636		 * overhead. Still generate buildids if they are explicitly
 1637		 * requested with
1638		 *
1639		 *  perf record --switch-output --no-no-buildid \
1640		 *              --no-no-buildid-cache
1641		 *
 1642		 * The following code is equivalent to:
1643		 *
1644		 * if ((rec->no_buildid || !rec->no_buildid_set) &&
1645		 *     (rec->no_buildid_cache || !rec->no_buildid_cache_set))
1646		 *         disable_buildid_cache();
1647		 */
1648		bool disable = true;
1649
1650		if (rec->no_buildid_set && !rec->no_buildid)
1651			disable = false;
1652		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
1653			disable = false;
1654		if (disable) {
1655			rec->no_buildid = true;
1656			rec->no_buildid_cache = true;
1657			disable_buildid_cache();
1658		}
1659	}
1660
1661	if (record.opts.overwrite)
1662		record.opts.tail_synthesize = true;
1663
1664	if (rec->evlist->nr_entries == 0 &&
1665	    perf_evlist__add_default(rec->evlist) < 0) {
1666		pr_err("Not enough memory for event selector list\n");
1667		goto out;
1668	}
1669
1670	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
1671		rec->opts.no_inherit = true;
1672
1673	err = target__validate(&rec->opts.target);
1674	if (err) {
1675		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
1676		ui__warning("%s", errbuf);
1677	}
1678
1679	err = target__parse_uid(&rec->opts.target);
1680	if (err) {
1681		int saved_errno = errno;
1682
1683		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
1684		ui__error("%s", errbuf);
1685
1686		err = -saved_errno;
1687		goto out;
1688	}
1689
1690	/* Enable ignoring missing threads when -u option is defined. */
1691	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX;
1692
1693	err = -ENOMEM;
1694	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
1695		usage_with_options(record_usage, record_options);
1696
1697	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
1698	if (err)
1699		goto out;
1700
1701	/*
1702	 * We take all buildids when the file contains
 1703	 * AUX area tracing data, because we do not decode the
 1704	 * trace - decoding it would take too long.
1705	 */
1706	if (rec->opts.full_auxtrace)
1707		rec->buildid_all = true;
1708
1709	if (record_opts__config(&rec->opts)) {
1710		err = -EINVAL;
1711		goto out;
1712	}
1713
1714	err = __cmd_record(&record, argc, argv);
1715out:
1716	perf_evlist__delete(rec->evlist);
1717	symbol__exit();
1718	auxtrace_record__free(rec->itr);
1719	return err;
1720}
1721
1722static void snapshot_sig_handler(int sig __maybe_unused)
1723{
1724	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
1725		trigger_hit(&auxtrace_snapshot_trigger);
1726		auxtrace_record__snapshot_started = 1;
1727		if (auxtrace_record__snapshot_start(record.itr))
1728			trigger_error(&auxtrace_snapshot_trigger);
1729	}
1730
1731	if (trigger_is_ready(&switch_output_trigger))
1732		trigger_hit(&switch_output_trigger);
1733}