v3.15
  1/*
  2 * builtin-record.c
  3 *
  4 * Builtin record command: Record the profile of a workload
  5 * (or a CPU, or a PID) into the perf.data output file - for
  6 * later analysis via perf report.
  7 */
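   /*
    * Example invocations (illustrative; every option used here is
    * defined in record_options below):
    *
    *   perf record -e cycles -g -- ./workload   # cycles with call graphs
    *   perf record -a sleep 5                   # system-wide for 5 seconds
    *   perf record -p 1234 -o out.data          # attach to an existing PID
    */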
  8#include "builtin.h"
  9
 10#include "perf.h"
 11
 12#include "util/build-id.h"
 13#include "util/util.h"
 14#include "util/parse-options.h"
 15#include "util/parse-events.h"
 16
 17#include "util/header.h"
 18#include "util/event.h"
 19#include "util/evlist.h"
 20#include "util/evsel.h"
 21#include "util/debug.h"
 22#include "util/session.h"
 23#include "util/tool.h"
 24#include "util/symbol.h"
 25#include "util/cpumap.h"
 26#include "util/thread_map.h"
 27#include "util/data.h"
 28
 29#include <unistd.h>
 30#include <sched.h>
 31#include <sys/mman.h>
 32
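   /*
    * Fallback for C libraries that lack on_exit(3), emulated on top of
    * atexit(3).  exit() is wrapped below so the status passed to it is
    * captured in __exitcode and handed to each registered handler.
    */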
 33#ifndef HAVE_ON_EXIT_SUPPORT
 34#ifndef ATEXIT_MAX
 35#define ATEXIT_MAX 32
 36#endif
 37static int __on_exit_count = 0;
 38typedef void (*on_exit_func_t) (int, void *);
 39static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
 40static void *__on_exit_args[ATEXIT_MAX];
 41static int __exitcode = 0;
 42static void __handle_on_exit_funcs(void);
 43static int on_exit(on_exit_func_t function, void *arg);
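   /* The parenthesized (exit) in the macro below calls the real libc exit(). */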
 44#define exit(x) (exit)(__exitcode = (x))
 45
 46static int on_exit(on_exit_func_t function, void *arg)
 47{
 48	if (__on_exit_count == ATEXIT_MAX)
 49		return -ENOMEM;
 50	else if (__on_exit_count == 0)
 51		atexit(__handle_on_exit_funcs);
 52	__on_exit_funcs[__on_exit_count] = function;
 53	__on_exit_args[__on_exit_count++] = arg;
 54	return 0;
 55}
 56
 57static void __handle_on_exit_funcs(void)
 58{
 59	int i;
 60	for (i = 0; i < __on_exit_count; i++)
 61		__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
 62}
 63#endif
 64
 65struct record {
 66	struct perf_tool	tool;
 67	struct record_opts	opts;
 68	u64			bytes_written;
 69	struct perf_data_file	file;
 70	struct perf_evlist	*evlist;
 71	struct perf_session	*session;
 72	const char		*progname;
 73	int			realtime_prio;
 74	bool			no_buildid;
 75	bool			no_buildid_cache;
 76	long			samples;
 77};
 78
 79static int record__write(struct record *rec, void *bf, size_t size)
 80{
 81	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
 82		pr_err("failed to write perf data, error: %m\n");
 83		return -1;
 84	}
 85
 86	rec->bytes_written += size;
 87	return 0;
 88}
 89
 90static int process_synthesized_event(struct perf_tool *tool,
 91				     union perf_event *event,
 92				     struct perf_sample *sample __maybe_unused,
 93				     struct machine *machine __maybe_unused)
 94{
 95	struct record *rec = container_of(tool, struct record, tool);
 96	return record__write(rec, event, event->header.size);
 97}
 98
 99static int record__mmap_read(struct record *rec, struct perf_mmap *md)
100{
101	unsigned int head = perf_mmap__read_head(md);
102	unsigned int old = md->prev;
103	unsigned char *data = md->base + page_size;
104	unsigned long size;
105	void *buf;
106	int rc = 0;
107
108	if (old == head)
109		return 0;
110
111	rec->samples++;
112
113	size = head - old;
114
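   	/*
   	 * The data between 'old' and 'head' may wrap past the end of the
   	 * ring buffer; if so, write out the chunk up to the end first.
   	 */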
115	if ((old & md->mask) + size != (head & md->mask)) {
116		buf = &data[old & md->mask];
117		size = md->mask + 1 - (old & md->mask);
118		old += size;
119
120		if (record__write(rec, buf, size) < 0) {
121			rc = -1;
122			goto out;
123		}
124	}
125
126	buf = &data[old & md->mask];
127	size = head - old;
128	old += size;
129
130	if (record__write(rec, buf, size) < 0) {
131		rc = -1;
132		goto out;
133	}
134
135	md->prev = old;
136	perf_mmap__write_tail(md, old);
137
138out:
139	return rc;
140}
141
142static volatile int done = 0;
143static volatile int signr = -1;
144static volatile int child_finished = 0;
145
146static void sig_handler(int sig)
147{
148	if (sig == SIGCHLD)
149		child_finished = 1;
150
151	done = 1;
152	signr = sig;
153}
154
155static void record__sig_exit(int exit_status __maybe_unused, void *arg)
156{
157	struct record *rec = arg;
158	int status;
159
160	if (rec->evlist->workload.pid > 0) {
161		if (!child_finished)
162			kill(rec->evlist->workload.pid, SIGTERM);
163
164		wait(&status);
165		if (WIFSIGNALED(status))
166			psignal(WTERMSIG(status), rec->progname);
167	}
168
169	if (signr == -1 || signr == SIGUSR1)
170		return;
171
172	signal(signr, SIG_DFL);
173}
174
175static int record__open(struct record *rec)
176{
177	char msg[512];
178	struct perf_evsel *pos;
179	struct perf_evlist *evlist = rec->evlist;
180	struct perf_session *session = rec->session;
181	struct record_opts *opts = &rec->opts;
182	int rc = 0;
183
184	perf_evlist__config(evlist, opts);
185
186	evlist__for_each(evlist, pos) {
187try_again:
188		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
189			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
190				if (verbose)
191					ui__warning("%s\n", msg);
192				goto try_again;
193			}
194
195			rc = -errno;
196			perf_evsel__open_strerror(pos, &opts->target,
197						  errno, msg, sizeof(msg));
198			ui__error("%s\n", msg);
199			goto out;
200		}
201	}
202
203	if (perf_evlist__apply_filters(evlist)) {
204		error("failed to set filter with %d (%s)\n", errno,
205			strerror(errno));
206		rc = -1;
207		goto out;
208	}
209
210	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
211		if (errno == EPERM) {
212			pr_err("Permission error mapping pages.\n"
213			       "Consider increasing "
214			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
215			       "or try again with a smaller value of -m/--mmap_pages.\n"
216			       "(current value: %u)\n", opts->mmap_pages);
217			rc = -errno;
218		} else {
219			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
220			rc = -errno;
221		}
222		goto out;
223	}
224
225	session->evlist = evlist;
226	perf_session__set_id_hdr_size(session);
227out:
228	return rc;
229}
230
231static int process_buildids(struct record *rec)
232{
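   	/*
   	 * The data section ends at the current file offset, so everything
   	 * from header.data_offset up to here is sample data to scan for
   	 * build-id (DSO hit) marking.
   	 */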
233	struct perf_data_file *file  = &rec->file;
234	struct perf_session *session = rec->session;
235	u64 start = session->header.data_offset;
236
237	u64 size = lseek(file->fd, 0, SEEK_CUR);
238	if (size == 0)
239		return 0;
240
241	return __perf_session__process_events(session, start,
242					      size - start,
243					      size, &build_id__mark_dso_hit_ops);
244}
245
246static void record__exit(int status, void *arg)
247{
248	struct record *rec = arg;
249	struct perf_data_file *file = &rec->file;
250
251	if (status != 0)
252		return;
253
254	if (!file->is_pipe) {
255		rec->session->header.data_size += rec->bytes_written;
256
257		if (!rec->no_buildid)
258			process_buildids(rec);
259		perf_session__write_header(rec->session, rec->evlist,
260					   file->fd, true);
261		perf_session__delete(rec->session);
262		perf_evlist__delete(rec->evlist);
263		symbol__exit();
264	}
265}
266
267static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
268{
269	int err;
270	struct perf_tool *tool = data;
271	/*
272	 * As for the guest kernel, when processing the record & report
273	 * subcommands we arrange the module mmaps prior to the guest kernel
274	 * mmap and trigger a dso preload, because default guest module
275	 * symbols are loaded from guest kallsyms instead of
276	 * /lib/modules/XXX/XXX. This avoids missing symbols when the first
277	 * address is in a module instead of in the guest kernel.
278	 */
279	err = perf_event__synthesize_modules(tool, process_synthesized_event,
280					     machine);
281	if (err < 0)
282		pr_err("Couldn't record guest kernel [%d]'s reference"
283		       " relocation symbol.\n", machine->pid);
284
285	/*
286	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
287	 * have no _text sometimes.
288	 */
289	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
290						 machine);
291	if (err < 0)
292		pr_err("Couldn't record guest kernel [%d]'s reference"
293		       " relocation symbol.\n", machine->pid);
294}
295
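   /*
    * Synthetic event written after each pass over the mmap buffers; the
    * report side can use it as a flush point when re-sorting events into
    * timestamp order.
    */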
296static struct perf_event_header finished_round_event = {
297	.size = sizeof(struct perf_event_header),
298	.type = PERF_RECORD_FINISHED_ROUND,
299};
300
301static int record__mmap_read_all(struct record *rec)
302{
303	int i;
304	int rc = 0;
305
306	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
307		if (rec->evlist->mmap[i].base) {
308			if (record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
309				rc = -1;
310				goto out;
311			}
312		}
313	}
314
315	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
316		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
317
318out:
319	return rc;
320}
321
322static void record__init_features(struct record *rec)
323{
324	struct perf_session *session = rec->session;
325	int feat;
326
327	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
328		perf_header__set_feat(&session->header, feat);
329
330	if (rec->no_buildid)
331		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
332
333	if (!have_tracepoints(&rec->evlist->entries))
334		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
335
336	if (!rec->opts.branch_stack)
337		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
338}
339
340static volatile int workload_exec_errno;
341
342/*
343 * perf_evlist__prepare_workload will send a SIGUSR1
344 * if the fork fails, since we asked by setting its
345 * want_signal to true.
346 */
347static void workload_exec_failed_signal(int signo, siginfo_t *info,
348					void *ucontext __maybe_unused)
349{
350	workload_exec_errno = info->si_value.sival_int;
351	done = 1;
352	signr = signo;
353	child_finished = 1;
354}
355
356static int __cmd_record(struct record *rec, int argc, const char **argv)
357{
358	int err;
359	unsigned long waking = 0;
360	const bool forks = argc > 0;
361	struct machine *machine;
362	struct perf_tool *tool = &rec->tool;
363	struct record_opts *opts = &rec->opts;
364	struct perf_data_file *file = &rec->file;
365	struct perf_session *session;
366	bool disabled = false;
367
368	rec->progname = argv[0];
369
370	on_exit(record__sig_exit, rec);
371	signal(SIGCHLD, sig_handler);
372	signal(SIGINT, sig_handler);
373	signal(SIGTERM, sig_handler);
374
375	session = perf_session__new(file, false, NULL);
376	if (session == NULL) {
377		pr_err("Perf session creation failed.\n");
378		return -1;
379	}
380
381	rec->session = session;
382
383	record__init_features(rec);
384
385	if (forks) {
386		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
387						    argv, file->is_pipe,
388						    workload_exec_failed_signal);
389		if (err < 0) {
390			pr_err("Couldn't run the workload!\n");
391			goto out_delete_session;
392		}
393	}
394
395	if (record__open(rec) != 0) {
396		err = -1;
397		goto out_delete_session;
398	}
399
400	if (!rec->evlist->nr_groups)
401		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
402
403	/*
404	 * perf_session__delete(session) will be called at record__exit()
405	 */
406	on_exit(record__exit, rec);
407
408	if (file->is_pipe) {
409		err = perf_header__write_pipe(file->fd);
410		if (err < 0)
411			goto out_delete_session;
412	} else {
413		err = perf_session__write_header(session, rec->evlist,
414						 file->fd, false);
415		if (err < 0)
416			goto out_delete_session;
417	}
418
419	if (!rec->no_buildid
420	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
421		pr_err("Couldn't generate buildids. "
422		       "Use --no-buildid to profile anyway.\n");
423		err = -1;
424		goto out_delete_session;
425	}
426
427	machine = &session->machines.host;
428
429	if (file->is_pipe) {
430		err = perf_event__synthesize_attrs(tool, session,
431						   process_synthesized_event);
432		if (err < 0) {
433			pr_err("Couldn't synthesize attrs.\n");
434			goto out_delete_session;
435		}
436
437		if (have_tracepoints(&rec->evlist->entries)) {
438			/*
439			 * FIXME err <= 0 here actually means that
440			 * there were no tracepoints so it's not really
441			 * an error, just that we don't need to
442			 * synthesize anything.  We really have to
443			 * return this more properly and also
444			 * propagate errors that now are calling die()
445			 */
446			err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
447								  process_synthesized_event);
448			if (err <= 0) {
449				pr_err("Couldn't record tracing data.\n");
450				goto out_delete_session;
451			}
452			rec->bytes_written += err;
453		}
454	}
455
456	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
457						 machine);
458	if (err < 0)
459		pr_err("Couldn't record kernel reference relocation symbol\n"
460		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
461		       "Check /proc/kallsyms permission or run as root.\n");
462
463	err = perf_event__synthesize_modules(tool, process_synthesized_event,
464					     machine);
465	if (err < 0)
466		pr_err("Couldn't record kernel module information.\n"
467		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
468		       "Check /proc/modules permission or run as root.\n");
469
470	if (perf_guest) {
471		machines__process_guests(&session->machines,
472					 perf_event__synthesize_guest_os, tool);
473	}
474
475	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
476					    process_synthesized_event, opts->sample_address);
477	if (err != 0)
478		goto out_delete_session;
479
480	if (rec->realtime_prio) {
481		struct sched_param param;
482
483		param.sched_priority = rec->realtime_prio;
484		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
485			pr_err("Could not set realtime priority.\n");
486			err = -1;
487			goto out_delete_session;
488		}
489	}
490
491	/*
492	 * When perf is starting the traced process, all the events
493	 * (apart from group members) have enable_on_exec=1 set,
494	 * so don't spoil it by prematurely enabling them.
495	 */
496	if (!target__none(&opts->target) && !opts->initial_delay)
497		perf_evlist__enable(rec->evlist);
498
499	/*
500	 * Let the child rip
501	 */
502	if (forks)
503		perf_evlist__start_workload(rec->evlist);
504
505	if (opts->initial_delay) {
506		usleep(opts->initial_delay * 1000);
507		perf_evlist__enable(rec->evlist);
508	}
509
510	for (;;) {
511		int hits = rec->samples;
512
513		if (record__mmap_read_all(rec) < 0) {
514			err = -1;
515			goto out_delete_session;
516		}
517
518		if (hits == rec->samples) {
519			if (done)
520				break;
521			err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1);
522			waking++;
523		}
524
525		/*
526		 * When perf is starting the traced process, at the end events
527		 * die with the process and we wait for that. Thus no need to
528		 * disable events in this case.
529		 */
530		if (done && !disabled && !target__none(&opts->target)) {
531			perf_evlist__disable(rec->evlist);
532			disabled = true;
533		}
534	}
535
536	if (forks && workload_exec_errno) {
537		char msg[512];
538		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
539		pr_err("Workload failed: %s\n", emsg);
540		err = -1;
541		goto out_delete_session;
542	}
543
544	if (quiet || signr == SIGUSR1)
545		return 0;
546
547	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
548
549	/*
550	 * Approximate RIP event size: 24 bytes.
551	 */
552	fprintf(stderr,
553		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
554		(double)rec->bytes_written / 1024.0 / 1024.0,
555		file->path,
556		rec->bytes_written / 24);
557
558	return 0;
559
560out_delete_session:
561	perf_session__delete(session);
562	return err;
563}
564
565#define BRANCH_OPT(n, m) \
566	{ .name = n, .mode = (m) }
567
568#define BRANCH_END { .name = NULL }
569
570struct branch_mode {
571	const char *name;
572	int mode;
573};
574
575static const struct branch_mode branch_modes[] = {
576	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
577	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
578	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
579	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
580	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
581	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
582	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
583	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
584	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
585	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
586	BRANCH_END
587};
588
589static int
590parse_branch_stack(const struct option *opt, const char *str, int unset)
591{
592#define ONLY_PLM \
593	(PERF_SAMPLE_BRANCH_USER	|\
594	 PERF_SAMPLE_BRANCH_KERNEL	|\
595	 PERF_SAMPLE_BRANCH_HV)
596
597	uint64_t *mode = (uint64_t *)opt->value;
598	const struct branch_mode *br;
599	char *s, *os = NULL, *p;
600	int ret = -1;
601
602	if (unset)
603		return 0;
604
605	/*
606	 * cannot set it twice, -b + --branch-filter for instance
607	 */
608	if (*mode)
609		return -1;
610
611	/* str may be NULL in case no arg is passed to -b */
612	if (str) {
613		/* because str is read-only */
614		s = os = strdup(str);
615		if (!s)
616			return -1;
617
618		for (;;) {
619			p = strchr(s, ',');
620			if (p)
621				*p = '\0';
622
623			for (br = branch_modes; br->name; br++) {
624				if (!strcasecmp(s, br->name))
625					break;
626			}
627			if (!br->name) {
628				ui__warning("unknown branch filter %s,"
629					    " check man page\n", s);
630				goto error;
631			}
632
633			*mode |= br->mode;
634
635			if (!p)
636				break;
637
638			s = p + 1;
639		}
640	}
641	ret = 0;
642
643	/* default to any branch */
644	if ((*mode & ~ONLY_PLM) == 0) {
645		*mode = PERF_SAMPLE_BRANCH_ANY;
646	}
647error:
648	free(os);
649	return ret;
650}
651
652#ifdef HAVE_DWARF_UNWIND_SUPPORT
653static int get_stack_size(char *str, unsigned long *_size)
654{
655	char *endptr;
656	unsigned long size;
657	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
658
659	size = strtoul(str, &endptr, 0);
660
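   	/* do { } while (0), so 'break' jumps to the shared error path below. */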
661	do {
662		if (*endptr)
663			break;
664
665		size = round_up(size, sizeof(u64));
666		if (!size || size > max_size)
667			break;
668
669		*_size = size;
670		return 0;
671
672	} while (0);
673
674	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
675	       max_size, str);
676	return -1;
677}
678#endif /* HAVE_DWARF_UNWIND_SUPPORT */
679
680int record_parse_callchain(const char *arg, struct record_opts *opts)
681{
682	char *tok, *name, *saveptr = NULL;
683	char *buf;
684	int ret = -1;
685
686	/* We need buffer that we know we can write to. */
687	buf = malloc(strlen(arg) + 1);
688	if (!buf)
689		return -ENOMEM;
690
691	strcpy(buf, arg);
692
693	tok = strtok_r((char *)buf, ",", &saveptr);
694	name = tok ? : (char *)buf;
695
696	do {
697		/* Framepointer style */
698		if (!strncmp(name, "fp", sizeof("fp"))) {
699			if (!strtok_r(NULL, ",", &saveptr)) {
700				opts->call_graph = CALLCHAIN_FP;
701				ret = 0;
702			} else
703				pr_err("callchain: No more arguments "
704				       "needed for -g fp\n");
705			break;
706
707#ifdef HAVE_DWARF_UNWIND_SUPPORT
708		/* Dwarf style */
709		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
710			const unsigned long default_stack_dump_size = 8192;
711
712			ret = 0;
713			opts->call_graph = CALLCHAIN_DWARF;
714			opts->stack_dump_size = default_stack_dump_size;
715
716			tok = strtok_r(NULL, ",", &saveptr);
717			if (tok) {
718				unsigned long size = 0;
719
720				ret = get_stack_size(tok, &size);
721				opts->stack_dump_size = size;
722			}
723#endif /* HAVE_DWARF_UNWIND_SUPPORT */
724		} else {
725			pr_err("callchain: Unknown --call-graph option "
726			       "value: %s\n", arg);
727			break;
728		}
729
730	} while (0);
731
732	free(buf);
733	return ret;
734}
735
736static void callchain_debug(struct record_opts *opts)
737{
738	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };
739
740	pr_debug("callchain: type %s\n", str[opts->call_graph]);
741
742	if (opts->call_graph == CALLCHAIN_DWARF)
743		pr_debug("callchain: stack dump size %d\n",
744			 opts->stack_dump_size);
745}
746
747int record_parse_callchain_opt(const struct option *opt,
748			       const char *arg,
749			       int unset)
750{
751	struct record_opts *opts = opt->value;
752	int ret;
753
754	opts->call_graph_enabled = !unset;
755
756	/* --no-call-graph */
757	if (unset) {
758		opts->call_graph = CALLCHAIN_NONE;
759		pr_debug("callchain: disabled\n");
760		return 0;
761	}
762
763	ret = record_parse_callchain(arg, opts);
764	if (!ret)
765		callchain_debug(opts);
766
767	return ret;
768}
769
770int record_callchain_opt(const struct option *opt,
771			 const char *arg __maybe_unused,
772			 int unset __maybe_unused)
773{
774	struct record_opts *opts = opt->value;
775
776	opts->call_graph_enabled = !unset;
777
778	if (opts->call_graph == CALLCHAIN_NONE)
779		opts->call_graph = CALLCHAIN_FP;
780
781	callchain_debug(opts);
782	return 0;
783}
784
785static int perf_record_config(const char *var, const char *value, void *cb)
786{
787	struct record *rec = cb;
788
789	if (!strcmp(var, "record.call-graph"))
790		return record_parse_callchain(value, &rec->opts);
791
792	return perf_default_config(var, value, cb);
793}
794
795static const char * const record_usage[] = {
796	"perf record [<options>] [<command>]",
797	"perf record [<options>] -- <command> [<options>]",
798	NULL
799};
800
801/*
802 * XXX Ideally would be local to cmd_record() and passed to a record__new
803 * because we need to have access to it in record__exit, that is called
804 * after cmd_record() exits, but since record_options need to be accessible to
805 * builtin-script, leave it here.
806 *
807 * At least we don't ouch it in all the other functions here directly.
808 *
809 * Just say no to tons of global variables, sigh.
810 */
811static struct record record = {
812	.opts = {
813		.mmap_pages	     = UINT_MAX,
814		.user_freq	     = UINT_MAX,
815		.user_interval	     = ULLONG_MAX,
816		.freq		     = 4000,
817		.target		     = {
818			.uses_mmap   = true,
819			.default_per_cpu = true,
820		},
821	},
822};
823
824#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "
825
826#ifdef HAVE_DWARF_UNWIND_SUPPORT
827const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
828#else
829const char record_callchain_help[] = CALLCHAIN_HELP "fp";
830#endif
831
832/*
833 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
834 * with it and switch to use the library functions in perf_evlist that came
835 * from builtin-record.c, i.e. use record_opts,
836 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
837 * using pipes, etc.
838 */
839const struct option record_options[] = {
840	OPT_CALLBACK('e', "event", &record.evlist, "event",
841		     "event selector. use 'perf list' to list available events",
842		     parse_events_option),
843	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
844		     "event filter", parse_filter),
845	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
846		    "record events on existing process id"),
847	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
848		    "record events on existing thread id"),
849	OPT_INTEGER('r', "realtime", &record.realtime_prio,
850		    "collect data with this RT SCHED_FIFO priority"),
851	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
852		    "collect data without buffering"),
853	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
854		    "collect raw sample records from all opened counters"),
855	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
856			    "system-wide collection from all CPUs"),
857	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
858		    "list of cpus to monitor"),
859	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
860	OPT_STRING('o', "output", &record.file.path, "file",
861		    "output file name"),
862	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
863			&record.opts.no_inherit_set,
864			"child tasks do not inherit counters"),
865	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
866	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
867		     "number of mmap data pages",
868		     perf_evlist__parse_mmap_pages),
869	OPT_BOOLEAN(0, "group", &record.opts.group,
870		    "put the counters into a counter group"),
871	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
872			   NULL, "enables call-graph recording" ,
873			   &record_callchain_opt),
874	OPT_CALLBACK(0, "call-graph", &record.opts,
875		     "mode[,dump_size]", record_callchain_help,
876		     &record_parse_callchain_opt),
877	OPT_INCR('v', "verbose", &verbose,
878		    "be more verbose (show counter open errors, etc)"),
879	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
880	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
881		    "per thread counts"),
882	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
883		    "Sample addresses"),
884	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
885	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
886	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
887		    "don't sample"),
888	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
889		    "do not update the buildid cache"),
890	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
891		    "do not collect buildids in perf.data"),
892	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
893		     "monitor event in cgroup name only",
894		     parse_cgroups),
895	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
896		  "ms to wait before starting measurement after program start"),
897	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
898		   "user to profile"),
899
900	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
901		     "branch any", "sample any taken branches",
902		     parse_branch_stack),
903
904	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
905		     "branch filter mask", "branch stack filter modes",
906		     parse_branch_stack),
907	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
908		    "sample by weight (on special events only)"),
909	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
910		    "sample transaction flags (special events only)"),
911	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
912		    "use per-thread mmaps"),
913	OPT_END()
914};
915
916int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
917{
918	int err = -ENOMEM;
919	struct record *rec = &record;
920	char errbuf[BUFSIZ];
921
922	rec->evlist = perf_evlist__new();
923	if (rec->evlist == NULL)
924		return -ENOMEM;
925
926	perf_config(perf_record_config, rec);
927
928	argc = parse_options(argc, argv, record_options, record_usage,
929			    PARSE_OPT_STOP_AT_NON_OPTION);
930	if (!argc && target__none(&rec->opts.target))
931		usage_with_options(record_usage, record_options);
932
933	if (nr_cgroups && !rec->opts.target.system_wide) {
934		ui__error("cgroup monitoring only available in"
935			  " system-wide mode\n");
936		usage_with_options(record_usage, record_options);
937	}
938
939	symbol__init();
940
941	if (symbol_conf.kptr_restrict)
942		pr_warning(
943"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
944"check /proc/sys/kernel/kptr_restrict.\n\n"
945"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
946"file is not found in the buildid cache or in the vmlinux path.\n\n"
947"Samples in kernel modules won't be resolved at all.\n\n"
948"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
949"even with a suitable vmlinux or kallsyms file.\n\n");
950
951	if (rec->no_buildid_cache || rec->no_buildid)
952		disable_buildid_cache();
953
954	if (rec->evlist->nr_entries == 0 &&
955	    perf_evlist__add_default(rec->evlist) < 0) {
956		pr_err("Not enough memory for event selector list\n");
957		goto out_symbol_exit;
958	}
959
960	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
961		rec->opts.no_inherit = true;
962
963	err = target__validate(&rec->opts.target);
964	if (err) {
965		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
966		ui__warning("%s", errbuf);
967	}
968
969	err = target__parse_uid(&rec->opts.target);
970	if (err) {
971		int saved_errno = errno;
972
973		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
974		ui__error("%s", errbuf);
975
976		err = -saved_errno;
977		goto out_symbol_exit;
978	}
979
980	err = -ENOMEM;
981	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
982		usage_with_options(record_usage, record_options);
983
984	if (record_opts__config(&rec->opts)) {
985		err = -EINVAL;
986		goto out_symbol_exit;
987	}
988
989	err = __cmd_record(&record, argc, argv);
990out_symbol_exit:
991	symbol__exit();
992	return err;
993}
v4.6
   1/*
   2 * builtin-record.c
   3 *
   4 * Builtin record command: Record the profile of a workload
   5 * (or a CPU, or a PID) into the perf.data output file - for
   6 * later analysis via perf report.
   7 */
   8#include "builtin.h"
   9
  10#include "perf.h"
  11
  12#include "util/build-id.h"
  13#include "util/util.h"
  14#include <subcmd/parse-options.h>
  15#include "util/parse-events.h"
  16
  17#include "util/callchain.h"
  18#include "util/cgroup.h"
  19#include "util/header.h"
  20#include "util/event.h"
  21#include "util/evlist.h"
  22#include "util/evsel.h"
  23#include "util/debug.h"
  24#include "util/session.h"
  25#include "util/tool.h"
  26#include "util/symbol.h"
  27#include "util/cpumap.h"
  28#include "util/thread_map.h"
  29#include "util/data.h"
  30#include "util/perf_regs.h"
  31#include "util/auxtrace.h"
  32#include "util/parse-branch-options.h"
  33#include "util/parse-regs-options.h"
  34#include "util/llvm-utils.h"
  35#include "util/bpf-loader.h"
  36#include "asm/bug.h"
  37
  38#include <unistd.h>
  39#include <sched.h>
  40#include <sys/mman.h>
  41
  42
  43struct record {
  44	struct perf_tool	tool;
  45	struct record_opts	opts;
  46	u64			bytes_written;
  47	struct perf_data_file	file;
  48	struct auxtrace_record	*itr;
  49	struct perf_evlist	*evlist;
  50	struct perf_session	*session;
  51	const char		*progname;
  52	int			realtime_prio;
  53	bool			no_buildid;
  54	bool			no_buildid_set;
  55	bool			no_buildid_cache;
  56	bool			no_buildid_cache_set;
  57	bool			buildid_all;
  58	unsigned long long	samples;
  59};
  60
  61static int record__write(struct record *rec, void *bf, size_t size)
  62{
  63	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
  64		pr_err("failed to write perf data, error: %m\n");
  65		return -1;
  66	}
  67
  68	rec->bytes_written += size;
  69	return 0;
  70}
  71
  72static int process_synthesized_event(struct perf_tool *tool,
  73				     union perf_event *event,
  74				     struct perf_sample *sample __maybe_unused,
  75				     struct machine *machine __maybe_unused)
  76{
  77	struct record *rec = container_of(tool, struct record, tool);
  78	return record__write(rec, event, event->header.size);
  79}
  80
  81static int record__mmap_read(struct record *rec, int idx)
  82{
  83	struct perf_mmap *md = &rec->evlist->mmap[idx];
  84	u64 head = perf_mmap__read_head(md);
  85	u64 old = md->prev;
  86	unsigned char *data = md->base + page_size;
  87	unsigned long size;
  88	void *buf;
  89	int rc = 0;
  90
  91	if (old == head)
  92		return 0;
  93
  94	rec->samples++;
  95
  96	size = head - old;
  97
  98	if ((old & md->mask) + size != (head & md->mask)) {
  99		buf = &data[old & md->mask];
 100		size = md->mask + 1 - (old & md->mask);
 101		old += size;
 102
 103		if (record__write(rec, buf, size) < 0) {
 104			rc = -1;
 105			goto out;
 106		}
 107	}
 108
 109	buf = &data[old & md->mask];
 110	size = head - old;
 111	old += size;
 112
 113	if (record__write(rec, buf, size) < 0) {
 114		rc = -1;
 115		goto out;
 116	}
 117
 118	md->prev = old;
 119	perf_evlist__mmap_consume(rec->evlist, idx);
 120out:
 121	return rc;
 122}
 123
 124static volatile int done;
 125static volatile int signr = -1;
 126static volatile int child_finished;
 127static volatile int auxtrace_snapshot_enabled;
 128static volatile int auxtrace_snapshot_err;
 129static volatile int auxtrace_record__snapshot_started;
 130
 131static void sig_handler(int sig)
 132{
 133	if (sig == SIGCHLD)
 134		child_finished = 1;
 135	else
 136		signr = sig;
 137
 138	done = 1;
 139}
 140
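    /*
     * Restore the default disposition and re-raise, so the caller sees
     * the process as killed by the signal instead of exiting normally.
     */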
 141static void record__sig_exit(void)
 142{
 143	if (signr == -1)
 144		return;
 145
 146	signal(signr, SIG_DFL);
 147	raise(signr);
 148}
 149
 150#ifdef HAVE_AUXTRACE_SUPPORT
 151
 152static int record__process_auxtrace(struct perf_tool *tool,
 153				    union perf_event *event, void *data1,
 154				    size_t len1, void *data2, size_t len2)
 155{
 156	struct record *rec = container_of(tool, struct record, tool);
 157	struct perf_data_file *file = &rec->file;
 158	size_t padding;
 159	u8 pad[8] = {0};
 160
 161	if (!perf_data_file__is_pipe(file)) {
 162		off_t file_offset;
 163		int fd = perf_data_file__fd(file);
 164		int err;
 165
 166		file_offset = lseek(fd, 0, SEEK_CUR);
 167		if (file_offset == -1)
 168			return -1;
 169		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
 170						     event, file_offset);
 171		if (err)
 172			return err;
 173	}
 174
 175	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
 176	padding = (len1 + len2) & 7;
 177	if (padding)
 178		padding = 8 - padding;
 179
 180	record__write(rec, event, event->header.size);
 181	record__write(rec, data1, len1);
 182	if (len2)
 183		record__write(rec, data2, len2);
 184	record__write(rec, &pad, padding);
 185
 186	return 0;
 187}
 188
 189static int record__auxtrace_mmap_read(struct record *rec,
 190				      struct auxtrace_mmap *mm)
 191{
 192	int ret;
 193
 194	ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
 195				  record__process_auxtrace);
 196	if (ret < 0)
 197		return ret;
 198
 199	if (ret)
 200		rec->samples++;
 201
 202	return 0;
 203}
 204
 205static int record__auxtrace_mmap_read_snapshot(struct record *rec,
 206					       struct auxtrace_mmap *mm)
 207{
 208	int ret;
 209
 210	ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
 211					   record__process_auxtrace,
 212					   rec->opts.auxtrace_snapshot_size);
 213	if (ret < 0)
 214		return ret;
 215
 216	if (ret)
 217		rec->samples++;
 218
 219	return 0;
 220}
 221
 222static int record__auxtrace_read_snapshot_all(struct record *rec)
 223{
 224	int i;
 225	int rc = 0;
 226
 227	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
 228		struct auxtrace_mmap *mm =
 229				&rec->evlist->mmap[i].auxtrace_mmap;
 230
 231		if (!mm->base)
 232			continue;
 233
 234		if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
 235			rc = -1;
 236			goto out;
 237		}
 238	}
 239out:
 240	return rc;
 241}
 242
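    /*
     * Invoked from the main loop once a snapshot has been requested (via
     * SIGUSR2): drain every AUX area mmap, then let the recorder
     * finalize the snapshot.
     */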
 243static void record__read_auxtrace_snapshot(struct record *rec)
 244{
 245	pr_debug("Recording AUX area tracing snapshot\n");
 246	if (record__auxtrace_read_snapshot_all(rec) < 0) {
 247		auxtrace_snapshot_err = -1;
 248	} else {
 249		auxtrace_snapshot_err = auxtrace_record__snapshot_finish(rec->itr);
 250		if (!auxtrace_snapshot_err)
 251			auxtrace_snapshot_enabled = 1;
 252	}
 253}
 254
 255#else
 256
 257static inline
 258int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
 259			       struct auxtrace_mmap *mm __maybe_unused)
 260{
 261	return 0;
 262}
 263
 264static inline
 265void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
 266{
 267}
 268
 269static inline
 270int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
 271{
 272	return 0;
 273}
 274
 275#endif
 276
 277static int record__open(struct record *rec)
 278{
 279	char msg[512];
 280	struct perf_evsel *pos;
 281	struct perf_evlist *evlist = rec->evlist;
 282	struct perf_session *session = rec->session;
 283	struct record_opts *opts = &rec->opts;
 284	int rc = 0;
 285
 286	perf_evlist__config(evlist, opts);
 287
 288	evlist__for_each(evlist, pos) {
 289try_again:
 290		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
 291			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
 292				if (verbose)
 293					ui__warning("%s\n", msg);
 294				goto try_again;
 295			}
 296
 297			rc = -errno;
 298			perf_evsel__open_strerror(pos, &opts->target,
 299						  errno, msg, sizeof(msg));
 300			ui__error("%s\n", msg);
 301			goto out;
 302		}
 303	}
 304
 305	if (perf_evlist__apply_filters(evlist, &pos)) {
 306		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
 307			pos->filter, perf_evsel__name(pos), errno,
 308			strerror_r(errno, msg, sizeof(msg)));
 309		rc = -1;
 310		goto out;
 311	}
 312
 313	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
 314				 opts->auxtrace_mmap_pages,
 315				 opts->auxtrace_snapshot_mode) < 0) {
 316		if (errno == EPERM) {
 317			pr_err("Permission error mapping pages.\n"
 318			       "Consider increasing "
 319			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
 320			       "or try again with a smaller value of -m/--mmap_pages.\n"
 321			       "(current value: %u,%u)\n",
 322			       opts->mmap_pages, opts->auxtrace_mmap_pages);
 323			rc = -errno;
 324		} else {
 325			pr_err("failed to mmap with %d (%s)\n", errno,
 326				strerror_r(errno, msg, sizeof(msg)));
 327			if (errno)
 328				rc = -errno;
 329			else
 330				rc = -EINVAL;
 331		}
 332		goto out;
 333	}
 334
 335	session->evlist = evlist;
 336	perf_session__set_id_hdr_size(session);
 337out:
 338	return rc;
 339}
 340
 341static int process_sample_event(struct perf_tool *tool,
 342				union perf_event *event,
 343				struct perf_sample *sample,
 344				struct perf_evsel *evsel,
 345				struct machine *machine)
 346{
 347	struct record *rec = container_of(tool, struct record, tool);
 348
 349	rec->samples++;
 350
 351	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
 352}
 353
 354static int process_buildids(struct record *rec)
 355{
 356	struct perf_data_file *file  = &rec->file;
 357	struct perf_session *session = rec->session;
 358
 359	if (file->size == 0)
 360		return 0;
 361
 362	/*
 363	 * During this process, it'll load kernel map and replace the
 364	 * dso->long_name to a real pathname it found.  In this case
 365	 * we prefer the vmlinux path like
 366	 *   /lib/modules/3.16.4/build/vmlinux
 367	 *
 368	 * rather than build-id path (in debug directory).
 369	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
 370	 */
 371	symbol_conf.ignore_vmlinux_buildid = true;
 372
 373	/*
 374	 * If --buildid-all is given, it marks all DSO regardless of hits,
 375	 * so no need to process samples.
 376	 */
 377	if (rec->buildid_all)
 378		rec->tool.sample = NULL;
 379
 380	return perf_session__process_events(session);
 381}
 382
 383static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
 384{
 385	int err;
 386	struct perf_tool *tool = data;
 387	/*
 388	 * As for the guest kernel, when processing the record & report
 389	 * subcommands we arrange the module mmaps prior to the guest kernel
 390	 * mmap and trigger a dso preload, because default guest module
 391	 * symbols are loaded from guest kallsyms instead of
 392	 * /lib/modules/XXX/XXX. This avoids missing symbols when the first
 393	 * address is in a module instead of in the guest kernel.
 394	 */
 395	err = perf_event__synthesize_modules(tool, process_synthesized_event,
 396					     machine);
 397	if (err < 0)
 398		pr_err("Couldn't record guest kernel [%d]'s reference"
 399		       " relocation symbol.\n", machine->pid);
 400
 401	/*
 402	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
 403	 * have no _text sometimes.
 404	 */
 405	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
 406						 machine);
 407	if (err < 0)
 408		pr_err("Couldn't record guest kernel [%d]'s reference"
 409		       " relocation symbol.\n", machine->pid);
 410}
 411
 412static struct perf_event_header finished_round_event = {
 413	.size = sizeof(struct perf_event_header),
 414	.type = PERF_RECORD_FINISHED_ROUND,
 415};
 416
 417static int record__mmap_read_all(struct record *rec)
 418{
 419	u64 bytes_written = rec->bytes_written;
 420	int i;
 421	int rc = 0;
 422
 423	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
 424		struct auxtrace_mmap *mm = &rec->evlist->mmap[i].auxtrace_mmap;
 425
 426		if (rec->evlist->mmap[i].base) {
 427			if (record__mmap_read(rec, i) != 0) {
 428				rc = -1;
 429				goto out;
 430			}
 431		}
 432
 433		if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
 434		    record__auxtrace_mmap_read(rec, mm) != 0) {
 435			rc = -1;
 436			goto out;
 437		}
 438	}
 439
 440	/*
 441	 * Mark the round finished in case we wrote
 442	 * at least one event.
 443	 */
 444	if (bytes_written != rec->bytes_written)
 445		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
 446
 447out:
 448	return rc;
 449}
 450
 451static void record__init_features(struct record *rec)
 452{
 453	struct perf_session *session = rec->session;
 454	int feat;
 455
 456	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
 457		perf_header__set_feat(&session->header, feat);
 458
 459	if (rec->no_buildid)
 460		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
 461
 462	if (!have_tracepoints(&rec->evlist->entries))
 463		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
 464
 465	if (!rec->opts.branch_stack)
 466		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
 467
 468	if (!rec->opts.full_auxtrace)
 469		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
 470
 471	perf_header__clear_feat(&session->header, HEADER_STAT);
 472}
 473
 474static void
 475record__finish_output(struct record *rec)
 476{
 477	struct perf_data_file *file = &rec->file;
 478	int fd = perf_data_file__fd(file);
 479
 480	if (file->is_pipe)
 481		return;
 482
 483	rec->session->header.data_size += rec->bytes_written;
 484	file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
 485
 486	if (!rec->no_buildid) {
 487		process_buildids(rec);
 488
 489		if (rec->buildid_all)
 490			dsos__hit_all(rec->session);
 491	}
 492	perf_session__write_header(rec->session, rec->evlist, fd, true);
 493
 494	return;
 495}
 496
 497static volatile int workload_exec_errno;
 498
 499/*
 500 * perf_evlist__prepare_workload will send a SIGUSR1
 501 * if the fork fails, since we asked by setting its
 502 * want_signal to true.
 503 */
 504static void workload_exec_failed_signal(int signo __maybe_unused,
 505					siginfo_t *info,
 506					void *ucontext __maybe_unused)
 507{
 508	workload_exec_errno = info->si_value.sival_int;
 509	done = 1;
 510	child_finished = 1;
 511}
 512
 513static void snapshot_sig_handler(int sig);
 514
 515static int record__synthesize(struct record *rec)
 516{
 517	struct perf_session *session = rec->session;
 518	struct machine *machine = &session->machines.host;
 519	struct perf_data_file *file = &rec->file;
 520	struct record_opts *opts = &rec->opts;
 521	struct perf_tool *tool = &rec->tool;
 522	int fd = perf_data_file__fd(file);
 523	int err = 0;
 524
 525	if (file->is_pipe) {
 526		err = perf_event__synthesize_attrs(tool, session,
 527						   process_synthesized_event);
 528		if (err < 0) {
 529			pr_err("Couldn't synthesize attrs.\n");
 530			goto out;
 531		}
 532
 533		if (have_tracepoints(&rec->evlist->entries)) {
 534			/*
 535			 * FIXME err <= 0 here actually means that
 536			 * there were no tracepoints so it's not really
 537			 * an error, just that we don't need to
 538			 * synthesize anything.  We really have to
 539			 * return this more properly and also
 540			 * propagate errors that now are calling die()
 541			 */
 542			err = perf_event__synthesize_tracing_data(tool,	fd, rec->evlist,
 543								  process_synthesized_event);
 544			if (err <= 0) {
 545				pr_err("Couldn't record tracing data.\n");
 546				goto out;
 547			}
 548			rec->bytes_written += err;
 549		}
 550	}
 551
 552	if (rec->opts.full_auxtrace) {
 553		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
 554					session, process_synthesized_event);
 555		if (err)
 556			goto out;
 557	}
 558
 559	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
 560						 machine);
 561	WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
 562			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
 563			   "Check /proc/kallsyms permission or run as root.\n");
 564
 565	err = perf_event__synthesize_modules(tool, process_synthesized_event,
 566					     machine);
 567	WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
 568			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
 569			   "Check /proc/modules permission or run as root.\n");
 570
 571	if (perf_guest) {
 572		machines__process_guests(&session->machines,
 573					 perf_event__synthesize_guest_os, tool);
 574	}
 575
 576	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
 577					    process_synthesized_event, opts->sample_address,
 578					    opts->proc_map_timeout);
 579out:
 580	return err;
 581}
 582
 583static int __cmd_record(struct record *rec, int argc, const char **argv)
 584{
 585	int err;
 586	int status = 0;
 587	unsigned long waking = 0;
 588	const bool forks = argc > 0;
 589	struct machine *machine;
 590	struct perf_tool *tool = &rec->tool;
 591	struct record_opts *opts = &rec->opts;
 592	struct perf_data_file *file = &rec->file;
 593	struct perf_session *session;
 594	bool disabled = false, draining = false;
 595	int fd;
 596
 597	rec->progname = argv[0];
 598
 599	atexit(record__sig_exit);
 600	signal(SIGCHLD, sig_handler);
 601	signal(SIGINT, sig_handler);
 602	signal(SIGTERM, sig_handler);
 603	if (rec->opts.auxtrace_snapshot_mode)
 604		signal(SIGUSR2, snapshot_sig_handler);
 605	else
 606		signal(SIGUSR2, SIG_IGN);
 607
 608	session = perf_session__new(file, false, tool);
 609	if (session == NULL) {
 610		pr_err("Perf session creation failed.\n");
 611		return -1;
 612	}
 613
 614	fd = perf_data_file__fd(file);
 615	rec->session = session;
 616
 617	record__init_features(rec);
 618
 619	if (forks) {
 620		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
 621						    argv, file->is_pipe,
 622						    workload_exec_failed_signal);
 623		if (err < 0) {
 624			pr_err("Couldn't run the workload!\n");
 625			status = err;
 626			goto out_delete_session;
 627		}
 628	}
 629
 630	if (record__open(rec) != 0) {
 631		err = -1;
 632		goto out_child;
 633	}
 634
 635	err = bpf__apply_obj_config();
 636	if (err) {
 637		char errbuf[BUFSIZ];
 638
 639		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
 640		pr_err("ERROR: Apply config to BPF failed: %s\n",
 641			 errbuf);
 642		goto out_child;
 643	}
 644
 645	/*
 646	 * Normally perf_session__new would do this, but it doesn't have the
 647	 * evlist.
 648	 */
 649	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
 650		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
 651		rec->tool.ordered_events = false;
 652	}
 653
 654	if (!rec->evlist->nr_groups)
 655		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
 656
 657	if (file->is_pipe) {
 658		err = perf_header__write_pipe(fd);
 659		if (err < 0)
 660			goto out_child;
 661	} else {
 662		err = perf_session__write_header(session, rec->evlist, fd, false);
 663		if (err < 0)
 664			goto out_child;
 665	}
 666
 667	if (!rec->no_buildid
 668	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
 669		pr_err("Couldn't generate buildids. "
 670		       "Use --no-buildid to profile anyway.\n");
 671		err = -1;
 672		goto out_child;
 673	}
 674
 675	machine = &session->machines.host;
 676
 677	err = record__synthesize(rec);
 678	if (err < 0)
 679		goto out_child;
 680
 681	if (rec->realtime_prio) {
 682		struct sched_param param;
 683
 684		param.sched_priority = rec->realtime_prio;
 685		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
 686			pr_err("Could not set realtime priority.\n");
 687			err = -1;
 688			goto out_child;
 689		}
 690	}
 691
 692	/*
 693	 * When perf is starting the traced process, all the events
 694	 * (apart from group members) have enable_on_exec=1 set,
 695	 * so don't spoil it by prematurely enabling them.
 696	 */
 697	if (!target__none(&opts->target) && !opts->initial_delay)
 698		perf_evlist__enable(rec->evlist);
 699
 700	/*
 701	 * Let the child rip
 702	 */
 703	if (forks) {
 704		union perf_event *event;
 705
 706		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
 707		if (event == NULL) {
 708			err = -ENOMEM;
 709			goto out_child;
 710		}
 711
 712		/*
 713		 * Some H/W events are generated before COMM event
 714		 * which is emitted during exec(), so perf script
 715		 * cannot see a correct process name for those events.
 716		 * Synthesize COMM event to prevent it.
 717		 */
 718		perf_event__synthesize_comm(tool, event,
 719					    rec->evlist->workload.pid,
 720					    process_synthesized_event,
 721					    machine);
 722		free(event);
 723
 724		perf_evlist__start_workload(rec->evlist);
 725	}
 726
 727	if (opts->initial_delay) {
 728		usleep(opts->initial_delay * 1000);
 729		perf_evlist__enable(rec->evlist);
 730	}
 731
 732	auxtrace_snapshot_enabled = 1;
 733	for (;;) {
 734		unsigned long long hits = rec->samples;
 735
 736		if (record__mmap_read_all(rec) < 0) {
 737			auxtrace_snapshot_enabled = 0;
 738			err = -1;
 739			goto out_child;
 740		}
 741
 742		if (auxtrace_record__snapshot_started) {
 743			auxtrace_record__snapshot_started = 0;
 744			if (!auxtrace_snapshot_err)
 745				record__read_auxtrace_snapshot(rec);
 746			if (auxtrace_snapshot_err) {
 747				pr_err("AUX area tracing snapshot failed\n");
 748				err = -1;
 749				goto out_child;
 750			}
 751		}
 752
 753		if (hits == rec->samples) {
 754			if (done || draining)
 755				break;
 756			err = perf_evlist__poll(rec->evlist, -1);
 757			/*
 758			 * Propagate error, only if there's any. Ignore positive
 759			 * number of returned events and interrupt error.
 760			 */
 761			if (err > 0 || (err < 0 && errno == EINTR))
 762				err = 0;
 763			waking++;
 764
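    			/*
    			 * Once every mapped fd has signalled POLLERR/POLLHUP
    			 * the workload is gone: drain what is left, then stop.
    			 */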
 765			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
 766				draining = true;
 767		}
 768
 769		/*
 770		 * When perf is starting the traced process, at the end events
 771		 * die with the process and we wait for that. Thus no need to
 772		 * disable events in this case.
 773		 */
 774		if (done && !disabled && !target__none(&opts->target)) {
 775			auxtrace_snapshot_enabled = 0;
 776			perf_evlist__disable(rec->evlist);
 777			disabled = true;
 778		}
 779	}
 780	auxtrace_snapshot_enabled = 0;
 781
 782	if (forks && workload_exec_errno) {
 783		char msg[STRERR_BUFSIZE];
 784		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
 785		pr_err("Workload failed: %s\n", emsg);
 786		err = -1;
 787		goto out_child;
 788	}
 789
 790	if (!quiet)
 791		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
 792
 793out_child:
 794	if (forks) {
 795		int exit_status;
 796
 797		if (!child_finished)
 798			kill(rec->evlist->workload.pid, SIGTERM);
 799
 800		wait(&exit_status);
 801
 802		if (err < 0)
 803			status = err;
 804		else if (WIFEXITED(exit_status))
 805			status = WEXITSTATUS(exit_status);
 806		else if (WIFSIGNALED(exit_status))
 807			signr = WTERMSIG(exit_status);
 808	} else
 809		status = err;
 810
 811	/* this will be recalculated during process_buildids() */
 812	rec->samples = 0;
 813
 814	if (!err)
 815		record__finish_output(rec);
 816
 817	if (!err && !quiet) {
 818		char samples[128];
 819
 820		if (rec->samples && !rec->opts.full_auxtrace)
 821			scnprintf(samples, sizeof(samples),
 822				  " (%" PRIu64 " samples)", rec->samples);
 823		else
 824			samples[0] = '\0';
 825
 826		fprintf(stderr,	"[ perf record: Captured and wrote %.3f MB %s%s ]\n",
 827			perf_data_file__size(file) / 1024.0 / 1024.0,
 828			file->path, samples);
 829	}
 830
 831out_delete_session:
 832	perf_session__delete(session);
 833	return status;
 834}
 835
 836static void callchain_debug(void)
 837{
 838	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
 839
 840	pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);
 841
 842	if (callchain_param.record_mode == CALLCHAIN_DWARF)
 843		pr_debug("callchain: stack dump size %d\n",
 844			 callchain_param.dump_size);
 845}
 846
 847int record_parse_callchain_opt(const struct option *opt,
 848			       const char *arg,
 849			       int unset)
 850{
 851	int ret;
 852	struct record_opts *record = (struct record_opts *)opt->value;
 853
 854	record->callgraph_set = true;
 855	callchain_param.enabled = !unset;
 856
 857	/* --no-call-graph */
 858	if (unset) {
 859		callchain_param.record_mode = CALLCHAIN_NONE;
 860		pr_debug("callchain: disabled\n");
 861		return 0;
 862	}
 863
 864	ret = parse_callchain_record_opt(arg, &callchain_param);
 865	if (!ret) {
 866		/* Enable data address sampling for DWARF unwind. */
 867		if (callchain_param.record_mode == CALLCHAIN_DWARF)
 868			record->sample_address = true;
 869		callchain_debug();
 870	}
 871
 872	return ret;
 873}
 874
 875int record_callchain_opt(const struct option *opt,
 876			 const char *arg __maybe_unused,
 877			 int unset __maybe_unused)
 878{
 879	struct record_opts *record = (struct record_opts *)opt->value;
 880
 881	record->callgraph_set = true;
 882	callchain_param.enabled = true;
 883
 884	if (callchain_param.record_mode == CALLCHAIN_NONE)
 885		callchain_param.record_mode = CALLCHAIN_FP;
 886
 887	callchain_debug();
 888	return 0;
 889}
 890
 891static int perf_record_config(const char *var, const char *value, void *cb)
 892{
 893	struct record *rec = cb;
 894
 895	if (!strcmp(var, "record.build-id")) {
 896		if (!strcmp(value, "cache"))
 897			rec->no_buildid_cache = false;
 898		else if (!strcmp(value, "no-cache"))
 899			rec->no_buildid_cache = true;
 900		else if (!strcmp(value, "skip"))
 901			rec->no_buildid = true;
 902		else
 903			return -1;
 904		return 0;
 905	}
 906	if (!strcmp(var, "record.call-graph"))
 907		var = "call-graph.record-mode"; /* fall-through */
 908
 909	return perf_default_config(var, value, cb);
 910}
 911
 912struct clockid_map {
 913	const char *name;
 914	int clockid;
 915};
 916
 917#define CLOCKID_MAP(n, c)	\
 918	{ .name = n, .clockid = (c), }
 919
 920#define CLOCKID_END	{ .name = NULL, }
 921
 922
 923/*
 924 * Add the missing ones, we need to build on many distros...
 925 */
 926#ifndef CLOCK_MONOTONIC_RAW
 927#define CLOCK_MONOTONIC_RAW 4
 928#endif
 929#ifndef CLOCK_BOOTTIME
 930#define CLOCK_BOOTTIME 7
 931#endif
 932#ifndef CLOCK_TAI
 933#define CLOCK_TAI 11
 934#endif
 935
 936static const struct clockid_map clockids[] = {
 937	/* available for all events, NMI safe */
 938	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
 939	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
 940
 941	/* available for some events */
 942	CLOCKID_MAP("realtime", CLOCK_REALTIME),
 943	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
 944	CLOCKID_MAP("tai", CLOCK_TAI),
 945
 946	/* available for the lazy */
 947	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
 948	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
 949	CLOCKID_MAP("real", CLOCK_REALTIME),
 950	CLOCKID_MAP("boot", CLOCK_BOOTTIME),
 951
 952	CLOCKID_END,
 953};
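/*
 * Illustrative invocations (a sketch): parse_clockid() below accepts any
 * alias above, with or without a CLOCK_ prefix, or a raw numeric id, so
 * these all request CLOCK_MONOTONIC_RAW (id 4):
 *
 *   perf record -k monotonic_raw ./workload
 *   perf record -k CLOCK_MONOTONIC_RAW ./workload
 *   perf record -k raw ./workload
 *   perf record -k 4 ./workload
 */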
 954
 955static int parse_clockid(const struct option *opt, const char *str, int unset)
 956{
 957	struct record_opts *opts = (struct record_opts *)opt->value;
 958	const struct clockid_map *cm;
 959	const char *ostr = str;
 960
 961	if (unset) {
 962		opts->use_clockid = 0;
 963		return 0;
 964	}
 965
 966	/* no arg passed */
 967	if (!str)
 968		return 0;
 969
 970	/* no setting it twice */
 971	if (opts->use_clockid)
 972		return -1;
 973
 974	opts->use_clockid = true;
 975
 976	/* if it's a number, we're done */
 977	if (sscanf(str, "%d", &opts->clockid) == 1)
 978		return 0;
 979
 980	/* allow a "CLOCK_" prefix to the name */
 981	if (!strncasecmp(str, "CLOCK_", 6))
 982		str += 6;
 983
 984	for (cm = clockids; cm->name; cm++) {
 985		if (!strcasecmp(str, cm->name)) {
 986			opts->clockid = cm->clockid;
 987			return 0;
 988		}
 989	}
 990
 991	opts->use_clockid = false;
 992	ui__warning("unknown clockid %s, check man page\n", ostr);
 993	return -1;
 994}
 995
 996static int record__parse_mmap_pages(const struct option *opt,
 997				    const char *str,
 998				    int unset __maybe_unused)
 999{
1000	struct record_opts *opts = opt->value;
1001	char *s, *p;
1002	unsigned int mmap_pages;
1003	int ret;
1004
1005	if (!str)
1006		return -EINVAL;
1007
1008	s = strdup(str);
1009	if (!s)
1010		return -ENOMEM;
1011
1012	p = strchr(s, ',');
1013	if (p)
1014		*p = '\0';
1015
1016	if (*s) {
1017		ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1018		if (ret)
1019			goto out_free;
1020		opts->mmap_pages = mmap_pages;
1021	}
1022
1023	if (!p) {
1024		ret = 0;
1025		goto out_free;
1026	}
1027
1028	ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1029	if (ret)
1030		goto out_free;
1031
1032	opts->auxtrace_mmap_pages = mmap_pages;
1033
1034out_free:
1035	free(s);
1036	return ret;
1037}
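/*
 * Illustrative invocations (a sketch): the comma separates data pages
 * from AUX area tracing pages, and either side may be omitted to keep
 * its default:
 *
 *   perf record -m 512 ./workload      # 512 data pages, default AUX
 *   perf record -m 512,128 ./workload  # 512 data pages, 128 AUX pages
 *   perf record -m ,128 ./workload     # default data pages, 128 AUX pages
 */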
1038
1039static const char * const __record_usage[] = {
1040	"perf record [<options>] [<command>]",
1041	"perf record [<options>] -- <command> [<options>]",
1042	NULL
1043};
1044const char * const *record_usage = __record_usage;
1045
1046/*
1047 * XXX Ideally this would be local to cmd_record() and passed to a record__new(),
1048 * because we need access to it in record__exit(), which is called
1049 * after cmd_record() exits, but since record_options needs to be accessible to
1050 * builtin-script, leave it here.
1051 *
1052 * At least we don't touch it in all the other functions here directly.
1053 *
1054 * Just say no to tons of global variables, sigh.
1055 */
1056static struct record record = {
1057	.opts = {
1058		.sample_time	     = true,
1059		.mmap_pages	     = UINT_MAX,
1060		.user_freq	     = UINT_MAX,
1061		.user_interval	     = ULLONG_MAX,
1062		.freq		     = 4000,
1063		.target		     = {
1064			.uses_mmap   = true,
1065			.default_per_cpu = true,
1066		},
1067		.proc_map_timeout     = 500,
1068	},
1069	.tool = {
1070		.sample		= process_sample_event,
1071		.fork		= perf_event__process_fork,
1072		.exit		= perf_event__process_exit,
1073		.comm		= perf_event__process_comm,
1074		.mmap		= perf_event__process_mmap,
1075		.mmap2		= perf_event__process_mmap2,
1076		.ordered_events	= true,
1077	},
1078};
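/*
 * Roughly speaking (an approximation, not from the original source),
 * these defaults make a bare 'perf record ./workload' behave like
 * 'perf record -F 4000 -T ./workload'; user_freq and user_interval keep
 * their UINT_MAX/ULLONG_MAX "unset" markers until -F or -c overrides them.
 */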
1079
1080const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
1081	"\n\t\t\t\tDefault: fp";
1082
1083/*
1084 * XXX Will stay a global variable until we fix builtin-script.c to stop messing
1085 * with it and switch to using the library functions in perf_evlist that came
1086 * from builtin-record.c, i.e. use record_opts,
1087 * perf_evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record'
1088 * and using pipes, etc.
1089 */
1090struct option __record_options[] = {
1091	OPT_CALLBACK('e', "event", &record.evlist, "event",
1092		     "event selector. use 'perf list' to list available events",
1093		     parse_events_option),
1094	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
1095		     "event filter", parse_filter),
1096	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1097			   NULL, "don't record events from perf itself",
1098			   exclude_perf),
1099	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
1100		    "record events on existing process id"),
1101	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
1102		    "record events on existing thread id"),
1103	OPT_INTEGER('r', "realtime", &record.realtime_prio,
1104		    "collect data with this RT SCHED_FIFO priority"),
1105	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
1106		    "collect data without buffering"),
1107	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
1108		    "collect raw sample records from all opened counters"),
1109	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
1110			    "system-wide collection from all CPUs"),
1111	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
1112		    "list of cpus to monitor"),
1113	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
1114	OPT_STRING('o', "output", &record.file.path, "file",
1115		    "output file name"),
1116	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1117			&record.opts.no_inherit_set,
1118			"child tasks do not inherit counters"),
1119	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
1120	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1121		     "number of mmap data pages and AUX area tracing mmap pages",
1122		     record__parse_mmap_pages),
1123	OPT_BOOLEAN(0, "group", &record.opts.group,
1124		    "put the counters into a counter group"),
1125	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
1126			   NULL, "enables call-graph recording",
1127			   &record_callchain_opt),
1128	OPT_CALLBACK(0, "call-graph", &record.opts,
1129		     "record_mode[,record_size]", record_callchain_help,
1130		     &record_parse_callchain_opt),
1131	OPT_INCR('v', "verbose", &verbose,
1132		    "be more verbose (show counter open errors, etc)"),
1133	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
1134	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
1135		    "per thread counts"),
1136	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
1137	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
1138			&record.opts.sample_time_set,
1139			"Record the sample timestamps"),
1140	OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
1141	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
1142		    "don't sample"),
1143	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
1144			&record.no_buildid_cache_set,
1145			"do not update the buildid cache"),
1146	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
1147			&record.no_buildid_set,
1148			"do not collect buildids in perf.data"),
1149	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
1150		     "monitor event in cgroup name only",
1151		     parse_cgroups),
1152	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
1153		  "ms to wait before starting measurement after program start"),
1154	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
1155		   "user to profile"),
1156
1157	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
1158		     "branch any", "sample any taken branches",
1159		     parse_branch_stack),
1160
1161	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
1162		     "branch filter mask", "branch stack filter modes",
1163		     parse_branch_stack),
1164	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
1165		    "sample by weight (on special events only)"),
1166	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
1167		    "sample transaction flags (special events only)"),
1168	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
1169		    "use per-thread mmaps"),
1170	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
1171		    "sample selected machine registers on interrupt,"
1172		    " use -I ? to list register names", parse_regs),
1173	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
1174		    "Record running/enabled time of read (:S) events"),
1175	OPT_CALLBACK('k', "clockid", &record.opts,
1176		     "clockid", "clockid to use for events, see clock_gettime()",
1177		     parse_clockid),
1178	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
1179			  "opts", "AUX area tracing Snapshot Mode", ""),
1180	OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
1181			"per thread proc mmap processing timeout in ms"),
1182	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
1183		    "Record context switch events"),
1184	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
1185			 "Configure all used events to run in kernel space.",
1186			 PARSE_OPT_EXCLUSIVE),
1187	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
1188			 "Configure all used events to run in user space.",
1189			 PARSE_OPT_EXCLUSIVE),
1190	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
1191		   "clang binary to use for compiling BPF scriptlets"),
1192	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
1193		   "options passed to clang when compiling BPF scriptlets"),
1194	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
1195		   "file", "vmlinux pathname"),
1196	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
1197		    "Record build-id of all DSOs regardless of hits"),
1198	OPT_END()
1199};
1200
1201struct option *record_options = __record_options;
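/*
 * A few illustrative combinations of the options declared above (a
 * sketch, assuming a stock perf build):
 *
 *   perf record -a -g -F 99 -- sleep 10        # system-wide, callchains, 99 Hz
 *   perf record -p 1234 -o pid.data            # attach to an existing pid
 *   perf record -e cycles -c 100000 ./workload # fixed period instead of freq
 */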
1202
1203int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
1204{
1205	int err;
1206	struct record *rec = &record;
1207	char errbuf[BUFSIZ];
1208
1209#ifndef HAVE_LIBBPF_SUPPORT
1210# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1211	set_nobuild('\0', "clang-path", true);
1212	set_nobuild('\0', "clang-opt", true);
1213# undef set_nobuild
1214#endif
1215
1216#ifndef HAVE_BPF_PROLOGUE
1217# if !defined (HAVE_DWARF_SUPPORT)
1218#  define REASON  "NO_DWARF=1"
1219# elif !defined (HAVE_LIBBPF_SUPPORT)
1220#  define REASON  "NO_LIBBPF=1"
1221# else
1222#  define REASON  "this architecture doesn't support BPF prologue"
1223# endif
1224# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1225	set_nobuild('\0', "vmlinux", true);
1226# undef set_nobuild
1227# undef REASON
1228#endif
1229
1230	rec->evlist = perf_evlist__new();
1231	if (rec->evlist == NULL)
1232		return -ENOMEM;
1233
1234	perf_config(perf_record_config, rec);
1235
1236	argc = parse_options(argc, argv, record_options, record_usage,
1237			    PARSE_OPT_STOP_AT_NON_OPTION);
1238	if (!argc && target__none(&rec->opts.target))
1239		usage_with_options(record_usage, record_options);
1240
1241	if (nr_cgroups && !rec->opts.target.system_wide) {
1242		usage_with_options_msg(record_usage, record_options,
1243			"cgroup monitoring only available in system-wide mode");
1244
1245	}
1246	if (rec->opts.record_switch_events &&
1247	    !perf_can_record_switch_events()) {
1248		ui__error("kernel does not support recording context switch events\n");
1249		parse_options_usage(record_usage, record_options, "switch-events", 0);
1250		return -EINVAL;
1251	}
1252
1253	if (!rec->itr) {
1254		rec->itr = auxtrace_record__init(rec->evlist, &err);
1255		if (err)
1256			return err;
1257	}
1258
1259	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
1260					      rec->opts.auxtrace_snapshot_opts);
1261	if (err)
1262		return err;
1263
1264	err = -ENOMEM;
1265
1266	symbol__init(NULL);
1267
1268	if (symbol_conf.kptr_restrict)
1269		pr_warning(
1270"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1271"check /proc/sys/kernel/kptr_restrict.\n\n"
1272"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1273"file is not found in the buildid cache or in the vmlinux path.\n\n"
1274"Samples in kernel modules won't be resolved at all.\n\n"
1275"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1276"even with a suitable vmlinux or kallsyms file.\n\n");
1277
1278	if (rec->no_buildid_cache || rec->no_buildid)
1279		disable_buildid_cache();
1280
1281	if (rec->evlist->nr_entries == 0 &&
1282	    perf_evlist__add_default(rec->evlist) < 0) {
1283		pr_err("Not enough memory for event selector list\n");
1284		goto out_symbol_exit;
1285	}
1286
1287	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
1288		rec->opts.no_inherit = true;
1289
1290	err = target__validate(&rec->opts.target);
1291	if (err) {
1292		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
1293		ui__warning("%s", errbuf);
1294	}
1295
1296	err = target__parse_uid(&rec->opts.target);
1297	if (err) {
1298		int saved_errno = errno;
1299
1300		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
1301		ui__error("%s", errbuf);
1302
1303		err = -saved_errno;
1304		goto out_symbol_exit;
1305	}
1306
1307	err = -ENOMEM;
1308	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
1309		usage_with_options(record_usage, record_options);
1310
1311	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
1312	if (err)
1313		goto out_symbol_exit;
1314
1315	/*
1316	 * We take all buildids when the file contains AUX area
1317	 * tracing data, because we do not decode the trace;
1318	 * decoding it would take too long.
1319	 */
1320	if (rec->opts.full_auxtrace)
1321		rec->buildid_all = true;
1322
1323	if (record_opts__config(&rec->opts)) {
1324		err = -EINVAL;
1325		goto out_symbol_exit;
1326	}
1327
1328	err = __cmd_record(&record, argc, argv);
1329out_symbol_exit:
1330	perf_evlist__delete(rec->evlist);
1331	symbol__exit();
1332	auxtrace_record__free(rec->itr);
1333	return err;
1334}
1335
1336static void snapshot_sig_handler(int sig __maybe_unused)
1337{
1338	if (!auxtrace_snapshot_enabled)
1339		return;
1340	auxtrace_snapshot_enabled = 0;
1341	auxtrace_snapshot_err = auxtrace_record__snapshot_start(record.itr);
1342	auxtrace_record__snapshot_started = 1;
1343}
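/*
 * Illustrative snapshot session (an assumption; the signal(2) wiring for
 * this handler happens elsewhere in this file, outside this excerpt, and
 * is believed to use SIGUSR2):
 *
 *   perf record -e intel_pt// -S -p 1234 &
 *   kill -USR2 %1   # grab an AUX area snapshot instead of tracing continuously
 */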