v4.17
 
  1/*
  2 * builtin-ftrace.c
  3 *
  4 * Copyright (c) 2013  LG Electronics,  Namhyung Kim <namhyung@kernel.org>
  5 *
  6 * Released under the GPL v2.
  7 */
  8
  9#include "builtin.h"
 10#include "perf.h"
 11
 12#include <errno.h>
 13#include <unistd.h>
 14#include <signal.h>
 15#include <fcntl.h>
 16#include <poll.h>
 17
 18#include "debug.h"
 19#include <subcmd/parse-options.h>
 20#include <api/fs/tracing_path.h>
 21#include "evlist.h"
 22#include "target.h"
 23#include "cpumap.h"
 24#include "thread_map.h"
 25#include "util/config.h"
 26
 27
 28#define DEFAULT_TRACER  "function_graph"
 29
 30struct perf_ftrace {
 31	struct perf_evlist	*evlist;
 32	struct target		target;
 33	const char		*tracer;
 34	struct list_head	filters;
 35	struct list_head	notrace;
 36	struct list_head	graph_funcs;
 37	struct list_head	nograph_funcs;
 38	int			graph_depth;
 39};
 40
 41struct filter_entry {
 42	struct list_head	list;
 43	char			name[];
 44};
 45
 46static bool done;
 47
 48static void sig_handler(int sig __maybe_unused)
 49{
 50	done = true;
 51}
 52
 53/*
 54 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
 55 * we asked by setting its exec_error to the function below,
 56 * ftrace__workload_exec_failed_signal.
 57 *
 58 * XXX We need to handle this more appropriately, emitting an error, etc.
 59 */
 60static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
 61						siginfo_t *info __maybe_unused,
 62						void *ucontext __maybe_unused)
 63{
 64	/* workload_exec_errno = info->si_value.sival_int; */
 65	done = true;
 66}
 67
 68static int __write_tracing_file(const char *name, const char *val, bool append)
 69{
 70	char *file;
 71	int fd, ret = -1;
 72	ssize_t size = strlen(val);
 73	int flags = O_WRONLY;
 74	char errbuf[512];
 75	char *val_copy;
 76
 77	file = get_tracing_file(name);
 78	if (!file) {
 79		pr_debug("cannot get tracing file: %s\n", name);
 80		return -1;
 81	}
 82
 83	if (append)
 84		flags |= O_APPEND;
 85	else
 86		flags |= O_TRUNC;
 87
 88	fd = open(file, flags);
 89	if (fd < 0) {
 90		pr_debug("cannot open tracing file: %s: %s\n",
 91			 name, str_error_r(errno, errbuf, sizeof(errbuf)));
 92		goto out;
 93	}
 94
 95	/*
 96	 * Copy the original value and append a '\n'. Without this,
 97	 * the kernel can hide possible errors.
 98	 */
 99	val_copy = strdup(val);
100	if (!val_copy)
101		goto out_close;
102	val_copy[size] = '\n';
103
104	if (write(fd, val_copy, size + 1) == size + 1)
105		ret = 0;
106	else
107		pr_debug("write '%s' to tracing/%s failed: %s\n",
108			 val, name, str_error_r(errno, errbuf, sizeof(errbuf)));
109
110	free(val_copy);
111out_close:
112	close(fd);
113out:
114	put_tracing_file(file);
115	return ret;
116}
117
118static int write_tracing_file(const char *name, const char *val)
119{
120	return __write_tracing_file(name, val, false);
121}
122
123static int append_tracing_file(const char *name, const char *val)
124{
125	return __write_tracing_file(name, val, true);
126}
127
128static int reset_tracing_cpu(void);
129static void reset_tracing_filters(void);
130
131static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
132{
133	if (write_tracing_file("tracing_on", "0") < 0)
134		return -1;
135
136	if (write_tracing_file("current_tracer", "nop") < 0)
137		return -1;
138
139	if (write_tracing_file("set_ftrace_pid", " ") < 0)
140		return -1;
141
142	if (reset_tracing_cpu() < 0)
143		return -1;
144
145	if (write_tracing_file("max_graph_depth", "0") < 0)
146		return -1;
147
148	reset_tracing_filters();
149	return 0;
150}
151
152static int set_tracing_pid(struct perf_ftrace *ftrace)
153{
154	int i;
155	char buf[16];
156
157	if (target__has_cpu(&ftrace->target))
158		return 0;
159
160	for (i = 0; i < thread_map__nr(ftrace->evlist->threads); i++) {
161		scnprintf(buf, sizeof(buf), "%d",
162			  ftrace->evlist->threads->map[i]);
163		if (append_tracing_file("set_ftrace_pid", buf) < 0)
164			return -1;
165	}
166	return 0;
167}
168
169static int set_tracing_cpumask(struct cpu_map *cpumap)
170{
171	char *cpumask;
172	size_t mask_size;
173	int ret;
174	int last_cpu;
175
176	last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
177	mask_size = (last_cpu + 3) / 4 + 1;
178	mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
179
180	cpumask = malloc(mask_size);
181	if (cpumask == NULL) {
182		pr_debug("failed to allocate cpu mask\n");
183		return -1;
184	}
185
186	cpu_map__snprint_mask(cpumap, cpumask, mask_size);
187
188	ret = write_tracing_file("tracing_cpumask", cpumask);
189
190	free(cpumask);
191	return ret;
192}
193
194static int set_tracing_cpu(struct perf_ftrace *ftrace)
195{
196	struct cpu_map *cpumap = ftrace->evlist->cpus;
197
198	if (!target__has_cpu(&ftrace->target))
199		return 0;
200
201	return set_tracing_cpumask(cpumap);
202}
203
204static int reset_tracing_cpu(void)
205{
206	struct cpu_map *cpumap = cpu_map__new(NULL);
207	int ret;
208
209	ret = set_tracing_cpumask(cpumap);
210	cpu_map__put(cpumap);
211	return ret;
212}
213
214static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
215{
216	struct filter_entry *pos;
217
218	list_for_each_entry(pos, funcs, list) {
219		if (append_tracing_file(filter_file, pos->name) < 0)
220			return -1;
221	}
222
223	return 0;
224}
225
226static int set_tracing_filters(struct perf_ftrace *ftrace)
227{
228	int ret;
229
230	ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
231	if (ret < 0)
232		return ret;
233
234	ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
235	if (ret < 0)
236		return ret;
237
238	ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
239	if (ret < 0)
240		return ret;
241
242	/* old kernels do not have this filter */
243	__set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);
244
245	return ret;
246}
247
248static void reset_tracing_filters(void)
249{
250	write_tracing_file("set_ftrace_filter", " ");
251	write_tracing_file("set_ftrace_notrace", " ");
252	write_tracing_file("set_graph_function", " ");
253	write_tracing_file("set_graph_notrace", " ");
254}
255
256static int set_tracing_depth(struct perf_ftrace *ftrace)
257{
258	char buf[16];
259
260	if (ftrace->graph_depth == 0)
261		return 0;
262
263	if (ftrace->graph_depth < 0) {
264		pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
265		return -1;
266	}
267
268	snprintf(buf, sizeof(buf), "%d", ftrace->graph_depth);
269
270	if (write_tracing_file("max_graph_depth", buf) < 0)
271		return -1;
272
273	return 0;
274}
275
276static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
277{
278	char *trace_file;
279	int trace_fd;
280	char buf[4096];
281	struct pollfd pollfd = {
282		.events = POLLIN,
283	};
284
285	if (geteuid() != 0) {
286		pr_err("ftrace only works for root!\n");
287		return -1;
288	}
289
290	signal(SIGINT, sig_handler);
291	signal(SIGUSR1, sig_handler);
292	signal(SIGCHLD, sig_handler);
293	signal(SIGPIPE, sig_handler);
294
295	if (reset_tracing_files(ftrace) < 0) {
296		pr_err("failed to reset ftrace\n");
297		goto out;
298	}
299
300	/* reset ftrace buffer */
301	if (write_tracing_file("trace", "0") < 0)
302		goto out;
303
304	if (argc && perf_evlist__prepare_workload(ftrace->evlist,
305				&ftrace->target, argv, false,
306				ftrace__workload_exec_failed_signal) < 0) {
307		goto out;
308	}
309
310	if (set_tracing_pid(ftrace) < 0) {
311		pr_err("failed to set ftrace pid\n");
312		goto out_reset;
313	}
314
315	if (set_tracing_cpu(ftrace) < 0) {
316		pr_err("failed to set tracing cpumask\n");
317		goto out_reset;
318	}
319
320	if (set_tracing_filters(ftrace) < 0) {
321		pr_err("failed to set tracing filters\n");
322		goto out_reset;
323	}
324
325	if (set_tracing_depth(ftrace) < 0) {
326		pr_err("failed to set graph depth\n");
327		goto out_reset;
328	}
329
330	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
331		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
332		goto out_reset;
333	}
334
335	setup_pager();
336
337	trace_file = get_tracing_file("trace_pipe");
338	if (!trace_file) {
339		pr_err("failed to open trace_pipe\n");
340		goto out_reset;
341	}
342
343	trace_fd = open(trace_file, O_RDONLY);
344
345	put_tracing_file(trace_file);
346
347	if (trace_fd < 0) {
348		pr_err("failed to open trace_pipe\n");
349		goto out_reset;
350	}
351
352	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
353	pollfd.fd = trace_fd;
354
355	if (write_tracing_file("tracing_on", "1") < 0) {
356		pr_err("can't enable tracing\n");
357		goto out_close_fd;
358	}
359
360	perf_evlist__start_workload(ftrace->evlist);
361
362	while (!done) {
363		if (poll(&pollfd, 1, -1) < 0)
364			break;
365
366		if (pollfd.revents & POLLIN) {
367			int n = read(trace_fd, buf, sizeof(buf));
368			if (n < 0)
369				break;
370			if (fwrite(buf, n, 1, stdout) != 1)
371				break;
372		}
373	}
374
375	write_tracing_file("tracing_on", "0");
376
377	/* read remaining buffer contents */
378	while (true) {
379		int n = read(trace_fd, buf, sizeof(buf));
380		if (n <= 0)
381			break;
382		if (fwrite(buf, n, 1, stdout) != 1)
383			break;
384	}
385
386out_close_fd:
387	close(trace_fd);
388out_reset:
389	reset_tracing_files(ftrace);
390out:
391	return done ? 0 : -1;
392}
393
394static int perf_ftrace_config(const char *var, const char *value, void *cb)
395{
396	struct perf_ftrace *ftrace = cb;
397
398	if (!strstarts(var, "ftrace."))
399		return 0;
400
401	if (strcmp(var, "ftrace.tracer"))
402		return -1;
403
404	if (!strcmp(value, "function_graph") ||
405	    !strcmp(value, "function")) {
406		ftrace->tracer = value;
407		return 0;
408	}
409
410	pr_err("Please select \"function_graph\" (default) or \"function\"\n");
411	return -1;
412}
413
414static int parse_filter_func(const struct option *opt, const char *str,
415			     int unset __maybe_unused)
416{
417	struct list_head *head = opt->value;
418	struct filter_entry *entry;
419
420	entry = malloc(sizeof(*entry) + strlen(str) + 1);
421	if (entry == NULL)
422		return -ENOMEM;
423
424	strcpy(entry->name, str);
425	list_add_tail(&entry->list, head);
426
427	return 0;
428}
429
430static void delete_filter_func(struct list_head *head)
431{
432	struct filter_entry *pos, *tmp;
433
434	list_for_each_entry_safe(pos, tmp, head, list) {
435		list_del(&pos->list);
436		free(pos);
437	}
438}
439
440int cmd_ftrace(int argc, const char **argv)
441{
442	int ret;
443	struct perf_ftrace ftrace = {
444		.tracer = DEFAULT_TRACER,
445		.target = { .uid = UINT_MAX, },
446	};
447	const char * const ftrace_usage[] = {
448		"perf ftrace [<options>] [<command>]",
449		"perf ftrace [<options>] -- <command> [<options>]",
450		NULL
451	};
452	const struct option ftrace_options[] = {
453	OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
454		   "tracer to use: function_graph(default) or function"),
455	OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
456		   "trace on existing process id"),
457	OPT_INCR('v', "verbose", &verbose,
458		 "be more verbose"),
459	OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
460		    "system-wide collection from all CPUs"),
461	OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
462		    "list of cpus to monitor"),
463	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
464		     "trace given functions only", parse_filter_func),
465	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
466		     "do not trace given functions", parse_filter_func),
467	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
468		     "Set graph filter on given functions", parse_filter_func),
469	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
470		     "Set nograph filter on given functions", parse_filter_func),
471	OPT_INTEGER('D', "graph-depth", &ftrace.graph_depth,
472		    "Max depth for function graph tracer"),
473	OPT_END()
474	};
475
476	INIT_LIST_HEAD(&ftrace.filters);
477	INIT_LIST_HEAD(&ftrace.notrace);
478	INIT_LIST_HEAD(&ftrace.graph_funcs);
479	INIT_LIST_HEAD(&ftrace.nograph_funcs);
480
481	ret = perf_config(perf_ftrace_config, &ftrace);
482	if (ret < 0)
483		return -1;
484
485	argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
486			    PARSE_OPT_STOP_AT_NON_OPTION);
487	if (!argc && target__none(&ftrace.target))
488		usage_with_options(ftrace_usage, ftrace_options);
489
490	ret = target__validate(&ftrace.target);
491	if (ret) {
492		char errbuf[512];
493
494		target__strerror(&ftrace.target, ret, errbuf, 512);
495		pr_err("%s\n", errbuf);
496		goto out_delete_filters;
497	}
498
499	ftrace.evlist = perf_evlist__new();
500	if (ftrace.evlist == NULL) {
501		ret = -ENOMEM;
502		goto out_delete_filters;
503	}
504
505	ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
506	if (ret < 0)
507		goto out_delete_evlist;
508
509	ret = __cmd_ftrace(&ftrace, argc, argv);
510
511out_delete_evlist:
512	perf_evlist__delete(ftrace.evlist);
513
514out_delete_filters:
515	delete_filter_func(&ftrace.filters);
516	delete_filter_func(&ftrace.notrace);
517	delete_filter_func(&ftrace.graph_funcs);
518	delete_filter_func(&ftrace.nograph_funcs);
519
520	return ret;
521}
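
Stripped of option handling, the control flow of __cmd_ftrace() above is a short sequence of tracefs writes followed by a streaming read of trace_pipe. The standalone sketch below reproduces that minimal sequence outside of perf; the /sys/kernel/debug/tracing mount point, the five-second capture window and the reduced error handling are illustrative assumptions, not part of builtin-ftrace.c, and the program must run as root.

/*
 * Minimal sketch of the tracefs sequence used by __cmd_ftrace() above:
 * reset the tracer, select "function_graph", enable tracing, and stream
 * trace_pipe to stdout for a few seconds. Assumes tracefs is mounted at
 * /sys/kernel/debug/tracing and the process runs as root.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <time.h>

#define TRACING_DIR "/sys/kernel/debug/tracing"

static void write_tracing(const char *name, const char *val)
{
	char path[256], line[256];
	int fd, len;

	snprintf(path, sizeof(path), "%s/%s", TRACING_DIR, name);
	fd = open(path, O_WRONLY | O_TRUNC);
	if (fd < 0) {
		perror(path);
		exit(1);
	}
	/* like __write_tracing_file(), terminate the value with '\n'
	 * so the kernel reports errors instead of hiding them */
	len = snprintf(line, sizeof(line), "%s\n", val);
	if (write(fd, line, len) != len) {
		perror(path);
		exit(1);
	}
	close(fd);
}

int main(void)
{
	char buf[4096];
	time_t end = time(NULL) + 5;	/* capture window: ~5 seconds */
	ssize_t n;
	int fd;

	write_tracing("tracing_on", "0");
	write_tracing("current_tracer", "function_graph");
	write_tracing("trace", "0");		/* clear the ring buffer */

	fd = open(TRACING_DIR "/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("trace_pipe");
		exit(1);
	}
	write_tracing("tracing_on", "1");

	while (time(NULL) < end && (n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, n, 1, stdout);

	write_tracing("tracing_on", "0");
	write_tracing("current_tracer", "nop");
	close(fd);
	return 0;
}
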
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * builtin-ftrace.c
  4 *
  5 * Copyright (c) 2013  LG Electronics,  Namhyung Kim <namhyung@kernel.org>
  6 * Copyright (c) 2020  Changbin Du <changbin.du@gmail.com>, significant enhancement.
  7 */
  8
  9#include "builtin.h"
 10
 11#include <errno.h>
 12#include <unistd.h>
 13#include <signal.h>
 14#include <stdlib.h>
 15#include <fcntl.h>
 16#include <poll.h>
 17#include <linux/capability.h>
 18#include <linux/string.h>
 19
 20#include "debug.h"
 21#include <subcmd/pager.h>
 22#include <subcmd/parse-options.h>
 23#include <api/fs/tracing_path.h>
 24#include "evlist.h"
 25#include "target.h"
 26#include "cpumap.h"
 27#include "thread_map.h"
 28#include "util/cap.h"
 29#include "util/config.h"
 30#include "util/units.h"
 31#include "util/parse-sublevel-options.h"
 32
 33#define DEFAULT_TRACER  "function_graph"
 34
 35struct perf_ftrace {
 36	struct evlist		*evlist;
 37	struct target		target;
 38	const char		*tracer;
 39	bool			list_avail_functions;
 40	struct list_head	filters;
 41	struct list_head	notrace;
 42	struct list_head	graph_funcs;
 43	struct list_head	nograph_funcs;
 44	int			graph_depth;
 45	unsigned long		percpu_buffer_size;
 46	bool			inherit;
 47	int			func_stack_trace;
 48	int			func_irq_info;
 49	int			graph_nosleep_time;
 50	int			graph_noirqs;
 51	int			graph_verbose;
 52	int			graph_thresh;
 53	unsigned int		initial_delay;
 54};
 55
 56struct filter_entry {
 57	struct list_head	list;
 58	char			name[];
 59};
 60
 61static volatile int workload_exec_errno;
 62static bool done;
 63
 64static void sig_handler(int sig __maybe_unused)
 65{
 66	done = true;
 67}
 68
 69/*
 70 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
 71 * we asked by setting its exec_error to the function below,
 72 * ftrace__workload_exec_failed_signal.
 73 *
 74 * XXX We need to handle this more appropriately, emitting an error, etc.
 75 */
 76static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
 77						siginfo_t *info __maybe_unused,
 78						void *ucontext __maybe_unused)
 79{
 80	workload_exec_errno = info->si_value.sival_int;
 81	done = true;
 82}
 83
 84static int __write_tracing_file(const char *name, const char *val, bool append)
 85{
 86	char *file;
 87	int fd, ret = -1;
 88	ssize_t size = strlen(val);
 89	int flags = O_WRONLY;
 90	char errbuf[512];
 91	char *val_copy;
 92
 93	file = get_tracing_file(name);
 94	if (!file) {
 95		pr_debug("cannot get tracing file: %s\n", name);
 96		return -1;
 97	}
 98
 99	if (append)
100		flags |= O_APPEND;
101	else
102		flags |= O_TRUNC;
103
104	fd = open(file, flags);
105	if (fd < 0) {
106		pr_debug("cannot open tracing file: %s: %s\n",
107			 name, str_error_r(errno, errbuf, sizeof(errbuf)));
108		goto out;
109	}
110
111	/*
112	 * Copy the original value and append a '\n'. Without this,
113	 * the kernel can hide possible errors.
114	 */
115	val_copy = strdup(val);
116	if (!val_copy)
117		goto out_close;
118	val_copy[size] = '\n';
119
120	if (write(fd, val_copy, size + 1) == size + 1)
121		ret = 0;
122	else
123		pr_debug("write '%s' to tracing/%s failed: %s\n",
124			 val, name, str_error_r(errno, errbuf, sizeof(errbuf)));
125
126	free(val_copy);
127out_close:
128	close(fd);
129out:
130	put_tracing_file(file);
131	return ret;
132}
133
134static int write_tracing_file(const char *name, const char *val)
135{
136	return __write_tracing_file(name, val, false);
137}
138
139static int append_tracing_file(const char *name, const char *val)
140{
141	return __write_tracing_file(name, val, true);
142}
143
144static int read_tracing_file_to_stdout(const char *name)
145{
146	char buf[4096];
147	char *file;
148	int fd;
149	int ret = -1;
150
151	file = get_tracing_file(name);
152	if (!file) {
153		pr_debug("cannot get tracing file: %s\n", name);
154		return -1;
155	}
156
157	fd = open(file, O_RDONLY);
158	if (fd < 0) {
159		pr_debug("cannot open tracing file: %s: %s\n",
160			 name, str_error_r(errno, buf, sizeof(buf)));
161		goto out;
162	}
163
164	/* read contents to stdout */
165	while (true) {
166		int n = read(fd, buf, sizeof(buf));
167		if (n == 0)
168			break;
169		else if (n < 0)
170			goto out_close;
171
172		if (fwrite(buf, n, 1, stdout) != 1)
173			goto out_close;
174	}
175	ret = 0;
176
177out_close:
178	close(fd);
179out:
180	put_tracing_file(file);
181	return ret;
182}
183
184static int write_tracing_file_int(const char *name, int value)
185{
186	char buf[16];
187
188	snprintf(buf, sizeof(buf), "%d", value);
189	if (write_tracing_file(name, buf) < 0)
190		return -1;
191
192	return 0;
193}
194
195static int write_tracing_option_file(const char *name, const char *val)
196{
197	char *file;
198	int ret;
199
200	if (asprintf(&file, "options/%s", name) < 0)
201		return -1;
202
203	ret = __write_tracing_file(file, val, false);
204	free(file);
205	return ret;
206}
207
208static int reset_tracing_cpu(void);
209static void reset_tracing_filters(void);
210
211static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
212{
213	write_tracing_option_file("function-fork", "0");
214	write_tracing_option_file("func_stack_trace", "0");
215	write_tracing_option_file("sleep-time", "1");
216	write_tracing_option_file("funcgraph-irqs", "1");
217	write_tracing_option_file("funcgraph-proc", "0");
218	write_tracing_option_file("funcgraph-abstime", "0");
219	write_tracing_option_file("latency-format", "0");
220	write_tracing_option_file("irq-info", "0");
221}
222
223static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
224{
225	if (write_tracing_file("tracing_on", "0") < 0)
226		return -1;
227
228	if (write_tracing_file("current_tracer", "nop") < 0)
229		return -1;
230
231	if (write_tracing_file("set_ftrace_pid", " ") < 0)
232		return -1;
233
234	if (reset_tracing_cpu() < 0)
235		return -1;
236
237	if (write_tracing_file("max_graph_depth", "0") < 0)
238		return -1;
239
240	if (write_tracing_file("tracing_thresh", "0") < 0)
241		return -1;
242
243	reset_tracing_filters();
244	reset_tracing_options(ftrace);
245	return 0;
246}
247
248static int set_tracing_pid(struct perf_ftrace *ftrace)
249{
250	int i;
251	char buf[16];
252
253	if (target__has_cpu(&ftrace->target))
254		return 0;
255
256	for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
257		scnprintf(buf, sizeof(buf), "%d",
258			  ftrace->evlist->core.threads->map[i]);
259		if (append_tracing_file("set_ftrace_pid", buf) < 0)
260			return -1;
261	}
262	return 0;
263}
264
265static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
266{
267	char *cpumask;
268	size_t mask_size;
269	int ret;
270	int last_cpu;
271
272	last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
273	mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
274	mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
275
276	cpumask = malloc(mask_size);
277	if (cpumask == NULL) {
278		pr_debug("failed to allocate cpu mask\n");
279		return -1;
280	}
281
282	cpu_map__snprint_mask(cpumap, cpumask, mask_size);
283
284	ret = write_tracing_file("tracing_cpumask", cpumask);
285
286	free(cpumask);
287	return ret;
288}
289
290static int set_tracing_cpu(struct perf_ftrace *ftrace)
291{
292	struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus;
293
294	if (!target__has_cpu(&ftrace->target))
295		return 0;
296
297	return set_tracing_cpumask(cpumap);
298}
299
300static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace)
301{
302	if (!ftrace->func_stack_trace)
303		return 0;
304
305	if (write_tracing_option_file("func_stack_trace", "1") < 0)
306		return -1;
307
308	return 0;
309}
310
311static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace)
312{
313	if (!ftrace->func_irq_info)
314		return 0;
315
316	if (write_tracing_option_file("irq-info", "1") < 0)
317		return -1;
318
319	return 0;
320}
321
322static int reset_tracing_cpu(void)
323{
324	struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
325	int ret;
326
327	ret = set_tracing_cpumask(cpumap);
328	perf_cpu_map__put(cpumap);
329	return ret;
330}
331
332static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
333{
334	struct filter_entry *pos;
335
336	list_for_each_entry(pos, funcs, list) {
337		if (append_tracing_file(filter_file, pos->name) < 0)
338			return -1;
339	}
340
341	return 0;
342}
343
344static int set_tracing_filters(struct perf_ftrace *ftrace)
345{
346	int ret;
347
348	ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
349	if (ret < 0)
350		return ret;
351
352	ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
353	if (ret < 0)
354		return ret;
355
356	ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
357	if (ret < 0)
358		return ret;
359
360	/* old kernels do not have this filter */
361	__set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);
362
363	return ret;
364}
365
366static void reset_tracing_filters(void)
367{
368	write_tracing_file("set_ftrace_filter", " ");
369	write_tracing_file("set_ftrace_notrace", " ");
370	write_tracing_file("set_graph_function", " ");
371	write_tracing_file("set_graph_notrace", " ");
372}
373
374static int set_tracing_depth(struct perf_ftrace *ftrace)
375{
376	if (ftrace->graph_depth == 0)
377		return 0;
378
379	if (ftrace->graph_depth < 0) {
380		pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
381		return -1;
382	}
383
384	if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0)
385		return -1;
386
387	return 0;
388}
389
390static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace)
391{
392	int ret;
393
394	if (ftrace->percpu_buffer_size == 0)
395		return 0;
396
397	ret = write_tracing_file_int("buffer_size_kb",
398				     ftrace->percpu_buffer_size / 1024);
399	if (ret < 0)
400		return ret;
401
402	return 0;
403}
404
405static int set_tracing_trace_inherit(struct perf_ftrace *ftrace)
406{
407	if (!ftrace->inherit)
408		return 0;
409
410	if (write_tracing_option_file("function-fork", "1") < 0)
411		return -1;
412
413	return 0;
414}
415
416static int set_tracing_sleep_time(struct perf_ftrace *ftrace)
417{
418	if (!ftrace->graph_nosleep_time)
419		return 0;
420
421	if (write_tracing_option_file("sleep-time", "0") < 0)
422		return -1;
423
424	return 0;
425}
426
427static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace)
428{
429	if (!ftrace->graph_noirqs)
430		return 0;
431
432	if (write_tracing_option_file("funcgraph-irqs", "0") < 0)
433		return -1;
434
435	return 0;
436}
437
438static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace)
439{
440	if (!ftrace->graph_verbose)
441		return 0;
442
443	if (write_tracing_option_file("funcgraph-proc", "1") < 0)
444		return -1;
445
446	if (write_tracing_option_file("funcgraph-abstime", "1") < 0)
447		return -1;
448
449	if (write_tracing_option_file("latency-format", "1") < 0)
450		return -1;
451
452	return 0;
453}
454
455static int set_tracing_thresh(struct perf_ftrace *ftrace)
456{
457	int ret;
458
459	if (ftrace->graph_thresh == 0)
460		return 0;
461
462	ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh);
463	if (ret < 0)
464		return ret;
465
466	return 0;
467}
468
469static int set_tracing_options(struct perf_ftrace *ftrace)
470{
471	if (set_tracing_pid(ftrace) < 0) {
472		pr_err("failed to set ftrace pid\n");
473		return -1;
474	}
475
476	if (set_tracing_cpu(ftrace) < 0) {
477		pr_err("failed to set tracing cpumask\n");
478		return -1;
479	}
480
481	if (set_tracing_func_stack_trace(ftrace) < 0) {
482		pr_err("failed to set tracing option func_stack_trace\n");
483		return -1;
484	}
485
486	if (set_tracing_func_irqinfo(ftrace) < 0) {
487		pr_err("failed to set tracing option irq-info\n");
488		return -1;
489	}
490
491	if (set_tracing_filters(ftrace) < 0) {
492		pr_err("failed to set tracing filters\n");
493		return -1;
494	}
495
496	if (set_tracing_depth(ftrace) < 0) {
497		pr_err("failed to set graph depth\n");
498		return -1;
499	}
500
501	if (set_tracing_percpu_buffer_size(ftrace) < 0) {
502		pr_err("failed to set tracing per-cpu buffer size\n");
503		return -1;
504	}
505
506	if (set_tracing_trace_inherit(ftrace) < 0) {
507		pr_err("failed to set tracing option function-fork\n");
508		return -1;
509	}
510
511	if (set_tracing_sleep_time(ftrace) < 0) {
512		pr_err("failed to set tracing option sleep-time\n");
513		return -1;
514	}
515
516	if (set_tracing_funcgraph_irqs(ftrace) < 0) {
517		pr_err("failed to set tracing option funcgraph-irqs\n");
518		return -1;
519	}
520
521	if (set_tracing_funcgraph_verbose(ftrace) < 0) {
522		pr_err("failed to set tracing option funcgraph-proc/funcgraph-abstime\n");
523		return -1;
524	}
525
526	if (set_tracing_thresh(ftrace) < 0) {
527		pr_err("failed to set tracing thresh\n");
528		return -1;
529	}
530
531	return 0;
532}
533
534static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
535{
536	char *trace_file;
537	int trace_fd;
538	char buf[4096];
539	struct pollfd pollfd = {
540		.events = POLLIN,
541	};
542
543	if (!(perf_cap__capable(CAP_PERFMON) ||
544	      perf_cap__capable(CAP_SYS_ADMIN))) {
545		pr_err("ftrace only works for %s!\n",
546#ifdef HAVE_LIBCAP_SUPPORT
547		"users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
548#else
549		"root"
550#endif
551		);
552		return -1;
553	}
554
555	signal(SIGINT, sig_handler);
556	signal(SIGUSR1, sig_handler);
557	signal(SIGCHLD, sig_handler);
558	signal(SIGPIPE, sig_handler);
559
560	if (ftrace->list_avail_functions)
561		return read_tracing_file_to_stdout("available_filter_functions");
562
563	if (reset_tracing_files(ftrace) < 0) {
564		pr_err("failed to reset ftrace\n");
565		goto out;
566	}
567
568	/* reset ftrace buffer */
569	if (write_tracing_file("trace", "0") < 0)
570		goto out;
571
572	if (argc && perf_evlist__prepare_workload(ftrace->evlist,
573				&ftrace->target, argv, false,
574				ftrace__workload_exec_failed_signal) < 0) {
575		goto out;
576	}
577
578	if (set_tracing_options(ftrace) < 0)
579		goto out_reset;
580
581	if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
582		pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
583		goto out_reset;
584	}
585
586	setup_pager();
587
588	trace_file = get_tracing_file("trace_pipe");
589	if (!trace_file) {
590		pr_err("failed to open trace_pipe\n");
591		goto out_reset;
592	}
593
594	trace_fd = open(trace_file, O_RDONLY);
595
596	put_tracing_file(trace_file);
597
598	if (trace_fd < 0) {
599		pr_err("failed to open trace_pipe\n");
600		goto out_reset;
601	}
602
603	fcntl(trace_fd, F_SETFL, O_NONBLOCK);
604	pollfd.fd = trace_fd;
605
606	/* display column headers */
607	read_tracing_file_to_stdout("trace");
608
609	if (!ftrace->initial_delay) {
610		if (write_tracing_file("tracing_on", "1") < 0) {
611			pr_err("can't enable tracing\n");
612			goto out_close_fd;
613		}
614	}
615
616	perf_evlist__start_workload(ftrace->evlist);
617
618	if (ftrace->initial_delay) {
619		usleep(ftrace->initial_delay * 1000);
620		if (write_tracing_file("tracing_on", "1") < 0) {
621			pr_err("can't enable tracing\n");
622			goto out_close_fd;
623		}
624	}
625
626	while (!done) {
627		if (poll(&pollfd, 1, -1) < 0)
628			break;
629
630		if (pollfd.revents & POLLIN) {
631			int n = read(trace_fd, buf, sizeof(buf));
632			if (n < 0)
633				break;
634			if (fwrite(buf, n, 1, stdout) != 1)
635				break;
636		}
637	}
638
639	write_tracing_file("tracing_on", "0");
640
641	if (workload_exec_errno) {
642		const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
643		/* flush stdout first so below error msg appears at the end. */
644		fflush(stdout);
645		pr_err("workload failed: %s\n", emsg);
646		goto out_close_fd;
647	}
648
649	/* read remaining buffer contents */
650	while (true) {
651		int n = read(trace_fd, buf, sizeof(buf));
652		if (n <= 0)
653			break;
654		if (fwrite(buf, n, 1, stdout) != 1)
655			break;
656	}
657
658out_close_fd:
659	close(trace_fd);
660out_reset:
661	reset_tracing_files(ftrace);
662out:
663	return (done && !workload_exec_errno) ? 0 : -1;
664}
665
666static int perf_ftrace_config(const char *var, const char *value, void *cb)
667{
668	struct perf_ftrace *ftrace = cb;
669
670	if (!strstarts(var, "ftrace."))
671		return 0;
672
673	if (strcmp(var, "ftrace.tracer"))
674		return -1;
675
676	if (!strcmp(value, "function_graph") ||
677	    !strcmp(value, "function")) {
678		ftrace->tracer = value;
679		return 0;
680	}
681
682	pr_err("Please select \"function_graph\" (default) or \"function\"\n");
683	return -1;
684}
685
686static int parse_filter_func(const struct option *opt, const char *str,
687			     int unset __maybe_unused)
688{
689	struct list_head *head = opt->value;
690	struct filter_entry *entry;
691
692	entry = malloc(sizeof(*entry) + strlen(str) + 1);
693	if (entry == NULL)
694		return -ENOMEM;
695
696	strcpy(entry->name, str);
697	list_add_tail(&entry->list, head);
698
699	return 0;
700}
701
702static void delete_filter_func(struct list_head *head)
703{
704	struct filter_entry *pos, *tmp;
705
706	list_for_each_entry_safe(pos, tmp, head, list) {
707		list_del_init(&pos->list);
708		free(pos);
709	}
710}
711
712static int parse_buffer_size(const struct option *opt,
713			     const char *str, int unset)
714{
715	unsigned long *s = (unsigned long *)opt->value;
716	static struct parse_tag tags_size[] = {
717		{ .tag  = 'B', .mult = 1       },
718		{ .tag  = 'K', .mult = 1 << 10 },
719		{ .tag  = 'M', .mult = 1 << 20 },
720		{ .tag  = 'G', .mult = 1 << 30 },
721		{ .tag  = 0 },
722	};
723	unsigned long val;
724
725	if (unset) {
726		*s = 0;
727		return 0;
728	}
729
730	val = parse_tag_value(str, tags_size);
731	if (val != (unsigned long) -1) {
732		if (val < 1024) {
733			pr_err("buffer size too small, must larger than 1KB.");
734			return -1;
735		}
736		*s = val;
737		return 0;
738	}
739
740	return -1;
741}
742
743static int parse_func_tracer_opts(const struct option *opt,
744				  const char *str, int unset)
745{
746	int ret;
747	struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
748	struct sublevel_option func_tracer_opts[] = {
749		{ .name = "call-graph",	.value_ptr = &ftrace->func_stack_trace },
750		{ .name = "irq-info",	.value_ptr = &ftrace->func_irq_info },
751		{ .name = NULL, }
752	};
753
754	if (unset)
755		return 0;
756
757	ret = perf_parse_sublevel_options(str, func_tracer_opts);
758	if (ret)
759		return ret;
760
761	return 0;
762}
763
764static int parse_graph_tracer_opts(const struct option *opt,
765				  const char *str, int unset)
766{
767	int ret;
768	struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
769	struct sublevel_option graph_tracer_opts[] = {
770		{ .name = "nosleep-time",	.value_ptr = &ftrace->graph_nosleep_time },
771		{ .name = "noirqs",		.value_ptr = &ftrace->graph_noirqs },
772		{ .name = "verbose",		.value_ptr = &ftrace->graph_verbose },
773		{ .name = "thresh",		.value_ptr = &ftrace->graph_thresh },
774		{ .name = "depth",		.value_ptr = &ftrace->graph_depth },
775		{ .name = NULL, }
776	};
777
778	if (unset)
779		return 0;
780
781	ret = perf_parse_sublevel_options(str, graph_tracer_opts);
782	if (ret)
783		return ret;
784
785	return 0;
786}
787
788static void select_tracer(struct perf_ftrace *ftrace)
789{
790	bool graph = !list_empty(&ftrace->graph_funcs) ||
791		     !list_empty(&ftrace->nograph_funcs);
792	bool func = !list_empty(&ftrace->filters) ||
793		    !list_empty(&ftrace->notrace);
794
795	/* The function_graph has priority over function tracer. */
796	if (graph)
797		ftrace->tracer = "function_graph";
798	else if (func)
799		ftrace->tracer = "function";
800	/* Otherwise, the default tracer is used. */
801
802	pr_debug("%s tracer is used\n", ftrace->tracer);
803}
804
805int cmd_ftrace(int argc, const char **argv)
806{
807	int ret;
808	struct perf_ftrace ftrace = {
809		.tracer = DEFAULT_TRACER,
810		.target = { .uid = UINT_MAX, },
811	};
812	const char * const ftrace_usage[] = {
813		"perf ftrace [<options>] [<command>]",
814		"perf ftrace [<options>] -- <command> [<options>]",
815		NULL
816	};
817	const struct option ftrace_options[] = {
818	OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
819		   "Tracer to use: function_graph(default) or function"),
820	OPT_BOOLEAN('F', "funcs", &ftrace.list_avail_functions,
821		    "Show available functions to filter"),
822	OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
823		   "Trace on existing process id"),
824	/* TODO: Add short option -t after -t/--tracer can be removed. */
825	OPT_STRING(0, "tid", &ftrace.target.tid, "tid",
826		   "Trace on existing thread id (exclusive to --pid)"),
827	OPT_INCR('v', "verbose", &verbose,
828		 "Be more verbose"),
829	OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
830		    "System-wide collection from all CPUs"),
831	OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
832		    "List of cpus to monitor"),
833	OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
834		     "Trace given functions using function tracer",
835		     parse_filter_func),
836	OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
837		     "Do not trace given functions", parse_filter_func),
838	OPT_CALLBACK(0, "func-opts", &ftrace, "options",
839		     "Function tracer options, available options: call-graph,irq-info",
840		     parse_func_tracer_opts),
841	OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
842		     "Trace given functions using function_graph tracer",
843		     parse_filter_func),
844	OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
845		     "Set nograph filter on given functions", parse_filter_func),
846	OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
847		     "Graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>",
848		     parse_graph_tracer_opts),
849	OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
850		     "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size),
851	OPT_BOOLEAN(0, "inherit", &ftrace.inherit,
852		    "Trace children processes"),
853	OPT_UINTEGER('D', "delay", &ftrace.initial_delay,
854		     "Number of milliseconds to wait before starting tracing after program start"),
855	OPT_END()
856	};
857
858	INIT_LIST_HEAD(&ftrace.filters);
859	INIT_LIST_HEAD(&ftrace.notrace);
860	INIT_LIST_HEAD(&ftrace.graph_funcs);
861	INIT_LIST_HEAD(&ftrace.nograph_funcs);
862
863	ret = perf_config(perf_ftrace_config, &ftrace);
864	if (ret < 0)
865		return -1;
866
867	argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
868			    PARSE_OPT_STOP_AT_NON_OPTION);
869	if (!argc && target__none(&ftrace.target))
870		ftrace.target.system_wide = true;
871
872	select_tracer(&ftrace);
873
874	ret = target__validate(&ftrace.target);
875	if (ret) {
876		char errbuf[512];
877
878		target__strerror(&ftrace.target, ret, errbuf, 512);
879		pr_err("%s\n", errbuf);
880		goto out_delete_filters;
881	}
882
883	ftrace.evlist = evlist__new();
884	if (ftrace.evlist == NULL) {
885		ret = -ENOMEM;
886		goto out_delete_filters;
887	}
888
889	ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
890	if (ret < 0)
891		goto out_delete_evlist;
892
893	ret = __cmd_ftrace(&ftrace, argc, argv);
894
895out_delete_evlist:
896	evlist__delete(ftrace.evlist);
897
898out_delete_filters:
899	delete_filter_func(&ftrace.filters);
900	delete_filter_func(&ftrace.notrace);
901	delete_filter_func(&ftrace.graph_funcs);
902	delete_filter_func(&ftrace.nograph_funcs);
903
904	return ret;
905}
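
In the v5.9 listing, parse_buffer_size() relies on perf's parse_tag_value() helper to turn a B/K/M/G suffixed string into bytes, and set_tracing_percpu_buffer_size() then writes the value divided by 1024 into buffer_size_kb. The standalone sketch below reimplements just that multiplier logic for illustration; it mirrors the same suffixes and the 1KB lower bound, but it is not the perf implementation (suffix case handling and error reporting are simplified assumptions here).

/*
 * Illustrative reimplementation of the B/K/M/G suffix handling that
 * parse_buffer_size() gets from parse_tag_value(). Not the perf code:
 * suffix case handling and error reporting are simplified.
 */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned long parse_size(const char *str)
{
	char *end;
	unsigned long val = strtoul(str, &end, 10);
	unsigned long mult;

	/* a suffix is mandatory and exactly one character long */
	if (end == str || end[0] == '\0' || end[1] != '\0')
		return (unsigned long)-1;

	switch (toupper((unsigned char)end[0])) {
	case 'B': mult = 1UL;       break;
	case 'K': mult = 1UL << 10; break;
	case 'M': mult = 1UL << 20; break;
	case 'G': mult = 1UL << 30; break;
	default:  return (unsigned long)-1;
	}

	val *= mult;
	if (val < 1024) {	/* same lower bound as parse_buffer_size() */
		fprintf(stderr, "buffer size too small, must be larger than 1KB\n");
		return (unsigned long)-1;
	}
	return val;
}

int main(int argc, char **argv)
{
	unsigned long size;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <size>[BKMG]\n", argv[0]);
		return 1;
	}
	size = parse_size(argv[1]);	/* e.g. "4M" -> 4194304 */
	if (size == (unsigned long)-1)
		return 1;
	printf("%lu bytes -> buffer_size_kb=%lu\n", size, size / 1024);
	return 0;
}
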