tools/perf/util/record.c, v4.17
// SPDX-License-Identifier: GPL-2.0
#include "evlist.h"
#include "evsel.h"
#include "cpumap.h"
#include "parse-events.h"
#include <errno.h>
#include <api/fs/fs.h>
#include <subcmd/parse-options.h>
#include "util.h"
#include "cloexec.h"

typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);

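/*
 * Open the trial event named by 'str', let fn() set the attribute bit
 * under test, then re-open it.  Returns 0 if the kernel accepted the
 * modified attribute, -EINVAL if it rejected it, and -EAGAIN if the
 * trial event could not be parsed or opened at all, in which case the
 * caller should try a different event.
 */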
static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
{
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;
	unsigned long flags = perf_event_open_cloexec_flag();
	int err = -EAGAIN, fd;
	static pid_t pid = -1;

	evlist = perf_evlist__new();
	if (!evlist)
		return -ENOMEM;

	if (parse_events(evlist, str, NULL))
		goto out_delete;

	evsel = perf_evlist__first(evlist);

	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);

	fn(evsel);

	fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
	if (fd < 0) {
		if (errno == EINVAL)
			err = -EINVAL;
		goto out_delete;
	}
	close(fd);
	err = 0;

out_delete:
	perf_evlist__delete(evlist);
	return err;
}

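/*
 * Run the probe on the first CPU of the system, falling back through a
 * short list of simple user-space events until one of them can be
 * opened at all.
 */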
static bool perf_probe_api(setup_probe_fn_t fn)
{
	const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
	struct cpu_map *cpus;
	int cpu, ret, i = 0;

	cpus = cpu_map__new(NULL);
	if (!cpus)
		return false;
	cpu = cpus->map[0];
	cpu_map__put(cpus);

	do {
		ret = perf_do_probe_api(fn, cpu, try[i++]);
		if (!ret)
			return true;
	} while (ret == -EAGAIN && try[i]);

	return false;
}

static void perf_probe_sample_identifier(struct perf_evsel *evsel)
{
	evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}

static void perf_probe_comm_exec(struct perf_evsel *evsel)
{
	evsel->attr.comm_exec = 1;
}

static void perf_probe_context_switch(struct perf_evsel *evsel)
{
	evsel->attr.context_switch = 1;
}

bool perf_can_sample_identifier(void)
{
	return perf_probe_api(perf_probe_sample_identifier);
}

static bool perf_can_comm_exec(void)
{
	return perf_probe_api(perf_probe_comm_exec);
}

bool perf_can_record_switch_events(void)
{
	return perf_probe_api(perf_probe_context_switch);
}

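/*
 * Check that a system-wide event (pid == -1, cpu != -1) can be opened,
 * which typically requires sufficient privilege or a relaxed
 * perf_event_paranoid setting.
 */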
bool perf_can_record_cpu_wide(void)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.exclude_kernel = 1,
	};
	struct cpu_map *cpus;
	int cpu, fd;

	cpus = cpu_map__new(NULL);
	if (!cpus)
		return false;
	cpu = cpus->map[0];
	cpu_map__put(cpus);

	fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
	if (fd < 0)
		return false;
	close(fd);

	return true;
}

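/*
 * Apply the record_opts to every event in the list and decide whether
 * sample IDs (and PERF_SAMPLE_IDENTIFIER) are needed, e.g. when the
 * events have differing sample types or AUX-area tracing is in use.
 */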
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
			 struct callchain_param *callchain)
{
	struct perf_evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;
	bool sample_id = opts->sample_id;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	use_comm_exec = perf_can_comm_exec();

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__config(evsel, opts, callchain);
		if (evsel->tracking && use_comm_exec)
			evsel->attr.comm_exec = 1;
	}

	if (opts->full_auxtrace) {
		/*
		 * Need to be able to synthesize and parse selected events with
		 * arbitrary sample types, which requires always being able to
		 * match the id.
		 */
		use_sample_identifier = perf_can_sample_identifier();
		sample_id = true;
	} else if (evlist->nr_entries > 1) {
		struct perf_evsel *first = perf_evlist__first(evlist);

		evlist__for_each_entry(evlist, evsel) {
			if (evsel->attr.sample_type == first->attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		sample_id = true;
	}

	if (sample_id) {
		evlist__for_each_entry(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	}

	perf_evlist__set_id_pos(evlist);
}

static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}

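/*
 * Reconcile the user-specified period and frequency with the defaults
 * and clamp the sampling frequency to kernel/perf_event_max_sample_rate,
 * either erroring out (--strict-freq) or throttling with a warning.
 */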
static int record_opts__config_freq(struct record_opts *opts)
{
	bool user_freq = opts->user_freq != UINT_MAX;
	unsigned int max_rate;

	if (opts->user_interval != ULLONG_MAX)
		opts->default_interval = opts->user_interval;
	if (user_freq)
		opts->freq = opts->user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (opts->default_interval)
		opts->freq = 0;
	else if (opts->freq) {
		opts->default_interval = opts->freq;
	} else {
		pr_err("frequency and count are zero, aborting\n");
		return -1;
	}

	if (get_max_rate(&max_rate))
		return 0;

	/*
	 * User specified frequency is over current maximum.
	 */
	if (user_freq && (max_rate < opts->freq)) {
		if (opts->strict_freq) {
			pr_err("error: Maximum frequency rate (%'u Hz) exceeded.\n"
			       "       Please use -F freq option with a lower value or consider\n"
			       "       tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
			       max_rate);
			return -1;
		} else {
			pr_warning("warning: Maximum frequency rate (%'u Hz) exceeded, throttling from %'u Hz to %'u Hz.\n"
				   "         The limit can be raised via /proc/sys/kernel/perf_event_max_sample_rate.\n"
				   "         The kernel will lower it when perf's interrupts take too long.\n"
				   "         Use --strict-freq to disable this throttling, refusing to record.\n",
				   max_rate, opts->freq, max_rate);

			opts->freq = max_rate;
		}
	}

	/*
	 * Default frequency is over current maximum.
	 */
	if (max_rate < opts->freq) {
		pr_warning("Lowering default frequency rate to %u.\n"
			   "Please consider tweaking "
			   "/proc/sys/kernel/perf_event_max_sample_rate.\n",
			   max_rate);
		opts->freq = max_rate;
	}

	return 0;
}

int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}

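/*
 * Check whether the event string 'str' parses and can actually be opened
 * on one of the CPUs the evlist is (or would be) bound to.
 */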
bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
{
	struct perf_evlist *temp_evlist;
	struct perf_evsel *evsel;
	int err, fd, cpu;
	bool ret = false;
	pid_t pid = -1;

	temp_evlist = perf_evlist__new();
	if (!temp_evlist)
		return false;

	err = parse_events(temp_evlist, str, NULL);
	if (err)
		goto out_delete;

	evsel = perf_evlist__last(temp_evlist);

	if (!evlist || cpu_map__empty(evlist->cpus)) {
		struct cpu_map *cpus = cpu_map__new(NULL);

		cpu =  cpus ? cpus->map[0] : 0;
		cpu_map__put(cpus);
	} else {
		cpu = evlist->cpus->map[0];
	}

	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
					 perf_event_open_cloexec_flag());
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);
	ret = true;

out_delete:
	perf_evlist__delete(temp_evlist);
	return ret;
}

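/*
 * Option callback that parses a sampling frequency: either a number or
 * "max", which reads the current kernel/perf_event_max_sample_rate limit.
 */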
int record__parse_freq(const struct option *opt, const char *str, int unset __maybe_unused)
{
	unsigned int freq;
	struct record_opts *opts = opt->value;

	if (!str)
		return -EINVAL;

	if (strcasecmp(str, "max") == 0) {
		if (get_max_rate(&freq)) {
			pr_err("couldn't read /proc/sys/kernel/perf_event_max_sample_rate\n");
			return -1;
		}
		pr_info("info: Using a maximum frequency rate of %'d Hz\n", freq);
	} else {
		freq = atoi(str);
	}

	opts->user_freq = freq;
	return 0;
}
tools/perf/util/record.c, v4.10.11
 
#include "evlist.h"
#include "evsel.h"
#include "cpumap.h"
#include "parse-events.h"
#include <api/fs/fs.h>
#include "util.h"
#include "cloexec.h"

typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);

static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
{
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;
	unsigned long flags = perf_event_open_cloexec_flag();
	int err = -EAGAIN, fd;
	static pid_t pid = -1;

	evlist = perf_evlist__new();
	if (!evlist)
		return -ENOMEM;

	if (parse_events(evlist, str, NULL))
		goto out_delete;

	evsel = perf_evlist__first(evlist);

	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);

	fn(evsel);

	fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
	if (fd < 0) {
		if (errno == EINVAL)
			err = -EINVAL;
		goto out_delete;
	}
	close(fd);
	err = 0;

out_delete:
	perf_evlist__delete(evlist);
	return err;
}

static bool perf_probe_api(setup_probe_fn_t fn)
{
	const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
	struct cpu_map *cpus;
	int cpu, ret, i = 0;

	cpus = cpu_map__new(NULL);
	if (!cpus)
		return false;
	cpu = cpus->map[0];
	cpu_map__put(cpus);

	do {
		ret = perf_do_probe_api(fn, cpu, try[i++]);
		if (!ret)
			return true;
	} while (ret == -EAGAIN && try[i]);

	return false;
}

static void perf_probe_sample_identifier(struct perf_evsel *evsel)
{
	evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}

static void perf_probe_comm_exec(struct perf_evsel *evsel)
{
	evsel->attr.comm_exec = 1;
}

static void perf_probe_context_switch(struct perf_evsel *evsel)
{
	evsel->attr.context_switch = 1;
}

bool perf_can_sample_identifier(void)
{
	return perf_probe_api(perf_probe_sample_identifier);
}

static bool perf_can_comm_exec(void)
{
	return perf_probe_api(perf_probe_comm_exec);
}

bool perf_can_record_switch_events(void)
{
	return perf_probe_api(perf_probe_context_switch);
}

bool perf_can_record_cpu_wide(void)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.exclude_kernel = 1,
	};
	struct cpu_map *cpus;
	int cpu, fd;

	cpus = cpu_map__new(NULL);
	if (!cpus)
		return false;
	cpu = cpus->map[0];
	cpu_map__put(cpus);

	fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
	if (fd < 0)
		return false;
	close(fd);

	return true;
}

void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
			 struct callchain_param *callchain)
{
	struct perf_evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	use_comm_exec = perf_can_comm_exec();

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__config(evsel, opts, callchain);
		if (evsel->tracking && use_comm_exec)
			evsel->attr.comm_exec = 1;
	}

	if (opts->full_auxtrace) {
		/*
		 * Need to be able to synthesize and parse selected events with
		 * arbitrary sample types, which requires always being able to
		 * match the id.
		 */
		use_sample_identifier = perf_can_sample_identifier();
		evlist__for_each_entry(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	} else if (evlist->nr_entries > 1) {
		struct perf_evsel *first = perf_evlist__first(evlist);

		evlist__for_each_entry(evlist, evsel) {
			if (evsel->attr.sample_type == first->attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		evlist__for_each_entry(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	}

	perf_evlist__set_id_pos(evlist);
}

static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}

static int record_opts__config_freq(struct record_opts *opts)
{
	bool user_freq = opts->user_freq != UINT_MAX;
	unsigned int max_rate;

	if (opts->user_interval != ULLONG_MAX)
		opts->default_interval = opts->user_interval;
	if (user_freq)
		opts->freq = opts->user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (opts->default_interval)
		opts->freq = 0;
	else if (opts->freq) {
		opts->default_interval = opts->freq;
	} else {
		pr_err("frequency and count are zero, aborting\n");
		return -1;
	}

	if (get_max_rate(&max_rate))
		return 0;

	/*
	 * User specified frequency is over current maximum.
	 */
	if (user_freq && (max_rate < opts->freq)) {
		pr_err("Maximum frequency rate (%u) reached.\n"
		   "Please use -F freq option with lower value or consider\n"
		   "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
		   max_rate);
		return -1;
	}

	/*
	 * Default frequency is over current maximum.
	 */
	if (max_rate < opts->freq) {
		pr_warning("Lowering default frequency rate to %u.\n"
			   "Please consider tweaking "
			   "/proc/sys/kernel/perf_event_max_sample_rate.\n",
			   max_rate);
		opts->freq = max_rate;
	}

	return 0;
}

int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}

bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
{
	struct perf_evlist *temp_evlist;
	struct perf_evsel *evsel;
	int err, fd, cpu;
	bool ret = false;
	pid_t pid = -1;

	temp_evlist = perf_evlist__new();
	if (!temp_evlist)
		return false;

	err = parse_events(temp_evlist, str, NULL);
	if (err)
		goto out_delete;

	evsel = perf_evlist__last(temp_evlist);

	if (!evlist || cpu_map__empty(evlist->cpus)) {
		struct cpu_map *cpus = cpu_map__new(NULL);

		cpu =  cpus ? cpus->map[0] : 0;
		cpu_map__put(cpus);
	} else {
		cpu = evlist->cpus->map[0];
	}

	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
					 perf_event_open_cloexec_flag());
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);
	ret = true;

out_delete:
	perf_evlist__delete(temp_evlist);
	return ret;
}
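
The probing pattern used throughout both versions of this file (open a trial event, set the attribute bit under test, re-open it, and treat EINVAL as "not supported") can also be illustrated outside the perf tree. The sketch below is not taken from record.c; it is a minimal standalone illustration of the same idea using the raw perf_event_open syscall, with the helper name made up for this example, and it skips record.c's extra step of first verifying that the base event opens without the tested bit.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Probe whether this kernel accepts perf_event_attr.context_switch. */
static int context_switch_supported(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.exclude_kernel = 1;
	attr.context_switch = 1;	/* the attribute bit under test */

	/* pid == 0: this process, cpu == -1: any CPU, no group fd, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return errno == EINVAL ? 0 : -1;	/* EINVAL: attr rejected */
	close(fd);
	return 1;
}

int main(void)
{
	printf("context_switch supported: %d\n", context_switch_supported());
	return 0;
}

On kernels that predate the tested attribute bit the open fails with EINVAL, which is the same signal perf_do_probe_api() keys on; other errors (for example EACCES from a restrictive perf_event_paranoid setting) are reported as "could not probe", mirroring the -EAGAIN path above.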