1// SPDX-License-Identifier: GPL-2.0
2#include "debug.h"
3#include "evlist.h"
4#include "evsel.h"
5#include "parse-events.h"
6#include <errno.h>
7#include <limits.h>
8#include <stdlib.h>
9#include <api/fs/fs.h>
10#include <subcmd/parse-options.h>
11#include <perf/cpumap.h>
12#include "cloexec.h"
13#include "record.h"
14#include "../perf-sys.h"
15
16typedef void (*setup_probe_fn_t)(struct evsel *evsel);
17
/*
 * Probe whether the kernel accepts the attribute tweak applied by @fn.
 *
 * Parses @str into a throwaway evlist, opens the first event once unmodified
 * to prove the event itself works, then applies @fn and re-opens it.
 *
 * Returns 0 if the tweaked attribute is accepted, -EINVAL if the kernel
 * rejected it, -EAGAIN if even the unmodified event could not be opened
 * (caller may retry with a different event string), or -ENOMEM.
 */
static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
{
	struct evlist *evlist;
	struct evsel *evsel;
	unsigned long flags = perf_event_open_cloexec_flag();
	int err = -EAGAIN, fd;
	/*
	 * Sticky across calls: once we learn we lack system-wide privileges,
	 * keep probing per-process (pid 0) instead of pid == -1.
	 */
	static pid_t pid = -1;

	evlist = evlist__new();
	if (!evlist)
		return -ENOMEM;

	if (parse_events(evlist, str, NULL))
		goto out_delete;

	evsel = evlist__first(evlist);

	while (1) {
		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1, flags);
		if (fd < 0) {
			/*
			 * No permission for system-wide monitoring: fall back
			 * to self-profiling and retry once.
			 */
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);

	/* Apply the attribute under test and see if the kernel accepts it. */
	fn(evsel);

	fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1, flags);
	if (fd < 0) {
		if (errno == EINVAL)
			err = -EINVAL;
		goto out_delete;
	}
	close(fd);
	err = 0;

out_delete:
	evlist__delete(evlist);
	return err;
}
63
64static bool perf_probe_api(setup_probe_fn_t fn)
65{
66 const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
67 struct perf_cpu_map *cpus;
68 int cpu, ret, i = 0;
69
70 cpus = perf_cpu_map__new(NULL);
71 if (!cpus)
72 return false;
73 cpu = cpus->map[0];
74 perf_cpu_map__put(cpus);
75
76 do {
77 ret = perf_do_probe_api(fn, cpu, try[i++]);
78 if (!ret)
79 return true;
80 } while (ret == -EAGAIN && try[i]);
81
82 return false;
83}
84
/* Probe tweak: request self-describing sample ids (PERF_SAMPLE_IDENTIFIER). */
static void perf_probe_sample_identifier(struct evsel *evsel)
{
	evsel->core.attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}
89
/* Probe tweak: flag PERF_RECORD_COMM records caused by exec(). */
static void perf_probe_comm_exec(struct evsel *evsel)
{
	evsel->core.attr.comm_exec = 1;
}
94
/* Probe tweak: request PERF_RECORD_SWITCH context-switch records. */
static void perf_probe_context_switch(struct evsel *evsel)
{
	evsel->core.attr.context_switch = 1;
}
99
/* True if the running kernel supports PERF_SAMPLE_IDENTIFIER. */
bool perf_can_sample_identifier(void)
{
	return perf_probe_api(perf_probe_sample_identifier);
}
104
/* True if the running kernel supports the attr.comm_exec flag. */
static bool perf_can_comm_exec(void)
{
	return perf_probe_api(perf_probe_comm_exec);
}
109
/* True if the running kernel supports PERF_RECORD_SWITCH events. */
bool perf_can_record_switch_events(void)
{
	return perf_probe_api(perf_probe_context_switch);
}
114
115bool perf_can_record_cpu_wide(void)
116{
117 struct perf_event_attr attr = {
118 .type = PERF_TYPE_SOFTWARE,
119 .config = PERF_COUNT_SW_CPU_CLOCK,
120 .exclude_kernel = 1,
121 };
122 struct perf_cpu_map *cpus;
123 int cpu, fd;
124
125 cpus = perf_cpu_map__new(NULL);
126 if (!cpus)
127 return false;
128 cpu = cpus->map[0];
129 perf_cpu_map__put(cpus);
130
131 fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
132 if (fd < 0)
133 return false;
134 close(fd);
135
136 return true;
137}
138
/*
 * Apply record_opts and callchain configuration to every evsel in @evlist,
 * and decide how samples will be matched back to their events (sample id).
 */
void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,
			 struct callchain_param *callchain)
{
	struct evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;
	bool sample_id = opts->sample_id;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	/* map[0] < 0 indicates a per-thread (dummy) cpu map. */
	if (evlist->core.cpus->map[0] < 0)
		opts->no_inherit = true;

	use_comm_exec = perf_can_comm_exec();

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__config(evsel, opts, callchain);
		/* Only the tracking event needs exec-flagged COMM records. */
		if (evsel->tracking && use_comm_exec)
			evsel->core.attr.comm_exec = 1;
	}

	if (opts->full_auxtrace) {
		/*
		 * Need to be able to synthesize and parse selected events with
		 * arbitrary sample types, which requires always being able to
		 * match the id.
		 */
		use_sample_identifier = perf_can_sample_identifier();
		sample_id = true;
	} else if (evlist->core.nr_entries > 1) {
		struct evsel *first = evlist__first(evlist);

		/*
		 * Events with differing sample_type cannot share a fixed id
		 * position; use PERF_SAMPLE_IDENTIFIER if the kernel has it.
		 */
		evlist__for_each_entry(evlist, evsel) {
			if (evsel->core.attr.sample_type == first->core.attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		sample_id = true;
	}

	if (sample_id) {
		evlist__for_each_entry(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	}

	perf_evlist__set_id_pos(evlist);
}
192
/*
 * Read the system-wide sampling rate limit from the
 * kernel/perf_event_max_sample_rate sysctl.  Returns 0 on success.
 */
static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}
197
198static int record_opts__config_freq(struct record_opts *opts)
199{
200 bool user_freq = opts->user_freq != UINT_MAX;
201 unsigned int max_rate;
202
203 if (opts->user_interval != ULLONG_MAX)
204 opts->default_interval = opts->user_interval;
205 if (user_freq)
206 opts->freq = opts->user_freq;
207
208 /*
209 * User specified count overrides default frequency.
210 */
211 if (opts->default_interval)
212 opts->freq = 0;
213 else if (opts->freq) {
214 opts->default_interval = opts->freq;
215 } else {
216 pr_err("frequency and count are zero, aborting\n");
217 return -1;
218 }
219
220 if (get_max_rate(&max_rate))
221 return 0;
222
223 /*
224 * User specified frequency is over current maximum.
225 */
226 if (user_freq && (max_rate < opts->freq)) {
227 if (opts->strict_freq) {
228 pr_err("error: Maximum frequency rate (%'u Hz) exceeded.\n"
229 " Please use -F freq option with a lower value or consider\n"
230 " tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
231 max_rate);
232 return -1;
233 } else {
234 pr_warning("warning: Maximum frequency rate (%'u Hz) exceeded, throttling from %'u Hz to %'u Hz.\n"
235 " The limit can be raised via /proc/sys/kernel/perf_event_max_sample_rate.\n"
236 " The kernel will lower it when perf's interrupts take too long.\n"
237 " Use --strict-freq to disable this throttling, refusing to record.\n",
238 max_rate, opts->freq, max_rate);
239
240 opts->freq = max_rate;
241 }
242 }
243
244 /*
245 * Default frequency is over current maximum.
246 */
247 if (max_rate < opts->freq) {
248 pr_warning("Lowering default frequency rate to %u.\n"
249 "Please consider tweaking "
250 "/proc/sys/kernel/perf_event_max_sample_rate.\n",
251 max_rate);
252 opts->freq = max_rate;
253 }
254
255 return 0;
256}
257
/* Validate and finalize record options; currently only frequency/period. */
int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}
262
/*
 * Check whether the event string @str parses and can actually be opened on
 * this system.  The probe targets a CPU from @evlist's cpu map when it has
 * one, otherwise the first online CPU.
 */
bool perf_evlist__can_select_event(struct evlist *evlist, const char *str)
{
	struct evlist *temp_evlist;
	struct evsel *evsel;
	int err, fd, cpu;
	bool ret = false;
	pid_t pid = -1;

	temp_evlist = evlist__new();
	if (!temp_evlist)
		return false;

	err = parse_events(temp_evlist, str, NULL);
	if (err)
		goto out_delete;

	/* parse_events() appends, so the new event is the last one. */
	evsel = evlist__last(temp_evlist);

	if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);

		cpu = cpus ? cpus->map[0] : 0;
		perf_cpu_map__put(cpus);
	} else {
		cpu = evlist->core.cpus->map[0];
	}

	while (1) {
		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1,
					 perf_event_open_cloexec_flag());
		if (fd < 0) {
			/* Retry as per-process if system-wide is not allowed. */
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);
	ret = true;

out_delete:
	evlist__delete(temp_evlist);
	return ret;
}
309
310int record__parse_freq(const struct option *opt, const char *str, int unset __maybe_unused)
311{
312 unsigned int freq;
313 struct record_opts *opts = opt->value;
314
315 if (!str)
316 return -EINVAL;
317
318 if (strcasecmp(str, "max") == 0) {
319 if (get_max_rate(&freq)) {
320 pr_err("couldn't read /proc/sys/kernel/perf_event_max_sample_rate\n");
321 return -1;
322 }
323 pr_info("info: Using a maximum frequency rate of %'d Hz\n", freq);
324 } else {
325 freq = atoi(str);
326 }
327
328 opts->user_freq = freq;
329 return 0;
330}
1// SPDX-License-Identifier: GPL-2.0
2#include "debug.h"
3#include "evlist.h"
4#include "evsel.h"
5#include "evsel_config.h"
6#include "parse-events.h"
7#include <errno.h>
8#include <limits.h>
9#include <stdlib.h>
10#include <api/fs/fs.h>
11#include <subcmd/parse-options.h>
12#include <perf/cpumap.h>
13#include "cloexec.h"
14#include "util/perf_api_probe.h"
15#include "record.h"
16#include "../perf-sys.h"
17#include "topdown.h"
18#include "map_symbol.h"
19#include "mem-events.h"
20
/*
 * evsel__config_leader_sampling() uses special rules for leader sampling.
 * However, if the leader is an AUX area event, then assume the event to sample
 * is the next event.
 */
static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evlist)
{
	struct evsel *leader = evsel__leader(evsel);

	if (evsel__is_aux_event(leader) || arch_topdown_sample_read(leader) ||
	    is_mem_loads_aux_event(leader)) {
		/*
		 * NB: 'evsel' is reused as the loop iterator; the parameter
		 * value is no longer needed past this point.
		 */
		evlist__for_each_entry(evlist, evsel) {
			/* First group member that is not the leader itself. */
			if (evsel__leader(evsel) == leader && evsel != evsel__leader(evsel))
				return evsel;
		}
	}

	return leader;
}
40
41static u64 evsel__config_term_mask(struct evsel *evsel)
42{
43 struct evsel_config_term *term;
44 struct list_head *config_terms = &evsel->config_terms;
45 u64 term_types = 0;
46
47 list_for_each_entry(term, config_terms, list) {
48 term_types |= 1 << term->type;
49 }
50 return term_types;
51}
52
/*
 * For leader sampling (leader->sample_read set) group members do not sample
 * on their own: their counts are delivered inside the sampling event's
 * samples.  Strip the sampling-related attributes from every member except
 * the one that actually 'leads' the sampling (see evsel__read_sampler()).
 */
static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *evlist)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel__leader(evsel);
	struct evsel *read_sampler;
	u64 term_types, freq_mask;

	/* Nothing to do unless the group uses leader sampling. */
	if (!leader->sample_read)
		return;

	read_sampler = evsel__read_sampler(evsel, evlist);

	/* The sampling event itself keeps its configuration. */
	if (evsel == read_sampler)
		return;

	term_types = evsel__config_term_mask(evsel);
	/*
	 * Disable sampling for all group members except those with explicit
	 * config terms or the leader. In the case of an AUX area event, the 2nd
	 * event in the group is the one that 'leads' the sampling.
	 */
	freq_mask = (1 << EVSEL__CONFIG_TERM_FREQ) | (1 << EVSEL__CONFIG_TERM_PERIOD);
	if ((term_types & freq_mask) == 0) {
		attr->freq = 0;
		attr->sample_freq = 0;
		attr->sample_period = 0;
	}
	if ((term_types & (1 << EVSEL__CONFIG_TERM_OVERWRITE)) == 0)
		attr->write_backward = 0;

	/*
	 * We don't get a sample for slave events, we make them when delivering
	 * the group leader sample. Set the slave event to follow the master
	 * sample_type to ease up reporting.
	 * An AUX area event also has sample_type requirements, so also include
	 * the sample type bits from the leader's sample_type to cover that
	 * case.
	 */
	attr->sample_type = read_sampler->core.attr.sample_type |
			    leader->core.attr.sample_type;
}
94
/*
 * Apply record_opts and callchain configuration to every evsel in @evlist,
 * wire up leader sampling, and decide how sample ids will be laid out.
 */
void evlist__config(struct evlist *evlist, struct record_opts *opts, struct callchain_param *callchain)
{
	struct evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;
	bool sample_id = opts->sample_id;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		evlist__set_leader(evlist);

	/* map[0] < 0 indicates a per-thread (dummy) cpu map. */
	if (evlist->core.cpus->map[0] < 0)
		opts->no_inherit = true;

	use_comm_exec = perf_can_comm_exec();

	evlist__for_each_entry(evlist, evsel) {
		evsel__config(evsel, opts, callchain);
		/* Only the tracking event needs exec-flagged COMM records. */
		if (evsel->tracking && use_comm_exec)
			evsel->core.attr.comm_exec = 1;
	}

	/* Configure leader sampling here now that the sample type is known */
	evlist__for_each_entry(evlist, evsel)
		evsel__config_leader_sampling(evsel, evlist);

	if (opts->full_auxtrace) {
		/*
		 * Need to be able to synthesize and parse selected events with
		 * arbitrary sample types, which requires always being able to
		 * match the id.
		 */
		use_sample_identifier = perf_can_sample_identifier();
		sample_id = true;
	} else if (evlist->core.nr_entries > 1) {
		struct evsel *first = evlist__first(evlist);

		/*
		 * Events with differing sample_type cannot share a fixed id
		 * position; use PERF_SAMPLE_IDENTIFIER if the kernel has it.
		 */
		evlist__for_each_entry(evlist, evsel) {
			if (evsel->core.attr.sample_type == first->core.attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		sample_id = true;
	}

	if (sample_id) {
		evlist__for_each_entry(evlist, evsel)
			evsel__set_sample_id(evsel, use_sample_identifier);
	}

	evlist__set_id_pos(evlist);
}
151
/*
 * Read the system-wide sampling rate limit from the
 * kernel/perf_event_max_sample_rate sysctl.  Returns 0 on success.
 */
static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}
156
/*
 * Resolve the user's -F (frequency) / -c (period) options into opts->freq
 * and opts->default_interval, then clamp the frequency against the
 * kernel/perf_event_max_sample_rate limit.
 *
 * Returns 0 on success, -1 on conflicting or all-zero settings.
 */
static int record_opts__config_freq(struct record_opts *opts)
{
	bool user_freq = opts->user_freq != UINT_MAX;
	bool user_interval = opts->user_interval != ULLONG_MAX;
	unsigned int max_rate;

	/* Frequency and period are mutually exclusive. */
	if (user_interval && user_freq) {
		pr_err("cannot set frequency and period at the same time\n");
		return -1;
	}

	if (user_interval)
		opts->default_interval = opts->user_interval;
	if (user_freq)
		opts->freq = opts->user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (opts->default_interval)
		opts->freq = 0;
	else if (opts->freq) {
		opts->default_interval = opts->freq;
	} else {
		pr_err("frequency and count are zero, aborting\n");
		return -1;
	}

	/* Cannot read the limit: nothing to clamp against. */
	if (get_max_rate(&max_rate))
		return 0;

	/*
	 * User specified frequency is over current maximum.
	 */
	if (user_freq && (max_rate < opts->freq)) {
		if (opts->strict_freq) {
			pr_err("error: Maximum frequency rate (%'u Hz) exceeded.\n"
			       " Please use -F freq option with a lower value or consider\n"
			       " tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
			       max_rate);
			return -1;
		} else {
			pr_warning("warning: Maximum frequency rate (%'u Hz) exceeded, throttling from %'u Hz to %'u Hz.\n"
				   " The limit can be raised via /proc/sys/kernel/perf_event_max_sample_rate.\n"
				   " The kernel will lower it when perf's interrupts take too long.\n"
				   " Use --strict-freq to disable this throttling, refusing to record.\n",
				   max_rate, opts->freq, max_rate);

			opts->freq = max_rate;
		}
	}

	/*
	 * Default frequency is over current maximum.
	 */
	if (max_rate < opts->freq) {
		pr_warning("Lowering default frequency rate from %u to %u.\n"
			   "Please consider tweaking "
			   "/proc/sys/kernel/perf_event_max_sample_rate.\n",
			   opts->freq, max_rate);
		opts->freq = max_rate;
	}

	return 0;
}
222
/* Validate and finalize record options; currently only frequency/period. */
int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}
227
/*
 * Check whether the event string @str parses and can actually be opened on
 * this system.  The probe targets a CPU from @evlist's cpu map when it has
 * one, otherwise the first online CPU.
 */
bool evlist__can_select_event(struct evlist *evlist, const char *str)
{
	struct evlist *temp_evlist;
	struct evsel *evsel;
	int err, fd, cpu;
	bool ret = false;
	pid_t pid = -1;

	temp_evlist = evlist__new();
	if (!temp_evlist)
		return false;

	err = parse_events(temp_evlist, str, NULL);
	if (err)
		goto out_delete;

	/* parse_events() appends, so the new event is the last one. */
	evsel = evlist__last(temp_evlist);

	if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);

		cpu = cpus ? cpus->map[0] : 0;
		perf_cpu_map__put(cpus);
	} else {
		cpu = evlist->core.cpus->map[0];
	}

	while (1) {
		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1,
					 perf_event_open_cloexec_flag());
		if (fd < 0) {
			/* Retry as per-process if system-wide is not allowed. */
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);
	ret = true;

out_delete:
	evlist__delete(temp_evlist);
	return ret;
}
274
275int record__parse_freq(const struct option *opt, const char *str, int unset __maybe_unused)
276{
277 unsigned int freq;
278 struct record_opts *opts = opt->value;
279
280 if (!str)
281 return -EINVAL;
282
283 if (strcasecmp(str, "max") == 0) {
284 if (get_max_rate(&freq)) {
285 pr_err("couldn't read /proc/sys/kernel/perf_event_max_sample_rate\n");
286 return -1;
287 }
288 pr_info("info: Using a maximum frequency rate of %'d Hz\n", freq);
289 } else {
290 freq = atoi(str);
291 }
292
293 opts->user_freq = freq;
294 return 0;
295}