1#include "evlist.h"
2#include "evsel.h"
3#include "cpumap.h"
4#include "parse-events.h"
5#include <api/fs/fs.h>
6#include "util.h"
7#include "cloexec.h"
8
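/*
 * A setup_probe_fn_t callback tweaks a single attribute on a parsed evsel
 * so that perf_do_probe_api() can test whether the running kernel accepts
 * the resulting perf_event_attr.
 */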
typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);

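/*
 * Parse 'str', open it once to find a working pid/cpu combination (falling
 * back from system-wide to per-process monitoring on EACCES; the fallback
 * sticks, since 'pid' is static), apply 'fn' to the attribute and reopen.
 * Returns 0 if the kernel accepts the modified attribute, -EINVAL if it
 * rejects it, and -EAGAIN for other failures so the caller may retry with
 * a different event string.
 */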
static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
{
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;
	unsigned long flags = perf_event_open_cloexec_flag();
	int err = -EAGAIN, fd;
	static pid_t pid = -1;

	evlist = perf_evlist__new();
	if (!evlist)
		return -ENOMEM;

	if (parse_events(evlist, str, NULL))
		goto out_delete;

	evsel = perf_evlist__first(evlist);

	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);

	fn(evsel);

	fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
	if (fd < 0) {
		if (errno == EINVAL)
			err = -EINVAL;
		goto out_delete;
	}
	close(fd);
	err = 0;

out_delete:
	perf_evlist__delete(evlist);
	return err;
}

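/*
 * Run the probe against a short list of ubiquitous user-space events,
 * moving on to the next one while the failure looks retryable (-EAGAIN).
 */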
static bool perf_probe_api(setup_probe_fn_t fn)
{
	const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
	struct cpu_map *cpus;
	int cpu, ret, i = 0;

	cpus = cpu_map__new(NULL);
	if (!cpus)
		return false;
	cpu = cpus->map[0];
	cpu_map__put(cpus);

	do {
		ret = perf_do_probe_api(fn, cpu, try[i++]);
		if (!ret)
			return true;
	} while (ret == -EAGAIN && try[i]);

	return false;
}

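/* Setter callbacks handed to perf_probe_api() by the perf_can_*() helpers. */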
static void perf_probe_sample_identifier(struct perf_evsel *evsel)
{
	evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}

static void perf_probe_comm_exec(struct perf_evsel *evsel)
{
	evsel->attr.comm_exec = 1;
}

static void perf_probe_context_switch(struct perf_evsel *evsel)
{
	evsel->attr.context_switch = 1;
}

bool perf_can_sample_identifier(void)
{
	return perf_probe_api(perf_probe_sample_identifier);
}

static bool perf_can_comm_exec(void)
{
	return perf_probe_api(perf_probe_comm_exec);
}

bool perf_can_record_switch_events(void)
{
	return perf_probe_api(perf_probe_context_switch);
}

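/*
 * See whether a system-wide (pid == -1) software event can be opened on
 * one CPU, i.e. whether the current user may do CPU-wide recording.
 */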
bool perf_can_record_cpu_wide(void)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.exclude_kernel = 1,
	};
	struct cpu_map *cpus;
	int cpu, fd;

	cpus = cpu_map__new(NULL);
	if (!cpus)
		return false;
	cpu = cpus->map[0];
	cpu_map__put(cpus);

	fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
	if (fd < 0)
		return false;
	close(fd);

	return true;
}

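/*
 * Apply the record options to every evsel and decide whether
 * PERF_SAMPLE_IDENTIFIER is needed to match samples back to events: it is
 * when AUX area tracing is on, or when the events in the list do not all
 * share the same sample_type.
 */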
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
{
	struct perf_evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	use_comm_exec = perf_can_comm_exec();

	evlist__for_each(evlist, evsel) {
		perf_evsel__config(evsel, opts);
		if (evsel->tracking && use_comm_exec)
			evsel->attr.comm_exec = 1;
	}

	if (opts->full_auxtrace) {
		/*
		 * Need to be able to synthesize and parse selected events with
		 * arbitrary sample types, which requires always being able to
		 * match the id.
		 */
		use_sample_identifier = perf_can_sample_identifier();
		evlist__for_each(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	} else if (evlist->nr_entries > 1) {
		struct perf_evsel *first = perf_evlist__first(evlist);

		evlist__for_each(evlist, evsel) {
			if (evsel->attr.sample_type == first->attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		evlist__for_each(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	}

	perf_evlist__set_id_pos(evlist);
}

static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}

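/*
 * Resolve the sampling period/frequency: an explicit user period takes
 * precedence over an explicit user frequency, which takes precedence over
 * the defaults, and the frequency is then checked against
 * kernel/perf_event_max_sample_rate where that sysctl is readable.
 */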
static int record_opts__config_freq(struct record_opts *opts)
{
	bool user_freq = opts->user_freq != UINT_MAX;
	unsigned int max_rate;

	if (opts->user_interval != ULLONG_MAX)
		opts->default_interval = opts->user_interval;
	if (user_freq)
		opts->freq = opts->user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (opts->default_interval)
		opts->freq = 0;
	else if (opts->freq) {
		opts->default_interval = opts->freq;
	} else {
		pr_err("frequency and count are zero, aborting\n");
		return -1;
	}

	if (get_max_rate(&max_rate))
		return 0;

	/*
	 * User specified frequency is over current maximum.
	 */
	if (user_freq && (max_rate < opts->freq)) {
		pr_err("Maximum frequency rate (%u) reached.\n"
		       "Please use -F freq option with lower value or consider\n"
		       "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
		       max_rate);
		return -1;
	}

	/*
	 * Default frequency is over current maximum.
	 */
	if (max_rate < opts->freq) {
		pr_warning("Lowering default frequency rate to %u.\n"
			   "Please consider tweaking "
			   "/proc/sys/kernel/perf_event_max_sample_rate.\n",
			   max_rate);
		opts->freq = max_rate;
	}

	return 0;
}

int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}

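/*
 * Check that 'str' parses as an event and that it can actually be opened
 * on a CPU from 'evlist' (or on the first online CPU when the evlist has
 * no CPUs), with the same per-process fallback on EACCES as the probes
 * above.
 */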
bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
{
	struct perf_evlist *temp_evlist;
	struct perf_evsel *evsel;
	int err, fd, cpu;
	bool ret = false;
	pid_t pid = -1;

	temp_evlist = perf_evlist__new();
	if (!temp_evlist)
		return false;

	err = parse_events(temp_evlist, str, NULL);
	if (err)
		goto out_delete;

	evsel = perf_evlist__last(temp_evlist);

	if (!evlist || cpu_map__empty(evlist->cpus)) {
		struct cpu_map *cpus = cpu_map__new(NULL);

		cpu = cpus ? cpus->map[0] : 0;
		cpu_map__put(cpus);
	} else {
		cpu = evlist->cpus->map[0];
	}

	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
					 perf_event_open_cloexec_flag());
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);
	ret = true;

out_delete:
	perf_evlist__delete(temp_evlist);
	return ret;
}

// SPDX-License-Identifier: GPL-2.0
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "evsel_config.h"
#include "parse-events.h"
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <api/fs/fs.h>
#include <subcmd/parse-options.h>
#include <perf/cpumap.h>
#include "cloexec.h"
#include "util/perf_api_probe.h"
#include "record.h"
#include "../perf-sys.h"
#include "topdown.h"
#include "map_symbol.h"
#include "mem-events.h"

/*
 * evsel__config_leader_sampling() uses special rules for leader sampling.
 * However, if the leader is an AUX area event, then assume the event to sample
 * is the next event.
 */
static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evlist)
{
	struct evsel *leader = evsel__leader(evsel);

	if (evsel__is_aux_event(leader) || arch_topdown_sample_read(leader) ||
	    is_mem_loads_aux_event(leader)) {
		evlist__for_each_entry(evlist, evsel) {
			if (evsel__leader(evsel) == leader && evsel != evsel__leader(evsel))
				return evsel;
		}
	}

	return leader;
}

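/* Build a bitmask of the config term types set explicitly on this event. */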
static u64 evsel__config_term_mask(struct evsel *evsel)
{
	struct evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	u64 term_types = 0;

	list_for_each_entry(term, config_terms, list) {
		term_types |= 1 << term->type;
	}
	return term_types;
}

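/*
 * With leader sampling only the "read sampler" event actually samples:
 * strip the sampling settings from the other group members unless they
 * carry explicit period/freq terms, and align their sample_type with the
 * sampler's so their samples can be reported consistently.
 */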
static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *evlist)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel__leader(evsel);
	struct evsel *read_sampler;
	u64 term_types, freq_mask;

	if (!leader->sample_read)
		return;

	read_sampler = evsel__read_sampler(evsel, evlist);

	if (evsel == read_sampler)
		return;

	term_types = evsel__config_term_mask(evsel);
	/*
	 * Disable sampling for all group members except those with explicit
	 * config terms or the leader. In the case of an AUX area event, the 2nd
	 * event in the group is the one that 'leads' the sampling.
	 */
	freq_mask = (1 << EVSEL__CONFIG_TERM_FREQ) | (1 << EVSEL__CONFIG_TERM_PERIOD);
	if ((term_types & freq_mask) == 0) {
		attr->freq = 0;
		attr->sample_freq = 0;
		attr->sample_period = 0;
	}
	if ((term_types & (1 << EVSEL__CONFIG_TERM_OVERWRITE)) == 0)
		attr->write_backward = 0;

	/*
	 * We don't get a sample for slave events, we make them when delivering
	 * the group leader sample. Set the slave event to follow the master
	 * sample_type to ease up reporting.
	 * An AUX area event also has sample_type requirements, so also include
	 * the sample type bits from the leader's sample_type to cover that
	 * case.
	 */
	attr->sample_type = read_sampler->core.attr.sample_type |
			    leader->core.attr.sample_type;
}

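/*
 * Apply the record options and callchain parameters to every evsel, fix up
 * leader sampling, and decide whether sample IDs (and the
 * PERF_SAMPLE_IDENTIFIER layout) are needed to match samples to events.
 */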
void evlist__config(struct evlist *evlist, struct record_opts *opts, struct callchain_param *callchain)
{
	struct evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;
	bool sample_id = opts->sample_id;

	if (perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0).cpu < 0)
		opts->no_inherit = true;

	use_comm_exec = perf_can_comm_exec();

	evlist__for_each_entry(evlist, evsel) {
		evsel__config(evsel, opts, callchain);
		if (evsel->tracking && use_comm_exec)
			evsel->core.attr.comm_exec = 1;
	}

	/* Configure leader sampling here now that the sample type is known */
	evlist__for_each_entry(evlist, evsel)
		evsel__config_leader_sampling(evsel, evlist);

	if (opts->full_auxtrace || opts->sample_identifier) {
		/*
		 * Need to be able to synthesize and parse selected events with
		 * arbitrary sample types, which requires always being able to
		 * match the id.
		 */
		use_sample_identifier = perf_can_sample_identifier();
		sample_id = true;
	} else if (evlist->core.nr_entries > 1) {
		struct evsel *first = evlist__first(evlist);

		evlist__for_each_entry(evlist, evsel) {
			if (evsel->core.attr.sample_type == first->core.attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		sample_id = true;
	}

	if (sample_id) {
		evlist__for_each_entry(evlist, evsel)
			evsel__set_sample_id(evsel, use_sample_identifier);
	}

	evlist__set_id_pos(evlist);
}

static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}

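/*
 * Resolve the sampling period/frequency: period and frequency are mutually
 * exclusive, an explicit period disables frequency mode, and a frequency
 * above kernel/perf_event_max_sample_rate is either rejected
 * (--strict-freq) or throttled down to the limit.
 */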
static int record_opts__config_freq(struct record_opts *opts)
{
	bool user_freq = opts->user_freq != UINT_MAX;
	bool user_interval = opts->user_interval != ULLONG_MAX;
	unsigned int max_rate;

	if (user_interval && user_freq) {
		pr_err("cannot set frequency and period at the same time\n");
		return -1;
	}

	if (user_interval)
		opts->default_interval = opts->user_interval;
	if (user_freq)
		opts->freq = opts->user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (opts->default_interval)
		opts->freq = 0;
	else if (opts->freq) {
		opts->default_interval = opts->freq;
	} else {
		pr_err("frequency and count are zero, aborting\n");
		return -1;
	}

	if (get_max_rate(&max_rate))
		return 0;

	/*
	 * User specified frequency is over current maximum.
	 */
	if (user_freq && (max_rate < opts->freq)) {
		if (opts->strict_freq) {
			pr_err("error: Maximum frequency rate (%'u Hz) exceeded.\n"
			       "       Please use -F freq option with a lower value or consider\n"
			       "       tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
			       max_rate);
			return -1;
		} else {
			pr_warning("warning: Maximum frequency rate (%'u Hz) exceeded, throttling from %'u Hz to %'u Hz.\n"
				   "         The limit can be raised via /proc/sys/kernel/perf_event_max_sample_rate.\n"
				   "         The kernel will lower it when perf's interrupts take too long.\n"
				   "         Use --strict-freq to disable this throttling, refusing to record.\n",
				   max_rate, opts->freq, max_rate);

			opts->freq = max_rate;
		}
	}

	/*
	 * Default frequency is over current maximum.
	 */
	if (max_rate < opts->freq) {
		pr_warning("Lowering default frequency rate from %u to %u.\n"
			   "Please consider tweaking "
			   "/proc/sys/kernel/perf_event_max_sample_rate.\n",
			   opts->freq, max_rate);
		opts->freq = max_rate;
	}

	return 0;
}

int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}

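/*
 * Check that 'str' parses as an event and can actually be opened on a CPU
 * from 'evlist' (or on the first online CPU when the evlist has none),
 * falling back from system-wide to per-process monitoring on EACCES.
 */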
bool evlist__can_select_event(struct evlist *evlist, const char *str)
{
	struct evlist *temp_evlist;
	struct evsel *evsel;
	int err, fd;
	struct perf_cpu cpu = { .cpu = 0 };
	bool ret = false;
	pid_t pid = -1;

	temp_evlist = evlist__new();
	if (!temp_evlist)
		return false;

	err = parse_event(temp_evlist, str);
	if (err)
		goto out_delete;

	evsel = evlist__last(temp_evlist);

	if (!evlist || perf_cpu_map__is_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) {
		struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();

		if (cpus)
			cpu = perf_cpu_map__cpu(cpus, 0);

		perf_cpu_map__put(cpus);
	} else {
		cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0);
	}

	while (1) {
		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1,
					 perf_event_open_cloexec_flag());
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);
	ret = true;

out_delete:
	evlist__delete(temp_evlist);
	return ret;
}

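/*
 * Option callback for -F/--freq: accepts a number or "max", which expands
 * to the current value of kernel/perf_event_max_sample_rate.
 */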
int record__parse_freq(const struct option *opt, const char *str, int unset __maybe_unused)
{
	unsigned int freq;
	struct record_opts *opts = opt->value;

	if (!str)
		return -EINVAL;

	if (strcasecmp(str, "max") == 0) {
		if (get_max_rate(&freq)) {
			pr_err("couldn't read /proc/sys/kernel/perf_event_max_sample_rate\n");
			return -1;
		}
		pr_info("info: Using a maximum frequency rate of %'d Hz\n", freq);
	} else {
		freq = atoi(str);
	}

	opts->user_freq = freq;
	return 0;
}