// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/string.h>
/* For the CLR_() macros */
#include <pthread.h>

#include <sched.h>
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "record.h"
#include "tests.h"
#include "util/mmap.h"

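/*
 * Narrow @pid's affinity mask down to the first CPU it may run on: every
 * other bit in @maskp is cleared and the number of that first CPU is
 * returned, or -1 on error.
 */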
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
	int i, cpu = -1, nrcpus = 1024;
realloc:
	CPU_ZERO(maskp);

	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET(i, maskp)) {
			if (cpu == -1)
				cpu = i;
			else
				CPU_CLR(i, maskp);
		}
	}

	return cpu;
}

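/*
 * Record a "sleep 1" workload pinned to a single CPU and check that the
 * kernel emits the expected PERF_RECORD_* side-band events (COMM, MMAP,
 * MMAP2, FORK, EXIT) with consistent pid/tid, cpu and timestamps.
 */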
int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unused)
{
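	/*
	 * uid == UINT_MAX means "no uid filter"; no_buffering asks for a
	 * wakeup on every event and mmap_pages sizes each ring buffer at
	 * 256 pages.
	 */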
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_buffering = true,
		.mmap_pages = 256,
	};
	cpu_set_t cpu_mask;
	size_t cpu_mask_size = sizeof(cpu_mask);
	struct evlist *evlist = perf_evlist__new_dummy();
	struct evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname, *mmap_filename;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_coreutils_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
	char sbuf[STRERR_BUFSIZE];

	if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
		evlist = perf_evlist__new_default();

	if (evlist == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run: it is forked now, and then
	 * it waits for perf_evlist__start_workload() to exec it. This is done
	 * this way so that we have time to open the evlist (calling
	 * sys_perf_event_open on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Configure the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = evlist__first(evlist);
	perf_evsel__set_sample_bit(evsel, CPU);
	perf_evsel__set_sample_bit(evsel, TID);
	perf_evsel__set_sample_bit(evsel, TIME);
	perf_evlist__config(evlist, &opts, NULL);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

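	/*
	 * On success the helper returned the number of the only CPU left in
	 * cpu_mask; remember it so every sample can be checked against it.
	 */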
	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = evlist__open(evlist);
	if (err < 0) {
		pr_debug("evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = evlist__mmap(evlist, opts.mmap_pages);
	if (err < 0) {
		pr_debug("evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events; they will
	 * count only on workload.pid, which will start...
	 */
	evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);

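	/*
	 * Read events from all the ring buffers until the workload's
	 * PERF_RECORD_EXIT shows up (or we give up after a few one second
	 * naps), sanity checking each event along the way.
	 */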
	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->core.nr_mmaps; i++) {
			union perf_event *event;
			struct mmap *md;

			md = &evlist->mmap[i];
			if (perf_mmap__read_init(md) < 0)
				continue;

			while ((event = perf_mmap__read_event(md)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_evlist__parse_sample(evlist, event, &sample);
				if (err < 0) {
					if (verbose > 0)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_delete_evlist;
				}

				if (verbose > 0) {
					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

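				/*
				 * COMM, MMAP, MMAP2, FORK and EXIT records
				 * all carry the pid right after the header,
				 * so event->comm.pid aliases the right field
				 * for each of them.
				 */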
				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2 ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				    (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2) &&
				    event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

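				/*
				 * Per record type checks: the COMM must match
				 * the workload name, and the MMAP/MMAP2
				 * events are scanned for the DSOs we expect
				 * the workload to map (the binary itself,
				 * libc, the loader and the vDSO).
				 */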
				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					mmap_filename = event->mmap.filename;
					goto check_bname;
				case PERF_RECORD_MMAP2:
					mmap_filename = event->mmap2.filename;
				check_bname:
					bname = strrchr(mmap_filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_coreutils_mmap)
							found_coreutils_mmap = !strcmp(bname + 1, "coreutils");
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(mmap_filename, "[vdso]");
					break;

				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}

				perf_mmap__consume(md);
			}
			perf_mmap__read_done(md);
		}

		/*
		 * We don't use poll() here because, at least as of kernel 3.1,
		 * the PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events; only PERF_RECORD_SAMPLE does.
		 */
		if (total_events == before && false)
			evlist__poll(evlist, -1);

		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

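/*
 * The workload has exited (or we gave up waiting for it): check that the
 * expected events showed up and that nothing appeared in excess.
 */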
found_exit:
	if (nr_events[PERF_RECORD_COMM] > 1 + !!found_coreutils_mmap) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap && !found_coreutils_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_delete_evlist:
	evlist__delete(evlist);
out:
	return (err < 0 || errs > 0) ? -1 : 0;
}