// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
/* For the CLR_() macros */
#include <pthread.h>

#include <sched.h>
#include "evlist.h"
#include "evsel.h"
#include "perf.h"
#include "debug.h"
#include "tests.h"

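/*
 * Return the first CPU set in @pid's affinity mask, clearing every other
 * bit in @maskp so the caller can reuse the mask to pin the task to that
 * single CPU. On EINVAL, retry assuming a larger possible-CPU count, up
 * to 1024 << 8.
 */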
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
	int i, cpu = -1, nrcpus = 1024;
realloc:
	CPU_ZERO(maskp);

	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET(i, maskp)) {
			if (cpu == -1)
				cpu = i;
			else
				CPU_CLR(i, maskp);
		}
	}

	return cpu;
}

int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unused)
{
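	/*
	 * Record settings for the forked workload: .no_buffering and 256
	 * ring-buffer pages keep events flowing promptly to the read loop
	 * below, and .uid = UINT_MAX leaves the uid target unset.
	 */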
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.no_buffering = true,
		.mmap_pages = 256,
	};
	cpu_set_t cpu_mask;
	size_t cpu_mask_size = sizeof(cpu_mask);
	struct perf_evlist *evlist = perf_evlist__new_dummy();
	struct perf_evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname, *mmap_filename;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
	char sbuf[STRERR_BUFSIZE];

	if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
		evlist = perf_evlist__new_default();

	if (evlist == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
	 * for perf_evlist__start_workload() to exec it. This is done this way
	 * so that we have time to open the evlist (calling sys_perf_event_open
	 * on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = perf_evlist__first(evlist);
	perf_evsel__set_sample_bit(evsel, CPU);
	perf_evsel__set_sample_bit(evsel, TID);
	perf_evsel__set_sample_bit(evsel, TIME);
	perf_evlist__config(evlist, &opts, NULL);

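	/*
	 * Pick the first CPU the forked workload may run on; the mask comes
	 * back trimmed to just that CPU so it can be pinned below.
	 */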
	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = perf_evlist__mmap(evlist, opts.mmap_pages);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events, they will
	 * count just on workload.pid, which will start...
	 */
	perf_evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);

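	/*
	 * Drain the ring buffers, sanity checking every event: timestamps
	 * must not go backwards, and cpu/pid/tid must match the pinned
	 * workload. Stop once PERF_RECORD_EXIT is seen.
	 */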
	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;
			struct perf_mmap *md;

			md = &evlist->mmap[i];
			if (perf_mmap__read_init(md) < 0)
				continue;

			while ((event = perf_mmap__read_event(md)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_evlist__parse_sample(evlist, event, &sample);
				if (err < 0) {
					if (verbose > 0)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_delete_evlist;
				}

				if (verbose > 0) {
					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2 ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				    (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					mmap_filename = event->mmap.filename;
					goto check_bname;
				case PERF_RECORD_MMAP2:
					mmap_filename = event->mmap2.filename;
				check_bname:
					bname = strrchr(mmap_filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(mmap_filename, "[vdso]");
					break;

				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}

				perf_mmap__consume(md);
			}
			perf_mmap__read_done(md);
		}

		/*
		 * We don't use poll here because at least at 3.1 times the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does.
		 */
		if (total_events == before && false)
			perf_evlist__poll(evlist, -1);

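		/*
		 * Instead, wake up once a second; give up if no
		 * PERF_RECORD_EXIT has shown up after five wakeups.
		 */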
		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
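	/*
	 * A single exec of "sleep" should yield exactly one PERF_RECORD_COMM
	 * and mmap events for the binary itself, libc, the dynamic loader
	 * and the vDSO.
	 */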
	if (nr_events[PERF_RECORD_COMM] > 1) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return (err < 0 || errs > 0) ? -1 : 0;
}
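
/*
 * Usage sketch, assuming the usual perf tools tree (not verified here):
 * this test is registered with the "perf test" harness (tests/builtin-test.c),
 * so from a perf tools build it can be run with something like:
 *
 *	$ perf test -v PERF_RECORD
 */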