v4.17 (tools/perf/tests/perf-record.c)
  1// SPDX-License-Identifier: GPL-2.0
  2#include <errno.h>
  3#include <inttypes.h>
  4/* For the CLR_() macros */
  5#include <pthread.h>
  6
  7#include <sched.h>
  8#include "evlist.h"
  9#include "evsel.h"
 10#include "perf.h"
 11#include "debug.h"
 12#include "tests.h"
 13
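/*
 * Fetch @pid's affinity mask, clear every bit except the first allowed CPU,
 * and return that CPU number (or -1 on error).  The caller applies the
 * resulting single-CPU mask with sched_setaffinity() so that every sample's
 * cpu field can be checked against one known CPU.
 */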
 14static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
 15{
 16	int i, cpu = -1, nrcpus = 1024;
 17realloc:
 18	CPU_ZERO(maskp);
 19
 20	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
 21		if (errno == EINVAL && nrcpus < (1024 << 8)) {
 22			nrcpus = nrcpus << 2;
 23			goto realloc;
 24		}
 25		perror("sched_getaffinity");
 26		return -1;
 27	}
 28
 29	for (i = 0; i < nrcpus; i++) {
 30		if (CPU_ISSET(i, maskp)) {
 31			if (cpu == -1)
 32				cpu = i;
 33			else
 34				CPU_CLR(i, maskp);
 35		}
 36	}
 37
 38	return cpu;
 39}
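The helper only computes the mask; the test applies it to the workload later via sched_setaffinity(). As a minimal standalone sketch of the same glibc affinity pattern (this example is illustrative and not part of the kernel tree), a process can pin itself to the first CPU it is allowed to run on like this:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t mask;
	int cpu, first = -1;

	CPU_ZERO(&mask);
	if (sched_getaffinity(0, sizeof(mask), &mask) == -1) { /* 0 == calling thread */
		perror("sched_getaffinity");
		return 1;
	}

	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		if (!CPU_ISSET(cpu, &mask))
			continue;
		if (first == -1)
			first = cpu;		/* remember the first allowed CPU... */
		else
			CPU_CLR(cpu, &mask);	/* ...and drop all the others */
	}

	if (first != -1 && sched_setaffinity(0, sizeof(mask), &mask) == 0)
		printf("pinned to CPU %d\n", first);
	return 0;
}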
 40
 41int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unused)
 42{
 43	struct record_opts opts = {
 44		.target = {
 45			.uid = UINT_MAX,
 46			.uses_mmap = true,
 47		},
 48		.no_buffering = true,
 49		.mmap_pages   = 256,
 50	};
 51	cpu_set_t cpu_mask;
 52	size_t cpu_mask_size = sizeof(cpu_mask);
 53	struct perf_evlist *evlist = perf_evlist__new_dummy();
 54	struct perf_evsel *evsel;
 55	struct perf_sample sample;
 56	const char *cmd = "sleep";
 57	const char *argv[] = { cmd, "1", NULL, };
 58	char *bname, *mmap_filename;
 59	u64 prev_time = 0;
 60	bool found_cmd_mmap = false,
 61	     found_libc_mmap = false,
 62	     found_vdso_mmap = false,
 63	     found_ld_mmap = false;
 64	int err = -1, errs = 0, i, wakeups = 0;
 65	u32 cpu;
 66	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
 67	char sbuf[STRERR_BUFSIZE];
 68
 69	if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
 70		evlist = perf_evlist__new_default();
 71
 72	if (evlist == NULL) {
 73		pr_debug("Not enough memory to create evlist\n");
 74		goto out;
 75	}
 76
 77	/*
 78	 * Create maps of threads and cpus to monitor. In this case
 79	 * we start with all threads and cpus (-1, -1) but then in
 80	 * perf_evlist__prepare_workload we'll fill in the only thread
 81	 * we're monitoring, the one forked there.
 82	 */
 83	err = perf_evlist__create_maps(evlist, &opts.target);
 84	if (err < 0) {
 85		pr_debug("Not enough memory to create thread/cpu maps\n");
 86		goto out_delete_evlist;
 87	}
 88
 89	/*
 90	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
 91	 * for perf_evlist__start_workload() to exec it. This is done this way
 92	 * so that we have time to open the evlist (calling sys_perf_event_open
 93	 * on all the fds) and then mmap them.
 94	 */
 95	err = perf_evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
 96	if (err < 0) {
 97		pr_debug("Couldn't run the workload!\n");
 98		goto out_delete_evlist;
 99	}
100
101	/*
102	 * Config the evsels, setting attr->comm on the first one, etc.
103	 */
104	evsel = perf_evlist__first(evlist);
105	perf_evsel__set_sample_bit(evsel, CPU);
106	perf_evsel__set_sample_bit(evsel, TID);
107	perf_evsel__set_sample_bit(evsel, TIME);
108	perf_evlist__config(evlist, &opts, NULL);
109
110	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
111	if (err < 0) {
112		pr_debug("sched__get_first_possible_cpu: %s\n",
113			 str_error_r(errno, sbuf, sizeof(sbuf)));
114		goto out_delete_evlist;
115	}
116
117	cpu = err;
118
119	/*
120	 * So that we can check perf_sample.cpu on all the samples.
121	 */
122	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
123		pr_debug("sched_setaffinity: %s\n",
124			 str_error_r(errno, sbuf, sizeof(sbuf)));
125		goto out_delete_evlist;
126	}
127
128	/*
129	 * Call sys_perf_event_open on all the fds on all the evsels,
130	 * grouping them if asked to.
131	 */
132	err = perf_evlist__open(evlist);
133	if (err < 0) {
134		pr_debug("perf_evlist__open: %s\n",
135			 str_error_r(errno, sbuf, sizeof(sbuf)));
136		goto out_delete_evlist;
137	}
138
139	/*
140	 * mmap the first fd on a given CPU and ask for events for the other
141	 * fds in the same CPU to be injected in the same mmap ring buffer
142	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
143	 */
144	err = perf_evlist__mmap(evlist, opts.mmap_pages);
145	if (err < 0) {
146		pr_debug("perf_evlist__mmap: %s\n",
147			 str_error_r(errno, sbuf, sizeof(sbuf)));
148		goto out_delete_evlist;
149	}
150
151	/*
152	 * Now that all is properly set up, enable the events, they will
153	 * count just on workload.pid, which will start...
154	 */
155	perf_evlist__enable(evlist);
156
157	/*
158	 * Now!
159	 */
160	perf_evlist__start_workload(evlist);
161
162	while (1) {
163		int before = total_events;
164
165		for (i = 0; i < evlist->nr_mmaps; i++) {
166			union perf_event *event;
167			struct perf_mmap *md;
168
169			md = &evlist->mmap[i];
170			if (perf_mmap__read_init(md) < 0)
171				continue;
172
173			while ((event = perf_mmap__read_event(md)) != NULL) {
174				const u32 type = event->header.type;
175				const char *name = perf_event__name(type);
176
177				++total_events;
178				if (type < PERF_RECORD_MAX)
179					nr_events[type]++;
180
181				err = perf_evlist__parse_sample(evlist, event, &sample);
182				if (err < 0) {
183					if (verbose > 0)
184						perf_event__fprintf(event, stderr);
185					pr_debug("Couldn't parse sample\n");
186					goto out_delete_evlist;
187				}
188
189				if (verbose > 0) {
190					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
191					perf_event__fprintf(event, stderr);
192				}
193
194				if (prev_time > sample.time) {
195					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
196						 name, prev_time, sample.time);
197					++errs;
198				}
199
200				prev_time = sample.time;
201
202				if (sample.cpu != cpu) {
203					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
204						 name, cpu, sample.cpu);
205					++errs;
206				}
207
208				if ((pid_t)sample.pid != evlist->workload.pid) {
209					pr_debug("%s with unexpected pid, expected %d, got %d\n",
210						 name, evlist->workload.pid, sample.pid);
211					++errs;
212				}
213
214				if ((pid_t)sample.tid != evlist->workload.pid) {
215					pr_debug("%s with unexpected tid, expected %d, got %d\n",
216						 name, evlist->workload.pid, sample.tid);
217					++errs;
218				}
219
220				if ((type == PERF_RECORD_COMM ||
221				     type == PERF_RECORD_MMAP ||
222				     type == PERF_RECORD_MMAP2 ||
223				     type == PERF_RECORD_FORK ||
224				     type == PERF_RECORD_EXIT) &&
225				     (pid_t)event->comm.pid != evlist->workload.pid) {
226					pr_debug("%s with unexpected pid/tid\n", name);
227					++errs;
228				}
229
230				if ((type == PERF_RECORD_COMM ||
231				     type == PERF_RECORD_MMAP ||
232				     type == PERF_RECORD_MMAP2) &&
233				     event->comm.pid != event->comm.tid) {
234					pr_debug("%s with different pid/tid!\n", name);
235					++errs;
236				}
237
238				switch (type) {
239				case PERF_RECORD_COMM:
240					if (strcmp(event->comm.comm, cmd)) {
241						pr_debug("%s with unexpected comm!\n", name);
242						++errs;
243					}
244					break;
245				case PERF_RECORD_EXIT:
246					goto found_exit;
247				case PERF_RECORD_MMAP:
248					mmap_filename = event->mmap.filename;
249					goto check_bname;
250				case PERF_RECORD_MMAP2:
251					mmap_filename = event->mmap2.filename;
252				check_bname:
253					bname = strrchr(mmap_filename, '/');
254					if (bname != NULL) {
255						if (!found_cmd_mmap)
256							found_cmd_mmap = !strcmp(bname + 1, cmd);
257						if (!found_libc_mmap)
258							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
259						if (!found_ld_mmap)
260							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
261					} else if (!found_vdso_mmap)
262						found_vdso_mmap = !strcmp(mmap_filename, "[vdso]");
263					break;
264
265				case PERF_RECORD_SAMPLE:
266					/* Just ignore samples for now */
267					break;
268				default:
269					pr_debug("Unexpected perf_event->header.type %d!\n",
270						 type);
271					++errs;
272				}
273
274				perf_mmap__consume(md);
275			}
276			perf_mmap__read_done(md);
277		}
278
279		/*
280		 * We don't use poll here because at least at 3.1 times the
281		 * PERF_RECORD_{!SAMPLE} events don't honour
282		 * perf_event_attr.wakeup_events, just PERF_RECORD_SAMPLE does.
283		 */
284		if (total_events == before && false)
285			perf_evlist__poll(evlist, -1);
286
287		sleep(1);
288		if (++wakeups > 5) {
289			pr_debug("No PERF_RECORD_EXIT event!\n");
290			break;
291		}
292	}
293
294found_exit:
295	if (nr_events[PERF_RECORD_COMM] > 1) {
296		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
297		++errs;
298	}
299
300	if (nr_events[PERF_RECORD_COMM] == 0) {
301		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
302		++errs;
303	}
304
305	if (!found_cmd_mmap) {
306		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
307		++errs;
308	}
309
310	if (!found_libc_mmap) {
311		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
312		++errs;
313	}
314
315	if (!found_ld_mmap) {
316		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
317		++errs;
318	}
319
320	if (!found_vdso_mmap) {
321		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
322		++errs;
323	}
324out_delete_evlist:
325	perf_evlist__delete(evlist);
326out:
327	return (err < 0 || errs > 0) ? -1 : 0;
328}
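The event loop in the function above drains the ring buffers through perf_mmap__read_init(), perf_mmap__read_event(), perf_mmap__consume() and perf_mmap__read_done(). Those helpers wrap the mmap ABI documented in perf_event_open(2): a control page (struct perf_event_mmap_page) followed by a power-of-two data area in which the kernel advances data_head and userspace writes data_tail back. A rough illustration of that underlying protocol, assuming the classic layout where the data area starts one page after the control page (drain_ring and handle are illustrative names, not perf API), might look like:

#include <linux/perf_event.h>
#include <stddef.h>
#include <stdint.h>

static void drain_ring(struct perf_event_mmap_page *pc, size_t page_size,
		       size_t data_size, void (*handle)(struct perf_event_header *))
{
	uint8_t *data = (uint8_t *)pc + page_size;	/* data area follows the control page */
	uint64_t head = pc->data_head;
	uint64_t tail = pc->data_tail;

	__sync_synchronize();			/* read data_head before touching the records */

	while (tail < head) {
		struct perf_event_header *hdr =
			(void *)&data[tail & (data_size - 1)];

		handle(hdr);			/* each record starts with a perf_event_header */
		tail += hdr->size;
	}

	__sync_synchronize();			/* finish reading before releasing the space */
	pc->data_tail = tail;
}

perf's own helpers also take care of records that wrap around the end of the data area, which this sketch glosses over.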
v6.2 (tools/perf/tests/perf-record.c)
  1// SPDX-License-Identifier: GPL-2.0
  2#include <errno.h>
  3#include <inttypes.h>
  4#include <linux/string.h>
  5
  6#include <sched.h>
  7#include <perf/mmap.h>
  8#include "event.h"
  9#include "evlist.h"
 10#include "evsel.h"
 11#include "debug.h"
 12#include "record.h"
 13#include "tests.h"
 14#include "util/mmap.h"
 15#include "util/sample.h"
 16
 17static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
 18{
 19	int i, cpu = -1, nrcpus = 1024;
 20realloc:
 21	CPU_ZERO(maskp);
 22
 23	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
 24		if (errno == EINVAL && nrcpus < (1024 << 8)) {
 25			nrcpus = nrcpus << 2;
 26			goto realloc;
 27		}
 28		perror("sched_getaffinity");
 29		return -1;
 30	}
 31
 32	for (i = 0; i < nrcpus; i++) {
 33		if (CPU_ISSET(i, maskp)) {
 34			if (cpu == -1)
 35				cpu = i;
 36			else
 37				CPU_CLR(i, maskp);
 38		}
 39	}
 40
 41	return cpu;
 42}
 43
 44static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
 45{
 46	struct record_opts opts = {
 47		.target = {
 48			.uid = UINT_MAX,
 49			.uses_mmap = true,
 50		},
 51		.no_buffering = true,
 52		.mmap_pages   = 256,
 53	};
 54	cpu_set_t cpu_mask;
 55	size_t cpu_mask_size = sizeof(cpu_mask);
 56	struct evlist *evlist = evlist__new_dummy();
 57	struct evsel *evsel;
 58	struct perf_sample sample;
 59	const char *cmd = "sleep";
 60	const char *argv[] = { cmd, "1", NULL, };
 61	char *bname, *mmap_filename;
 62	u64 prev_time = 0;
 63	bool found_cmd_mmap = false,
 64	     found_coreutils_mmap = false,
 65	     found_libc_mmap = false,
 66	     found_vdso_mmap = false,
 67	     found_ld_mmap = false;
 68	int err = -1, errs = 0, i, wakeups = 0;
 69	u32 cpu;
 70	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
 71	char sbuf[STRERR_BUFSIZE];
 72
 73	if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
 74		evlist = evlist__new_default();
 75
 76	if (evlist == NULL) {
 77		pr_debug("Not enough memory to create evlist\n");
 78		goto out;
 79	}
 80
 81	/*
 82	 * Create maps of threads and cpus to monitor. In this case
 83	 * we start with all threads and cpus (-1, -1) but then in
 84	 * evlist__prepare_workload we'll fill in the only thread
 85	 * we're monitoring, the one forked there.
 86	 */
 87	err = evlist__create_maps(evlist, &opts.target);
 88	if (err < 0) {
 89		pr_debug("Not enough memory to create thread/cpu maps\n");
 90		goto out_delete_evlist;
 91	}
 92
 93	/*
 94	 * Prepare the workload in argv[] to run, it'll fork it, and then wait
 95	 * for evlist__start_workload() to exec it. This is done this way
 96	 * so that we have time to open the evlist (calling sys_perf_event_open
 97	 * on all the fds) and then mmap them.
 98	 */
 99	err = evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
100	if (err < 0) {
101		pr_debug("Couldn't run the workload!\n");
102		goto out_delete_evlist;
103	}
104
105	/*
106	 * Config the evsels, setting attr->comm on the first one, etc.
107	 */
108	evsel = evlist__first(evlist);
109	evsel__set_sample_bit(evsel, CPU);
110	evsel__set_sample_bit(evsel, TID);
111	evsel__set_sample_bit(evsel, TIME);
112	evlist__config(evlist, &opts, NULL);
113
114	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
115	if (err < 0) {
116		pr_debug("sched__get_first_possible_cpu: %s\n",
117			 str_error_r(errno, sbuf, sizeof(sbuf)));
118		goto out_delete_evlist;
119	}
120
121	cpu = err;
122
123	/*
124	 * So that we can check perf_sample.cpu on all the samples.
125	 */
126	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
127		pr_debug("sched_setaffinity: %s\n",
128			 str_error_r(errno, sbuf, sizeof(sbuf)));
129		goto out_delete_evlist;
130	}
131
132	/*
133	 * Call sys_perf_event_open on all the fds on all the evsels,
134	 * grouping them if asked to.
135	 */
136	err = evlist__open(evlist);
137	if (err < 0) {
138		pr_debug("evlist__open: %s\n",
139			 str_error_r(errno, sbuf, sizeof(sbuf)));
140		goto out_delete_evlist;
141	}
142
143	/*
144	 * mmap the first fd on a given CPU and ask for events for the other
145	 * fds in the same CPU to be injected in the same mmap ring buffer
146	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
147	 */
148	err = evlist__mmap(evlist, opts.mmap_pages);
149	if (err < 0) {
150		pr_debug("evlist__mmap: %s\n",
151			 str_error_r(errno, sbuf, sizeof(sbuf)));
152		goto out_delete_evlist;
153	}
154
155	/*
156	 * Now that all is properly set up, enable the events, they will
157	 * count just on workload.pid, which will start...
158	 */
159	evlist__enable(evlist);
160
161	/*
162	 * Now!
163	 */
164	evlist__start_workload(evlist);
165
166	while (1) {
167		int before = total_events;
168
169		for (i = 0; i < evlist->core.nr_mmaps; i++) {
170			union perf_event *event;
171			struct mmap *md;
172
173			md = &evlist->mmap[i];
174			if (perf_mmap__read_init(&md->core) < 0)
175				continue;
176
177			while ((event = perf_mmap__read_event(&md->core)) != NULL) {
178				const u32 type = event->header.type;
179				const char *name = perf_event__name(type);
180
181				++total_events;
182				if (type < PERF_RECORD_MAX)
183					nr_events[type]++;
184
185				err = evlist__parse_sample(evlist, event, &sample);
186				if (err < 0) {
187					if (verbose > 0)
188						perf_event__fprintf(event, NULL, stderr);
189					pr_debug("Couldn't parse sample\n");
190					goto out_delete_evlist;
191				}
192
193				if (verbose > 0) {
194					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
195					perf_event__fprintf(event, NULL, stderr);
196				}
197
198				if (prev_time > sample.time) {
199					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
200						 name, prev_time, sample.time);
201					++errs;
202				}
203
204				prev_time = sample.time;
205
206				if (sample.cpu != cpu) {
207					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
208						 name, cpu, sample.cpu);
209					++errs;
210				}
211
212				if ((pid_t)sample.pid != evlist->workload.pid) {
213					pr_debug("%s with unexpected pid, expected %d, got %d\n",
214						 name, evlist->workload.pid, sample.pid);
215					++errs;
216				}
217
218				if ((pid_t)sample.tid != evlist->workload.pid) {
219					pr_debug("%s with unexpected tid, expected %d, got %d\n",
220						 name, evlist->workload.pid, sample.tid);
221					++errs;
222				}
223
224				if ((type == PERF_RECORD_COMM ||
225				     type == PERF_RECORD_MMAP ||
226				     type == PERF_RECORD_MMAP2 ||
227				     type == PERF_RECORD_FORK ||
228				     type == PERF_RECORD_EXIT) &&
229				     (pid_t)event->comm.pid != evlist->workload.pid) {
230					pr_debug("%s with unexpected pid/tid\n", name);
231					++errs;
232				}
233
234				if ((type == PERF_RECORD_COMM ||
235				     type == PERF_RECORD_MMAP ||
236				     type == PERF_RECORD_MMAP2) &&
237				     event->comm.pid != event->comm.tid) {
238					pr_debug("%s with different pid/tid!\n", name);
239					++errs;
240				}
241
242				switch (type) {
243				case PERF_RECORD_COMM:
244					if (strcmp(event->comm.comm, cmd)) {
245						pr_debug("%s with unexpected comm!\n", name);
246						++errs;
247					}
248					break;
249				case PERF_RECORD_EXIT:
250					goto found_exit;
251				case PERF_RECORD_MMAP:
252					mmap_filename = event->mmap.filename;
253					goto check_bname;
254				case PERF_RECORD_MMAP2:
255					mmap_filename = event->mmap2.filename;
256				check_bname:
257					bname = strrchr(mmap_filename, '/');
258					if (bname != NULL) {
259						if (!found_cmd_mmap)
260							found_cmd_mmap = !strcmp(bname + 1, cmd);
261						if (!found_coreutils_mmap)
262							found_coreutils_mmap = !strcmp(bname + 1, "coreutils");
263						if (!found_libc_mmap)
264							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
265						if (!found_ld_mmap)
266							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
267					} else if (!found_vdso_mmap)
268						found_vdso_mmap = !strcmp(mmap_filename, "[vdso]");
269					break;
270
271				case PERF_RECORD_SAMPLE:
272					/* Just ignore samples for now */
273					break;
274				default:
275					pr_debug("Unexpected perf_event->header.type %d!\n",
276						 type);
277					++errs;
278				}
279
280				perf_mmap__consume(&md->core);
281			}
282			perf_mmap__read_done(&md->core);
283		}
284
285		/*
286		 * We don't use poll here because at least at 3.1 times the
287		 * PERF_RECORD_{!SAMPLE} events don't honour
288		 * perf_event_attr.wakeup_events, just PERF_RECORD_SAMPLE does.
289		 */
290		if (total_events == before && false)
291			evlist__poll(evlist, -1);
292
293		sleep(1);
294		if (++wakeups > 5) {
295			pr_debug("No PERF_RECORD_EXIT event!\n");
296			break;
297		}
298	}
299
300found_exit:
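	/*
	 * On systems where "sleep" is provided by a single-binary coreutils
	 * build, the exec maps a "coreutils" binary instead of a "sleep" one
	 * and produces one extra PERF_RECORD_COMM; found_coreutils_mmap lets
	 * the two checks below tolerate that.
	 */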
301	if (nr_events[PERF_RECORD_COMM] > 1 + !!found_coreutils_mmap) {
302		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
303		++errs;
304	}
305
306	if (nr_events[PERF_RECORD_COMM] == 0) {
307		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
308		++errs;
309	}
310
311	if (!found_cmd_mmap && !found_coreutils_mmap) {
312		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
313		++errs;
314	}
315
316	if (!found_libc_mmap) {
317		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
318		++errs;
319	}
320
321	if (!found_ld_mmap) {
322		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
323		++errs;
324	}
325
326	if (!found_vdso_mmap) {
327		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
328		++errs;
329	}
330out_delete_evlist:
331	evlist__delete(evlist);
332out:
333	if (err == -EACCES)
334		return TEST_SKIP;
335	if (err < 0 || errs != 0)
336		return TEST_FAIL;
337	return TEST_OK;
338}
339
340static struct test_case tests__PERF_RECORD[] = {
341	TEST_CASE_REASON("PERF_RECORD_* events & perf_sample fields",
342			 PERF_RECORD,
343			 "permissions"),
344	{	.name = NULL, }
345};
346
347struct test_suite suite__PERF_RECORD = {
348	.desc = "PERF_RECORD_* events & perf_sample fields",
349	.test_cases = tests__PERF_RECORD,
350};
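For context on what the evlist helpers request from the kernel: the side-band records this test validates (PERF_RECORD_COMM, PERF_RECORD_MMAP, PERF_RECORD_FORK, PERF_RECORD_EXIT) are switched on via bits in perf_event_attr on a dummy software event. A rough standalone sketch using the raw syscall, independent of perf's internal API (the perf_event_open() wrapper and open_sideband() are illustrative names), could look like:

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

/* glibc does not export a perf_event_open() symbol, so wrap the syscall. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
			   int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static int open_sideband(pid_t child)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;	/* no counting, side-band records only */
	attr.disabled = 1;			/* enabled later, as evlist__enable() does */
	attr.comm = 1;				/* PERF_RECORD_COMM */
	attr.mmap = 1;				/* PERF_RECORD_MMAP */
	attr.task = 1;				/* PERF_RECORD_FORK / PERF_RECORD_EXIT */
	attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU;

	fd = perf_event_open(&attr, child, -1 /* any cpu */, -1, 0);
	if (fd < 0)
		return -1;

	/* 1 control page + 256 data pages, mirroring opts.mmap_pages above. */
	if (mmap(NULL, (1 + 256) * sysconf(_SC_PAGESIZE),
		 PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0) == MAP_FAILED) {
		close(fd);
		return -1;
	}
	return fd;
}

Once the event is enabled and the workload execs, the records appear in the mmap'd area and can be drained as sketched after the v4.17 listing. The test itself is normally run through the perf tool, with something like 'perf test PERF_RECORD'.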