Linux v5.4: tools/perf/tests/hists_link.c
// SPDX-License-Identifier: GPL-2.0
#include "tests.h"
#include "debug.h"
#include "symbol.h"
#include "sort.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "parse-events.h"
#include "hists_common.h"
#include "util/mmap.h"
#include <errno.h>
#include <linux/kernel.h>

struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [libc]   malloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, },
};

static struct sample fake_samples[][5] = {
	{
		/* perf [perf]   run_command() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
		/* perf [libc]   malloc() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
		/* perf [kernel] page_fault() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
		/* bash [libc]   free() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_FREE, },
	},
	{
		/* perf [libc]   free() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
		/* bash [libc]   malloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
		/* bash [bash]   xfree() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XFREE, },
		/* bash [libc]   realloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_REALLOC, },
		/* bash [kernel] page_fault() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	},
};

static int add_hist_entries(struct evlist *evlist, struct machine *machine)
{
	struct evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .period = 1, .weight = 1, };
	size_t i = 0, k;

	/*
	 * Each evsel will have 10 samples - 5 common and 5 distinct.
	 * However, the second evsel also has a collapsed entry for
	 * "bash [libc] malloc", so a total of 9 entries will be in the tree.
	 */
	evlist__for_each_entry(evlist, evsel) {
		struct hists *hists = evsel__hists(evsel);

		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			sample.cpumode = PERF_RECORD_MISC_USER;
			sample.pid = fake_common_samples[k].pid;
			sample.tid = fake_common_samples[k].pid;
			sample.ip = fake_common_samples[k].ip;

			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
						NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;
		}

		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			sample.pid = fake_samples[i][k].pid;
			sample.tid = fake_samples[i][k].pid;
			sample.ip = fake_samples[i][k].ip;
			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
						NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;
		}
		i++;
	}

	return 0;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}

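/*
 * Return 1 if any of the nr_samples entries in samples recorded the given
 * (thread, map, symbol) triple, 0 otherwise.
 */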
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

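/*
 * Check the pairing done by hists__match(): every paired entry must come
 * from fake_common_samples and all of them must have been matched.
 */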
static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root_cached *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first_cached(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}

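/*
 * Check the linking done by hists__link().  idx 0 is the leader hists,
 * which may contain dummy entries; idx 1 is the other hists, where every
 * entry must have a pair.
 */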
static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root_cached *root;
	struct rb_node *node;

	/*
	 * Leader hists (idx = 0) will have dummy entries from the other,
	 * and some entries will have no pair.  However, every entry
	 * in the other hists should have a (dummy) pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first_cached(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have an entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}

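/*
 * Build hists for two evsels ("cpu-clock" and "task-clock") from overlapping
 * fake samples, then verify that hists__match() pairs the common entries and
 * that hists__link() adds dummy entries to the leader for entries that exist
 * only in the other hists.
 */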
int test__hists_link(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	struct hists *hists, *first_hists;
	struct machines machines;
	struct machine *machine = NULL;
	struct evsel *evsel, *first;
	struct evlist *evlist = evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_events(evlist, "cpu-clock", NULL);
	if (err)
		goto out;
	err = parse_events(evlist, "task-clock", NULL);
	if (err)
		goto out;

	err = TEST_FAIL;
	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting(NULL) < 0)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);
		hists__collapse_resort(hists, NULL);

		if (verbose > 2)
			print_hists_in(hists);
	}

	first = evlist__first(evlist);
	evsel = evlist__last(evlist);

	first_hists = evsel__hists(first);
	hists = evsel__hists(evsel);

	/* match common entries */
	hists__match(first_hists, hists);
	err = validate_match(first_hists, hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(first_hists, hists);
	err = validate_link(first_hists, hists);
	if (err)
		goto out;

	err = 0;

out:
	/* tear down everything */
	evlist__delete(evlist);
	reset_output_field();
	machines__exit(&machines);

	return err;
}
Linux v4.17: tools/perf/tests/hists_link.c
// SPDX-License-Identifier: GPL-2.0
#include "perf.h"
#include "tests.h"
#include "debug.h"
#include "symbol.h"
#include "sort.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "thread.h"
#include "parse-events.h"
#include "hists_common.h"
#include <errno.h>
#include <linux/kernel.h>

struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [libc]   malloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, },
};

static struct sample fake_samples[][5] = {
	{
		/* perf [perf]   run_command() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
		/* perf [libc]   malloc() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
		/* perf [kernel] page_fault() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
		/* bash [libc]   free() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_FREE, },
	},
	{
		/* perf [libc]   free() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
		/* bash [libc]   malloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
		/* bash [bash]   xfree() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XFREE, },
		/* bash [libc]   realloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_REALLOC, },
		/* bash [kernel] page_fault() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	},
};

static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
{
	struct perf_evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .period = 1, .weight = 1, };
	size_t i = 0, k;

	/*
	 * Each evsel will have 10 samples - 5 common and 5 distinct.
	 * However, the second evsel also has a collapsed entry for
	 * "bash [libc] malloc", so a total of 9 entries will be in the tree.
	 */
	evlist__for_each_entry(evlist, evsel) {
		struct hists *hists = evsel__hists(evsel);

		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			sample.cpumode = PERF_RECORD_MISC_USER;
			sample.pid = fake_common_samples[k].pid;
			sample.tid = fake_common_samples[k].pid;
			sample.ip = fake_common_samples[k].ip;

			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
						NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;
		}

		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			sample.pid = fake_samples[i][k].pid;
			sample.tid = fake_samples[i][k].pid;
			sample.ip = fake_samples[i][k].ip;
			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
						NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;
		}
		i++;
	}

	return 0;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}

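/*
 * Return 1 if any of the nr_samples entries in samples recorded the given
 * (thread, map, symbol) triple, 0 otherwise.
 */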
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

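/*
 * Check the pairing done by hists__match(): every paired entry must come
 * from fake_common_samples and all of them must have been matched.
 */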
static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}

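/*
 * Check the linking done by hists__link().  idx 0 is the leader hists,
 * which may contain dummy entries; idx 1 is the other hists, where every
 * entry must have a pair.
 */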
static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Leader hists (idx = 0) will have dummy entries from the other,
	 * and some entries will have no pair.  However, every entry
	 * in the other hists should have a (dummy) pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have an entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}

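/*
 * Build hists for two evsels ("cpu-clock" and "task-clock") from overlapping
 * fake samples, then verify that hists__match() pairs the common entries and
 * that hists__link() adds dummy entries to the leader for entries that exist
 * only in the other hists.
 */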
int test__hists_link(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	struct hists *hists, *first_hists;
	struct machines machines;
	struct machine *machine = NULL;
	struct perf_evsel *evsel, *first;
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_events(evlist, "cpu-clock", NULL);
	if (err)
		goto out;
	err = parse_events(evlist, "task-clock", NULL);
	if (err)
		goto out;

	err = TEST_FAIL;
	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting(NULL) < 0)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);
		hists__collapse_resort(hists, NULL);

		if (verbose > 2)
			print_hists_in(hists);
	}

	first = perf_evlist__first(evlist);
	evsel = perf_evlist__last(evlist);

	first_hists = evsel__hists(first);
	hists = evsel__hists(evsel);

	/* match common entries */
	hists__match(first_hists, hists);
	err = validate_match(first_hists, hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(first_hists, hists);
	err = validate_link(first_hists, hists);
	if (err)
		goto out;

	err = 0;

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	reset_output_field();
	machines__exit(&machines);

	return err;
}