/* tools/perf/tests/hists_cumulate.c */
1// SPDX-License-Identifier: GPL-2.0
2#include "perf.h"
3#include "util/debug.h"
4#include "util/event.h"
5#include "util/symbol.h"
6#include "util/sort.h"
7#include "util/evsel.h"
8#include "util/evlist.h"
9#include "util/machine.h"
10#include "util/thread.h"
11#include "util/parse-events.h"
12#include "tests/tests.h"
13#include "tests/hists_common.h"
14#include <linux/kernel.h>
15
16struct sample {
17 u32 pid;
18 u64 ip;
19 struct thread *thread;
20 struct map *map;
21 struct symbol *sym;
22};
23
24/* For the numbers, see hists_common.c */
25static struct sample fake_samples[] = {
26 /* perf [kernel] schedule() */
27 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
28 /* perf [perf] main() */
29 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
30 /* perf [perf] cmd_record() */
31 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
32 /* perf [libc] malloc() */
33 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
34 /* perf [libc] free() */
35 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
36 /* perf [perf] main() */
37 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
38 /* perf [kernel] page_fault() */
39 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
40 /* bash [bash] main() */
41 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, },
42 /* bash [bash] xmalloc() */
43 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
44 /* bash [kernel] page_fault() */
45 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
46};
47
48/*
49 * Will be casted to struct ip_callchain which has all 64 bit entries
50 * of nr and ips[].
51 */
52static u64 fake_callchains[][10] = {
53 /* schedule => run_command => main */
54 { 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
55 /* main */
56 { 1, FAKE_IP_PERF_MAIN, },
57 /* cmd_record => run_command => main */
58 { 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
59 /* malloc => cmd_record => run_command => main */
60 { 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
61 FAKE_IP_PERF_MAIN, },
62 /* free => cmd_record => run_command => main */
63 { 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
64 FAKE_IP_PERF_MAIN, },
65 /* main */
66 { 1, FAKE_IP_PERF_MAIN, },
67 /* page_fault => sys_perf_event_open => run_command => main */
68 { 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN,
69 FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
70 /* main */
71 { 1, FAKE_IP_BASH_MAIN, },
72 /* xmalloc => malloc => xmalloc => malloc => xmalloc => main */
73 { 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC,
74 FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, },
75 /* page_fault => malloc => main */
76 { 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, },
77};
78
79static int add_hist_entries(struct hists *hists, struct machine *machine)
80{
81 struct addr_location al;
82 struct perf_evsel *evsel = hists_to_evsel(hists);
83 struct perf_sample sample = { .period = 1000, };
84 size_t i;
85
86 for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
87 struct hist_entry_iter iter = {
88 .evsel = evsel,
89 .sample = &sample,
90 .hide_unresolved = false,
91 };
92
93 if (symbol_conf.cumulate_callchain)
94 iter.ops = &hist_iter_cumulative;
95 else
96 iter.ops = &hist_iter_normal;
97
98 sample.cpumode = PERF_RECORD_MISC_USER;
99 sample.pid = fake_samples[i].pid;
100 sample.tid = fake_samples[i].pid;
101 sample.ip = fake_samples[i].ip;
102 sample.callchain = (struct ip_callchain *)fake_callchains[i];
103
104 if (machine__resolve(machine, &al, &sample) < 0)
105 goto out;
106
107 if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
108 NULL) < 0) {
109 addr_location__put(&al);
110 goto out;
111 }
112
113 fake_samples[i].thread = al.thread;
114 fake_samples[i].map = al.map;
115 fake_samples[i].sym = al.sym;
116 }
117
118 return TEST_OK;
119
120out:
121 pr_debug("Not enough memory for adding a hist entry\n");
122 return TEST_FAIL;
123}
124
125static void del_hist_entries(struct hists *hists)
126{
127 struct hist_entry *he;
128 struct rb_root *root_in;
129 struct rb_root *root_out;
130 struct rb_node *node;
131
132 if (hists__has(hists, need_collapse))
133 root_in = &hists->entries_collapsed;
134 else
135 root_in = hists->entries_in;
136
137 root_out = &hists->entries;
138
139 while (!RB_EMPTY_ROOT(root_out)) {
140 node = rb_first(root_out);
141
142 he = rb_entry(node, struct hist_entry, rb_node);
143 rb_erase(node, root_out);
144 rb_erase(&he->rb_node_in, root_in);
145 hist_entry__delete(he);
146 }
147}
148
149typedef int (*test_fn_t)(struct perf_evsel *, struct machine *);
150
151#define COMM(he) (thread__comm_str(he->thread))
152#define DSO(he) (he->ms.map->dso->short_name)
153#define SYM(he) (he->ms.sym->name)
154#define CPU(he) (he->cpu)
155#define PID(he) (he->thread->tid)
156#define DEPTH(he) (he->callchain->max_depth)
157#define CDSO(cl) (cl->ms.map->dso->short_name)
158#define CSYM(cl) (cl->ms.sym->name)
159
160struct result {
161 u64 children;
162 u64 self;
163 const char *comm;
164 const char *dso;
165 const char *sym;
166};
167
168struct callchain_result {
169 u64 nr;
170 struct {
171 const char *dso;
172 const char *sym;
173 } node[10];
174};
175
176static int do_test(struct hists *hists, struct result *expected, size_t nr_expected,
177 struct callchain_result *expected_callchain, size_t nr_callchain)
178{
179 char buf[32];
180 size_t i, c;
181 struct hist_entry *he;
182 struct rb_root *root;
183 struct rb_node *node;
184 struct callchain_node *cnode;
185 struct callchain_list *clist;
186
187 /*
188 * adding and deleting hist entries must be done outside of this
189 * function since TEST_ASSERT_VAL() returns in case of failure.
190 */
191 hists__collapse_resort(hists, NULL);
192 perf_evsel__output_resort(hists_to_evsel(hists), NULL);
193
194 if (verbose > 2) {
195 pr_info("use callchain: %d, cumulate callchain: %d\n",
196 symbol_conf.use_callchain,
197 symbol_conf.cumulate_callchain);
198 print_hists_out(hists);
199 }
200
201 root = &hists->entries;
202 for (node = rb_first(root), i = 0;
203 node && (he = rb_entry(node, struct hist_entry, rb_node));
204 node = rb_next(node), i++) {
205 scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i);
206
207 TEST_ASSERT_VAL("Incorrect number of hist entry",
208 i < nr_expected);
209 TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self &&
210 !strcmp(COMM(he), expected[i].comm) &&
211 !strcmp(DSO(he), expected[i].dso) &&
212 !strcmp(SYM(he), expected[i].sym));
213
214 if (symbol_conf.cumulate_callchain)
215 TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children);
216
217 if (!symbol_conf.use_callchain)
218 continue;
219
220 /* check callchain entries */
221 root = &he->callchain->node.rb_root;
222
223 TEST_ASSERT_VAL("callchains expected", !RB_EMPTY_ROOT(root));
224 cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);
225
226 c = 0;
227 list_for_each_entry(clist, &cnode->val, list) {
228 scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c);
229
230 TEST_ASSERT_VAL("Incorrect number of callchain entry",
231 c < expected_callchain[i].nr);
232 TEST_ASSERT_VAL(buf,
233 !strcmp(CDSO(clist), expected_callchain[i].node[c].dso) &&
234 !strcmp(CSYM(clist), expected_callchain[i].node[c].sym));
235 c++;
236 }
237 /* TODO: handle multiple child nodes properly */
238 TEST_ASSERT_VAL("Incorrect number of callchain entry",
239 c <= expected_callchain[i].nr);
240 }
241 TEST_ASSERT_VAL("Incorrect number of hist entry",
242 i == nr_expected);
243 TEST_ASSERT_VAL("Incorrect number of callchain entry",
244 !symbol_conf.use_callchain || nr_expected == nr_callchain);
245 return 0;
246}
247
248/* NO callchain + NO children */
249static int test1(struct perf_evsel *evsel, struct machine *machine)
250{
251 int err;
252 struct hists *hists = evsel__hists(evsel);
253 /*
254 * expected output:
255 *
256 * Overhead Command Shared Object Symbol
257 * ======== ======= ============= ==============
258 * 20.00% perf perf [.] main
259 * 10.00% bash [kernel] [k] page_fault
260 * 10.00% bash bash [.] main
261 * 10.00% bash bash [.] xmalloc
262 * 10.00% perf [kernel] [k] page_fault
263 * 10.00% perf [kernel] [k] schedule
264 * 10.00% perf libc [.] free
265 * 10.00% perf libc [.] malloc
266 * 10.00% perf perf [.] cmd_record
267 */
268 struct result expected[] = {
269 { 0, 2000, "perf", "perf", "main" },
270 { 0, 1000, "bash", "[kernel]", "page_fault" },
271 { 0, 1000, "bash", "bash", "main" },
272 { 0, 1000, "bash", "bash", "xmalloc" },
273 { 0, 1000, "perf", "[kernel]", "page_fault" },
274 { 0, 1000, "perf", "[kernel]", "schedule" },
275 { 0, 1000, "perf", "libc", "free" },
276 { 0, 1000, "perf", "libc", "malloc" },
277 { 0, 1000, "perf", "perf", "cmd_record" },
278 };
279
280 symbol_conf.use_callchain = false;
281 symbol_conf.cumulate_callchain = false;
282 perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
283
284 setup_sorting(NULL);
285 callchain_register_param(&callchain_param);
286
287 err = add_hist_entries(hists, machine);
288 if (err < 0)
289 goto out;
290
291 err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);
292
293out:
294 del_hist_entries(hists);
295 reset_output_field();
296 return err;
297}
298
299/* callcain + NO children */
300static int test2(struct perf_evsel *evsel, struct machine *machine)
301{
302 int err;
303 struct hists *hists = evsel__hists(evsel);
304 /*
305 * expected output:
306 *
307 * Overhead Command Shared Object Symbol
308 * ======== ======= ============= ==============
309 * 20.00% perf perf [.] main
310 * |
311 * --- main
312 *
313 * 10.00% bash [kernel] [k] page_fault
314 * |
315 * --- page_fault
316 * malloc
317 * main
318 *
319 * 10.00% bash bash [.] main
320 * |
321 * --- main
322 *
323 * 10.00% bash bash [.] xmalloc
324 * |
325 * --- xmalloc
326 * malloc
327 * xmalloc <--- NOTE: there's a cycle
328 * malloc
329 * xmalloc
330 * main
331 *
332 * 10.00% perf [kernel] [k] page_fault
333 * |
334 * --- page_fault
335 * sys_perf_event_open
336 * run_command
337 * main
338 *
339 * 10.00% perf [kernel] [k] schedule
340 * |
341 * --- schedule
342 * run_command
343 * main
344 *
345 * 10.00% perf libc [.] free
346 * |
347 * --- free
348 * cmd_record
349 * run_command
350 * main
351 *
352 * 10.00% perf libc [.] malloc
353 * |
354 * --- malloc
355 * cmd_record
356 * run_command
357 * main
358 *
359 * 10.00% perf perf [.] cmd_record
360 * |
361 * --- cmd_record
362 * run_command
363 * main
364 *
365 */
366 struct result expected[] = {
367 { 0, 2000, "perf", "perf", "main" },
368 { 0, 1000, "bash", "[kernel]", "page_fault" },
369 { 0, 1000, "bash", "bash", "main" },
370 { 0, 1000, "bash", "bash", "xmalloc" },
371 { 0, 1000, "perf", "[kernel]", "page_fault" },
372 { 0, 1000, "perf", "[kernel]", "schedule" },
373 { 0, 1000, "perf", "libc", "free" },
374 { 0, 1000, "perf", "libc", "malloc" },
375 { 0, 1000, "perf", "perf", "cmd_record" },
376 };
377 struct callchain_result expected_callchain[] = {
378 {
379 1, { { "perf", "main" }, },
380 },
381 {
382 3, { { "[kernel]", "page_fault" },
383 { "libc", "malloc" },
384 { "bash", "main" }, },
385 },
386 {
387 1, { { "bash", "main" }, },
388 },
389 {
390 6, { { "bash", "xmalloc" },
391 { "libc", "malloc" },
392 { "bash", "xmalloc" },
393 { "libc", "malloc" },
394 { "bash", "xmalloc" },
395 { "bash", "main" }, },
396 },
397 {
398 4, { { "[kernel]", "page_fault" },
399 { "[kernel]", "sys_perf_event_open" },
400 { "perf", "run_command" },
401 { "perf", "main" }, },
402 },
403 {
404 3, { { "[kernel]", "schedule" },
405 { "perf", "run_command" },
406 { "perf", "main" }, },
407 },
408 {
409 4, { { "libc", "free" },
410 { "perf", "cmd_record" },
411 { "perf", "run_command" },
412 { "perf", "main" }, },
413 },
414 {
415 4, { { "libc", "malloc" },
416 { "perf", "cmd_record" },
417 { "perf", "run_command" },
418 { "perf", "main" }, },
419 },
420 {
421 3, { { "perf", "cmd_record" },
422 { "perf", "run_command" },
423 { "perf", "main" }, },
424 },
425 };
426
427 symbol_conf.use_callchain = true;
428 symbol_conf.cumulate_callchain = false;
429 perf_evsel__set_sample_bit(evsel, CALLCHAIN);
430
431 setup_sorting(NULL);
432 callchain_register_param(&callchain_param);
433
434 err = add_hist_entries(hists, machine);
435 if (err < 0)
436 goto out;
437
438 err = do_test(hists, expected, ARRAY_SIZE(expected),
439 expected_callchain, ARRAY_SIZE(expected_callchain));
440
441out:
442 del_hist_entries(hists);
443 reset_output_field();
444 return err;
445}
446
447/* NO callchain + children */
448static int test3(struct perf_evsel *evsel, struct machine *machine)
449{
450 int err;
451 struct hists *hists = evsel__hists(evsel);
452 /*
453 * expected output:
454 *
455 * Children Self Command Shared Object Symbol
456 * ======== ======== ======= ============= =======================
457 * 70.00% 20.00% perf perf [.] main
458 * 50.00% 0.00% perf perf [.] run_command
459 * 30.00% 10.00% bash bash [.] main
460 * 30.00% 10.00% perf perf [.] cmd_record
461 * 20.00% 0.00% bash libc [.] malloc
462 * 10.00% 10.00% bash [kernel] [k] page_fault
463 * 10.00% 10.00% bash bash [.] xmalloc
464 * 10.00% 10.00% perf [kernel] [k] page_fault
465 * 10.00% 10.00% perf libc [.] malloc
466 * 10.00% 10.00% perf [kernel] [k] schedule
467 * 10.00% 10.00% perf libc [.] free
468 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
469 */
470 struct result expected[] = {
471 { 7000, 2000, "perf", "perf", "main" },
472 { 5000, 0, "perf", "perf", "run_command" },
473 { 3000, 1000, "bash", "bash", "main" },
474 { 3000, 1000, "perf", "perf", "cmd_record" },
475 { 2000, 0, "bash", "libc", "malloc" },
476 { 1000, 1000, "bash", "[kernel]", "page_fault" },
477 { 1000, 1000, "bash", "bash", "xmalloc" },
478 { 1000, 1000, "perf", "[kernel]", "page_fault" },
479 { 1000, 1000, "perf", "[kernel]", "schedule" },
480 { 1000, 1000, "perf", "libc", "free" },
481 { 1000, 1000, "perf", "libc", "malloc" },
482 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
483 };
484
485 symbol_conf.use_callchain = false;
486 symbol_conf.cumulate_callchain = true;
487 perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
488
489 setup_sorting(NULL);
490 callchain_register_param(&callchain_param);
491
492 err = add_hist_entries(hists, machine);
493 if (err < 0)
494 goto out;
495
496 err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);
497
498out:
499 del_hist_entries(hists);
500 reset_output_field();
501 return err;
502}
503
504/* callchain + children */
505static int test4(struct perf_evsel *evsel, struct machine *machine)
506{
507 int err;
508 struct hists *hists = evsel__hists(evsel);
509 /*
510 * expected output:
511 *
512 * Children Self Command Shared Object Symbol
513 * ======== ======== ======= ============= =======================
514 * 70.00% 20.00% perf perf [.] main
515 * |
516 * --- main
517 *
518 * 50.00% 0.00% perf perf [.] run_command
519 * |
520 * --- run_command
521 * main
522 *
523 * 30.00% 10.00% bash bash [.] main
524 * |
525 * --- main
526 *
527 * 30.00% 10.00% perf perf [.] cmd_record
528 * |
529 * --- cmd_record
530 * run_command
531 * main
532 *
533 * 20.00% 0.00% bash libc [.] malloc
534 * |
535 * --- malloc
536 * |
537 * |--50.00%-- xmalloc
538 * | main
539 * --50.00%-- main
540 *
541 * 10.00% 10.00% bash [kernel] [k] page_fault
542 * |
543 * --- page_fault
544 * malloc
545 * main
546 *
547 * 10.00% 10.00% bash bash [.] xmalloc
548 * |
549 * --- xmalloc
550 * malloc
551 * xmalloc <--- NOTE: there's a cycle
552 * malloc
553 * xmalloc
554 * main
555 *
556 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
557 * |
558 * --- sys_perf_event_open
559 * run_command
560 * main
561 *
562 * 10.00% 10.00% perf [kernel] [k] page_fault
563 * |
564 * --- page_fault
565 * sys_perf_event_open
566 * run_command
567 * main
568 *
569 * 10.00% 10.00% perf [kernel] [k] schedule
570 * |
571 * --- schedule
572 * run_command
573 * main
574 *
575 * 10.00% 10.00% perf libc [.] free
576 * |
577 * --- free
578 * cmd_record
579 * run_command
580 * main
581 *
582 * 10.00% 10.00% perf libc [.] malloc
583 * |
584 * --- malloc
585 * cmd_record
586 * run_command
587 * main
588 *
589 */
590 struct result expected[] = {
591 { 7000, 2000, "perf", "perf", "main" },
592 { 5000, 0, "perf", "perf", "run_command" },
593 { 3000, 1000, "bash", "bash", "main" },
594 { 3000, 1000, "perf", "perf", "cmd_record" },
595 { 2000, 0, "bash", "libc", "malloc" },
596 { 1000, 1000, "bash", "[kernel]", "page_fault" },
597 { 1000, 1000, "bash", "bash", "xmalloc" },
598 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
599 { 1000, 1000, "perf", "[kernel]", "page_fault" },
600 { 1000, 1000, "perf", "[kernel]", "schedule" },
601 { 1000, 1000, "perf", "libc", "free" },
602 { 1000, 1000, "perf", "libc", "malloc" },
603 };
604 struct callchain_result expected_callchain[] = {
605 {
606 1, { { "perf", "main" }, },
607 },
608 {
609 2, { { "perf", "run_command" },
610 { "perf", "main" }, },
611 },
612 {
613 1, { { "bash", "main" }, },
614 },
615 {
616 3, { { "perf", "cmd_record" },
617 { "perf", "run_command" },
618 { "perf", "main" }, },
619 },
620 {
621 4, { { "libc", "malloc" },
622 { "bash", "xmalloc" },
623 { "bash", "main" },
624 { "bash", "main" }, },
625 },
626 {
627 3, { { "[kernel]", "page_fault" },
628 { "libc", "malloc" },
629 { "bash", "main" }, },
630 },
631 {
632 6, { { "bash", "xmalloc" },
633 { "libc", "malloc" },
634 { "bash", "xmalloc" },
635 { "libc", "malloc" },
636 { "bash", "xmalloc" },
637 { "bash", "main" }, },
638 },
639 {
640 3, { { "[kernel]", "sys_perf_event_open" },
641 { "perf", "run_command" },
642 { "perf", "main" }, },
643 },
644 {
645 4, { { "[kernel]", "page_fault" },
646 { "[kernel]", "sys_perf_event_open" },
647 { "perf", "run_command" },
648 { "perf", "main" }, },
649 },
650 {
651 3, { { "[kernel]", "schedule" },
652 { "perf", "run_command" },
653 { "perf", "main" }, },
654 },
655 {
656 4, { { "libc", "free" },
657 { "perf", "cmd_record" },
658 { "perf", "run_command" },
659 { "perf", "main" }, },
660 },
661 {
662 4, { { "libc", "malloc" },
663 { "perf", "cmd_record" },
664 { "perf", "run_command" },
665 { "perf", "main" }, },
666 },
667 };
668
669 symbol_conf.use_callchain = true;
670 symbol_conf.cumulate_callchain = true;
671 perf_evsel__set_sample_bit(evsel, CALLCHAIN);
672
673 setup_sorting(NULL);
674
675 callchain_param = callchain_param_default;
676 callchain_register_param(&callchain_param);
677
678 err = add_hist_entries(hists, machine);
679 if (err < 0)
680 goto out;
681
682 err = do_test(hists, expected, ARRAY_SIZE(expected),
683 expected_callchain, ARRAY_SIZE(expected_callchain));
684
685out:
686 del_hist_entries(hists);
687 reset_output_field();
688 return err;
689}
690
691int test__hists_cumulate(struct test *test __maybe_unused, int subtest __maybe_unused)
692{
693 int err = TEST_FAIL;
694 struct machines machines;
695 struct machine *machine;
696 struct perf_evsel *evsel;
697 struct perf_evlist *evlist = perf_evlist__new();
698 size_t i;
699 test_fn_t testcases[] = {
700 test1,
701 test2,
702 test3,
703 test4,
704 };
705
706 TEST_ASSERT_VAL("No memory", evlist);
707
708 err = parse_events(evlist, "cpu-clock", NULL);
709 if (err)
710 goto out;
711 err = TEST_FAIL;
712
713 machines__init(&machines);
714
715 /* setup threads/dso/map/symbols also */
716 machine = setup_fake_machine(&machines);
717 if (!machine)
718 goto out;
719
720 if (verbose > 1)
721 machine__fprintf(machine, stderr);
722
723 evsel = perf_evlist__first(evlist);
724
725 for (i = 0; i < ARRAY_SIZE(testcases); i++) {
726 err = testcases[i](evsel, machine);
727 if (err < 0)
728 break;
729 }
730
731out:
732 /* tear down everything */
733 perf_evlist__delete(evlist);
734 machines__exit(&machines);
735
736 return err;
737}
1#include "perf.h"
2#include "util/debug.h"
3#include "util/symbol.h"
4#include "util/sort.h"
5#include "util/evsel.h"
6#include "util/evlist.h"
7#include "util/machine.h"
8#include "util/thread.h"
9#include "util/parse-events.h"
10#include "tests/tests.h"
11#include "tests/hists_common.h"
12
13struct sample {
14 u32 pid;
15 u64 ip;
16 struct thread *thread;
17 struct map *map;
18 struct symbol *sym;
19};
20
21/* For the numbers, see hists_common.c */
22static struct sample fake_samples[] = {
23 /* perf [kernel] schedule() */
24 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
25 /* perf [perf] main() */
26 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
27 /* perf [perf] cmd_record() */
28 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
29 /* perf [libc] malloc() */
30 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
31 /* perf [libc] free() */
32 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
33 /* perf [perf] main() */
34 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
35 /* perf [kernel] page_fault() */
36 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
37 /* bash [bash] main() */
38 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, },
39 /* bash [bash] xmalloc() */
40 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
41 /* bash [kernel] page_fault() */
42 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
43};
44
45/*
46 * Will be casted to struct ip_callchain which has all 64 bit entries
47 * of nr and ips[].
48 */
49static u64 fake_callchains[][10] = {
50 /* schedule => run_command => main */
51 { 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
52 /* main */
53 { 1, FAKE_IP_PERF_MAIN, },
54 /* cmd_record => run_command => main */
55 { 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
56 /* malloc => cmd_record => run_command => main */
57 { 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
58 FAKE_IP_PERF_MAIN, },
59 /* free => cmd_record => run_command => main */
60 { 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
61 FAKE_IP_PERF_MAIN, },
62 /* main */
63 { 1, FAKE_IP_PERF_MAIN, },
64 /* page_fault => sys_perf_event_open => run_command => main */
65 { 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN,
66 FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
67 /* main */
68 { 1, FAKE_IP_BASH_MAIN, },
69 /* xmalloc => malloc => xmalloc => malloc => xmalloc => main */
70 { 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC,
71 FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, },
72 /* page_fault => malloc => main */
73 { 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, },
74};
75
76static int add_hist_entries(struct hists *hists, struct machine *machine)
77{
78 struct addr_location al;
79 struct perf_evsel *evsel = hists_to_evsel(hists);
80 struct perf_sample sample = { .period = 1000, };
81 size_t i;
82
83 for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
84 struct hist_entry_iter iter = {
85 .evsel = evsel,
86 .sample = &sample,
87 .hide_unresolved = false,
88 };
89
90 if (symbol_conf.cumulate_callchain)
91 iter.ops = &hist_iter_cumulative;
92 else
93 iter.ops = &hist_iter_normal;
94
95 sample.cpumode = PERF_RECORD_MISC_USER;
96 sample.pid = fake_samples[i].pid;
97 sample.tid = fake_samples[i].pid;
98 sample.ip = fake_samples[i].ip;
99 sample.callchain = (struct ip_callchain *)fake_callchains[i];
100
101 if (machine__resolve(machine, &al, &sample) < 0)
102 goto out;
103
104 if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
105 NULL) < 0) {
106 addr_location__put(&al);
107 goto out;
108 }
109
110 fake_samples[i].thread = al.thread;
111 fake_samples[i].map = al.map;
112 fake_samples[i].sym = al.sym;
113 }
114
115 return TEST_OK;
116
117out:
118 pr_debug("Not enough memory for adding a hist entry\n");
119 return TEST_FAIL;
120}
121
122static void del_hist_entries(struct hists *hists)
123{
124 struct hist_entry *he;
125 struct rb_root *root_in;
126 struct rb_root *root_out;
127 struct rb_node *node;
128
129 if (hists__has(hists, need_collapse))
130 root_in = &hists->entries_collapsed;
131 else
132 root_in = hists->entries_in;
133
134 root_out = &hists->entries;
135
136 while (!RB_EMPTY_ROOT(root_out)) {
137 node = rb_first(root_out);
138
139 he = rb_entry(node, struct hist_entry, rb_node);
140 rb_erase(node, root_out);
141 rb_erase(&he->rb_node_in, root_in);
142 hist_entry__delete(he);
143 }
144}
145
146typedef int (*test_fn_t)(struct perf_evsel *, struct machine *);
147
148#define COMM(he) (thread__comm_str(he->thread))
149#define DSO(he) (he->ms.map->dso->short_name)
150#define SYM(he) (he->ms.sym->name)
151#define CPU(he) (he->cpu)
152#define PID(he) (he->thread->tid)
153#define DEPTH(he) (he->callchain->max_depth)
154#define CDSO(cl) (cl->ms.map->dso->short_name)
155#define CSYM(cl) (cl->ms.sym->name)
156
157struct result {
158 u64 children;
159 u64 self;
160 const char *comm;
161 const char *dso;
162 const char *sym;
163};
164
165struct callchain_result {
166 u64 nr;
167 struct {
168 const char *dso;
169 const char *sym;
170 } node[10];
171};
172
173static int do_test(struct hists *hists, struct result *expected, size_t nr_expected,
174 struct callchain_result *expected_callchain, size_t nr_callchain)
175{
176 char buf[32];
177 size_t i, c;
178 struct hist_entry *he;
179 struct rb_root *root;
180 struct rb_node *node;
181 struct callchain_node *cnode;
182 struct callchain_list *clist;
183
184 /*
185 * adding and deleting hist entries must be done outside of this
186 * function since TEST_ASSERT_VAL() returns in case of failure.
187 */
188 hists__collapse_resort(hists, NULL);
189 perf_evsel__output_resort(hists_to_evsel(hists), NULL);
190
191 if (verbose > 2) {
192 pr_info("use callchain: %d, cumulate callchain: %d\n",
193 symbol_conf.use_callchain,
194 symbol_conf.cumulate_callchain);
195 print_hists_out(hists);
196 }
197
198 root = &hists->entries;
199 for (node = rb_first(root), i = 0;
200 node && (he = rb_entry(node, struct hist_entry, rb_node));
201 node = rb_next(node), i++) {
202 scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i);
203
204 TEST_ASSERT_VAL("Incorrect number of hist entry",
205 i < nr_expected);
206 TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self &&
207 !strcmp(COMM(he), expected[i].comm) &&
208 !strcmp(DSO(he), expected[i].dso) &&
209 !strcmp(SYM(he), expected[i].sym));
210
211 if (symbol_conf.cumulate_callchain)
212 TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children);
213
214 if (!symbol_conf.use_callchain)
215 continue;
216
217 /* check callchain entries */
218 root = &he->callchain->node.rb_root;
219
220 TEST_ASSERT_VAL("callchains expected", !RB_EMPTY_ROOT(root));
221 cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);
222
223 c = 0;
224 list_for_each_entry(clist, &cnode->val, list) {
225 scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c);
226
227 TEST_ASSERT_VAL("Incorrect number of callchain entry",
228 c < expected_callchain[i].nr);
229 TEST_ASSERT_VAL(buf,
230 !strcmp(CDSO(clist), expected_callchain[i].node[c].dso) &&
231 !strcmp(CSYM(clist), expected_callchain[i].node[c].sym));
232 c++;
233 }
234 /* TODO: handle multiple child nodes properly */
235 TEST_ASSERT_VAL("Incorrect number of callchain entry",
236 c <= expected_callchain[i].nr);
237 }
238 TEST_ASSERT_VAL("Incorrect number of hist entry",
239 i == nr_expected);
240 TEST_ASSERT_VAL("Incorrect number of callchain entry",
241 !symbol_conf.use_callchain || nr_expected == nr_callchain);
242 return 0;
243}
244
245/* NO callchain + NO children */
246static int test1(struct perf_evsel *evsel, struct machine *machine)
247{
248 int err;
249 struct hists *hists = evsel__hists(evsel);
250 /*
251 * expected output:
252 *
253 * Overhead Command Shared Object Symbol
254 * ======== ======= ============= ==============
255 * 20.00% perf perf [.] main
256 * 10.00% bash [kernel] [k] page_fault
257 * 10.00% bash bash [.] main
258 * 10.00% bash bash [.] xmalloc
259 * 10.00% perf [kernel] [k] page_fault
260 * 10.00% perf [kernel] [k] schedule
261 * 10.00% perf libc [.] free
262 * 10.00% perf libc [.] malloc
263 * 10.00% perf perf [.] cmd_record
264 */
265 struct result expected[] = {
266 { 0, 2000, "perf", "perf", "main" },
267 { 0, 1000, "bash", "[kernel]", "page_fault" },
268 { 0, 1000, "bash", "bash", "main" },
269 { 0, 1000, "bash", "bash", "xmalloc" },
270 { 0, 1000, "perf", "[kernel]", "page_fault" },
271 { 0, 1000, "perf", "[kernel]", "schedule" },
272 { 0, 1000, "perf", "libc", "free" },
273 { 0, 1000, "perf", "libc", "malloc" },
274 { 0, 1000, "perf", "perf", "cmd_record" },
275 };
276
277 symbol_conf.use_callchain = false;
278 symbol_conf.cumulate_callchain = false;
279 perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
280
281 setup_sorting(NULL);
282 callchain_register_param(&callchain_param);
283
284 err = add_hist_entries(hists, machine);
285 if (err < 0)
286 goto out;
287
288 err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);
289
290out:
291 del_hist_entries(hists);
292 reset_output_field();
293 return err;
294}
295
296/* callcain + NO children */
297static int test2(struct perf_evsel *evsel, struct machine *machine)
298{
299 int err;
300 struct hists *hists = evsel__hists(evsel);
301 /*
302 * expected output:
303 *
304 * Overhead Command Shared Object Symbol
305 * ======== ======= ============= ==============
306 * 20.00% perf perf [.] main
307 * |
308 * --- main
309 *
310 * 10.00% bash [kernel] [k] page_fault
311 * |
312 * --- page_fault
313 * malloc
314 * main
315 *
316 * 10.00% bash bash [.] main
317 * |
318 * --- main
319 *
320 * 10.00% bash bash [.] xmalloc
321 * |
322 * --- xmalloc
323 * malloc
324 * xmalloc <--- NOTE: there's a cycle
325 * malloc
326 * xmalloc
327 * main
328 *
329 * 10.00% perf [kernel] [k] page_fault
330 * |
331 * --- page_fault
332 * sys_perf_event_open
333 * run_command
334 * main
335 *
336 * 10.00% perf [kernel] [k] schedule
337 * |
338 * --- schedule
339 * run_command
340 * main
341 *
342 * 10.00% perf libc [.] free
343 * |
344 * --- free
345 * cmd_record
346 * run_command
347 * main
348 *
349 * 10.00% perf libc [.] malloc
350 * |
351 * --- malloc
352 * cmd_record
353 * run_command
354 * main
355 *
356 * 10.00% perf perf [.] cmd_record
357 * |
358 * --- cmd_record
359 * run_command
360 * main
361 *
362 */
363 struct result expected[] = {
364 { 0, 2000, "perf", "perf", "main" },
365 { 0, 1000, "bash", "[kernel]", "page_fault" },
366 { 0, 1000, "bash", "bash", "main" },
367 { 0, 1000, "bash", "bash", "xmalloc" },
368 { 0, 1000, "perf", "[kernel]", "page_fault" },
369 { 0, 1000, "perf", "[kernel]", "schedule" },
370 { 0, 1000, "perf", "libc", "free" },
371 { 0, 1000, "perf", "libc", "malloc" },
372 { 0, 1000, "perf", "perf", "cmd_record" },
373 };
374 struct callchain_result expected_callchain[] = {
375 {
376 1, { { "perf", "main" }, },
377 },
378 {
379 3, { { "[kernel]", "page_fault" },
380 { "libc", "malloc" },
381 { "bash", "main" }, },
382 },
383 {
384 1, { { "bash", "main" }, },
385 },
386 {
387 6, { { "bash", "xmalloc" },
388 { "libc", "malloc" },
389 { "bash", "xmalloc" },
390 { "libc", "malloc" },
391 { "bash", "xmalloc" },
392 { "bash", "main" }, },
393 },
394 {
395 4, { { "[kernel]", "page_fault" },
396 { "[kernel]", "sys_perf_event_open" },
397 { "perf", "run_command" },
398 { "perf", "main" }, },
399 },
400 {
401 3, { { "[kernel]", "schedule" },
402 { "perf", "run_command" },
403 { "perf", "main" }, },
404 },
405 {
406 4, { { "libc", "free" },
407 { "perf", "cmd_record" },
408 { "perf", "run_command" },
409 { "perf", "main" }, },
410 },
411 {
412 4, { { "libc", "malloc" },
413 { "perf", "cmd_record" },
414 { "perf", "run_command" },
415 { "perf", "main" }, },
416 },
417 {
418 3, { { "perf", "cmd_record" },
419 { "perf", "run_command" },
420 { "perf", "main" }, },
421 },
422 };
423
424 symbol_conf.use_callchain = true;
425 symbol_conf.cumulate_callchain = false;
426 perf_evsel__set_sample_bit(evsel, CALLCHAIN);
427
428 setup_sorting(NULL);
429 callchain_register_param(&callchain_param);
430
431 err = add_hist_entries(hists, machine);
432 if (err < 0)
433 goto out;
434
435 err = do_test(hists, expected, ARRAY_SIZE(expected),
436 expected_callchain, ARRAY_SIZE(expected_callchain));
437
438out:
439 del_hist_entries(hists);
440 reset_output_field();
441 return err;
442}
443
444/* NO callchain + children */
445static int test3(struct perf_evsel *evsel, struct machine *machine)
446{
447 int err;
448 struct hists *hists = evsel__hists(evsel);
449 /*
450 * expected output:
451 *
452 * Children Self Command Shared Object Symbol
453 * ======== ======== ======= ============= =======================
454 * 70.00% 20.00% perf perf [.] main
455 * 50.00% 0.00% perf perf [.] run_command
456 * 30.00% 10.00% bash bash [.] main
457 * 30.00% 10.00% perf perf [.] cmd_record
458 * 20.00% 0.00% bash libc [.] malloc
459 * 10.00% 10.00% bash [kernel] [k] page_fault
460 * 10.00% 10.00% bash bash [.] xmalloc
461 * 10.00% 10.00% perf [kernel] [k] page_fault
462 * 10.00% 10.00% perf libc [.] malloc
463 * 10.00% 10.00% perf [kernel] [k] schedule
464 * 10.00% 10.00% perf libc [.] free
465 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
466 */
467 struct result expected[] = {
468 { 7000, 2000, "perf", "perf", "main" },
469 { 5000, 0, "perf", "perf", "run_command" },
470 { 3000, 1000, "bash", "bash", "main" },
471 { 3000, 1000, "perf", "perf", "cmd_record" },
472 { 2000, 0, "bash", "libc", "malloc" },
473 { 1000, 1000, "bash", "[kernel]", "page_fault" },
474 { 1000, 1000, "bash", "bash", "xmalloc" },
475 { 1000, 1000, "perf", "[kernel]", "page_fault" },
476 { 1000, 1000, "perf", "[kernel]", "schedule" },
477 { 1000, 1000, "perf", "libc", "free" },
478 { 1000, 1000, "perf", "libc", "malloc" },
479 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
480 };
481
482 symbol_conf.use_callchain = false;
483 symbol_conf.cumulate_callchain = true;
484 perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
485
486 setup_sorting(NULL);
487 callchain_register_param(&callchain_param);
488
489 err = add_hist_entries(hists, machine);
490 if (err < 0)
491 goto out;
492
493 err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);
494
495out:
496 del_hist_entries(hists);
497 reset_output_field();
498 return err;
499}
500
/* callchain + children: both cumulation and callchain output enabled */
static int test4(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Children      Self  Command  Shared Object          Symbol
	 * ========  ========  =======  =============  =======================
	 *   70.00%    20.00%     perf  perf           [.] main
	 *            |
	 *            --- main
	 *
	 *   50.00%     0.00%     perf  perf           [.] run_command
	 *            |
	 *            --- run_command
	 *                main
	 *
	 *   30.00%    10.00%     bash  bash           [.] main
	 *            |
	 *            --- main
	 *
	 *   30.00%    10.00%     perf  perf           [.] cmd_record
	 *            |
	 *            --- cmd_record
	 *                run_command
	 *                main
	 *
	 *   20.00%     0.00%     bash  libc           [.] malloc
	 *            |
	 *            --- malloc
	 *               |
	 *               |--50.00%-- xmalloc
	 *               |           main
	 *                --50.00%-- main
	 *
	 *   10.00%    10.00%     bash  [kernel]       [k] page_fault
	 *            |
	 *            --- page_fault
	 *                malloc
	 *                main
	 *
	 *   10.00%    10.00%     bash  bash           [.] xmalloc
	 *            |
	 *            --- xmalloc
	 *                malloc
	 *                xmalloc     <--- NOTE: there's a cycle
	 *                malloc
	 *                xmalloc
	 *                main
	 *
	 *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
	 *            |
	 *            --- sys_perf_event_open
	 *                run_command
	 *                main
	 *
	 *   10.00%    10.00%     perf  [kernel]       [k] page_fault
	 *            |
	 *            --- page_fault
	 *                sys_perf_event_open
	 *                run_command
	 *                main
	 *
	 *   10.00%    10.00%     perf  [kernel]       [k] schedule
	 *            |
	 *            --- schedule
	 *                run_command
	 *                main
	 *
	 *   10.00%    10.00%     perf  libc           [.] free
	 *            |
	 *            --- free
	 *                cmd_record
	 *                run_command
	 *                main
	 *
	 *   10.00%    10.00%     perf  libc           [.] malloc
	 *            |
	 *            --- malloc
	 *                cmd_record
	 *                run_command
	 *                main
	 *
	 */
	struct result expected[] = {
		{ 7000, 2000, "perf", "perf",     "main" },
		{ 5000,    0, "perf", "perf",     "run_command" },
		{ 3000, 1000, "bash", "bash",     "main" },
		{ 3000, 1000, "perf", "perf",     "cmd_record" },
		{ 2000,    0, "bash", "libc",     "malloc" },
		{ 1000, 1000, "bash", "[kernel]", "page_fault" },
		{ 1000, 1000, "bash", "bash",     "xmalloc" },
		{ 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
		{ 1000, 1000, "perf", "[kernel]", "page_fault" },
		{ 1000, 1000, "perf", "[kernel]", "schedule" },
		{ 1000, 1000, "perf", "libc",     "free" },
		{ 1000, 1000, "perf", "libc",     "malloc" },
	};
	/* one entry per row above, in the same order; branched chains are flattened */
	struct callchain_result expected_callchain[] = {
		{
			1, {	{ "perf",     "main" }, },
		},
		{
			2, {	{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			1, {	{ "bash",     "main" }, },
		},
		{
			3, {	{ "perf",     "cmd_record" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			4, {	{ "libc",     "malloc" },
				{ "bash",     "xmalloc" },
				{ "bash",     "main" },
				{ "bash",     "main" }, },
		},
		{
			3, {	{ "[kernel]", "page_fault" },
				{ "libc",     "malloc" },
				{ "bash",     "main" }, },
		},
		{
			6, {	{ "bash",     "xmalloc" },
				{ "libc",     "malloc" },
				{ "bash",     "xmalloc" },
				{ "libc",     "malloc" },
				{ "bash",     "xmalloc" },
				{ "bash",     "main" }, },
		},
		{
			3, {	{ "[kernel]", "sys_perf_event_open" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			4, {	{ "[kernel]", "page_fault" },
				{ "[kernel]", "sys_perf_event_open" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			3, {	{ "[kernel]", "schedule" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			4, {	{ "libc",     "free" },
				{ "perf",     "cmd_record" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			4, {	{ "libc",     "malloc" },
				{ "perf",     "cmd_record" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
	};

	/* enable both children accumulation and callchain collection/output */
	symbol_conf.use_callchain = true;
	symbol_conf.cumulate_callchain = true;
	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);

	/*
	 * callchain_param is shared global state also touched by the other
	 * testcases; reset it to the defaults before registering so this
	 * test is independent of whatever ran before it.
	 */
	callchain_param = callchain_param_default;
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected),
		      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}
687
688int test__hists_cumulate(int subtest __maybe_unused)
689{
690 int err = TEST_FAIL;
691 struct machines machines;
692 struct machine *machine;
693 struct perf_evsel *evsel;
694 struct perf_evlist *evlist = perf_evlist__new();
695 size_t i;
696 test_fn_t testcases[] = {
697 test1,
698 test2,
699 test3,
700 test4,
701 };
702
703 TEST_ASSERT_VAL("No memory", evlist);
704
705 err = parse_events(evlist, "cpu-clock", NULL);
706 if (err)
707 goto out;
708 err = TEST_FAIL;
709
710 machines__init(&machines);
711
712 /* setup threads/dso/map/symbols also */
713 machine = setup_fake_machine(&machines);
714 if (!machine)
715 goto out;
716
717 if (verbose > 1)
718 machine__fprintf(machine, stderr);
719
720 evsel = perf_evlist__first(evlist);
721
722 for (i = 0; i < ARRAY_SIZE(testcases); i++) {
723 err = testcases[i](evsel, machine);
724 if (err < 0)
725 break;
726 }
727
728out:
729 /* tear down everything */
730 perf_evlist__delete(evlist);
731 machines__exit(&machines);
732
733 return err;
734}