1// SPDX-License-Identifier: GPL-2.0
2#include "util/debug.h"
3#include "util/dso.h"
4#include "util/event.h"
5#include "util/map.h"
6#include "util/symbol.h"
7#include "util/sort.h"
8#include "util/evsel.h"
9#include "util/evlist.h"
10#include "util/machine.h"
11#include "util/thread.h"
12#include "util/parse-events.h"
13#include "tests/tests.h"
14#include "tests/hists_common.h"
15#include <linux/kernel.h>
16
/*
 * A synthesized profiling sample, plus the resolution results
 * (thread/map/symbol) recorded by add_hist_entries() for later use.
 */
struct sample {
	u32 pid;		/* fake pid; also reused as the tid */
	u64 ip;			/* fake instruction pointer */
	struct thread *thread;	/* filled from addr_location after resolve */
	struct map *map;
	struct symbol *sym;
};
24
25/* For the numbers, see hists_common.c */
26static struct sample fake_samples[] = {
27 /* perf [kernel] schedule() */
28 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
29 /* perf [perf] main() */
30 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
31 /* perf [perf] cmd_record() */
32 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
33 /* perf [libc] malloc() */
34 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
35 /* perf [libc] free() */
36 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
37 /* perf [perf] main() */
38 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
39 /* perf [kernel] page_fault() */
40 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
41 /* bash [bash] main() */
42 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, },
43 /* bash [bash] xmalloc() */
44 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
45 /* bash [kernel] page_fault() */
46 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
47};
48
49/*
50 * Will be cast to struct ip_callchain which has all 64 bit entries
51 * of nr and ips[].
52 */
53static u64 fake_callchains[][10] = {
54 /* schedule => run_command => main */
55 { 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
56 /* main */
57 { 1, FAKE_IP_PERF_MAIN, },
58 /* cmd_record => run_command => main */
59 { 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
60 /* malloc => cmd_record => run_command => main */
61 { 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
62 FAKE_IP_PERF_MAIN, },
63 /* free => cmd_record => run_command => main */
64 { 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
65 FAKE_IP_PERF_MAIN, },
66 /* main */
67 { 1, FAKE_IP_PERF_MAIN, },
68 /* page_fault => sys_perf_event_open => run_command => main */
69 { 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN,
70 FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
71 /* main */
72 { 1, FAKE_IP_BASH_MAIN, },
73 /* xmalloc => malloc => xmalloc => malloc => xmalloc => main */
74 { 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC,
75 FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, },
76 /* page_fault => malloc => main */
77 { 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, },
78};
79
/*
 * Feed every fake sample (with its callchain) into @hists through the
 * hist_entry_iter machinery.  The cumulative iterator is selected when
 * symbol_conf.cumulate_callchain is set, so the same data exercises both
 * the plain and the "children" accumulation paths.
 *
 * Returns TEST_OK on success, TEST_FAIL if resolving or inserting fails.
 */
static int add_hist_entries(struct hists *hists, struct machine *machine)
{
	struct addr_location al;
	struct evsel *evsel = hists_to_evsel(hists);
	struct perf_sample sample = { .period = 1000, };
	size_t i;

	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
		struct hist_entry_iter iter = {
			.evsel = evsel,
			.sample = &sample,
			.hide_unresolved = false,
		};

		/* the cumulate (children) mode needs a different iterator */
		if (symbol_conf.cumulate_callchain)
			iter.ops = &hist_iter_cumulative;
		else
			iter.ops = &hist_iter_normal;

		sample.cpumode = PERF_RECORD_MISC_USER;
		sample.pid = fake_samples[i].pid;
		sample.tid = fake_samples[i].pid;
		sample.ip = fake_samples[i].ip;
		/* rows of fake_callchains[] are laid out as ip_callchain */
		sample.callchain = (struct ip_callchain *)fake_callchains[i];

		if (machine__resolve(machine, &al, &sample) < 0)
			goto out;

		if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
					 NULL) < 0) {
			addr_location__put(&al);
			goto out;
		}

		/* keep the resolution results for later verification */
		fake_samples[i].thread = al.thread;
		fake_samples[i].map = al.map;
		fake_samples[i].sym = al.sym;
	}

	return TEST_OK;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return TEST_FAIL;
}
125
126static void del_hist_entries(struct hists *hists)
127{
128 struct hist_entry *he;
129 struct rb_root_cached *root_in;
130 struct rb_root_cached *root_out;
131 struct rb_node *node;
132
133 if (hists__has(hists, need_collapse))
134 root_in = &hists->entries_collapsed;
135 else
136 root_in = hists->entries_in;
137
138 root_out = &hists->entries;
139
140 while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
141 node = rb_first_cached(root_out);
142
143 he = rb_entry(node, struct hist_entry, rb_node);
144 rb_erase_cached(node, root_out);
145 rb_erase_cached(&he->rb_node_in, root_in);
146 hist_entry__delete(he);
147 }
148}
149
/* signature shared by all four sub-tests below */
typedef int (*test_fn_t)(struct evsel *, struct machine *);

/* shorthand accessors for a hist entry (he) or a callchain list node (cl) */
#define COMM(he)  (thread__comm_str(he->thread))
#define DSO(he)   (he->ms.map->dso->short_name)
#define SYM(he)   (he->ms.sym->name)
#define CPU(he)   (he->cpu)
#define PID(he)   (he->thread->tid)
#define DEPTH(he) (he->callchain->max_depth)
#define CDSO(cl)  (cl->ms.map->dso->short_name)
#define CSYM(cl)  (cl->ms.sym->name)

/* one expected hist entry: accumulated/self periods and resolved names */
struct result {
	u64 children;
	u64 self;
	const char *comm;
	const char *dso;
	const char *sym;
};

/* one expected callchain: its length and per-node dso/symbol names */
struct callchain_result {
	u64 nr;
	struct {
		const char *dso;
		const char *sym;
	} node[10];
};
176
/*
 * Resort @hists and compare the resulting entries (and, when callchains
 * are enabled, their first callchain branch) against @expected /
 * @expected_callchain.  Returns 0 on success; TEST_ASSERT_VAL() returns
 * early with an error on any mismatch.
 */
static int do_test(struct hists *hists, struct result *expected, size_t nr_expected,
		   struct callchain_result *expected_callchain, size_t nr_callchain)
{
	char buf[32];
	size_t i, c;
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;
	struct callchain_node *cnode;
	struct callchain_list *clist;

	/*
	 * adding and deleting hist entries must be done outside of this
	 * function since TEST_ASSERT_VAL() returns in case of failure.
	 */
	hists__collapse_resort(hists, NULL);
	evsel__output_resort(hists_to_evsel(hists), NULL);

	if (verbose > 2) {
		pr_info("use callchain: %d, cumulate callchain: %d\n",
			symbol_conf.use_callchain,
			symbol_conf.cumulate_callchain);
		print_hists_out(hists);
	}

	root = &hists->entries.rb_root;
	for (node = rb_first(root), i = 0;
	     node && (he = rb_entry(node, struct hist_entry, rb_node));
	     node = rb_next(node), i++) {
		scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i);

		TEST_ASSERT_VAL("Incorrect number of hist entry",
				i < nr_expected);
		TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self &&
				!strcmp(COMM(he), expected[i].comm) &&
				!strcmp(DSO(he), expected[i].dso) &&
				!strcmp(SYM(he), expected[i].sym));

		if (symbol_conf.cumulate_callchain)
			TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children);

		if (!symbol_conf.use_callchain)
			continue;

		/*
		 * check callchain entries — reusing 'root' is safe here
		 * since the loop advances with rb_next(node) and never
		 * re-reads the outer tree root.
		 */
		root = &he->callchain->node.rb_root;

		TEST_ASSERT_VAL("callchains expected", !RB_EMPTY_ROOT(root));
		cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);

		c = 0;
		list_for_each_entry(clist, &cnode->val, list) {
			scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c);

			TEST_ASSERT_VAL("Incorrect number of callchain entry",
					c < expected_callchain[i].nr);
			TEST_ASSERT_VAL(buf,
				!strcmp(CDSO(clist), expected_callchain[i].node[c].dso) &&
				!strcmp(CSYM(clist), expected_callchain[i].node[c].sym));
			c++;
		}
		/* TODO: handle multiple child nodes properly */
		TEST_ASSERT_VAL("Incorrect number of callchain entry",
				c <= expected_callchain[i].nr);
	}
	TEST_ASSERT_VAL("Incorrect number of hist entry",
			i == nr_expected);
	TEST_ASSERT_VAL("Incorrect number of callchain entry",
			!symbol_conf.use_callchain || nr_expected == nr_callchain);
	return 0;
}
248
/* NO callchain + NO children: plain per-symbol histogram only */
static int test1(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Overhead  Command  Shared Object          Symbol
	 * ========  =======  =============  ==============
	 *   20.00%     perf  perf           [.] main
	 *   10.00%     bash  [kernel]       [k] page_fault
	 *   10.00%     bash  bash           [.] main
	 *   10.00%     bash  bash           [.] xmalloc
	 *   10.00%     perf  [kernel]       [k] page_fault
	 *   10.00%     perf  [kernel]       [k] schedule
	 *   10.00%     perf  libc           [.] free
	 *   10.00%     perf  libc           [.] malloc
	 *   10.00%     perf  perf           [.] cmd_record
	 */
	struct result expected[] = {
		{ 0, 2000, "perf", "perf",     "main" },
		{ 0, 1000, "bash", "[kernel]", "page_fault" },
		{ 0, 1000, "bash", "bash",     "main" },
		{ 0, 1000, "bash", "bash",     "xmalloc" },
		{ 0, 1000, "perf", "[kernel]", "page_fault" },
		{ 0, 1000, "perf", "[kernel]", "schedule" },
		{ 0, 1000, "perf", "libc",     "free" },
		{ 0, 1000, "perf", "libc",     "malloc" },
		{ 0, 1000, "perf", "perf",     "cmd_record" },
	};

	/* global config for this scenario: no callchain, no accumulation */
	symbol_conf.use_callchain = false;
	symbol_conf.cumulate_callchain = false;
	evsel__reset_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}
299
/* callchain + NO children: per-symbol histogram plus callchain trees */
static int test2(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Overhead  Command  Shared Object          Symbol
	 * ========  =======  =============  ==============
	 *   20.00%     perf  perf           [.] main
	 *              |
	 *              --- main
	 *
	 *   10.00%     bash  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  malloc
	 *                  main
	 *
	 *   10.00%     bash  bash           [.] main
	 *              |
	 *              --- main
	 *
	 *   10.00%     bash  bash           [.] xmalloc
	 *              |
	 *              --- xmalloc
	 *                  malloc
	 *                  xmalloc     <--- NOTE: there's a cycle
	 *                  malloc
	 *                  xmalloc
	 *                  main
	 *
	 *   10.00%     perf  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  sys_perf_event_open
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  [kernel]       [k] schedule
	 *              |
	 *              --- schedule
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  libc           [.] free
	 *              |
	 *              --- free
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  libc           [.] malloc
	 *              |
	 *              --- malloc
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  perf           [.] cmd_record
	 *              |
	 *              --- cmd_record
	 *                  run_command
	 *                  main
	 *
	 */
	struct result expected[] = {
		{ 0, 2000, "perf", "perf",     "main" },
		{ 0, 1000, "bash", "[kernel]", "page_fault" },
		{ 0, 1000, "bash", "bash",     "main" },
		{ 0, 1000, "bash", "bash",     "xmalloc" },
		{ 0, 1000, "perf", "[kernel]", "page_fault" },
		{ 0, 1000, "perf", "[kernel]", "schedule" },
		{ 0, 1000, "perf", "libc",     "free" },
		{ 0, 1000, "perf", "libc",     "malloc" },
		{ 0, 1000, "perf", "perf",     "cmd_record" },
	};
	/* indices match expected[] above, one callchain per hist entry */
	struct callchain_result expected_callchain[] = {
		{
			1, { { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "page_fault" },
			     { "libc",     "malloc" },
			     { "bash",     "main" }, },
		},
		{
			1, { { "bash",     "main" }, },
		},
		{
			6, { { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" }, },
		},
		{
			4, { { "[kernel]", "page_fault" },
			     { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "schedule" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "free" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
	};

	/* global config for this scenario: callchains on, no accumulation */
	symbol_conf.use_callchain = true;
	symbol_conf.cumulate_callchain = false;
	evsel__set_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected),
		      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}
447
/* NO callchain + children: cumulative (Children column) mode only */
static int test3(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Children      Self  Command  Shared Object                   Symbol
	 * ========  ========  =======  =============  =======================
	 *   70.00%    20.00%     perf  perf           [.] main
	 *   50.00%     0.00%     perf  perf           [.] run_command
	 *   30.00%    10.00%     bash  bash           [.] main
	 *   30.00%    10.00%     perf  perf           [.] cmd_record
	 *   20.00%     0.00%     bash  libc           [.] malloc
	 *   10.00%    10.00%     bash  [kernel]       [k] page_fault
	 *   10.00%    10.00%     bash  bash           [.] xmalloc
	 *   10.00%    10.00%     perf  [kernel]       [k] page_fault
	 *   10.00%    10.00%     perf  libc           [.] malloc
	 *   10.00%    10.00%     perf  [kernel]       [k] schedule
	 *   10.00%    10.00%     perf  libc           [.] free
	 *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
	 */
	struct result expected[] = {
		{ 7000, 2000, "perf", "perf",     "main" },
		{ 5000,    0, "perf", "perf",     "run_command" },
		{ 3000, 1000, "bash", "bash",     "main" },
		{ 3000, 1000, "perf", "perf",     "cmd_record" },
		{ 2000,    0, "bash", "libc",     "malloc" },
		{ 1000, 1000, "bash", "[kernel]", "page_fault" },
		{ 1000, 1000, "bash", "bash",     "xmalloc" },
		{ 1000, 1000, "perf", "[kernel]", "page_fault" },
		{ 1000, 1000, "perf", "[kernel]", "schedule" },
		{ 1000, 1000, "perf", "libc",     "free" },
		{ 1000, 1000, "perf", "libc",     "malloc" },
		{ 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
	};

	/* global config for this scenario: accumulation on, no callchain output */
	symbol_conf.use_callchain = false;
	symbol_conf.cumulate_callchain = true;
	evsel__reset_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}
504
/* callchain + children: cumulative mode with callchain trees attached */
static int test4(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Children      Self  Command  Shared Object                   Symbol
	 * ========  ========  =======  =============  =======================
	 *   70.00%    20.00%     perf  perf           [.] main
	 *              |
	 *              --- main
	 *
	 *   50.00%     0.00%     perf  perf           [.] run_command
	 *              |
	 *              --- run_command
	 *                  main
	 *
	 *   30.00%    10.00%     bash  bash           [.] main
	 *              |
	 *              --- main
	 *
	 *   30.00%    10.00%     perf  perf           [.] cmd_record
	 *              |
	 *              --- cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   20.00%     0.00%     bash  libc           [.] malloc
	 *              |
	 *              --- malloc
	 *                 |
	 *                 |--50.00%-- xmalloc
	 *                 |           main
	 *                  --50.00%-- main
	 *
	 *   10.00%    10.00%     bash  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  malloc
	 *                  main
	 *
	 *   10.00%    10.00%     bash  bash           [.] xmalloc
	 *              |
	 *              --- xmalloc
	 *                  malloc
	 *                  xmalloc     <--- NOTE: there's a cycle
	 *                  malloc
	 *                  xmalloc
	 *                  main
	 *
	 *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
	 *              |
	 *              --- sys_perf_event_open
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  sys_perf_event_open
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  [kernel]       [k] schedule
	 *              |
	 *              --- schedule
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  libc           [.] free
	 *              |
	 *              --- free
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  libc           [.] malloc
	 *              |
	 *              --- malloc
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 */
	struct result expected[] = {
		{ 7000, 2000, "perf", "perf",     "main" },
		{ 5000,    0, "perf", "perf",     "run_command" },
		{ 3000, 1000, "bash", "bash",     "main" },
		{ 3000, 1000, "perf", "perf",     "cmd_record" },
		{ 2000,    0, "bash", "libc",     "malloc" },
		{ 1000, 1000, "bash", "[kernel]", "page_fault" },
		{ 1000, 1000, "bash", "bash",     "xmalloc" },
		{ 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
		{ 1000, 1000, "perf", "[kernel]", "page_fault" },
		{ 1000, 1000, "perf", "[kernel]", "schedule" },
		{ 1000, 1000, "perf", "libc",     "free" },
		{ 1000, 1000, "perf", "libc",     "malloc" },
	};
	/*
	 * indices match expected[] above; only the first branch of each
	 * callchain tree is checked by do_test()
	 */
	struct callchain_result expected_callchain[] = {
		{
			1, { { "perf",     "main" }, },
		},
		{
			2, { { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			1, { { "bash",     "main" }, },
		},
		{
			3, { { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" },
			     { "bash",     "main" }, },
		},
		{
			3, { { "[kernel]", "page_fault" },
			     { "libc",     "malloc" },
			     { "bash",     "main" }, },
		},
		{
			6, { { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" }, },
		},
		{
			3, { { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "[kernel]", "page_fault" },
			     { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "schedule" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "free" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
	};

	/* global config for this scenario: accumulation and callchains on */
	symbol_conf.use_callchain = true;
	symbol_conf.cumulate_callchain = true;
	evsel__set_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);

	/* earlier sub-tests may have changed callchain_param; restore it */
	callchain_param = callchain_param_default;
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected),
		      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}
691
692static int test__hists_cumulate(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
693{
694 int err = TEST_FAIL;
695 struct machines machines;
696 struct machine *machine;
697 struct evsel *evsel;
698 struct evlist *evlist = evlist__new();
699 size_t i;
700 test_fn_t testcases[] = {
701 test1,
702 test2,
703 test3,
704 test4,
705 };
706
707 TEST_ASSERT_VAL("No memory", evlist);
708
709 err = parse_event(evlist, "cpu-clock");
710 if (err)
711 goto out;
712 err = TEST_FAIL;
713
714 machines__init(&machines);
715
716 /* setup threads/dso/map/symbols also */
717 machine = setup_fake_machine(&machines);
718 if (!machine)
719 goto out;
720
721 if (verbose > 1)
722 machine__fprintf(machine, stderr);
723
724 evsel = evlist__first(evlist);
725
726 for (i = 0; i < ARRAY_SIZE(testcases); i++) {
727 err = testcases[i](evsel, machine);
728 if (err < 0)
729 break;
730 }
731
732out:
733 /* tear down everything */
734 evlist__delete(evlist);
735 machines__exit(&machines);
736
737 return err;
738}
739
/* register this file as the "Cumulate child hist entries" test suite */
DEFINE_SUITE("Cumulate child hist entries", hists_cumulate);
1#include "perf.h"
2#include "util/debug.h"
3#include "util/symbol.h"
4#include "util/sort.h"
5#include "util/evsel.h"
6#include "util/evlist.h"
7#include "util/machine.h"
8#include "util/thread.h"
9#include "util/parse-events.h"
10#include "tests/tests.h"
11#include "tests/hists_common.h"
12
/*
 * A synthesized sample plus resolution results (thread/map/symbol)
 * recorded by add_hist_entries() for later use.
 */
struct sample {
	u32 pid;		/* fake pid; also reused as the tid */
	u64 ip;			/* fake instruction pointer */
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};
20
/*
 * For the numbers, see hists_common.c.
 * Indices line up 1:1 with fake_callchains[].
 */
static struct sample fake_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* perf [libc]   malloc() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
	/* perf [libc]   free() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [kernel] page_fault() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	/* bash [bash]   main() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_MAIN, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [kernel] page_fault() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
};
44
/*
 * Will be cast to struct ip_callchain which has all 64 bit entries
 * of nr and ips[].  First element of each row is nr; the rest are
 * the fake IPs, callee first.
 */
static u64 fake_callchains[][10] = {
	/* schedule => run_command => main */
	{ 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
	/* main */
	{ 1, FAKE_IP_PERF_MAIN, },
	/* cmd_record => run_command => main */
	{ 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
	/* malloc => cmd_record => run_command => main */
	{ 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
	     FAKE_IP_PERF_MAIN, },
	/* free => cmd_record => run_command => main */
	{ 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
	     FAKE_IP_PERF_MAIN, },
	/* main */
	{ 1, FAKE_IP_PERF_MAIN, },
	/* page_fault => sys_perf_event_open => run_command => main */
	{ 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN,
	     FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
	/* main */
	{ 1, FAKE_IP_BASH_MAIN, },
	/* xmalloc => malloc => xmalloc => malloc => xmalloc => main */
	{ 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC,
	     FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, },
	/* page_fault => malloc => main */
	{ 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, },
};
75
/*
 * Feed every fake sample (with its callchain) into @hists through the
 * hist_entry_iter machinery; the cumulative iterator is used when
 * symbol_conf.cumulate_callchain is set.
 *
 * Returns TEST_OK on success, TEST_FAIL if resolving or inserting fails.
 */
static int add_hist_entries(struct hists *hists, struct machine *machine)
{
	struct addr_location al;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	struct perf_sample sample = { .period = 1000, };
	size_t i;

	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
		struct hist_entry_iter iter = {
			.evsel = evsel,
			.sample = &sample,
			.hide_unresolved = false,
		};

		/* the cumulate (children) mode needs a different iterator */
		if (symbol_conf.cumulate_callchain)
			iter.ops = &hist_iter_cumulative;
		else
			iter.ops = &hist_iter_normal;

		sample.cpumode = PERF_RECORD_MISC_USER;
		sample.pid = fake_samples[i].pid;
		sample.tid = fake_samples[i].pid;
		sample.ip = fake_samples[i].ip;
		/* rows of fake_callchains[] are laid out as ip_callchain */
		sample.callchain = (struct ip_callchain *)fake_callchains[i];

		if (machine__resolve(machine, &al, &sample) < 0)
			goto out;

		if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH,
					 NULL) < 0) {
			addr_location__put(&al);
			goto out;
		}

		/* keep the resolution results for later verification */
		fake_samples[i].thread = al.thread;
		fake_samples[i].map = al.map;
		fake_samples[i].sym = al.sym;
	}

	return TEST_OK;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return TEST_FAIL;
}
121
/*
 * Tear down all hist entries: unlink each one from both the output tree
 * and the input/collapsed tree, then free it.
 */
static void del_hist_entries(struct hists *hists)
{
	struct hist_entry *he;
	struct rb_root *root_in;
	struct rb_root *root_out;
	struct rb_node *node;

	if (sort__need_collapse)
		root_in = &hists->entries_collapsed;
	else
		root_in = hists->entries_in;

	root_out = &hists->entries;

	while (!RB_EMPTY_ROOT(root_out)) {
		node = rb_first(root_out);

		he = rb_entry(node, struct hist_entry, rb_node);
		rb_erase(node, root_out);
		/* each entry also sits in the input tree via rb_node_in */
		rb_erase(&he->rb_node_in, root_in);
		hist_entry__delete(he);
	}
}
145
/* signature shared by all sub-tests below */
typedef int (*test_fn_t)(struct perf_evsel *, struct machine *);

/* shorthand accessors for a hist entry (he) or a callchain list node (cl) */
#define COMM(he)  (thread__comm_str(he->thread))
#define DSO(he)   (he->ms.map->dso->short_name)
#define SYM(he)   (he->ms.sym->name)
#define CPU(he)   (he->cpu)
#define PID(he)   (he->thread->tid)
#define DEPTH(he) (he->callchain->max_depth)
#define CDSO(cl)  (cl->ms.map->dso->short_name)
#define CSYM(cl)  (cl->ms.sym->name)

/* one expected hist entry: accumulated/self periods and resolved names */
struct result {
	u64 children;
	u64 self;
	const char *comm;
	const char *dso;
	const char *sym;
};

/* one expected callchain: its length and per-node dso/symbol names */
struct callchain_result {
	u64 nr;
	struct {
		const char *dso;
		const char *sym;
	} node[10];
};
172
173static int do_test(struct hists *hists, struct result *expected, size_t nr_expected,
174 struct callchain_result *expected_callchain, size_t nr_callchain)
175{
176 char buf[32];
177 size_t i, c;
178 struct hist_entry *he;
179 struct rb_root *root;
180 struct rb_node *node;
181 struct callchain_node *cnode;
182 struct callchain_list *clist;
183
184 /*
185 * adding and deleting hist entries must be done outside of this
186 * function since TEST_ASSERT_VAL() returns in case of failure.
187 */
188 hists__collapse_resort(hists, NULL);
189 perf_evsel__output_resort(hists_to_evsel(hists), NULL);
190
191 if (verbose > 2) {
192 pr_info("use callchain: %d, cumulate callchain: %d\n",
193 symbol_conf.use_callchain,
194 symbol_conf.cumulate_callchain);
195 print_hists_out(hists);
196 }
197
198 root = &hists->entries;
199 for (node = rb_first(root), i = 0;
200 node && (he = rb_entry(node, struct hist_entry, rb_node));
201 node = rb_next(node), i++) {
202 scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i);
203
204 TEST_ASSERT_VAL("Incorrect number of hist entry",
205 i < nr_expected);
206 TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self &&
207 !strcmp(COMM(he), expected[i].comm) &&
208 !strcmp(DSO(he), expected[i].dso) &&
209 !strcmp(SYM(he), expected[i].sym));
210
211 if (symbol_conf.cumulate_callchain)
212 TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children);
213
214 if (!symbol_conf.use_callchain)
215 continue;
216
217 /* check callchain entries */
218 root = &he->callchain->node.rb_root;
219 cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);
220
221 c = 0;
222 list_for_each_entry(clist, &cnode->val, list) {
223 scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c);
224
225 TEST_ASSERT_VAL("Incorrect number of callchain entry",
226 c < expected_callchain[i].nr);
227 TEST_ASSERT_VAL(buf,
228 !strcmp(CDSO(clist), expected_callchain[i].node[c].dso) &&
229 !strcmp(CSYM(clist), expected_callchain[i].node[c].sym));
230 c++;
231 }
232 /* TODO: handle multiple child nodes properly */
233 TEST_ASSERT_VAL("Incorrect number of callchain entry",
234 c <= expected_callchain[i].nr);
235 }
236 TEST_ASSERT_VAL("Incorrect number of hist entry",
237 i == nr_expected);
238 TEST_ASSERT_VAL("Incorrect number of callchain entry",
239 !symbol_conf.use_callchain || nr_expected == nr_callchain);
240 return 0;
241}
242
/* NO callchain + NO children: plain per-symbol histogram only */
static int test1(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Overhead  Command  Shared Object          Symbol
	 * ========  =======  =============  ==============
	 *   20.00%     perf  perf           [.] main
	 *   10.00%     bash  [kernel]       [k] page_fault
	 *   10.00%     bash  bash           [.] main
	 *   10.00%     bash  bash           [.] xmalloc
	 *   10.00%     perf  [kernel]       [k] page_fault
	 *   10.00%     perf  [kernel]       [k] schedule
	 *   10.00%     perf  libc           [.] free
	 *   10.00%     perf  libc           [.] malloc
	 *   10.00%     perf  perf           [.] cmd_record
	 */
	struct result expected[] = {
		{ 0, 2000, "perf", "perf",     "main" },
		{ 0, 1000, "bash", "[kernel]", "page_fault" },
		{ 0, 1000, "bash", "bash",     "main" },
		{ 0, 1000, "bash", "bash",     "xmalloc" },
		{ 0, 1000, "perf", "[kernel]", "page_fault" },
		{ 0, 1000, "perf", "[kernel]", "schedule" },
		{ 0, 1000, "perf", "libc",     "free" },
		{ 0, 1000, "perf", "libc",     "malloc" },
		{ 0, 1000, "perf", "perf",     "cmd_record" },
	};

	/* global config for this scenario: no callchain, no accumulation */
	symbol_conf.use_callchain = false;
	symbol_conf.cumulate_callchain = false;
	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}
293
/* callchain + NO children: per-symbol histogram plus callchain trees */
static int test2(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Overhead  Command  Shared Object          Symbol
	 * ========  =======  =============  ==============
	 *   20.00%     perf  perf           [.] main
	 *              |
	 *              --- main
	 *
	 *   10.00%     bash  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  malloc
	 *                  main
	 *
	 *   10.00%     bash  bash           [.] main
	 *              |
	 *              --- main
	 *
	 *   10.00%     bash  bash           [.] xmalloc
	 *              |
	 *              --- xmalloc
	 *                  malloc
	 *                  xmalloc     <--- NOTE: there's a cycle
	 *                  malloc
	 *                  xmalloc
	 *                  main
	 *
	 *   10.00%     perf  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  sys_perf_event_open
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  [kernel]       [k] schedule
	 *              |
	 *              --- schedule
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  libc           [.] free
	 *              |
	 *              --- free
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  libc           [.] malloc
	 *              |
	 *              --- malloc
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  perf           [.] cmd_record
	 *              |
	 *              --- cmd_record
	 *                  run_command
	 *                  main
	 *
	 */
	struct result expected[] = {
		{ 0, 2000, "perf", "perf",     "main" },
		{ 0, 1000, "bash", "[kernel]", "page_fault" },
		{ 0, 1000, "bash", "bash",     "main" },
		{ 0, 1000, "bash", "bash",     "xmalloc" },
		{ 0, 1000, "perf", "[kernel]", "page_fault" },
		{ 0, 1000, "perf", "[kernel]", "schedule" },
		{ 0, 1000, "perf", "libc",     "free" },
		{ 0, 1000, "perf", "libc",     "malloc" },
		{ 0, 1000, "perf", "perf",     "cmd_record" },
	};
	/* indices match expected[] above, one callchain per hist entry */
	struct callchain_result expected_callchain[] = {
		{
			1, { { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "page_fault" },
			     { "libc",     "malloc" },
			     { "bash",     "main" }, },
		},
		{
			1, { { "bash",     "main" }, },
		},
		{
			6, { { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" }, },
		},
		{
			4, { { "[kernel]", "page_fault" },
			     { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "schedule" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "free" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
	};

	/* global config for this scenario: callchains on, no accumulation */
	symbol_conf.use_callchain = true;
	symbol_conf.cumulate_callchain = false;
	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected),
		      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}
441
442/* NO callchain + children */
443static int test3(struct perf_evsel *evsel, struct machine *machine)
444{
445 int err;
446 struct hists *hists = evsel__hists(evsel);
447 /*
448 * expected output:
449 *
450 * Children Self Command Shared Object Symbol
451 * ======== ======== ======= ============= =======================
452 * 70.00% 20.00% perf perf [.] main
453 * 50.00% 0.00% perf perf [.] run_command
454 * 30.00% 10.00% bash bash [.] main
455 * 30.00% 10.00% perf perf [.] cmd_record
456 * 20.00% 0.00% bash libc [.] malloc
457 * 10.00% 10.00% bash [kernel] [k] page_fault
458 * 10.00% 10.00% bash bash [.] xmalloc
459 * 10.00% 10.00% perf [kernel] [k] page_fault
460 * 10.00% 10.00% perf libc [.] malloc
461 * 10.00% 10.00% perf [kernel] [k] schedule
462 * 10.00% 10.00% perf libc [.] free
463 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
464 */
465 struct result expected[] = {
466 { 7000, 2000, "perf", "perf", "main" },
467 { 5000, 0, "perf", "perf", "run_command" },
468 { 3000, 1000, "bash", "bash", "main" },
469 { 3000, 1000, "perf", "perf", "cmd_record" },
470 { 2000, 0, "bash", "libc", "malloc" },
471 { 1000, 1000, "bash", "[kernel]", "page_fault" },
472 { 1000, 1000, "bash", "bash", "xmalloc" },
473 { 1000, 1000, "perf", "[kernel]", "page_fault" },
474 { 1000, 1000, "perf", "[kernel]", "schedule" },
475 { 1000, 1000, "perf", "libc", "free" },
476 { 1000, 1000, "perf", "libc", "malloc" },
477 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
478 };
479
480 symbol_conf.use_callchain = false;
481 symbol_conf.cumulate_callchain = true;
482 perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
483
484 setup_sorting(NULL);
485 callchain_register_param(&callchain_param);
486
487 err = add_hist_entries(hists, machine);
488 if (err < 0)
489 goto out;
490
491 err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);
492
493out:
494 del_hist_entries(hists);
495 reset_output_field();
496 return err;
497}
498
499/* callchain + children */
500static int test4(struct perf_evsel *evsel, struct machine *machine)
501{
502 int err;
503 struct hists *hists = evsel__hists(evsel);
504 /*
505 * expected output:
506 *
507 * Children Self Command Shared Object Symbol
508 * ======== ======== ======= ============= =======================
509 * 70.00% 20.00% perf perf [.] main
510 * |
511 * --- main
512 *
513 * 50.00% 0.00% perf perf [.] run_command
514 * |
515 * --- run_command
516 * main
517 *
518 * 30.00% 10.00% bash bash [.] main
519 * |
520 * --- main
521 *
522 * 30.00% 10.00% perf perf [.] cmd_record
523 * |
524 * --- cmd_record
525 * run_command
526 * main
527 *
528 * 20.00% 0.00% bash libc [.] malloc
529 * |
530 * --- malloc
531 * |
532 * |--50.00%-- xmalloc
533 * | main
534 * --50.00%-- main
535 *
536 * 10.00% 10.00% bash [kernel] [k] page_fault
537 * |
538 * --- page_fault
539 * malloc
540 * main
541 *
542 * 10.00% 10.00% bash bash [.] xmalloc
543 * |
544 * --- xmalloc
545 * malloc
546 * xmalloc <--- NOTE: there's a cycle
547 * malloc
548 * xmalloc
549 * main
550 *
551 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
552 * |
553 * --- sys_perf_event_open
554 * run_command
555 * main
556 *
557 * 10.00% 10.00% perf [kernel] [k] page_fault
558 * |
559 * --- page_fault
560 * sys_perf_event_open
561 * run_command
562 * main
563 *
564 * 10.00% 10.00% perf [kernel] [k] schedule
565 * |
566 * --- schedule
567 * run_command
568 * main
569 *
570 * 10.00% 10.00% perf libc [.] free
571 * |
572 * --- free
573 * cmd_record
574 * run_command
575 * main
576 *
577 * 10.00% 10.00% perf libc [.] malloc
578 * |
579 * --- malloc
580 * cmd_record
581 * run_command
582 * main
583 *
584 */
585 struct result expected[] = {
586 { 7000, 2000, "perf", "perf", "main" },
587 { 5000, 0, "perf", "perf", "run_command" },
588 { 3000, 1000, "bash", "bash", "main" },
589 { 3000, 1000, "perf", "perf", "cmd_record" },
590 { 2000, 0, "bash", "libc", "malloc" },
591 { 1000, 1000, "bash", "[kernel]", "page_fault" },
592 { 1000, 1000, "bash", "bash", "xmalloc" },
593 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
594 { 1000, 1000, "perf", "[kernel]", "page_fault" },
595 { 1000, 1000, "perf", "[kernel]", "schedule" },
596 { 1000, 1000, "perf", "libc", "free" },
597 { 1000, 1000, "perf", "libc", "malloc" },
598 };
599 struct callchain_result expected_callchain[] = {
600 {
601 1, { { "perf", "main" }, },
602 },
603 {
604 2, { { "perf", "run_command" },
605 { "perf", "main" }, },
606 },
607 {
608 1, { { "bash", "main" }, },
609 },
610 {
611 3, { { "perf", "cmd_record" },
612 { "perf", "run_command" },
613 { "perf", "main" }, },
614 },
615 {
616 4, { { "libc", "malloc" },
617 { "bash", "xmalloc" },
618 { "bash", "main" },
619 { "bash", "main" }, },
620 },
621 {
622 3, { { "[kernel]", "page_fault" },
623 { "libc", "malloc" },
624 { "bash", "main" }, },
625 },
626 {
627 6, { { "bash", "xmalloc" },
628 { "libc", "malloc" },
629 { "bash", "xmalloc" },
630 { "libc", "malloc" },
631 { "bash", "xmalloc" },
632 { "bash", "main" }, },
633 },
634 {
635 3, { { "[kernel]", "sys_perf_event_open" },
636 { "perf", "run_command" },
637 { "perf", "main" }, },
638 },
639 {
640 4, { { "[kernel]", "page_fault" },
641 { "[kernel]", "sys_perf_event_open" },
642 { "perf", "run_command" },
643 { "perf", "main" }, },
644 },
645 {
646 3, { { "[kernel]", "schedule" },
647 { "perf", "run_command" },
648 { "perf", "main" }, },
649 },
650 {
651 4, { { "libc", "free" },
652 { "perf", "cmd_record" },
653 { "perf", "run_command" },
654 { "perf", "main" }, },
655 },
656 {
657 4, { { "libc", "malloc" },
658 { "perf", "cmd_record" },
659 { "perf", "run_command" },
660 { "perf", "main" }, },
661 },
662 };
663
664 symbol_conf.use_callchain = true;
665 symbol_conf.cumulate_callchain = true;
666 perf_evsel__set_sample_bit(evsel, CALLCHAIN);
667
668 setup_sorting(NULL);
669 callchain_register_param(&callchain_param);
670
671 err = add_hist_entries(hists, machine);
672 if (err < 0)
673 goto out;
674
675 err = do_test(hists, expected, ARRAY_SIZE(expected),
676 expected_callchain, ARRAY_SIZE(expected_callchain));
677
678out:
679 del_hist_entries(hists);
680 reset_output_field();
681 return err;
682}
683
684int test__hists_cumulate(int subtest __maybe_unused)
685{
686 int err = TEST_FAIL;
687 struct machines machines;
688 struct machine *machine;
689 struct perf_evsel *evsel;
690 struct perf_evlist *evlist = perf_evlist__new();
691 size_t i;
692 test_fn_t testcases[] = {
693 test1,
694 test2,
695 test3,
696 test4,
697 };
698
699 TEST_ASSERT_VAL("No memory", evlist);
700
701 err = parse_events(evlist, "cpu-clock", NULL);
702 if (err)
703 goto out;
704 err = TEST_FAIL;
705
706 machines__init(&machines);
707
708 /* setup threads/dso/map/symbols also */
709 machine = setup_fake_machine(&machines);
710 if (!machine)
711 goto out;
712
713 if (verbose > 1)
714 machine__fprintf(machine, stderr);
715
716 evsel = perf_evlist__first(evlist);
717
718 for (i = 0; i < ARRAY_SIZE(testcases); i++) {
719 err = testcases[i](evsel, machine);
720 if (err < 0)
721 break;
722 }
723
724out:
725 /* tear down everything */
726 perf_evlist__delete(evlist);
727 machines__exit(&machines);
728
729 return err;
730}