// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>
#include "bpf/libbpf_internal.h"
#include "test_perf_branches.skel.h"

static void check_good_sample(struct test_perf_branches *skel)
{
	int written_global = skel->bss->written_global_out;
	int required_size = skel->bss->required_size_out;
	int written_stack = skel->bss->written_stack_out;
	int pbe_size = sizeof(struct perf_branch_entry);
	int duration = 0;

	if (CHECK(!skel->bss->valid, "output not valid",
		  "no valid sample from prog"))
		return;

	/*
	 * It's hard to validate the contents of the branch entries b/c it
	 * would require some kind of disassembler and also encoding the
	 * valid jump instructions for supported architectures. So just check
	 * the easy stuff for now.
	 */
	CHECK(required_size <= 0, "read_branches_size", "err %d\n", required_size);
	CHECK(written_stack < 0, "read_branches_stack", "err %d\n", written_stack);
	CHECK(written_stack % pbe_size != 0, "read_branches_stack",
	      "stack bytes written=%d not multiple of struct size=%d\n",
	      written_stack, pbe_size);
	CHECK(written_global < 0, "read_branches_global", "err %d\n", written_global);
	CHECK(written_global % pbe_size != 0, "read_branches_global",
	      "global bytes written=%d not multiple of struct size=%d\n",
	      written_global, pbe_size);
	CHECK(written_global < written_stack, "read_branches_size",
	      "written_global=%d < written_stack=%d\n",
	      written_global, written_stack);
}

static void check_bad_sample(struct test_perf_branches *skel)
{
	int written_global = skel->bss->written_global_out;
	int required_size = skel->bss->required_size_out;
	int written_stack = skel->bss->written_stack_out;
	int duration = 0;

	if (CHECK(!skel->bss->valid, "output not valid",
		  "no valid sample from prog"))
		return;

	CHECK((required_size != -EINVAL && required_size != -ENOENT),
	      "read_branches_size", "err %d\n", required_size);
	CHECK((written_stack != -EINVAL && written_stack != -ENOENT),
	      "read_branches_stack", "written %d\n", written_stack);
	CHECK((written_global != -EINVAL && written_global != -ENOENT),
	      "read_branches_global", "written %d\n", written_global);
}

static void test_perf_branches_common(int perf_fd,
				      void (*cb)(struct test_perf_branches *))
{
	struct test_perf_branches *skel;
	int err, i, duration = 0;
	bool detached = false;
	struct bpf_link *link;
	volatile int j = 0;
	cpu_set_t cpu_set;

	skel = test_perf_branches__open_and_load();
	if (CHECK(!skel, "test_perf_branches_load",
		  "perf_branches skeleton failed\n"))
		return;

	/* attach perf_event */
	link = bpf_program__attach_perf_event(skel->progs.perf_branches, perf_fd);
	if (!ASSERT_OK_PTR(link, "attach_perf_event"))
		goto out_destroy_skel;

	/* generate some branches on cpu 0 */
	CPU_ZERO(&cpu_set);
	CPU_SET(0, &cpu_set);
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
	if (CHECK(err, "set_affinity", "cpu #0, err %d\n", err))
		goto out_destroy;
	/* spin the loop for a while (random high number) */
	for (i = 0; i < 1000000; ++i)
		++j;

	test_perf_branches__detach(skel);
	detached = true;

	cb(skel);
out_destroy:
	bpf_link__destroy(link);
out_destroy_skel:
	if (!detached)
		test_perf_branches__detach(skel);
	test_perf_branches__destroy(skel);
}

static void test_perf_branches_hw(void)
{
	struct perf_event_attr attr = {0};
	int duration = 0;
	int pfd;

	/* create perf event */
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.freq = 1;
	attr.sample_freq = 1000;
	attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;
	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);

	/*
	 * Some setups don't support branch records (virtual machines, !x86),
	 * so skip test in this case.
	 */
	if (pfd < 0) {
		if (errno == ENOENT || errno == EOPNOTSUPP) {
			printf("%s:SKIP:no PERF_SAMPLE_BRANCH_STACK\n",
			       __func__);
			test__skip();
			return;
		}
		if (CHECK(pfd < 0, "perf_event_open", "err %d errno %d\n",
			  pfd, errno))
			return;
	}

	test_perf_branches_common(pfd, check_good_sample);

	close(pfd);
}

/*
 * Tests negative case -- run bpf_read_branch_records() on improperly configured
 * perf event.
 */
static void test_perf_branches_no_hw(void)
{
	struct perf_event_attr attr = {0};
	int duration = 0;
	int pfd;

	/* create perf event */
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.freq = 1;
	attr.sample_freq = 1000;
	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
	if (CHECK(pfd < 0, "perf_event_open", "err %d\n", pfd))
		return;

	test_perf_branches_common(pfd, check_bad_sample);

	close(pfd);
}

void test_perf_branches(void)
{
	if (test__start_subtest("perf_branches_hw"))
		test_perf_branches_hw();
	if (test__start_subtest("perf_branches_no_hw"))
		test_perf_branches_no_hw();
}