// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_stacktrace_build_id.skel.h"

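/* Exercise build-ID stack map translation in NMI context: attach a BPF
 * program to a hardware cycles perf event, generate user-space activity
 * with a binary whose build ID is known (urandom_read), and verify that
 * at least one captured frame was translated to that build ID.
 *
 * Typically run through the selftests runner, e.g.:
 *
 *	./test_progs -t stacktrace_build_id_nmi
 */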
void test_stacktrace_build_id_nmi(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	struct test_stacktrace_build_id *skel;
	int err, pmu_fd;
	struct perf_event_attr attr = {
		.freq = 1,
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	__u32 key, prev_key, val, duration = 0;
	char buf[BPF_BUILD_ID_SIZE];
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0, build_id_size;
	int i, retry = 1;

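	/* Sample at the highest rate the kernel currently allows; asking for
	 * more would make perf_event_open() fail with EINVAL. The
	 * read_perf_max_sample_freq() selftests helper reads
	 * /proc/sys/kernel/perf_event_max_sample_rate and falls back to 5000
	 * if the file cannot be read.
	 */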
	attr.sample_freq = read_perf_max_sample_freq();

retry:
	skel = test_stacktrace_build_id__open();
	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
		return;

	/* override program type */
	bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT);

	err = test_stacktrace_build_id__load(skel);
	if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
		goto cleanup;

	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
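	/* Hardware cycle events are delivered in NMI context, which is the
	 * code path under test. ENOENT or EOPNOTSUPP means no hardware PMU
	 * is available (common in VMs), so skip rather than fail.
	 */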
	if (pmu_fd < 0 && (errno == ENOENT || errno == EOPNOTSUPP)) {
		printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
		test__skip();
		goto cleanup;
	}
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto cleanup;

	skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
							   pmu_fd);
	if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) {
		close(pmu_fd);
		goto cleanup;
	}

	/* find map fds */
	control_map_fd = bpf_map__fd(skel->maps.control_map);
	stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
	stackmap_fd = bpf_map__fd(skel->maps.stackmap);

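	/* Generate samples: the perf event was opened on CPU 0, so pin
	 * urandom_read there with taskset. urandom_read is a selftest binary
	 * built with a build ID, which gives us a known value to look for in
	 * the stack map below.
	 */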
	if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")))
		goto cleanup;
	if (CHECK_FAIL(system("taskset 0x1 ./urandom_read 100000")))
		goto cleanup;
	/* disable stack trace collection so the maps stay stable while we
	 * verify them
	 */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto cleanup;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto cleanup;

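	/* Read the expected build ID straight out of the urandom_read ELF
	 * file. read_build_id() returns the ID length on success or a
	 * negative error, so fold the latter into err.
	 */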
	build_id_size = read_build_id("urandom_read", buf, sizeof(buf));
	err = build_id_size < 0 ? build_id_size : 0;

	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))
		goto cleanup;

	err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))
		goto cleanup;

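	/* Walk every entry in the stack map. Each value is an array of
	 * PERF_MAX_STACK_DEPTH { status, build_id, offset } records; the
	 * test passes if any successfully translated frame
	 * (BPF_STACK_BUILD_ID_VALID) carries urandom_read's build ID.
	 */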
	do {
		err = bpf_map__lookup_elem(skel->maps.stackmap, &key, sizeof(key),
					   id_offs, sizeof(id_offs), 0);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))
			goto cleanup;
		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				if (memcmp(buf, id_offs[i].build_id, build_id_size) == 0)
					build_id_matches = 1;
			}
		prev_key = key;
	} while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);

	/* stack_map_get_build_id_offset() is racy and sometimes can return
	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
	 * try it one more time.
	 */
	if (build_id_matches < 1 && retry--) {
		test_stacktrace_build_id__destroy(skel);
		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
		       __func__);
		goto retry;
	}

	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))
		goto cleanup;

	/*
	 * We intentionally skip compare_stack_ips(). This is because we
	 * only support one in_nmi() ips-to-build_id translation per CPU
	 * at any time, so stack_amap here will always fall back to
	 * BPF_STACK_BUILD_ID_IP.
	 */

cleanup:
	test_stacktrace_build_id__destroy(skel);
}