Linux Audio

Check our new training course

Open-source upstreaming

Need help getting support for your hardware into upstream Linux?
Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2#include <test_progs.h>
  3#include "test_stacktrace_build_id.skel.h"
 
 
 
 
 
 
 
 
 
 
 
 
  4
/* Verify that BPF stack maps populated from NMI (hardware perf event)
 * context resolve stack entries to build IDs.  The test attaches the
 * skeleton's "oncpu" program to a PERF_COUNT_HW_CPU_CYCLES sampling event,
 * generates CPU load via ./urandom_read pinned to CPU 0, then checks that
 * at least one collected stack entry carries the build ID read from the
 * urandom_read binary on disk.
 */
void test_stacktrace_build_id_nmi(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	struct test_stacktrace_build_id *skel;
	int err, pmu_fd;
	/* Frequency-based CPU-cycles sampling; sample_freq is set below. */
	struct perf_event_attr attr = {
		.freq = 1,
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	__u32 key, prev_key, val, duration = 0; /* duration is used by CHECK() */
	char buf[BPF_BUILD_ID_SIZE];
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0, build_id_size;
	int i, retry = 1;	/* allow one retry; see the race comment below */

	/* Stay within the sysctl limit so perf_event_open() does not reject
	 * the requested sampling frequency.
	 */
	attr.sample_freq = read_perf_max_sample_freq();

retry:
	skel = test_stacktrace_build_id__open();
	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
		return;

	/* override program type */
	bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT);

	err = test_stacktrace_build_id__load(skel);
	if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
		goto cleanup;

	/* Open a CPU-cycles counter on CPU 0, all pids. */
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	/* Hardware cycle counting may be unavailable (e.g. in some VMs);
	 * that is an environment limitation, not a test failure.
	 */
	if (pmu_fd < 0 && (errno == ENOENT || errno == EOPNOTSUPP)) {
		printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
		test__skip();
		goto cleanup;
	}
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto cleanup;

	skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
							   pmu_fd);
	if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) {
		/* Attach failed, so no link owns pmu_fd yet; close it here. */
		close(pmu_fd);
		goto cleanup;
	}

	/* find map fds */
	control_map_fd = bpf_map__fd(skel->maps.control_map);
	stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
	stackmap_fd = bpf_map__fd(skel->maps.stackmap);

	/* Generate load so the sampling event fires while known user code
	 * (urandom_read, pinned to CPU 0 to match the event's CPU) runs.
	 */
	if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")))
		goto cleanup;
	if (CHECK_FAIL(system("taskset 0x1 ./urandom_read 100000")))
		goto cleanup;
	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto cleanup;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto cleanup;

	/* Expected build ID, read from the urandom_read binary on disk;
	 * negative return means the read failed.
	 */
	build_id_size = read_build_id("urandom_read", buf, sizeof(buf));
	err = build_id_size < 0 ? build_id_size : 0;

	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))
		goto cleanup;

	err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))
		goto cleanup;

	/* Walk every stack stored in the map; one valid entry whose build ID
	 * matches the one read from disk is enough to pass.
	 */
	do {
		err = bpf_map__lookup_elem(skel->maps.stackmap, &key, sizeof(key),
					   id_offs, sizeof(id_offs), 0);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))
			goto cleanup;
		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				if (memcmp(buf, id_offs[i].build_id, build_id_size) == 0)
					build_id_matches = 1;
			}
		prev_key = key;
	} while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);

	/* stack_map_get_build_id_offset() is racy and sometimes can return
	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
	 * try it one more time.
	 */
	if (build_id_matches < 1 && retry--) {
		/* Destroy the skeleton (which also detaches the link and
		 * closes pmu_fd) before re-running from scratch.
		 */
		test_stacktrace_build_id__destroy(skel);
		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
		       __func__);
		goto retry;
	}

	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))
		goto cleanup;

	/*
	 * We intentionally skip compare_stack_ips(). This is because we
	 * only support one in_nmi() ips-to-build_id translation per cpu
	 * at any time, thus stack_amap here will always fallback to
	 * BPF_STACK_BUILD_ID_IP;
	 */

cleanup:
	test_stacktrace_build_id__destroy(skel);
}
v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2#include <test_progs.h>
  3
  4static __u64 read_perf_max_sample_freq(void)
  5{
  6	__u64 sample_freq = 5000; /* fallback to 5000 on error */
  7	FILE *f;
  8
  9	f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
 10	if (f == NULL)
 11		return sample_freq;
 12	fscanf(f, "%llu", &sample_freq);
 13	fclose(f);
 14	return sample_freq;
 15}
 16
/* Verify that BPF stack maps populated from NMI (hardware perf event)
 * context resolve stack entries to build IDs, using the pre-skeleton
 * object-file loading API.  Loads test_stacktrace_build_id.o as a
 * perf_event program, attaches it to a PERF_COUNT_HW_CPU_CYCLES sampling
 * event, generates CPU load via ./urandom_read pinned to CPU 0, then
 * checks that the collected stack map contains the build ID of the
 * urandom_read binary.
 */
void test_stacktrace_build_id_nmi(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *prog_name = "tracepoint/random/urandom_read";
	const char *file = "./test_stacktrace_build_id.o";
	int err, pmu_fd, prog_fd;
	/* Frequency-based CPU-cycles sampling; sample_freq is set below. */
	struct perf_event_attr attr = {
		.freq = 1,
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	__u32 key, previous_key, val, duration = 0; /* duration used by CHECK() */
	struct bpf_program *prog;
	struct bpf_object *obj;
	struct bpf_link *link;
	char buf[256];	/* hex build-ID text filled by extract_build_id() */
	int i, j;
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0;
	int retry = 1;	/* allow one retry; see the race comment below */

	/* Stay within the sysctl limit so perf_event_open() does not reject
	 * the requested sampling frequency.
	 */
	attr.sample_freq = read_perf_max_sample_freq();

retry:
	/* The object is loaded with type PERF_EVENT despite the tracepoint
	 * section name held in prog_name.
	 */
	err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		return;

	prog = bpf_object__find_program_by_title(obj, prog_name);
	if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
		goto close_prog;

	/* Open a CPU-cycles counter on CPU 0, all pids. */
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open",
		  "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
		  pmu_fd, errno))
		goto close_prog;

	link = bpf_program__attach_perf_event(prog, pmu_fd);
	if (CHECK(IS_ERR(link), "attach_perf_event",
		  "err %ld\n", PTR_ERR(link))) {
		/* Attach failed, so no link owns pmu_fd yet; close it here. */
		close(pmu_fd);
		goto close_prog;
	}

	/* find map fds */
	/* NOTE(review): the CHECK()s below print the stale 'err' left over
	 * from bpf_prog_load() rather than the failing fd, so their error
	 * messages can be misleading — verify before relying on them.
	 */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	/* Generate load so the sampling event fires while known user code
	 * (urandom_read, pinned to CPU 0 to match the event's CPU) runs.
	 */
	if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")))
		goto disable_pmu;
	if (CHECK_FAIL(system("taskset 0x1 ./urandom_read 100000")))
		goto disable_pmu;
	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	/* Expected build ID (as hex text) of the urandom_read binary. */
	err = extract_build_id(buf, 256);

	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))
		goto disable_pmu;

	/* Walk every stack stored in the map; one valid entry whose build ID
	 * matches the extracted one is enough to pass.
	 */
	do {
		char build_id[64];	/* 40 hex chars + NUL fit easily */

		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))
			goto disable_pmu;
		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				/* Format the 20-byte binary build ID as hex
				 * for a substring match against buf.
				 */
				for (j = 0; j < 20; ++j)
					sprintf(build_id + 2 * j, "%02x",
						id_offs[i].build_id[j] & 0xff);
				if (strstr(buf, build_id) != NULL)
					build_id_matches = 1;
			}
		previous_key = key;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);

	/* stack_map_get_build_id_offset() is racy and sometimes can return
	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
	 * try it one more time.
	 */
	if (build_id_matches < 1 && retry--) {
		/* Tear everything down before re-running from scratch. */
		bpf_link__destroy(link);
		bpf_object__close(obj);
		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
		       __func__);
		goto retry;
	}

	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))
		goto disable_pmu;

	/*
	 * We intentionally skip compare_stack_ips(). This is because we
	 * only support one in_nmi() ips-to-build_id translation per cpu
	 * at any time, thus stack_amap here will always fallback to
	 * BPF_STACK_BUILD_ID_IP;
	 */

disable_pmu:
	bpf_link__destroy(link);
close_prog:
	bpf_object__close(obj);
}