// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>
#include "test_perf_buffer.skel.h"
#include "bpf/libbpf_internal.h"

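/* the CHECK() macro from test_progs.h expects a "duration" variable in scope */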
static int duration;

/* AddressSanitizer sometimes crashes when dereferencing the sample data
 * below because it is mmap()'ed memory, so disable instrumentation with the
 * no_sanitize_address attribute.
 */
__attribute__((no_sanitize_address))
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	int cpu_data = *(int *)data, duration = 0;
	cpu_set_t *cpu_seen = ctx;

	if (cpu_data != cpu)
		CHECK(cpu_data != cpu, "check_cpu_data",
		      "cpu_data %d != cpu %d\n", cpu_data, cpu);

	CPU_SET(cpu, cpu_seen);
}

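/* Pin the calling thread to @cpu and sleep briefly so the probe attached by
 * this test fires on that particular CPU.
 */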
int trigger_on_cpu(int cpu)
{
	cpu_set_t cpu_set;
	int err;

	CPU_ZERO(&cpu_set);
	CPU_SET(cpu, &cpu_set);

	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
	if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n", cpu, err))
		return err;

	usleep(1);

	return 0;
}

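/* Check that each online CPU gets its own perf buffer and that samples are
 * delivered on, and attributed to, the CPU that produced them: the BPF side
 * is expected to emit the current CPU id as the sample payload, which
 * on_sample() compares against the CPU the callback reports.
 */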
void serial_test_perf_buffer(void)
{
	int err, on_len, nr_on_cpus = 0, nr_cpus, i, j;
	int zero = 0, my_pid = getpid();
	struct test_perf_buffer *skel;
	cpu_set_t cpu_seen;
	struct perf_buffer *pb;
	int last_fd = -1, fd;
	bool *online;

	nr_cpus = libbpf_num_possible_cpus();
	if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus))
		return;

	err = parse_cpu_mask_file("/sys/devices/system/cpu/online",
				  &online, &on_len);
	if (CHECK(err, "nr_on_cpus", "err %d\n", err))
		return;

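	/* count currently online CPUs */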
	for (i = 0; i < on_len; i++)
		if (online[i])
			nr_on_cpus++;

	/* load program */
	skel = test_perf_buffer__open_and_load();
	if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
		goto out_close;

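	/* tell the BPF program which PID's events it should report */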
	err = bpf_map_update_elem(bpf_map__fd(skel->maps.my_pid_map), &zero, &my_pid, 0);
	if (!ASSERT_OK(err, "my_pid_update"))
		goto out_close;

	/* attach probe */
	err = test_perf_buffer__attach(skel);
	if (CHECK(err, "attach_kprobe", "err %d\n", err))
		goto out_close;

	/* set up perf buffer */
	pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1,
			      on_sample, NULL, &cpu_seen, NULL);
	if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
		goto out_close;

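	/* sanity-check that the perf buffer exposes a usable epoll FD */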
	CHECK(perf_buffer__epoll_fd(pb) < 0, "epoll_fd",
	      "bad fd: %d\n", perf_buffer__epoll_fd(pb));

	/* trigger kprobe on every CPU */
	CPU_ZERO(&cpu_seen);
	for (i = 0; i < nr_cpus; i++) {
		if (i >= on_len || !online[i]) {
			printf("skipping offline CPU #%d\n", i);
			continue;
		}

		if (trigger_on_cpu(i))
			goto out_close;
	}

	/* read perf buffer */
	err = perf_buffer__poll(pb, 100);
	if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
		goto out_free_pb;

	if (CHECK(CPU_COUNT(&cpu_seen) != nr_on_cpus, "seen_cpu_cnt",
		  "expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen)))
		goto out_free_pb;

	if (CHECK(perf_buffer__buffer_cnt(pb) != nr_on_cpus, "buf_cnt",
		  "got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_on_cpus))
		goto out_close;

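	/* drain and re-trigger each per-CPU buffer individually, making sure
	 * every online CPU has its own distinct buffer FD
	 */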
	for (i = 0, j = 0; i < nr_cpus; i++) {
		if (i >= on_len || !online[i])
			continue;

		fd = perf_buffer__buffer_fd(pb, j);
		CHECK(fd < 0 || last_fd == fd, "fd_check", "last fd %d == fd %d\n", last_fd, fd);
		last_fd = fd;

		err = perf_buffer__consume_buffer(pb, j);
		if (CHECK(err, "drain_buf", "cpu %d, err %d\n", i, err))
			goto out_close;

		CPU_CLR(i, &cpu_seen);
		if (trigger_on_cpu(i))
			goto out_close;

		err = perf_buffer__consume_buffer(pb, j);
		if (CHECK(err, "consume_buf", "cpu %d, err %d\n", i, err))
			goto out_close;

		if (CHECK(!CPU_ISSET(i, &cpu_seen), "cpu_seen", "cpu %d not seen\n", i))
			goto out_close;
		j++;
	}

out_free_pb:
	perf_buffer__free(pb);
out_close:
	test_perf_buffer__destroy(skel);
	free(online);
}