1// SPDX-License-Identifier: GPL-2.0
2#define _GNU_SOURCE
3
4#include <assert.h>
5#include <fcntl.h>
6#include <linux/perf_event.h>
7#include <linux/bpf.h>
8#include <sched.h>
9#include <stdio.h>
10#include <stdlib.h>
11#include <sys/ioctl.h>
12#include <sys/resource.h>
13#include <sys/time.h>
14#include <sys/types.h>
15#include <sys/wait.h>
16#include <unistd.h>
17
18#include "bpf_load.h"
19#include <bpf/bpf.h>
20#include "perf-sys.h"
21
22#define SAMPLE_PERIOD 0x7fffffffffffffffULL
23
24static void check_on_cpu(int cpu, struct perf_event_attr *attr)
25{
26 struct bpf_perf_event_value value2;
27 int pmu_fd, error = 0;
28 cpu_set_t set;
29 __u64 value;
30
31 /* Move to target CPU */
32 CPU_ZERO(&set);
33 CPU_SET(cpu, &set);
34 assert(sched_setaffinity(0, sizeof(set), &set) == 0);
35 /* Open perf event and attach to the perf_event_array */
36 pmu_fd = sys_perf_event_open(attr, -1/*pid*/, cpu/*cpu*/, -1/*group_fd*/, 0);
37 if (pmu_fd < 0) {
38 fprintf(stderr, "sys_perf_event_open failed on CPU %d\n", cpu);
39 error = 1;
40 goto on_exit;
41 }
42 assert(bpf_map_update_elem(map_fd[0], &cpu, &pmu_fd, BPF_ANY) == 0);
43 assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0) == 0);
44 /* Trigger the kprobe */
45 bpf_map_get_next_key(map_fd[1], &cpu, NULL);
46 /* Check the value */
47 if (bpf_map_lookup_elem(map_fd[1], &cpu, &value)) {
48 fprintf(stderr, "Value missing for CPU %d\n", cpu);
49 error = 1;
50 goto on_exit;
51 } else {
52 fprintf(stderr, "CPU %d: %llu\n", cpu, value);
53 }
54 /* The above bpf_map_lookup_elem should trigger the second kprobe */
55 if (bpf_map_lookup_elem(map_fd[2], &cpu, &value2)) {
56 fprintf(stderr, "Value2 missing for CPU %d\n", cpu);
57 error = 1;
58 goto on_exit;
59 } else {
60 fprintf(stderr, "CPU %d: counter: %llu, enabled: %llu, running: %llu\n", cpu,
61 value2.counter, value2.enabled, value2.running);
62 }
63
64on_exit:
65 assert(bpf_map_delete_elem(map_fd[0], &cpu) == 0 || error);
66 assert(ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE, 0) == 0 || error);
67 assert(close(pmu_fd) == 0 || error);
68 assert(bpf_map_delete_elem(map_fd[1], &cpu) == 0 || error);
69 exit(error);
70}
71
/*
 * Fork one child per online CPU; each child pins itself to its CPU and
 * runs check_on_cpu() with the given event @attr, exiting 0 on success.
 * Any non-zero wait status from a child marks the whole test FAILED.
 */
static void test_perf_event_array(struct perf_event_attr *attr,
				  const char *name)
{
	int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	pid_t pid[nr_cpus];
	int cpu, status;
	int failed = 0;

	printf("Test reading %s counters\n", name);

	/* Spawn the per-CPU workers */
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		pid[cpu] = fork();
		assert(pid[cpu] >= 0);
		if (pid[cpu] == 0) {
			check_on_cpu(cpu, attr);
			/* check_on_cpu() exits itself; reaching here is a failure */
			exit(1);
		}
	}

	/* Reap every worker and accumulate failures */
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		assert(waitpid(pid[cpu], &status, 0) == pid[cpu]);
		failed |= status;
	}

	if (failed)
		printf("Test: %s FAILED\n", name);
}
98
99static void test_bpf_perf_event(void)
100{
101 struct perf_event_attr attr_cycles = {
102 .freq = 0,
103 .sample_period = SAMPLE_PERIOD,
104 .inherit = 0,
105 .type = PERF_TYPE_HARDWARE,
106 .read_format = 0,
107 .sample_type = 0,
108 .config = PERF_COUNT_HW_CPU_CYCLES,
109 };
110 struct perf_event_attr attr_clock = {
111 .freq = 0,
112 .sample_period = SAMPLE_PERIOD,
113 .inherit = 0,
114 .type = PERF_TYPE_SOFTWARE,
115 .read_format = 0,
116 .sample_type = 0,
117 .config = PERF_COUNT_SW_CPU_CLOCK,
118 };
119 struct perf_event_attr attr_raw = {
120 .freq = 0,
121 .sample_period = SAMPLE_PERIOD,
122 .inherit = 0,
123 .type = PERF_TYPE_RAW,
124 .read_format = 0,
125 .sample_type = 0,
126 /* Intel Instruction Retired */
127 .config = 0xc0,
128 };
129 struct perf_event_attr attr_l1d_load = {
130 .freq = 0,
131 .sample_period = SAMPLE_PERIOD,
132 .inherit = 0,
133 .type = PERF_TYPE_HW_CACHE,
134 .read_format = 0,
135 .sample_type = 0,
136 .config =
137 PERF_COUNT_HW_CACHE_L1D |
138 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
139 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
140 };
141 struct perf_event_attr attr_llc_miss = {
142 .freq = 0,
143 .sample_period = SAMPLE_PERIOD,
144 .inherit = 0,
145 .type = PERF_TYPE_HW_CACHE,
146 .read_format = 0,
147 .sample_type = 0,
148 .config =
149 PERF_COUNT_HW_CACHE_LL |
150 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
151 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
152 };
153 struct perf_event_attr attr_msr_tsc = {
154 .freq = 0,
155 .sample_period = 0,
156 .inherit = 0,
157 /* From /sys/bus/event_source/devices/msr/ */
158 .type = 7,
159 .read_format = 0,
160 .sample_type = 0,
161 .config = 0,
162 };
163
164 test_perf_event_array(&attr_cycles, "HARDWARE-cycles");
165 test_perf_event_array(&attr_clock, "SOFTWARE-clock");
166 test_perf_event_array(&attr_raw, "RAW-instruction-retired");
167 test_perf_event_array(&attr_l1d_load, "HW_CACHE-L1D-load");
168
169 /* below tests may fail in qemu */
170 test_perf_event_array(&attr_llc_miss, "HW_CACHE-LLC-miss");
171 test_perf_event_array(&attr_msr_tsc, "Dynamic-msr-tsc");
172}
173
174int main(int argc, char **argv)
175{
176 struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
177 char filename[256];
178
179 snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
180
181 setrlimit(RLIMIT_MEMLOCK, &r);
182 if (load_bpf_file(filename)) {
183 printf("%s", bpf_log_buf);
184 return 1;
185 }
186
187 test_bpf_perf_event();
188 return 0;
189}
1// SPDX-License-Identifier: GPL-2.0
2#define _GNU_SOURCE
3
4#include <assert.h>
5#include <fcntl.h>
6#include <linux/perf_event.h>
7#include <sched.h>
8#include <stdio.h>
9#include <stdlib.h>
10#include <sys/ioctl.h>
11#include <sys/resource.h>
12#include <sys/time.h>
13#include <sys/types.h>
14#include <sys/wait.h>
15#include <unistd.h>
16
17#include <bpf/bpf.h>
18#include <bpf/libbpf.h>
19#include "perf-sys.h"
20
21#define SAMPLE_PERIOD 0x7fffffffffffffffULL
22
/* File descriptors of the BPF maps, resolved by name in main():
 * [0] "counters" (perf_event_array), [1] "values", [2] "values2"
 */
static int map_fd[3];
25
26static void check_on_cpu(int cpu, struct perf_event_attr *attr)
27{
28 struct bpf_perf_event_value value2;
29 int pmu_fd, error = 0;
30 cpu_set_t set;
31 __u64 value;
32
33 /* Move to target CPU */
34 CPU_ZERO(&set);
35 CPU_SET(cpu, &set);
36 assert(sched_setaffinity(0, sizeof(set), &set) == 0);
37 /* Open perf event and attach to the perf_event_array */
38 pmu_fd = sys_perf_event_open(attr, -1/*pid*/, cpu/*cpu*/, -1/*group_fd*/, 0);
39 if (pmu_fd < 0) {
40 fprintf(stderr, "sys_perf_event_open failed on CPU %d\n", cpu);
41 error = 1;
42 goto on_exit;
43 }
44 assert(bpf_map_update_elem(map_fd[0], &cpu, &pmu_fd, BPF_ANY) == 0);
45 assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0) == 0);
46 /* Trigger the kprobe */
47 bpf_map_get_next_key(map_fd[1], &cpu, NULL);
48 /* Check the value */
49 if (bpf_map_lookup_elem(map_fd[1], &cpu, &value)) {
50 fprintf(stderr, "Value missing for CPU %d\n", cpu);
51 error = 1;
52 goto on_exit;
53 } else {
54 fprintf(stderr, "CPU %d: %llu\n", cpu, value);
55 }
56 /* The above bpf_map_lookup_elem should trigger the second kprobe */
57 if (bpf_map_lookup_elem(map_fd[2], &cpu, &value2)) {
58 fprintf(stderr, "Value2 missing for CPU %d\n", cpu);
59 error = 1;
60 goto on_exit;
61 } else {
62 fprintf(stderr, "CPU %d: counter: %llu, enabled: %llu, running: %llu\n", cpu,
63 value2.counter, value2.enabled, value2.running);
64 }
65
66on_exit:
67 assert(bpf_map_delete_elem(map_fd[0], &cpu) == 0 || error);
68 assert(ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE, 0) == 0 || error);
69 assert(close(pmu_fd) == 0 || error);
70 assert(bpf_map_delete_elem(map_fd[1], &cpu) == 0 || error);
71 exit(error);
72}
73
/*
 * Fork one child per online CPU; each child pins itself to its CPU and
 * runs check_on_cpu() with the given event @attr, exiting 0 on success.
 * Any non-zero wait status from a child marks the whole test FAILED.
 */
static void test_perf_event_array(struct perf_event_attr *attr,
				  const char *name)
{
	int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	pid_t pid[nr_cpus];
	int cpu, status;
	int failed = 0;

	printf("Test reading %s counters\n", name);

	/* Spawn the per-CPU workers */
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		pid[cpu] = fork();
		assert(pid[cpu] >= 0);
		if (pid[cpu] == 0) {
			check_on_cpu(cpu, attr);
			/* check_on_cpu() exits itself; reaching here is a failure */
			exit(1);
		}
	}

	/* Reap every worker and accumulate failures */
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		assert(waitpid(pid[cpu], &status, 0) == pid[cpu]);
		failed |= status;
	}

	if (failed)
		printf("Test: %s FAILED\n", name);
}
100
101static void test_bpf_perf_event(void)
102{
103 struct perf_event_attr attr_cycles = {
104 .freq = 0,
105 .sample_period = SAMPLE_PERIOD,
106 .inherit = 0,
107 .type = PERF_TYPE_HARDWARE,
108 .read_format = 0,
109 .sample_type = 0,
110 .config = PERF_COUNT_HW_CPU_CYCLES,
111 };
112 struct perf_event_attr attr_clock = {
113 .freq = 0,
114 .sample_period = SAMPLE_PERIOD,
115 .inherit = 0,
116 .type = PERF_TYPE_SOFTWARE,
117 .read_format = 0,
118 .sample_type = 0,
119 .config = PERF_COUNT_SW_CPU_CLOCK,
120 };
121 struct perf_event_attr attr_raw = {
122 .freq = 0,
123 .sample_period = SAMPLE_PERIOD,
124 .inherit = 0,
125 .type = PERF_TYPE_RAW,
126 .read_format = 0,
127 .sample_type = 0,
128 /* Intel Instruction Retired */
129 .config = 0xc0,
130 };
131 struct perf_event_attr attr_l1d_load = {
132 .freq = 0,
133 .sample_period = SAMPLE_PERIOD,
134 .inherit = 0,
135 .type = PERF_TYPE_HW_CACHE,
136 .read_format = 0,
137 .sample_type = 0,
138 .config =
139 PERF_COUNT_HW_CACHE_L1D |
140 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
141 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
142 };
143 struct perf_event_attr attr_llc_miss = {
144 .freq = 0,
145 .sample_period = SAMPLE_PERIOD,
146 .inherit = 0,
147 .type = PERF_TYPE_HW_CACHE,
148 .read_format = 0,
149 .sample_type = 0,
150 .config =
151 PERF_COUNT_HW_CACHE_LL |
152 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
153 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
154 };
155 struct perf_event_attr attr_msr_tsc = {
156 .freq = 0,
157 .sample_period = 0,
158 .inherit = 0,
159 /* From /sys/bus/event_source/devices/msr/ */
160 .type = 7,
161 .read_format = 0,
162 .sample_type = 0,
163 .config = 0,
164 };
165
166 test_perf_event_array(&attr_cycles, "HARDWARE-cycles");
167 test_perf_event_array(&attr_clock, "SOFTWARE-clock");
168 test_perf_event_array(&attr_raw, "RAW-instruction-retired");
169 test_perf_event_array(&attr_l1d_load, "HW_CACHE-L1D-load");
170
171 /* below tests may fail in qemu */
172 test_perf_event_array(&attr_llc_miss, "HW_CACHE-LLC-miss");
173 test_perf_event_array(&attr_msr_tsc, "Dynamic-msr-tsc");
174}
175
176int main(int argc, char **argv)
177{
178 struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
179 struct bpf_link *links[2];
180 struct bpf_program *prog;
181 struct bpf_object *obj;
182 char filename[256];
183 int i = 0;
184
185 setrlimit(RLIMIT_MEMLOCK, &r);
186
187 snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
188 obj = bpf_object__open_file(filename, NULL);
189 if (libbpf_get_error(obj)) {
190 fprintf(stderr, "ERROR: opening BPF object file failed\n");
191 return 0;
192 }
193
194 /* load BPF program */
195 if (bpf_object__load(obj)) {
196 fprintf(stderr, "ERROR: loading BPF object file failed\n");
197 goto cleanup;
198 }
199
200 map_fd[0] = bpf_object__find_map_fd_by_name(obj, "counters");
201 map_fd[1] = bpf_object__find_map_fd_by_name(obj, "values");
202 map_fd[2] = bpf_object__find_map_fd_by_name(obj, "values2");
203 if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0) {
204 fprintf(stderr, "ERROR: finding a map in obj file failed\n");
205 goto cleanup;
206 }
207
208 bpf_object__for_each_program(prog, obj) {
209 links[i] = bpf_program__attach(prog);
210 if (libbpf_get_error(links[i])) {
211 fprintf(stderr, "ERROR: bpf_program__attach failed\n");
212 links[i] = NULL;
213 goto cleanup;
214 }
215 i++;
216 }
217
218 test_bpf_perf_event();
219
220cleanup:
221 for (i--; i >= 0; i--)
222 bpf_link__destroy(links[i]);
223
224 bpf_object__close(obj);
225 return 0;
226}