// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */

#include <string.h>
#include <linux/bpf.h>
#include <linux/limits.h>
#include <test_progs.h>
#include "trace_helpers.h"
#include "test_fill_link_info.skel.h"
#include "bpf/libbpf_internal.h"

#define TP_CAT "sched"
#define TP_NAME "sched_switch"

static const char *kmulti_syms[] = {
	"bpf_fentry_test2",
	"bpf_fentry_test1",
	"bpf_fentry_test3",
};
#define KMULTI_CNT ARRAY_SIZE(kmulti_syms)
static __u64 kmulti_addrs[KMULTI_CNT];

#define KPROBE_FUNC "bpf_fentry_test1"
static __u64 kprobe_addr;

#define UPROBE_FILE "/proc/self/exe"
static ssize_t uprobe_offset;
/* uprobe attach point */
static noinline void uprobe_func(void)
{
	asm volatile ("");
}

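/*
 * Read the link info with bpf_link_get_info_by_fd() and check the
 * perf_event-specific fields for the given type. The first query is made
 * without a name buffer, so name_len is expected to be 0; the buffer is
 * then supplied and the query retried via the "again" label.
 */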
static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr,
				 ssize_t offset, ssize_t entry_offset)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	char buf[PATH_MAX];
	int err;

	memset(&info, 0, sizeof(info));
	buf[0] = '\0';

again:
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (!ASSERT_OK(err, "get_link_info"))
		return -1;

	if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "link_type"))
		return -1;
	if (!ASSERT_EQ(info.perf_event.type, type, "perf_type_match"))
		return -1;

	switch (info.perf_event.type) {
	case BPF_PERF_EVENT_KPROBE:
	case BPF_PERF_EVENT_KRETPROBE:
		ASSERT_EQ(info.perf_event.kprobe.offset, offset, "kprobe_offset");

		/* In case kernel.kptr_restrict is not permitted or MAX_SYMS is reached */
		if (addr)
			ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset,
				  "kprobe_addr");

		if (!info.perf_event.kprobe.func_name) {
			ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len");
			info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
			info.perf_event.kprobe.name_len = sizeof(buf);
			goto again;
		}

		err = strncmp(u64_to_ptr(info.perf_event.kprobe.func_name), KPROBE_FUNC,
			      strlen(KPROBE_FUNC));
		ASSERT_EQ(err, 0, "cmp_kprobe_func_name");
		break;
	case BPF_PERF_EVENT_TRACEPOINT:
		if (!info.perf_event.tracepoint.tp_name) {
			ASSERT_EQ(info.perf_event.tracepoint.name_len, 0, "name_len");
			info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
			info.perf_event.tracepoint.name_len = sizeof(buf);
			goto again;
		}

		err = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME,
			      strlen(TP_NAME));
		ASSERT_EQ(err, 0, "cmp_tp_name");
		break;
	case BPF_PERF_EVENT_UPROBE:
	case BPF_PERF_EVENT_URETPROBE:
		ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset");

		if (!info.perf_event.uprobe.file_name) {
			ASSERT_EQ(info.perf_event.uprobe.name_len, 0, "name_len");
			info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
			info.perf_event.uprobe.name_len = sizeof(buf);
			goto again;
		}

		err = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE,
			      strlen(UPROBE_FILE));
		ASSERT_EQ(err, 0, "cmp_file_name");
		break;
	default:
		err = -1;
		break;
	}
	return err;
}

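/*
 * Pass mismatched buffer/length combinations and an unmapped user pointer
 * to bpf_link_get_info_by_fd() and check that they are rejected with
 * -EINVAL or -EFAULT without any kprobe fields being filled in.
 */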
static void kprobe_fill_invalid_user_buffer(int fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	int err;

	memset(&info, 0, sizeof(info));

	info.perf_event.kprobe.func_name = 0x1; /* invalid address */
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "invalid_buff_and_len");

	info.perf_event.kprobe.name_len = 64;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EFAULT, "invalid_buff");

	info.perf_event.kprobe.func_name = 0;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "invalid_len");

	ASSERT_EQ(info.perf_event.kprobe.addr, 0, "func_addr");
	ASSERT_EQ(info.perf_event.kprobe.offset, 0, "func_offset");
	ASSERT_EQ(info.perf_event.type, 0, "type");
}

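/*
 * Attach kprobe_run to KPROBE_FUNC as a (ret)kprobe perf link and verify
 * the reported link info, or exercise the invalid user buffer paths.
 */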
static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
				       enum bpf_perf_event_type type,
				       bool invalid)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
		.attach_mode = PROBE_ATTACH_MODE_LINK,
		.retprobe = type == BPF_PERF_EVENT_KRETPROBE,
	);
	ssize_t entry_offset = 0;
	struct bpf_link *link;
	int link_fd, err;

	link = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run, KPROBE_FUNC, &opts);
	if (!ASSERT_OK_PTR(link, "attach_kprobe"))
		return;

	link_fd = bpf_link__fd(link);
	if (!invalid) {
		/* See also arch_adjust_kprobe_addr(). */
		if (skel->kconfig->CONFIG_X86_KERNEL_IBT)
			entry_offset = 4;
		err = verify_perf_link_info(link_fd, type, kprobe_addr, 0, entry_offset);
		ASSERT_OK(err, "verify_perf_link_info");
	} else {
		kprobe_fill_invalid_user_buffer(link_fd);
	}
	bpf_link__destroy(link);
}

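/* Attach tp_run to the sched:sched_switch tracepoint and verify its link info. */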
static void test_tp_fill_link_info(struct test_fill_link_info *skel)
{
	struct bpf_link *link;
	int link_fd, err;

	link = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME);
	if (!ASSERT_OK_PTR(link, "attach_tp"))
		return;

	link_fd = bpf_link__fd(link);
	err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_TRACEPOINT, 0, 0, 0);
	ASSERT_OK(err, "verify_perf_link_info");
	bpf_link__destroy(link);
}

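/*
 * Attach uprobe_run to uprobe_func in our own binary as a (ret)uprobe
 * and verify the reported file name and offset.
 */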
static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
				       enum bpf_perf_event_type type)
{
	struct bpf_link *link;
	int link_fd, err;

	link = bpf_program__attach_uprobe(skel->progs.uprobe_run,
					  type == BPF_PERF_EVENT_URETPROBE,
					  0, /* self pid */
					  UPROBE_FILE, uprobe_offset);
	if (!ASSERT_OK_PTR(link, "attach_uprobe"))
		return;

	link_fd = bpf_link__fd(link);
	err = verify_perf_link_info(link_fd, type, 0, uprobe_offset, 0);
	ASSERT_OK(err, "verify_perf_link_info");
	bpf_link__destroy(link);
}

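/*
 * Check the count, flags and addresses reported for a kprobe_multi link.
 * The first query is made without an addrs buffer, then retried with one.
 */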
static int verify_kmulti_link_info(int fd, bool retprobe)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	__u64 addrs[KMULTI_CNT];
	int flags, i, err;

	memset(&info, 0, sizeof(info));

again:
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (!ASSERT_OK(err, "get_link_info"))
		return -1;

	if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_KPROBE_MULTI, "kmulti_type"))
		return -1;

	ASSERT_EQ(info.kprobe_multi.count, KMULTI_CNT, "func_cnt");
	flags = info.kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN;
	if (!retprobe)
		ASSERT_EQ(flags, 0, "kmulti_flags");
	else
		ASSERT_NEQ(flags, 0, "kretmulti_flags");

	if (!info.kprobe_multi.addrs) {
		info.kprobe_multi.addrs = ptr_to_u64(addrs);
		goto again;
	}
	for (i = 0; i < KMULTI_CNT; i++)
		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
	return 0;
}

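/*
 * Exercise the error paths of kprobe_multi link info: missing addrs
 * buffer, zero count, a count smaller/bigger than the attached one,
 * and an invalid addrs pointer.
 */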
static void verify_kmulti_invalid_user_buffer(int fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	__u64 addrs[KMULTI_CNT];
	int err, i;

	memset(&info, 0, sizeof(info));

	info.kprobe_multi.count = KMULTI_CNT;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "no_addr");

	info.kprobe_multi.addrs = ptr_to_u64(addrs);
	info.kprobe_multi.count = 0;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "no_cnt");

	for (i = 0; i < KMULTI_CNT; i++)
		addrs[i] = 0;
	info.kprobe_multi.count = KMULTI_CNT - 1;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -ENOSPC, "smaller_cnt");
	for (i = 0; i < KMULTI_CNT - 1; i++)
		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
	ASSERT_EQ(addrs[i], 0, "kmulti_addrs");

	for (i = 0; i < KMULTI_CNT; i++)
		addrs[i] = 0;
	info.kprobe_multi.count = KMULTI_CNT + 1;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, 0, "bigger_cnt");
	for (i = 0; i < KMULTI_CNT; i++)
		ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");

	info.kprobe_multi.count = KMULTI_CNT;
	info.kprobe_multi.addrs = 0x1; /* invalid addr */
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EFAULT, "invalid_buff");
}

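/* String comparator for qsort(); kmulti_syms is sorted so that kmulti_addrs
 * lines up with the sorted address list reported by the kprobe_multi link.
 */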
static int symbols_cmp_r(const void *a, const void *b)
{
	const char **str_a = (const char **) a;
	const char **str_b = (const char **) b;

	return strcmp(*str_a, *str_b);
}

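/*
 * Attach kmulti_run to kmulti_syms as a kprobe_multi link and either
 * verify the reported info or probe the invalid user buffer handling.
 */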
static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
					     bool retprobe, bool invalid)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	struct bpf_link *link;
	int link_fd, err;

	opts.syms = kmulti_syms;
	opts.cnt = KMULTI_CNT;
	opts.retprobe = retprobe;
	link = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run, NULL, &opts);
	if (!ASSERT_OK_PTR(link, "attach_kprobe_multi"))
		return;

	link_fd = bpf_link__fd(link);
	if (!invalid) {
		err = verify_kmulti_link_info(link_fd, retprobe);
		ASSERT_OK(err, "verify_kmulti_link_info");
	} else {
		verify_kmulti_invalid_user_buffer(link_fd);
	}
	bpf_link__destroy(link);
}

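/*
 * Semaphores and attach points for the uprobe_multi tests. The semaphores
 * live in a dedicated ".probes" section so their offsets can be resolved
 * as STT_OBJECT symbols and passed as ref_ctr_offsets.
 */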
#define SEC(name) __attribute__((section(name), used))

static short uprobe_link_info_sema_1 SEC(".probes");
static short uprobe_link_info_sema_2 SEC(".probes");
static short uprobe_link_info_sema_3 SEC(".probes");

noinline void uprobe_link_info_func_1(void)
{
	asm volatile ("");
	uprobe_link_info_sema_1++;
}

noinline void uprobe_link_info_func_2(void)
{
	asm volatile ("");
	uprobe_link_info_sema_2++;
}

noinline void uprobe_link_info_func_3(void)
{
	asm volatile ("");
	uprobe_link_info_sema_3++;
}

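/*
 * Query the uprobe_multi link info with every combination of optional
 * output arrays (offsets, cookies, ref_ctr_offsets) and compare the
 * results against the values used at attach time.
 */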
static int
verify_umulti_link_info(int fd, bool retprobe, __u64 *offsets,
			__u64 *cookies, __u64 *ref_ctr_offsets)
{
	char path[PATH_MAX], path_buf[PATH_MAX];
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	__u64 ref_ctr_offsets_buf[3];
	__u64 offsets_buf[3];
	__u64 cookies_buf[3];
	int i, err, bit;
	__u32 count = 0;

	memset(path, 0, sizeof(path));
	err = readlink("/proc/self/exe", path, sizeof(path));
	if (!ASSERT_NEQ(err, -1, "readlink"))
		return -1;

	for (bit = 0; bit < 8; bit++) {
		memset(&info, 0, sizeof(info));
		info.uprobe_multi.path = ptr_to_u64(path_buf);
		info.uprobe_multi.path_size = sizeof(path_buf);
		info.uprobe_multi.count = count;

		if (bit & 0x1)
			info.uprobe_multi.offsets = ptr_to_u64(offsets_buf);
		if (bit & 0x2)
			info.uprobe_multi.cookies = ptr_to_u64(cookies_buf);
		if (bit & 0x4)
			info.uprobe_multi.ref_ctr_offsets = ptr_to_u64(ref_ctr_offsets_buf);

		err = bpf_link_get_info_by_fd(fd, &info, &len);
		if (!ASSERT_OK(err, "bpf_link_get_info_by_fd"))
			return -1;

		if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_UPROBE_MULTI, "info.type"))
			return -1;

		ASSERT_EQ(info.uprobe_multi.pid, getpid(), "info.uprobe_multi.pid");
		ASSERT_EQ(info.uprobe_multi.count, 3, "info.uprobe_multi.count");
		ASSERT_EQ(info.uprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN,
			  retprobe, "info.uprobe_multi.flags.retprobe");
		ASSERT_EQ(info.uprobe_multi.path_size, strlen(path) + 1, "info.uprobe_multi.path_size");
		ASSERT_STREQ(path_buf, path, "info.uprobe_multi.path");

		for (i = 0; i < info.uprobe_multi.count; i++) {
			if (info.uprobe_multi.offsets)
				ASSERT_EQ(offsets_buf[i], offsets[i], "info.uprobe_multi.offsets");
			if (info.uprobe_multi.cookies)
				ASSERT_EQ(cookies_buf[i], cookies[i], "info.uprobe_multi.cookies");
			if (info.uprobe_multi.ref_ctr_offsets) {
				ASSERT_EQ(ref_ctr_offsets_buf[i], ref_ctr_offsets[i],
					  "info.uprobe_multi.ref_ctr_offsets");
			}
		}
		count = count ?: info.uprobe_multi.count;
	}

	return 0;
}

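/*
 * Exercise the error paths of uprobe_multi link info: path_size without
 * a path buffer, a too-small path buffer, bad pointers, a zero count
 * with offsets, and a too-small offsets array.
 */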
static void verify_umulti_invalid_user_buffer(int fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	__u64 buf[3];
	int err;

	/* upath_size defined, not path */
	memset(&info, 0, sizeof(info));
	info.uprobe_multi.path_size = 3;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "failed_upath_size");

	/* path defined, but small */
	memset(&info, 0, sizeof(info));
	info.uprobe_multi.path = ptr_to_u64(buf);
	info.uprobe_multi.path_size = 3;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_LT(err, 0, "failed_upath_small");

	/* path has wrong pointer */
	memset(&info, 0, sizeof(info));
	info.uprobe_multi.path_size = PATH_MAX;
	info.uprobe_multi.path = 123;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EFAULT, "failed_bad_path_ptr");

	/* count zero, with offsets */
	memset(&info, 0, sizeof(info));
	info.uprobe_multi.offsets = ptr_to_u64(buf);
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EINVAL, "failed_count");

	/* offsets not big enough */
	memset(&info, 0, sizeof(info));
	info.uprobe_multi.offsets = ptr_to_u64(buf);
	info.uprobe_multi.count = 2;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -ENOSPC, "failed_small_count");

	/* offsets has wrong pointer */
	memset(&info, 0, sizeof(info));
	info.uprobe_multi.offsets = 123;
	info.uprobe_multi.count = 3;
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	ASSERT_EQ(err, -EFAULT, "failed_wrong_offsets");
}

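/*
 * Resolve the offsets of the attach-point functions and their semaphores
 * in our own binary, attach umulti_run as a uprobe_multi link with
 * cookies and ref_ctr_offsets, then verify the reported link info or
 * the invalid user buffer handling.
 */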
static void test_uprobe_multi_fill_link_info(struct test_fill_link_info *skel,
					     bool retprobe, bool invalid)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
		.retprobe = retprobe,
	);
	const char *syms[3] = {
		"uprobe_link_info_func_1",
		"uprobe_link_info_func_2",
		"uprobe_link_info_func_3",
	};
	__u64 cookies[3] = {
		0xdead,
		0xbeef,
		0xcafe,
	};
	const char *sema[3] = {
		"uprobe_link_info_sema_1",
		"uprobe_link_info_sema_2",
		"uprobe_link_info_sema_3",
	};
	__u64 *offsets = NULL, *ref_ctr_offsets;
	struct bpf_link *link;
	int link_fd, err;

	err = elf_resolve_syms_offsets("/proc/self/exe", 3, sema,
				       (unsigned long **) &ref_ctr_offsets, STT_OBJECT);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets_object"))
		return;

	err = elf_resolve_syms_offsets("/proc/self/exe", 3, syms,
				       (unsigned long **) &offsets, STT_FUNC);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets_func"))
		goto out;

	opts.syms = syms;
	opts.cookies = &cookies[0];
	opts.ref_ctr_offsets = (unsigned long *) &ref_ctr_offsets[0];
	opts.cnt = ARRAY_SIZE(syms);

	link = bpf_program__attach_uprobe_multi(skel->progs.umulti_run, 0,
						"/proc/self/exe", NULL, &opts);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
		goto out;

	link_fd = bpf_link__fd(link);
	if (invalid)
		verify_umulti_invalid_user_buffer(link_fd);
	else
		verify_umulti_link_info(link_fd, retprobe, offsets, cookies, ref_ctr_offsets);

	bpf_link__destroy(link);
out:
	free(ref_ctr_offsets);
	free(offsets);
}

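/* Entry point: load the skeleton, resolve the expected addresses/offsets, then run each subtest. */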
void test_fill_link_info(void)
{
	struct test_fill_link_info *skel;
	int i;

	skel = test_fill_link_info__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* load kallsyms to compare the addr */
	if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
		goto cleanup;

	kprobe_addr = ksym_get_addr(KPROBE_FUNC);
	if (test__start_subtest("kprobe_link_info"))
		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, false);
	if (test__start_subtest("kretprobe_link_info"))
		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KRETPROBE, false);
	if (test__start_subtest("kprobe_invalid_ubuff"))
		test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, true);
	if (test__start_subtest("tracepoint_link_info"))
		test_tp_fill_link_info(skel);

	uprobe_offset = get_uprobe_offset(&uprobe_func);
	if (test__start_subtest("uprobe_link_info"))
		test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_UPROBE);
	if (test__start_subtest("uretprobe_link_info"))
		test_uprobe_fill_link_info(skel, BPF_PERF_EVENT_URETPROBE);

	qsort(kmulti_syms, KMULTI_CNT, sizeof(kmulti_syms[0]), symbols_cmp_r);
	for (i = 0; i < KMULTI_CNT; i++)
		kmulti_addrs[i] = ksym_get_addr(kmulti_syms[i]);
	if (test__start_subtest("kprobe_multi_link_info"))
		test_kprobe_multi_fill_link_info(skel, false, false);
	if (test__start_subtest("kretprobe_multi_link_info"))
		test_kprobe_multi_fill_link_info(skel, true, false);
	if (test__start_subtest("kprobe_multi_invalid_ubuff"))
		test_kprobe_multi_fill_link_info(skel, true, true);

	if (test__start_subtest("uprobe_multi_link_info"))
		test_uprobe_multi_fill_link_info(skel, false, false);
	if (test__start_subtest("uretprobe_multi_link_info"))
		test_uprobe_multi_fill_link_info(skel, true, false);
	if (test__start_subtest("uprobe_multi_invalid"))
		test_uprobe_multi_fill_link_info(skel, false, true);

cleanup:
	test_fill_link_info__destroy(skel);
}