// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_attach_kprobe_sleepable.skel.h"
#include "test_attach_probe_manual.skel.h"
#include "test_attach_probe.skel.h"

/* this is how a USDT semaphore is actually defined, except for the volatile modifier */
volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute__((section(".probes")));
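/*
 * The kernel increments this counter for every u[ret]probe attached with a
 * matching ref_ctr_offset and decrements it on detach; test_uprobe_ref_ctr()
 * below checks it around attach, and test_attach_probe() verifies it drops
 * back to zero after cleanup.
 */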

/* uprobe attach point */
static noinline void trigger_func(void)
{
	asm volatile ("");
}

/* attach point for byname uprobe */
static noinline void trigger_func2(void)
{
	asm volatile ("");
}

/* attach point for byname sleepable uprobe */
static noinline void trigger_func3(void)
{
	asm volatile ("");
}

/* attach point for ref_ctr */
static noinline void trigger_func4(void)
{
	asm volatile ("");
}

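/*
 * User-space data handed to the sleepable uprobe programs via
 * skel->bss->user_ptr in test_uprobe_sleepable(); the BPF side is expected to
 * read it from user memory.
 */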
static char test_data[] = "test_data";

/* manual attach kprobe/kretprobe/uprobe/uretprobe tests */
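/*
 * attach_mode selects how libbpf wires up the probes: PROBE_ATTACH_MODE_LEGACY
 * forces the tracefs [ku]probe_events interface, PROBE_ATTACH_MODE_PERF uses
 * perf_event_open() plus ioctl, PROBE_ATTACH_MODE_LINK additionally wraps the
 * perf event in a BPF link, and PROBE_ATTACH_MODE_DEFAULT lets libbpf pick the
 * newest method the kernel supports.
 */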
static void test_attach_probe_manual(enum probe_attach_mode attach_mode)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
	struct bpf_link *kprobe_link, *kretprobe_link;
	struct bpf_link *uprobe_link, *uretprobe_link;
	struct test_attach_probe_manual *skel;
	ssize_t uprobe_offset;

	skel = test_attach_probe_manual__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
		return;

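	/* resolve trigger_func to a file offset within /proc/self/exe for the
	 * offset-based u[ret]probe attachments below
	 */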
	uprobe_offset = get_uprobe_offset(&trigger_func);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
		goto cleanup;

	/* manual-attach kprobe/kretprobe */
	kprobe_opts.attach_mode = attach_mode;
	kprobe_opts.retprobe = false;
	kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						      SYS_NANOSLEEP_KPROBE_NAME,
						      &kprobe_opts);
	if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe"))
		goto cleanup;
	skel->links.handle_kprobe = kprobe_link;

	kprobe_opts.retprobe = true;
	kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
							 SYS_NANOSLEEP_KPROBE_NAME,
							 &kprobe_opts);
	if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe"))
		goto cleanup;
	skel->links.handle_kretprobe = kretprobe_link;

	/* manual-attach uprobe/uretprobe */
	uprobe_opts.attach_mode = attach_mode;
	uprobe_opts.ref_ctr_offset = 0;
	uprobe_opts.retprobe = false;
	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
						      0 /* self pid */,
						      "/proc/self/exe",
						      uprobe_offset,
						      &uprobe_opts);
	if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe"))
		goto cleanup;
	skel->links.handle_uprobe = uprobe_link;

	uprobe_opts.retprobe = true;
	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
							 -1 /* any pid */,
							 "/proc/self/exe",
							 uprobe_offset, &uprobe_opts);
	if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe"))
		goto cleanup;
	skel->links.handle_uretprobe = uretprobe_link;

	/* attach uprobe by function name manually */
	uprobe_opts.func_name = "trigger_func2";
	uprobe_opts.retprobe = false;
	uprobe_opts.ref_ctr_offset = 0;
	skel->links.handle_uprobe_byname =
		bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname,
						0 /* this pid */,
						"/proc/self/exe",
						0, &uprobe_opts);
	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname"))
		goto cleanup;

	/* trigger & validate kprobe && kretprobe */
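	/* usleep() enters the nanosleep syscall, which is the symbol that
	 * SYS_NANOSLEEP_KPROBE_NAME names on this architecture
	 */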
	usleep(1);

	/* trigger & validate uprobe & uretprobe */
	trigger_func();

	/* trigger & validate uprobe attached by name */
	trigger_func2();

	ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
	ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
	ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res");
	ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res");
	ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res");

cleanup:
	test_attach_probe_manual__destroy(skel);
}

static void test_attach_probe_auto(struct test_attach_probe *skel)
{
	struct bpf_link *uprobe_err_link;

	/* auto-attachable kprobe and kretprobe */
	skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto);
	ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto");

	skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto);
	ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto");

	/* verify auto-attach fails for old-style uprobe definition */
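	/* libbpf can only auto-attach u[ret]probe programs whose SEC() name
	 * also encodes the target binary and function (for example
	 * "uretprobe//proc/self/exe:trigger_func2"); a plain "uprobe/..."
	 * section carries no target, so bpf_program__attach() is expected to
	 * fail with -EOPNOTSUPP here
	 */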
	uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname);
	if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP,
		       "auto-attach should fail for old-style name"))
		return;

	/* verify auto-attach works */
	skel->links.handle_uretprobe_byname =
		bpf_program__attach(skel->progs.handle_uretprobe_byname);
	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname"))
		return;

	/* trigger & validate kprobe && kretprobe */
	usleep(1);

	/* trigger & validate uprobe attached by name */
	trigger_func2();

	ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res");
	ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res");
	ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res");
}

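/*
 * Attach by function name into a shared library: with offset 0 and a bare
 * "libc.so.6" binary argument, libbpf resolves the library's full path and
 * looks up the requested symbol inside it.
 */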
static void test_uprobe_lib(struct test_attach_probe *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	FILE *devnull;

	/* test attach by name for a library function, using the library
	 * as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo().
	 */
	uprobe_opts.func_name = "fopen";
	uprobe_opts.retprobe = false;
	skel->links.handle_uprobe_byname2 =
		bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname2,
						0 /* this pid */,
						"libc.so.6",
						0, &uprobe_opts);
	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2"))
		return;

	uprobe_opts.func_name = "fclose";
	uprobe_opts.retprobe = true;
	skel->links.handle_uretprobe_byname2 =
		bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_byname2,
						-1 /* any pid */,
						"libc.so.6",
						0, &uprobe_opts);
	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2"))
		return;

	/* trigger & validate shared library u[ret]probes attached by name */
	devnull = fopen("/dev/null", "r");
	if (!ASSERT_OK_PTR(devnull, "open_devnull"))
		return;
	fclose(devnull);

	ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res");
	ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res");
}

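/*
 * Exercise ref_ctr_offset handling: the offset passed at attach time points at
 * uprobe_ref_ctr (in the .probes section above), and the kernel is expected to
 * bump that semaphore while the u[ret]probes are attached and drop it back to
 * zero once the links are destroyed.
 */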
static void test_uprobe_ref_ctr(struct test_attach_probe *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	struct bpf_link *uprobe_link, *uretprobe_link;
	ssize_t uprobe_offset, ref_ctr_offset;

	uprobe_offset = get_uprobe_offset(&trigger_func4);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset_ref_ctr"))
		return;

	ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr);
	if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))
		return;

	ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");

	uprobe_opts.retprobe = false;
	uprobe_opts.ref_ctr_offset = ref_ctr_offset;
	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_ref_ctr,
						      0 /* self pid */,
						      "/proc/self/exe",
						      uprobe_offset,
						      &uprobe_opts);
	if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_ref_ctr"))
		return;
	skel->links.handle_uprobe_ref_ctr = uprobe_link;

	ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");

	/* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */
	uprobe_opts.retprobe = true;
	uprobe_opts.ref_ctr_offset = ref_ctr_offset;
	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_ref_ctr,
							 -1 /* any pid */,
							 "/proc/self/exe",
							 uprobe_offset, &uprobe_opts);
	if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_ref_ctr"))
		return;
	skel->links.handle_uretprobe_ref_ctr = uretprobe_link;
}

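/*
 * Uprobes reuse BPF_PROG_TYPE_KPROBE, so a sleepable program of this type can
 * be loaded, but the kernel only supports sleepable attachment for uprobes;
 * attaching it to an actual kprobe must therefore fail.
 */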
static void test_kprobe_sleepable(void)
{
	struct test_attach_kprobe_sleepable *skel;

	skel = test_attach_kprobe_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_kprobe_sleepable_open"))
		return;

	/* sleepable kprobe test case needs flags set before loading */
	if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable,
		       BPF_F_SLEEPABLE), "kprobe_sleepable_flags"))
		goto cleanup;

	if (!ASSERT_OK(test_attach_kprobe_sleepable__load(skel),
		       "skel_kprobe_sleepable_load"))
		goto cleanup;

	/* sleepable kprobes should not attach successfully */
	skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable);
	ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable");

cleanup:
	test_attach_kprobe_sleepable__destroy(skel);
}

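/*
 * Sleepable and non-sleepable u[ret]probe variants are auto-attached purely by
 * their SEC() names (the sleepable ones presumably use the "uprobe.s"/
 * "uretprobe.s" section prefixes) and all fire on trigger_func3().
 */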
static void test_uprobe_sleepable(struct test_attach_probe *skel)
{
	/* test sleepable uprobe and uretprobe variants */
	skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable);
	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable"))
		return;

	skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3);
	if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, "attach_uprobe_byname3"))
		return;

	skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable);
	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable"))
		return;

	skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3);
	if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3"))
		return;

	skel->bss->user_ptr = test_data;

	/* trigger & validate sleepable uprobe attached by name */
	trigger_func3();

	ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res");
	ASSERT_EQ(skel->bss->uprobe_byname3_str_sleepable_res, 10, "check_uprobe_byname3_str_sleepable_res");
	ASSERT_EQ(skel->bss->uprobe_byname3_res, 11, "check_uprobe_byname3_res");
	ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 12, "check_uretprobe_byname3_sleepable_res");
	ASSERT_EQ(skel->bss->uretprobe_byname3_str_sleepable_res, 13, "check_uretprobe_byname3_str_sleepable_res");
	ASSERT_EQ(skel->bss->uretprobe_byname3_res, 14, "check_uretprobe_byname3_res");
}

void test_attach_probe(void)
{
	struct test_attach_probe *skel;

	skel = test_attach_probe__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load"))
		goto cleanup;
	if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
		goto cleanup;

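	/* the manual subtests use their own test_attach_probe_manual skeleton
	 * so each attach mode gets a fresh load; the remaining subtests share
	 * the skeleton opened above
	 */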
	if (test__start_subtest("manual-default"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_DEFAULT);
	if (test__start_subtest("manual-legacy"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_LEGACY);
	if (test__start_subtest("manual-perf"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
	if (test__start_subtest("manual-link"))
		test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);

	if (test__start_subtest("auto"))
		test_attach_probe_auto(skel);
	if (test__start_subtest("kprobe-sleepable"))
		test_kprobe_sleepable();
	if (test__start_subtest("uprobe-lib"))
		test_uprobe_lib(skel);
	if (test__start_subtest("uprobe-sleepable"))
		test_uprobe_sleepable(skel);
	if (test__start_subtest("uprobe-ref_ctr"))
		test_uprobe_ref_ctr(skel);

cleanup:
	test_attach_probe__destroy(skel);
	ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_cleanup");
}