// SPDX-License-Identifier: GPL-2.0
#include <unistd.h>
#include <test_progs.h>
#include <network_helpers.h>
#include "tailcall_poke.skel.h"
#include "tailcall_bpf2bpf_hierarchy2.skel.h"
#include "tailcall_bpf2bpf_hierarchy3.skel.h"
#include "tailcall_freplace.skel.h"
#include "tc_bpf2bpf.skel.h"
#include "tailcall_fail.skel.h"

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
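/* For reference, the BPF side of this test (progs/tailcall1.bpf.c) is
 * assumed to follow the shape sketched below; this is a sketch derived from
 * the assertions in this file, not the verbatim program. A prog array
 * "jmp_table" holds one slot per "classifier_%d" program, and "entry" walks
 * the slots via bpf_tail_call(), which falls through on an empty slot:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 3);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	SEC("tc") int classifier_0(struct __sk_buff *skb) { return 0; }
 *	... classifier_1 returns 1, classifier_2 returns 2 ...
 *
 *	SEC("tc") int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, 0);
 *		bpf_tail_call(skb, &jmp_table, 1);
 *		bpf_tail_call(skb, &jmp_table, 2);
 *		return 3;	(reached only when the chosen slot is empty)
 *	}
 *
 * This is why the checks below expect retval 3 once entries are deleted.
 */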
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}

out:
	bpf_object__close(obj);
}

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 2, "tailcall retval");

	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
	bpf_object__close(obj);
}

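/* test_tailcall_count is a shared helper for the limit tests below: it wires
 * classifier_0 into slot 0 of the jmp_table and expects the BPF side to bump
 * a global counter on every traversal. The value 33 asserted below matches
 * the kernel's tail call limit MAX_TAIL_CALL_CNT (33 on recent kernels), so
 * the check verifies that the limit is enforced exactly. When requested, the
 * tail-calling subprog is additionally traced by fentry and/or fexit progs
 * whose own counters must observe the same number of invocations.
 */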
static void test_tailcall_count(const char *which, bool test_fentry,
				bool test_fexit)
{
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 33, "fentry count");
	}

	if (test_fexit) {
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		data_fd = bpf_map__fd(data_map);
		if (!ASSERT_FALSE(data_fd < 0,
				  "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 33, "fexit count");
	}

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches expectations. JIT uses direct jump.
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o", false, false);
}

/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches expectations. JIT uses indirect jump.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o", false, false);
}

/* test_tailcall_4 checks that the kernel properly selects an indirect jump
 * for the case where the key is not known at verification time. The key is
 * passed via global data to select different targets whose return values we
 * can compare.
 */
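/* A sketch of the assumed BPF side (cf. progs/tailcall4.bpf.c): the entry
 * prog loads the key from a global in .bss, so the verifier cannot treat it
 * as a constant and the JIT has to emit an indirect jump:
 *
 *	static volatile int selector;
 *
 *	SEC("tc") int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, selector);
 *		return 3;
 *	}
 *
 * The test below pokes the selector through the fd of the tailcall.bss map.
 */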
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 probes, similarly to test_tailcall_4, that the kernel
 * generates an indirect jump when the key is constant but differs across
 * branches.
 */
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tail calls work correctly in
 * combination with BPF subprograms.
 */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* Make sure that the subprog can access ctx and that the entry prog
	 * that called this subprog can properly return.
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call.
 */
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within BPF subprograms that contain the tail calls.
 */
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf4.skel.h"

/* test_tailcall_bpf2bpf_4 checks that the tailcall counter is correctly
 * preserved across tail calls combined with bpf2bpf calls. To make sure that
 * the tailcall counter behaves correctly, the BPF program goes through the
 * following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tail calls and start counting from subprog2,
 * where the loop begins. At the end of the test, the global counter must
 * equal 31, because the tailcall counter includes the first two tail calls
 * whereas the global counter is incremented only on the loop shown in the
 * flow above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force the verifier to patch instructions. This allows us to ensure the
 * jump logic remains correct under instruction movement.
 */
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");

out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf6.skel.h"

/* Tail call counting works even when there is data on the stack which is
 * not aligned to 8 bytes.
 */
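/* A sketch of the shape being exercised (an assumption, cf.
 * progs/tailcall_bpf2bpf6.bpf.c): the subprog keeps an odd-sized buffer live
 * across the tail call, so the stack depth is not a multiple of 8 when the
 * JIT has to locate its tail call counter slot:
 *
 *	static __noinline int subprog_tail(struct __sk_buff *skb)
 *	{
 *		volatile char arr[3] = {};	(live, unaligned stack data)
 *
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return arr[0];
 *	}
 */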
static void test_tailcall_bpf2bpf_6(void)
{
	struct tailcall_bpf2bpf6 *obj;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	obj = tailcall_bpf2bpf6__open_and_load();
	if (!ASSERT_OK_PTR(obj, "open and load"))
		return;

	main_fd = bpf_program__fd(obj->progs.entry);
	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
		goto out;

	map_fd = bpf_map__fd(obj->maps.jmp_table);
	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
		goto out;

	prog_fd = bpf_program__fd(obj->progs.classifier_0);
	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "jmp_table map update"))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "entry prog test run");
	ASSERT_EQ(topts.retval, 0, "tailcall retval");

	data_fd = bpf_map__fd(obj->maps.bss);
	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "bss map lookup");
	ASSERT_EQ(val, 1, "done flag is set");

out:
	tailcall_bpf2bpf6__destroy(obj);
}

/* test_tailcall_bpf2bpf_fentry checks that the count value of the tail call
 * limit enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call, and the bpf2bpf call is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, false);
}

/* test_tailcall_bpf2bpf_fexit checks that the count value of the tail call
 * limit enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call, and the bpf2bpf call is traced by fexit.
 */
static void test_tailcall_bpf2bpf_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", false, true);
}

/* test_tailcall_bpf2bpf_fentry_fexit checks that the count value of the tail
 * call limit enforcement matches expectations when the tail call is preceded
 * by a bpf2bpf call, and the bpf2bpf call is traced by both fentry and fexit.
 */
static void test_tailcall_bpf2bpf_fentry_fexit(void)
{
	test_tailcall_count("tailcall_bpf2bpf2.bpf.o", true, true);
}

/* test_tailcall_bpf2bpf_fentry_entry checks that the count value of the tail
 * call limit enforcement matches expectations when the tail call is preceded
 * by a bpf2bpf call, and the bpf2bpf caller is traced by fentry.
 */
static void test_tailcall_bpf2bpf_fentry_entry(void)
{
	struct bpf_object *tgt_obj = NULL, *fentry_obj = NULL;
	int err, map_fd, prog_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_link *fentry_link = NULL;
	struct bpf_program *prog;
	char buff[128] = {};

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o",
				 BPF_PROG_TYPE_SCHED_CLS,
				 &tgt_obj, &prog_fd);
	if (!ASSERT_OK(err, "load tgt_obj"))
		return;

	prog_array = bpf_object__find_map_by_name(tgt_obj, "jmp_table");
	if (!ASSERT_OK_PTR(prog_array, "find jmp_table map"))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (!ASSERT_FALSE(map_fd < 0, "find jmp_table map fd"))
		goto out;

	prog = bpf_object__find_program_by_name(tgt_obj, "classifier_0");
	if (!ASSERT_OK_PTR(prog, "find classifier_0 prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_FALSE(prog_fd < 0, "find classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
					   NULL);
	if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
		goto out;

	prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
	if (!ASSERT_OK_PTR(prog, "find fentry prog"))
		goto out;

	err = bpf_program__set_attach_target(prog, prog_fd, "classifier_0");
	if (!ASSERT_OK(err, "set_attach_target classifier_0"))
		goto out;

	err = bpf_object__load(fentry_obj);
	if (!ASSERT_OK(err, "load fentry_obj"))
		goto out;

	fentry_link = bpf_program__attach_trace(prog);
	if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
		goto out;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(tgt_obj, "tailcall.bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0, "find tailcall.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
	if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
			  "find tailcall_bpf2bpf_fentry.bss map"))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (!ASSERT_FALSE(data_fd < 0,
			  "find tailcall_bpf2bpf_fentry.bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "fentry count");
	ASSERT_EQ(val, 1, "fentry count");

out:
	bpf_link__destroy(fentry_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(tgt_obj);
}

#define JMP_TABLE "/sys/fs/bpf/jmp_table"

static int poke_thread_exit;

static void *poke_update(void *arg)
{
	__u32 zero = 0, prog1_fd, prog2_fd, map_fd;
	struct tailcall_poke *call = arg;

	map_fd = bpf_map__fd(call->maps.jmp_table);
	prog1_fd = bpf_program__fd(call->progs.call1);
	prog2_fd = bpf_program__fd(call->progs.call2);

	while (!poke_thread_exit) {
		bpf_map_update_elem(map_fd, &zero, &prog1_fd, BPF_ANY);
		bpf_map_update_elem(map_fd, &zero, &prog2_fd, BPF_ANY);
	}

	return NULL;
}

/*
 * We are trying to hit a prog array update during another program load
 * that shares the same prog array map.
 *
 * For that, we share the jmp_table map between two skeleton instances by
 * pinning the jmp_table to the same path. Then the first skeleton instance
 * periodically updates the jmp_table in the 'poke update' thread while we
 * load the second skeleton instance in the main thread.
 */
static void test_tailcall_poke(void)
{
	struct tailcall_poke *call, *test;
	int err, cnt = 10;
	pthread_t thread;

	unlink(JMP_TABLE);

	call = tailcall_poke__open_and_load();
	if (!ASSERT_OK_PTR(call, "tailcall_poke__open"))
		return;

	err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE);
	if (!ASSERT_OK(err, "bpf_map__pin"))
		goto out;

	err = pthread_create(&thread, NULL, poke_update, call);
	if (!ASSERT_OK(err, "new toggler"))
		goto out;

	while (cnt--) {
		test = tailcall_poke__open();
		if (!ASSERT_OK_PTR(test, "tailcall_poke__open"))
			break;

		err = bpf_map__set_pin_path(test->maps.jmp_table, JMP_TABLE);
		if (!ASSERT_OK(err, "bpf_map__set_pin_path")) {
			tailcall_poke__destroy(test);
			break;
		}

		bpf_program__set_autoload(test->progs.test, true);
		bpf_program__set_autoload(test->progs.call1, false);
		bpf_program__set_autoload(test->progs.call2, false);

		err = tailcall_poke__load(test);
		tailcall_poke__destroy(test);
		if (!ASSERT_OK(err, "tailcall_poke__load"))
			break;
	}

	poke_thread_exit = 1;
	ASSERT_OK(pthread_join(thread, NULL), "pthread_join");

out:
	bpf_map__unpin(call->maps.jmp_table, JMP_TABLE);
	tailcall_poke__destroy(call);
}

static void test_tailcall_hierarchy_count(const char *which, bool test_fentry,
					  bool test_fexit,
					  bool test_fentry_entry)
{
	int err, map_fd, prog_fd, main_data_fd, fentry_data_fd, fexit_data_fd, i, val;
	struct bpf_object *obj = NULL, *fentry_obj = NULL, *fexit_obj = NULL;
	struct bpf_link *fentry_link = NULL, *fexit_link = NULL;
	struct bpf_program *prog, *fentry_prog;
	struct bpf_map *prog_array, *data_map;
	int fentry_prog_fd;
	char buff[128] = {};

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (!ASSERT_OK(err, "load obj"))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (!ASSERT_OK_PTR(prog, "find entry prog"))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
		goto out;

	if (test_fentry_entry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_hierarchy_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		fentry_prog = bpf_object__find_program_by_name(fentry_obj,
							       "fentry");
		if (!ASSERT_OK_PTR(fentry_prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(fentry_prog, prog_fd,
						     "entry");
		if (!ASSERT_OK(err, "set_attach_target entry"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(fentry_prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;

		fentry_prog_fd = bpf_program__fd(fentry_prog);
		if (!ASSERT_GE(fentry_prog_fd, 0, "fentry_prog_fd"))
			goto out;

		prog_array = bpf_object__find_map_by_name(fentry_obj, "jmp_table");
		if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
			goto out;

		map_fd = bpf_map__fd(prog_array);
		if (!ASSERT_GE(map_fd, 0, "map_fd"))
			goto out;

		i = 0;
		err = bpf_map_update_elem(map_fd, &i, &fentry_prog_fd, BPF_ANY);
		if (!ASSERT_OK(err, "update jmp_table"))
			goto out;

		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find data_map"))
			goto out;

	} else {
		prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
		if (!ASSERT_OK_PTR(prog_array, "find jmp_table"))
			goto out;

		map_fd = bpf_map__fd(prog_array);
		if (!ASSERT_GE(map_fd, 0, "map_fd"))
			goto out;

		i = 0;
		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (!ASSERT_OK(err, "update jmp_table"))
			goto out;

		data_map = bpf_object__find_map_by_name(obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find data_map"))
			goto out;
	}

	if (test_fentry) {
		fentry_obj = bpf_object__open_file("tailcall_bpf2bpf_fentry.bpf.o",
						   NULL);
		if (!ASSERT_OK_PTR(fentry_obj, "open fentry_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fentry_obj, "fentry");
		if (!ASSERT_OK_PTR(prog, "find fentry prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fentry_obj);
		if (!ASSERT_OK(err, "load fentry_obj"))
			goto out;

		fentry_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fentry_link, "attach_trace"))
			goto out;
	}

	if (test_fexit) {
		fexit_obj = bpf_object__open_file("tailcall_bpf2bpf_fexit.bpf.o",
						  NULL);
		if (!ASSERT_OK_PTR(fexit_obj, "open fexit_obj file"))
			goto out;

		prog = bpf_object__find_program_by_name(fexit_obj, "fexit");
		if (!ASSERT_OK_PTR(prog, "find fexit prog"))
			goto out;

		err = bpf_program__set_attach_target(prog, prog_fd,
						     "subprog_tail");
		if (!ASSERT_OK(err, "set_attach_target subprog_tail"))
			goto out;

		err = bpf_object__load(fexit_obj);
		if (!ASSERT_OK(err, "load fexit_obj"))
			goto out;

		fexit_link = bpf_program__attach_trace(prog);
		if (!ASSERT_OK_PTR(fexit_link, "attach_trace"))
			goto out;
	}

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	main_data_fd = bpf_map__fd(data_map);
	if (!ASSERT_GE(main_data_fd, 0, "main_data_fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(main_data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 34, "tailcall count");

	if (test_fentry) {
		data_map = bpf_object__find_map_by_name(fentry_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fentry.bss map"))
			goto out;

		fentry_data_fd = bpf_map__fd(data_map);
		if (!ASSERT_GE(fentry_data_fd, 0,
			       "find tailcall_bpf2bpf_fentry.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 68, "fentry count");
	}

	if (test_fexit) {
		data_map = bpf_object__find_map_by_name(fexit_obj, ".bss");
		if (!ASSERT_FALSE(!data_map || !bpf_map__is_internal(data_map),
				  "find tailcall_bpf2bpf_fexit.bss map"))
			goto out;

		fexit_data_fd = bpf_map__fd(data_map);
		if (!ASSERT_GE(fexit_data_fd, 0,
			       "find tailcall_bpf2bpf_fexit.bss map fd"))
			goto out;

		i = 0;
		err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 68, "fexit count");
	}

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
		goto out;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(main_data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 35, "tailcall count");

	if (test_fentry) {
		i = 0;
		err = bpf_map_lookup_elem(fentry_data_fd, &i, &val);
		ASSERT_OK(err, "fentry count");
		ASSERT_EQ(val, 70, "fentry count");
	}

	if (test_fexit) {
		i = 0;
		err = bpf_map_lookup_elem(fexit_data_fd, &i, &val);
		ASSERT_OK(err, "fexit count");
		ASSERT_EQ(val, 70, "fexit count");
	}

out:
	bpf_link__destroy(fentry_link);
	bpf_link__destroy(fexit_link);
	bpf_object__close(fentry_obj);
	bpf_object__close(fexit_obj);
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_hierarchy_1 checks that the count value of the tail
 * call limit enforcement matches expectations when tail calls are preceded
 * by two bpf2bpf calls.
 *
 *         subprog --tailcall-> entry
 * entry <
 *         subprog --tailcall-> entry
 */
static void test_tailcall_bpf2bpf_hierarchy_1(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      false, false, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fentry checks that the count value of the
 * tail call limit enforcement matches expectations when tail calls are
 * preceded by two bpf2bpf calls, and the two subprogs are traced by fentry.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      true, false, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fexit checks that the count value of the
 * tail call limit enforcement matches expectations when tail calls are
 * preceded by two bpf2bpf calls, and the two subprogs are traced by fexit.
 */
static void test_tailcall_bpf2bpf_hierarchy_fexit(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      false, true, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fentry_fexit checks that the count value
 * of the tail call limit enforcement matches expectations when tail calls
 * are preceded by two bpf2bpf calls, and the two subprogs are traced by both
 * fentry and fexit.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry_fexit(void)
{
	test_tailcall_hierarchy_count("tailcall_bpf2bpf_hierarchy1.bpf.o",
				      true, true, false);
}

/* test_tailcall_bpf2bpf_hierarchy_fentry_entry checks that the count value
 * of the tail call limit enforcement matches expectations when tail calls
 * are preceded by two bpf2bpf calls in fentry.
 */
static void test_tailcall_bpf2bpf_hierarchy_fentry_entry(void)
{
	test_tailcall_hierarchy_count("tc_dummy.bpf.o", false, false, true);
}

/* test_tailcall_bpf2bpf_hierarchy_2 checks that the count value of the tail
 * call limit enforcement matches expectations:
 *
 *         subprog_tail0 --tailcall-> classifier_0 -> subprog_tail0
 * entry <
 *         subprog_tail1 --tailcall-> classifier_1 -> subprog_tail1
 */
static void test_tailcall_bpf2bpf_hierarchy_2(void)
{
	RUN_TESTS(tailcall_bpf2bpf_hierarchy2);
}

/* test_tailcall_bpf2bpf_hierarchy_3 checks that the count value of the tail
 * call limit enforcement matches expectations:
 *
 *                                   subprog with jmp_table0 to classifier_0
 * entry --tailcall-> classifier_0 <
 *                                   subprog with jmp_table1 to classifier_0
 */
static void test_tailcall_bpf2bpf_hierarchy_3(void)
{
	RUN_TESTS(tailcall_bpf2bpf_hierarchy3);
}

/* test_tailcall_freplace checks that the freplace prog fails to update the
 * prog_array map, regardless of whether the freplace prog is attached to its
 * target.
 */
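/* Background (an assumption drawn from the assertions below, not stated in
 * this file): the kernel treats program extensions (freplace) and tail calls
 * as mutually exclusive, since an extended program cannot uphold the tail
 * call counter guarantees. Hence any bpf_map_update_elem() that tries to put
 * an freplace prog, or a prog that is currently extended, into a prog_array
 * is expected to fail.
 */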
static void test_tailcall_freplace(void)
{
	struct tailcall_freplace *freplace_skel = NULL;
	struct bpf_link *freplace_link = NULL;
	struct bpf_program *freplace_prog;
	struct tc_bpf2bpf *tc_skel = NULL;
	int prog_fd, tc_prog_fd, map_fd;
	char buff[128] = {};
	int err, key;

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	freplace_skel = tailcall_freplace__open();
	if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
		return;

	tc_skel = tc_bpf2bpf__open_and_load();
	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
		goto out;

	tc_prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
	freplace_prog = freplace_skel->progs.entry_freplace;
	err = bpf_program__set_attach_target(freplace_prog, tc_prog_fd,
					     "subprog_tc");
	if (!ASSERT_OK(err, "set_attach_target"))
		goto out;

	err = tailcall_freplace__load(freplace_skel);
	if (!ASSERT_OK(err, "tailcall_freplace__load"))
		goto out;

	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
	prog_fd = bpf_program__fd(freplace_prog);
	key = 0;
	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	ASSERT_ERR(err, "update jmp_table failure");

	freplace_link = bpf_program__attach_freplace(freplace_prog, tc_prog_fd,
						     "subprog_tc");
	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
		goto out;

	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	ASSERT_ERR(err, "update jmp_table failure");

out:
	bpf_link__destroy(freplace_link);
	tailcall_freplace__destroy(freplace_skel);
	tc_bpf2bpf__destroy(tc_skel);
}

/* test_tailcall_bpf2bpf_freplace checks two expected failures: attaching an
 * freplace prog to a prog that is currently a tail callee (i.e. sits in a
 * prog_array map), and updating a prog that is currently extended by an
 * freplace prog into a prog_array map.
 */
static void test_tailcall_bpf2bpf_freplace(void)
{
	struct tailcall_freplace *freplace_skel = NULL;
	struct bpf_link *freplace_link = NULL;
	struct tc_bpf2bpf *tc_skel = NULL;
	char buff[128] = {};
	int prog_fd, map_fd;
	int err, key;

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	tc_skel = tc_bpf2bpf__open_and_load();
	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
		goto out;

	prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
	freplace_skel = tailcall_freplace__open();
	if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
		goto out;

	err = bpf_program__set_attach_target(freplace_skel->progs.entry_freplace,
					     prog_fd, "subprog_tc");
	if (!ASSERT_OK(err, "set_attach_target"))
		goto out;

	err = tailcall_freplace__load(freplace_skel);
	if (!ASSERT_OK(err, "tailcall_freplace__load"))
		goto out;

	/* OK to attach then detach freplace prog. */

	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
						     prog_fd, "subprog_tc");
	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
		goto out;

	err = bpf_link__destroy(freplace_link);
	if (!ASSERT_OK(err, "destroy link"))
		goto out;

	/* OK to update prog_array map then delete element from the map. */

	key = 0;
	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	err = bpf_map_delete_elem(map_fd, &key);
	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
		goto out;

	/* Fail to attach a tail callee prog with freplace prog. */

	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
						     prog_fd, "subprog_tc");
	if (!ASSERT_ERR_PTR(freplace_link, "attach_freplace failure"))
		goto out;

	err = bpf_map_delete_elem(map_fd, &key);
	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
		goto out;

	/* Fail to update an extended prog to prog_array map. */

	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
						     prog_fd, "subprog_tc");
	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
		goto out;

	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	if (!ASSERT_ERR(err, "update jmp_table failure"))
		goto out;

out:
	bpf_link__destroy(freplace_link);
	tailcall_freplace__destroy(freplace_skel);
	tc_bpf2bpf__destroy(tc_skel);
}

static void test_tailcall_failure(void)
{
	RUN_TESTS(tailcall_fail);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
	if (test__start_subtest("tailcall_bpf2bpf_fentry"))
		test_tailcall_bpf2bpf_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_fexit"))
		test_tailcall_bpf2bpf_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_fexit"))
		test_tailcall_bpf2bpf_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
		test_tailcall_bpf2bpf_fentry_entry();
	if (test__start_subtest("tailcall_poke"))
		test_tailcall_poke();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_1"))
		test_tailcall_bpf2bpf_hierarchy_1();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry"))
		test_tailcall_bpf2bpf_hierarchy_fentry();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fexit"))
		test_tailcall_bpf2bpf_hierarchy_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_fexit"))
		test_tailcall_bpf2bpf_hierarchy_fentry_fexit();
	if (test__start_subtest("tailcall_bpf2bpf_hierarchy_fentry_entry"))
		test_tailcall_bpf2bpf_hierarchy_fentry_entry();
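	/* The RUN_TESTS()-based tests below register their own subtests via
	 * the test loader, so they are invoked without test__start_subtest()
	 * wrappers (assumed from the test_loader framework's behavior).
	 */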
	test_tailcall_bpf2bpf_hierarchy_2();
	test_tailcall_bpf2bpf_hierarchy_3();
	if (test__start_subtest("tailcall_freplace"))
		test_tailcall_freplace();
	if (test__start_subtest("tailcall_bpf2bpf_freplace"))
		test_tailcall_bpf2bpf_freplace();
	if (test__start_subtest("tailcall_failure"))
		test_tailcall_failure();
}
1// SPDX-License-Identifier: GPL-2.0
2#include <test_progs.h>
3#include <network_helpers.h>
4
5/* test_tailcall_1 checks basic functionality by patching multiple locations
6 * in a single program for a single tail call slot with nop->jmp, jmp->nop
7 * and jmp->jmp rewrites. Also checks for nop->nop.
8 */
9static void test_tailcall_1(void)
10{
11 int err, map_fd, prog_fd, main_fd, i, j;
12 struct bpf_map *prog_array;
13 struct bpf_program *prog;
14 struct bpf_object *obj;
15 char prog_name[32];
16 char buff[128] = {};
17 LIBBPF_OPTS(bpf_test_run_opts, topts,
18 .data_in = buff,
19 .data_size_in = sizeof(buff),
20 .repeat = 1,
21 );
22
23 err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
24 &prog_fd);
25 if (CHECK_FAIL(err))
26 return;
27
28 prog = bpf_object__find_program_by_name(obj, "entry");
29 if (CHECK_FAIL(!prog))
30 goto out;
31
32 main_fd = bpf_program__fd(prog);
33 if (CHECK_FAIL(main_fd < 0))
34 goto out;
35
36 prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
37 if (CHECK_FAIL(!prog_array))
38 goto out;
39
40 map_fd = bpf_map__fd(prog_array);
41 if (CHECK_FAIL(map_fd < 0))
42 goto out;
43
44 for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
45 snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
46
47 prog = bpf_object__find_program_by_name(obj, prog_name);
48 if (CHECK_FAIL(!prog))
49 goto out;
50
51 prog_fd = bpf_program__fd(prog);
52 if (CHECK_FAIL(prog_fd < 0))
53 goto out;
54
55 err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
56 if (CHECK_FAIL(err))
57 goto out;
58 }
59
60 for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
61 err = bpf_prog_test_run_opts(main_fd, &topts);
62 ASSERT_OK(err, "tailcall");
63 ASSERT_EQ(topts.retval, i, "tailcall retval");
64
65 err = bpf_map_delete_elem(map_fd, &i);
66 if (CHECK_FAIL(err))
67 goto out;
68 }
69
70 err = bpf_prog_test_run_opts(main_fd, &topts);
71 ASSERT_OK(err, "tailcall");
72 ASSERT_EQ(topts.retval, 3, "tailcall retval");
73
74 for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
75 snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
76
77 prog = bpf_object__find_program_by_name(obj, prog_name);
78 if (CHECK_FAIL(!prog))
79 goto out;
80
81 prog_fd = bpf_program__fd(prog);
82 if (CHECK_FAIL(prog_fd < 0))
83 goto out;
84
85 err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
86 if (CHECK_FAIL(err))
87 goto out;
88 }
89
90 err = bpf_prog_test_run_opts(main_fd, &topts);
91 ASSERT_OK(err, "tailcall");
92 ASSERT_OK(topts.retval, "tailcall retval");
93
94 for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
95 j = bpf_map__max_entries(prog_array) - 1 - i;
96 snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);
97
98 prog = bpf_object__find_program_by_name(obj, prog_name);
99 if (CHECK_FAIL(!prog))
100 goto out;
101
102 prog_fd = bpf_program__fd(prog);
103 if (CHECK_FAIL(prog_fd < 0))
104 goto out;
105
106 err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
107 if (CHECK_FAIL(err))
108 goto out;
109 }
110
111 for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
112 j = bpf_map__max_entries(prog_array) - 1 - i;
113
114 err = bpf_prog_test_run_opts(main_fd, &topts);
115 ASSERT_OK(err, "tailcall");
116 ASSERT_EQ(topts.retval, j, "tailcall retval");
117
118 err = bpf_map_delete_elem(map_fd, &i);
119 if (CHECK_FAIL(err))
120 goto out;
121 }
122
123 err = bpf_prog_test_run_opts(main_fd, &topts);
124 ASSERT_OK(err, "tailcall");
125 ASSERT_EQ(topts.retval, 3, "tailcall retval");
126
127 for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
128 err = bpf_map_delete_elem(map_fd, &i);
129 if (CHECK_FAIL(err >= 0 || errno != ENOENT))
130 goto out;
131
132 err = bpf_prog_test_run_opts(main_fd, &topts);
133 ASSERT_OK(err, "tailcall");
134 ASSERT_EQ(topts.retval, 3, "tailcall retval");
135 }
136
137out:
138 bpf_object__close(obj);
139}
140
141/* test_tailcall_2 checks that patching multiple programs for a single
142 * tail call slot works. It also jumps through several programs and tests
143 * the tail call limit counter.
144 */
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 2, "tailcall retval");

	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
	bpf_object__close(obj);
}

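/* test_tailcall_count loads the given object, wires classifier_0 into slot 0
 * of the jmp_table and verifies that the chain is cut off at the in-kernel
 * tail call limit. A sketch of the assumed BPF-side pattern (illustrative,
 * not the literal tailcall3/tailcall6 prog contents): the program keeps
 * re-entering itself until the limit trips, bumping a global counter on
 * each pass:
 *
 *	int count = 0;
 *
 *	SEC("tc")
 *	int classifier_0(struct __sk_buff *skb)
 *	{
 *		count++;
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return 1;	// reached once the limit is exhausted
 *	}
 *
 * With MAX_TAIL_CALL_CNT == 33, the counter is expected to read 33 below.
 */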
static void test_tailcall_count(const char *which)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses a direct jump here.
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o");
}

/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses an indirect jump here.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o");
}

/* test_tailcall_4 checks that the kernel properly selects an indirect jump
 * for the case where the key is not known at verification time. The key is
 * passed via global data to select different targets whose return values
 * we can compare.
 */
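/* A minimal sketch of the BPF-side pattern being exercised (assuming the
 * usual shape of the tailcall4 prog, not its literal contents): the entry
 * program reads the slot index from a .bss variable that this test writes
 * through data_fd, so the verifier cannot constant-fold the key:
 *
 *	int selector = 0;	// lives in tailcall.bss, set by the test
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, selector);
 *		return 3;	// only reached when the slot is empty
 *	}
 */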
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 probes, similarly to test_tailcall_4, that the kernel
 * generates an indirect jump when the key is constant on each path but
 * differs between branches.
 */
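/* Sketch of the assumed BPF-side shape (illustrative, not the literal
 * tailcall5 prog): every call site uses a constant key, but the constants
 * differ per branch, so a single direct jump cannot serve all paths:
 *
 *	switch (selector) {	// selector written via tailcall.bss
 *	case 1111: bpf_tail_call(skb, &jmp_table, 0); break;
 *	case 1234: bpf_tail_call(skb, &jmp_table, 1); break;
 *	case 5678: bpf_tail_call(skb, &jmp_table, 2); break;
 *	}
 *	return 3;
 */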
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tail calls work correctly in
 * combination with BPF subprograms.
 */
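/* Rough sketch of the assumed shape (illustrative, not the literal
 * tailcall_bpf2bpf1 prog): the tail call is issued from inside a bpf2bpf
 * subprogram rather than from the entry program itself:
 *
 *	static __noinline int subprog_tail(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return 0;
 *	}
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		return subprog_tail(skb);
 *	}
 */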
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that the subprog can access the ctx and that the entry
	 * prog that called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call.
 */
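/* The point of the bpf2bpf indirection is that the tail call counter must
 * survive the subprogram call frame. A sketch of the assumed shape
 * (illustrative, not the literal tailcall_bpf2bpf2 prog):
 *
 *	static __noinline int subprog_tail(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return 1;
 *	}
 *
 *	int count = 0;
 *
 *	SEC("tc")
 *	int classifier_0(struct __sk_buff *skb)
 *	{
 *		count++;
 *		return subprog_tail(skb);	// re-enters slot 0
 *	}
 */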
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within bpf subprograms that have the tail calls
 * in them.
 */
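/* Sketch of the assumed pattern (illustrative, not the literal
 * tailcall_bpf2bpf3 prog): the subprogram issuing the tail call keeps a
 * large buffer live on its stack, so the JIT must handle stack depth
 * correctly around the tail call:
 *
 *	static __noinline int subprog_tail(struct __sk_buff *skb)
 *	{
 *		volatile char arr[128] = {};	// non-trivial stack usage
 *
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return skb->len * 2;
 *	}
 */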
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf4.skel.h"

/* test_tailcall_bpf2bpf_4 checks that the tail call counter is correctly
 * preserved across tail calls combined with bpf2bpf calls. To make sure that
 * the tail call counter behaves correctly, the bpf program goes through the
 * following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tail calls and start counting from subprog2,
 * where the loop begins. At the end of the test, the global counter must
 * equal 31: the tail call counter includes the first two tail calls
 * (MAX_TAIL_CALL_CNT is 33, and 33 - 2 = 31), whereas the global counter is
 * incremented only in the loop shown in the flow above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force the verifier to patch instructions. This allows us to ensure that
 * the jump logic remains correct with instruction movement.
 */
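/* A sketch of the counting loop described above, under the same assumptions
 * (illustrative, not the literal tailcall_bpf2bpf4 prog): subprog2 bumps the
 * global counter and re-enters slot 2, so only the looping tail calls are
 * counted by the test:
 *
 *	static __noinline int subprog2(struct __sk_buff *skb)
 *	{
 *		count++;
 *		bpf_tail_call_static(skb, &jmp_table, 2);
 *		return skb->len * 3;	// reached after the limit trips
 *	}
 */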
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");

out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf6.skel.h"

/* Tail call counting works even when there is data on the stack which is
 * not aligned to 8 bytes.
 */
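/* Sketch of the assumed trigger (illustrative, not the literal
 * tailcall_bpf2bpf6 prog): an odd-sized object on the stack leaves the
 * stack depth without 8-byte alignment when the tail call is made, which
 * the JIT's counter bookkeeping must tolerate:
 *
 *	SEC("tc")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		volatile char arr[3] = {};	// 3 bytes: breaks 8-byte alignment
 *
 *		bpf_tail_call_static(skb, &jmp_table, 0);
 *		return 0;
 *	}
 */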
static void test_tailcall_bpf2bpf_6(void)
{
	struct tailcall_bpf2bpf6 *obj;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	obj = tailcall_bpf2bpf6__open_and_load();
	if (!ASSERT_OK_PTR(obj, "open and load"))
		return;

	main_fd = bpf_program__fd(obj->progs.entry);
	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
		goto out;

	map_fd = bpf_map__fd(obj->maps.jmp_table);
	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
		goto out;

	prog_fd = bpf_program__fd(obj->progs.classifier_0);
	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "jmp_table map update"))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "entry prog test run");
	ASSERT_EQ(topts.retval, 0, "tailcall retval");

	data_fd = bpf_map__fd(obj->maps.bss);
	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "bss map lookup");
	ASSERT_EQ(val, 1, "done flag is set");

out:
	tailcall_bpf2bpf6__destroy(obj);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
}