// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

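	/* nop -> jmp: populate every jmp_table slot with its classifier. */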
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

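	/* Run and delete one slot per iteration: each delete patches the
	 * call site jmp -> nop.
	 */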
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

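	/* With every slot empty, the tail call falls through; expect 3. */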
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

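	/* jmp -> jmp: repopulate the slots in reverse order so each call
	 * site is redirected to a different target.
	 */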
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

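	/* nop -> nop: deleting an already empty slot must fail with ENOENT
	 * and leave the fallthrough behavior intact.
	 */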
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}

out:
	bpf_object__close(obj);
}

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 2, "tailcall retval");

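	/* With slot 2 removed, the chain ends one program earlier. */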
	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

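	/* With slot 0 removed, the entry program's tail call falls through
	 * and it returns 3.
	 */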
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
	bpf_object__close(obj);
}

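/* test_tailcall_count loads the given object file, wires classifier_0 into
 * slot 0 of the jmp_table and verifies via a counter in global data that
 * the tail call limit of 33 is enforced.
 */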
static void test_tailcall_count(const char *which)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses a direct jump.
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o");
}

/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses an indirect jump.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o");
}

/* test_tailcall_4 checks that the kernel properly selects an indirect jump
 * for the case where the key is not known. The key is passed via global
 * data so that different targets can be selected and their return values
 * compared.
 */
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

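	/* Select each target via the key in global data and compare its
	 * return value.
	 */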
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

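	/* Once a slot is deleted, the tail call must fall through; expect 3. */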
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 probes, similarly to test_tailcall_4, that the kernel
 * generates an indirect jump when the keys are constant but differ between
 * branches.
 */
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

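	/* Same exercise as in test_tailcall_4, but with non-contiguous
	 * constant keys.
	 */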
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tail calls work correctly in
 * combination with BPF subprograms.
 */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that the subprog can access ctx and that the entry prog
	 * that called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call.
 */
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within BPF subprograms that contain tail calls.
 */
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf4.skel.h"

/* test_tailcall_bpf2bpf_4 checks that the tailcall counter is correctly
 * preserved across tailcalls combined with bpf2bpf calls. To make sure that
 * the tailcall counter behaves correctly, the BPF program goes through the
 * following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tailcalls and start counting from subprog2,
 * where the loop begins. At the end of the test, the global counter must be
 * equal to 31, because the tailcall counter includes the first two tailcalls
 * whereas the global counter is incremented only in the loop presented in
 * the flow above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force the verifier to patch instructions. This allows us to ensure that
 * the jump logic remains correct with instruction movement.
 */
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");

out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf6.skel.h"

/* Tail call counting works even when there is data on the stack which is
 * not aligned to 8 bytes.
 */
static void test_tailcall_bpf2bpf_6(void)
{
	struct tailcall_bpf2bpf6 *obj;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	obj = tailcall_bpf2bpf6__open_and_load();
	if (!ASSERT_OK_PTR(obj, "open and load"))
		return;

	main_fd = bpf_program__fd(obj->progs.entry);
	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
		goto out;

	map_fd = bpf_map__fd(obj->maps.jmp_table);
	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
		goto out;

	prog_fd = bpf_program__fd(obj->progs.classifier_0);
	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "jmp_table map update"))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "entry prog test run");
	ASSERT_EQ(topts.retval, 0, "tailcall retval");

	data_fd = bpf_map__fd(obj->maps.bss);
	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "bss map lookup");
	ASSERT_EQ(val, 1, "done flag is set");

out:
	tailcall_bpf2bpf6__destroy(obj);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
}