// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

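	/* Expect each run to land in classifier_i and return i, then
	 * clear slot i again (jmp -> nop rewrite).
	 */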
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

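	/* Repopulate every slot again (nop -> jmp rewrite). */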
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

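	/* Install the programs in reverse order so every slot gets a
	 * different target (jmp -> jmp rewrite).
	 */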
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

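	/* All slots are empty now; deleting again must fail with ENOENT
	 * and the prog keeps taking the fallthrough path (nop -> nop).
	 */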
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}

out:
	bpf_object__close(obj);
}

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 2, "tailcall retval");

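	/* With slot 2 removed the chain ends one hop earlier and
	 * returns 1; with slot 0 removed as well, entry's tail call
	 * misses and it falls through, returning 3.
	 */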
	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
	bpf_object__close(obj);
}

static void test_tailcall_count(const char *which)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

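	/* classifier_0 bumps the counter in .bss on every tail call into
	 * itself; the kernel's tail call limit caps the chain at 33.
	 */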
	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses direct jump.
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o");
}

/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses indirect jump.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o");
}

/* test_tailcall_4 checks that the kernel properly selects the indirect jump
 * for the case where the key is not known at verification time. The key is
 * passed via global data to select different targets whose return values we
 * can compare.
 */
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

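	/* Write the run-time key into global data; the prog reads it to
	 * pick the tail call index, so each run lands in classifier_i.
	 */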
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 checks, similarly to test_tailcall_4, that the kernel
 * generates an indirect jump when the keys are const but differ between
 * branches.
 */
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

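	/* The prog compares the global key against the three known
	 * constants (1111, 1234, 5678) and tail calls slot 0, 1 or 2.
	 */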
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tail calls work correctly in
 * combination with BPF subprograms.
 */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that the subprog can access ctx and that the entry prog
	 * that called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches with expectations when the tail call is preceded by a
 * bpf2bpf call.
 */
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within bpf subprograms that have the tailcalls
 * in them.
 */
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf4.skel.h"

/* test_tailcall_bpf2bpf_4 checks that the tailcall counter is correctly
 * preserved across tailcalls combined with bpf2bpf calls. To make sure that
 * the tailcall counter behaves correctly, the bpf program will go through the
 * following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tailcalls and start counting from subprog2,
 * where the loop begins. At the end of the test, make sure that the global
 * counter is equal to 31, because the tailcall counter includes the first
 * two tailcalls whereas the global counter is incremented only in the loop
 * shown in the flow above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force the verifier to patch instructions. This allows us to ensure that
 * the jump logic remains correct with instruction movement.
 */
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

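	/* Zero the global counter and set the noise knob before the run. */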
	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");

out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf6.skel.h"

/* Tail call counting works even when there is data on the stack which is
 * not aligned to 8 bytes.
 */
static void test_tailcall_bpf2bpf_6(void)
{
	struct tailcall_bpf2bpf6 *obj;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	obj = tailcall_bpf2bpf6__open_and_load();
	if (!ASSERT_OK_PTR(obj, "open and load"))
		return;

	main_fd = bpf_program__fd(obj->progs.entry);
	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
		goto out;

	map_fd = bpf_map__fd(obj->maps.jmp_table);
	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
		goto out;

	prog_fd = bpf_program__fd(obj->progs.classifier_0);
	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "jmp_table map update"))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "entry prog test run");
	ASSERT_EQ(topts.retval, 0, "tailcall retval");

	data_fd = bpf_map__fd(obj->maps.bss);
	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
		goto out;

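	/* On success the BPF side sets the done flag in .bss to 1. */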
	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "bss map lookup");
	ASSERT_EQ(val, 1, "done flag is set");

out:
	tailcall_bpf2bpf6__destroy(obj);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
}