1// SPDX-License-Identifier: GPL-2.0
2#include "bpf_misc.h"
3#include "bpf_experimental.h"
4
5struct {
6 __uint(type, BPF_MAP_TYPE_ARRAY);
7 __uint(max_entries, 8);
8 __type(key, __u32);
9 __type(value, __u64);
10} map SEC(".maps");
11
12struct {
13 __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
14 __uint(max_entries, 8);
15} ringbuf SEC(".maps");
16
17struct vm_area_struct;
18struct bpf_map;
19
20struct buf_context {
21 char *buf;
22};
23
24struct num_context {
25 __u64 i;
26 __u64 j;
27};
28
29__u8 choice_arr[2] = { 0, 1 };
30
31static int unsafe_on_2nd_iter_cb(__u32 idx, struct buf_context *ctx)
32{
33 if (idx == 0) {
34 ctx->buf = (char *)(0xDEAD);
35 return 0;
36 }
37
38 if (bpf_probe_read_user(ctx->buf, 8, (void *)(0xBADC0FFEE)))
39 return 1;
40
41 return 0;
42}
43
44SEC("?raw_tp")
45__failure __msg("R1 type=scalar expected=fp")
46int unsafe_on_2nd_iter(void *unused)
47{
48 char buf[4];
49 struct buf_context loop_ctx = { .buf = buf };
50
51 bpf_loop(100, unsafe_on_2nd_iter_cb, &loop_ctx, 0);
52 return 0;
53}
54
55static int unsafe_on_zero_iter_cb(__u32 idx, struct num_context *ctx)
56{
57 ctx->i = 0;
58 return 0;
59}
60
61SEC("?raw_tp")
62__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
63int unsafe_on_zero_iter(void *unused)
64{
65 struct num_context loop_ctx = { .i = 32 };
66
67 bpf_loop(100, unsafe_on_zero_iter_cb, &loop_ctx, 0);
68 return choice_arr[loop_ctx.i];
69}
70
71static int widening_cb(__u32 idx, struct num_context *ctx)
72{
73 ++ctx->i;
74 return 0;
75}
76
77SEC("?raw_tp")
78__success
79int widening(void *unused)
80{
81 struct num_context loop_ctx = { .i = 0, .j = 1 };
82
83 bpf_loop(100, widening_cb, &loop_ctx, 0);
84 /* loop_ctx.j is not changed during callback iteration,
85 * verifier should not apply widening to it.
86 */
87 return choice_arr[loop_ctx.j];
88}
89
90static int loop_detection_cb(__u32 idx, struct num_context *ctx)
91{
92 for (;;) {}
93 return 0;
94}
95
96SEC("?raw_tp")
97__failure __msg("infinite loop detected")
98int loop_detection(void *unused)
99{
100 struct num_context loop_ctx = { .i = 0 };
101
102 bpf_loop(100, loop_detection_cb, &loop_ctx, 0);
103 return 0;
104}
105
106static __always_inline __u64 oob_state_machine(struct num_context *ctx)
107{
108 switch (ctx->i) {
109 case 0:
110 ctx->i = 1;
111 break;
112 case 1:
113 ctx->i = 32;
114 break;
115 }
116 return 0;
117}
118
119static __u64 for_each_map_elem_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
120{
121 return oob_state_machine(data);
122}
123
124SEC("?raw_tp")
125__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
126int unsafe_for_each_map_elem(void *unused)
127{
128 struct num_context loop_ctx = { .i = 0 };
129
130 bpf_for_each_map_elem(&map, for_each_map_elem_cb, &loop_ctx, 0);
131 return choice_arr[loop_ctx.i];
132}
133
134static __u64 ringbuf_drain_cb(struct bpf_dynptr *dynptr, void *data)
135{
136 return oob_state_machine(data);
137}
138
139SEC("?raw_tp")
140__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
141int unsafe_ringbuf_drain(void *unused)
142{
143 struct num_context loop_ctx = { .i = 0 };
144
145 bpf_user_ringbuf_drain(&ringbuf, ringbuf_drain_cb, &loop_ctx, 0);
146 return choice_arr[loop_ctx.i];
147}
148
149static __u64 find_vma_cb(struct task_struct *task, struct vm_area_struct *vma, void *data)
150{
151 return oob_state_machine(data);
152}
153
154SEC("?raw_tp")
155__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
156int unsafe_find_vma(void *unused)
157{
158 struct task_struct *task = bpf_get_current_task_btf();
159 struct num_context loop_ctx = { .i = 0 };
160
161 bpf_find_vma(task, 0, find_vma_cb, &loop_ctx, 0);
162 return choice_arr[loop_ctx.i];
163}
164
165static int iter_limit_cb(__u32 idx, struct num_context *ctx)
166{
167 ctx->i++;
168 return 0;
169}
170
171SEC("?raw_tp")
172__success
173int bpf_loop_iter_limit_ok(void *unused)
174{
175 struct num_context ctx = { .i = 0 };
176
177 bpf_loop(1, iter_limit_cb, &ctx, 0);
178 return choice_arr[ctx.i];
179}
180
181SEC("?raw_tp")
182__failure __msg("invalid access to map value, value_size=2 off=2 size=1")
183int bpf_loop_iter_limit_overflow(void *unused)
184{
185 struct num_context ctx = { .i = 0 };
186
187 bpf_loop(2, iter_limit_cb, &ctx, 0);
188 return choice_arr[ctx.i];
189}
190
191static int iter_limit_level2a_cb(__u32 idx, struct num_context *ctx)
192{
193 ctx->i += 100;
194 return 0;
195}
196
197static int iter_limit_level2b_cb(__u32 idx, struct num_context *ctx)
198{
199 ctx->i += 10;
200 return 0;
201}
202
203static int iter_limit_level1_cb(__u32 idx, struct num_context *ctx)
204{
205 ctx->i += 1;
206 bpf_loop(1, iter_limit_level2a_cb, ctx, 0);
207 bpf_loop(1, iter_limit_level2b_cb, ctx, 0);
208 return 0;
209}
210
211/* Check that path visiting every callback function once had been
212 * reached by verifier. Variables 'ctx{1,2}i' below serve as flags,
213 * with each decimal digit corresponding to a callback visit marker.
214 */
215SEC("socket")
216__success __retval(111111)
217int bpf_loop_iter_limit_nested(void *unused)
218{
219 struct num_context ctx1 = { .i = 0 };
220 struct num_context ctx2 = { .i = 0 };
221 __u64 a, b, c;
222
223 bpf_loop(1, iter_limit_level1_cb, &ctx1, 0);
224 bpf_loop(1, iter_limit_level1_cb, &ctx2, 0);
225 a = ctx1.i;
226 b = ctx2.i;
227 /* Force 'ctx1.i' and 'ctx2.i' precise. */
228 c = choice_arr[(a + b) % 2];
229 /* This makes 'c' zero, but neither clang nor verifier know it. */
230 c /= 10;
231 /* Make sure that verifier does not visit 'impossible' states:
232 * enumerate all possible callback visit masks.
233 */
234 if (a != 0 && a != 1 && a != 11 && a != 101 && a != 111 &&
235 b != 0 && b != 1 && b != 11 && b != 101 && b != 111)
236 asm volatile ("r0 /= 0;" ::: "r0");
237 return 1000 * a + b + c;
238}
239
240struct iter_limit_bug_ctx {
241 __u64 a;
242 __u64 b;
243 __u64 c;
244};
245
246static __naked void iter_limit_bug_cb(void)
247{
248 /* This is the same as C code below, but written
249 * in assembly to control which branches are fall-through.
250 *
251 * switch (bpf_get_prandom_u32()) {
252 * case 1: ctx->a = 42; break;
253 * case 2: ctx->b = 42; break;
254 * default: ctx->c = 42; break;
255 * }
256 */
257 asm volatile (
258 "r9 = r2;"
259 "call %[bpf_get_prandom_u32];"
260 "r1 = r0;"
261 "r2 = 42;"
262 "r0 = 0;"
263 "if r1 == 0x1 goto 1f;"
264 "if r1 == 0x2 goto 2f;"
265 "*(u64 *)(r9 + 16) = r2;"
266 "exit;"
267 "1: *(u64 *)(r9 + 0) = r2;"
268 "exit;"
269 "2: *(u64 *)(r9 + 8) = r2;"
270 "exit;"
271 :
272 : __imm(bpf_get_prandom_u32)
273 : __clobber_all
274 );
275}
276
277int tmp_var;
278SEC("socket")
279__failure __msg("infinite loop detected at insn 2")
280__naked void jgt_imm64_and_may_goto(void)
281{
282 asm volatile (" \
283 r0 = %[tmp_var] ll; \
284l0_%=: .byte 0xe5; /* may_goto */ \
285 .byte 0; /* regs */ \
286 .short -3; /* off -3 */ \
287 .long 0; /* imm */ \
288 if r0 > 10 goto l0_%=; \
289 r0 = 0; \
290 exit; \
291" :: __imm_addr(tmp_var)
292 : __clobber_all);
293}
294
295SEC("socket")
296__failure __msg("infinite loop detected at insn 1")
297__naked void may_goto_self(void)
298{
299 asm volatile (" \
300 r0 = *(u32 *)(r10 - 4); \
301l0_%=: .byte 0xe5; /* may_goto */ \
302 .byte 0; /* regs */ \
303 .short -1; /* off -1 */ \
304 .long 0; /* imm */ \
305 if r0 > 10 goto l0_%=; \
306 r0 = 0; \
307 exit; \
308" ::: __clobber_all);
309}
310
311SEC("socket")
312__success __retval(0)
313__naked void may_goto_neg_off(void)
314{
315 asm volatile (" \
316 r0 = *(u32 *)(r10 - 4); \
317 goto l0_%=; \
318 goto l1_%=; \
319l0_%=: .byte 0xe5; /* may_goto */ \
320 .byte 0; /* regs */ \
321 .short -2; /* off -2 */ \
322 .long 0; /* imm */ \
323 if r0 > 10 goto l0_%=; \
324l1_%=: r0 = 0; \
325 exit; \
326" ::: __clobber_all);
327}
328
329SEC("tc")
330__failure
331__flag(BPF_F_TEST_STATE_FREQ)
332int iter_limit_bug(struct __sk_buff *skb)
333{
334 struct iter_limit_bug_ctx ctx = { 7, 7, 7 };
335
336 bpf_loop(2, iter_limit_bug_cb, &ctx, 0);
337
338 /* This is the same as C code below,
339 * written in assembly to guarantee checks order.
340 *
341 * if (ctx.a == 42 && ctx.b == 42 && ctx.c == 7)
342 * asm volatile("r1 /= 0;":::"r1");
343 */
344 asm volatile (
345 "r1 = *(u64 *)%[ctx_a];"
346 "if r1 != 42 goto 1f;"
347 "r1 = *(u64 *)%[ctx_b];"
348 "if r1 != 42 goto 1f;"
349 "r1 = *(u64 *)%[ctx_c];"
350 "if r1 != 7 goto 1f;"
351 "r1 /= 0;"
352 "1:"
353 :
354 : [ctx_a]"m"(ctx.a),
355 [ctx_b]"m"(ctx.b),
356 [ctx_c]"m"(ctx.c)
357 : "r1"
358 );
359 return 0;
360}
361
362SEC("socket")
363__success __retval(0)
364__naked void ja_and_may_goto(void)
365{
366 asm volatile (" \
367l0_%=: .byte 0xe5; /* may_goto */ \
368 .byte 0; /* regs */ \
369 .short 1; /* off 1 */ \
370 .long 0; /* imm */ \
371 goto l0_%=; \
372 r0 = 0; \
373 exit; \
374" ::: __clobber_common);
375}
376
377SEC("socket")
378__success __retval(0)
379__naked void ja_and_may_goto2(void)
380{
381 asm volatile (" \
382l0_%=: r0 = 0; \
383 .byte 0xe5; /* may_goto */ \
384 .byte 0; /* regs */ \
385 .short 1; /* off 1 */ \
386 .long 0; /* imm */ \
387 goto l0_%=; \
388 r0 = 0; \
389 exit; \
390" ::: __clobber_common);
391}
392
393SEC("socket")
394__success __retval(0)
395__naked void jlt_and_may_goto(void)
396{
397 asm volatile (" \
398l0_%=: call %[bpf_jiffies64]; \
399 .byte 0xe5; /* may_goto */ \
400 .byte 0; /* regs */ \
401 .short 1; /* off 1 */ \
402 .long 0; /* imm */ \
403 if r0 < 10 goto l0_%=; \
404 r0 = 0; \
405 exit; \
406" :: __imm(bpf_jiffies64)
407 : __clobber_all);
408}
409
/* 'gotol' (32-bit-offset jump) needs clang >= 18 and a 64-bit-capable
 * target; gate accordingly.
 */
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
	defined(__TARGET_ARCH_loongarch)) && \
	__clang_major__ >= 18
/* Backward 'gotol' loop whose body is a may_goto — must verify. */
SEC("socket")
__success __retval(0)
__naked void gotol_and_may_goto(void)
{
	asm volatile ("			\
l0_%=:	r0 = 0;				\
	.byte 0xe5; /* may_goto */	\
	.byte 0; /* regs */		\
	.short 1; /* off 1 */		\
	.long 0; /* imm */		\
	gotol l0_%=;			\
	r0 = 0;				\
	exit;				\
"	::: __clobber_common);
}
#endif
431
432SEC("socket")
433__success __retval(0)
434__naked void ja_and_may_goto_subprog(void)
435{
436 asm volatile (" \
437 call subprog_with_may_goto; \
438 exit; \
439" ::: __clobber_all);
440}
441
442static __naked __noinline __used
443void subprog_with_may_goto(void)
444{
445 asm volatile (" \
446l0_%=: .byte 0xe5; /* may_goto */ \
447 .byte 0; /* regs */ \
448 .short 1; /* off 1 */ \
449 .long 0; /* imm */ \
450 goto l0_%=; \
451 r0 = 0; \
452 exit; \
453" ::: __clobber_all);
454}
455
456#define ARR_SZ 1000000
457int zero;
458char arr[ARR_SZ];
459
460SEC("socket")
461__success __retval(0xd495cdc0)
462int cond_break1(const void *ctx)
463{
464 unsigned long i;
465 unsigned int sum = 0;
466
467 for (i = zero; i < ARR_SZ && can_loop; i++)
468 sum += i;
469 for (i = zero; i < ARR_SZ; i++) {
470 barrier_var(i);
471 sum += i + arr[i];
472 cond_break;
473 }
474
475 return sum;
476}
477
478SEC("socket")
479__success __retval(999000000)
480int cond_break2(const void *ctx)
481{
482 int i, j;
483 int sum = 0;
484
485 for (i = zero; i < 1000 && can_loop; i++)
486 for (j = zero; j < 1000; j++) {
487 sum += i + j;
488 cond_break;
489 }
490 return sum;
491}
492
493static __noinline int loop(void)
494{
495 int i, sum = 0;
496
497 for (i = zero; i <= 1000000 && can_loop; i++)
498 sum += i;
499
500 return sum;
501}
502
503SEC("socket")
504__success __retval(0x6a5a2920)
505int cond_break3(const void *ctx)
506{
507 return loop();
508}
509
510SEC("socket")
511__success __retval(1)
512int cond_break4(const void *ctx)
513{
514 int cnt = zero;
515
516 for (;;) {
517 /* should eventually break out of the loop */
518 cond_break;
519 cnt++;
520 }
521 /* if we looped a bit, it's a success */
522 return cnt > 1 ? 1 : 0;
523}
524
525static __noinline int static_subprog(void)
526{
527 int cnt = zero;
528
529 for (;;) {
530 cond_break;
531 cnt++;
532 }
533
534 return cnt;
535}
536
537SEC("socket")
538__success __retval(1)
539int cond_break5(const void *ctx)
540{
541 int cnt1 = zero, cnt2;
542
543 for (;;) {
544 cond_break;
545 cnt1++;
546 }
547
548 cnt2 = static_subprog();
549
550 /* main and subprog have to loop a bit */
551 return cnt1 > 1 && cnt2 > 1 ? 1 : 0;
552}
553
554#define ARR2_SZ 1000
555SEC(".data.arr2")
556char arr2[ARR2_SZ];
557
558SEC("socket")
559__success __flag(BPF_F_TEST_STATE_FREQ)
560int loop_inside_iter(const void *ctx)
561{
562 struct bpf_iter_num it;
563 int *v, sum = 0;
564 __u64 i = 0;
565
566 bpf_iter_num_new(&it, 0, ARR2_SZ);
567 while ((v = bpf_iter_num_next(&it))) {
568 if (i < ARR2_SZ)
569 sum += arr2[i++];
570 }
571 bpf_iter_num_destroy(&it);
572 return sum;
573}
574
575SEC("socket")
576__success __flag(BPF_F_TEST_STATE_FREQ)
577int loop_inside_iter_signed(const void *ctx)
578{
579 struct bpf_iter_num it;
580 int *v, sum = 0;
581 long i = 0;
582
583 bpf_iter_num_new(&it, 0, ARR2_SZ);
584 while ((v = bpf_iter_num_next(&it))) {
585 if (i < ARR2_SZ && i >= 0)
586 sum += arr2[i++];
587 }
588 bpf_iter_num_destroy(&it);
589 return sum;
590}
591
592volatile const int limit = ARR2_SZ;
593
594SEC("socket")
595__success __flag(BPF_F_TEST_STATE_FREQ)
596int loop_inside_iter_volatile_limit(const void *ctx)
597{
598 struct bpf_iter_num it;
599 int *v, sum = 0;
600 __u64 i = 0;
601
602 bpf_iter_num_new(&it, 0, ARR2_SZ);
603 while ((v = bpf_iter_num_next(&it))) {
604 if (i < limit)
605 sum += arr2[i++];
606 }
607 bpf_iter_num_destroy(&it);
608 return sum;
609}
610
611#define ARR_LONG_SZ 1000
612
613SEC(".data.arr_long")
614long arr_long[ARR_LONG_SZ];
615
616SEC("socket")
617__success
618int test1(const void *ctx)
619{
620 long i;
621
622 for (i = 0; i < ARR_LONG_SZ && can_loop; i++)
623 arr_long[i] = i;
624 return 0;
625}
626
627SEC("socket")
628__success
629int test2(const void *ctx)
630{
631 __u64 i;
632
633 for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
634 barrier_var(i);
635 arr_long[i] = i;
636 }
637 return 0;
638}
639
640SEC(".data.arr_foo")
641struct {
642 int a;
643 int b;
644} arr_foo[ARR_LONG_SZ];
645
646SEC("socket")
647__success
648int test3(const void *ctx)
649{
650 __u64 i;
651
652 for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
653 barrier_var(i);
654 arr_foo[i].a = i;
655 arr_foo[i].b = i;
656 }
657 return 0;
658}
659
660SEC("socket")
661__success
662int test4(const void *ctx)
663{
664 long i;
665
666 for (i = zero + ARR_LONG_SZ - 1; i < ARR_LONG_SZ && i >= 0 && can_loop; i--) {
667 barrier_var(i);
668 arr_foo[i].a = i;
669 arr_foo[i].b = i;
670 }
671 return 0;
672}
673
674char buf[10] SEC(".data.buf");
675
676SEC("socket")
677__description("check add const")
678__success
679__naked void check_add_const(void)
680{
681 /* typical LLVM generated loop with may_goto */
682 asm volatile (" \
683 call %[bpf_ktime_get_ns]; \
684 if r0 > 9 goto l1_%=; \
685l0_%=: r1 = %[buf]; \
686 r2 = r0; \
687 r1 += r2; \
688 r3 = *(u8 *)(r1 +0); \
689 .byte 0xe5; /* may_goto */ \
690 .byte 0; /* regs */ \
691 .short 4; /* off of l1_%=: */ \
692 .long 0; /* imm */ \
693 r0 = r2; \
694 r0 += 1; \
695 if r2 < 9 goto l0_%=; \
696 exit; \
697l1_%=: r0 = 0; \
698 exit; \
699" :
700 : __imm(bpf_ktime_get_ns),
701 __imm_ptr(buf)
702 : __clobber_common);
703}
704
705SEC("socket")
706__failure
707__msg("*(u8 *)(r7 +0) = r0")
708__msg("invalid access to map value, value_size=10 off=10 size=1")
709__naked void check_add_const_3regs(void)
710{
711 asm volatile (
712 "r6 = %[buf];"
713 "r7 = %[buf];"
714 "call %[bpf_ktime_get_ns];"
715 "r1 = r0;" /* link r0.id == r1.id == r2.id */
716 "r2 = r0;"
717 "r1 += 1;" /* r1 == r0+1 */
718 "r2 += 2;" /* r2 == r0+2 */
719 "if r0 > 8 goto 1f;" /* r0 range [0, 8] */
720 "r6 += r1;" /* r1 range [1, 9] */
721 "r7 += r2;" /* r2 range [2, 10] */
722 "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */
723 "*(u8 *)(r7 +0) = r0;" /* unsafe, out of bounds */
724 "1: exit;"
725 :
726 : __imm(bpf_ktime_get_ns),
727 __imm_ptr(buf)
728 : __clobber_common);
729}
730
731SEC("socket")
732__failure
733__msg("*(u8 *)(r8 -1) = r0")
734__msg("invalid access to map value, value_size=10 off=10 size=1")
735__naked void check_add_const_3regs_2if(void)
736{
737 asm volatile (
738 "r6 = %[buf];"
739 "r7 = %[buf];"
740 "r8 = %[buf];"
741 "call %[bpf_ktime_get_ns];"
742 "if r0 < 2 goto 1f;"
743 "r1 = r0;" /* link r0.id == r1.id == r2.id */
744 "r2 = r0;"
745 "r1 += 1;" /* r1 == r0+1 */
746 "r2 += 2;" /* r2 == r0+2 */
747 "if r2 > 11 goto 1f;" /* r2 range [0, 11] -> r0 range [-2, 9]; r1 range [-1, 10] */
748 "if r0 s< 0 goto 1f;" /* r0 range [0, 9] -> r1 range [1, 10]; r2 range [2, 11]; */
749 "r6 += r0;" /* r0 range [0, 9] */
750 "r7 += r1;" /* r1 range [1, 10] */
751 "r8 += r2;" /* r2 range [2, 11] */
752 "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */
753 "*(u8 *)(r7 -1) = r0;" /* safe */
754 "*(u8 *)(r8 -1) = r0;" /* unsafe */
755 "1: exit;"
756 :
757 : __imm(bpf_ktime_get_ns),
758 __imm_ptr(buf)
759 : __clobber_common);
760}
761
762SEC("socket")
763__failure
764__flag(BPF_F_TEST_STATE_FREQ)
765__naked void check_add_const_regsafe_off(void)
766{
767 asm volatile (
768 "r8 = %[buf];"
769 "call %[bpf_ktime_get_ns];"
770 "r6 = r0;"
771 "call %[bpf_ktime_get_ns];"
772 "r7 = r0;"
773 "call %[bpf_ktime_get_ns];"
774 "r1 = r0;" /* same ids for r1 and r0 */
775 "if r6 > r7 goto 1f;" /* this jump can't be predicted */
776 "r1 += 1;" /* r1.off == +1 */
777 "goto 2f;"
778 "1: r1 += 100;" /* r1.off == +100 */
779 "goto +0;" /* verify r1.off in regsafe() after this insn */
780 "2: if r0 > 8 goto 3f;" /* r0 range [0,8], r1 range either [1,9] or [100,108]*/
781 "r8 += r1;"
782 "*(u8 *)(r8 +0) = r0;" /* potentially unsafe, buf size is 10 */
783 "3: exit;"
784 :
785 : __imm(bpf_ktime_get_ns),
786 __imm_ptr(buf)
787 : __clobber_common);
788}
789
790char _license[] SEC("license") = "GPL";
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/bpf.h>
4#include <bpf/bpf_helpers.h>
5#include "bpf_misc.h"
6
7struct {
8 __uint(type, BPF_MAP_TYPE_ARRAY);
9 __uint(max_entries, 8);
10 __type(key, __u32);
11 __type(value, __u64);
12} map SEC(".maps");
13
14struct {
15 __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
16 __uint(max_entries, 8);
17} ringbuf SEC(".maps");
18
19struct vm_area_struct;
20struct bpf_map;
21
22struct buf_context {
23 char *buf;
24};
25
26struct num_context {
27 __u64 i;
28 __u64 j;
29};
30
31__u8 choice_arr[2] = { 0, 1 };
32
33static int unsafe_on_2nd_iter_cb(__u32 idx, struct buf_context *ctx)
34{
35 if (idx == 0) {
36 ctx->buf = (char *)(0xDEAD);
37 return 0;
38 }
39
40 if (bpf_probe_read_user(ctx->buf, 8, (void *)(0xBADC0FFEE)))
41 return 1;
42
43 return 0;
44}
45
46SEC("?raw_tp")
47__failure __msg("R1 type=scalar expected=fp")
48int unsafe_on_2nd_iter(void *unused)
49{
50 char buf[4];
51 struct buf_context loop_ctx = { .buf = buf };
52
53 bpf_loop(100, unsafe_on_2nd_iter_cb, &loop_ctx, 0);
54 return 0;
55}
56
57static int unsafe_on_zero_iter_cb(__u32 idx, struct num_context *ctx)
58{
59 ctx->i = 0;
60 return 0;
61}
62
63SEC("?raw_tp")
64__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
65int unsafe_on_zero_iter(void *unused)
66{
67 struct num_context loop_ctx = { .i = 32 };
68
69 bpf_loop(100, unsafe_on_zero_iter_cb, &loop_ctx, 0);
70 return choice_arr[loop_ctx.i];
71}
72
73static int widening_cb(__u32 idx, struct num_context *ctx)
74{
75 ++ctx->i;
76 return 0;
77}
78
79SEC("?raw_tp")
80__success
81int widening(void *unused)
82{
83 struct num_context loop_ctx = { .i = 0, .j = 1 };
84
85 bpf_loop(100, widening_cb, &loop_ctx, 0);
86 /* loop_ctx.j is not changed during callback iteration,
87 * verifier should not apply widening to it.
88 */
89 return choice_arr[loop_ctx.j];
90}
91
92static int loop_detection_cb(__u32 idx, struct num_context *ctx)
93{
94 for (;;) {}
95 return 0;
96}
97
98SEC("?raw_tp")
99__failure __msg("infinite loop detected")
100int loop_detection(void *unused)
101{
102 struct num_context loop_ctx = { .i = 0 };
103
104 bpf_loop(100, loop_detection_cb, &loop_ctx, 0);
105 return 0;
106}
107
108static __always_inline __u64 oob_state_machine(struct num_context *ctx)
109{
110 switch (ctx->i) {
111 case 0:
112 ctx->i = 1;
113 break;
114 case 1:
115 ctx->i = 32;
116 break;
117 }
118 return 0;
119}
120
121static __u64 for_each_map_elem_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
122{
123 return oob_state_machine(data);
124}
125
126SEC("?raw_tp")
127__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
128int unsafe_for_each_map_elem(void *unused)
129{
130 struct num_context loop_ctx = { .i = 0 };
131
132 bpf_for_each_map_elem(&map, for_each_map_elem_cb, &loop_ctx, 0);
133 return choice_arr[loop_ctx.i];
134}
135
136static __u64 ringbuf_drain_cb(struct bpf_dynptr *dynptr, void *data)
137{
138 return oob_state_machine(data);
139}
140
141SEC("?raw_tp")
142__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
143int unsafe_ringbuf_drain(void *unused)
144{
145 struct num_context loop_ctx = { .i = 0 };
146
147 bpf_user_ringbuf_drain(&ringbuf, ringbuf_drain_cb, &loop_ctx, 0);
148 return choice_arr[loop_ctx.i];
149}
150
151static __u64 find_vma_cb(struct task_struct *task, struct vm_area_struct *vma, void *data)
152{
153 return oob_state_machine(data);
154}
155
156SEC("?raw_tp")
157__failure __msg("invalid access to map value, value_size=2 off=32 size=1")
158int unsafe_find_vma(void *unused)
159{
160 struct task_struct *task = bpf_get_current_task_btf();
161 struct num_context loop_ctx = { .i = 0 };
162
163 bpf_find_vma(task, 0, find_vma_cb, &loop_ctx, 0);
164 return choice_arr[loop_ctx.i];
165}
166
167static int iter_limit_cb(__u32 idx, struct num_context *ctx)
168{
169 ctx->i++;
170 return 0;
171}
172
173SEC("?raw_tp")
174__success
175int bpf_loop_iter_limit_ok(void *unused)
176{
177 struct num_context ctx = { .i = 0 };
178
179 bpf_loop(1, iter_limit_cb, &ctx, 0);
180 return choice_arr[ctx.i];
181}
182
183SEC("?raw_tp")
184__failure __msg("invalid access to map value, value_size=2 off=2 size=1")
185int bpf_loop_iter_limit_overflow(void *unused)
186{
187 struct num_context ctx = { .i = 0 };
188
189 bpf_loop(2, iter_limit_cb, &ctx, 0);
190 return choice_arr[ctx.i];
191}
192
193static int iter_limit_level2a_cb(__u32 idx, struct num_context *ctx)
194{
195 ctx->i += 100;
196 return 0;
197}
198
199static int iter_limit_level2b_cb(__u32 idx, struct num_context *ctx)
200{
201 ctx->i += 10;
202 return 0;
203}
204
205static int iter_limit_level1_cb(__u32 idx, struct num_context *ctx)
206{
207 ctx->i += 1;
208 bpf_loop(1, iter_limit_level2a_cb, ctx, 0);
209 bpf_loop(1, iter_limit_level2b_cb, ctx, 0);
210 return 0;
211}
212
213/* Check that path visiting every callback function once had been
214 * reached by verifier. Variables 'ctx{1,2}i' below serve as flags,
215 * with each decimal digit corresponding to a callback visit marker.
216 */
217SEC("socket")
218__success __retval(111111)
219int bpf_loop_iter_limit_nested(void *unused)
220{
221 struct num_context ctx1 = { .i = 0 };
222 struct num_context ctx2 = { .i = 0 };
223 __u64 a, b, c;
224
225 bpf_loop(1, iter_limit_level1_cb, &ctx1, 0);
226 bpf_loop(1, iter_limit_level1_cb, &ctx2, 0);
227 a = ctx1.i;
228 b = ctx2.i;
229 /* Force 'ctx1.i' and 'ctx2.i' precise. */
230 c = choice_arr[(a + b) % 2];
231 /* This makes 'c' zero, but neither clang nor verifier know it. */
232 c /= 10;
233 /* Make sure that verifier does not visit 'impossible' states:
234 * enumerate all possible callback visit masks.
235 */
236 if (a != 0 && a != 1 && a != 11 && a != 101 && a != 111 &&
237 b != 0 && b != 1 && b != 11 && b != 101 && b != 111)
238 asm volatile ("r0 /= 0;" ::: "r0");
239 return 1000 * a + b + c;
240}
241
242struct iter_limit_bug_ctx {
243 __u64 a;
244 __u64 b;
245 __u64 c;
246};
247
248static __naked void iter_limit_bug_cb(void)
249{
250 /* This is the same as C code below, but written
251 * in assembly to control which branches are fall-through.
252 *
253 * switch (bpf_get_prandom_u32()) {
254 * case 1: ctx->a = 42; break;
255 * case 2: ctx->b = 42; break;
256 * default: ctx->c = 42; break;
257 * }
258 */
259 asm volatile (
260 "r9 = r2;"
261 "call %[bpf_get_prandom_u32];"
262 "r1 = r0;"
263 "r2 = 42;"
264 "r0 = 0;"
265 "if r1 == 0x1 goto 1f;"
266 "if r1 == 0x2 goto 2f;"
267 "*(u64 *)(r9 + 16) = r2;"
268 "exit;"
269 "1: *(u64 *)(r9 + 0) = r2;"
270 "exit;"
271 "2: *(u64 *)(r9 + 8) = r2;"
272 "exit;"
273 :
274 : __imm(bpf_get_prandom_u32)
275 : __clobber_all
276 );
277}
278
279SEC("tc")
280__failure
281__flag(BPF_F_TEST_STATE_FREQ)
282int iter_limit_bug(struct __sk_buff *skb)
283{
284 struct iter_limit_bug_ctx ctx = { 7, 7, 7 };
285
286 bpf_loop(2, iter_limit_bug_cb, &ctx, 0);
287
288 /* This is the same as C code below,
289 * written in assembly to guarantee checks order.
290 *
291 * if (ctx.a == 42 && ctx.b == 42 && ctx.c == 7)
292 * asm volatile("r1 /= 0;":::"r1");
293 */
294 asm volatile (
295 "r1 = *(u64 *)%[ctx_a];"
296 "if r1 != 42 goto 1f;"
297 "r1 = *(u64 *)%[ctx_b];"
298 "if r1 != 42 goto 1f;"
299 "r1 = *(u64 *)%[ctx_c];"
300 "if r1 != 7 goto 1f;"
301 "r1 /= 0;"
302 "1:"
303 :
304 : [ctx_a]"m"(ctx.a),
305 [ctx_b]"m"(ctx.b),
306 [ctx_c]"m"(ctx.c)
307 : "r1"
308 );
309 return 0;
310}
311
312char _license[] SEC("license") = "GPL";