1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/bpf.h>
4#include <bpf/bpf_helpers.h>
5#include "bpf_misc.h"
6
/* Check that precision marks propagate through scalar IDs.
 * Registers r{0,1,2} have the same scalar ID.
 * Range information is propagated for scalars sharing same ID.
 * Check that precision mark for r0 causes precision marks for r{1,2}
 * when range information is propagated for 'if <reg> <op> <const>' insn.
 */
SEC("socket")
__success __log_level(2)
/* first 'if' branch */
__msg("6: (0f) r3 += r0")
__msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0")
__msg("frame0: parent state regs=r0,r1,r2 stack=:")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
/* second 'if' branch */
__msg("from 4 to 5: ")
__msg("6: (0f) r3 += r0")
__msg("frame0: regs=r0 stack= before 5: (bf) r3 = r10")
__msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0")
/* parent state already has r{0,1,2} as precise */
__msg("frame0: parent state regs= stack=:")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void linked_regs_bpf_k(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	/* 'goto +0' fork creates a checkpoint in both branches */
	"if r1 > 7 goto +0;"
	/* force r0 to be precise, this eventually marks r1 and r2 as
	 * precise as well because of shared IDs
	 */
	"r3 = r10;"
	"r3 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
49
/* Registers r{0,1,2} share same ID when 'if r1 > ...' insn is processed,
 * check that verifier marks r{1,2} as precise while backtracking
 * 'if r1 > ...' with r0 already marked.
 */
SEC("socket")
__success __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
__msg("frame0: regs=r0 stack= before 5: (2d) if r1 > r3 goto pc+0")
__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:")
__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7")
__naked void linked_regs_bpf_x_src(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	/* r3 is a known constant used as the jump's src register */
	"r3 = 7;"
	"if r1 > r3 goto +0;"
	/* force r0 to be precise, this eventually marks r1 and r2 as
	 * precise as well because of shared IDs
	 */
	"r4 = r10;"
	"r4 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
82
/* Registers r{0,1,2} share same ID when 'if r1 > r3' insn is processed,
 * check that verifier marks r{0,1,2} as precise while backtracking
 * 'if r1 > r3' with r3 already marked.
 */
SEC("socket")
__success __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
__msg("frame0: regs=r3 stack= before 5: (2d) if r1 > r3 goto pc+0")
__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:")
__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7")
__naked void linked_regs_bpf_x_dst(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	"r3 = 7;"
	"if r1 > r3 goto +0;"
	/* force r3 (not r0) to be precise; backtracking the jump then
	 * propagates precision to the linked registers r{0,1,2}
	 */
	"r4 = r10;"
	"r4 += r3;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
115
/* Same as linked_regs_bpf_k, but break one of the
 * links, note that r1 is absent from regs=... in __msg below.
 */
SEC("socket")
__success __log_level(2)
__msg("7: (0f) r3 += r0")
__msg("frame0: regs=r0 stack= before 6: (bf) r3 = r10")
__msg("frame0: parent state regs=r0 stack=:")
__msg("frame0: regs=r0 stack= before 5: (25) if r0 > 0x7 goto pc+0")
__msg("frame0: parent state regs=r0,r2 stack=:")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void linked_regs_broken_link(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	/* break link for r1, this is the only line that differs
	 * compared to the previous test
	 */
	"r1 = 0;"
	"if r0 > 7 goto +0;"
	/* force r0 to be precise,
	 * this eventually marks r2 as precise because of shared IDs
	 */
	"r3 = r10;"
	"r3 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
152
/* Check that precision marks propagate through scalar IDs.
 * Use the same scalar ID in multiple stack frames, check that
 * precision information is propagated up the call stack.
 */
SEC("socket")
__success __log_level(2)
__msg("12: (0f) r2 += r1")
/* Current state */
__msg("frame2: last_idx 12 first_idx 11 subseq_idx -1 ")
__msg("frame2: regs=r1 stack= before 11: (bf) r2 = r10")
__msg("frame2: parent state regs=r1 stack=")
__msg("frame1: parent state regs= stack=")
__msg("frame0: parent state regs= stack=")
/* Parent state */
__msg("frame2: last_idx 10 first_idx 10 subseq_idx 11 ")
__msg("frame2: regs=r1 stack= before 10: (25) if r1 > 0x7 goto pc+0")
__msg("frame2: parent state regs=r1 stack=")
/* frame1.r{6,7} are marked because mark_precise_scalar_ids()
 * looks for all registers with frame2.r1.id in the current state
 */
__msg("frame1: parent state regs=r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame2: last_idx 8 first_idx 8 subseq_idx 10")
__msg("frame2: regs=r1 stack= before 8: (85) call pc+1")
/* frame1.r1 is marked because of backtracking of call instruction */
__msg("frame1: parent state regs=r1,r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 7 first_idx 6 subseq_idx 8")
__msg("frame1: regs=r1,r6,r7 stack= before 7: (bf) r7 = r1")
__msg("frame1: regs=r1,r6 stack= before 6: (bf) r6 = r1")
__msg("frame1: parent state regs=r1 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 4 first_idx 4 subseq_idx 6")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+1")
__msg("frame0: parent state regs=r1,r6 stack=")
/* Parent state */
__msg("frame0: last_idx 3 first_idx 1 subseq_idx 4")
__msg("frame0: regs=r1,r6 stack= before 3: (bf) r6 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_many_frames(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r6.id */
	"r1 = r0;"
	"r6 = r0;"
	"call precision_many_frames__foo;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
212
/* Middle frame (frame1) helper for precision_many_frames(). */
static __naked __noinline __used
void precision_many_frames__foo(void)
{
	asm volatile (
	/* conflate one of the register numbers (r6) with outer frame,
	 * to verify that those are tracked independently
	 */
	"r6 = r1;"
	"r7 = r1;"
	"call precision_many_frames__bar;"
	"exit"
	::: __clobber_all);
}
226
/* Innermost frame (frame2) helper for precision_many_frames(). */
static __naked __noinline __used
void precision_many_frames__bar(void)
{
	asm volatile (
	"if r1 > 7 goto +0;"
	/* force r1 to be precise, this eventually marks:
	 * - bar frame r1
	 * - foo frame r{1,6,7}
	 * - main frame r{1,6}
	 */
	"r2 = r10;"
	"r2 += r1;"
	"r0 = 0;"
	"exit;"
	::: __clobber_all);
}
243
/* Check that scalars with the same IDs are marked precise on stack as
 * well as in registers.
 */
SEC("socket")
__success __log_level(2)
__msg("11: (0f) r2 += r1")
/* foo frame */
__msg("frame1: regs=r1 stack= before 10: (bf) r2 = r10")
__msg("frame1: regs=r1 stack= before 9: (25) if r1 > 0x7 goto pc+0")
__msg("frame1: regs=r1 stack=-8,-16 before 8: (7b) *(u64 *)(r10 -16) = r1")
__msg("frame1: regs=r1 stack=-8 before 7: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+2")
/* main frame */
__msg("frame0: regs=r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_stack(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == fp[-8].id */
	"r1 = r0;"
	"*(u64*)(r10 - 8) = r1;"
	"call precision_stack__foo;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
277
/* Callee frame (frame1) helper for precision_stack(). */
static __naked __noinline __used
void precision_stack__foo(void)
{
	asm volatile (
	/* conflate a stack slot (fp[-8]) with the outer frame's fp[-8],
	 * to verify that those are tracked independently
	 */
	"*(u64*)(r10 - 8) = r1;"
	"*(u64*)(r10 - 16) = r1;"
	"if r1 > 7 goto +0;"
	/* force r1 to be precise, this eventually marks:
	 * - foo frame r1,fp{-8,-16}
	 * - main frame r1,fp{-8}
	 */
	"r2 = r10;"
	"r2 += r1;"
	"exit"
	::: __clobber_all);
}
297
/* Use two separate scalar IDs to check that these are propagated
 * independently.
 */
SEC("socket")
__success __log_level(2)
/* r{6,7} */
__msg("12: (0f) r3 += r7")
__msg("frame0: regs=r7 stack= before 11: (bf) r3 = r10")
__msg("frame0: regs=r7 stack= before 9: (25) if r7 > 0x7 goto pc+0")
/* ... skip some insns ... */
__msg("frame0: regs=r6,r7 stack= before 3: (bf) r7 = r0")
__msg("frame0: regs=r0,r6 stack= before 2: (bf) r6 = r0")
/* r{8,9} */
__msg("13: (0f) r3 += r9")
__msg("frame0: regs=r9 stack= before 12: (0f) r3 += r7")
/* ... skip some insns ... */
__msg("frame0: regs=r9 stack= before 10: (25) if r9 > 0x7 goto pc+0")
__msg("frame0: regs=r8,r9 stack= before 7: (bf) r9 = r0")
__msg("frame0: regs=r0,r8 stack= before 6: (bf) r8 = r0")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_two_ids(void)
{
	asm volatile (
	/* r6 = random number up to 0xff
	 * r6.id == r7.id
	 */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r6 = r0;"
	"r7 = r0;"
	/* same, but for r{8,9} */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r8 = r0;"
	"r9 = r0;"
	/* clear r0 id */
	"r0 = 0;"
	/* propagate equal scalars precision */
	"if r7 > 7 goto +0;"
	"if r9 > 7 goto +0;"
	"r3 = r10;"
	/* force r7 to be precise, this also marks r6 */
	"r3 += r7;"
	/* force r9 to be precise, this also marks r8 */
	"r3 += r9;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
348
SEC("socket")
__success __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
/* check that r0 and r6 have different IDs after 'if',
 * collect_linked_regs() can't tie more than 6 registers for a single insn.
 */
__msg("8: (25) if r0 > 0x7 goto pc+0 ; R0=scalar(id=1")
__msg("9: (bf) r6 = r6 ; R6_w=scalar(id=2")
/* check that r{0-5} are marked precise after 'if' */
__msg("frame0: regs=r0 stack= before 8: (25) if r0 > 0x7 goto pc+0")
__msg("frame0: parent state regs=r0,r1,r2,r3,r4,r5 stack=:")
__naked void linked_regs_too_many_regs(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r{0-6} IDs */
	"r1 = r0;"
	"r2 = r0;"
	"r3 = r0;"
	"r4 = r0;"
	"r5 = r0;"
	"r6 = r0;"
	/* propagate range for r{0-6} */
	"if r0 > 7 goto +0;"
	/* make r6 appear in the log */
	"r6 = r6;"
	/* force r0 to be precise,
	 * this would cause r{0-5} to be precise because of shared IDs
	 * (r6 fell off the 6-entry linked-regs limit and got a new ID)
	 */
	"r7 = r10;"
	"r7 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
388
/* Check precision backtracking when an ID link is broken in the middle
 * of the instruction sequence: r7/r8 share an ID at 'if r8 >= r0', so
 * marking r7 precise must propagate to r8 and, through the conditional,
 * to r0 — even though 'r8 += r8' later severed the r7/r8 link.
 */
SEC("socket")
__failure __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
__msg("regs=r7 stack= before 5: (3d) if r8 >= r0")
__msg("parent state regs=r0,r7,r8")
__msg("regs=r0,r7,r8 stack= before 4: (25) if r0 > 0x1")
__msg("div by zero")
__naked void linked_regs_broken_link_2(void)
{
	asm volatile (
	"call %[bpf_get_prandom_u32];"
	"r7 = r0;"
	"r8 = r0;"
	"call %[bpf_get_prandom_u32];"
	"if r0 > 1 goto +0;"
	/* r7.id == r8.id,
	 * thus r7 precision implies r8 precision,
	 * which implies r0 precision because of the conditional below.
	 */
	"if r8 >= r0 goto 1f;"
	/* break id relation between r7 and r8 */
	"r8 += r8;"
	/* make r7 precise */
	"if r7 == 0 goto 1f;"
	"r0 /= 0;"
"1:"
	"r0 = 42;"
	"exit;"
	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
421
/* Check that mark_chain_precision() for one of the conditional jump
 * operands does not trigger equal scalars precision propagation.
 */
SEC("socket")
__success __log_level(2)
__msg("3: (25) if r1 > 0x100 goto pc+0")
__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0")
__naked void cjmp_no_linked_regs_trigger(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id */
	"r1 = r0;"
	/* the jump below would be predicted (r1 <= 0xff < 256), thus r1
	 * would be marked precise, this should not imply precision mark
	 * for r0
	 */
	"if r1 > 256 goto +0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
447
/* Verify that check_ids() is used by regsafe() for scalars.
 *
 * r9 = ... some pointer with range X ...
 * r6 = ... unbound scalar ID=a ...
 * r7 = ... unbound scalar ID=b ...
 * if (r6 > r7) goto +1
 * r7 = r6
 * if (r7 > X) goto exit
 * r9 += r6
 * ... access memory using r9 ...
 *
 * The memory access is safe only if r7 is bounded,
 * which is true for one branch and not true for another.
 */
SEC("socket")
__failure __msg("register with unbounded min value")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_in_regsafe(void)
{
	asm volatile (
	/* Bump allocated stack */
	"r1 = 0;"
	"*(u64*)(r10 - 8) = r1;"
	/* r9 = pointer to stack */
	"r9 = r10;"
	"r9 += -8;"
	/* r7 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r7 = r0;"
	/* r6 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	/* if r6 > r7 is an unpredictable jump */
	"if r6 > r7 goto l1_%=;"
	/* tie r6.id == r7.id on the fall-through path */
	"r7 = r6;"
"l1_%=:"
	/* if r7 > 4 ...; transfers range to r6 on one execution path
	 * but does not transfer on another
	 */
	"if r7 > 4 goto l2_%=;"
	/* Access memory at r9[r6], r6 is not always bounded */
	"r9 += r6;"
	"r0 = *(u8*)(r9 + 0);"
"l2_%=:"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
498
/* Similar to check_ids_in_regsafe.
 * The l0 could be reached in two states:
 *
 *   (1) r6{.id=A}, r7{.id=A}, r8{.id=B}
 *   (2) r6{.id=B}, r7{.id=A}, r8{.id=B}
 *
 * Where (2) is not safe, as "r7 > 4" check won't propagate range for it.
 * This example would be considered safe without changes to
 * mark_chain_precision() to track scalar values with equal IDs.
 */
SEC("socket")
__failure __msg("register with unbounded min value")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_in_regsafe_2(void)
{
	asm volatile (
	/* Bump allocated stack */
	"r1 = 0;"
	"*(u64*)(r10 - 8) = r1;"
	/* r9 = pointer to stack */
	"r9 = r10;"
	"r9 += -8;"
	/* r8 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r8 = r0;"
	/* r7 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r7 = r0;"
	/* r6 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	/* scratch .id from r0 */
	"r0 = 0;"
	/* if r6 > r7 is an unpredictable jump */
	"if r6 > r7 goto l1_%=;"
	/* tie r6 and r7 .id */
	"r6 = r7;"
"l0_%=:"
	/* if r7 > 4 exit(0) */
	"if r7 > 4 goto l2_%=;"
	/* Access memory at r9[r6] */
	"r9 += r6;"
	"r0 = *(u8*)(r9 + 0);"
"l2_%=:"
	"r0 = 0;"
	"exit;"
"l1_%=:"
	/* tie r6 and r8 .id */
	"r6 = r8;"
	"goto l0_%=;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
553
/* Check that scalar IDs *are not* generated on register to register
 * assignments if source register is a constant.
 *
 * If such IDs *are* generated the 'l1' below would be reached in
 * two states:
 *
 *   (1) r1{.id=A}, r2{.id=A}
 *   (2) r1{.id=C}, r2{.id=C}
 *
 * Thus forcing 'if r1 == r2' verification twice.
 */
SEC("socket")
__success __log_level(2)
__msg("11: (1d) if r3 == r4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	/* unpredictable jump */
	"if r0 > 7 goto l0_%=;"
	/* possibly generate same scalar ids for r3 and r4 */
	"r1 = 0;"
	"r1 = r1;"
	"r3 = r1;"
	"r4 = r1;"
	"goto l1_%=;"
"l0_%=:"
	/* possibly generate different scalar ids for r3 and r4 */
	"r1 = 0;"
	"r2 = 0;"
	"r3 = r1;"
	"r4 = r2;"
"l1_%=:"
	/* predictable jump, marks r3 and r4 precise */
	"if r3 == r4 goto +0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
599
/* Same as no_scalar_id_for_const() but for 32-bit values */
SEC("socket")
__success __log_level(2)
__msg("11: (1e) if w3 == w4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const32(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	/* unpredictable jump */
	"if r0 > 7 goto l0_%=;"
	/* possibly generate same scalar ids for r3 and r4 */
	"w1 = 0;"
	"w1 = w1;"
	"w3 = w1;"
	"w4 = w1;"
	"goto l1_%=;"
"l0_%=:"
	/* possibly generate different scalar ids for r3 and r4 */
	"w1 = 0;"
	"w2 = 0;"
	"w3 = w1;"
	"w4 = w2;"
"l1_%=:"
	/* predictable jump, marks r3 and r4 precise */
	"if w3 == w4 goto +0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
635
/* Check that unique scalar IDs are ignored when new verifier state is
 * compared to cached verifier state. For this test:
 * - cached state has no id on r1
 * - new state has a unique id on r1
 */
SEC("socket")
__success __log_level(2)
__msg("6: (25) if r6 > 0x7 goto pc+1")
__msg("7: (57) r1 &= 255")
__msg("8: (bf) r2 = r10")
__msg("from 6 to 8: safe")
__msg("processed 12 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void ignore_unique_scalar_ids_cur(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* r1.id == r0.id */
	"r1 = r0;"
	/* make r1.id unique */
	"r0 = 0;"
	"if r6 > 7 goto l0_%=;"
	/* clear r1 id, but keep the range compatible */
	"r1 &= 0xff;"
"l0_%=:"
	/* get here in two states:
	 * - first: r1 has no id (cached state)
	 * - second: r1 has a unique id (should be considered equivalent)
	 */
	"r2 = r10;"
	"r2 += r1;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
675
/* Check that unique scalar IDs are ignored when new verifier state is
 * compared to cached verifier state. For this test:
 * - cached state has a unique id on r1
 * - new state has no id on r1
 */
SEC("socket")
__success __log_level(2)
__msg("6: (25) if r6 > 0x7 goto pc+1")
__msg("7: (05) goto pc+1")
__msg("9: (bf) r2 = r10")
__msg("9: safe")
__msg("processed 13 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void ignore_unique_scalar_ids_old(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* r1.id == r0.id */
	"r1 = r0;"
	/* make r1.id unique */
	"r0 = 0;"
	"if r6 > 7 goto l1_%=;"
	"goto l0_%=;"
"l1_%=:"
	/* clear r1 id, but keep the range compatible */
	"r1 &= 0xff;"
"l0_%=:"
	/* get here in two states:
	 * - first: r1 has a unique id (cached state)
	 * - second: r1 has no id (should be considered equivalent)
	 */
	"r2 = r10;"
	"r2 += r1;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
717
/* Check that two different scalar IDs in a verified state can't be
 * mapped to the same scalar ID in current state.
 */
SEC("socket")
__success __log_level(2)
/* The exit instruction should be reachable from two states,
 * use two matches and "processed .. insns" to ensure this.
 */
__msg("13: (95) exit")
__msg("13: (95) exit")
__msg("processed 18 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void two_old_ids_one_cur_id(void)
{
	asm volatile (
	/* Give unique scalar IDs to r{6,7} */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r6 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r7 = r0;"
	"r0 = 0;"
	/* Maybe make r{6,7} IDs identical */
	"if r6 > r7 goto l0_%=;"
	"goto l1_%=;"
"l0_%=:"
	"r6 = r7;"
"l1_%=:"
	/* Mark r{6,7} precise.
	 * Get here in two states:
	 * - first: r6{.id=A}, r7{.id=B} (cached state)
	 * - second: r6{.id=A}, r7{.id=A}
	 * Currently we don't want to consider such states equivalent.
	 * Thus "exit;" would be verified twice.
	 */
	"r2 = r10;"
	"r2 += r6;"
	"r2 += r7;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
762
SEC("socket")
/* Note the flag, see verifier.c:opt_subreg_zext_lo32_rnd_hi32() */
__flag(BPF_F_TEST_RND_HI32)
__success
/* This test was added because of a bug in verifier.c:sync_linked_regs(),
 * upon range propagation it destroyed subreg_def marks for registers.
 * The subreg_def mark is used to decide whether zero extension instructions
 * are needed when register is read. When BPF_F_TEST_RND_HI32 is set it
 * also causes generation of statements to randomize upper halves of
 * read registers.
 *
 * The test is written in a way to return an upper half of a register
 * that is affected by range propagation and must have it's subreg_def
 * preserved. This gives a return value of 0 and leads to undefined
 * return value if subreg_def mark is not preserved.
 */
__retval(0)
/* Check that verifier believes r1/r0 are zero at exit */
__log_level(2)
__msg("4: (77) r1 >>= 32                     ; R1_w=0")
__msg("5: (bf) r0 = r1                       ; R0_w=0 R1_w=0")
__msg("6: (95) exit")
__msg("from 3 to 4")
__msg("4: (77) r1 >>= 32                     ; R1_w=0")
__msg("5: (bf) r0 = r1                       ; R0_w=0 R1_w=0")
__msg("6: (95) exit")
/* Verify that statements to randomize upper half of r1 had not been
 * generated.
 */
__xlated("call unknown")
__xlated("r0 &= 2147483647")
__xlated("w1 = w0")
/* This is how disasm.c prints BPF_ZEXT_REG at the moment, x86 and arm
 * are the only CI archs that do not need zero extension for subregs.
 */
#if !defined(__TARGET_ARCH_x86) && !defined(__TARGET_ARCH_arm64)
__xlated("w1 = w1")
#endif
__xlated("if w0 < 0xa goto pc+0")
__xlated("r1 >>= 32")
__xlated("r0 = r1")
__xlated("exit")
__naked void linked_regs_and_subreg_def(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	/* make sure r0 is in 32-bit range, otherwise w1 = w0 won't
	 * assign same IDs to registers.
	 */
	"r0 &= 0x7fffffff;"
	/* link w1 and w0 via ID */
	"w1 = w0;"
	/* 'if' statement propagates range info from w0 to w1,
	 * but should not affect w1->subreg_def property.
	 */
	"if w0 < 10 goto +0;"
	/* r1 is read here, on archs that require subreg zero
	 * extension this would cause zext patch generation.
	 */
	"r1 >>= 32;"
	"r0 = r1;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
829
/* BPF programs above use GPL-only helpers, license must be GPL-compatible */
char _license[] SEC("license") = "GPL";
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/bpf.h>
4#include <bpf/bpf_helpers.h>
5#include "bpf_misc.h"
6
/* Check that precision marks propagate through scalar IDs.
 * Registers r{0,1,2} have the same scalar ID at the moment when r0 is
 * marked to be precise, this mark is immediately propagated to r{1,2}.
 */
SEC("socket")
__success __log_level(2)
__msg("frame0: regs=r0,r1,r2 stack= before 4: (bf) r3 = r10")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_same_state(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	/* force r0 to be precise, this immediately marks r1 and r2 as
	 * precise as well because of shared IDs
	 */
	"r3 = r10;"
	"r3 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
39
/* Same as precision_same_state, but mark propagates through state /
 * parent state boundary.
 */
SEC("socket")
__success __log_level(2)
__msg("frame0: last_idx 6 first_idx 5 subseq_idx -1")
__msg("frame0: regs=r0,r1,r2 stack= before 5: (bf) r3 = r10")
__msg("frame0: parent state regs=r0,r1,r2 stack=:")
__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: parent state regs=r0 stack=:")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_cross_state(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	/* force checkpoint */
	"goto +0;"
	/* force r0 to be precise, this immediately marks r1 and r2 as
	 * precise as well because of shared IDs
	 */
	"r3 = r10;"
	"r3 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
77
/* Same as precision_same_state, but break one of the
 * links, note that r1 is absent from regs=... in __msg below.
 */
SEC("socket")
__success __log_level(2)
__msg("frame0: regs=r0,r2 stack= before 5: (bf) r3 = r10")
__msg("frame0: regs=r0,r2 stack= before 4: (b7) r1 = 0")
__msg("frame0: regs=r0,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_same_state_broken_link(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	/* break link for r1, this is the only line that differs
	 * compared to the previous test
	 */
	"r1 = 0;"
	/* force r0 to be precise, this immediately marks r1 and r2 as
	 * precise as well because of shared IDs
	 */
	"r3 = r10;"
	"r3 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
114
/* Same as precision_same_state_broken_link, but with state /
 * parent state boundary.
 */
SEC("socket")
__success __log_level(2)
__msg("frame0: regs=r0,r2 stack= before 6: (bf) r3 = r10")
__msg("frame0: regs=r0,r2 stack= before 5: (b7) r1 = 0")
__msg("frame0: parent state regs=r0,r2 stack=:")
__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: parent state regs=r0 stack=:")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_cross_state_broken_link(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r2.id */
	"r1 = r0;"
	"r2 = r0;"
	/* force checkpoint, although link between r1 and r{0,2} is
	 * broken by the next statement current precision tracking
	 * algorithm can't react to it and propagates mark for r1 to
	 * the parent state.
	 */
	"goto +0;"
	/* break link for r1, this is the only line that differs
	 * compared to precision_cross_state()
	 */
	"r1 = 0;"
	/* force r0 to be precise, this immediately marks r1 and r2 as
	 * precise as well because of shared IDs
	 */
	"r3 = r10;"
	"r3 += r0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
160
/* Check that precision marks propagate through scalar IDs.
 * Use the same scalar ID in multiple stack frames, check that
 * precision information is propagated up the call stack.
 */
SEC("socket")
__success __log_level(2)
__msg("11: (0f) r2 += r1")
/* Current state */
__msg("frame2: last_idx 11 first_idx 10 subseq_idx -1")
__msg("frame2: regs=r1 stack= before 10: (bf) r2 = r10")
__msg("frame2: parent state regs=r1 stack=")
/* frame1.r{6,7} are marked because mark_precise_scalar_ids()
 * looks for all registers with frame2.r1.id in the current state
 */
__msg("frame1: parent state regs=r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame2: last_idx 8 first_idx 8 subseq_idx 10")
__msg("frame2: regs=r1 stack= before 8: (85) call pc+1")
/* frame1.r1 is marked because of backtracking of call instruction */
__msg("frame1: parent state regs=r1,r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 7 first_idx 6 subseq_idx 8")
__msg("frame1: regs=r1,r6,r7 stack= before 7: (bf) r7 = r1")
__msg("frame1: regs=r1,r6 stack= before 6: (bf) r6 = r1")
__msg("frame1: parent state regs=r1 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 4 first_idx 4 subseq_idx 6")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+1")
__msg("frame0: parent state regs=r1,r6 stack=")
/* Parent state */
__msg("frame0: last_idx 3 first_idx 1 subseq_idx 4")
__msg("frame0: regs=r0,r1,r6 stack= before 3: (bf) r6 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_many_frames(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == r6.id */
	"r1 = r0;"
	"r6 = r0;"
	"call precision_many_frames__foo;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
214
/* Middle frame (frame1) helper for precision_many_frames(). */
static __naked __noinline __used
void precision_many_frames__foo(void)
{
	asm volatile (
	/* conflate one of the register numbers (r6) with outer frame,
	 * to verify that those are tracked independently
	 */
	"r6 = r1;"
	"r7 = r1;"
	"call precision_many_frames__bar;"
	"exit"
	::: __clobber_all);
}
228
/* Innermost frame (frame2) helper for precision_many_frames(). */
static __naked __noinline __used
void precision_many_frames__bar(void)
{
	asm volatile (
	/* force r1 to be precise, this immediately marks:
	 * - bar frame r1
	 * - foo frame r{1,6,7}
	 * - main frame r{1,6}
	 */
	"r2 = r10;"
	"r2 += r1;"
	"r0 = 0;"
	"exit;"
	::: __clobber_all);
}
244
/* Check that scalars with the same IDs are marked precise on stack as
 * well as in registers.
 */
SEC("socket")
__success __log_level(2)
/* foo frame */
__msg("frame1: regs=r1 stack=-8,-16 before 9: (bf) r2 = r10")
__msg("frame1: regs=r1 stack=-8,-16 before 8: (7b) *(u64 *)(r10 -16) = r1")
__msg("frame1: regs=r1 stack=-8 before 7: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+2")
/* main frame */
__msg("frame0: regs=r0,r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_stack(void)
{
	asm volatile (
	/* r0 = random number up to 0xff */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* tie r0.id == r1.id == fp[-8].id */
	"r1 = r0;"
	"*(u64*)(r10 - 8) = r1;"
	/* foo() forces r1 precise; the mark must also land on the
	 * fp[-8] spill in this frame because it shares the scalar ID
	 */
	"call precision_stack__foo;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
276
static __naked __noinline __used
void precision_stack__foo(void)
{
	asm volatile (
	/* conflate one of the stack slot offsets (fp-8) with the outer
	 * frame, to verify that stack slots are tracked per-frame
	 * independently
	 */
	"*(u64*)(r10 - 8) = r1;"
	"*(u64*)(r10 - 16) = r1;"
	/* force r1 to be precise, this immediately marks:
	 * - foo frame r1,fp{-8,-16}
	 * - main frame r1,fp{-8}
	 */
	"r2 = r10;"
	"r2 += r1;"
	"exit"
	::: __clobber_all);
}
295
/* Use two separate scalar IDs to check that these are propagated
 * independently.
 */
SEC("socket")
__success __log_level(2)
/* r{6,7} */
__msg("11: (0f) r3 += r7")
__msg("frame0: regs=r6,r7 stack= before 10: (bf) r3 = r10")
/* ... skip some insns ... */
__msg("frame0: regs=r6,r7 stack= before 3: (bf) r7 = r0")
__msg("frame0: regs=r0,r6 stack= before 2: (bf) r6 = r0")
/* r{8,9} */
__msg("12: (0f) r3 += r9")
__msg("frame0: regs=r8,r9 stack= before 11: (0f) r3 += r7")
/* ... skip some insns ... */
__msg("frame0: regs=r8,r9 stack= before 7: (bf) r9 = r0")
__msg("frame0: regs=r0,r8 stack= before 6: (bf) r8 = r0")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_two_ids(void)
{
	asm volatile (
	/* r6 = random number up to 0xff
	 * r6.id == r7.id
	 */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r6 = r0;"
	"r7 = r0;"
	/* same, but for r{8,9} */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r8 = r0;"
	"r9 = r0;"
	/* clear r0 id (constant assignment drops the scalar ID) */
	"r0 = 0;"
	/* force checkpoint, so that precision backtracking crosses a
	 * state boundary
	 */
	"goto +0;"
	"r3 = r10;"
	/* force r7 to be precise, this also marks r6 */
	"r3 += r7;"
	/* force r9 to be precise, this also marks r8 */
	"r3 += r9;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
343
/* Verify that check_ids() is used by regsafe() for scalars.
 *
 * r9 = ... some pointer with range X ...
 * r6 = ... unbound scalar ID=a ...
 * r7 = ... unbound scalar ID=b ...
 * if (r6 > r7) goto +1
 * r7 = r6
 * if (r7 > X) goto exit
 * r9 += r6
 * ... access memory using r9 ...
 *
 * The memory access is safe only if r6 is bounded,
 * which is true for one branch and not true for another.
 */
SEC("socket")
__failure __msg("register with unbounded min value")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_in_regsafe(void)
{
	asm volatile (
	/* Bump allocated stack */
	"r1 = 0;"
	"*(u64*)(r10 - 8) = r1;"
	/* r9 = pointer to stack */
	"r9 = r10;"
	"r9 += -8;"
	/* r7 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r7 = r0;"
	/* r6 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	/* if r6 > r7 is an unpredictable jump */
	"if r6 > r7 goto l1_%=;"
	"r7 = r6;"	/* fall-through path: r6.id == r7.id */
"l1_%=:"
	/* if r7 > 4 ...; transfers range to r6 on one execution path
	 * but does not transfer on another
	 */
	"if r7 > 4 goto l2_%=;"
	/* Access memory at r9[r6], r6 is not always bounded */
	"r9 += r6;"
	"r0 = *(u8*)(r9 + 0);"
"l2_%=:"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
394
/* Similar to check_ids_in_regsafe.
 * The l0 could be reached in two states:
 *
 *   (1) r6{.id=A}, r7{.id=A}, r8{.id=B}
 *   (2) r6{.id=B}, r7{.id=A}, r8{.id=B}
 *
 * Where (2) is not safe, as "r7 > 4" check won't propagate range for it.
 * This example would be considered safe without changes to
 * mark_chain_precision() to track scalar values with equal IDs.
 */
SEC("socket")
__failure __msg("register with unbounded min value")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_in_regsafe_2(void)
{
	asm volatile (
	/* Bump allocated stack */
	"r1 = 0;"
	"*(u64*)(r10 - 8) = r1;"
	/* r9 = pointer to stack */
	"r9 = r10;"
	"r9 += -8;"
	/* r8 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r8 = r0;"
	/* r7 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r7 = r0;"
	/* r6 = ktime_get_ns() */
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	/* scratch .id from r0 */
	"r0 = 0;"
	/* if r6 > r7 is an unpredictable jump */
	"if r6 > r7 goto l1_%=;"
	/* tie r6 and r7 .id */
	"r6 = r7;"
"l0_%=:"
	/* reached in state (1) via fall-through, state (2) via l1 */
	/* if r7 > 4 exit(0) */
	"if r7 > 4 goto l2_%=;"
	/* Access memory at r9[r6] */
	"r9 += r6;"
	"r0 = *(u8*)(r9 + 0);"
"l2_%=:"
	"r0 = 0;"
	"exit;"
"l1_%=:"
	/* tie r6 and r8 .id */
	"r6 = r8;"
	"goto l0_%=;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
449
/* Check that scalar IDs *are not* generated on register to register
 * assignments if source register is a constant.
 *
 * If such IDs *are* generated the 'l1' below would be reached in
 * two states:
 *
 *   (1) r3{.id=A}, r4{.id=A}
 *   (2) r3{.id=C}, r4{.id=C}
 *
 * Thus forcing 'if r3 == r4' verification twice.
 */
SEC("socket")
__success __log_level(2)
__msg("11: (1d) if r3 == r4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	/* unpredictable jump */
	"if r0 > 7 goto l0_%=;"
	/* possibly generate same scalar ids for r3 and r4 */
	"r1 = 0;"
	"r1 = r1;"	/* reg-to-reg move of a constant must not create an ID */
	"r3 = r1;"
	"r4 = r1;"
	"goto l1_%=;"
"l0_%=:"
	/* possibly generate different scalar ids for r3 and r4 */
	"r1 = 0;"
	"r2 = 0;"
	"r3 = r1;"
	"r4 = r2;"
"l1_%=:"
	/* predictable jump, marks r3 and r4 precise */
	"if r3 == r4 goto +0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
495
/* Same as no_scalar_id_for_const() but for 32-bit values */
SEC("socket")
__success __log_level(2)
__msg("11: (1e) if w3 == w4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const32(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	/* unpredictable jump */
	"if r0 > 7 goto l0_%=;"
	/* possibly generate same scalar ids for r3 and r4 */
	"w1 = 0;"
	"w1 = w1;"	/* reg-to-reg move of a constant must not create an ID */
	"w3 = w1;"
	"w4 = w1;"
	"goto l1_%=;"
"l0_%=:"
	/* possibly generate different scalar ids for r3 and r4 */
	"w1 = 0;"
	"w2 = 0;"
	"w3 = w1;"
	"w4 = w2;"
"l1_%=:"
	/* predictable jump, marks r3 and r4 precise */
	"if w3 == w4 goto +0;"
	"r0 = 0;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
531
/* Check that unique scalar IDs are ignored when new verifier state is
 * compared to cached verifier state. For this test:
 * - cached state has no id on r1
 * - new state has a unique id on r1
 */
SEC("socket")
__success __log_level(2)
__msg("6: (25) if r6 > 0x7 goto pc+1")
__msg("7: (57) r1 &= 255")
__msg("8: (bf) r2 = r10")
__msg("from 6 to 8: safe")
__msg("processed 12 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void ignore_unique_scalar_ids_cur(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* r1.id == r0.id */
	"r1 = r0;"
	/* make r1.id unique: constant write drops r0's ID, leaving r1
	 * as the only register carrying it
	 */
	"r0 = 0;"
	"if r6 > 7 goto l0_%=;"
	/* clear r1 id, but keep the range compatible */
	"r1 &= 0xff;"
"l0_%=:"
	/* get here in two states:
	 * - first: r1 has no id (cached state)
	 * - second: r1 has a unique id (should be considered equivalent)
	 */
	"r2 = r10;"
	"r2 += r1;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
571
/* Check that unique scalar IDs are ignored when new verifier state is
 * compared to cached verifier state. For this test:
 * - cached state has a unique id on r1
 * - new state has no id on r1
 */
SEC("socket")
__success __log_level(2)
__msg("6: (25) if r6 > 0x7 goto pc+1")
__msg("7: (05) goto pc+1")
__msg("9: (bf) r2 = r10")
__msg("9: safe")
__msg("processed 13 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void ignore_unique_scalar_ids_old(void)
{
	asm volatile (
	"call %[bpf_ktime_get_ns];"
	"r6 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	/* r1.id == r0.id */
	"r1 = r0;"
	/* make r1.id unique: constant write drops r0's ID, leaving r1
	 * as the only register carrying it
	 */
	"r0 = 0;"
	"if r6 > 7 goto l1_%=;"
	"goto l0_%=;"
"l1_%=:"
	/* clear r1 id, but keep the range compatible */
	"r1 &= 0xff;"
"l0_%=:"
	/* get here in two states:
	 * - first: r1 has a unique id (cached state)
	 * - second: r1 has no id (should be considered equivalent)
	 */
	"r2 = r10;"
	"r2 += r1;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
613
/* Check that two different scalar IDs in a verified state can't be
 * mapped to the same scalar ID in current state.
 */
SEC("socket")
__success __log_level(2)
/* The exit instruction should be reachable from two states,
 * use two matches and "processed .. insns" to ensure this.
 */
__msg("13: (95) exit")
__msg("13: (95) exit")
__msg("processed 18 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void two_old_ids_one_cur_id(void)
{
	asm volatile (
	/* Give unique scalar IDs to r{6,7} */
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r6 = r0;"
	"call %[bpf_ktime_get_ns];"
	"r0 &= 0xff;"
	"r7 = r0;"
	"r0 = 0;"	/* scratch .id from r0 */
	/* Maybe make r{6,7} IDs identical */
	"if r6 > r7 goto l0_%=;"
	"goto l1_%=;"
"l0_%=:"
	"r6 = r7;"
"l1_%=:"
	/* Mark r{6,7} precise.
	 * Get here in two states:
	 * - first: r6{.id=A}, r7{.id=B} (cached state)
	 * - second: r6{.id=A}, r7{.id=A}
	 * Currently we don't want to consider such states equivalent.
	 * Thus "exit;" would be verified twice.
	 */
	"r2 = r10;"
	"r2 += r6;"
	"r2 += r7;"
	"exit;"
	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}
658
/* GPL license declaration required to use GPL-only BPF helpers */
char _license[] SEC("license") = "GPL";