// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <errno.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include <../../../tools/include/linux/filter.h>

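/* The tests below exercise the verifier's scalar precision backtracking
 * across subprogram boundaries: static subprogs, global subprogs, and
 * bpf_loop() callbacks. The vals[] array is indexed with the values whose
 * precision is being tracked, which is what forces the verifier to mark
 * them (and everything they were derived from) as precise.
 */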
int vals[] SEC(".data.vals") = {1, 2, 3, 4};

__naked __noinline __used
static unsigned long identity_subprog()
{
	/* the simplest *static* 64-bit identity function */
	asm volatile (
		"r0 = r1;"
		"exit;"
	);
}

__noinline __used
unsigned long global_identity_subprog(__u64 x)
{
	/* the simplest *global* 64-bit identity function */
	return x;
}

__naked __noinline __used
static unsigned long callback_subprog()
{
	/* the simplest callback function */
	asm volatile (
		"r0 = 0;"
		"exit;"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r0")
__msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1")
__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int subprog_result_precise(void)
{
	asm volatile (
		"r6 = 3;"
		/* pass r6 through r1 into subprog to get it back as r0;
		 * this whole chain will have to be marked as precise later
		 */
		"r1 = r6;"
		"call identity_subprog;"
		/* now use subprog's returned value (which is a
		 * r6 -> r1 -> r0 chain), as index into vals array, forcing
		 * all of that to be known precisely
		 */
		"r0 *= 4;"
		"r1 = %[vals];"
		/* here r0->r1->r6 chain is forced to be precise and has to be
		 * propagated back to the beginning, including through the
		 * subprog call
		 */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

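/* fp_leaking_subprog() returns a scalar derived from the frame pointer:
 * the raw .8byte below encodes BPF_MOVSX64_REG(r0, r10, 8), i.e. the
 * sign-extending "r0 = (s8)r10" move seen in the verifier log above/below.
 */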
__naked __noinline __used
static unsigned long fp_leaking_subprog()
{
	asm volatile (
		".8byte %[r0_eq_r10_cast_s8];"
		"exit;"
		:: __imm_insn(r0_eq_r10_cast_s8, BPF_MOVSX64_REG(BPF_REG_0, BPF_REG_10, 8))
	);
}

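/* Same idea as fp_leaking_subprog(), but r10 is first copied into r1, so
 * the source of the sign-extending move is not literally the frame pointer
 * register.
 */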
__naked __noinline __used
static unsigned long sneaky_fp_leaking_subprog()
{
	asm volatile (
		"r1 = r10;"
		".8byte %[r0_eq_r1_cast_s8];"
		"exit;"
		:: __imm_insn(r0_eq_r1_cast_s8, BPF_MOVSX64_REG(BPF_REG_0, BPF_REG_1, 8))
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("6: (0f) r1 += r0")
__msg("mark_precise: frame0: last_idx 6 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 3: (57) r0 &= 3")
__msg("mark_precise: frame0: regs=r0 stack= before 10: (95) exit")
__msg("mark_precise: frame1: regs=r0 stack= before 9: (bf) r0 = (s8)r10")
__msg("7: R0_w=scalar")
__naked int fp_precise_subprog_result(void)
{
	asm volatile (
		"call fp_leaking_subprog;"
		/* use subprog's returned value (which is derived from r10=fp
		 * register), as index into vals array, forcing all of that to
		 * be known precisely
		 */
		"r0 &= 3;"
		"r0 *= 4;"
		"r1 = %[vals];"
		/* force precision marking */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("6: (0f) r1 += r0")
__msg("mark_precise: frame0: last_idx 6 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 3: (57) r0 &= 3")
__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = (s8)r1")
/* here r1 is marked precise, even though it's fp register, but that's fine
 * because by the time we get out of subprogram it has to be derived from r10
 * anyways, at which point we'll break precision chain
 */
__msg("mark_precise: frame1: regs=r1 stack= before 9: (bf) r1 = r10")
__msg("7: R0_w=scalar")
__naked int sneaky_fp_precise_subprog_result(void)
{
	asm volatile (
		"call sneaky_fp_leaking_subprog;"
		/* use subprog's returned value (which is derived from r10=fp
		 * register), as index into vals array, forcing all of that to
		 * be known precisely
		 */
		"r0 &= 3;"
		"r0 *= 4;"
		"r1 = %[vals];"
		/* force precision marking */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r0")
__msg("mark_precise: frame0: last_idx 9 first_idx 0")
__msg("mark_precise: frame0: regs=r0 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r0 stack= before 7: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (a5) if r0 < 0x4 goto pc+1")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (85) call pc+7")
__naked int global_subprog_result_precise(void)
{
	asm volatile (
		"r6 = 3;"
		/* pass r6 through r1 into subprog to get it back as r0;
		 * given global_identity_subprog is global, precision won't
		 * propagate all the way back to r6
		 */
		"r1 = r6;"
		"call global_identity_subprog;"
		/* now use subprog's returned value (which is unknown now, so
		 * we need to clamp it), as index into vals array, forcing r0
		 * to be marked precise (with no effect on r6, though)
		 */
		"if r0 < %[vals_arr_sz] goto 1f;"
		"r0 = %[vals_arr_sz] - 1;"
	"1:"
		"r0 *= 4;"
		"r1 = %[vals];"
		/* here r0 is forced to be precise and has to be
		 * propagated back to the global subprog call, but it
		 * shouldn't go all the way to mark r6 as precise
		 */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_const(vals_arr_sz, ARRAY_SIZE(vals))
		: __clobber_common, "r6"
	);
}

__naked __noinline __used
static unsigned long loop_callback_bad()
{
	/* bpf_loop() callback that can return values outside of [0, 1] range */
	asm volatile (
		"call %[bpf_get_prandom_u32];"
		"if r0 s> 1000 goto 1f;"
		"r0 = 0;"
	"1:"
		"goto +0;" /* checkpoint */
		/* bpf_loop() expects [0, 1] values, so branch above skipping
		 * r0 = 0; should lead to a failure, but if exit instruction
		 * doesn't enforce r0's precision, this callback will be
		 * successfully verified
		 */
		"exit;"
		:
		: __imm(bpf_get_prandom_u32)
		: __clobber_common
	);
}

SEC("?raw_tp")
__failure __log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
/* check that fallthrough code path marks r0 as precise */
__msg("mark_precise: frame1: regs=r0 stack= before 11: (b7) r0 = 0")
/* check that we have branch code path doing its own validation */
__msg("from 10 to 12: frame1: R0=scalar(smin=umin=1001")
/* check that branch code path marks r0 as precise, before failing */
__msg("mark_precise: frame1: regs=r0 stack= before 9: (85) call bpf_get_prandom_u32#7")
__msg("At callback return the register R0 has smin=1001 should have been in [0, 1]")
__naked int callback_precise_return_fail(void)
{
	asm volatile (
		"r1 = 1;" /* nr_loops */
		"r2 = %[loop_callback_bad];" /* callback_fn */
		"r3 = 0;" /* callback_ctx */
		"r4 = 0;" /* flags */
		"call %[bpf_loop];"

		"r0 = 0;"
		"exit;"
		:
		: __imm_ptr(loop_callback_bad),
		  __imm(bpf_loop)
		: __clobber_common
	);
}

SEC("?raw_tp")
__success __log_level(2)
/* First simulated path does not include callback body,
 * r1 and r4 are always precise for bpf_loop() calls.
 */
__msg("9: (85) call bpf_loop#181")
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
__msg("mark_precise: frame0: parent state regs=r4 stack=:")
__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
__msg("mark_precise: frame0: regs=r4 stack= before 8: (b7) r4 = 0")
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
__msg("mark_precise: frame0: parent state regs=r1 stack=:")
__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
__msg("mark_precise: frame0: regs=r1 stack= before 8: (b7) r4 = 0")
__msg("mark_precise: frame0: regs=r1 stack= before 7: (b7) r3 = 0")
__msg("mark_precise: frame0: regs=r1 stack= before 6: (bf) r2 = r8")
__msg("mark_precise: frame0: regs=r1 stack= before 5: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
/* r6 precision propagation */
__msg("14: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 14 first_idx 9")
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
__msg("mark_precise: frame0: regs=r0,r6 stack= before 10: (bf) r6 = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop")
/* State entering callback body popped from states stack */
__msg("from 9 to 17: frame1:")
__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
__msg("17: (b7) r0 = 0")
__msg("18: (95) exit")
__msg("returning from callee:")
__msg("to caller at 9:")
__msg("frame 0: propagating r1,r4")
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
__msg("mark_precise: frame0: regs=r1,r4 stack= before 18: (95) exit")
__msg("from 18 to 9: safe")
__naked int callback_result_precise(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and use result; r0 shouldn't propagate back to
		 * callback_subprog
		 */
		"r1 = r6;" /* nr_loops */
		"r2 = %[callback_subprog];" /* callback_fn */
		"r3 = 0;" /* callback_ctx */
		"r4 = 0;" /* flags */
		"call %[bpf_loop];"

		"r6 = r0;"
		"if r6 > 3 goto 1f;"
		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the bpf_loop() call, but not beyond
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
	"1:"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_ptr(callback_subprog),
		  __imm(bpf_loop)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 7 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1")
__msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_callee_saved_reg_precise(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call identity_subprog;"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 7 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_callee_saved_reg_precise_global(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call global_identity_subprog;"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
/* First simulated path does not include callback body */
__msg("12: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 12 first_idx 9")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 9: (85) call bpf_loop")
__msg("mark_precise: frame0: parent state regs=r6 stack=:")
__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
/* State entering callback body popped from states stack */
__msg("from 9 to 15: frame1:")
__msg("15: frame1: R1=scalar() R2=0 R10=fp0 cb")
__msg("15: (b7) r0 = 0")
__msg("16: (95) exit")
__msg("returning from callee:")
__msg("to caller at 9:")
/* r1, r4 are always precise for bpf_loop(),
 * r6 was marked before backtracking to callback body.
 */
__msg("frame 0: propagating r1,r4,r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
__msg("mark_precise: frame0: regs=r1,r4,r6 stack= before 16: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop")
__msg("mark_precise: frame0: parent state regs= stack=:")
__msg("from 16 to 9: safe")
__naked int parent_callee_saved_reg_precise_with_callback(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 1;" /* nr_loops */
		"r2 = %[callback_subprog];" /* callback_fn */
		"r3 = 0;" /* callback_ctx */
		"r4 = 0;" /* flags */
		"call %[bpf_loop];"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) callback call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_ptr(callback_subprog),
		  __imm(bpf_loop)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 6")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
__msg("mark_precise: frame0: last_idx 13 first_idx 0")
__msg("mark_precise: frame0: regs= stack=-8 before 13: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 12: (bf) r0 = r1")
__msg("mark_precise: frame1: regs= stack= before 5: (85) call pc+6")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_stack_slot_precise(void)
{
	asm volatile (
		/* spill reg */
		"r6 = 3;"
		"*(u64 *)(r10 - 8) = r6;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call identity_subprog;"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r6 = *(u64 *)(r10 - 8);"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (85) call pc+6")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_stack_slot_precise_global(void)
{
	asm volatile (
		/* spill reg */
		"r6 = 3;"
		"*(u64 *)(r10 - 8) = r6;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call global_identity_subprog;"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r6 = *(u64 *)(r10 - 8);"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
/* First simulated path does not include callback body */
__msg("14: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 14 first_idx 10")
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 10: (85) call bpf_loop")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
__msg("mark_precise: frame0: last_idx 9 first_idx 0 subseq_idx 10")
__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
/* State entering callback body popped from states stack */
__msg("from 10 to 17: frame1:")
__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
__msg("17: (b7) r0 = 0")
__msg("18: (95) exit")
__msg("returning from callee:")
__msg("to caller at 10:")
/* r1, r4 are always precise for bpf_loop(),
 * fp-8 was marked before backtracking to callback body.
 */
__msg("frame 0: propagating r1,r4,fp-8")
__msg("mark_precise: frame0: last_idx 10 first_idx 10 subseq_idx -1")
__msg("mark_precise: frame0: regs=r1,r4 stack=-8 before 18: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
__msg("mark_precise: frame0: parent state regs= stack=:")
__msg("from 18 to 10: safe")
__naked int parent_stack_slot_precise_with_callback(void)
{
	asm volatile (
		/* spill reg */
		"r6 = 3;"
		"*(u64 *)(r10 - 8) = r6;"

		/* ensure we have callback frame in jump history */
		"r1 = r6;" /* nr_loops */
		"r2 = %[callback_subprog];" /* callback_fn */
		"r3 = 0;" /* callback_ctx */
		"r4 = 0;" /* flags */
		"call %[bpf_loop];"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r6 = *(u64 *)(r10 - 8);"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_ptr(callback_subprog),
		  __imm(bpf_loop)
		: __clobber_common, "r6"
	);
}

__noinline __used
static __u64 subprog_with_precise_arg(__u64 x)
{
	return vals[x]; /* x is forced to be precise */
}

SEC("?raw_tp")
__success __log_level(2)
__msg("8: (0f) r2 += r1")
__msg("mark_precise: frame1: last_idx 8 first_idx 0")
__msg("mark_precise: frame1: regs=r1 stack= before 6: (18) r2 = ")
__msg("mark_precise: frame1: regs=r1 stack= before 5: (67) r1 <<= 2")
__msg("mark_precise: frame1: regs=r1 stack= before 2: (85) call pc+2")
__msg("mark_precise: frame0: regs=r1 stack= before 1: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 0: (b7) r6 = 3")
__naked int subprog_arg_precise(void)
{
	asm volatile (
		"r6 = 3;"
		"r1 = r6;"
		/* subprog_with_precise_arg expects its argument to be
		 * precise, so r1->r6 will be marked precise from inside the
		 * subprog
		 */
		"call subprog_with_precise_arg;"
		"r0 += r6;"
		"exit;"
		:
		:
		: __clobber_common, "r6"
	);
}

/* r1 is pointer to stack slot;
 * r2 is a register to spill into that slot
 * subprog also spills r2 into its own stack slot
 */
__naked __noinline __used
static __u64 subprog_spill_reg_precise(void)
{
	asm volatile (
		/* spill to parent stack */
		"*(u64 *)(r1 + 0) = r2;"
		/* spill to subprog stack (we use -16 offset to avoid
		 * accidental confusion with parent's -8 stack slot in
		 * verifier log output)
		 */
		"*(u64 *)(r10 - 16) = r2;"
		/* use both spills as return result to propagate precision everywhere */
		"r0 = *(u64 *)(r10 - 16);"
		"r2 = *(u64 *)(r1 + 0);"
		"r0 += r2;"
		"exit;"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("10: (0f) r1 += r7")
__msg("mark_precise: frame0: last_idx 10 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r7 stack= before 9: (bf) r1 = r8")
__msg("mark_precise: frame0: regs=r7 stack= before 8: (27) r7 *= 4")
__msg("mark_precise: frame0: regs=r7 stack= before 7: (79) r7 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=2 R6_w=1 R8_rw=map_value(map=.data.vals,ks=4,vs=16) R10=fp0 fp-8_rw=P1")
__msg("mark_precise: frame0: last_idx 18 first_idx 0 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 17: (0f) r0 += r2")
__msg("mark_precise: frame1: regs= stack= before 16: (79) r2 = *(u64 *)(r1 +0)")
__msg("mark_precise: frame1: regs= stack= before 15: (79) r0 = *(u64 *)(r10 -16)")
__msg("mark_precise: frame1: regs= stack= before 14: (7b) *(u64 *)(r10 -16) = r2")
__msg("mark_precise: frame1: regs= stack= before 13: (7b) *(u64 *)(r1 +0) = r2")
__msg("mark_precise: frame1: regs=r2 stack= before 6: (85) call pc+6")
__msg("mark_precise: frame0: regs=r2 stack= before 5: (bf) r2 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (07) r1 += -8")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 1")
__naked int subprog_spill_into_parent_stack_slot_precise(void)
{
	asm volatile (
		"r6 = 1;"

		/* pass pointer to stack slot and r6 to subprog;
		 * r6 will be marked precise and spilled into fp-8 slot, which
		 * also should be marked precise
		 */
		"r1 = r10;"
		"r1 += -8;"
		"r2 = r6;"
		"call subprog_spill_reg_precise;"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r7 = *(u64 *)(r10 - 8);"

		"r7 *= 4;"
		"r1 = %[vals];"
		/* here r7 is forced to be precise and has to be propagated
		 * back to the beginning, handling subprog call and logic
		 */
		"r1 += r7;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6", "r7"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("17: (0f) r1 += r0")
__msg("mark_precise: frame0: last_idx 17 first_idx 0 subseq_idx -1")
__msg("mark_precise: frame0: regs=r0 stack= before 16: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r0 stack= before 15: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 14: (79) r0 = *(u64 *)(r10 -16)")
__msg("mark_precise: frame0: regs= stack=-16 before 13: (7b) *(u64 *)(r7 -8) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 12: (79) r0 = *(u64 *)(r8 +16)")
__msg("mark_precise: frame0: regs= stack=-16 before 11: (7b) *(u64 *)(r8 +16) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 10: (79) r0 = *(u64 *)(r7 -8)")
__msg("mark_precise: frame0: regs= stack=-16 before 9: (7b) *(u64 *)(r10 -16) = r0")
__msg("mark_precise: frame0: regs=r0 stack= before 8: (07) r8 += -32")
__msg("mark_precise: frame0: regs=r0 stack= before 7: (bf) r8 = r10")
__msg("mark_precise: frame0: regs=r0 stack= before 6: (07) r7 += -8")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r7 = r10")
__msg("mark_precise: frame0: regs=r0 stack= before 21: (95) exit")
__msg("mark_precise: frame1: regs=r0 stack= before 20: (bf) r0 = r1")
__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+15")
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 1")
__naked int stack_slot_aliases_precision(void)
{
	asm volatile (
		"r6 = 1;"
		/* pass r6 through r1 into subprog to get it back as r0;
		 * this whole chain will have to be marked as precise later
		 */
		"r1 = r6;"
		"call identity_subprog;"
		/* let's setup two registers that are aliased to r10 */
		"r7 = r10;"
		"r7 += -8;" /* r7 = r10 - 8 */
		"r8 = r10;"
		"r8 += -32;" /* r8 = r10 - 32 */
		/* now spill subprog's return value (a r6 -> r1 -> r0 chain)
		 * a few times through different stack pointer regs, making
		 * sure to use r10, r7, and r8 both in LDX and STX insns, and
		 * *importantly* also using a combination of const var_off and
		 * insn->off to validate that we record final stack slot
		 * correctly, instead of relying on just insn->off derivation,
		 * which is only valid for r10-based stack offset
		 */
		"*(u64 *)(r10 - 16) = r0;"
		"r0 = *(u64 *)(r7 - 8);" /* r7 - 8 == r10 - 16 */
		"*(u64 *)(r8 + 16) = r0;" /* r8 + 16 = r10 - 16 */
		"r0 = *(u64 *)(r8 + 16);"
		"*(u64 *)(r7 - 8) = r0;"
		"r0 = *(u64 *)(r10 - 16);"
		/* get ready to use r0 as an index into array to force precision */
		"r0 *= 4;"
		"r1 = %[vals];"
		/* here r0->r1->r6 chain is forced to be precise and has to be
		 * propagated back to the beginning, including through the
		 * subprog call and all the stack spills and loads
		 */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

char _license[] SEC("license") = "GPL";