{
	"precise: test 1",
	.insns = {
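	/* Two bpf_map_lookup_elem() calls leave map value pointers in r9 and
	 * r8; subtracting them turns r9 (copied to r2) into a scalar, and the
	 * JLT check below bounds it to [0, 7].  Because that scalar later
	 * feeds the size argument of bpf_probe_read_kernel(), the verifier
	 * has to backtrack and mark r2 and the registers it was derived from
	 * as precise; .errstr matches the expected mark_precise log.
	 */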
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_LD_MAP_FD(BPF_REG_6, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),

	BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),

	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),

	BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), /* map_value_ptr -= map_value_ptr */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
	BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1),
	BPF_EXIT_INSN(),

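	/* r2, now in [1, 8] after the increment below, is passed as the size
	 * argument of bpf_probe_read_kernel() reading into fp-8; validating
	 * that variable size against the stack buffer is what triggers the
	 * precision backtracking this test checks for.
	 */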
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.fixup_map_array_48b = { 1 },
	.result = VERBOSE_ACCEPT,
	.errstr =
	"mark_precise: frame0: last_idx 26 first_idx 20\
	mark_precise: frame0: regs=r2 stack= before 25\
	mark_precise: frame0: regs=r2 stack= before 24\
	mark_precise: frame0: regs=r2 stack= before 23\
	mark_precise: frame0: regs=r2 stack= before 22\
	mark_precise: frame0: regs=r2 stack= before 20\
	mark_precise: frame0: parent state regs=r2,r9 stack=:\
	mark_precise: frame0: last_idx 19 first_idx 10\
	mark_precise: frame0: regs=r2,r9 stack= before 19\
	mark_precise: frame0: regs=r9 stack= before 18\
	mark_precise: frame0: regs=r8,r9 stack= before 17\
	mark_precise: frame0: regs=r0,r9 stack= before 15\
	mark_precise: frame0: regs=r0,r9 stack= before 14\
	mark_precise: frame0: regs=r9 stack= before 13\
	mark_precise: frame0: regs=r9 stack= before 12\
	mark_precise: frame0: regs=r9 stack= before 11\
	mark_precise: frame0: regs=r9 stack= before 10\
	mark_precise: frame0: parent state regs= stack=:",
},
{
	"precise: test 2",
	.insns = {
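	/* Identical program to "precise: test 1"; the only difference is
	 * BPF_F_TEST_STATE_FREQ below, which makes the verifier checkpoint
	 * states much more frequently, so the precision backtracking in
	 * .errstr is split across several shorter state segments.
	 */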
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_LD_MAP_FD(BPF_REG_6, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),

	BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),

	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),

	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),

	BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), /* map_value_ptr -= map_value_ptr */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
	BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1),
	BPF_EXIT_INSN(),

	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.fixup_map_array_48b = { 1 },
	.result = VERBOSE_ACCEPT,
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr =
	"26: (85) call bpf_probe_read_kernel#113\
	mark_precise: frame0: last_idx 26 first_idx 22\
	mark_precise: frame0: regs=r2 stack= before 25\
	mark_precise: frame0: regs=r2 stack= before 24\
	mark_precise: frame0: regs=r2 stack= before 23\
	mark_precise: frame0: regs=r2 stack= before 22\
	mark_precise: frame0: parent state regs=r2 stack=:\
	mark_precise: frame0: last_idx 20 first_idx 20\
	mark_precise: frame0: regs=r2 stack= before 20\
	mark_precise: frame0: parent state regs=r2,r9 stack=:\
	mark_precise: frame0: last_idx 19 first_idx 17\
	mark_precise: frame0: regs=r2,r9 stack= before 19\
	mark_precise: frame0: regs=r9 stack= before 18\
	mark_precise: frame0: regs=r8,r9 stack= before 17\
	mark_precise: frame0: parent state regs= stack=:",
},
{
	"precise: cross frame pruning",
	.insns = {
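	/* r8 and r9 each become 0 or 1 depending on bpf_get_prandom_u32(),
	 * then a subprogram is called (pc+4).  On the r8 == 0 path the LDX
	 * below reads through r2, which is clobbered by the calls and never
	 * re-initialized, so the program must be rejected with "!read_ok".
	 * The test exercises state pruning across the call frame boundary:
	 * the unsafe r8 == 0 path must not be pruned away.
	 */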
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr = "!read_ok",
	.result = REJECT,
},
{
	"precise: ST zero to stack insn is supported",
	.insns = {
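	/* The conditional jump with offset 0 falls through either way,
	 * seemingly only to give the verifier a branch point.  BPF_ST then
	 * writes the immediate 0 to fp-8 through r3 (an alias of r10), r4
	 * reloads it, and r4 is compared against r0 = -1.  .errstr checks how
	 * precision backtracking treats a BPF_ST of a constant to the stack;
	 * the program is expected to be accepted with retval -1.
	 */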
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0),
	/* not a register spill, so we stop precision propagation for R4 here */
	BPF_ST_MEM(BPF_DW, BPF_REG_3, -8, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_MOV64_IMM(BPF_REG_0, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr = "mark_precise: frame0: last_idx 5 first_idx 5\
	mark_precise: frame0: parent state regs=r4 stack=:\
	mark_precise: frame0: last_idx 4 first_idx 2\
	mark_precise: frame0: regs=r4 stack= before 4\
	mark_precise: frame0: regs=r4 stack= before 3\
	mark_precise: frame0: last_idx 5 first_idx 5\
	mark_precise: frame0: parent state regs=r0 stack=:\
	mark_precise: frame0: last_idx 4 first_idx 2\
	mark_precise: frame0: regs=r0 stack= before 4\
	5: R0=-1 R4=0",
	.result = VERBOSE_ACCEPT,
	.retval = -1,
},
{
	"precise: STX insn causing spi > allocated_stack",
	.insns = {
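	/* Same shape as the previous test, but the store to fp-8 is a real
	 * register spill (STX of r0, masked to [0, 255]) through r3, an alias
	 * of r10.  Backtracking through that spill can reference a stack slot
	 * index beyond the parent state's allocated stack (the spi >
	 * allocated_stack case named in the test description); .errstr pins
	 * down the full expected mark_precise log.
	 */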
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	/* make later reg spill more interesting by having somewhat known scalar */
	BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_MOV64_IMM(BPF_REG_0, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr = "mark_precise: frame0: last_idx 7 first_idx 7\
	mark_precise: frame0: parent state regs=r4 stack=:\
	mark_precise: frame0: last_idx 6 first_idx 4\
	mark_precise: frame0: regs=r4 stack= before 6: (b7) r0 = -1\
	mark_precise: frame0: regs=r4 stack= before 5: (79) r4 = *(u64 *)(r10 -8)\
	mark_precise: frame0: regs= stack=-8 before 4: (7b) *(u64 *)(r3 -8) = r0\
	mark_precise: frame0: parent state regs=r0 stack=:\
	mark_precise: frame0: last_idx 3 first_idx 3\
	mark_precise: frame0: regs=r0 stack= before 3: (55) if r3 != 0x7b goto pc+0\
	mark_precise: frame0: regs=r0 stack= before 2: (bf) r3 = r10\
	mark_precise: frame0: regs=r0 stack= before 1: (57) r0 &= 255\
	mark_precise: frame0: parent state regs=r0 stack=:\
	mark_precise: frame0: last_idx 0 first_idx 0\
	mark_precise: frame0: regs=r0 stack= before 0: (85) call bpf_get_prandom_u32#7\
	mark_precise: frame0: last_idx 7 first_idx 7\
	mark_precise: frame0: parent state regs= stack=:",
	.result = VERBOSE_ACCEPT,
	.retval = -1,
},
{
	"precise: mark_chain_precision for ARG_CONST_ALLOC_SIZE_OR_ZERO",
	.insns = {
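	/* The bpf_ringbuf_reserve() size in r2 is either 1 or 0x1000 depending
	 * on ingress_ifindex.  ARG_CONST_ALLOC_SIZE_OR_ZERO makes the verifier
	 * track that size precisely per branch, so on the size == 1 branch the
	 * 8-byte load at offset 42 from the reserved memory must be rejected
	 * ("invalid access to memory, mem_size=1 off=42 size=8").
	 */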
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, ingress_ifindex)),
	BPF_LD_MAP_FD(BPF_REG_6, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_IMM(BPF_REG_2, 1),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 0, 1),
	BPF_MOV64_IMM(BPF_REG_2, 0x1000),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 42),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 1 },
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = BPF_F_TEST_STATE_FREQ | F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	.errstr = "invalid access to memory, mem_size=1 off=42 size=8",
	.result = REJECT,
},
{
	"precise: program doesn't prematurely prune branches",
	.insns = {
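	/* r6 starts out as 0x400 and is pushed through MOD, conditional JLE
	 * checks against r9, and RSH/MUL before being added to a map value
	 * pointer.  If the verifier pruned these branch states too eagerly it
	 * could miss the path where r6 is insufficiently bounded; it is
	 * expected to keep enough state to reject the program with
	 * "register with unbounded min value is not allowed".
	 */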
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0x400),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_8, 0),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0x80000000),
	BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 0x401),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 2),
	BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 1),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0),
	BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 1),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0),
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
	BPF_LD_MAP_FD(BPF_REG_4, 0),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_4),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 10),
	BPF_ALU64_IMM(BPF_MUL, BPF_REG_6, 8192),
	BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_3, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 13 },
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = REJECT,
	.errstr = "register with unbounded min value is not allowed",
},