// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/search_pruning.c */

#include <linux/bpf.h>
#include <../../../include/linux/filter.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

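/* In the two tests below, R0 ends up as either a scalar loaded from the
 * map value or as a pointer (the frame pointer), depending on the lookup
 * result. If state pruning treated the scalar and pointer states as
 * equivalent, a kernel address could be returned to user space; the
 * unprivileged-mode message checks that the leak is flagged.
 */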
SEC("socket")
__description("pointer/scalar confusion in state equality check (way 1)")
__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value")
__retval(POINTER_VALUE)
__naked void state_equality_check_way_1(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r0 = *(u64*)(r0 + 0);				\
	goto l1_%=;					\
l0_%=:	r0 = r10;					\
l1_%=:	goto l2_%=;					\
l2_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("socket")
__description("pointer/scalar confusion in state equality check (way 2)")
__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value")
__retval(POINTER_VALUE)
__naked void state_equality_check_way_2(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	r0 = r10;					\
	goto l1_%=;					\
l0_%=:	r0 = *(u64*)(r0 + 0);				\
l1_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

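/* Both conditional jumps below are unsigned comparisons against zero and
 * therefore always taken, so the two "r0 = 0" stores are dead code. The
 * verifier must not let those never-executed writes screen the final
 * read of R0, which is in fact never initialized.
 */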
SEC("lwt_in")
__description("liveness pruning and write screening")
__failure __msg("R0 !read_ok")
__naked void liveness_pruning_and_write_screening(void)
{
	asm volatile ("					\
	/* Get an unknown value */			\
	r2 = *(u32*)(r1 + 0);				\
	/* branch conditions teach us nothing about R2 */\
	if r2 >= 0 goto l0_%=;				\
	r0 = 0;						\
l0_%=:	if r2 >= 0 goto l1_%=;				\
	r0 = 0;						\
l1_%=:	exit;						\
"	::: __clobber_all);
}

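/* The bounds check below caps the map value index from above only, and
 * with a signed comparison at that; after the 32-bit shift the offset
 * added to R0 is effectively unbounded, which the verifier must report.
 */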
SEC("socket")
__description("varlen_map_value_access pruning")
__failure __msg("R0 unbounded memory access")
__failure_unpriv __msg_unpriv("R0 leaks addr")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void varlen_map_value_access_pruning(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = *(u64*)(r0 + 0);				\
	w2 = %[max_entries];				\
	if r2 s> r1 goto l1_%=;				\
	w1 = 0;						\
l1_%=:	w1 <<= 2;					\
	r0 += r1;					\
	goto l2_%=;					\
l2_%=:	r1 = %[test_val_foo];				\
	*(u64*)(r0 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(max_entries, MAX_ENTRIES),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

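/* fp[-16] holds either 0 or 1 depending on map contents, and the nonzero
 * case dereferences the scalar r6 = 0. The verifier has to walk both
 * branches rather than prune the second one, otherwise the invalid R6
 * access would go unnoticed.
 */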
SEC("tracepoint")
__description("search pruning: all branches should be verified (nop operation)")
__failure __msg("R6 invalid mem access 'scalar'")
__naked void should_be_verified_nop_operation(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r3 = *(u64*)(r0 + 0);				\
	if r3 == 0xbeef goto l1_%=;			\
	r4 = 0;						\
	goto l2_%=;					\
l1_%=:	r4 = 1;						\
l2_%=:	*(u64*)(r10 - 16) = r4;				\
	call %[bpf_ktime_get_ns];			\
	r5 = *(u64*)(r10 - 16);				\
	if r5 == 0 goto l0_%=;				\
	r6 = 0;						\
	r1 = 0xdead;					\
	*(u64*)(r6 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

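/* Only one branch initializes fp[-16]; the other one writes fp[-24]. The
 * read of fp[-16] after the helper call is thus valid on the first path
 * only, and pruning must not let the second path skip its verification.
 */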
SEC("socket")
__description("search pruning: all branches should be verified (invalid stack access)")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid read from stack off -16+0 size 8")
__retval(0)
__naked void be_verified_invalid_stack_access(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r3 = *(u64*)(r0 + 0);				\
	r4 = 0;						\
	if r3 == 0xbeef goto l1_%=;			\
	*(u64*)(r10 - 16) = r4;				\
	goto l2_%=;					\
l1_%=:	*(u64*)(r10 - 24) = r4;				\
l2_%=:	call %[bpf_ktime_get_ns];			\
	r5 = *(u64*)(r10 - 16);				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

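/* r6 is either 32 (past the end of the 8-byte map value) or 4. The u32
 * spill/fill via fp[-8] must keep r8 precisely tracked so that the state
 * with r6 = 32 is not pruned and the out-of-range access is reported.
 */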
SEC("tracepoint")
__description("precision tracking for u32 spill/fill")
__failure __msg("R0 min value is outside of the allowed memory range")
__naked void tracking_for_u32_spill_fill(void)
{
	asm volatile ("					\
	r7 = r1;					\
	call %[bpf_get_prandom_u32];			\
	w6 = 32;					\
	if r0 == 0 goto l0_%=;				\
	w6 = 4;						\
l0_%=:	/* Additional insns to introduce a pruning point. */\
	call %[bpf_get_prandom_u32];			\
	r3 = 0;						\
	r3 = 0;						\
	if r0 == 0 goto l1_%=;				\
	r3 = 0;						\
l1_%=:	/* u32 spill/fill */				\
	*(u32*)(r10 - 8) = r6;				\
	r8 = *(u32*)(r10 - 8);				\
	/* out-of-bound map value access for r6=32 */	\
	r1 = 0;						\
	*(u64*)(r10 - 16) = r1;				\
	r2 = r10;					\
	r2 += -16;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l2_%=;				\
	r0 += r8;					\
	r1 = *(u32*)(r0 + 0);				\
l2_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

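/* Two u32 spills at fp[-4] and fp[-8] are read back as a single u64 fill
 * from fp[-8]. After the fill, r8 must be unknown and subject to precision
 * tracking; the inline comments describe the expected backtracking.
 */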
SEC("tracepoint")
__description("precision tracking for u32 spills, u64 fill")
__failure __msg("div by zero")
__naked void for_u32_spills_u64_fill(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r6 = r0;					\
	w7 = 0xffffffff;				\
	/* Additional insns to introduce a pruning point. */\
	r3 = 1;						\
	r3 = 1;						\
	r3 = 1;						\
	r3 = 1;						\
	call %[bpf_get_prandom_u32];			\
	if r0 == 0 goto l0_%=;				\
	r3 = 1;						\
l0_%=:	w3 /= 0;					\
	/* u32 spills, u64 fill */			\
	*(u32*)(r10 - 4) = r6;				\
	*(u32*)(r10 - 8) = r7;				\
	r8 = *(u64*)(r10 - 8);				\
	/* if r8 != X goto pc+1 r8 known in fallthrough branch */\
	if r8 != 0xffffffff goto l1_%=;			\
	r3 = 1;						\
l1_%=:	/* if r8 == X goto pc+1 condition always true on first\
	 * traversal, so starts backtracking to mark r8 as requiring\
	 * precision. r7 marked as needing precision. r6 not marked\
	 * since it's not tracked.			\
	 */						\
	if r8 == 0xffffffff goto l2_%=;			\
	/* fails if r8 correctly marked unknown after fill. */\
	w3 /= 0;					\
l2_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

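/* When r0 == 0 the spills are skipped, so the two paths reach l0 with
 * different amounts of allocated stack. Pruning should still match the
 * states, which is what keeps the processed-instruction count at 15.
 */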
SEC("socket")
__description("allocated_stack")
__success __msg("processed 15 insns")
__success_unpriv __msg_unpriv("") __log_level(1) __retval(0)
__naked void allocated_stack(void)
{
	asm volatile ("					\
	r6 = r1;					\
	call %[bpf_get_prandom_u32];			\
	r7 = r0;					\
	if r0 == 0 goto l0_%=;				\
	r0 = 0;						\
	*(u64*)(r10 - 8) = r6;				\
	r6 = *(u64*)(r10 - 8);				\
	*(u8*)(r10 - 9) = r7;				\
	r7 = *(u8*)(r10 - 9);				\
l0_%=:	if r0 != 0 goto l1_%=;				\
l1_%=:	if r0 != 0 goto l2_%=;				\
l2_%=:	if r0 != 0 goto l3_%=;				\
l3_%=:	if r0 != 0 goto l4_%=;				\
l4_%=:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

/* The test performs a conditional 64-bit write to stack location fp[-8];
 * this is followed by an unconditional 8-bit write to fp[-8], after which
 * data is read from fp[-8]. This sequence is unsafe.
 *
 * The test would be mistakenly marked as safe without dst register parent
 * preservation in the verifier.c:copy_register_state() function.
 *
 * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the
 * checkpoint state after the conditional 64-bit assignment.
 */

SEC("socket")
__description("write tracking and register parent chain bug")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid read from stack off -8+1 size 8")
__retval(0) __flag(BPF_F_TEST_STATE_FREQ)
__naked void and_register_parent_chain_bug(void)
{
	asm volatile ("					\
	/* r6 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	r6 = r0;					\
	/* r0 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	/* if r0 > r6 goto +1 */			\
	if r0 > r6 goto l0_%=;				\
	/* *(u64 *)(r10 - 8) = 0xdeadbeef */		\
	r0 = 0xdeadbeef;				\
	*(u64*)(r10 - 8) = r0;				\
l0_%=:	r1 = 42;					\
	*(u8*)(r10 - 8) = r1;				\
	r2 = *(u64*)(r10 - 8);				\
	/* exit(0) */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

/* Without a checkpoint forcibly inserted at the back-edge of a loop, this
 * test would take a very long time to verify.
 */
SEC("kprobe")
__failure __log_level(4)
__msg("BPF program is too large.")
__naked void short_loop1(void)
{
	asm volatile (
	"	r7 = *(u16 *)(r1 +0);"
	"1:	r7 += 0x1ab064b9;"
	"	.8byte %[jset];" /* same as 'if r7 & 0x702000 goto 1b;' */
	"	r7 &= 0x1ee60e;"
	"	r7 += r1;"
	"	if r7 s> 0x37d2 goto +0;"
	"	r0 = 0;"
	"	exit;"
	:
	: __imm_insn(jset, BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x702000, -2))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";