// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/sock.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))

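/* offsetofend() yields the offset of the first byte past MEMBER. The
 * "beyond last field" and "past sk->dst_port" tests below use it to read
 * just past a field and verify that the verifier rejects the access; e.g.
 * with dst_port a 2-byte field at offset 48 of struct bpf_sock,
 * offsetofend(struct bpf_sock, dst_port) evaluates to 50.
 */
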
struct {
        __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u64);
} map_reuseport_array SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_SOCKHASH);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
} map_sockhash SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_SOCKMAP);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
} map_sockmap SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_XSKMAP);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
} map_xskmap SEC(".maps");

struct val {
        int cnt;
        struct bpf_spin_lock l;
};

struct {
        __uint(type, BPF_MAP_TYPE_SK_STORAGE);
        __uint(max_entries, 0);
        __type(key, int);
        __type(value, struct val);
        __uint(map_flags, BPF_F_NO_PREALLOC);
} sk_storage_map SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 1);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

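/* Each test below is a __naked function whose body is BPF assembly, so the
 * verifier sees the exact instruction stream. Helper addresses, map
 * addresses and field offsets are patched in through the __imm(),
 * __imm_addr() and __imm_const() macros from bpf_misc.h, and the expected
 * verdict is encoded by the __success/__failure annotations together with
 * a __msg() substring that must appear in the verifier log.
 */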
SEC("cgroup/skb")
__description("skb->sk: no NULL check")
__failure __msg("invalid mem access 'sock_common_or_null'")
__failure_unpriv
__naked void skb_sk_no_null_check(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        r0 = *(u32*)(r1 + 0); \
        r0 = 0; \
        exit; \
" :
        : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

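/* skb->sk has verifier type sock_common_or_null: it must be NULL-checked
 * before any dereference, and even then only struct sock_common fields
 * (family, state, src/dst addresses and ports) are readable; fullsock-only
 * fields such as type, protocol or mark additionally require
 * bpf_sk_fullsock(), as the next two tests demonstrate. The C function
 * below is an illustrative rendering of the first of them; it is a sketch
 * added for exposition, not part of the converted suite.
 */
SEC("cgroup/skb")
__description("skb->sk: sk->family [non fullsock field] (C sketch)")
__success __success_unpriv __retval(0)
int sk_family_non_fullsock_field_c(struct __sk_buff *skb)
{
        struct bpf_sock *sk = skb->sk;

        if (!sk)
                return 0;
        /* family lives in sock_common, so no bpf_sk_fullsock() is needed */
        return sk->family ? 0 : 0;
}
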
SEC("cgroup/skb")
__description("skb->sk: sk->family [non fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_family_non_fullsock_field_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_family]); \
        r0 = 0; \
        exit; \
" :
        : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("skb->sk: sk->type [fullsock field]")
__failure __msg("invalid sock_common access")
__failure_unpriv
__naked void sk_sk_type_fullsock_field_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_type]); \
        r0 = 0; \
        exit; \
" :
        : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_sk_fullsock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        call %[bpf_sk_fullsock]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        r0 = *(u32*)(r0 + %[bpf_sock_type]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_fullsock_field_2(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_type]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->family [non fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_family_non_fullsock_field_2(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        exit; \
l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_family]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->state [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_state_narrow_load(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_state]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_state, offsetof(struct bpf_sock, state))
        : __clobber_all);
}

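/* struct bpf_sock exposes dst_port as a 2-byte field followed by 2 bytes of
 * zero padding. The tests below check that a 4-byte load at the dst_port
 * offset is still accepted for backward compatibility, while half and byte
 * loads must fall entirely within the two valid bytes; any load starting in
 * the padding is rejected.
 */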
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)")
__success __success_unpriv __retval(0)
__naked void port_word_load_backward_compatibility(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_dst_port]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_half_load(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u16*)(r0 + %[__imm_0]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_byte_load(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r2 = *(u8*)(r0 + %[bpf_sock_dst_port]); \
        r2 = *(u8*)(r0 + %[__imm_0]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_byte_load_invalid(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_2(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]")
__success __success_unpriv __retval(0)
__naked void dst_ip6_load_2nd_byte(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_narrow_load(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_type]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->protocol [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_protocol_narrow_load(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_protocol]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): beyond last field")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping))
        : __clobber_all);
}

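/* bpf_tcp_sock() follows the same contract as bpf_sk_fullsock(): the
 * argument must be a NULL-checked socket pointer, the return value is
 * tcp_sock_or_null until checked, and the resulting pointer only permits
 * loads within struct bpf_tcp_sock.
 */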
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_2(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        call %[bpf_tcp_sock]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'tcp_sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_2(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_tcp_sock]; \
        r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_tcp_sock]; \
        if r0 != 0 goto l1_%=; \
        exit; \
l1_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->bytes_acked")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_bytes_acked(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_tcp_sock]; \
        if r0 != 0 goto l1_%=; \
        exit; \
l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): beyond last field")
__failure __msg("invalid tcp_sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_2(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_tcp_sock]; \
        if r0 != 0 goto l1_%=; \
        exit; \
l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_2(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        exit; \
l1_%=: r1 = r0; \
        call %[bpf_tcp_sock]; \
        if r0 != 0 goto l2_%=; \
        exit; \
l2_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm(bpf_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
        : __clobber_all);
}

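/* Neither skb->sk nor the pointers derived from it through
 * bpf_sk_fullsock() or bpf_tcp_sock() carry a reference, so passing any of
 * them to the release function bpf_sk_release() is rejected. Compare the
 * sockmap/sockhash lookup tests further down, where a reference is
 * acquired and must be released.
 */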
SEC("tc")
__description("bpf_sk_release(skb->sk)")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_release_skb_sk(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 == 0 goto l0_%=; \
        call %[bpf_sk_release]; \
l0_%=: r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_release),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("tc")
__description("bpf_sk_release(bpf_sk_fullsock(skb->sk))")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_fullsock_skb_sk(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        exit; \
l1_%=: r1 = r0; \
        call %[bpf_sk_release]; \
        r0 = 1; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm(bpf_sk_release),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("tc")
__description("bpf_sk_release(bpf_tcp_sock(skb->sk))")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_tcp_sock_skb_sk(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_tcp_sock]; \
        if r0 != 0 goto l1_%=; \
        exit; \
l1_%=: r1 = r0; \
        call %[bpf_sk_release]; \
        r0 = 1; \
        exit; \
" :
        : __imm(bpf_sk_release),
          __imm(bpf_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

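/* bpf_sk_storage_get(map, sk, value, flags): with flags set to
 * BPF_SK_STORAGE_GET_F_CREATE (1), the value argument may be NULL or point
 * to memory holding the initial value (the stack, in the third test
 * below); passing a plain scalar such as 1 is rejected.
 */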
SEC("tc")
__description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL")
__success __retval(0)
__naked void sk_null_0_value_null(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r4 = 0; \
        r3 = 0; \
        r2 = r0; \
        r1 = %[sk_storage_map] ll; \
        call %[bpf_sk_storage_get]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm(bpf_sk_storage_get),
          __imm_addr(sk_storage_map),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("tc")
__description("sk_storage_get(map, skb->sk, 1, 1): value == 1")
__failure __msg("R3 type=scalar expected=fp")
__naked void sk_1_1_value_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r4 = 1; \
        r3 = 1; \
        r2 = r0; \
        r1 = %[sk_storage_map] ll; \
        call %[bpf_sk_storage_get]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm(bpf_sk_storage_get),
          __imm_addr(sk_storage_map),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("tc")
__description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value")
__success __retval(0)
__naked void stack_value_1_stack_value(void)
{
        asm volatile (" \
        r2 = 0; \
        *(u64*)(r10 - 8) = r2; \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r4 = 1; \
        r3 = r10; \
        r3 += -8; \
        r2 = r0; \
        r1 = %[sk_storage_map] ll; \
        call %[bpf_sk_storage_get]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm(bpf_sk_storage_get),
          __imm_addr(sk_storage_map),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("tc")
__description("bpf_map_lookup_elem(smap, &key)")
__failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem")
__naked void map_lookup_elem_smap_key(void)
{
        asm volatile (" \
        r1 = 0; \
        *(u32*)(r10 - 4) = r1; \
        r2 = r10; \
        r2 += -4; \
        r1 = %[sk_storage_map] ll; \
        call %[bpf_map_lookup_elem]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_map_lookup_elem),
          __imm_addr(sk_storage_map)
        : __clobber_all);
}

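/* BPF_MAP_TYPE_SK_STORAGE (map_type 24 in the message above) cannot be
 * accessed with the generic bpf_map_lookup_elem() helper; it is only
 * reachable through the dedicated bpf_sk_storage_get()/
 * bpf_sk_storage_delete() helpers.
 */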
SEC("xdp")
__description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id")
__success __retval(0)
__naked void xskmap_key_xs_queue_id(void)
{
        asm volatile (" \
        r1 = 0; \
        *(u32*)(r10 - 8) = r1; \
        r2 = r10; \
        r2 += -8; \
        r1 = %[map_xskmap] ll; \
        call %[bpf_map_lookup_elem]; \
        if r0 != 0 goto l0_%=; \
        exit; \
l0_%=: r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_map_lookup_elem),
          __imm_addr(map_xskmap),
          __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id))
        : __clobber_all);
}

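/* Looking up an element in a sockmap or sockhash returns a
 * reference-counted socket pointer. Every non-NULL result must be passed
 * to bpf_sk_release() before the program exits, otherwise the verifier
 * reports an unreleased reference, as in the next two tests.
 */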
SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockmap_key(void)
{
        asm volatile (" \
        r1 = 0; \
        *(u32*)(r10 - 4) = r1; \
        r2 = r10; \
        r2 += -4; \
        r1 = %[map_sockmap] ll; \
        call %[bpf_map_lookup_elem]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_map_lookup_elem),
          __imm_addr(map_sockmap)
        : __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockhash_key(void)
{
        asm volatile (" \
        r1 = 0; \
        *(u32*)(r10 - 4) = r1; \
        r2 = r10; \
        r2 += -4; \
        r1 = %[map_sockhash] ll; \
        call %[bpf_map_lookup_elem]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_map_lookup_elem),
          __imm_addr(map_sockhash)
        : __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_1(void)
{
        asm volatile (" \
        r1 = 0; \
        *(u32*)(r10 - 4) = r1; \
        r2 = r10; \
        r2 += -4; \
        r1 = %[map_sockmap] ll; \
        call %[bpf_map_lookup_elem]; \
        if r0 != 0 goto l0_%=; \
        exit; \
l0_%=: r1 = r0; \
        r0 = *(u32*)(r0 + %[bpf_sock_type]); \
        call %[bpf_sk_release]; \
        exit; \
" :
        : __imm(bpf_map_lookup_elem),
          __imm(bpf_sk_release),
          __imm_addr(map_sockmap),
          __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
        : __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_2(void)
{
        asm volatile (" \
        r1 = 0; \
        *(u32*)(r10 - 4) = r1; \
        r2 = r10; \
        r2 += -4; \
        r1 = %[map_sockhash] ll; \
        call %[bpf_map_lookup_elem]; \
        if r0 != 0 goto l0_%=; \
        exit; \
l0_%=: r1 = r0; \
        r0 = *(u32*)(r0 + %[bpf_sock_type]); \
        call %[bpf_sk_release]; \
        exit; \
" :
        : __imm(bpf_map_lookup_elem),
          __imm(bpf_sk_release),
          __imm_addr(map_sockhash),
          __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
        : __clobber_all);
}

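/* bpf_sk_select_reuseport() accepts a REUSEPORT_SOCKARRAY as well as
 * sockmap and sockhash maps as the map argument.
 */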
SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)")
__success
__naked void ctx_reuseport_array_key_flags(void)
{
        asm volatile (" \
        r4 = 0; \
        r2 = 0; \
        *(u32*)(r10 - 4) = r2; \
        r3 = r10; \
        r3 += -4; \
        r2 = %[map_reuseport_array] ll; \
        call %[bpf_sk_select_reuseport]; \
        exit; \
" :
        : __imm(bpf_sk_select_reuseport),
          __imm_addr(map_reuseport_array)
        : __clobber_all);
}

SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)")
__success
__naked void reuseport_ctx_sockmap_key_flags(void)
{
        asm volatile (" \
        r4 = 0; \
        r2 = 0; \
        *(u32*)(r10 - 4) = r2; \
        r3 = r10; \
        r3 += -4; \
        r2 = %[map_sockmap] ll; \
        call %[bpf_sk_select_reuseport]; \
        exit; \
" :
        : __imm(bpf_sk_select_reuseport),
          __imm_addr(map_sockmap)
        : __clobber_all);
}

SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)")
__success
__naked void reuseport_ctx_sockhash_key_flags(void)
{
        asm volatile (" \
        r4 = 0; \
        r2 = 0; \
        *(u32*)(r10 - 4) = r2; \
        r3 = r10; \
        r3 += -4; \
        r2 = %[map_sockhash] ll; \
        call %[bpf_sk_select_reuseport]; \
        exit; \
" :
        : __imm(bpf_sk_select_reuseport),
          __imm_addr(map_sockhash)
        : __clobber_all);
}

SEC("tc")
__description("mark null check on return value of bpf_skc_to helpers")
__failure __msg("invalid mem access")
__naked void of_bpf_skc_to_helpers(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: r6 = r1; \
        call %[bpf_skc_to_tcp_sock]; \
        r7 = r0; \
        r1 = r6; \
        call %[bpf_skc_to_tcp_request_sock]; \
        r8 = r0; \
        if r8 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u8*)(r7 + 0); \
        exit; \
" :
        : __imm(bpf_skc_to_tcp_request_sock),
          __imm(bpf_skc_to_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

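/* cgroup sock programs (post_bind4/post_bind6/sock_create) use struct
 * bpf_sock directly as their context, and only a per-attach-type subset of
 * fields and access widths is allowed. The u16 loads below all fall
 * outside that subset and fail with "invalid bpf_context access".
 */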
SEC("cgroup/post_bind4")
__description("sk->src_ip6[0] [load 1st byte]")
__failure __msg("invalid bpf_context access off=28 size=2")
__naked void post_bind4_read_src_ip6(void)
{
        asm volatile (" \
        r6 = r1; \
        r7 = *(u16*)(r6 + %[bpf_sock_src_ip6_0]); \
        r0 = 1; \
        exit; \
" :
        : __imm_const(bpf_sock_src_ip6_0, offsetof(struct bpf_sock, src_ip6[0]))
        : __clobber_all);
}

SEC("cgroup/post_bind4")
__description("sk->mark [load mark]")
__failure __msg("invalid bpf_context access off=16 size=2")
__naked void post_bind4_read_mark(void)
{
        asm volatile (" \
        r6 = r1; \
        r7 = *(u16*)(r6 + %[bpf_sock_mark]); \
        r0 = 1; \
        exit; \
" :
        : __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark))
        : __clobber_all);
}

SEC("cgroup/post_bind6")
__description("sk->src_ip4 [load src_ip4]")
__failure __msg("invalid bpf_context access off=24 size=2")
__naked void post_bind6_read_src_ip4(void)
{
        asm volatile (" \
        r6 = r1; \
        r7 = *(u16*)(r6 + %[bpf_sock_src_ip4]); \
        r0 = 1; \
        exit; \
" :
        : __imm_const(bpf_sock_src_ip4, offsetof(struct bpf_sock, src_ip4))
        : __clobber_all);
}

SEC("cgroup/sock_create")
__description("sk->src_port [word load]")
__failure __msg("invalid bpf_context access off=44 size=2")
__naked void sock_create_read_src_port(void)
{
        asm volatile (" \
        r6 = r1; \
        r7 = *(u16*)(r6 + %[bpf_sock_src_port]); \
        r0 = 1; \
        exit; \
" :
        : __imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port))
        : __clobber_all);
}

__noinline
long skb_pull_data2(struct __sk_buff *sk, __u32 len)
{
        return bpf_skb_pull_data(sk, len);
}

__noinline
long skb_pull_data1(struct __sk_buff *sk, __u32 len)
{
        return skb_pull_data2(sk, len);
}

/* A global function that calls bpf_skb_pull_data() invalidates any packet
 * pointers established before the call.
 */
SEC("tc")
__failure __msg("invalid mem access")
int invalidate_pkt_pointers_from_global_func(struct __sk_buff *sk)
{
        int *p = (void *)(long)sk->data;

        if ((void *)(p + 1) > (void *)(long)sk->data_end)
                return TCX_DROP;
        skb_pull_data1(sk, 0);
        *p = 42; /* this is unsafe */
        return TCX_PASS;
}

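/* A safe variant would re-derive and re-check the packet pointers after
 * the call, along these lines (sketch):
 *
 *      skb_pull_data1(sk, 0);
 *      p = (void *)(long)sk->data;
 *      if ((void *)(p + 1) > (void *)(long)sk->data_end)
 *              return TCX_DROP;
 *      *p = 42;
 */
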
__noinline
int tail_call(struct __sk_buff *sk)
{
        bpf_tail_call_static(sk, &jmp_table, 0);
        return 0;
}

/* Tail calls invalidate packet pointers. */
SEC("tc")
__failure __msg("invalid mem access")
int invalidate_pkt_pointers_by_tail_call(struct __sk_buff *sk)
{
        int *p = (void *)(long)sk->data;

        if ((void *)(p + 1) > (void *)(long)sk->data_end)
                return TCX_DROP;
        tail_call(sk);
        *p = 42; /* this is unsafe */
        return TCX_PASS;
}

char _license[] SEC("license") = "GPL";