// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/sock.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

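/* sizeof_field() and offsetofend() mirror the kernel helpers of the same
 * name; offsetofend() computes the offset of the first byte past MEMBER,
 * which the "beyond last field" tests below use to probe out-of-bounds
 * reads.
 */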
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))

struct {
        __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u64);
} map_reuseport_array SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_SOCKHASH);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
} map_sockhash SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_SOCKMAP);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
} map_sockmap SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_XSKMAP);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
} map_xskmap SEC(".maps");

struct val {
        int cnt;
        struct bpf_spin_lock l;
};

struct {
        __uint(type, BPF_MAP_TYPE_SK_STORAGE);
        __uint(max_entries, 0);
        __type(key, int);
        __type(value, struct val);
        __uint(map_flags, BPF_F_NO_PREALLOC);
} sk_storage_map SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 1);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

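/* Each test below is a __naked function whose body is hand-written BPF
 * assembly. __description() names the test, and __success/__failure plus
 * __msg() (all from bpf_misc.h) encode the verifier verdict and log
 * message the test runner expects.
 */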
SEC("cgroup/skb")
__description("skb->sk: no NULL check")
__failure __msg("invalid mem access 'sock_common_or_null'")
__failure_unpriv
__naked void skb_sk_no_null_check(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        r0 = *(u32*)(r1 + 0); \
        r0 = 0; \
        exit; \
" :
        : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("skb->sk: sk->family [non fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_family_non_fullsock_field_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_family]); \
        r0 = 0; \
        exit; \
" :
        : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("skb->sk: sk->type [fullsock field]")
__failure __msg("invalid sock_common access")
__failure_unpriv
__naked void sk_sk_type_fullsock_field_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_type]); \
        r0 = 0; \
        exit; \
" :
        : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
        : __clobber_all);
}

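/* skb->sk is a sock_common pointer that may be NULL. bpf_sk_fullsock()
 * upgrades it to a fullsock, but the verifier requires a NULL check both
 * on the helper's argument and on its return value, as the next tests
 * show.
 */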
SEC("cgroup/skb")
__description("bpf_sk_fullsock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        call %[bpf_sk_fullsock]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        r0 = *(u32*)(r0 + %[bpf_sock_type]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_fullsock_field_2(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_type]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->family [non fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_family_non_fullsock_field_2(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        exit; \
l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_family]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->state [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_state_narrow_load(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_state]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_state, offsetof(struct bpf_sock, state))
        : __clobber_all);
}

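/* dst_port is a 2-byte field. The 4-byte load at its offset stays
 * allowed for backward compatibility (it used to be exposed as a 32-bit
 * field), while narrow byte/half loads are accepted only as long as they
 * stay within the two bytes of the field itself.
 */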
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)")
__success __success_unpriv __retval(0)
__naked void port_word_load_backward_compatibility(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_dst_port]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_half_load(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u16*)(r0 + %[__imm_0]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_byte_load(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r2 = *(u8*)(r0 + %[bpf_sock_dst_port]); \
        r2 = *(u8*)(r0 + %[__imm_0]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_byte_load_invalid(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_2(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]")
__success __success_unpriv __retval(0)
__naked void dst_ip6_load_2nd_byte(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_narrow_load(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_type]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->protocol [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_protocol_narrow_load(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_protocol]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): beyond last field")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping))
        : __clobber_all);
}

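/* The same access rules apply via bpf_tcp_sock(): its argument must be a
 * non-NULL sock_common, its return value must be NULL-checked, and loads
 * must not reach past the last field of struct bpf_tcp_sock.
 */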
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_2(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        call %[bpf_tcp_sock]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'tcp_sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_2(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_tcp_sock]; \
        r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_tcp_sock]; \
        if r0 != 0 goto l1_%=; \
        exit; \
l1_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->bytes_acked")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_bytes_acked(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_tcp_sock]; \
        if r0 != 0 goto l1_%=; \
        exit; \
l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): beyond last field")
__failure __msg("invalid tcp_sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_2(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_tcp_sock]; \
        if r0 != 0 goto l1_%=; \
        exit; \
l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked))
        : __clobber_all);
}

SEC("cgroup/skb")
__description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_2(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        exit; \
l1_%=: r1 = r0; \
        call %[bpf_tcp_sock]; \
        if r0 != 0 goto l2_%=; \
        exit; \
l2_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm(bpf_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
          __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
        : __clobber_all);
}

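/* Sockets obtained from skb->sk, bpf_sk_fullsock() or bpf_tcp_sock() are
 * not reference-counted, so passing them to bpf_sk_release() must be
 * rejected.
 */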
SEC("tc")
__description("bpf_sk_release(skb->sk)")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_release_skb_sk(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 == 0 goto l0_%=; \
        call %[bpf_sk_release]; \
l0_%=: r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_release),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("tc")
__description("bpf_sk_release(bpf_sk_fullsock(skb->sk))")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_fullsock_skb_sk(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        exit; \
l1_%=: r1 = r0; \
        call %[bpf_sk_release]; \
        r0 = 1; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm(bpf_sk_release),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("tc")
__description("bpf_sk_release(bpf_tcp_sock(skb->sk))")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_tcp_sock_skb_sk(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_tcp_sock]; \
        if r0 != 0 goto l1_%=; \
        exit; \
l1_%=: r1 = r0; \
        call %[bpf_sk_release]; \
        r0 = 1; \
        exit; \
" :
        : __imm(bpf_sk_release),
          __imm(bpf_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

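/* bpf_sk_storage_get() accepts NULL as the init value, but when a value
 * is supplied together with BPF_SK_STORAGE_GET_F_CREATE it must be a
 * pointer (e.g. to the stack), not a scalar.
 */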
SEC("tc")
__description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL")
__success __retval(0)
__naked void sk_null_0_value_null(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r4 = 0; \
        r3 = 0; \
        r2 = r0; \
        r1 = %[sk_storage_map] ll; \
        call %[bpf_sk_storage_get]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm(bpf_sk_storage_get),
          __imm_addr(sk_storage_map),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("tc")
__description("sk_storage_get(map, skb->sk, 1, 1): value == 1")
__failure __msg("R3 type=scalar expected=fp")
__naked void sk_1_1_value_1(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r4 = 1; \
        r3 = 1; \
        r2 = r0; \
        r1 = %[sk_storage_map] ll; \
        call %[bpf_sk_storage_get]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm(bpf_sk_storage_get),
          __imm_addr(sk_storage_map),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

SEC("tc")
__description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value")
__success __retval(0)
__naked void stack_value_1_stack_value(void)
{
        asm volatile (" \
        r2 = 0; \
        *(u64*)(r10 - 8) = r2; \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: call %[bpf_sk_fullsock]; \
        if r0 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r4 = 1; \
        r3 = r10; \
        r3 += -8; \
        r2 = r0; \
        r1 = %[sk_storage_map] ll; \
        call %[bpf_sk_storage_get]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_sk_fullsock),
          __imm(bpf_sk_storage_get),
          __imm_addr(sk_storage_map),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

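/* sk_storage maps have dedicated helpers; the generic
 * bpf_map_lookup_elem() must refuse them (map_type 24 is
 * BPF_MAP_TYPE_SK_STORAGE).
 */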
SEC("tc")
__description("bpf_map_lookup_elem(smap, &key)")
__failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem")
__naked void map_lookup_elem_smap_key(void)
{
        asm volatile (" \
        r1 = 0; \
        *(u32*)(r10 - 4) = r1; \
        r2 = r10; \
        r2 += -4; \
        r1 = %[sk_storage_map] ll; \
        call %[bpf_map_lookup_elem]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_map_lookup_elem),
          __imm_addr(sk_storage_map)
        : __clobber_all);
}

SEC("xdp")
__description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id")
__success __retval(0)
__naked void xskmap_key_xs_queue_id(void)
{
        asm volatile (" \
        r1 = 0; \
        *(u32*)(r10 - 8) = r1; \
        r2 = r10; \
        r2 += -8; \
        r1 = %[map_xskmap] ll; \
        call %[bpf_map_lookup_elem]; \
        if r0 != 0 goto l0_%=; \
        exit; \
l0_%=: r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]); \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_map_lookup_elem),
          __imm_addr(map_xskmap),
          __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id))
        : __clobber_all);
}

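/* Unlike xskmap lookups, sockmap/sockhash lookups return a referenced
 * socket: dropping it without bpf_sk_release() leaks the reference and
 * the verifier rejects the program, as the next two tests show; the two
 * after them demonstrate the correct release pattern.
 */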
SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockmap_key(void)
{
        asm volatile (" \
        r1 = 0; \
        *(u32*)(r10 - 4) = r1; \
        r2 = r10; \
        r2 += -4; \
        r1 = %[map_sockmap] ll; \
        call %[bpf_map_lookup_elem]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_map_lookup_elem),
          __imm_addr(map_sockmap)
        : __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key)")
__failure __msg("Unreleased reference id=2 alloc_insn=6")
__naked void map_lookup_elem_sockhash_key(void)
{
        asm volatile (" \
        r1 = 0; \
        *(u32*)(r10 - 4) = r1; \
        r2 = r10; \
        r2 += -4; \
        r1 = %[map_sockhash] ll; \
        call %[bpf_map_lookup_elem]; \
        r0 = 0; \
        exit; \
" :
        : __imm(bpf_map_lookup_elem),
          __imm_addr(map_sockhash)
        : __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_1(void)
{
        asm volatile (" \
        r1 = 0; \
        *(u32*)(r10 - 4) = r1; \
        r2 = r10; \
        r2 += -4; \
        r1 = %[map_sockmap] ll; \
        call %[bpf_map_lookup_elem]; \
        if r0 != 0 goto l0_%=; \
        exit; \
l0_%=: r1 = r0; \
        r0 = *(u32*)(r0 + %[bpf_sock_type]); \
        call %[bpf_sk_release]; \
        exit; \
" :
        : __imm(bpf_map_lookup_elem),
          __imm(bpf_sk_release),
          __imm_addr(map_sockmap),
          __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
        : __clobber_all);
}

SEC("sk_skb")
__description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
__success
__naked void field_bpf_sk_release_sk_2(void)
{
        asm volatile (" \
        r1 = 0; \
        *(u32*)(r10 - 4) = r1; \
        r2 = r10; \
        r2 += -4; \
        r1 = %[map_sockhash] ll; \
        call %[bpf_map_lookup_elem]; \
        if r0 != 0 goto l0_%=; \
        exit; \
l0_%=: r1 = r0; \
        r0 = *(u32*)(r0 + %[bpf_sock_type]); \
        call %[bpf_sk_release]; \
        exit; \
" :
        : __imm(bpf_map_lookup_elem),
          __imm(bpf_sk_release),
          __imm_addr(map_sockhash),
          __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
        : __clobber_all);
}

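/* bpf_sk_select_reuseport() accepts reuseport arrays as well as
 * sockmap/sockhash as the map argument.
 */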
SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)")
__success
__naked void ctx_reuseport_array_key_flags(void)
{
        asm volatile (" \
        r4 = 0; \
        r2 = 0; \
        *(u32*)(r10 - 4) = r2; \
        r3 = r10; \
        r3 += -4; \
        r2 = %[map_reuseport_array] ll; \
        call %[bpf_sk_select_reuseport]; \
        exit; \
" :
        : __imm(bpf_sk_select_reuseport),
          __imm_addr(map_reuseport_array)
        : __clobber_all);
}

SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)")
__success
__naked void reuseport_ctx_sockmap_key_flags(void)
{
        asm volatile (" \
        r4 = 0; \
        r2 = 0; \
        *(u32*)(r10 - 4) = r2; \
        r3 = r10; \
        r3 += -4; \
        r2 = %[map_sockmap] ll; \
        call %[bpf_sk_select_reuseport]; \
        exit; \
" :
        : __imm(bpf_sk_select_reuseport),
          __imm_addr(map_sockmap)
        : __clobber_all);
}

SEC("sk_reuseport")
__description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)")
__success
__naked void reuseport_ctx_sockhash_key_flags(void)
{
        asm volatile (" \
        r4 = 0; \
        r2 = 0; \
        *(u32*)(r10 - 4) = r2; \
        r3 = r10; \
        r3 += -4; \
        r2 = %[map_sockmap] ll; \
        call %[bpf_sk_select_reuseport]; \
        exit; \
" :
        : __imm(bpf_sk_select_reuseport),
          __imm_addr(map_sockmap)
        : __clobber_all);
}

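/* Each bpf_skc_to_*() cast returns its own or_null pointer: NULL-checking
 * the tcp_request_sock result (r8) says nothing about the tcp_sock
 * result (r7), so dereferencing r7 must still fail.
 */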
SEC("tc")
__description("mark null check on return value of bpf_skc_to helpers")
__failure __msg("invalid mem access")
__naked void of_bpf_skc_to_helpers(void)
{
        asm volatile (" \
        r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
        if r1 != 0 goto l0_%=; \
        r0 = 0; \
        exit; \
l0_%=: r6 = r1; \
        call %[bpf_skc_to_tcp_sock]; \
        r7 = r0; \
        r1 = r6; \
        call %[bpf_skc_to_tcp_request_sock]; \
        r8 = r0; \
        if r8 != 0 goto l1_%=; \
        r0 = 0; \
        exit; \
l1_%=: r0 = *(u8*)(r7 + 0); \
        exit; \
" :
        : __imm(bpf_skc_to_tcp_request_sock),
          __imm(bpf_skc_to_tcp_sock),
          __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
        : __clobber_all);
}

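/* Context access checks for cgroup sock programs: struct bpf_sock is the
 * context here, and these reads are rejected as invalid bpf_context
 * accesses for the given attach type, offset and size.
 */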
SEC("cgroup/post_bind4")
__description("sk->src_ip6[0] [load 1st byte]")
__failure __msg("invalid bpf_context access off=28 size=2")
__naked void post_bind4_read_src_ip6(void)
{
        asm volatile (" \
        r6 = r1; \
        r7 = *(u16*)(r6 + %[bpf_sock_src_ip6_0]); \
        r0 = 1; \
        exit; \
" :
        : __imm_const(bpf_sock_src_ip6_0, offsetof(struct bpf_sock, src_ip6[0]))
        : __clobber_all);
}

SEC("cgroup/post_bind4")
__description("sk->mark [load mark]")
__failure __msg("invalid bpf_context access off=16 size=2")
__naked void post_bind4_read_mark(void)
{
        asm volatile (" \
        r6 = r1; \
        r7 = *(u16*)(r6 + %[bpf_sock_mark]); \
        r0 = 1; \
        exit; \
" :
        : __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark))
        : __clobber_all);
}

SEC("cgroup/post_bind6")
__description("sk->src_ip4 [load src_ip4]")
__failure __msg("invalid bpf_context access off=24 size=2")
__naked void post_bind6_read_src_ip4(void)
{
        asm volatile (" \
        r6 = r1; \
        r7 = *(u16*)(r6 + %[bpf_sock_src_ip4]); \
        r0 = 1; \
        exit; \
" :
        : __imm_const(bpf_sock_src_ip4, offsetof(struct bpf_sock, src_ip4))
        : __clobber_all);
}

SEC("cgroup/sock_create")
__description("sk->src_port [word load]")
__failure __msg("invalid bpf_context access off=44 size=2")
__naked void sock_create_read_src_port(void)
{
        asm volatile (" \
        r6 = r1; \
        r7 = *(u16*)(r6 + %[bpf_sock_src_port]); \
        r0 = 1; \
        exit; \
" :
        : __imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port))
        : __clobber_all);
}

__noinline
long skb_pull_data2(struct __sk_buff *sk, __u32 len)
{
        return bpf_skb_pull_data(sk, len);
}

__noinline
long skb_pull_data1(struct __sk_buff *sk, __u32 len)
{
        return skb_pull_data2(sk, len);
}

/* global function calls bpf_skb_pull_data(), which invalidates packet
 * pointers established before global function call.
 */
SEC("tc")
__failure __msg("invalid mem access")
int invalidate_pkt_pointers_from_global_func(struct __sk_buff *sk)
{
        int *p = (void *)(long)sk->data;

        if ((void *)(p + 1) > (void *)(long)sk->data_end)
                return TCX_DROP;
        skb_pull_data1(sk, 0);
        *p = 42; /* this is unsafe */
        return TCX_PASS;
}

__noinline
int tail_call(struct __sk_buff *sk)
{
        bpf_tail_call_static(sk, &jmp_table, 0);
        return 0;
}

/* Tail calls invalidate packet pointers. */
SEC("tc")
__failure __msg("invalid mem access")
int invalidate_pkt_pointers_by_tail_call(struct __sk_buff *sk)
{
        int *p = (void *)(long)sk->data;

        if ((void *)(p + 1) > (void *)(long)sk->data_end)
                return TCX_DROP;
        tail_call(sk);
        *p = 42; /* this is unsafe */
        return TCX_PASS;
}

char _license[] SEC("license") = "GPL";