// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/value.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

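/* After a helper call, the caller-saved registers r1-r5 are scratched, so
 * the store of r1 into the map value below must be rejected with
 * "R1 !read_ok" for both privileged and unprivileged loads.
 */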
SEC("socket")
__description("map element value store of cleared call register")
__failure __msg("R1 !read_ok")
__failure_unpriv __msg_unpriv("R1 !read_ok")
__naked void store_of_cleared_call_register(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	*(u64*)(r0 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

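/* A series of misaligned u64 stores at various offsets within the 48-byte
 * map value; these are accepted when the program is loaded with
 * BPF_F_ANY_ALIGNMENT, while unprivileged loading is expected to fail with
 * "R0 leaks addr".
 */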
SEC("socket")
__description("map element value with unaligned store")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void element_value_with_unaligned_store(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r0 += 3;					\
	r1 = 42;					\
	*(u64*)(r0 + 0) = r1;				\
	r1 = 43;					\
	*(u64*)(r0 + 2) = r1;				\
	r1 = 44;					\
	*(u64*)(r0 - 2) = r1;				\
	r8 = r0;					\
	r1 = 32;					\
	*(u64*)(r8 + 0) = r1;				\
	r1 = 33;					\
	*(u64*)(r8 + 2) = r1;				\
	r1 = 34;					\
	*(u64*)(r8 - 2) = r1;				\
	r8 += 5;					\
	r1 = 22;					\
	*(u64*)(r8 + 0) = r1;				\
	r1 = 23;					\
	*(u64*)(r8 + 4) = r1;				\
	r1 = 24;					\
	*(u64*)(r8 - 7) = r1;				\
	r7 = r8;					\
	r7 += 3;					\
	r1 = 22;					\
	*(u64*)(r7 + 0) = r1;				\
	r1 = 23;					\
	*(u64*)(r7 + 4) = r1;				\
	r1 = 24;					\
	*(u64*)(r7 - 4) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

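/* Misaligned u64 loads from the map value after bounds-checking the index
 * field; accepted with BPF_F_ANY_ALIGNMENT, while unprivileged loading is
 * expected to fail with "R0 leaks addr".
 */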
SEC("socket")
__description("map element value with unaligned load")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void element_value_with_unaligned_load(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = *(u32*)(r0 + 0);				\
	if r1 >= %[max_entries] goto l0_%=;		\
	r0 += 3;					\
	r7 = *(u64*)(r0 + 0);				\
	r7 = *(u64*)(r0 + 2);				\
	r8 = r0;					\
	r7 = *(u64*)(r8 + 0);				\
	r7 = *(u64*)(r8 + 2);				\
	r0 += 5;					\
	r7 = *(u64*)(r0 + 0);				\
	r7 = *(u64*)(r0 + 4);				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(max_entries, MAX_ENTRIES)
	: __clobber_all);
}

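/* The map value pointer in r0 is spilled to the stack at fp-184 and filled
 * back into r3; the verifier must preserve its pointer type and bounds so
 * that the final store through r3 remains valid.
 */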
SEC("socket")
__description("map element value is preserved across register spilling")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void is_preserved_across_register_spilling(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r0 += %[test_val_foo];				\
	r1 = 42;					\
	*(u64*)(r0 + 0) = r1;				\
	r1 = r10;					\
	r1 += -184;					\
	*(u64*)(r1 + 0) = r0;				\
	r3 = *(u64*)(r1 + 0);				\
	r1 = 42;					\
	*(u64*)(r3 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";