// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/lwt.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

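/* Direct packet access from LWT programs: as the expectations below encode,
 * the verifier accepts packet reads for lwt_in, lwt_out and lwt_xmit, but
 * rejects packet writes for everything except lwt_xmit. Each test loads
 * skb->data and skb->data_end and proves data + 8 <= data_end before
 * touching the packet.
 */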
SEC("lwt_in")
__description("invalid direct packet write for LWT_IN")
__failure __msg("cannot write into packet")
__naked void packet_write_for_lwt_in(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("lwt_out")
__description("invalid direct packet write for LWT_OUT")
__failure __msg("cannot write into packet")
__naked void packet_write_for_lwt_out(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("lwt_xmit")
__description("direct packet write for LWT_XMIT")
__success __retval(0)
__naked void packet_write_for_lwt_xmit(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("lwt_in")
__description("direct packet read for LWT_IN")
__success __retval(0)
__naked void packet_read_for_lwt_in(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("lwt_out")
__description("direct packet read for LWT_OUT")
__success __retval(0)
__naked void packet_read_for_lwt_out(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("lwt_xmit")
__description("direct packet read for LWT_XMIT")
__success __retval(0)
__naked void packet_read_for_lwt_xmit(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

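/* Overlapping range checks: data + 8 is validated first, then a shorter
 * data + 6 check on the same packet; the u16 load at offset 6 needs the
 * full 8 bytes already proven by the first check and must still be
 * accepted.
 */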
SEC("lwt_xmit")
__description("overlapping checks for direct packet access")
__success __retval(0)
__naked void checks_for_direct_packet_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r1 = r2;					\
	r1 += 6;					\
	if r1 > r3 goto l0_%=;				\
	r0 = *(u16*)(r2 + 6);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

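/* lwt_xmit programs may grow packet headroom via bpf_skb_change_head();
 * the helper is called twice here (34 bytes, then 42 bytes) rather than
 * once, see the in-line note about s390 below.
 */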
SEC("lwt_xmit")
__description("make headroom for LWT_XMIT")
__success __retval(0)
__naked void make_headroom_for_lwt_xmit(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r2 = 34;					\
	r3 = 0;						\
	call %[bpf_skb_change_head];			\
	/* split for s390 to succeed */			\
	r1 = r6;					\
	r2 = 42;					\
	r3 = 0;						\
	call %[bpf_skb_change_head];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_skb_change_head)
	: __clobber_all);
}

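/* skb->tc_classid is meant for TC programs; the loads below, whether a
 * full word or a half word, are expected to be rejected as an invalid
 * bpf_context access.
 */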
SEC("socket")
__description("invalid access of tc_classid for LWT_IN")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_lwt_in(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]);	\
	exit;						\
"	:
	: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}

SEC("socket")
__description("invalid access of tc_classid for LWT_OUT")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_lwt_out(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]);	\
	exit;						\
"	:
	: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}

SEC("socket")
__description("invalid access of tc_classid for LWT_XMIT")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_lwt_xmit(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]);	\
	exit;						\
"	:
	: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}

SEC("lwt_in")
__description("check skb->tc_classid half load not permitted for lwt prog")
__failure __msg("invalid bpf_context access")
__naked void not_permitted_for_lwt_prog(void)
{
	asm volatile (
	"r0 = 0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(u16*)(r1 + %[__sk_buff_tc_classid]);"
#else
	"r0 = *(u16*)(r1 + %[__imm_0]);"
#endif
	"exit;"
	:
	: __imm_const(__imm_0, offsetof(struct __sk_buff, tc_classid) + 2),
	  __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";