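/* Verifier test cases for __sk_buff context access: which fields each
 * BPF program type may load or store, and at what sizes, offsets and
 * alignments.
 */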
{
	"access skb fields ok",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, pkt_type)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, queue_mapping)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, protocol)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, vlan_present)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, vlan_tci)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, napi_id)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"access skb fields bad1",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"access skb fields bad2",
	.insns = {
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, pkt_type)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.errstr = "different pointers",
	.errstr_unpriv = "R1 pointer comparison",
	.result = REJECT,
},
{
	"access skb fields bad3",
	.insns = {
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, pkt_type)),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -12),
	},
	.fixup_map_hash_8b = { 6 },
	.errstr = "different pointers",
	.errstr_unpriv = "R1 pointer comparison",
	.result = REJECT,
},
{
	"access skb fields bad4",
	.insns = {
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -13),
	},
	.fixup_map_hash_8b = { 7 },
	.errstr = "different pointers",
	.errstr_unpriv = "R1 pointer comparison",
	.result = REJECT,
},
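/* Without an explicit .prog_type, the test runner loads these as socket
 * filters, where the socket-level fields below are not exposed.
 */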
{
	"invalid access __sk_buff family",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, family)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff remote_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip4)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff local_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip4)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff remote_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff local_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff remote_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_port)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff local_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_port)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
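/* The same socket-level fields are readable from SK_SKB programs; the
 * IPv6 addresses are accessed one 4-byte word at a time.
 */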
{
	"valid access __sk_buff family",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, family)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff remote_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip4)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff local_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip4)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff remote_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[3])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff local_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[3])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff remote_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_port)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff local_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_port)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
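/* SK_SKB programs still get no access to tc_classid or skb->mark;
 * mark is not readable and not writable here.
 */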
{
	"invalid access of tc_classid for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_classid)),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.errstr = "invalid bpf_context access",
},
{
	"invalid access of skb->mark for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.errstr = "invalid bpf_context access",
},
{
	"check skb->mark is not writeable by SK_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.errstr = "invalid bpf_context access",
},
{
	"check skb->tc_index is writeable by SK_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"check skb->priority is writeable by SK_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, priority)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
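/* SK_SKB allows direct packet access: load skb->data and skb->data_end,
 * then prove the access is in bounds before touching packet bytes.
 */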
{
	"direct packet read for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"direct packet write for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"overlapping checks for direct packet access SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
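/* Socket filters may not store to mark or tc_index at all. */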
{
	"check skb->mark is not writeable by sockets",
	.insns = {
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "R1 leaks addr",
	.result = REJECT,
},
{
	"check skb->tc_index is not writeable by sockets",
	.insns = {
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "R1 leaks addr",
	.result = REJECT,
},
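/* cb[0..4] is per-program scratch space: byte-granular loads and stores
 * are permitted, unlike most other __sk_buff fields.
 */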
{
	"check cb access: byte",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 3),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"__sk_buff->hash, offset 0, byte store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, hash)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"__sk_buff->tc_index, offset 3, byte store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index) + 3),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
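/* Byte loads of skb->hash are allowed at every offset; the #if blocks
 * pick equivalent byte offsets for little- and big-endian hosts.
 */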
{
	"check skb->hash byte load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#else
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash byte load permitted 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash byte load permitted 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 2),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash byte load permitted 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#else
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check cb access: byte, wrong type",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
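/* Half-word (2-byte) cb[] accesses are allowed on 2-byte boundaries;
 * odd offsets are rejected under strict alignment.
 */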
{
	"check cb access: half",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check cb access: half, unaligned",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 1),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check __sk_buff->hash, offset 0, half store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, hash)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check __sk_buff->tc_index, offset 2, half store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index) + 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check skb->hash half load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 2),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash half load permitted 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 2),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash half load not permitted, unaligned 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 1),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#endif
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"check skb->hash half load not permitted, unaligned 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 1),
#endif
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"check cb access: half, wrong type",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
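/* Word and double-word cb[] accesses follow the same pattern: natural
 * alignment is required when F_LOAD_WITH_STRICT_ALIGNMENT is set, and
 * an 8-byte access at cb[4] runs past the array and is rejected.
 */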
{
	"check cb access: word",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check cb access: word, unaligned 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: word, unaligned 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 1),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: word, unaligned 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: word, unaligned 4",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 3),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: double",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check cb access: double, unaligned 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: double, unaligned 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: double, oob 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check cb access: double, oob 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
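/* ifindex is a 4-byte field, so 8-byte loads and stores are invalid. */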
{
	"check __sk_buff->ifindex dw store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, ifindex)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check __sk_buff->ifindex dw load not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, ifindex)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check cb access: double, wrong type",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
	"check out of range skb->cb access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 256),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SCHED_ACT,
},
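/* Writability differs by program type: socket filters may only write
 * cb[], while tc (SCHED_CLS) programs may also write mark, tc_index
 * and tstamp.
 */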
{
	"write skb fields from socket prog",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.errstr_unpriv = "R1 leaks addr",
	.result_unpriv = REJECT,
},
{
	"write skb fields from tc_cls_act prog",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"check skb->data half load not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data) + 2),
#endif
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid bpf_context access",
},
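/* gso_segs and gso_size are readable (but not writable) from CGROUP_SKB
 * and SCHED_CLS programs; loads may target any register, including the
 * context register R1.
 */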
{
	"read gso_segs from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_segs from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"write gso_segs from CGROUP_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=164 size=4",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_segs from CLS",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"read gso_size from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_size from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"write gso_size from CGROUP_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=176 size=4",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_size from CLS",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
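/* wire_len is readable by tc classifiers but read-only, and hidden from
 * socket filters entirely.
 */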
{
	"check wire_len is not readable by sockets",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, wire_len)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check wire_len is readable by tc classifier",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, wire_len)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"check wire_len is not writable by tc classifier",
	.insns = {
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, wire_len)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "R1 leaks addr",
	.result = REJECT,
},
1{
2 "access skb fields ok",
3 .insns = {
4 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5 offsetof(struct __sk_buff, len)),
6 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
7 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8 offsetof(struct __sk_buff, mark)),
9 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
10 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11 offsetof(struct __sk_buff, pkt_type)),
12 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
13 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
14 offsetof(struct __sk_buff, queue_mapping)),
15 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
16 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
17 offsetof(struct __sk_buff, protocol)),
18 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
19 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
20 offsetof(struct __sk_buff, vlan_present)),
21 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
22 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
23 offsetof(struct __sk_buff, vlan_tci)),
24 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
25 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
26 offsetof(struct __sk_buff, napi_id)),
27 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
28 BPF_EXIT_INSN(),
29 },
30 .result = ACCEPT,
31},
32{
33 "access skb fields bad1",
34 .insns = {
35 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
36 BPF_EXIT_INSN(),
37 },
38 .errstr = "invalid bpf_context access",
39 .result = REJECT,
40},
41{
42 "access skb fields bad2",
43 .insns = {
44 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
45 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
46 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
47 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
48 BPF_LD_MAP_FD(BPF_REG_1, 0),
49 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
50 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
51 BPF_EXIT_INSN(),
52 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
53 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
54 offsetof(struct __sk_buff, pkt_type)),
55 BPF_EXIT_INSN(),
56 },
57 .fixup_map_hash_8b = { 4 },
58 .errstr = "different pointers",
59 .errstr_unpriv = "R1 pointer comparison",
60 .result = REJECT,
61},
62{
63 "access skb fields bad3",
64 .insns = {
65 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
66 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
67 offsetof(struct __sk_buff, pkt_type)),
68 BPF_EXIT_INSN(),
69 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
70 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
71 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
72 BPF_LD_MAP_FD(BPF_REG_1, 0),
73 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
74 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
75 BPF_EXIT_INSN(),
76 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
77 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
78 },
79 .fixup_map_hash_8b = { 6 },
80 .errstr = "different pointers",
81 .errstr_unpriv = "R1 pointer comparison",
82 .result = REJECT,
83},
84{
85 "access skb fields bad4",
86 .insns = {
87 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
88 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
89 offsetof(struct __sk_buff, len)),
90 BPF_MOV64_IMM(BPF_REG_0, 0),
91 BPF_EXIT_INSN(),
92 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
93 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
94 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
95 BPF_LD_MAP_FD(BPF_REG_1, 0),
96 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
97 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
98 BPF_EXIT_INSN(),
99 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
100 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
101 },
102 .fixup_map_hash_8b = { 7 },
103 .errstr = "different pointers",
104 .errstr_unpriv = "R1 pointer comparison",
105 .result = REJECT,
106},
107{
108 "invalid access __sk_buff family",
109 .insns = {
110 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
111 offsetof(struct __sk_buff, family)),
112 BPF_EXIT_INSN(),
113 },
114 .errstr = "invalid bpf_context access",
115 .result = REJECT,
116},
117{
118 "invalid access __sk_buff remote_ip4",
119 .insns = {
120 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
121 offsetof(struct __sk_buff, remote_ip4)),
122 BPF_EXIT_INSN(),
123 },
124 .errstr = "invalid bpf_context access",
125 .result = REJECT,
126},
127{
128 "invalid access __sk_buff local_ip4",
129 .insns = {
130 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
131 offsetof(struct __sk_buff, local_ip4)),
132 BPF_EXIT_INSN(),
133 },
134 .errstr = "invalid bpf_context access",
135 .result = REJECT,
136},
137{
138 "invalid access __sk_buff remote_ip6",
139 .insns = {
140 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
141 offsetof(struct __sk_buff, remote_ip6)),
142 BPF_EXIT_INSN(),
143 },
144 .errstr = "invalid bpf_context access",
145 .result = REJECT,
146},
147{
148 "invalid access __sk_buff local_ip6",
149 .insns = {
150 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
151 offsetof(struct __sk_buff, local_ip6)),
152 BPF_EXIT_INSN(),
153 },
154 .errstr = "invalid bpf_context access",
155 .result = REJECT,
156},
157{
158 "invalid access __sk_buff remote_port",
159 .insns = {
160 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
161 offsetof(struct __sk_buff, remote_port)),
162 BPF_EXIT_INSN(),
163 },
164 .errstr = "invalid bpf_context access",
165 .result = REJECT,
166},
167{
168 "invalid access __sk_buff remote_port",
169 .insns = {
170 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
171 offsetof(struct __sk_buff, local_port)),
172 BPF_EXIT_INSN(),
173 },
174 .errstr = "invalid bpf_context access",
175 .result = REJECT,
176},
177{
178 "valid access __sk_buff family",
179 .insns = {
180 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
181 offsetof(struct __sk_buff, family)),
182 BPF_EXIT_INSN(),
183 },
184 .result = ACCEPT,
185 .prog_type = BPF_PROG_TYPE_SK_SKB,
186},
187{
188 "valid access __sk_buff remote_ip4",
189 .insns = {
190 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
191 offsetof(struct __sk_buff, remote_ip4)),
192 BPF_EXIT_INSN(),
193 },
194 .result = ACCEPT,
195 .prog_type = BPF_PROG_TYPE_SK_SKB,
196},
197{
198 "valid access __sk_buff local_ip4",
199 .insns = {
200 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
201 offsetof(struct __sk_buff, local_ip4)),
202 BPF_EXIT_INSN(),
203 },
204 .result = ACCEPT,
205 .prog_type = BPF_PROG_TYPE_SK_SKB,
206},
207{
208 "valid access __sk_buff remote_ip6",
209 .insns = {
210 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
211 offsetof(struct __sk_buff, remote_ip6[0])),
212 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
213 offsetof(struct __sk_buff, remote_ip6[1])),
214 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
215 offsetof(struct __sk_buff, remote_ip6[2])),
216 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
217 offsetof(struct __sk_buff, remote_ip6[3])),
218 BPF_EXIT_INSN(),
219 },
220 .result = ACCEPT,
221 .prog_type = BPF_PROG_TYPE_SK_SKB,
222},
223{
224 "valid access __sk_buff local_ip6",
225 .insns = {
226 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
227 offsetof(struct __sk_buff, local_ip6[0])),
228 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
229 offsetof(struct __sk_buff, local_ip6[1])),
230 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
231 offsetof(struct __sk_buff, local_ip6[2])),
232 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
233 offsetof(struct __sk_buff, local_ip6[3])),
234 BPF_EXIT_INSN(),
235 },
236 .result = ACCEPT,
237 .prog_type = BPF_PROG_TYPE_SK_SKB,
238},
239{
240 "valid access __sk_buff remote_port",
241 .insns = {
242 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
243 offsetof(struct __sk_buff, remote_port)),
244 BPF_EXIT_INSN(),
245 },
246 .result = ACCEPT,
247 .prog_type = BPF_PROG_TYPE_SK_SKB,
248},
249{
250 "valid access __sk_buff remote_port",
251 .insns = {
252 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
253 offsetof(struct __sk_buff, local_port)),
254 BPF_EXIT_INSN(),
255 },
256 .result = ACCEPT,
257 .prog_type = BPF_PROG_TYPE_SK_SKB,
258},
259{
260 "invalid access of tc_classid for SK_SKB",
261 .insns = {
262 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
263 offsetof(struct __sk_buff, tc_classid)),
264 BPF_EXIT_INSN(),
265 },
266 .result = REJECT,
267 .prog_type = BPF_PROG_TYPE_SK_SKB,
268 .errstr = "invalid bpf_context access",
269},
270{
271 "invalid access of skb->mark for SK_SKB",
272 .insns = {
273 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
274 offsetof(struct __sk_buff, mark)),
275 BPF_EXIT_INSN(),
276 },
277 .result = REJECT,
278 .prog_type = BPF_PROG_TYPE_SK_SKB,
279 .errstr = "invalid bpf_context access",
280},
281{
282 "check skb->mark is not writeable by SK_SKB",
283 .insns = {
284 BPF_MOV64_IMM(BPF_REG_0, 0),
285 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
286 offsetof(struct __sk_buff, mark)),
287 BPF_EXIT_INSN(),
288 },
289 .result = REJECT,
290 .prog_type = BPF_PROG_TYPE_SK_SKB,
291 .errstr = "invalid bpf_context access",
292},
293{
294 "check skb->tc_index is writeable by SK_SKB",
295 .insns = {
296 BPF_MOV64_IMM(BPF_REG_0, 0),
297 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
298 offsetof(struct __sk_buff, tc_index)),
299 BPF_EXIT_INSN(),
300 },
301 .result = ACCEPT,
302 .prog_type = BPF_PROG_TYPE_SK_SKB,
303},
304{
305 "check skb->priority is writeable by SK_SKB",
306 .insns = {
307 BPF_MOV64_IMM(BPF_REG_0, 0),
308 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
309 offsetof(struct __sk_buff, priority)),
310 BPF_EXIT_INSN(),
311 },
312 .result = ACCEPT,
313 .prog_type = BPF_PROG_TYPE_SK_SKB,
314},
315{
316 "direct packet read for SK_SKB",
317 .insns = {
318 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
319 offsetof(struct __sk_buff, data)),
320 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
321 offsetof(struct __sk_buff, data_end)),
322 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
323 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
324 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
325 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
326 BPF_MOV64_IMM(BPF_REG_0, 0),
327 BPF_EXIT_INSN(),
328 },
329 .result = ACCEPT,
330 .prog_type = BPF_PROG_TYPE_SK_SKB,
331},
332{
333 "direct packet write for SK_SKB",
334 .insns = {
335 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
336 offsetof(struct __sk_buff, data)),
337 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
338 offsetof(struct __sk_buff, data_end)),
339 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
340 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
341 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
342 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
343 BPF_MOV64_IMM(BPF_REG_0, 0),
344 BPF_EXIT_INSN(),
345 },
346 .result = ACCEPT,
347 .prog_type = BPF_PROG_TYPE_SK_SKB,
348},
349{
350 "overlapping checks for direct packet access SK_SKB",
351 .insns = {
352 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
353 offsetof(struct __sk_buff, data)),
354 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
355 offsetof(struct __sk_buff, data_end)),
356 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
357 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
358 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
359 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
361 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
362 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
363 BPF_MOV64_IMM(BPF_REG_0, 0),
364 BPF_EXIT_INSN(),
365 },
366 .result = ACCEPT,
367 .prog_type = BPF_PROG_TYPE_SK_SKB,
368},
369{
370 "check skb->mark is not writeable by sockets",
371 .insns = {
372 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
373 offsetof(struct __sk_buff, mark)),
374 BPF_EXIT_INSN(),
375 },
376 .errstr = "invalid bpf_context access",
377 .errstr_unpriv = "R1 leaks addr",
378 .result = REJECT,
379},
380{
381 "check skb->tc_index is not writeable by sockets",
382 .insns = {
383 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
384 offsetof(struct __sk_buff, tc_index)),
385 BPF_EXIT_INSN(),
386 },
387 .errstr = "invalid bpf_context access",
388 .errstr_unpriv = "R1 leaks addr",
389 .result = REJECT,
390},
391{
392 "check cb access: byte",
393 .insns = {
394 BPF_MOV64_IMM(BPF_REG_0, 0),
395 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
396 offsetof(struct __sk_buff, cb[0])),
397 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
398 offsetof(struct __sk_buff, cb[0]) + 1),
399 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
400 offsetof(struct __sk_buff, cb[0]) + 2),
401 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
402 offsetof(struct __sk_buff, cb[0]) + 3),
403 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
404 offsetof(struct __sk_buff, cb[1])),
405 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
406 offsetof(struct __sk_buff, cb[1]) + 1),
407 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
408 offsetof(struct __sk_buff, cb[1]) + 2),
409 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
410 offsetof(struct __sk_buff, cb[1]) + 3),
411 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
412 offsetof(struct __sk_buff, cb[2])),
413 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
414 offsetof(struct __sk_buff, cb[2]) + 1),
415 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
416 offsetof(struct __sk_buff, cb[2]) + 2),
417 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
418 offsetof(struct __sk_buff, cb[2]) + 3),
419 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
420 offsetof(struct __sk_buff, cb[3])),
421 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
422 offsetof(struct __sk_buff, cb[3]) + 1),
423 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
424 offsetof(struct __sk_buff, cb[3]) + 2),
425 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
426 offsetof(struct __sk_buff, cb[3]) + 3),
427 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
428 offsetof(struct __sk_buff, cb[4])),
429 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
430 offsetof(struct __sk_buff, cb[4]) + 1),
431 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
432 offsetof(struct __sk_buff, cb[4]) + 2),
433 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
434 offsetof(struct __sk_buff, cb[4]) + 3),
435 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
436 offsetof(struct __sk_buff, cb[0])),
437 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
438 offsetof(struct __sk_buff, cb[0]) + 1),
439 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
440 offsetof(struct __sk_buff, cb[0]) + 2),
441 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
442 offsetof(struct __sk_buff, cb[0]) + 3),
443 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
444 offsetof(struct __sk_buff, cb[1])),
445 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
446 offsetof(struct __sk_buff, cb[1]) + 1),
447 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
448 offsetof(struct __sk_buff, cb[1]) + 2),
449 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
450 offsetof(struct __sk_buff, cb[1]) + 3),
451 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
452 offsetof(struct __sk_buff, cb[2])),
453 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
454 offsetof(struct __sk_buff, cb[2]) + 1),
455 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
456 offsetof(struct __sk_buff, cb[2]) + 2),
457 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
458 offsetof(struct __sk_buff, cb[2]) + 3),
459 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
460 offsetof(struct __sk_buff, cb[3])),
461 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
462 offsetof(struct __sk_buff, cb[3]) + 1),
463 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
464 offsetof(struct __sk_buff, cb[3]) + 2),
465 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
466 offsetof(struct __sk_buff, cb[3]) + 3),
467 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
468 offsetof(struct __sk_buff, cb[4])),
469 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
470 offsetof(struct __sk_buff, cb[4]) + 1),
471 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
472 offsetof(struct __sk_buff, cb[4]) + 2),
473 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
474 offsetof(struct __sk_buff, cb[4]) + 3),
475 BPF_EXIT_INSN(),
476 },
477 .result = ACCEPT,
478},
479{
480 "__sk_buff->hash, offset 0, byte store not permitted",
481 .insns = {
482 BPF_MOV64_IMM(BPF_REG_0, 0),
483 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
484 offsetof(struct __sk_buff, hash)),
485 BPF_EXIT_INSN(),
486 },
487 .errstr = "invalid bpf_context access",
488 .result = REJECT,
489},
490{
491 "__sk_buff->tc_index, offset 3, byte store not permitted",
492 .insns = {
493 BPF_MOV64_IMM(BPF_REG_0, 0),
494 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
495 offsetof(struct __sk_buff, tc_index) + 3),
496 BPF_EXIT_INSN(),
497 },
498 .errstr = "invalid bpf_context access",
499 .result = REJECT,
500},
501{
502 "check skb->hash byte load permitted",
503 .insns = {
504 BPF_MOV64_IMM(BPF_REG_0, 0),
505#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
506 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
507 offsetof(struct __sk_buff, hash)),
508#else
509 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
510 offsetof(struct __sk_buff, hash) + 3),
511#endif
512 BPF_EXIT_INSN(),
513 },
514 .result = ACCEPT,
515},
516{
517 "check skb->hash byte load permitted 1",
518 .insns = {
519 BPF_MOV64_IMM(BPF_REG_0, 0),
520 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
521 offsetof(struct __sk_buff, hash) + 1),
522 BPF_EXIT_INSN(),
523 },
524 .result = ACCEPT,
525},
526{
527 "check skb->hash byte load permitted 2",
528 .insns = {
529 BPF_MOV64_IMM(BPF_REG_0, 0),
530 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
531 offsetof(struct __sk_buff, hash) + 2),
532 BPF_EXIT_INSN(),
533 },
534 .result = ACCEPT,
535},
536{
537 "check skb->hash byte load permitted 3",
538 .insns = {
539 BPF_MOV64_IMM(BPF_REG_0, 0),
540#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
541 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
542 offsetof(struct __sk_buff, hash) + 3),
543#else
544 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
545 offsetof(struct __sk_buff, hash)),
546#endif
547 BPF_EXIT_INSN(),
548 },
549 .result = ACCEPT,
550},
551{
552 "check cb access: byte, wrong type",
553 .insns = {
554 BPF_MOV64_IMM(BPF_REG_0, 0),
555 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
556 offsetof(struct __sk_buff, cb[0])),
557 BPF_EXIT_INSN(),
558 },
559 .errstr = "invalid bpf_context access",
560 .result = REJECT,
561 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
562},
563{
564 "check cb access: half",
565 .insns = {
566 BPF_MOV64_IMM(BPF_REG_0, 0),
567 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
568 offsetof(struct __sk_buff, cb[0])),
569 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
570 offsetof(struct __sk_buff, cb[0]) + 2),
571 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
572 offsetof(struct __sk_buff, cb[1])),
573 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
574 offsetof(struct __sk_buff, cb[1]) + 2),
575 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
576 offsetof(struct __sk_buff, cb[2])),
577 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
578 offsetof(struct __sk_buff, cb[2]) + 2),
579 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
580 offsetof(struct __sk_buff, cb[3])),
581 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
582 offsetof(struct __sk_buff, cb[3]) + 2),
583 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
584 offsetof(struct __sk_buff, cb[4])),
585 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
586 offsetof(struct __sk_buff, cb[4]) + 2),
587 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
588 offsetof(struct __sk_buff, cb[0])),
589 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
590 offsetof(struct __sk_buff, cb[0]) + 2),
591 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
592 offsetof(struct __sk_buff, cb[1])),
593 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
594 offsetof(struct __sk_buff, cb[1]) + 2),
595 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
596 offsetof(struct __sk_buff, cb[2])),
597 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
598 offsetof(struct __sk_buff, cb[2]) + 2),
599 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
600 offsetof(struct __sk_buff, cb[3])),
601 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
602 offsetof(struct __sk_buff, cb[3]) + 2),
603 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
604 offsetof(struct __sk_buff, cb[4])),
605 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
606 offsetof(struct __sk_buff, cb[4]) + 2),
607 BPF_EXIT_INSN(),
608 },
609 .result = ACCEPT,
610},
611{
612 "check cb access: half, unaligned",
613 .insns = {
614 BPF_MOV64_IMM(BPF_REG_0, 0),
615 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
616 offsetof(struct __sk_buff, cb[0]) + 1),
617 BPF_EXIT_INSN(),
618 },
619 .errstr = "misaligned context access",
620 .result = REJECT,
621 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
622},
623{
624 "check __sk_buff->hash, offset 0, half store not permitted",
625 .insns = {
626 BPF_MOV64_IMM(BPF_REG_0, 0),
627 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
628 offsetof(struct __sk_buff, hash)),
629 BPF_EXIT_INSN(),
630 },
631 .errstr = "invalid bpf_context access",
632 .result = REJECT,
633},
634{
635 "check __sk_buff->tc_index, offset 2, half store not permitted",
636 .insns = {
637 BPF_MOV64_IMM(BPF_REG_0, 0),
638 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
639 offsetof(struct __sk_buff, tc_index) + 2),
640 BPF_EXIT_INSN(),
641 },
642 .errstr = "invalid bpf_context access",
643 .result = REJECT,
644},
645{
646 "check skb->hash half load permitted",
647 .insns = {
648 BPF_MOV64_IMM(BPF_REG_0, 0),
649#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
650 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
651 offsetof(struct __sk_buff, hash)),
652#else
653 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
654 offsetof(struct __sk_buff, hash) + 2),
655#endif
656 BPF_EXIT_INSN(),
657 },
658 .result = ACCEPT,
659},
660{
661 "check skb->hash half load permitted 2",
662 .insns = {
663 BPF_MOV64_IMM(BPF_REG_0, 0),
664#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
665 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
666 offsetof(struct __sk_buff, hash) + 2),
667#else
668 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
669 offsetof(struct __sk_buff, hash)),
670#endif
671 BPF_EXIT_INSN(),
672 },
673 .result = ACCEPT,
674},
675{
676 "check skb->hash half load not permitted, unaligned 1",
677 .insns = {
678 BPF_MOV64_IMM(BPF_REG_0, 0),
679#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
680 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
681 offsetof(struct __sk_buff, hash) + 1),
682#else
683 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
684 offsetof(struct __sk_buff, hash) + 3),
685#endif
686 BPF_EXIT_INSN(),
687 },
688 .errstr = "invalid bpf_context access",
689 .result = REJECT,
690 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
691},
692{
693 "check skb->hash half load not permitted, unaligned 3",
694 .insns = {
695 BPF_MOV64_IMM(BPF_REG_0, 0),
696#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
697 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
698 offsetof(struct __sk_buff, hash) + 3),
699#else
700 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
701 offsetof(struct __sk_buff, hash) + 1),
702#endif
703 BPF_EXIT_INSN(),
704 },
705 .errstr = "invalid bpf_context access",
706 .result = REJECT,
707 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
708},
709{
710 "check cb access: half, wrong type",
711 .insns = {
712 BPF_MOV64_IMM(BPF_REG_0, 0),
713 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
714 offsetof(struct __sk_buff, cb[0])),
715 BPF_EXIT_INSN(),
716 },
717 .errstr = "invalid bpf_context access",
718 .result = REJECT,
719 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
720},
721{
722 "check cb access: word",
723 .insns = {
724 BPF_MOV64_IMM(BPF_REG_0, 0),
725 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
726 offsetof(struct __sk_buff, cb[0])),
727 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
728 offsetof(struct __sk_buff, cb[1])),
729 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
730 offsetof(struct __sk_buff, cb[2])),
731 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
732 offsetof(struct __sk_buff, cb[3])),
733 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
734 offsetof(struct __sk_buff, cb[4])),
735 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
736 offsetof(struct __sk_buff, cb[0])),
737 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
738 offsetof(struct __sk_buff, cb[1])),
739 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
740 offsetof(struct __sk_buff, cb[2])),
741 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
742 offsetof(struct __sk_buff, cb[3])),
743 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
744 offsetof(struct __sk_buff, cb[4])),
745 BPF_EXIT_INSN(),
746 },
747 .result = ACCEPT,
748},
{
	"check cb access: word, unaligned 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: word, unaligned 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 1),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: word, unaligned 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: word, unaligned 4",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 3),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
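/*
 * Double-word (8-byte) cb[] accesses have to cover two adjacent cb words
 * starting at an even index: cb[0] and cb[2] are accepted, cb[1] and
 * cb[3] are misaligned, and a DW access at cb[4] would run past the end
 * of cb[] into the following field, making it an invalid context access.
 */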
{
	"check cb access: double",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check cb access: double, unaligned 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: double, unaligned 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: double, oob 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check cb access: double, oob 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
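/*
 * Access size must also match what the program type's is_valid_access()
 * callback permits for the field: ifindex is a 4-byte field, so the
 * 8-byte loads and stores below are rejected even though the offset
 * itself is in range.
 */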
{
	"check __sk_buff->ifindex dw store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, ifindex)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check __sk_buff->ifindex dw load not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, ifindex)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check cb access: double, wrong type",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
	"check out of range skb->cb access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 256),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SCHED_ACT,
},
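/*
 * Write access is gated per program type as well. The socket-filter test
 * below reads mark/tc_index and writes cb[]; the SCHED_CLS variant
 * additionally writes mark, tc_index and tstamp. Both fail unprivileged:
 * the first because storing R1 (the context pointer) into cb[] would leak
 * a kernel address, the second because tc programs are privileged-only.
 */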
{
	"write skb fields from socket prog",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.errstr_unpriv = "R1 leaks addr",
	.result_unpriv = REJECT,
},
{
	"write skb fields from tc_cls_act prog",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"check skb->data half load not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data) + 2),
#endif
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid bpf_context access",
},
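/*
 * gso_segs, gso_size and hwtstamp are read-only __sk_buff fields: the
 * program types covered below (CGROUP_SKB, SCHED_CLS) may load them into
 * any register, including the context register r1 itself, but stores are
 * rejected with the offending offset and size spelled out in the error.
 */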
{
	"read gso_segs from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_segs from CGROUP_SKB (load into r1)",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"write gso_segs from CGROUP_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=164 size=4",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_segs from CLS",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"read gso_size from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_size from CGROUP_SKB (load into r1)",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"write gso_size from CGROUP_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=176 size=4",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_size from CLS",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
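/*
 * offsetofend(struct __sk_buff, gso_size) names the bytes immediately
 * after gso_size, which at the time of writing are padding before the
 * 8-byte-aligned hwtstamp field; padding is never a valid context access,
 * hence the expected off=180 error below.
 */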
{
	"padding after gso_size is not accessible",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetofend(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=180 size=4",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"read hwtstamp from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hwtstamp)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read hwtstamp from CGROUP_SKB (load into r1)",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, hwtstamp)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"write hwtstamp from CGROUP_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, hwtstamp)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=184 size=8",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read hwtstamp from CLS",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hwtstamp)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
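/*
 * wire_len follows the same pattern: not visible to socket filters at
 * all, and readable but not writable from tc classifiers. In the write
 * test the stored value is R1 itself, so the unprivileged run fails the
 * pointer-leak check ("R1 leaks addr") instead of the context check.
 */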
{
	"check wire_len is not readable by sockets",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, wire_len)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check wire_len is readable by tc classifier",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, wire_len)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"check wire_len is not writable by tc classifier",
	.insns = {
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, wire_len)),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "R1 leaks addr",
	.result = REJECT,
},
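/*
 * The two tests below check branch tracking for packet-pointer
 * comparisons. The same "r3 (= pkt + 42) vs r2 (= pkt_end)" check appears
 * twice: on the path where the first check is taken, r1 stays 0, and the
 * verifier must know the second, identical check is taken again, so the
 * load at insn 9 is never reached with a NULL r1. On the path where the
 * first check falls through, r1 = pkt + 14 and the 2-byte load at offset
 * 9 stays within the verified 42 bytes. The second test expresses the
 * repeated check as "pkt_end < pkt" instead of "pkt > pkt_end".
 */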
{
	"pkt > pkt_end taken check",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,	// 0. r2 = *(u32 *)(r1 + data_end)
		    offsetof(struct __sk_buff, data_end)),
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,	// 1. r4 = *(u32 *)(r1 + data)
		    offsetof(struct __sk_buff, data)),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_4),		// 2. r3 = r4
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42),		// 3. r3 += 42
	BPF_MOV64_IMM(BPF_REG_1, 0),			// 4. r1 = 0
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2),	// 5. if r3 > r2 goto 8
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14),		// 6. r4 += 14
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),		// 7. r1 = r4
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1),	// 8. if r3 > r2 goto 10
	BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9),	// 9. r2 = *(u16 *)(r1 + 9)
	BPF_MOV64_IMM(BPF_REG_0, 0),			// 10. r0 = 0
	BPF_EXIT_INSN(),				// 11. exit
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"pkt_end < pkt taken check",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,	// 0. r2 = *(u32 *)(r1 + data_end)
		    offsetof(struct __sk_buff, data_end)),
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,	// 1. r4 = *(u32 *)(r1 + data)
		    offsetof(struct __sk_buff, data)),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_4),		// 2. r3 = r4
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42),		// 3. r3 += 42
	BPF_MOV64_IMM(BPF_REG_1, 0),			// 4. r1 = 0
	BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2),	// 5. if r3 > r2 goto 8
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14),		// 6. r4 += 14
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),		// 7. r1 = r4
	BPF_JMP_REG(BPF_JLT, BPF_REG_2, BPF_REG_3, 1),	// 8. if r2 < r3 goto 10
	BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9),	// 9. r2 = *(u16 *)(r1 + 9)
	BPF_MOV64_IMM(BPF_REG_0, 0),			// 10. r0 = 0
	BPF_EXIT_INSN(),				// 11. exit
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
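/*
 * For reference, the accepted flow in the two tests above corresponds
 * roughly to the following restricted C (a sketch with illustrative
 * variable names, not part of the suite):
 *
 *	void *data     = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *	char *p = NULL;
 *
 *	if (!(data + 42 > data_end))		// insn 5, fall-through path
 *		p = data + 14;			// insns 6-7
 *	if (!(data + 42 > data_end))		// insn 8: same check, so the
 *		return *(__u16 *)(p + 9);	// p == NULL path never gets here
 *	return 0;
 */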