Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Testsuite for eBPF verifier
4 *
5 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
6 * Copyright (c) 2017 Facebook
7 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
8 */
9
10#include <endian.h>
11#include <asm/types.h>
12#include <linux/types.h>
13#include <stdint.h>
14#include <stdio.h>
15#include <stdlib.h>
16#include <unistd.h>
17#include <errno.h>
18#include <string.h>
19#include <stddef.h>
20#include <stdbool.h>
21#include <sched.h>
22#include <limits.h>
23#include <assert.h>
24
25#include <linux/unistd.h>
26#include <linux/filter.h>
27#include <linux/bpf_perf_event.h>
28#include <linux/bpf.h>
29#include <linux/if_ether.h>
30#include <linux/btf.h>
31
32#include <bpf/btf.h>
33#include <bpf/bpf.h>
34#include <bpf/libbpf.h>
35
36#include "autoconf_helper.h"
37#include "unpriv_helpers.h"
38#include "cap_helpers.h"
39#include "bpf_rand.h"
40#include "bpf_util.h"
41#include "test_btf.h"
42#include "../../../include/linux/filter.h"
43#include "testing_helpers.h"
44
45#define MAX_INSNS BPF_MAXINSNS
46#define MAX_EXPECTED_INSNS 32
47#define MAX_UNEXPECTED_INSNS 32
48#define MAX_TEST_INSNS 1000000
49#define MAX_FIXUPS 8
50#define MAX_NR_MAPS 23
51#define MAX_TEST_RUNS 8
52#define POINTER_VALUE 0xcafe4all
53#define TEST_DATA_LEN 64
54#define MAX_FUNC_INFOS 8
55#define MAX_BTF_STRINGS 256
56#define MAX_BTF_TYPES 256
57
58#define INSN_OFF_MASK ((__s16)0xFFFF)
59#define INSN_IMM_MASK ((__s32)0xFFFFFFFF)
60#define SKIP_INSNS() BPF_RAW_INSN(0xde, 0xa, 0xd, 0xbeef, 0xdeadbeef)
61
62#define DEFAULT_LIBBPF_LOG_LEVEL 4
63
64#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
65#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
66#define F_NEEDS_JIT_ENABLED (1 << 2)
67
68/* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
69#define ADMIN_CAPS (1ULL << CAP_NET_ADMIN | \
70 1ULL << CAP_PERFMON | \
71 1ULL << CAP_BPF)
72#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
73static bool unpriv_disabled = false;
74static bool jit_disabled;
75static int skips;
76static bool verbose = false;
77static int verif_log_level = 0;
78
/* Associates a kfunc name with the index of the call instruction whose
 * imm field must be patched with the kfunc's BTF ID before program load
 * (see fixup_prog_kfuncs()).
 */
struct kfunc_btf_id_pair {
	const char *kfunc;
	int insn_idx;
};
83
/* Descriptor for one verifier test case. Instances are aggregated into
 * the tests[] array from snippets included via verifier/tests.h.
 */
struct bpf_test {
	/* Human-readable test name, printed when the test runs. */
	const char *descr;
	/* Static program image; ignored when fill_helper is set. */
	struct bpf_insn insns[MAX_INSNS];
	/* Buffer for a dynamically generated program, allocated by the
	 * test engine and populated by fill_helper().
	 */
	struct bpf_insn *fill_insns;
	/* If specified, test engine looks for this sequence of
	 * instructions in the BPF program after loading. Allows to
	 * test rewrites applied by verifier. Use values
	 * INSN_OFF_MASK and INSN_IMM_MASK to mask `off` and `imm`
	 * fields if content does not matter. The test case fails if
	 * specified instructions are not found.
	 *
	 * The sequence could be split into sub-sequences by adding
	 * SKIP_INSNS instruction at the end of each sub-sequence. In
	 * such case sub-sequences are searched for one after another.
	 */
	struct bpf_insn expected_insns[MAX_EXPECTED_INSNS];
	/* If specified, test engine applies same pattern matching
	 * logic as for `expected_insns`. If the specified pattern is
	 * matched test case is marked as failed.
	 */
	struct bpf_insn unexpected_insns[MAX_UNEXPECTED_INSNS];
	/* Each fixup_* array lists instruction indices whose imm field
	 * is patched with the fd of the corresponding fixture map
	 * created by do_test_fixup(); index 0 unused as terminator.
	 */
	int fixup_map_hash_8b[MAX_FIXUPS];
	int fixup_map_hash_48b[MAX_FIXUPS];
	int fixup_map_hash_16b[MAX_FIXUPS];
	int fixup_map_array_48b[MAX_FIXUPS];
	int fixup_map_sockmap[MAX_FIXUPS];
	int fixup_map_sockhash[MAX_FIXUPS];
	int fixup_map_xskmap[MAX_FIXUPS];
	int fixup_map_stacktrace[MAX_FIXUPS];
	int fixup_prog1[MAX_FIXUPS];
	int fixup_prog2[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	int fixup_cgroup_storage[MAX_FIXUPS];
	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
	int fixup_map_spin_lock[MAX_FIXUPS];
	int fixup_map_array_ro[MAX_FIXUPS];
	int fixup_map_array_wo[MAX_FIXUPS];
	int fixup_map_array_small[MAX_FIXUPS];
	int fixup_sk_storage_map[MAX_FIXUPS];
	int fixup_map_event_output[MAX_FIXUPS];
	int fixup_map_reuseport_array[MAX_FIXUPS];
	int fixup_map_ringbuf[MAX_FIXUPS];
	int fixup_map_timer[MAX_FIXUPS];
	int fixup_map_kptr[MAX_FIXUPS];
	/* kfunc call sites whose imm (and possibly off) need BTF-ID patching */
	struct kfunc_btf_id_pair fixup_kfunc_btf_id[MAX_FIXUPS];
	/* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
	 * Can be a tab-separated sequence of expected strings. An empty string
	 * means no log verification.
	 */
	const char *errstr;
	const char *errstr_unpriv;
	/* expected "processed N insns" count reported by the verifier */
	uint32_t insn_processed;
	/* program length when fill_helper generates the program */
	int prog_len;
	enum {
		UNDEF,
		ACCEPT,
		REJECT,
		VERBOSE_ACCEPT,
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	/* F_* bits declared above (alignment / JIT requirements) */
	uint8_t flags;
	void (*fill_helper)(struct bpf_test *self);
	/* number of test runs; single-run tests use the anonymous member */
	int runs;
#define bpf_testdata_struct_t					\
	struct {						\
		uint32_t retval, retval_unpriv;			\
		union {						\
			__u8 data[TEST_DATA_LEN];		\
			__u64 data64[TEST_DATA_LEN / 8];	\
		};						\
	}
	union {
		bpf_testdata_struct_t;
		bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
	};
	enum bpf_attach_type expected_attach_type;
	const char *kfunc;
	struct bpf_func_info func_info[MAX_FUNC_INFOS];
	int func_info_cnt;
	char btf_strings[MAX_BTF_STRINGS];
	/* A set of BTF types to load when specified,
	 * use macro definitions from test_btf.h,
	 * must end with BTF_END_RAW
	 */
	__u32 btf_types[MAX_BTF_TYPES];
};
170
171/* Note we want this to be 64 bit aligned so that the end of our array is
172 * actually the end of the structure.
173 */
174#define MAX_ENTRIES 11
175
/* Value layout for the 48-byte map fixtures (create_map/update_map). */
struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};
180
/* Value layout for the 16-byte hash map fixture. */
struct other_val {
	long long foo;
	long long bar;
};
185
/* Generate a large program interleaving LD_ABS with skb_vlan_push/pop
 * helper calls. Every failed check jumps forward to a shared error label
 * at the end of the program, exercising near-maximum jump offsets.
 */
static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
	/* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51
	/* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
	unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, j, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		/* jump to error label; target is insn[len - 2] below */
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_push);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}

	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_pop);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}
	if (++k < 5)
		goto loop;

	/* pad up to the error label / exit at the fixed program tail */
	for (; i < len - 3; i++)
		insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
	insn[len - 3] = BPF_JMP_A(1);
	/* error label */
	insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
	insn[len - 1] = BPF_EXIT_INSN();
	self->prog_len = len;
}
232
233static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
234{
235 struct bpf_insn *insn = self->fill_insns;
236 /* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns,
237 * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted
238 * to extend the error value of the inlined ld_abs sequence which then
239 * contains 7 insns. so, set the dividend to 7 so the testcase could
240 * work on all arches.
241 */
242 unsigned int len = (1 << 15) / 7;
243 int i = 0;
244
245 insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
246 insn[i++] = BPF_LD_ABS(BPF_B, 0);
247 insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
248 i++;
249 while (i < len - 1)
250 insn[i++] = BPF_LD_ABS(BPF_B, 1);
251 insn[i] = BPF_EXIT_INSN();
252 self->prog_len = i + 1;
253}
254
255static void bpf_fill_rand_ld_dw(struct bpf_test *self)
256{
257 struct bpf_insn *insn = self->fill_insns;
258 uint64_t res = 0;
259 int i = 0;
260
261 insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
262 while (i < self->retval) {
263 uint64_t val = bpf_semi_rand_get();
264 struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
265
266 res ^= val;
267 insn[i++] = tmp[0];
268 insn[i++] = tmp[1];
269 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
270 }
271 insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
272 insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
273 insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
274 insn[i] = BPF_EXIT_INSN();
275 self->prog_len = i + 1;
276 res ^= (res >> 32);
277 self->retval = (uint32_t)res;
278}
279
280#define MAX_JMP_SEQ 8192
281
/* test the sequence of 8k jumps: generate MAX_JMP_SEQ conditional jumps
 * (each over two instructions) followed by filler, to verify the long
 * branch sequence is accepted by the verifier.
 */
static void bpf_fill_scale1(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	/* test to check that the long sequence of jumps is acceptable */
	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % 64 + 1));
	}
	/* is_state_visited() doesn't allocate state for pruning for every jump.
	 * Hence multiply jmps by 4 to accommodate that heuristic
	 */
	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	self->retval = 42;
}
307
/* test the sequence of 8k jumps in inner most function (function depth 8):
 * chain FUNC_NEST one-instruction subprog calls, then emit the same jump
 * sequence as bpf_fill_scale1() in the innermost function.
 */
static void bpf_fill_scale2(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

#define FUNC_NEST 7
	/* build the call chain: each subprog immediately calls the next */
	for (k = 0; k < FUNC_NEST; k++) {
		insn[i++] = BPF_CALL_REL(1);
		insn[i++] = BPF_EXIT_INSN();
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	/* test to check that the long sequence of jumps is acceptable */
	k = 0;
	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		/* stack offset shrunk to leave room for the nested call frames */
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % (64 - 4 * FUNC_NEST) + 1));
	}
	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	self->retval = 42;
}
336
337static void bpf_fill_scale(struct bpf_test *self)
338{
339 switch (self->retval) {
340 case 1:
341 return bpf_fill_scale1(self);
342 case 2:
343 return bpf_fill_scale2(self);
344 default:
345 self->prog_len = 0;
346 break;
347 }
348}
349
/* Emit a 259-instruction unit: 128 conditional jumps into a block of 128
 * unconditional jumps that all funnel to the common tail (r0 = 1; exit).
 * Returns the number of instructions written.
 */
static int bpf_fill_torturous_jumps_insn_1(struct bpf_insn *insn)
{
	unsigned int len = 259, hlen = 128;
	int i;

	insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
	for (i = 1; i <= hlen; i++) {
		insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, hlen);
		insn[i + hlen] = BPF_JMP_A(hlen - i);
	}
	insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 1);
	insn[len - 1] = BPF_EXIT_INSN();

	return len;
}
365
/* Emit a 4100-instruction unit: 2048 conditional jumps, one unconditional
 * jump, then groups of 16 forward jumps, ending in r0 = 2; exit.
 * Returns the number of instructions written.
 */
static int bpf_fill_torturous_jumps_insn_2(struct bpf_insn *insn)
{
	unsigned int len = 4100, jmp_off = 2048;
	int i, j;

	insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
	for (i = 1; i <= jmp_off; i++) {
		insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, jmp_off);
	}
	insn[i++] = BPF_JMP_A(jmp_off);
	/* 16-instruction blocks of cascading unconditional jumps */
	for (; i <= jmp_off * 2 + 1; i+=16) {
		for (j = 0; j < 16; j++) {
			insn[i + j] = BPF_JMP_A(16 - j - 1);
		}
	}

	insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 2);
	insn[len - 1] = BPF_EXIT_INSN();

	return len;
}
387
388static void bpf_fill_torturous_jumps(struct bpf_test *self)
389{
390 struct bpf_insn *insn = self->fill_insns;
391 int i = 0;
392
393 switch (self->retval) {
394 case 1:
395 self->prog_len = bpf_fill_torturous_jumps_insn_1(insn);
396 return;
397 case 2:
398 self->prog_len = bpf_fill_torturous_jumps_insn_2(insn);
399 return;
400 case 3:
401 /* main */
402 insn[i++] = BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4);
403 insn[i++] = BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 262);
404 insn[i++] = BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0);
405 insn[i++] = BPF_MOV64_IMM(BPF_REG_0, 3);
406 insn[i++] = BPF_EXIT_INSN();
407
408 /* subprog 1 */
409 i += bpf_fill_torturous_jumps_insn_1(insn + i);
410
411 /* subprog 2 */
412 i += bpf_fill_torturous_jumps_insn_2(insn + i);
413
414 self->prog_len = i;
415 return;
416 default:
417 self->prog_len = 0;
418 break;
419 }
420}
421
static void bpf_fill_big_prog_with_loop_1(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	/* This test was added to catch a specific use after free
	 * error, which happened upon BPF program reallocation.
	 * Reallocation is handled by core.c:bpf_prog_realloc, which
	 * reuses old memory if page boundary is not crossed. The
	 * value of `len` is chosen to cross this boundary on bpf_loop
	 * patching.
	 */
	const int len = getpagesize() - 25;
	int callback_load_idx;
	int callback_idx;
	int i = 0;

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1);
	callback_load_idx = i;
	/* ld_imm64 of the callback subprog; imm patched after layout is known */
	insn[i++] = BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW,
				 BPF_REG_2, BPF_PSEUDO_FUNC, 0,
				 777 /* filled below */);
	insn[i++] = BPF_RAW_INSN(0, 0, 0, 0, 0);
	insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0);
	insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0);
	insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop);

	/* pad the main program out to len - 2 before the callback subprog */
	while (i < len - 3)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
	insn[i++] = BPF_EXIT_INSN();

	/* callback subprog: simply returns 0 */
	callback_idx = i;
	insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
	insn[i++] = BPF_EXIT_INSN();

	/* point the pseudo-func ld_imm64 at the callback */
	insn[callback_load_idx].imm = callback_idx - callback_load_idx - 1;
	self->func_info[1].insn_off = callback_idx;
	self->prog_len = i;
	assert(i == len);
}
460
461/* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
462#define BPF_SK_LOOKUP(func) \
463 /* struct bpf_sock_tuple tuple = {} */ \
464 BPF_MOV64_IMM(BPF_REG_2, 0), \
465 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
466 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \
467 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \
468 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
469 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
470 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
471 /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
472 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
473 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
474 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
475 BPF_MOV64_IMM(BPF_REG_4, 0), \
476 BPF_MOV64_IMM(BPF_REG_5, 0), \
477 BPF_EMIT_CALL(BPF_FUNC_ ## func)
478
479/* BPF_DIRECT_PKT_R2 contains 7 instructions, it initializes default return
480 * value into 0 and does necessary preparation for direct packet access
481 * through r2. The allowed access range is 8 bytes.
482 */
483#define BPF_DIRECT_PKT_R2 \
484 BPF_MOV64_IMM(BPF_REG_0, 0), \
485 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
486 offsetof(struct __sk_buff, data)), \
487 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
488 offsetof(struct __sk_buff, data_end)), \
489 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \
490 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \
491 BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \
492 BPF_EXIT_INSN()
493
494/* BPF_RAND_UEXT_R7 contains 4 instructions, it initializes R7 into a random
495 * positive u32, and zero-extend it into 64-bit.
496 */
497#define BPF_RAND_UEXT_R7 \
498 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
499 BPF_FUNC_get_prandom_u32), \
500 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
501 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \
502 BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
503
504/* BPF_RAND_SEXT_R7 contains 5 instructions, it initializes R7 into a random
505 * negative u32, and sign-extend it into 64-bit.
506 */
507#define BPF_RAND_SEXT_R7 \
508 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
509 BPF_FUNC_get_prandom_u32), \
510 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
511 BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \
512 BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \
513 BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
514
/* The full test array is assembled from per-feature snippet files: with
 * FILL_ARRAY defined, verifier/tests.h expands each snippet into a
 * struct bpf_test initializer.
 */
static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};
520
521static int probe_filter_length(const struct bpf_insn *fp)
522{
523 int len;
524
525 for (len = MAX_INSNS - 1; len > 0; --len)
526 if (fp[len].code != 0 || fp[len].imm != 0)
527 break;
528 return len + 1;
529}
530
531static bool skip_unsupported_map(enum bpf_map_type map_type)
532{
533 if (!libbpf_probe_bpf_map_type(map_type, NULL)) {
534 printf("SKIP (unsupported map type %d)\n", map_type);
535 skips++;
536 return true;
537 }
538 return false;
539}
540
541static int __create_map(uint32_t type, uint32_t size_key,
542 uint32_t size_value, uint32_t max_elem,
543 uint32_t extra_flags)
544{
545 LIBBPF_OPTS(bpf_map_create_opts, opts);
546 int fd;
547
548 opts.map_flags = (type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0) | extra_flags;
549 fd = bpf_map_create(type, NULL, size_key, size_value, max_elem, &opts);
550 if (fd < 0) {
551 if (skip_unsupported_map(type))
552 return -1;
553 printf("Failed to create hash map '%s'!\n", strerror(errno));
554 }
555
556 return fd;
557}
558
/* Convenience wrapper around __create_map() with no extra map flags. */
static int create_map(uint32_t type, uint32_t size_key,
		      uint32_t size_value, uint32_t max_elem)
{
	return __create_map(type, size_key, size_value, max_elem, 0);
}
564
565static void update_map(int fd, int index)
566{
567 struct test_val value = {
568 .index = (6 + 1) * sizeof(int),
569 .foo[6] = 0xabcdef12,
570 };
571
572 assert(!bpf_map_update_elem(fd, &index, &value, 0));
573}
574
575static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
576{
577 struct bpf_insn prog[] = {
578 BPF_MOV64_IMM(BPF_REG_0, ret),
579 BPF_EXIT_INSN(),
580 };
581
582 return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
583}
584
/* Load a program that tail-calls entry `idx` of prog-array map `mfd`,
 * falling through to returning `ret` if the tail call fails.
 */
static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
				  int idx, int ret)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_3, idx),
		BPF_LD_MAP_FD(BPF_REG_2, mfd),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_tail_call),
		BPF_MOV64_IMM(BPF_REG_0, ret),
		BPF_EXIT_INSN(),
	};

	return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
}
599
/* Create a prog-array map populated with three dummy programs at the
 * given keys (p2key's program tail-calls back into the array). Returns
 * the map fd, or -1 on failure. The program fds are always closed; the
 * map keeps its own references to inserted programs.
 */
static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
			     int p1key, int p2key, int p3key)
{
	int mfd, p1fd, p2fd, p3fd;

	mfd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, NULL, sizeof(int),
			     sizeof(int), max_elem, NULL);
	if (mfd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
			return -1;
		printf("Failed to create prog array '%s'!\n", strerror(errno));
		return -1;
	}

	p1fd = create_prog_dummy_simple(prog_type, 42);
	p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
	p3fd = create_prog_dummy_simple(prog_type, 24);
	if (p1fd < 0 || p2fd < 0 || p3fd < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
		/* on any failure close the map and report -1, but still fall
		 * through to release the program fds below
		 */
err:
		close(mfd);
		mfd = -1;
	}
	close(p3fd);
	close(p2fd);
	close(p1fd);
	return mfd;
}
633
634static int create_map_in_map(void)
635{
636 LIBBPF_OPTS(bpf_map_create_opts, opts);
637 int inner_map_fd, outer_map_fd;
638
639 inner_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int),
640 sizeof(int), 1, NULL);
641 if (inner_map_fd < 0) {
642 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
643 return -1;
644 printf("Failed to create array '%s'!\n", strerror(errno));
645 return inner_map_fd;
646 }
647
648 opts.inner_map_fd = inner_map_fd;
649 outer_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
650 sizeof(int), sizeof(int), 1, &opts);
651 if (outer_map_fd < 0) {
652 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
653 return -1;
654 printf("Failed to create array of maps '%s'!\n",
655 strerror(errno));
656 }
657
658 close(inner_map_fd);
659
660 return outer_map_fd;
661}
662
663static int create_cgroup_storage(bool percpu)
664{
665 enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
666 BPF_MAP_TYPE_CGROUP_STORAGE;
667 int fd;
668
669 fd = bpf_map_create(type, NULL, sizeof(struct bpf_cgroup_storage_key),
670 TEST_DATA_LEN, 0, NULL);
671 if (fd < 0) {
672 if (skip_unsupported_map(type))
673 return -1;
674 printf("Failed to create cgroup storage '%s'!\n",
675 strerror(errno));
676 }
677
678 return fd;
679}
680
681/* struct bpf_spin_lock {
682 * int val;
683 * };
684 * struct val {
685 * int cnt;
686 * struct bpf_spin_lock l;
687 * };
688 * struct bpf_timer {
689 * __u64 :64;
690 * __u64 :64;
691 * } __attribute__((aligned(8)));
692 * struct timer {
693 * struct bpf_timer t;
694 * };
695 * struct btf_ptr {
696 * struct prog_test_ref_kfunc __kptr_untrusted *ptr;
697 * struct prog_test_ref_kfunc __kptr *ptr;
698 * struct prog_test_member __kptr *ptr;
699 * }
700 */
/* Raw BTF string section and type section implementing the layouts in
 * the comment above; numeric member arguments are byte offsets into
 * btf_str_sec and bit offsets within each struct.
 */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t"
				  "\0btf_ptr\0prog_test_ref_kfunc\0ptr\0kptr\0kptr_untrusted"
				  "\0prog_test_member";
static __u32 btf_raw_types[] = {
	/* int */
	BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
	/* struct bpf_spin_lock */ /* [2] */
	BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
	BTF_MEMBER_ENC(15, 1, 0), /* int val; */
	/* struct val */ /* [3] */
	BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
	BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
	BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
	/* struct bpf_timer */ /* [4] */
	BTF_TYPE_ENC(25, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0), 16),
	/* struct timer */ /* [5] */
	BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
	BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
	/* struct prog_test_ref_kfunc */ /* [6] */
	BTF_STRUCT_ENC(51, 0, 0),
	BTF_STRUCT_ENC(95, 0, 0), /* [7] */
	/* type tag "kptr_untrusted" */
	BTF_TYPE_TAG_ENC(80, 6), /* [8] */
	/* type tag "kptr" */
	BTF_TYPE_TAG_ENC(75, 6), /* [9] */
	BTF_TYPE_TAG_ENC(75, 7), /* [10] */
	BTF_PTR_ENC(8), /* [11] */
	BTF_PTR_ENC(9), /* [12] */
	BTF_PTR_ENC(10), /* [13] */
	/* struct btf_ptr */ /* [14] */
	BTF_STRUCT_ENC(43, 3, 24),
	BTF_MEMBER_ENC(71, 11, 0), /* struct prog_test_ref_kfunc __kptr_untrusted *ptr; */
	BTF_MEMBER_ENC(71, 12, 64), /* struct prog_test_ref_kfunc __kptr *ptr; */
	BTF_MEMBER_ENC(71, 13, 128), /* struct prog_test_member __kptr *ptr; */
};
736
737static char bpf_vlog[UINT_MAX >> 8];
738
739static int load_btf_spec(__u32 *types, int types_len,
740 const char *strings, int strings_len)
741{
742 struct btf_header hdr = {
743 .magic = BTF_MAGIC,
744 .version = BTF_VERSION,
745 .hdr_len = sizeof(struct btf_header),
746 .type_len = types_len,
747 .str_off = types_len,
748 .str_len = strings_len,
749 };
750 void *ptr, *raw_btf;
751 int btf_fd;
752 LIBBPF_OPTS(bpf_btf_load_opts, opts,
753 .log_buf = bpf_vlog,
754 .log_size = sizeof(bpf_vlog),
755 .log_level = (verbose
756 ? verif_log_level
757 : DEFAULT_LIBBPF_LOG_LEVEL),
758 );
759
760 raw_btf = malloc(sizeof(hdr) + types_len + strings_len);
761
762 ptr = raw_btf;
763 memcpy(ptr, &hdr, sizeof(hdr));
764 ptr += sizeof(hdr);
765 memcpy(ptr, types, hdr.type_len);
766 ptr += hdr.type_len;
767 memcpy(ptr, strings, hdr.str_len);
768 ptr += hdr.str_len;
769
770 btf_fd = bpf_btf_load(raw_btf, ptr - raw_btf, &opts);
771 if (btf_fd < 0)
772 printf("Failed to load BTF spec: '%s'\n", strerror(errno));
773
774 free(raw_btf);
775
776 return btf_fd < 0 ? -1 : btf_fd;
777}
778
/* Load the fixed BTF fixture (btf_raw_types/btf_str_sec) into the kernel. */
static int load_btf(void)
{
	return load_btf_spec(btf_raw_types, sizeof(btf_raw_types),
			     btf_str_sec, sizeof(btf_str_sec));
}
784
785static int load_btf_for_test(struct bpf_test *test)
786{
787 int types_num = 0;
788
789 while (types_num < MAX_BTF_TYPES &&
790 test->btf_types[types_num] != BTF_END_RAW)
791 ++types_num;
792
793 int types_len = types_num * sizeof(test->btf_types[0]);
794
795 return load_btf_spec(test->btf_types, types_len,
796 test->btf_strings, sizeof(test->btf_strings));
797}
798
799static int create_map_spin_lock(void)
800{
801 LIBBPF_OPTS(bpf_map_create_opts, opts,
802 .btf_key_type_id = 1,
803 .btf_value_type_id = 3,
804 );
805 int fd, btf_fd;
806
807 btf_fd = load_btf();
808 if (btf_fd < 0)
809 return -1;
810 opts.btf_fd = btf_fd;
811 fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 8, 1, &opts);
812 if (fd < 0)
813 printf("Failed to create map with spin_lock\n");
814 return fd;
815}
816
817static int create_sk_storage_map(void)
818{
819 LIBBPF_OPTS(bpf_map_create_opts, opts,
820 .map_flags = BPF_F_NO_PREALLOC,
821 .btf_key_type_id = 1,
822 .btf_value_type_id = 3,
823 );
824 int fd, btf_fd;
825
826 btf_fd = load_btf();
827 if (btf_fd < 0)
828 return -1;
829 opts.btf_fd = btf_fd;
830 fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "test_map", 4, 8, 0, &opts);
831 close(opts.btf_fd);
832 if (fd < 0)
833 printf("Failed to create sk_storage_map\n");
834 return fd;
835}
836
837static int create_map_timer(void)
838{
839 LIBBPF_OPTS(bpf_map_create_opts, opts,
840 .btf_key_type_id = 1,
841 .btf_value_type_id = 5,
842 );
843 int fd, btf_fd;
844
845 btf_fd = load_btf();
846 if (btf_fd < 0)
847 return -1;
848
849 opts.btf_fd = btf_fd;
850 fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 16, 1, &opts);
851 if (fd < 0)
852 printf("Failed to create map with timer\n");
853 return fd;
854}
855
856static int create_map_kptr(void)
857{
858 LIBBPF_OPTS(bpf_map_create_opts, opts,
859 .btf_key_type_id = 1,
860 .btf_value_type_id = 14,
861 );
862 int fd, btf_fd;
863
864 btf_fd = load_btf();
865 if (btf_fd < 0)
866 return -1;
867
868 opts.btf_fd = btf_fd;
869 fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 24, 1, &opts);
870 if (fd < 0)
871 printf("Failed to create map with btf_id pointer\n");
872 return fd;
873}
874
875static void set_root(bool set)
876{
877 __u64 caps;
878
879 if (set) {
880 if (cap_enable_effective(1ULL << CAP_SYS_ADMIN, &caps))
881 perror("cap_disable_effective(CAP_SYS_ADMIN)");
882 } else {
883 if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps))
884 perror("cap_disable_effective(CAP_SYS_ADMIN)");
885 }
886}
887
888static __u64 ptr_to_u64(const void *ptr)
889{
890 return (uintptr_t) ptr;
891}
892
/* Find the kernel's loaded "bpf_testmod" BTF object and load it split on
 * top of the given vmlinux BTF. Returns the btf object (with its kernel
 * fd kept open, see below), or NULL if not found or on error.
 */
static struct btf *btf__load_testmod_btf(struct btf *vmlinux)
{
	struct bpf_btf_info info;
	__u32 len = sizeof(info);
	struct btf *btf = NULL;
	char name[64];
	__u32 id = 0;
	int err, fd;

	/* Iterate all loaded BTF objects and find bpf_testmod,
	 * we need SYS_ADMIN cap for that.
	 */
	set_root(true);

	while (true) {
		err = bpf_btf_get_next_id(id, &id);
		if (err) {
			/* ENOENT means we walked past the last id: normal end */
			if (errno == ENOENT)
				break;
			perror("bpf_btf_get_next_id failed");
			break;
		}

		fd = bpf_btf_get_fd_by_id(id);
		if (fd < 0) {
			/* object may have been unloaded between the two calls */
			if (errno == ENOENT)
				continue;
			perror("bpf_btf_get_fd_by_id failed");
			break;
		}

		memset(&info, 0, sizeof(info));
		info.name_len = sizeof(name);
		info.name = ptr_to_u64(name);
		len = sizeof(info);

		err = bpf_obj_get_info_by_fd(fd, &info, &len);
		if (err) {
			close(fd);
			perror("bpf_obj_get_info_by_fd failed");
			break;
		}

		if (strcmp("bpf_testmod", name)) {
			close(fd);
			continue;
		}

		btf = btf__load_from_kernel_by_id_split(id, vmlinux);
		if (!btf) {
			close(fd);
			break;
		}

		/* We need the fd to stay open so it can be used in fd_array.
		 * The final cleanup call to btf__free will free btf object
		 * and close the file descriptor.
		 */
		btf__set_fd(btf, fd);
		break;
	}

	set_root(false);
	return btf;
}
958
959static struct btf *testmod_btf;
960static struct btf *vmlinux_btf;
961
/* Release cached BTF objects loaded by fixup_prog_kfuncs(). The testmod
 * BTF is split on top of the vmlinux BTF, so it is freed first;
 * btf__free(NULL) is a no-op.
 */
static void kfuncs_cleanup(void)
{
	btf__free(testmod_btf);
	btf__free(vmlinux_btf);
}
967
/* Resolve each kfunc named in fixup_kfunc_btf_id to a BTF ID and write
 * it into the imm field of the referenced instruction. Kernel (vmlinux)
 * BTF is searched first, then bpf_testmod BTF; for module kfuncs the
 * module fd goes into fd_array and the instruction off is set to its
 * index. Unresolved kfuncs leave imm as 0. The vmlinux/testmod BTF
 * objects are cached in file-scope globals and freed by kfuncs_cleanup().
 */
static void fixup_prog_kfuncs(struct bpf_insn *prog, int *fd_array,
			      struct kfunc_btf_id_pair *fixup_kfunc_btf_id)
{
	/* Patch in kfunc BTF IDs */
	while (fixup_kfunc_btf_id->kfunc) {
		int btf_id = 0;

		/* try to find kfunc in kernel BTF */
		vmlinux_btf = vmlinux_btf ?: btf__load_vmlinux_btf();
		if (vmlinux_btf) {
			btf_id = btf__find_by_name_kind(vmlinux_btf,
							fixup_kfunc_btf_id->kfunc,
							BTF_KIND_FUNC);
			btf_id = btf_id < 0 ? 0 : btf_id;
		}

		/* kfunc not found in kernel BTF, try bpf_testmod BTF */
		if (!btf_id) {
			testmod_btf = testmod_btf ?: btf__load_testmod_btf(vmlinux_btf);
			if (testmod_btf) {
				btf_id = btf__find_by_name_kind(testmod_btf,
								fixup_kfunc_btf_id->kfunc,
								BTF_KIND_FUNC);
				btf_id = btf_id < 0 ? 0 : btf_id;
				if (btf_id) {
					/* We put bpf_testmod module fd into fd_array
					 * and its index 1 into instruction 'off'.
					 */
					*fd_array = btf__fd(testmod_btf);
					prog[fixup_kfunc_btf_id->insn_idx].off = 1;
				}
			}
		}

		prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id;
		fixup_kfunc_btf_id++;
	}
}
1006
1007static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
1008 struct bpf_insn *prog, int *map_fds, int *fd_array)
1009{
1010 int *fixup_map_hash_8b = test->fixup_map_hash_8b;
1011 int *fixup_map_hash_48b = test->fixup_map_hash_48b;
1012 int *fixup_map_hash_16b = test->fixup_map_hash_16b;
1013 int *fixup_map_array_48b = test->fixup_map_array_48b;
1014 int *fixup_map_sockmap = test->fixup_map_sockmap;
1015 int *fixup_map_sockhash = test->fixup_map_sockhash;
1016 int *fixup_map_xskmap = test->fixup_map_xskmap;
1017 int *fixup_map_stacktrace = test->fixup_map_stacktrace;
1018 int *fixup_prog1 = test->fixup_prog1;
1019 int *fixup_prog2 = test->fixup_prog2;
1020 int *fixup_map_in_map = test->fixup_map_in_map;
1021 int *fixup_cgroup_storage = test->fixup_cgroup_storage;
1022 int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
1023 int *fixup_map_spin_lock = test->fixup_map_spin_lock;
1024 int *fixup_map_array_ro = test->fixup_map_array_ro;
1025 int *fixup_map_array_wo = test->fixup_map_array_wo;
1026 int *fixup_map_array_small = test->fixup_map_array_small;
1027 int *fixup_sk_storage_map = test->fixup_sk_storage_map;
1028 int *fixup_map_event_output = test->fixup_map_event_output;
1029 int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
1030 int *fixup_map_ringbuf = test->fixup_map_ringbuf;
1031 int *fixup_map_timer = test->fixup_map_timer;
1032 int *fixup_map_kptr = test->fixup_map_kptr;
1033
1034 if (test->fill_helper) {
1035 test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
1036 test->fill_helper(test);
1037 }
1038
1039 /* Allocating HTs with 1 elem is fine here, since we only test
1040 * for verifier and not do a runtime lookup, so the only thing
1041 * that really matters is value size in this case.
1042 */
1043 if (*fixup_map_hash_8b) {
1044 map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
1045 sizeof(long long), 1);
1046 do {
1047 prog[*fixup_map_hash_8b].imm = map_fds[0];
1048 fixup_map_hash_8b++;
1049 } while (*fixup_map_hash_8b);
1050 }
1051
1052 if (*fixup_map_hash_48b) {
1053 map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
1054 sizeof(struct test_val), 1);
1055 do {
1056 prog[*fixup_map_hash_48b].imm = map_fds[1];
1057 fixup_map_hash_48b++;
1058 } while (*fixup_map_hash_48b);
1059 }
1060
1061 if (*fixup_map_hash_16b) {
1062 map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
1063 sizeof(struct other_val), 1);
1064 do {
1065 prog[*fixup_map_hash_16b].imm = map_fds[2];
1066 fixup_map_hash_16b++;
1067 } while (*fixup_map_hash_16b);
1068 }
1069
1070 if (*fixup_map_array_48b) {
1071 map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
1072 sizeof(struct test_val), 1);
1073 update_map(map_fds[3], 0);
1074 do {
1075 prog[*fixup_map_array_48b].imm = map_fds[3];
1076 fixup_map_array_48b++;
1077 } while (*fixup_map_array_48b);
1078 }
1079
1080 if (*fixup_prog1) {
1081 map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
1082 do {
1083 prog[*fixup_prog1].imm = map_fds[4];
1084 fixup_prog1++;
1085 } while (*fixup_prog1);
1086 }
1087
1088 if (*fixup_prog2) {
1089 map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
1090 do {
1091 prog[*fixup_prog2].imm = map_fds[5];
1092 fixup_prog2++;
1093 } while (*fixup_prog2);
1094 }
1095
1096 if (*fixup_map_in_map) {
1097 map_fds[6] = create_map_in_map();
1098 do {
1099 prog[*fixup_map_in_map].imm = map_fds[6];
1100 fixup_map_in_map++;
1101 } while (*fixup_map_in_map);
1102 }
1103
1104 if (*fixup_cgroup_storage) {
1105 map_fds[7] = create_cgroup_storage(false);
1106 do {
1107 prog[*fixup_cgroup_storage].imm = map_fds[7];
1108 fixup_cgroup_storage++;
1109 } while (*fixup_cgroup_storage);
1110 }
1111
1112 if (*fixup_percpu_cgroup_storage) {
1113 map_fds[8] = create_cgroup_storage(true);
1114 do {
1115 prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
1116 fixup_percpu_cgroup_storage++;
1117 } while (*fixup_percpu_cgroup_storage);
1118 }
1119 if (*fixup_map_sockmap) {
1120 map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
1121 sizeof(int), 1);
1122 do {
1123 prog[*fixup_map_sockmap].imm = map_fds[9];
1124 fixup_map_sockmap++;
1125 } while (*fixup_map_sockmap);
1126 }
1127 if (*fixup_map_sockhash) {
1128 map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
1129 sizeof(int), 1);
1130 do {
1131 prog[*fixup_map_sockhash].imm = map_fds[10];
1132 fixup_map_sockhash++;
1133 } while (*fixup_map_sockhash);
1134 }
1135 if (*fixup_map_xskmap) {
1136 map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
1137 sizeof(int), 1);
1138 do {
1139 prog[*fixup_map_xskmap].imm = map_fds[11];
1140 fixup_map_xskmap++;
1141 } while (*fixup_map_xskmap);
1142 }
1143 if (*fixup_map_stacktrace) {
1144 map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
1145 sizeof(u64), 1);
1146 do {
1147 prog[*fixup_map_stacktrace].imm = map_fds[12];
1148 fixup_map_stacktrace++;
1149 } while (*fixup_map_stacktrace);
1150 }
1151 if (*fixup_map_spin_lock) {
1152 map_fds[13] = create_map_spin_lock();
1153 do {
1154 prog[*fixup_map_spin_lock].imm = map_fds[13];
1155 fixup_map_spin_lock++;
1156 } while (*fixup_map_spin_lock);
1157 }
1158 if (*fixup_map_array_ro) {
1159 map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
1160 sizeof(struct test_val), 1,
1161 BPF_F_RDONLY_PROG);
1162 update_map(map_fds[14], 0);
1163 do {
1164 prog[*fixup_map_array_ro].imm = map_fds[14];
1165 fixup_map_array_ro++;
1166 } while (*fixup_map_array_ro);
1167 }
1168 if (*fixup_map_array_wo) {
1169 map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
1170 sizeof(struct test_val), 1,
1171 BPF_F_WRONLY_PROG);
1172 update_map(map_fds[15], 0);
1173 do {
1174 prog[*fixup_map_array_wo].imm = map_fds[15];
1175 fixup_map_array_wo++;
1176 } while (*fixup_map_array_wo);
1177 }
1178 if (*fixup_map_array_small) {
1179 map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
1180 1, 1, 0);
1181 update_map(map_fds[16], 0);
1182 do {
1183 prog[*fixup_map_array_small].imm = map_fds[16];
1184 fixup_map_array_small++;
1185 } while (*fixup_map_array_small);
1186 }
1187 if (*fixup_sk_storage_map) {
1188 map_fds[17] = create_sk_storage_map();
1189 do {
1190 prog[*fixup_sk_storage_map].imm = map_fds[17];
1191 fixup_sk_storage_map++;
1192 } while (*fixup_sk_storage_map);
1193 }
1194 if (*fixup_map_event_output) {
1195 map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY,
1196 sizeof(int), sizeof(int), 1, 0);
1197 do {
1198 prog[*fixup_map_event_output].imm = map_fds[18];
1199 fixup_map_event_output++;
1200 } while (*fixup_map_event_output);
1201 }
1202 if (*fixup_map_reuseport_array) {
1203 map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
1204 sizeof(u32), sizeof(u64), 1, 0);
1205 do {
1206 prog[*fixup_map_reuseport_array].imm = map_fds[19];
1207 fixup_map_reuseport_array++;
1208 } while (*fixup_map_reuseport_array);
1209 }
1210 if (*fixup_map_ringbuf) {
1211 map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
1212 0, getpagesize());
1213 do {
1214 prog[*fixup_map_ringbuf].imm = map_fds[20];
1215 fixup_map_ringbuf++;
1216 } while (*fixup_map_ringbuf);
1217 }
1218 if (*fixup_map_timer) {
1219 map_fds[21] = create_map_timer();
1220 do {
1221 prog[*fixup_map_timer].imm = map_fds[21];
1222 fixup_map_timer++;
1223 } while (*fixup_map_timer);
1224 }
1225 if (*fixup_map_kptr) {
1226 map_fds[22] = create_map_kptr();
1227 do {
1228 prog[*fixup_map_kptr].imm = map_fds[22];
1229 fixup_map_kptr++;
1230 } while (*fixup_map_kptr);
1231 }
1232
1233 fixup_prog_kfuncs(prog, fd_array, test->fixup_kfunc_btf_id);
1234}
1235
1236static int set_admin(bool admin)
1237{
1238 int err;
1239
1240 if (admin) {
1241 err = cap_enable_effective(ADMIN_CAPS, NULL);
1242 if (err)
1243 perror("cap_enable_effective(ADMIN_CAPS)");
1244 } else {
1245 err = cap_disable_effective(ADMIN_CAPS, NULL);
1246 if (err)
1247 perror("cap_disable_effective(ADMIN_CAPS)");
1248 }
1249
1250 return err;
1251}
1252
1253static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
1254 void *data, size_t size_data)
1255{
1256 __u8 tmp[TEST_DATA_LEN << 2];
1257 __u32 size_tmp = sizeof(tmp);
1258 int err, saved_errno;
1259 LIBBPF_OPTS(bpf_test_run_opts, topts,
1260 .data_in = data,
1261 .data_size_in = size_data,
1262 .data_out = tmp,
1263 .data_size_out = size_tmp,
1264 .repeat = 1,
1265 );
1266
1267 if (unpriv)
1268 set_admin(true);
1269 err = bpf_prog_test_run_opts(fd_prog, &topts);
1270 saved_errno = errno;
1271
1272 if (unpriv)
1273 set_admin(false);
1274
1275 if (err) {
1276 switch (saved_errno) {
1277 case ENOTSUPP:
1278 printf("Did not run the program (not supported) ");
1279 return 0;
1280 case EPERM:
1281 if (unpriv) {
1282 printf("Did not run the program (no permission) ");
1283 return 0;
1284 }
1285 /* fallthrough; */
1286 default:
1287 printf("FAIL: Unexpected bpf_prog_test_run error (%s) ",
1288 strerror(saved_errno));
1289 return err;
1290 }
1291 }
1292
1293 if (topts.retval != expected_val && expected_val != POINTER_VALUE) {
1294 printf("FAIL retval %d != %d ", topts.retval, expected_val);
1295 return 1;
1296 }
1297
1298 return 0;
1299}
1300
/* Check that every tab-separated fragment of @exp occurs in @log, and in
 * the given order. An empty @exp trivially matches.
 */
static bool cmp_str_seq(const char *log, const char *exp)
{
	char fragment[200];
	const char *sep, *hit;
	int frag_len;

	while (*exp) {
		/* a fragment runs up to the next tab or end of string */
		sep = strchr(exp, '\t');
		if (!sep)
			sep = exp + strlen(exp);

		frag_len = sep - exp;
		if (!frag_len || frag_len >= sizeof(fragment)) {
			printf("FAIL\nTestcase bug\n");
			return false;
		}
		memcpy(fragment, exp, frag_len);
		fragment[frag_len] = 0;

		hit = strstr(log, fragment);
		if (!hit) {
			printf("FAIL\nUnexpected verifier log!\n"
			       "EXP: %s\nRES:\n", fragment);
			return false;
		}
		/* resume the search right after this match */
		log = hit + frag_len;
		if (!*sep)
			break;
		exp = sep + 1;
	}
	return true;
}
1336
/* True iff @insn is an all-zeroes (nil) instruction, used as a terminator
 * in expected/unexpected instruction sequences.
 */
static bool is_null_insn(struct bpf_insn *insn)
{
	const struct bpf_insn zero = {};

	return !memcmp(insn, &zero, sizeof(zero));
}
1343
1344static bool is_skip_insn(struct bpf_insn *insn)
1345{
1346 struct bpf_insn skip_insn = SKIP_INSNS();
1347
1348 return memcmp(insn, &skip_insn, sizeof(skip_insn)) == 0;
1349}
1350
/* Number of instructions in @seq before the first nil instruction,
 * capped at @max_len when no terminator is present.
 */
static int null_terminated_insn_len(struct bpf_insn *seq, int max_len)
{
	int len = 0;

	while (len < max_len && !is_null_insn(&seq[len]))
		len++;

	return len;
}
1361
1362static bool compare_masked_insn(struct bpf_insn *orig, struct bpf_insn *masked)
1363{
1364 struct bpf_insn orig_masked;
1365
1366 memcpy(&orig_masked, orig, sizeof(orig_masked));
1367 if (masked->imm == INSN_IMM_MASK)
1368 orig_masked.imm = INSN_IMM_MASK;
1369 if (masked->off == INSN_OFF_MASK)
1370 orig_masked.off = INSN_OFF_MASK;
1371
1372 return memcmp(&orig_masked, masked, sizeof(orig_masked)) == 0;
1373}
1374
/* Find the first offset in @seq where @subseq matches (honoring field
 * masking); returns -1 when it does not occur.
 */
static int find_insn_subseq(struct bpf_insn *seq, struct bpf_insn *subseq,
			    int seq_len, int subseq_len)
{
	int start, k;

	if (subseq_len > seq_len)
		return -1;

	for (start = 0; start <= seq_len - subseq_len; ++start) {
		for (k = 0; k < subseq_len; ++k)
			if (!compare_masked_insn(&seq[start + k], &subseq[k]))
				break;
		/* inner loop ran to completion => full match at 'start' */
		if (k == subseq_len)
			return start;
	}

	return -1;
}
1398
/* Index of the first SKIP_INSNS() marker in @seq, or -1 if absent. */
static int find_skip_insn_marker(struct bpf_insn *seq, int len)
{
	int idx;

	for (idx = 0; idx < len; idx++) {
		if (is_skip_insn(&seq[idx]))
			return idx;
	}

	return -1;
}
1409
/* Return true if all sub-sequences in `subseqs` could be found in
 * `seq` one after another. Sub-sequences are separated by a single
 * nil instruction.
 */
static bool find_all_insn_subseqs(struct bpf_insn *seq, struct bpf_insn *subseqs,
				  int seq_len, int max_subseqs_len)
{
	int remaining = null_terminated_insn_len(subseqs, max_subseqs_len);

	while (remaining > 0) {
		int skip_at = find_skip_insn_marker(subseqs, remaining);
		int frag_len = skip_at < 0 ? remaining : skip_at;
		int hit = find_insn_subseq(seq, subseqs, seq_len, frag_len);

		if (hit < 0)
			return false;
		/* resume matching right after this fragment in seq */
		seq += hit + frag_len;
		seq_len -= hit + frag_len;
		/* step past the fragment and its skip/nil separator */
		subseqs += frag_len + 1;
		remaining -= frag_len + 1;
	}

	return true;
}
1435
/* Dump up to @cnt instructions from @buf in hex. Stops at the first nil
 * instruction; SKIP_INSNS() markers render as "...".
 */
static void print_insn(struct bpf_insn *buf, int cnt)
{
	struct bpf_insn *insn;
	int idx;

	printf(" addr op d s off imm\n");
	for (idx = 0; idx < cnt; ++idx) {
		insn = &buf[idx];

		if (is_null_insn(insn))
			break;

		if (is_skip_insn(insn)) {
			printf(" ...\n");
			continue;
		}
		printf(" %04x: %02x %1x %x %04hx %08x\n",
		       idx, insn->code, insn->dst_reg,
		       insn->src_reg, insn->off, insn->imm);
	}
}
1455
1456static bool check_xlated_program(struct bpf_test *test, int fd_prog)
1457{
1458 struct bpf_insn *buf;
1459 unsigned int cnt;
1460 bool result = true;
1461 bool check_expected = !is_null_insn(test->expected_insns);
1462 bool check_unexpected = !is_null_insn(test->unexpected_insns);
1463
1464 if (!check_expected && !check_unexpected)
1465 goto out;
1466
1467 if (get_xlated_program(fd_prog, &buf, &cnt)) {
1468 printf("FAIL: can't get xlated program\n");
1469 result = false;
1470 goto out;
1471 }
1472
1473 if (check_expected &&
1474 !find_all_insn_subseqs(buf, test->expected_insns,
1475 cnt, MAX_EXPECTED_INSNS)) {
1476 printf("FAIL: can't find expected subsequence of instructions\n");
1477 result = false;
1478 if (verbose) {
1479 printf("Program:\n");
1480 print_insn(buf, cnt);
1481 printf("Expected subsequence:\n");
1482 print_insn(test->expected_insns, MAX_EXPECTED_INSNS);
1483 }
1484 }
1485
1486 if (check_unexpected &&
1487 find_all_insn_subseqs(buf, test->unexpected_insns,
1488 cnt, MAX_UNEXPECTED_INSNS)) {
1489 printf("FAIL: found unexpected subsequence of instructions\n");
1490 result = false;
1491 if (verbose) {
1492 printf("Program:\n");
1493 print_insn(buf, cnt);
1494 printf("Un-expected subsequence:\n");
1495 print_insn(test->unexpected_insns, MAX_UNEXPECTED_INSNS);
1496 }
1497 }
1498
1499 free(buf);
1500 out:
1501 return result;
1502}
1503
/* Load (and, where applicable, execute) a single test case in either
 * privileged or unprivileged (@unpriv) mode. Increments *@passes or
 * *@errors with the outcome; skipped tests bump the global 'skips'
 * counter instead and count toward neither.
 */
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, btf_fd, expected_ret, alignment_prevented_execution;
	int prog_len, prog_type = test->prog_type;
	struct bpf_insn *prog = test->insns;
	LIBBPF_OPTS(bpf_prog_load_opts, opts);
	int run_errs, run_successes;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	int fd_array[2] = { -1, -1 };
	int saved_errno;
	int fixup_skips;
	__u32 pflags;
	int i, err;

	if ((test->flags & F_NEEDS_JIT_ENABLED) && jit_disabled) {
		printf("SKIP (requires BPF JIT)\n");
		skips++;
		sched_yield();
		return;
	}

	/* Initialize every fd to -1 so the close_fds path is harmless even
	 * when we bail out before creating maps/prog/BTF.
	 */
	fd_prog = -1;
	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;
	btf_fd = -1;

	if (!prog_type)
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	/* fixups may bump 'skips' when a map type is unsupported */
	fixup_skips = skips;
	do_test_fixup(test, prog_type, prog, map_fds, &fd_array[1]);
	if (test->fill_insns) {
		/* fill_helper generated the program; length is explicit */
		prog = test->fill_insns;
		prog_len = test->prog_len;
	} else {
		prog_len = probe_filter_length(prog);
	}
	/* If there were some map skips during fixup due to missing bpf
	 * features, skip this test.
	 */
	if (fixup_skips != skips)
		return;

	pflags = testing_prog_flags();
	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
		pflags |= BPF_F_STRICT_ALIGNMENT;
	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
		pflags |= BPF_F_ANY_ALIGNMENT;
	/* Any flag bits beyond the two alignment bits are passed straight
	 * through as BPF prog_flags.
	 * NOTE(review): this also forwards F_NEEDS_JIT_ENABLED (bit 2) when
	 * set — confirm that is intended, since bit 2 overlaps a BPF_F_*
	 * prog flag value.
	 */
	if (test->flags & ~3)
		pflags |= test->flags;

	/* Unprivileged runs may carry their own expected result/error;
	 * fall back to the privileged expectations otherwise.
	 */
	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;

	opts.expected_attach_type = test->expected_attach_type;
	if (verbose)
		opts.log_level = verif_log_level | 4; /* force stats */
	else if (expected_ret == VERBOSE_ACCEPT)
		opts.log_level = 2;
	else
		opts.log_level = DEFAULT_LIBBPF_LOG_LEVEL;
	opts.prog_flags = pflags;
	/* fd_array[1] is set by do_test_fixup() when a module BTF fd is
	 * needed by kfunc fixups
	 */
	if (fd_array[1] != -1)
		opts.fd_array = &fd_array[0];

	if ((prog_type == BPF_PROG_TYPE_TRACING ||
	     prog_type == BPF_PROG_TYPE_LSM) && test->kfunc) {
		int attach_btf_id;

		attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc,
						opts.expected_attach_type);
		if (attach_btf_id < 0) {
			printf("FAIL\nFailed to find BTF ID for '%s'!\n",
			       test->kfunc);
			(*errors)++;
			return;
		}

		opts.attach_btf_id = attach_btf_id;
	}

	if (test->btf_types[0] != 0) {
		btf_fd = load_btf_for_test(test);
		if (btf_fd < 0)
			goto fail_log;
		opts.prog_btf_fd = btf_fd;
	}

	if (test->func_info_cnt != 0) {
		opts.func_info = test->func_info;
		opts.func_info_cnt = test->func_info_cnt;
		opts.func_info_rec_size = sizeof(test->func_info[0]);
	}

	opts.log_buf = bpf_vlog;
	opts.log_size = sizeof(bpf_vlog);
	fd_prog = bpf_prog_load(prog_type, NULL, "GPL", prog, prog_len, &opts);
	/* capture errno immediately; later libc calls may overwrite it */
	saved_errno = errno;

	/* BPF_PROG_TYPE_TRACING requires more setup and
	 * bpf_probe_prog_type won't give correct answer
	 */
	if (fd_prog < 0 && prog_type != BPF_PROG_TYPE_TRACING &&
	    !libbpf_probe_bpf_prog_type(prog_type, NULL)) {
		printf("SKIP (unsupported program type %d)\n", prog_type);
		skips++;
		goto close_fds;
	}

	if (fd_prog < 0 && saved_errno == ENOTSUPP) {
		printf("SKIP (program uses an unsupported feature)\n");
		skips++;
		goto close_fds;
	}

	alignment_prevented_execution = 0;

	if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) {
		if (fd_prog < 0) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(saved_errno));
			goto fail_log;
		}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		/* program loaded but the test needs unaligned access the
		 * host cannot do efficiently: don't execute it
		 */
		if (fd_prog >= 0 &&
		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
			alignment_prevented_execution = 1;
#endif
		if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err)) {
			goto fail_log;
		}
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		if (!expected_err || !cmp_str_seq(bpf_vlog, expected_err)) {
			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
			       expected_err, bpf_vlog);
			goto fail_log;
		}
	}

	if (!unpriv && test->insn_processed) {
		uint32_t insn_processed;
		char *proc;

		/* extract the number following "processed " in the verifier
		 * log and compare it to the test's expectation
		 */
		proc = strstr(bpf_vlog, "processed ");
		insn_processed = atoi(proc + 10);
		if (test->insn_processed != insn_processed) {
			printf("FAIL\nUnexpected insn_processed %u vs %u\n",
			       insn_processed, test->insn_processed);
			goto fail_log;
		}
	}

	if (verbose)
		printf(", verifier log:\n%s", bpf_vlog);

	if (!check_xlated_program(test, fd_prog))
		goto fail_log;

	run_errs = 0;
	run_successes = 0;
	/* runs < 0 marks a load-only test; otherwise execute each retval
	 * case (defaulting to a single run)
	 */
	if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) {
		uint32_t expected_val;
		int i;

		if (!test->runs)
			test->runs = 1;

		for (i = 0; i < test->runs; i++) {
			if (unpriv && test->retvals[i].retval_unpriv)
				expected_val = test->retvals[i].retval_unpriv;
			else
				expected_val = test->retvals[i].retval;

			err = do_prog_test_run(fd_prog, unpriv, expected_val,
					       test->retvals[i].data,
					       sizeof(test->retvals[i].data));
			if (err) {
				printf("(run %d/%d) ", i + 1, test->runs);
				run_errs++;
			} else {
				run_successes++;
			}
		}
	}

	if (!run_errs) {
		(*passes)++;
		if (run_successes > 1)
			printf("%d cases ", run_successes);
		printf("OK");
		if (alignment_prevented_execution)
			printf(" (NOTE: not executed due to unknown alignment)");
		printf("\n");
	} else {
		printf("\n");
		goto fail_log;
	}
close_fds:
	if (test->fill_insns)
		free(test->fill_insns);
	close(fd_prog);
	close(btf_fd);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	sched_yield();
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}
1722
1723static bool is_admin(void)
1724{
1725 __u64 caps;
1726
1727 /* The test checks for finer cap as CAP_NET_ADMIN,
1728 * CAP_PERFMON, and CAP_BPF instead of CAP_SYS_ADMIN.
1729 * Thus, disable CAP_SYS_ADMIN at the beginning.
1730 */
1731 if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps)) {
1732 perror("cap_disable_effective(CAP_SYS_ADMIN)");
1733 return false;
1734 }
1735
1736 return (caps & ADMIN_CAPS) == ADMIN_CAPS;
1737}
1738
1739static bool test_as_unpriv(struct bpf_test *test)
1740{
1741#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1742 /* Some architectures have strict alignment requirements. In
1743 * that case, the BPF verifier detects if a program has
1744 * unaligned accesses and rejects them. A user can pass
1745 * BPF_F_ANY_ALIGNMENT to a program to override this
1746 * check. That, however, will only work when a privileged user
1747 * loads a program. An unprivileged user loading a program
1748 * with this flag will be rejected prior entering the
1749 * verifier.
1750 */
1751 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
1752 return false;
1753#endif
1754 return !test->prog_type ||
1755 test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
1756 test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
1757}
1758
1759static int do_test(bool unpriv, unsigned int from, unsigned int to)
1760{
1761 int i, passes = 0, errors = 0;
1762
1763 /* ensure previous instance of the module is unloaded */
1764 unload_bpf_testmod(verbose);
1765
1766 if (load_bpf_testmod(verbose))
1767 return EXIT_FAILURE;
1768
1769 for (i = from; i < to; i++) {
1770 struct bpf_test *test = &tests[i];
1771
1772 /* Program types that are not supported by non-root we
1773 * skip right away.
1774 */
1775 if (test_as_unpriv(test) && unpriv_disabled) {
1776 printf("#%d/u %s SKIP\n", i, test->descr);
1777 skips++;
1778 } else if (test_as_unpriv(test)) {
1779 if (!unpriv)
1780 set_admin(false);
1781 printf("#%d/u %s ", i, test->descr);
1782 do_test_single(test, true, &passes, &errors);
1783 if (!unpriv)
1784 set_admin(true);
1785 }
1786
1787 if (unpriv) {
1788 printf("#%d/p %s SKIP\n", i, test->descr);
1789 skips++;
1790 } else {
1791 printf("#%d/p %s ", i, test->descr);
1792 do_test_single(test, false, &passes, &errors);
1793 }
1794 }
1795
1796 unload_bpf_testmod(verbose);
1797 kfuncs_cleanup();
1798
1799 printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
1800 skips, errors);
1801 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
1802}
1803
1804int main(int argc, char **argv)
1805{
1806 unsigned int from = 0, to = ARRAY_SIZE(tests);
1807 bool unpriv = !is_admin();
1808 int arg = 1;
1809
1810 if (argc > 1 && strcmp(argv[1], "-v") == 0) {
1811 arg++;
1812 verbose = true;
1813 verif_log_level = 1;
1814 argc--;
1815 }
1816 if (argc > 1 && strcmp(argv[1], "-vv") == 0) {
1817 arg++;
1818 verbose = true;
1819 verif_log_level = 2;
1820 argc--;
1821 }
1822
1823 if (argc == 3) {
1824 unsigned int l = atoi(argv[arg]);
1825 unsigned int u = atoi(argv[arg + 1]);
1826
1827 if (l < to && u < to) {
1828 from = l;
1829 to = u + 1;
1830 }
1831 } else if (argc == 2) {
1832 unsigned int t = atoi(argv[arg]);
1833
1834 if (t < to) {
1835 from = t;
1836 to = t + 1;
1837 }
1838 }
1839
1840 unpriv_disabled = get_unpriv_disabled();
1841 if (unpriv && unpriv_disabled) {
1842 printf("Cannot run as unprivileged user with sysctl %s.\n",
1843 UNPRIV_SYSCTL);
1844 return EXIT_FAILURE;
1845 }
1846
1847 jit_disabled = !is_jit_enabled();
1848
1849 /* Use libbpf 1.0 API mode */
1850 libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
1851
1852 bpf_semi_rand_init();
1853 return do_test(unpriv, from, to);
1854}
1/*
2 * Testsuite for eBPF verifier
3 *
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 */
10
11#include <stdio.h>
12#include <unistd.h>
13#include <errno.h>
14#include <string.h>
15#include <stddef.h>
16#include <stdbool.h>
17#include <sched.h>
18
19#include <sys/resource.h>
20
21#include <linux/unistd.h>
22#include <linux/filter.h>
23#include <linux/bpf_perf_event.h>
24#include <linux/bpf.h>
25
26#include "../../../include/linux/filter.h"
27
28#include "bpf_sys.h"
29
30#ifndef ARRAY_SIZE
31# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
32#endif
33
34#define MAX_INSNS 512
35#define MAX_FIXUPS 8
36
/* Descriptor for one verifier test case (legacy bpf_sys-based harness). */
struct bpf_test {
	const char *descr;			/* human-readable test name */
	struct bpf_insn insns[MAX_INSNS];	/* program under test */
	/* Instruction indices to fix up with created object fds — the
	 * usage in tests[] pairs these with BPF_LD_MAP_FD slots; presumably
	 * the runner patches each listed insn's imm. TODO confirm against
	 * the (not shown here) fixup code.
	 */
	int fixup_map1[MAX_FIXUPS];
	int fixup_map2[MAX_FIXUPS];
	int fixup_prog[MAX_FIXUPS];
	const char *errstr;			/* expected verifier error (privileged) */
	const char *errstr_unpriv;		/* expected error when unprivileged */
	enum {
		UNDEF,				/* unset; semantics defined by the runner */
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;		/* 0 leaves the default program type */
};
52
/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

/* Map value layout used by the test fixtures: an index field followed by
 * MAX_ENTRIES ints.
 */
struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};
62
63static struct bpf_test tests[] = {
64 {
65 "add+sub+mul",
66 .insns = {
67 BPF_MOV64_IMM(BPF_REG_1, 1),
68 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
69 BPF_MOV64_IMM(BPF_REG_2, 3),
70 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
71 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
72 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
73 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
74 BPF_EXIT_INSN(),
75 },
76 .result = ACCEPT,
77 },
78 {
79 "unreachable",
80 .insns = {
81 BPF_EXIT_INSN(),
82 BPF_EXIT_INSN(),
83 },
84 .errstr = "unreachable",
85 .result = REJECT,
86 },
87 {
88 "unreachable2",
89 .insns = {
90 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
91 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
92 BPF_EXIT_INSN(),
93 },
94 .errstr = "unreachable",
95 .result = REJECT,
96 },
97 {
98 "out of range jump",
99 .insns = {
100 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
101 BPF_EXIT_INSN(),
102 },
103 .errstr = "jump out of range",
104 .result = REJECT,
105 },
106 {
107 "out of range jump2",
108 .insns = {
109 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
110 BPF_EXIT_INSN(),
111 },
112 .errstr = "jump out of range",
113 .result = REJECT,
114 },
115 {
116 "test1 ld_imm64",
117 .insns = {
118 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
119 BPF_LD_IMM64(BPF_REG_0, 0),
120 BPF_LD_IMM64(BPF_REG_0, 0),
121 BPF_LD_IMM64(BPF_REG_0, 1),
122 BPF_LD_IMM64(BPF_REG_0, 1),
123 BPF_MOV64_IMM(BPF_REG_0, 2),
124 BPF_EXIT_INSN(),
125 },
126 .errstr = "invalid BPF_LD_IMM insn",
127 .errstr_unpriv = "R1 pointer comparison",
128 .result = REJECT,
129 },
130 {
131 "test2 ld_imm64",
132 .insns = {
133 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
134 BPF_LD_IMM64(BPF_REG_0, 0),
135 BPF_LD_IMM64(BPF_REG_0, 0),
136 BPF_LD_IMM64(BPF_REG_0, 1),
137 BPF_LD_IMM64(BPF_REG_0, 1),
138 BPF_EXIT_INSN(),
139 },
140 .errstr = "invalid BPF_LD_IMM insn",
141 .errstr_unpriv = "R1 pointer comparison",
142 .result = REJECT,
143 },
144 {
145 "test3 ld_imm64",
146 .insns = {
147 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
148 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
149 BPF_LD_IMM64(BPF_REG_0, 0),
150 BPF_LD_IMM64(BPF_REG_0, 0),
151 BPF_LD_IMM64(BPF_REG_0, 1),
152 BPF_LD_IMM64(BPF_REG_0, 1),
153 BPF_EXIT_INSN(),
154 },
155 .errstr = "invalid bpf_ld_imm64 insn",
156 .result = REJECT,
157 },
158 {
159 "test4 ld_imm64",
160 .insns = {
161 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
162 BPF_EXIT_INSN(),
163 },
164 .errstr = "invalid bpf_ld_imm64 insn",
165 .result = REJECT,
166 },
167 {
168 "test5 ld_imm64",
169 .insns = {
170 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
171 },
172 .errstr = "invalid bpf_ld_imm64 insn",
173 .result = REJECT,
174 },
175 {
176 "no bpf_exit",
177 .insns = {
178 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
179 },
180 .errstr = "jump out of range",
181 .result = REJECT,
182 },
183 {
184 "loop (back-edge)",
185 .insns = {
186 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
187 BPF_EXIT_INSN(),
188 },
189 .errstr = "back-edge",
190 .result = REJECT,
191 },
192 {
193 "loop2 (back-edge)",
194 .insns = {
195 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
196 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
197 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
198 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
199 BPF_EXIT_INSN(),
200 },
201 .errstr = "back-edge",
202 .result = REJECT,
203 },
204 {
205 "conditional loop",
206 .insns = {
207 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
208 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
209 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
210 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
211 BPF_EXIT_INSN(),
212 },
213 .errstr = "back-edge",
214 .result = REJECT,
215 },
216 {
217 "read uninitialized register",
218 .insns = {
219 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
220 BPF_EXIT_INSN(),
221 },
222 .errstr = "R2 !read_ok",
223 .result = REJECT,
224 },
225 {
226 "read invalid register",
227 .insns = {
228 BPF_MOV64_REG(BPF_REG_0, -1),
229 BPF_EXIT_INSN(),
230 },
231 .errstr = "R15 is invalid",
232 .result = REJECT,
233 },
234 {
235 "program doesn't init R0 before exit",
236 .insns = {
237 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
238 BPF_EXIT_INSN(),
239 },
240 .errstr = "R0 !read_ok",
241 .result = REJECT,
242 },
243 {
244 "program doesn't init R0 before exit in all branches",
245 .insns = {
246 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
247 BPF_MOV64_IMM(BPF_REG_0, 1),
248 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
249 BPF_EXIT_INSN(),
250 },
251 .errstr = "R0 !read_ok",
252 .errstr_unpriv = "R1 pointer comparison",
253 .result = REJECT,
254 },
255 {
256 "stack out of bounds",
257 .insns = {
258 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
259 BPF_EXIT_INSN(),
260 },
261 .errstr = "invalid stack",
262 .result = REJECT,
263 },
264 {
265 "invalid call insn1",
266 .insns = {
267 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
268 BPF_EXIT_INSN(),
269 },
270 .errstr = "BPF_CALL uses reserved",
271 .result = REJECT,
272 },
273 {
274 "invalid call insn2",
275 .insns = {
276 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
277 BPF_EXIT_INSN(),
278 },
279 .errstr = "BPF_CALL uses reserved",
280 .result = REJECT,
281 },
282 {
283 "invalid function call",
284 .insns = {
285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
286 BPF_EXIT_INSN(),
287 },
288 .errstr = "invalid func unknown#1234567",
289 .result = REJECT,
290 },
291 {
292 "uninitialized stack1",
293 .insns = {
294 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
295 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
296 BPF_LD_MAP_FD(BPF_REG_1, 0),
297 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
298 BPF_FUNC_map_lookup_elem),
299 BPF_EXIT_INSN(),
300 },
301 .fixup_map1 = { 2 },
302 .errstr = "invalid indirect read from stack",
303 .result = REJECT,
304 },
305 {
306 "uninitialized stack2",
307 .insns = {
308 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
309 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
310 BPF_EXIT_INSN(),
311 },
312 .errstr = "invalid read from stack",
313 .result = REJECT,
314 },
315 {
316 "invalid argument register",
317 .insns = {
318 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
319 BPF_FUNC_get_cgroup_classid),
320 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
321 BPF_FUNC_get_cgroup_classid),
322 BPF_EXIT_INSN(),
323 },
324 .errstr = "R1 !read_ok",
325 .result = REJECT,
326 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
327 },
328 {
329 "non-invalid argument register",
330 .insns = {
331 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
332 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
333 BPF_FUNC_get_cgroup_classid),
334 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
335 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
336 BPF_FUNC_get_cgroup_classid),
337 BPF_EXIT_INSN(),
338 },
339 .result = ACCEPT,
340 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
341 },
342 {
343 "check valid spill/fill",
344 .insns = {
345 /* spill R1(ctx) into stack */
346 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
347 /* fill it back into R2 */
348 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
349 /* should be able to access R0 = *(R2 + 8) */
350 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
351 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
352 BPF_EXIT_INSN(),
353 },
354 .errstr_unpriv = "R0 leaks addr",
355 .result = ACCEPT,
356 .result_unpriv = REJECT,
357 },
358 {
359 "check valid spill/fill, skb mark",
360 .insns = {
361 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
362 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
363 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
364 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
365 offsetof(struct __sk_buff, mark)),
366 BPF_EXIT_INSN(),
367 },
368 .result = ACCEPT,
369 .result_unpriv = ACCEPT,
370 },
371 {
372 "check corrupted spill/fill",
373 .insns = {
374 /* spill R1(ctx) into stack */
375 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
376 /* mess up with R1 pointer on stack */
377 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
378 /* fill back into R0 should fail */
379 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
380 BPF_EXIT_INSN(),
381 },
382 .errstr_unpriv = "attempt to corrupt spilled",
383 .errstr = "corrupted spill",
384 .result = REJECT,
385 },
386 {
387 "invalid src register in STX",
388 .insns = {
389 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
390 BPF_EXIT_INSN(),
391 },
392 .errstr = "R15 is invalid",
393 .result = REJECT,
394 },
395 {
396 "invalid dst register in STX",
397 .insns = {
398 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
399 BPF_EXIT_INSN(),
400 },
401 .errstr = "R14 is invalid",
402 .result = REJECT,
403 },
404 {
405 "invalid dst register in ST",
406 .insns = {
407 BPF_ST_MEM(BPF_B, 14, -1, -1),
408 BPF_EXIT_INSN(),
409 },
410 .errstr = "R14 is invalid",
411 .result = REJECT,
412 },
413 {
414 "invalid src register in LDX",
415 .insns = {
416 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
417 BPF_EXIT_INSN(),
418 },
419 .errstr = "R12 is invalid",
420 .result = REJECT,
421 },
422 {
423 "invalid dst register in LDX",
424 .insns = {
425 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
426 BPF_EXIT_INSN(),
427 },
428 .errstr = "R11 is invalid",
429 .result = REJECT,
430 },
431 {
432 "junk insn",
433 .insns = {
434 BPF_RAW_INSN(0, 0, 0, 0, 0),
435 BPF_EXIT_INSN(),
436 },
437 .errstr = "invalid BPF_LD_IMM",
438 .result = REJECT,
439 },
440 {
441 "junk insn2",
442 .insns = {
443 BPF_RAW_INSN(1, 0, 0, 0, 0),
444 BPF_EXIT_INSN(),
445 },
446 .errstr = "BPF_LDX uses reserved fields",
447 .result = REJECT,
448 },
449 {
450 "junk insn3",
451 .insns = {
452 BPF_RAW_INSN(-1, 0, 0, 0, 0),
453 BPF_EXIT_INSN(),
454 },
455 .errstr = "invalid BPF_ALU opcode f0",
456 .result = REJECT,
457 },
458 {
459 "junk insn4",
460 .insns = {
461 BPF_RAW_INSN(-1, -1, -1, -1, -1),
462 BPF_EXIT_INSN(),
463 },
464 .errstr = "invalid BPF_ALU opcode f0",
465 .result = REJECT,
466 },
467 {
468 "junk insn5",
469 .insns = {
470 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
471 BPF_EXIT_INSN(),
472 },
473 .errstr = "BPF_ALU uses reserved fields",
474 .result = REJECT,
475 },
476 {
477 "misaligned read from stack",
478 .insns = {
479 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
480 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
481 BPF_EXIT_INSN(),
482 },
483 .errstr = "misaligned access",
484 .result = REJECT,
485 },
486 {
487 "invalid map_fd for function call",
488 .insns = {
489 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
490 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
491 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
492 BPF_LD_MAP_FD(BPF_REG_1, 0),
493 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
494 BPF_FUNC_map_delete_elem),
495 BPF_EXIT_INSN(),
496 },
497 .errstr = "fd 0 is not pointing to valid bpf_map",
498 .result = REJECT,
499 },
500 {
501 "don't check return value before access",
502 .insns = {
503 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
504 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
505 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
506 BPF_LD_MAP_FD(BPF_REG_1, 0),
507 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
508 BPF_FUNC_map_lookup_elem),
509 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
510 BPF_EXIT_INSN(),
511 },
512 .fixup_map1 = { 3 },
513 .errstr = "R0 invalid mem access 'map_value_or_null'",
514 .result = REJECT,
515 },
516 {
517 "access memory with incorrect alignment",
518 .insns = {
519 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
520 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
522 BPF_LD_MAP_FD(BPF_REG_1, 0),
523 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
524 BPF_FUNC_map_lookup_elem),
525 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
526 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
527 BPF_EXIT_INSN(),
528 },
529 .fixup_map1 = { 3 },
530 .errstr = "misaligned access",
531 .result = REJECT,
532 },
533 {
534 "sometimes access memory with incorrect alignment",
535 .insns = {
536 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
537 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
538 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
539 BPF_LD_MAP_FD(BPF_REG_1, 0),
540 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
541 BPF_FUNC_map_lookup_elem),
542 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
543 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
544 BPF_EXIT_INSN(),
545 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
546 BPF_EXIT_INSN(),
547 },
548 .fixup_map1 = { 3 },
549 .errstr = "R0 invalid mem access",
550 .errstr_unpriv = "R0 leaks addr",
551 .result = REJECT,
552 },
553 {
554 "jump test 1",
555 .insns = {
556 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
557 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
558 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
559 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
560 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
561 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
562 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
563 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
564 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
565 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
566 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
567 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
568 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
569 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
570 BPF_MOV64_IMM(BPF_REG_0, 0),
571 BPF_EXIT_INSN(),
572 },
573 .errstr_unpriv = "R1 pointer comparison",
574 .result_unpriv = REJECT,
575 .result = ACCEPT,
576 },
577 {
578 "jump test 2",
579 .insns = {
580 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
581 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
582 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
583 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
584 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
585 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
586 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
587 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
588 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
589 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
590 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
591 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
592 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
593 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
594 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
595 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
596 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
597 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
598 BPF_MOV64_IMM(BPF_REG_0, 0),
599 BPF_EXIT_INSN(),
600 },
601 .errstr_unpriv = "R1 pointer comparison",
602 .result_unpriv = REJECT,
603 .result = ACCEPT,
604 },
605 {
606 "jump test 3",
607 .insns = {
608 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
609 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
610 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
611 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
612 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
613 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
614 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
616 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
617 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
618 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
619 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
620 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
622 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
623 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
624 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
625 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
626 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
627 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
628 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
629 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
630 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
631 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
632 BPF_LD_MAP_FD(BPF_REG_1, 0),
633 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
634 BPF_FUNC_map_delete_elem),
635 BPF_EXIT_INSN(),
636 },
637 .fixup_map1 = { 24 },
638 .errstr_unpriv = "R1 pointer comparison",
639 .result_unpriv = REJECT,
640 .result = ACCEPT,
641 },
642 {
643 "jump test 4",
644 .insns = {
645 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
646 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
647 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
648 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
649 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
650 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
651 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
652 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
653 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
654 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
655 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
656 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
657 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
658 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
659 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
660 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
661 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
662 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
663 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
664 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
665 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
666 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
667 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
668 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
669 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
670 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
671 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
672 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
673 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
674 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
675 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
676 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
677 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
678 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
679 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
680 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
681 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
682 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
683 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
684 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
685 BPF_MOV64_IMM(BPF_REG_0, 0),
686 BPF_EXIT_INSN(),
687 },
688 .errstr_unpriv = "R1 pointer comparison",
689 .result_unpriv = REJECT,
690 .result = ACCEPT,
691 },
692 {
693 "jump test 5",
694 .insns = {
695 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
696 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
697 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
698 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
699 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
700 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
701 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
702 BPF_MOV64_IMM(BPF_REG_0, 0),
703 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
704 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
705 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
706 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
707 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
708 BPF_MOV64_IMM(BPF_REG_0, 0),
709 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
710 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
711 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
712 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
713 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
714 BPF_MOV64_IMM(BPF_REG_0, 0),
715 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
716 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
717 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
718 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
719 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
720 BPF_MOV64_IMM(BPF_REG_0, 0),
721 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
722 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
723 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
724 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
725 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
726 BPF_MOV64_IMM(BPF_REG_0, 0),
727 BPF_EXIT_INSN(),
728 },
729 .errstr_unpriv = "R1 pointer comparison",
730 .result_unpriv = REJECT,
731 .result = ACCEPT,
732 },
733 {
734 "access skb fields ok",
735 .insns = {
736 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
737 offsetof(struct __sk_buff, len)),
738 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
739 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
740 offsetof(struct __sk_buff, mark)),
741 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
742 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
743 offsetof(struct __sk_buff, pkt_type)),
744 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
745 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
746 offsetof(struct __sk_buff, queue_mapping)),
747 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
748 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
749 offsetof(struct __sk_buff, protocol)),
750 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
751 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
752 offsetof(struct __sk_buff, vlan_present)),
753 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
754 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
755 offsetof(struct __sk_buff, vlan_tci)),
756 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
757 BPF_EXIT_INSN(),
758 },
759 .result = ACCEPT,
760 },
761 {
762 "access skb fields bad1",
763 .insns = {
764 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
765 BPF_EXIT_INSN(),
766 },
767 .errstr = "invalid bpf_context access",
768 .result = REJECT,
769 },
770 {
771 "access skb fields bad2",
772 .insns = {
773 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
774 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
775 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
776 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
777 BPF_LD_MAP_FD(BPF_REG_1, 0),
778 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
779 BPF_FUNC_map_lookup_elem),
780 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
781 BPF_EXIT_INSN(),
782 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
783 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
784 offsetof(struct __sk_buff, pkt_type)),
785 BPF_EXIT_INSN(),
786 },
787 .fixup_map1 = { 4 },
788 .errstr = "different pointers",
789 .errstr_unpriv = "R1 pointer comparison",
790 .result = REJECT,
791 },
792 {
793 "access skb fields bad3",
794 .insns = {
795 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
796 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
797 offsetof(struct __sk_buff, pkt_type)),
798 BPF_EXIT_INSN(),
799 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
800 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
802 BPF_LD_MAP_FD(BPF_REG_1, 0),
803 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
804 BPF_FUNC_map_lookup_elem),
805 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
806 BPF_EXIT_INSN(),
807 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
808 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
809 },
810 .fixup_map1 = { 6 },
811 .errstr = "different pointers",
812 .errstr_unpriv = "R1 pointer comparison",
813 .result = REJECT,
814 },
815 {
816 "access skb fields bad4",
817 .insns = {
818 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
819 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
820 offsetof(struct __sk_buff, len)),
821 BPF_MOV64_IMM(BPF_REG_0, 0),
822 BPF_EXIT_INSN(),
823 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
824 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
825 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
826 BPF_LD_MAP_FD(BPF_REG_1, 0),
827 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
828 BPF_FUNC_map_lookup_elem),
829 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
830 BPF_EXIT_INSN(),
831 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
832 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
833 },
834 .fixup_map1 = { 7 },
835 .errstr = "different pointers",
836 .errstr_unpriv = "R1 pointer comparison",
837 .result = REJECT,
838 },
839 {
840 "check skb->mark is not writeable by sockets",
841 .insns = {
842 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
843 offsetof(struct __sk_buff, mark)),
844 BPF_EXIT_INSN(),
845 },
846 .errstr = "invalid bpf_context access",
847 .errstr_unpriv = "R1 leaks addr",
848 .result = REJECT,
849 },
850 {
851 "check skb->tc_index is not writeable by sockets",
852 .insns = {
853 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
854 offsetof(struct __sk_buff, tc_index)),
855 BPF_EXIT_INSN(),
856 },
857 .errstr = "invalid bpf_context access",
858 .errstr_unpriv = "R1 leaks addr",
859 .result = REJECT,
860 },
861 {
862 "check non-u32 access to cb",
863 .insns = {
864 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
865 offsetof(struct __sk_buff, cb[0])),
866 BPF_EXIT_INSN(),
867 },
868 .errstr = "invalid bpf_context access",
869 .errstr_unpriv = "R1 leaks addr",
870 .result = REJECT,
871 },
872 {
873 "check out of range skb->cb access",
874 .insns = {
875 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
876 offsetof(struct __sk_buff, cb[0]) + 256),
877 BPF_EXIT_INSN(),
878 },
879 .errstr = "invalid bpf_context access",
880 .errstr_unpriv = "",
881 .result = REJECT,
882 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
883 },
884 {
885 "write skb fields from socket prog",
886 .insns = {
887 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
888 offsetof(struct __sk_buff, cb[4])),
889 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
890 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
891 offsetof(struct __sk_buff, mark)),
892 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
893 offsetof(struct __sk_buff, tc_index)),
894 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
895 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
896 offsetof(struct __sk_buff, cb[0])),
897 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
898 offsetof(struct __sk_buff, cb[2])),
899 BPF_EXIT_INSN(),
900 },
901 .result = ACCEPT,
902 .errstr_unpriv = "R1 leaks addr",
903 .result_unpriv = REJECT,
904 },
905 {
906 "write skb fields from tc_cls_act prog",
907 .insns = {
908 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
909 offsetof(struct __sk_buff, cb[0])),
910 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
911 offsetof(struct __sk_buff, mark)),
912 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
913 offsetof(struct __sk_buff, tc_index)),
914 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
915 offsetof(struct __sk_buff, tc_index)),
916 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
917 offsetof(struct __sk_buff, cb[3])),
918 BPF_EXIT_INSN(),
919 },
920 .errstr_unpriv = "",
921 .result_unpriv = REJECT,
922 .result = ACCEPT,
923 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
924 },
925 {
926 "PTR_TO_STACK store/load",
927 .insns = {
928 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
929 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
930 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
931 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
932 BPF_EXIT_INSN(),
933 },
934 .result = ACCEPT,
935 },
936 {
937 "PTR_TO_STACK store/load - bad alignment on off",
938 .insns = {
939 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
940 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
941 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
942 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
943 BPF_EXIT_INSN(),
944 },
945 .result = REJECT,
946 .errstr = "misaligned access off -6 size 8",
947 },
948 {
949 "PTR_TO_STACK store/load - bad alignment on reg",
950 .insns = {
951 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
952 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
953 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
954 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
955 BPF_EXIT_INSN(),
956 },
957 .result = REJECT,
958 .errstr = "misaligned access off -2 size 8",
959 },
960 {
961 "PTR_TO_STACK store/load - out of bounds low",
962 .insns = {
963 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
964 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
965 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
966 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
967 BPF_EXIT_INSN(),
968 },
969 .result = REJECT,
970 .errstr = "invalid stack off=-79992 size=8",
971 },
972 {
973 "PTR_TO_STACK store/load - out of bounds high",
974 .insns = {
975 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
977 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
978 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
979 BPF_EXIT_INSN(),
980 },
981 .result = REJECT,
982 .errstr = "invalid stack off=0 size=8",
983 },
984 {
985 "unpriv: return pointer",
986 .insns = {
987 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
988 BPF_EXIT_INSN(),
989 },
990 .result = ACCEPT,
991 .result_unpriv = REJECT,
992 .errstr_unpriv = "R0 leaks addr",
993 },
994 {
995 "unpriv: add const to pointer",
996 .insns = {
997 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
998 BPF_MOV64_IMM(BPF_REG_0, 0),
999 BPF_EXIT_INSN(),
1000 },
1001 .result = ACCEPT,
1002 .result_unpriv = REJECT,
1003 .errstr_unpriv = "R1 pointer arithmetic",
1004 },
1005 {
1006 "unpriv: add pointer to pointer",
1007 .insns = {
1008 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1009 BPF_MOV64_IMM(BPF_REG_0, 0),
1010 BPF_EXIT_INSN(),
1011 },
1012 .result = ACCEPT,
1013 .result_unpriv = REJECT,
1014 .errstr_unpriv = "R1 pointer arithmetic",
1015 },
1016 {
1017 "unpriv: neg pointer",
1018 .insns = {
1019 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1020 BPF_MOV64_IMM(BPF_REG_0, 0),
1021 BPF_EXIT_INSN(),
1022 },
1023 .result = ACCEPT,
1024 .result_unpriv = REJECT,
1025 .errstr_unpriv = "R1 pointer arithmetic",
1026 },
1027 {
1028 "unpriv: cmp pointer with const",
1029 .insns = {
1030 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1031 BPF_MOV64_IMM(BPF_REG_0, 0),
1032 BPF_EXIT_INSN(),
1033 },
1034 .result = ACCEPT,
1035 .result_unpriv = REJECT,
1036 .errstr_unpriv = "R1 pointer comparison",
1037 },
1038 {
1039 "unpriv: cmp pointer with pointer",
1040 .insns = {
1041 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1042 BPF_MOV64_IMM(BPF_REG_0, 0),
1043 BPF_EXIT_INSN(),
1044 },
1045 .result = ACCEPT,
1046 .result_unpriv = REJECT,
1047 .errstr_unpriv = "R10 pointer comparison",
1048 },
1049 {
1050 "unpriv: check that printk is disallowed",
1051 .insns = {
1052 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1053 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1054 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1055 BPF_MOV64_IMM(BPF_REG_2, 8),
1056 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1057 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1058 BPF_FUNC_trace_printk),
1059 BPF_MOV64_IMM(BPF_REG_0, 0),
1060 BPF_EXIT_INSN(),
1061 },
1062 .errstr_unpriv = "unknown func bpf_trace_printk#6",
1063 .result_unpriv = REJECT,
1064 .result = ACCEPT,
1065 },
1066 {
1067 "unpriv: pass pointer to helper function",
1068 .insns = {
1069 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1070 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1071 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1072 BPF_LD_MAP_FD(BPF_REG_1, 0),
1073 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1074 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1075 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1076 BPF_FUNC_map_update_elem),
1077 BPF_MOV64_IMM(BPF_REG_0, 0),
1078 BPF_EXIT_INSN(),
1079 },
1080 .fixup_map1 = { 3 },
1081 .errstr_unpriv = "R4 leaks addr",
1082 .result_unpriv = REJECT,
1083 .result = ACCEPT,
1084 },
1085 {
1086 "unpriv: indirectly pass pointer on stack to helper function",
1087 .insns = {
1088 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1089 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1090 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1091 BPF_LD_MAP_FD(BPF_REG_1, 0),
1092 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1093 BPF_FUNC_map_lookup_elem),
1094 BPF_MOV64_IMM(BPF_REG_0, 0),
1095 BPF_EXIT_INSN(),
1096 },
1097 .fixup_map1 = { 3 },
1098 .errstr = "invalid indirect read from stack off -8+0 size 8",
1099 .result = REJECT,
1100 },
1101 {
1102 "unpriv: mangle pointer on stack 1",
1103 .insns = {
1104 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1105 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1106 BPF_MOV64_IMM(BPF_REG_0, 0),
1107 BPF_EXIT_INSN(),
1108 },
1109 .errstr_unpriv = "attempt to corrupt spilled",
1110 .result_unpriv = REJECT,
1111 .result = ACCEPT,
1112 },
1113 {
1114 "unpriv: mangle pointer on stack 2",
1115 .insns = {
1116 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1117 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1118 BPF_MOV64_IMM(BPF_REG_0, 0),
1119 BPF_EXIT_INSN(),
1120 },
1121 .errstr_unpriv = "attempt to corrupt spilled",
1122 .result_unpriv = REJECT,
1123 .result = ACCEPT,
1124 },
1125 {
1126 "unpriv: read pointer from stack in small chunks",
1127 .insns = {
1128 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1129 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1130 BPF_MOV64_IMM(BPF_REG_0, 0),
1131 BPF_EXIT_INSN(),
1132 },
1133 .errstr = "invalid size",
1134 .result = REJECT,
1135 },
1136 {
1137 "unpriv: write pointer into ctx",
1138 .insns = {
1139 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1140 BPF_MOV64_IMM(BPF_REG_0, 0),
1141 BPF_EXIT_INSN(),
1142 },
1143 .errstr_unpriv = "R1 leaks addr",
1144 .result_unpriv = REJECT,
1145 .errstr = "invalid bpf_context access",
1146 .result = REJECT,
1147 },
1148 {
1149 "unpriv: spill/fill of ctx",
1150 .insns = {
1151 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1152 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1153 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1154 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1155 BPF_MOV64_IMM(BPF_REG_0, 0),
1156 BPF_EXIT_INSN(),
1157 },
1158 .result = ACCEPT,
1159 },
1160 {
1161 "unpriv: spill/fill of ctx 2",
1162 .insns = {
1163 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1164 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1165 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1166 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1167 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1168 BPF_FUNC_get_hash_recalc),
1169 BPF_EXIT_INSN(),
1170 },
1171 .result = ACCEPT,
1172 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1173 },
1174 {
1175 "unpriv: spill/fill of ctx 3",
1176 .insns = {
1177 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1178 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1179 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1180 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1181 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1182 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1183 BPF_FUNC_get_hash_recalc),
1184 BPF_EXIT_INSN(),
1185 },
1186 .result = REJECT,
1187 .errstr = "R1 type=fp expected=ctx",
1188 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1189 },
1190 {
1191 "unpriv: spill/fill of ctx 4",
1192 .insns = {
1193 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1195 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1196 BPF_MOV64_IMM(BPF_REG_0, 1),
1197 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
1198 BPF_REG_0, -8, 0),
1199 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1200 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1201 BPF_FUNC_get_hash_recalc),
1202 BPF_EXIT_INSN(),
1203 },
1204 .result = REJECT,
1205 .errstr = "R1 type=inv expected=ctx",
1206 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1207 },
1208 {
1209 "unpriv: spill/fill of different pointers stx",
1210 .insns = {
1211 BPF_MOV64_IMM(BPF_REG_3, 42),
1212 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1213 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1214 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1215 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1216 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1217 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1218 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1219 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1220 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1221 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
1222 offsetof(struct __sk_buff, mark)),
1223 BPF_MOV64_IMM(BPF_REG_0, 0),
1224 BPF_EXIT_INSN(),
1225 },
1226 .result = REJECT,
1227 .errstr = "same insn cannot be used with different pointers",
1228 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1229 },
1230 {
1231 "unpriv: spill/fill of different pointers ldx",
1232 .insns = {
1233 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1234 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1235 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1236 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
1238 -(__s32)offsetof(struct bpf_perf_event_data,
1239 sample_period) - 8),
1240 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1241 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1242 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1243 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1244 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
1245 offsetof(struct bpf_perf_event_data,
1246 sample_period)),
1247 BPF_MOV64_IMM(BPF_REG_0, 0),
1248 BPF_EXIT_INSN(),
1249 },
1250 .result = REJECT,
1251 .errstr = "same insn cannot be used with different pointers",
1252 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
1253 },
1254 {
1255 "unpriv: write pointer into map elem value",
1256 .insns = {
1257 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1258 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1259 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1260 BPF_LD_MAP_FD(BPF_REG_1, 0),
1261 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1262 BPF_FUNC_map_lookup_elem),
1263 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1264 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
1265 BPF_EXIT_INSN(),
1266 },
1267 .fixup_map1 = { 3 },
1268 .errstr_unpriv = "R0 leaks addr",
1269 .result_unpriv = REJECT,
1270 .result = ACCEPT,
1271 },
1272 {
1273 "unpriv: partial copy of pointer",
1274 .insns = {
1275 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
1276 BPF_MOV64_IMM(BPF_REG_0, 0),
1277 BPF_EXIT_INSN(),
1278 },
1279 .errstr_unpriv = "R10 partial copy",
1280 .result_unpriv = REJECT,
1281 .result = ACCEPT,
1282 },
1283 {
1284 "unpriv: pass pointer to tail_call",
1285 .insns = {
1286 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1287 BPF_LD_MAP_FD(BPF_REG_2, 0),
1288 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1289 BPF_FUNC_tail_call),
1290 BPF_MOV64_IMM(BPF_REG_0, 0),
1291 BPF_EXIT_INSN(),
1292 },
1293 .fixup_prog = { 1 },
1294 .errstr_unpriv = "R3 leaks addr into helper",
1295 .result_unpriv = REJECT,
1296 .result = ACCEPT,
1297 },
1298 {
1299 "unpriv: cmp map pointer with zero",
1300 .insns = {
1301 BPF_MOV64_IMM(BPF_REG_1, 0),
1302 BPF_LD_MAP_FD(BPF_REG_1, 0),
1303 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1304 BPF_MOV64_IMM(BPF_REG_0, 0),
1305 BPF_EXIT_INSN(),
1306 },
1307 .fixup_map1 = { 1 },
1308 .errstr_unpriv = "R1 pointer comparison",
1309 .result_unpriv = REJECT,
1310 .result = ACCEPT,
1311 },
1312 {
1313 "unpriv: write into frame pointer",
1314 .insns = {
1315 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
1316 BPF_MOV64_IMM(BPF_REG_0, 0),
1317 BPF_EXIT_INSN(),
1318 },
1319 .errstr = "frame pointer is read only",
1320 .result = REJECT,
1321 },
1322 {
1323 "unpriv: spill/fill frame pointer",
1324 .insns = {
1325 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1326 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1327 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1328 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
1329 BPF_MOV64_IMM(BPF_REG_0, 0),
1330 BPF_EXIT_INSN(),
1331 },
1332 .errstr = "frame pointer is read only",
1333 .result = REJECT,
1334 },
1335 {
1336 "unpriv: cmp of frame pointer",
1337 .insns = {
1338 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
1339 BPF_MOV64_IMM(BPF_REG_0, 0),
1340 BPF_EXIT_INSN(),
1341 },
1342 .errstr_unpriv = "R10 pointer comparison",
1343 .result_unpriv = REJECT,
1344 .result = ACCEPT,
1345 },
1346 {
1347 "unpriv: cmp of stack pointer",
1348 .insns = {
1349 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1350 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1351 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
1352 BPF_MOV64_IMM(BPF_REG_0, 0),
1353 BPF_EXIT_INSN(),
1354 },
1355 .errstr_unpriv = "R2 pointer comparison",
1356 .result_unpriv = REJECT,
1357 .result = ACCEPT,
1358 },
1359 {
1360 "unpriv: obfuscate stack pointer",
1361 .insns = {
1362 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1363 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1364 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1365 BPF_MOV64_IMM(BPF_REG_0, 0),
1366 BPF_EXIT_INSN(),
1367 },
1368 .errstr_unpriv = "R2 pointer arithmetic",
1369 .result_unpriv = REJECT,
1370 .result = ACCEPT,
1371 },
1372 {
1373 "raw_stack: no skb_load_bytes",
1374 .insns = {
1375 BPF_MOV64_IMM(BPF_REG_2, 4),
1376 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1378 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1379 BPF_MOV64_IMM(BPF_REG_4, 8),
1380 /* Call to skb_load_bytes() omitted. */
1381 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1382 BPF_EXIT_INSN(),
1383 },
1384 .result = REJECT,
1385 .errstr = "invalid read from stack off -8+0 size 8",
1386 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1387 },
1388 {
1389 "raw_stack: skb_load_bytes, negative len",
1390 .insns = {
1391 BPF_MOV64_IMM(BPF_REG_2, 4),
1392 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1393 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1394 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1395 BPF_MOV64_IMM(BPF_REG_4, -8),
1396 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1397 BPF_FUNC_skb_load_bytes),
1398 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1399 BPF_EXIT_INSN(),
1400 },
1401 .result = REJECT,
1402 .errstr = "invalid stack type R3",
1403 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1404 },
1405 {
1406 "raw_stack: skb_load_bytes, negative len 2",
1407 .insns = {
1408 BPF_MOV64_IMM(BPF_REG_2, 4),
1409 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1410 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1411 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1412 BPF_MOV64_IMM(BPF_REG_4, ~0),
1413 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1414 BPF_FUNC_skb_load_bytes),
1415 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1416 BPF_EXIT_INSN(),
1417 },
1418 .result = REJECT,
1419 .errstr = "invalid stack type R3",
1420 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1421 },
1422 {
1423 "raw_stack: skb_load_bytes, zero len",
1424 .insns = {
1425 BPF_MOV64_IMM(BPF_REG_2, 4),
1426 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1427 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1428 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1429 BPF_MOV64_IMM(BPF_REG_4, 0),
1430 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1431 BPF_FUNC_skb_load_bytes),
1432 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1433 BPF_EXIT_INSN(),
1434 },
1435 .result = REJECT,
1436 .errstr = "invalid stack type R3",
1437 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1438 },
1439 {
1440 "raw_stack: skb_load_bytes, no init",
1441 .insns = {
1442 BPF_MOV64_IMM(BPF_REG_2, 4),
1443 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1444 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1445 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1446 BPF_MOV64_IMM(BPF_REG_4, 8),
1447 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1448 BPF_FUNC_skb_load_bytes),
1449 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1450 BPF_EXIT_INSN(),
1451 },
1452 .result = ACCEPT,
1453 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1454 },
1455 {
1456 "raw_stack: skb_load_bytes, init",
1457 .insns = {
1458 BPF_MOV64_IMM(BPF_REG_2, 4),
1459 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1460 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1461 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
1462 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1463 BPF_MOV64_IMM(BPF_REG_4, 8),
1464 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1465 BPF_FUNC_skb_load_bytes),
1466 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1467 BPF_EXIT_INSN(),
1468 },
1469 .result = ACCEPT,
1470 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1471 },
1472 {
1473 "raw_stack: skb_load_bytes, spilled regs around bounds",
1474 .insns = {
1475 BPF_MOV64_IMM(BPF_REG_2, 4),
1476 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
1478 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
1479 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
1480 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1481 BPF_MOV64_IMM(BPF_REG_4, 8),
1482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1483 BPF_FUNC_skb_load_bytes),
1484 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
1485 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
1486 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1487 offsetof(struct __sk_buff, mark)),
1488 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
1489 offsetof(struct __sk_buff, priority)),
1490 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
1491 BPF_EXIT_INSN(),
1492 },
1493 .result = ACCEPT,
1494 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1495 },
1496 {
1497 "raw_stack: skb_load_bytes, spilled regs corruption",
1498 .insns = {
1499 BPF_MOV64_IMM(BPF_REG_2, 4),
1500 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1501 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1502 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1503 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1504 BPF_MOV64_IMM(BPF_REG_4, 8),
1505 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1506 BPF_FUNC_skb_load_bytes),
1507 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1508 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1509 offsetof(struct __sk_buff, mark)),
1510 BPF_EXIT_INSN(),
1511 },
1512 .result = REJECT,
1513 .errstr = "R0 invalid mem access 'inv'",
1514 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1515 },
1516 {
1517 "raw_stack: skb_load_bytes, spilled regs corruption 2",
1518 .insns = {
1519 BPF_MOV64_IMM(BPF_REG_2, 4),
1520 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1521 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
1522 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
1523 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1524 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
1525 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1526 BPF_MOV64_IMM(BPF_REG_4, 8),
1527 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1528 BPF_FUNC_skb_load_bytes),
1529 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
1530 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
1531 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
1532 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1533 offsetof(struct __sk_buff, mark)),
1534 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
1535 offsetof(struct __sk_buff, priority)),
1536 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
1537 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
1538 offsetof(struct __sk_buff, pkt_type)),
1539 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
1540 BPF_EXIT_INSN(),
1541 },
1542 .result = REJECT,
1543 .errstr = "R3 invalid mem access 'inv'",
1544 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1545 },
1546 {
1547 "raw_stack: skb_load_bytes, spilled regs + data",
1548 .insns = {
1549 BPF_MOV64_IMM(BPF_REG_2, 4),
1550 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1551 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
1552 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
1553 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1554 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
1555 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1556 BPF_MOV64_IMM(BPF_REG_4, 8),
1557 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1558 BPF_FUNC_skb_load_bytes),
1559 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
1560 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
1561 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
1562 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1563 offsetof(struct __sk_buff, mark)),
1564 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
1565 offsetof(struct __sk_buff, priority)),
1566 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
1567 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
1568 BPF_EXIT_INSN(),
1569 },
1570 .result = ACCEPT,
1571 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1572 },
1573 {
1574 "raw_stack: skb_load_bytes, invalid access 1",
1575 .insns = {
1576 BPF_MOV64_IMM(BPF_REG_2, 4),
1577 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1578 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
1579 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1580 BPF_MOV64_IMM(BPF_REG_4, 8),
1581 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1582 BPF_FUNC_skb_load_bytes),
1583 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1584 BPF_EXIT_INSN(),
1585 },
1586 .result = REJECT,
1587 .errstr = "invalid stack type R3 off=-513 access_size=8",
1588 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1589 },
1590 {
1591 "raw_stack: skb_load_bytes, invalid access 2",
1592 .insns = {
1593 BPF_MOV64_IMM(BPF_REG_2, 4),
1594 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1595 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
1596 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1597 BPF_MOV64_IMM(BPF_REG_4, 8),
1598 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1599 BPF_FUNC_skb_load_bytes),
1600 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1601 BPF_EXIT_INSN(),
1602 },
1603 .result = REJECT,
1604 .errstr = "invalid stack type R3 off=-1 access_size=8",
1605 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1606 },
1607 {
1608 "raw_stack: skb_load_bytes, invalid access 3",
1609 .insns = {
1610 BPF_MOV64_IMM(BPF_REG_2, 4),
1611 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1612 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
1613 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1614 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
1615 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1616 BPF_FUNC_skb_load_bytes),
1617 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1618 BPF_EXIT_INSN(),
1619 },
1620 .result = REJECT,
1621 .errstr = "invalid stack type R3 off=-1 access_size=-1",
1622 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1623 },
1624 {
1625 "raw_stack: skb_load_bytes, invalid access 4",
1626 .insns = {
1627 BPF_MOV64_IMM(BPF_REG_2, 4),
1628 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1629 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
1630 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1631 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
1632 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1633 BPF_FUNC_skb_load_bytes),
1634 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1635 BPF_EXIT_INSN(),
1636 },
1637 .result = REJECT,
1638 .errstr = "invalid stack type R3 off=-1 access_size=2147483647",
1639 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1640 },
1641 {
1642 "raw_stack: skb_load_bytes, invalid access 5",
1643 .insns = {
1644 BPF_MOV64_IMM(BPF_REG_2, 4),
1645 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1646 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
1647 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1648 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
1649 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1650 BPF_FUNC_skb_load_bytes),
1651 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1652 BPF_EXIT_INSN(),
1653 },
1654 .result = REJECT,
1655 .errstr = "invalid stack type R3 off=-512 access_size=2147483647",
1656 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1657 },
1658 {
1659 "raw_stack: skb_load_bytes, invalid access 6",
1660 .insns = {
1661 BPF_MOV64_IMM(BPF_REG_2, 4),
1662 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
1664 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1665 BPF_MOV64_IMM(BPF_REG_4, 0),
1666 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1667 BPF_FUNC_skb_load_bytes),
1668 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1669 BPF_EXIT_INSN(),
1670 },
1671 .result = REJECT,
1672 .errstr = "invalid stack type R3 off=-512 access_size=0",
1673 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1674 },
1675 {
1676 "raw_stack: skb_load_bytes, large access",
1677 .insns = {
1678 BPF_MOV64_IMM(BPF_REG_2, 4),
1679 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
1681 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1682 BPF_MOV64_IMM(BPF_REG_4, 512),
1683 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1684 BPF_FUNC_skb_load_bytes),
1685 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1686 BPF_EXIT_INSN(),
1687 },
1688 .result = ACCEPT,
1689 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1690 },
1691 {
1692 "direct packet access: test1",
1693 .insns = {
1694 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1695 offsetof(struct __sk_buff, data)),
1696 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1697 offsetof(struct __sk_buff, data_end)),
1698 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1699 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1700 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1701 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1702 BPF_MOV64_IMM(BPF_REG_0, 0),
1703 BPF_EXIT_INSN(),
1704 },
1705 .result = ACCEPT,
1706 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1707 },
1708 {
1709 "direct packet access: test2",
1710 .insns = {
1711 BPF_MOV64_IMM(BPF_REG_0, 1),
1712 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
1713 offsetof(struct __sk_buff, data_end)),
1714 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1715 offsetof(struct __sk_buff, data)),
1716 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
1717 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
1718 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
1719 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
1720 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
1721 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
1722 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1723 offsetof(struct __sk_buff, data)),
1724 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
1725 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
1726 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
1727 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
1728 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
1729 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
1730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
1731 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1732 offsetof(struct __sk_buff, data_end)),
1733 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
1734 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
1735 BPF_MOV64_IMM(BPF_REG_0, 0),
1736 BPF_EXIT_INSN(),
1737 },
1738 .result = ACCEPT,
1739 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1740 },
1741 {
1742 "direct packet access: test3",
1743 .insns = {
1744 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1745 offsetof(struct __sk_buff, data)),
1746 BPF_MOV64_IMM(BPF_REG_0, 0),
1747 BPF_EXIT_INSN(),
1748 },
1749 .errstr = "invalid bpf_context access off=76",
1750 .result = REJECT,
1751 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
1752 },
1753 {
1754 "direct packet access: test4 (write)",
1755 .insns = {
1756 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1757 offsetof(struct __sk_buff, data)),
1758 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1759 offsetof(struct __sk_buff, data_end)),
1760 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1761 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1762 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1763 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1764 BPF_MOV64_IMM(BPF_REG_0, 0),
1765 BPF_EXIT_INSN(),
1766 },
1767 .result = ACCEPT,
1768 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1769 },
1770 {
1771 "direct packet access: test5 (pkt_end >= reg, good access)",
1772 .insns = {
1773 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1774 offsetof(struct __sk_buff, data)),
1775 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1776 offsetof(struct __sk_buff, data_end)),
1777 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1778 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1779 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
1780 BPF_MOV64_IMM(BPF_REG_0, 1),
1781 BPF_EXIT_INSN(),
1782 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1783 BPF_MOV64_IMM(BPF_REG_0, 0),
1784 BPF_EXIT_INSN(),
1785 },
1786 .result = ACCEPT,
1787 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1788 },
1789 {
1790 "direct packet access: test6 (pkt_end >= reg, bad access)",
1791 .insns = {
1792 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1793 offsetof(struct __sk_buff, data)),
1794 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1795 offsetof(struct __sk_buff, data_end)),
1796 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1797 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1798 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
1799 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1800 BPF_MOV64_IMM(BPF_REG_0, 1),
1801 BPF_EXIT_INSN(),
1802 BPF_MOV64_IMM(BPF_REG_0, 0),
1803 BPF_EXIT_INSN(),
1804 },
1805 .errstr = "invalid access to packet",
1806 .result = REJECT,
1807 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1808 },
1809 {
1810 "direct packet access: test7 (pkt_end >= reg, both accesses)",
1811 .insns = {
1812 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1813 offsetof(struct __sk_buff, data)),
1814 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1815 offsetof(struct __sk_buff, data_end)),
1816 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1817 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1818 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
1819 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1820 BPF_MOV64_IMM(BPF_REG_0, 1),
1821 BPF_EXIT_INSN(),
1822 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1823 BPF_MOV64_IMM(BPF_REG_0, 0),
1824 BPF_EXIT_INSN(),
1825 },
1826 .errstr = "invalid access to packet",
1827 .result = REJECT,
1828 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1829 },
1830 {
1831 "direct packet access: test8 (double test, variant 1)",
1832 .insns = {
1833 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1834 offsetof(struct __sk_buff, data)),
1835 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1836 offsetof(struct __sk_buff, data_end)),
1837 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1838 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1839 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
1840 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1841 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1842 BPF_MOV64_IMM(BPF_REG_0, 1),
1843 BPF_EXIT_INSN(),
1844 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1845 BPF_MOV64_IMM(BPF_REG_0, 0),
1846 BPF_EXIT_INSN(),
1847 },
1848 .result = ACCEPT,
1849 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1850 },
1851 {
1852 "direct packet access: test9 (double test, variant 2)",
1853 .insns = {
1854 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1855 offsetof(struct __sk_buff, data)),
1856 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1857 offsetof(struct __sk_buff, data_end)),
1858 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1860 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
1861 BPF_MOV64_IMM(BPF_REG_0, 1),
1862 BPF_EXIT_INSN(),
1863 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1864 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1865 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1866 BPF_MOV64_IMM(BPF_REG_0, 0),
1867 BPF_EXIT_INSN(),
1868 },
1869 .result = ACCEPT,
1870 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1871 },
1872 {
1873 "direct packet access: test10 (write invalid)",
1874 .insns = {
1875 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1876 offsetof(struct __sk_buff, data)),
1877 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1878 offsetof(struct __sk_buff, data_end)),
1879 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1880 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1881 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1882 BPF_MOV64_IMM(BPF_REG_0, 0),
1883 BPF_EXIT_INSN(),
1884 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1885 BPF_MOV64_IMM(BPF_REG_0, 0),
1886 BPF_EXIT_INSN(),
1887 },
1888 .errstr = "invalid access to packet",
1889 .result = REJECT,
1890 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1891 },
1892 {
1893 "helper access to packet: test1, valid packet_ptr range",
1894 .insns = {
1895 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1896 offsetof(struct xdp_md, data)),
1897 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1898 offsetof(struct xdp_md, data_end)),
1899 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1900 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1901 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
1902 BPF_LD_MAP_FD(BPF_REG_1, 0),
1903 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1904 BPF_MOV64_IMM(BPF_REG_4, 0),
1905 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1906 BPF_FUNC_map_update_elem),
1907 BPF_MOV64_IMM(BPF_REG_0, 0),
1908 BPF_EXIT_INSN(),
1909 },
1910 .fixup_map1 = { 5 },
1911 .result_unpriv = ACCEPT,
1912 .result = ACCEPT,
1913 .prog_type = BPF_PROG_TYPE_XDP,
1914 },
1915 {
1916 "helper access to packet: test2, unchecked packet_ptr",
1917 .insns = {
1918 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1919 offsetof(struct xdp_md, data)),
1920 BPF_LD_MAP_FD(BPF_REG_1, 0),
1921 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1922 BPF_FUNC_map_lookup_elem),
1923 BPF_MOV64_IMM(BPF_REG_0, 0),
1924 BPF_EXIT_INSN(),
1925 },
1926 .fixup_map1 = { 1 },
1927 .result = REJECT,
1928 .errstr = "invalid access to packet",
1929 .prog_type = BPF_PROG_TYPE_XDP,
1930 },
1931 {
1932 "helper access to packet: test3, variable add",
1933 .insns = {
1934 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1935 offsetof(struct xdp_md, data)),
1936 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1937 offsetof(struct xdp_md, data_end)),
1938 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1939 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
1940 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
1941 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
1942 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1943 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
1944 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
1945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
1946 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
1947 BPF_LD_MAP_FD(BPF_REG_1, 0),
1948 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
1949 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1950 BPF_FUNC_map_lookup_elem),
1951 BPF_MOV64_IMM(BPF_REG_0, 0),
1952 BPF_EXIT_INSN(),
1953 },
1954 .fixup_map1 = { 11 },
1955 .result = ACCEPT,
1956 .prog_type = BPF_PROG_TYPE_XDP,
1957 },
1958 {
1959 "helper access to packet: test4, packet_ptr with bad range",
1960 .insns = {
1961 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1962 offsetof(struct xdp_md, data)),
1963 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1964 offsetof(struct xdp_md, data_end)),
1965 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1966 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
1967 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
1968 BPF_MOV64_IMM(BPF_REG_0, 0),
1969 BPF_EXIT_INSN(),
1970 BPF_LD_MAP_FD(BPF_REG_1, 0),
1971 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1972 BPF_FUNC_map_lookup_elem),
1973 BPF_MOV64_IMM(BPF_REG_0, 0),
1974 BPF_EXIT_INSN(),
1975 },
1976 .fixup_map1 = { 7 },
1977 .result = REJECT,
1978 .errstr = "invalid access to packet",
1979 .prog_type = BPF_PROG_TYPE_XDP,
1980 },
1981 {
1982 "helper access to packet: test5, packet_ptr with too short range",
1983 .insns = {
1984 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1985 offsetof(struct xdp_md, data)),
1986 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1987 offsetof(struct xdp_md, data_end)),
1988 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
1989 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1990 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
1991 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
1992 BPF_LD_MAP_FD(BPF_REG_1, 0),
1993 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1994 BPF_FUNC_map_lookup_elem),
1995 BPF_MOV64_IMM(BPF_REG_0, 0),
1996 BPF_EXIT_INSN(),
1997 },
1998 .fixup_map1 = { 6 },
1999 .result = REJECT,
2000 .errstr = "invalid access to packet",
2001 .prog_type = BPF_PROG_TYPE_XDP,
2002 },
2003 {
2004 "helper access to packet: test6, cls valid packet_ptr range",
2005 .insns = {
2006 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2007 offsetof(struct __sk_buff, data)),
2008 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2009 offsetof(struct __sk_buff, data_end)),
2010 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2012 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
2013 BPF_LD_MAP_FD(BPF_REG_1, 0),
2014 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2015 BPF_MOV64_IMM(BPF_REG_4, 0),
2016 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2017 BPF_FUNC_map_update_elem),
2018 BPF_MOV64_IMM(BPF_REG_0, 0),
2019 BPF_EXIT_INSN(),
2020 },
2021 .fixup_map1 = { 5 },
2022 .result = ACCEPT,
2023 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2024 },
2025 {
2026 "helper access to packet: test7, cls unchecked packet_ptr",
2027 .insns = {
2028 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2029 offsetof(struct __sk_buff, data)),
2030 BPF_LD_MAP_FD(BPF_REG_1, 0),
2031 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2032 BPF_FUNC_map_lookup_elem),
2033 BPF_MOV64_IMM(BPF_REG_0, 0),
2034 BPF_EXIT_INSN(),
2035 },
2036 .fixup_map1 = { 1 },
2037 .result = REJECT,
2038 .errstr = "invalid access to packet",
2039 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2040 },
2041 {
2042 "helper access to packet: test8, cls variable add",
2043 .insns = {
2044 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2045 offsetof(struct __sk_buff, data)),
2046 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2047 offsetof(struct __sk_buff, data_end)),
2048 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2049 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
2050 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
2051 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
2052 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2053 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
2054 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2055 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
2056 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
2057 BPF_LD_MAP_FD(BPF_REG_1, 0),
2058 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
2059 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2060 BPF_FUNC_map_lookup_elem),
2061 BPF_MOV64_IMM(BPF_REG_0, 0),
2062 BPF_EXIT_INSN(),
2063 },
2064 .fixup_map1 = { 11 },
2065 .result = ACCEPT,
2066 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2067 },
2068 {
2069 "helper access to packet: test9, cls packet_ptr with bad range",
2070 .insns = {
2071 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2072 offsetof(struct __sk_buff, data)),
2073 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2074 offsetof(struct __sk_buff, data_end)),
2075 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2076 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
2077 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
2078 BPF_MOV64_IMM(BPF_REG_0, 0),
2079 BPF_EXIT_INSN(),
2080 BPF_LD_MAP_FD(BPF_REG_1, 0),
2081 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2082 BPF_FUNC_map_lookup_elem),
2083 BPF_MOV64_IMM(BPF_REG_0, 0),
2084 BPF_EXIT_INSN(),
2085 },
2086 .fixup_map1 = { 7 },
2087 .result = REJECT,
2088 .errstr = "invalid access to packet",
2089 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2090 },
2091 {
2092 "helper access to packet: test10, cls packet_ptr with too short range",
2093 .insns = {
2094 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2095 offsetof(struct __sk_buff, data)),
2096 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2097 offsetof(struct __sk_buff, data_end)),
2098 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
2099 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2100 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
2101 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
2102 BPF_LD_MAP_FD(BPF_REG_1, 0),
2103 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2104 BPF_FUNC_map_lookup_elem),
2105 BPF_MOV64_IMM(BPF_REG_0, 0),
2106 BPF_EXIT_INSN(),
2107 },
2108 .fixup_map1 = { 6 },
2109 .result = REJECT,
2110 .errstr = "invalid access to packet",
2111 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2112 },
2113 {
2114 "helper access to packet: test11, cls unsuitable helper 1",
2115 .insns = {
2116 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2117 offsetof(struct __sk_buff, data)),
2118 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2119 offsetof(struct __sk_buff, data_end)),
2120 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2121 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2122 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
2123 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
2124 BPF_MOV64_IMM(BPF_REG_2, 0),
2125 BPF_MOV64_IMM(BPF_REG_4, 42),
2126 BPF_MOV64_IMM(BPF_REG_5, 0),
2127 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2128 BPF_FUNC_skb_store_bytes),
2129 BPF_MOV64_IMM(BPF_REG_0, 0),
2130 BPF_EXIT_INSN(),
2131 },
2132 .result = REJECT,
2133 .errstr = "helper access to the packet",
2134 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2135 },
2136 {
2137 "helper access to packet: test12, cls unsuitable helper 2",
2138 .insns = {
2139 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2140 offsetof(struct __sk_buff, data)),
2141 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2142 offsetof(struct __sk_buff, data_end)),
2143 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2144 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
2145 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
2146 BPF_MOV64_IMM(BPF_REG_2, 0),
2147 BPF_MOV64_IMM(BPF_REG_4, 4),
2148 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2149 BPF_FUNC_skb_load_bytes),
2150 BPF_MOV64_IMM(BPF_REG_0, 0),
2151 BPF_EXIT_INSN(),
2152 },
2153 .result = REJECT,
2154 .errstr = "helper access to the packet",
2155 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2156 },
2157 {
2158 "helper access to packet: test13, cls helper ok",
2159 .insns = {
2160 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2161 offsetof(struct __sk_buff, data)),
2162 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2163 offsetof(struct __sk_buff, data_end)),
2164 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2165 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2166 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2167 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2168 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2169 BPF_MOV64_IMM(BPF_REG_2, 4),
2170 BPF_MOV64_IMM(BPF_REG_3, 0),
2171 BPF_MOV64_IMM(BPF_REG_4, 0),
2172 BPF_MOV64_IMM(BPF_REG_5, 0),
2173 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2174 BPF_FUNC_csum_diff),
2175 BPF_MOV64_IMM(BPF_REG_0, 0),
2176 BPF_EXIT_INSN(),
2177 },
2178 .result = ACCEPT,
2179 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2180 },
2181 {
2182 "helper access to packet: test14, cls helper fail sub",
2183 .insns = {
2184 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2185 offsetof(struct __sk_buff, data)),
2186 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2187 offsetof(struct __sk_buff, data_end)),
2188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2189 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2190 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2191 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2192 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
2193 BPF_MOV64_IMM(BPF_REG_2, 4),
2194 BPF_MOV64_IMM(BPF_REG_3, 0),
2195 BPF_MOV64_IMM(BPF_REG_4, 0),
2196 BPF_MOV64_IMM(BPF_REG_5, 0),
2197 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2198 BPF_FUNC_csum_diff),
2199 BPF_MOV64_IMM(BPF_REG_0, 0),
2200 BPF_EXIT_INSN(),
2201 },
2202 .result = REJECT,
2203 .errstr = "type=inv expected=fp",
2204 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2205 },
2206 {
2207 "helper access to packet: test15, cls helper fail range 1",
2208 .insns = {
2209 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2210 offsetof(struct __sk_buff, data)),
2211 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2212 offsetof(struct __sk_buff, data_end)),
2213 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2214 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2215 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2216 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2217 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2218 BPF_MOV64_IMM(BPF_REG_2, 8),
2219 BPF_MOV64_IMM(BPF_REG_3, 0),
2220 BPF_MOV64_IMM(BPF_REG_4, 0),
2221 BPF_MOV64_IMM(BPF_REG_5, 0),
2222 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2223 BPF_FUNC_csum_diff),
2224 BPF_MOV64_IMM(BPF_REG_0, 0),
2225 BPF_EXIT_INSN(),
2226 },
2227 .result = REJECT,
2228 .errstr = "invalid access to packet",
2229 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2230 },
2231 {
2232 "helper access to packet: test16, cls helper fail range 2",
2233 .insns = {
2234 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2235 offsetof(struct __sk_buff, data)),
2236 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2237 offsetof(struct __sk_buff, data_end)),
2238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2239 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2240 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2241 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2242 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2243 BPF_MOV64_IMM(BPF_REG_2, -9),
2244 BPF_MOV64_IMM(BPF_REG_3, 0),
2245 BPF_MOV64_IMM(BPF_REG_4, 0),
2246 BPF_MOV64_IMM(BPF_REG_5, 0),
2247 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2248 BPF_FUNC_csum_diff),
2249 BPF_MOV64_IMM(BPF_REG_0, 0),
2250 BPF_EXIT_INSN(),
2251 },
2252 .result = REJECT,
2253 .errstr = "invalid access to packet",
2254 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2255 },
2256 {
2257 "helper access to packet: test17, cls helper fail range 3",
2258 .insns = {
2259 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2260 offsetof(struct __sk_buff, data)),
2261 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2262 offsetof(struct __sk_buff, data_end)),
2263 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2264 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2266 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2267 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2268 BPF_MOV64_IMM(BPF_REG_2, ~0),
2269 BPF_MOV64_IMM(BPF_REG_3, 0),
2270 BPF_MOV64_IMM(BPF_REG_4, 0),
2271 BPF_MOV64_IMM(BPF_REG_5, 0),
2272 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2273 BPF_FUNC_csum_diff),
2274 BPF_MOV64_IMM(BPF_REG_0, 0),
2275 BPF_EXIT_INSN(),
2276 },
2277 .result = REJECT,
2278 .errstr = "invalid access to packet",
2279 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2280 },
2281 {
2282 "helper access to packet: test18, cls helper fail range zero",
2283 .insns = {
2284 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2285 offsetof(struct __sk_buff, data)),
2286 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2287 offsetof(struct __sk_buff, data_end)),
2288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2289 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2290 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2291 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2292 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2293 BPF_MOV64_IMM(BPF_REG_2, 0),
2294 BPF_MOV64_IMM(BPF_REG_3, 0),
2295 BPF_MOV64_IMM(BPF_REG_4, 0),
2296 BPF_MOV64_IMM(BPF_REG_5, 0),
2297 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2298 BPF_FUNC_csum_diff),
2299 BPF_MOV64_IMM(BPF_REG_0, 0),
2300 BPF_EXIT_INSN(),
2301 },
2302 .result = REJECT,
2303 .errstr = "invalid access to packet",
2304 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2305 },
2306 {
2307 "helper access to packet: test19, pkt end as input",
2308 .insns = {
2309 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2310 offsetof(struct __sk_buff, data)),
2311 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2312 offsetof(struct __sk_buff, data_end)),
2313 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2314 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2315 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2316 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2317 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
2318 BPF_MOV64_IMM(BPF_REG_2, 4),
2319 BPF_MOV64_IMM(BPF_REG_3, 0),
2320 BPF_MOV64_IMM(BPF_REG_4, 0),
2321 BPF_MOV64_IMM(BPF_REG_5, 0),
2322 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2323 BPF_FUNC_csum_diff),
2324 BPF_MOV64_IMM(BPF_REG_0, 0),
2325 BPF_EXIT_INSN(),
2326 },
2327 .result = REJECT,
2328 .errstr = "R1 type=pkt_end expected=fp",
2329 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2330 },
2331 {
2332 "helper access to packet: test20, wrong reg",
2333 .insns = {
2334 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
2335 offsetof(struct __sk_buff, data)),
2336 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2337 offsetof(struct __sk_buff, data_end)),
2338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
2339 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2340 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
2341 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
2342 BPF_MOV64_IMM(BPF_REG_2, 4),
2343 BPF_MOV64_IMM(BPF_REG_3, 0),
2344 BPF_MOV64_IMM(BPF_REG_4, 0),
2345 BPF_MOV64_IMM(BPF_REG_5, 0),
2346 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2347 BPF_FUNC_csum_diff),
2348 BPF_MOV64_IMM(BPF_REG_0, 0),
2349 BPF_EXIT_INSN(),
2350 },
2351 .result = REJECT,
2352 .errstr = "invalid access to packet",
2353 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2354 },
2355 {
2356 "valid map access into an array with a constant",
2357 .insns = {
2358 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2359 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2360 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2361 BPF_LD_MAP_FD(BPF_REG_1, 0),
2362 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2363 BPF_FUNC_map_lookup_elem),
2364 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2365 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2366 offsetof(struct test_val, foo)),
2367 BPF_EXIT_INSN(),
2368 },
2369 .fixup_map2 = { 3 },
2370 .errstr_unpriv = "R0 leaks addr",
2371 .result_unpriv = REJECT,
2372 .result = ACCEPT,
2373 },
2374 {
2375 "valid map access into an array with a register",
2376 .insns = {
2377 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2378 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2379 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2380 BPF_LD_MAP_FD(BPF_REG_1, 0),
2381 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2382 BPF_FUNC_map_lookup_elem),
2383 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
2384 BPF_MOV64_IMM(BPF_REG_1, 4),
2385 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
2386 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
2387 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2388 offsetof(struct test_val, foo)),
2389 BPF_EXIT_INSN(),
2390 },
2391 .fixup_map2 = { 3 },
2392 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2393 .result_unpriv = REJECT,
2394 .result = ACCEPT,
2395 },
2396 {
2397 "valid map access into an array with a variable",
2398 .insns = {
2399 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2400 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2401 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2402 BPF_LD_MAP_FD(BPF_REG_1, 0),
2403 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2404 BPF_FUNC_map_lookup_elem),
2405 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
2406 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
2407 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
2408 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
2409 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
2410 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2411 offsetof(struct test_val, foo)),
2412 BPF_EXIT_INSN(),
2413 },
2414 .fixup_map2 = { 3 },
2415 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2416 .result_unpriv = REJECT,
2417 .result = ACCEPT,
2418 },
2419 {
2420 "valid map access into an array with a signed variable",
2421 .insns = {
2422 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2423 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2424 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2425 BPF_LD_MAP_FD(BPF_REG_1, 0),
2426 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2427 BPF_FUNC_map_lookup_elem),
2428 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
2429 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
2430 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
2431 BPF_MOV32_IMM(BPF_REG_1, 0),
2432 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
2433 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
2434 BPF_MOV32_IMM(BPF_REG_1, 0),
2435 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
2436 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
2437 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2438 offsetof(struct test_val, foo)),
2439 BPF_EXIT_INSN(),
2440 },
2441 .fixup_map2 = { 3 },
2442 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2443 .result_unpriv = REJECT,
2444 .result = ACCEPT,
2445 },
2446 {
2447 "invalid map access into an array with a constant",
2448 .insns = {
2449 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2450 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2451 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2452 BPF_LD_MAP_FD(BPF_REG_1, 0),
2453 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2454 BPF_FUNC_map_lookup_elem),
2455 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2456 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
2457 offsetof(struct test_val, foo)),
2458 BPF_EXIT_INSN(),
2459 },
2460 .fixup_map2 = { 3 },
2461 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
2462 .result = REJECT,
2463 },
2464 {
2465 "invalid map access into an array with a register",
2466 .insns = {
2467 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2468 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2469 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2470 BPF_LD_MAP_FD(BPF_REG_1, 0),
2471 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2472 BPF_FUNC_map_lookup_elem),
2473 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
2474 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
2475 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
2476 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
2477 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2478 offsetof(struct test_val, foo)),
2479 BPF_EXIT_INSN(),
2480 },
2481 .fixup_map2 = { 3 },
2482 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2483 .errstr = "R0 min value is outside of the array range",
2484 .result_unpriv = REJECT,
2485 .result = REJECT,
2486 },
2487 {
2488 "invalid map access into an array with a variable",
2489 .insns = {
2490 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2491 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2492 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2493 BPF_LD_MAP_FD(BPF_REG_1, 0),
2494 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2495 BPF_FUNC_map_lookup_elem),
2496 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
2497 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
2498 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
2499 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
2500 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2501 offsetof(struct test_val, foo)),
2502 BPF_EXIT_INSN(),
2503 },
2504 .fixup_map2 = { 3 },
2505 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2506 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
2507 .result_unpriv = REJECT,
2508 .result = REJECT,
2509 },
2510 {
2511 "invalid map access into an array with no floor check",
2512 .insns = {
2513 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2514 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2515 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2516 BPF_LD_MAP_FD(BPF_REG_1, 0),
2517 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2518 BPF_FUNC_map_lookup_elem),
2519 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
2520 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
2521 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
2522 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
2523 BPF_MOV32_IMM(BPF_REG_1, 0),
2524 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
2525 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
2526 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2527 offsetof(struct test_val, foo)),
2528 BPF_EXIT_INSN(),
2529 },
2530 .fixup_map2 = { 3 },
2531 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2532 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
2533 .result_unpriv = REJECT,
2534 .result = REJECT,
2535 },
2536 {
2537 "invalid map access into an array with a invalid max check",
2538 .insns = {
2539 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2540 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2541 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2542 BPF_LD_MAP_FD(BPF_REG_1, 0),
2543 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2544 BPF_FUNC_map_lookup_elem),
2545 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
2546 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
2547 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
2548 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2549 BPF_MOV32_IMM(BPF_REG_1, 0),
2550 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
2551 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
2552 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
2553 offsetof(struct test_val, foo)),
2554 BPF_EXIT_INSN(),
2555 },
2556 .fixup_map2 = { 3 },
2557 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2558 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
2559 .result_unpriv = REJECT,
2560 .result = REJECT,
2561 },
2562 {
2563 "invalid map access into an array with a invalid max check",
2564 .insns = {
2565 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2566 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2567 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2568 BPF_LD_MAP_FD(BPF_REG_1, 0),
2569 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2570 BPF_FUNC_map_lookup_elem),
2571 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
2572 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
2573 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2574 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2575 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2576 BPF_LD_MAP_FD(BPF_REG_1, 0),
2577 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2578 BPF_FUNC_map_lookup_elem),
2579 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
2580 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
2581 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2582 offsetof(struct test_val, foo)),
2583 BPF_EXIT_INSN(),
2584 },
2585 .fixup_map2 = { 3, 11 },
2586 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2587 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
2588 .result_unpriv = REJECT,
2589 .result = REJECT,
2590 },
2591 {
2592 "multiple registers share map_lookup_elem result",
2593 .insns = {
2594 BPF_MOV64_IMM(BPF_REG_1, 10),
2595 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
2596 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2597 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2598 BPF_LD_MAP_FD(BPF_REG_1, 0),
2599 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2600 BPF_FUNC_map_lookup_elem),
2601 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2602 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2603 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
2604 BPF_EXIT_INSN(),
2605 },
2606 .fixup_map1 = { 4 },
2607 .result = ACCEPT,
2608 .prog_type = BPF_PROG_TYPE_SCHED_CLS
2609 },
2610 {
2611 "invalid memory access with multiple map_lookup_elem calls",
2612 .insns = {
2613 BPF_MOV64_IMM(BPF_REG_1, 10),
2614 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
2615 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2617 BPF_LD_MAP_FD(BPF_REG_1, 0),
2618 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2619 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
2620 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2621 BPF_FUNC_map_lookup_elem),
2622 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2623 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2624 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2625 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2626 BPF_FUNC_map_lookup_elem),
2627 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2628 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
2629 BPF_EXIT_INSN(),
2630 },
2631 .fixup_map1 = { 4 },
2632 .result = REJECT,
2633 .errstr = "R4 !read_ok",
2634 .prog_type = BPF_PROG_TYPE_SCHED_CLS
2635 },
2636 {
2637 "valid indirect map_lookup_elem access with 2nd lookup in branch",
2638 .insns = {
2639 BPF_MOV64_IMM(BPF_REG_1, 10),
2640 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
2641 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2642 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2643 BPF_LD_MAP_FD(BPF_REG_1, 0),
2644 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2645 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
2646 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2647 BPF_FUNC_map_lookup_elem),
2648 BPF_MOV64_IMM(BPF_REG_2, 10),
2649 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
2650 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2651 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2652 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2653 BPF_FUNC_map_lookup_elem),
2654 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2655 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2656 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
2657 BPF_EXIT_INSN(),
2658 },
2659 .fixup_map1 = { 4 },
2660 .result = ACCEPT,
2661 .prog_type = BPF_PROG_TYPE_SCHED_CLS
2662 },
2663 {
2664 "multiple registers share map_lookup_elem bad reg type",
2665 .insns = {
2666 BPF_MOV64_IMM(BPF_REG_1, 10),
2667 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
2668 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2669 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2670 BPF_LD_MAP_FD(BPF_REG_1, 0),
2671 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2672 BPF_FUNC_map_lookup_elem),
2673 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2674 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
2675 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2676 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
2677 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2678 BPF_MOV64_IMM(BPF_REG_1, 1),
2679 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2680 BPF_MOV64_IMM(BPF_REG_1, 2),
2681 BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
2682 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
2683 BPF_MOV64_IMM(BPF_REG_1, 3),
2684 BPF_EXIT_INSN(),
2685 },
2686 .fixup_map1 = { 4 },
2687 .result = REJECT,
2688 .errstr = "R3 invalid mem access 'inv'",
2689 .prog_type = BPF_PROG_TYPE_SCHED_CLS
2690 },
2691 {
2692 "invalid map access from else condition",
2693 .insns = {
2694 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2695 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2696 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2697 BPF_LD_MAP_FD(BPF_REG_1, 0),
2698 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
2699 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
2700 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
2701 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
2702 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
2703 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
2704 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
2705 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
2706 BPF_EXIT_INSN(),
2707 },
2708 .fixup_map2 = { 3 },
2709 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
2710 .result = REJECT,
2711 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2712 .result_unpriv = REJECT,
2713 },
2714 {
2715 "constant register |= constant should keep constant type",
2716 .insns = {
2717 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2718 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
2719 BPF_MOV64_IMM(BPF_REG_2, 34),
2720 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
2721 BPF_MOV64_IMM(BPF_REG_3, 0),
2722 BPF_EMIT_CALL(BPF_FUNC_probe_read),
2723 BPF_EXIT_INSN(),
2724 },
2725 .result = ACCEPT,
2726 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
2727 },
2728 {
2729 "constant register |= constant should not bypass stack boundary checks",
2730 .insns = {
2731 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2732 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
2733 BPF_MOV64_IMM(BPF_REG_2, 34),
2734 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
2735 BPF_MOV64_IMM(BPF_REG_3, 0),
2736 BPF_EMIT_CALL(BPF_FUNC_probe_read),
2737 BPF_EXIT_INSN(),
2738 },
2739 .errstr = "invalid stack type R1 off=-48 access_size=58",
2740 .result = REJECT,
2741 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
2742 },
2743 {
2744 "constant register |= constant register should keep constant type",
2745 .insns = {
2746 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2747 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
2748 BPF_MOV64_IMM(BPF_REG_2, 34),
2749 BPF_MOV64_IMM(BPF_REG_4, 13),
2750 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
2751 BPF_MOV64_IMM(BPF_REG_3, 0),
2752 BPF_EMIT_CALL(BPF_FUNC_probe_read),
2753 BPF_EXIT_INSN(),
2754 },
2755 .result = ACCEPT,
2756 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
2757 },
2758 {
2759 "constant register |= constant register should not bypass stack boundary checks",
2760 .insns = {
2761 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2762 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
2763 BPF_MOV64_IMM(BPF_REG_2, 34),
2764 BPF_MOV64_IMM(BPF_REG_4, 24),
2765 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
2766 BPF_MOV64_IMM(BPF_REG_3, 0),
2767 BPF_EMIT_CALL(BPF_FUNC_probe_read),
2768 BPF_EXIT_INSN(),
2769 },
2770 .errstr = "invalid stack type R1 off=-48 access_size=58",
2771 .result = REJECT,
2772 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
2773 },
2774 {
2775 "invalid direct packet write for LWT_IN",
2776 .insns = {
2777 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2778 offsetof(struct __sk_buff, data)),
2779 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2780 offsetof(struct __sk_buff, data_end)),
2781 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2782 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2783 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2784 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2785 BPF_MOV64_IMM(BPF_REG_0, 0),
2786 BPF_EXIT_INSN(),
2787 },
2788 .errstr = "cannot write into packet",
2789 .result = REJECT,
2790 .prog_type = BPF_PROG_TYPE_LWT_IN,
2791 },
2792 {
2793 "invalid direct packet write for LWT_OUT",
2794 .insns = {
2795 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2796 offsetof(struct __sk_buff, data)),
2797 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2798 offsetof(struct __sk_buff, data_end)),
2799 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2800 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2801 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2802 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2803 BPF_MOV64_IMM(BPF_REG_0, 0),
2804 BPF_EXIT_INSN(),
2805 },
2806 .errstr = "cannot write into packet",
2807 .result = REJECT,
2808 .prog_type = BPF_PROG_TYPE_LWT_OUT,
2809 },
2810 {
2811 "direct packet write for LWT_XMIT",
2812 .insns = {
2813 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2814 offsetof(struct __sk_buff, data)),
2815 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2816 offsetof(struct __sk_buff, data_end)),
2817 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2818 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2819 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2820 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2821 BPF_MOV64_IMM(BPF_REG_0, 0),
2822 BPF_EXIT_INSN(),
2823 },
2824 .result = ACCEPT,
2825 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
2826 },
2827 {
2828 "direct packet read for LWT_IN",
2829 .insns = {
2830 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2831 offsetof(struct __sk_buff, data)),
2832 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2833 offsetof(struct __sk_buff, data_end)),
2834 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2835 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2836 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2837 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2838 BPF_MOV64_IMM(BPF_REG_0, 0),
2839 BPF_EXIT_INSN(),
2840 },
2841 .result = ACCEPT,
2842 .prog_type = BPF_PROG_TYPE_LWT_IN,
2843 },
2844 {
2845 "direct packet read for LWT_OUT",
2846 .insns = {
2847 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2848 offsetof(struct __sk_buff, data)),
2849 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2850 offsetof(struct __sk_buff, data_end)),
2851 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2852 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2853 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2854 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2855 BPF_MOV64_IMM(BPF_REG_0, 0),
2856 BPF_EXIT_INSN(),
2857 },
2858 .result = ACCEPT,
2859 .prog_type = BPF_PROG_TYPE_LWT_OUT,
2860 },
2861 {
2862 "direct packet read for LWT_XMIT",
2863 .insns = {
2864 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2865 offsetof(struct __sk_buff, data)),
2866 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2867 offsetof(struct __sk_buff, data_end)),
2868 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2869 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2870 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2871 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2872 BPF_MOV64_IMM(BPF_REG_0, 0),
2873 BPF_EXIT_INSN(),
2874 },
2875 .result = ACCEPT,
2876 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
2877 },
2878 {
2879 "invalid access of tc_classid for LWT_IN",
2880 .insns = {
2881 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2882 offsetof(struct __sk_buff, tc_classid)),
2883 BPF_EXIT_INSN(),
2884 },
2885 .result = REJECT,
2886 .errstr = "invalid bpf_context access",
2887 },
2888 {
2889 "invalid access of tc_classid for LWT_OUT",
2890 .insns = {
2891 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2892 offsetof(struct __sk_buff, tc_classid)),
2893 BPF_EXIT_INSN(),
2894 },
2895 .result = REJECT,
2896 .errstr = "invalid bpf_context access",
2897 },
2898 {
2899 "invalid access of tc_classid for LWT_XMIT",
2900 .insns = {
2901 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2902 offsetof(struct __sk_buff, tc_classid)),
2903 BPF_EXIT_INSN(),
2904 },
2905 .result = REJECT,
2906 .errstr = "invalid bpf_context access",
2907 },
2908};
2909
2910static int probe_filter_length(const struct bpf_insn *fp)
2911{
2912 int len;
2913
2914 for (len = MAX_INSNS - 1; len > 0; --len)
2915 if (fp[len].code != 0 || fp[len].imm != 0)
2916 break;
2917 return len + 1;
2918}
2919
2920static int create_map(uint32_t size_value, uint32_t max_elem)
2921{
2922 int fd;
2923
2924 fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(long long),
2925 size_value, max_elem, BPF_F_NO_PREALLOC);
2926 if (fd < 0)
2927 printf("Failed to create hash map '%s'!\n", strerror(errno));
2928
2929 return fd;
2930}
2931
2932static int create_prog_array(void)
2933{
2934 int fd;
2935
2936 fd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
2937 sizeof(int), 4, 0);
2938 if (fd < 0)
2939 printf("Failed to create prog array '%s'!\n", strerror(errno));
2940
2941 return fd;
2942}
2943
2944static char bpf_vlog[32768];
2945
2946static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
2947 int *fd_f1, int *fd_f2, int *fd_f3)
2948{
2949 int *fixup_map1 = test->fixup_map1;
2950 int *fixup_map2 = test->fixup_map2;
2951 int *fixup_prog = test->fixup_prog;
2952
2953 /* Allocating HTs with 1 elem is fine here, since we only test
2954 * for verifier and not do a runtime lookup, so the only thing
2955 * that really matters is value size in this case.
2956 */
2957 if (*fixup_map1) {
2958 *fd_f1 = create_map(sizeof(long long), 1);
2959 do {
2960 prog[*fixup_map1].imm = *fd_f1;
2961 fixup_map1++;
2962 } while (*fixup_map1);
2963 }
2964
2965 if (*fixup_map2) {
2966 *fd_f2 = create_map(sizeof(struct test_val), 1);
2967 do {
2968 prog[*fixup_map2].imm = *fd_f2;
2969 fixup_map2++;
2970 } while (*fixup_map2);
2971 }
2972
2973 if (*fixup_prog) {
2974 *fd_f3 = create_prog_array();
2975 do {
2976 prog[*fixup_prog].imm = *fd_f3;
2977 fixup_prog++;
2978 } while (*fixup_prog);
2979 }
2980}
2981
/* Run one verifier test: fix up map fds in the program, attempt to load
 * it, and compare the outcome (and, on expected rejection, the verifier
 * log) against the test's expectations.  Increments *passes or *errors
 * accordingly and closes all fds it opened before returning.
 */
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	struct bpf_insn *prog = test->insns;
	int prog_len = probe_filter_length(prog);
	int prog_type = test->prog_type;
	int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1;
	int fd_prog, expected_ret;
	const char *expected_err;

	/* Creates maps and patches the LD_MAP_FD placeholders in prog. */
	do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);

	/* prog_type == 0 (unset) means a plain socket filter. */
	fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
				prog, prog_len * sizeof(struct bpf_insn),
				"GPL", bpf_vlog, sizeof(bpf_vlog));

	/* Unprivileged runs may override both the expected result and the
	 * expected error string; otherwise fall back to the generic ones.
	 */
	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;
	if (expected_ret == ACCEPT) {
		if (fd_prog < 0) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(errno));
			goto fail_log;
		}
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		/* NOTE(review): assumes every REJECT test sets errstr (or
		 * errstr_unpriv when unpriv) — strstr() with a NULL needle
		 * would be undefined behavior; confirm against tests[].
		 */
		if (!strstr(bpf_vlog, expected_err)) {
			printf("FAIL\nUnexpected error message!\n");
			goto fail_log;
		}
	}

	(*passes)++;
	printf("OK\n");
close_fds:
	/* Unopened fds are -1 here; close(-1) just returns EBADF. */
	close(fd_prog);
	close(fd_f1);
	close(fd_f2);
	close(fd_f3);
	sched_yield();
	return;
fail_log:
	/* Dump the verifier log to help diagnose the mismatch. */
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}
3033
3034static int do_test(bool unpriv, unsigned int from, unsigned int to)
3035{
3036 int i, passes = 0, errors = 0;
3037
3038 for (i = from; i < to; i++) {
3039 struct bpf_test *test = &tests[i];
3040
3041 /* Program types that are not supported by non-root we
3042 * skip right away.
3043 */
3044 if (unpriv && test->prog_type)
3045 continue;
3046
3047 printf("#%d %s ", i, test->descr);
3048 do_test_single(test, unpriv, &passes, &errors);
3049 }
3050
3051 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
3052 return errors ? -errors : 0;
3053}
3054
3055int main(int argc, char **argv)
3056{
3057 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
3058 struct rlimit rlim = { 1 << 20, 1 << 20 };
3059 unsigned int from = 0, to = ARRAY_SIZE(tests);
3060 bool unpriv = geteuid() != 0;
3061
3062 if (argc == 3) {
3063 unsigned int l = atoi(argv[argc - 2]);
3064 unsigned int u = atoi(argv[argc - 1]);
3065
3066 if (l < to && u < to) {
3067 from = l;
3068 to = u + 1;
3069 }
3070 } else if (argc == 2) {
3071 unsigned int t = atoi(argv[argc - 1]);
3072
3073 if (t < to) {
3074 from = t;
3075 to = t + 1;
3076 }
3077 }
3078
3079 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
3080 return do_test(unpriv, from, to);
3081}