1/*
2 * Linux Socket Filter - Kernel level socket filtering
3 *
4 * Based on the design of the Berkeley Packet Filter. The new
5 * internal format has been designed by PLUMgrid:
6 *
7 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
8 *
9 * Authors:
10 *
11 * Jay Schulist <jschlst@samba.org>
12 * Alexei Starovoitov <ast@plumgrid.com>
13 * Daniel Borkmann <dborkman@redhat.com>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 * Andi Kleen - Fix a few bad bugs and races.
21 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
22 */
23
24#include <linux/filter.h>
25#include <linux/skbuff.h>
26#include <linux/vmalloc.h>
27#include <linux/random.h>
28#include <linux/moduleloader.h>
29#include <linux/bpf.h>
30#include <linux/frame.h>
31#include <linux/rbtree_latch.h>
32#include <linux/kallsyms.h>
33#include <linux/rcupdate.h>
34
35#include <asm/unaligned.h>
36
37/* Registers */
38#define BPF_R0 regs[BPF_REG_0]
39#define BPF_R1 regs[BPF_REG_1]
40#define BPF_R2 regs[BPF_REG_2]
41#define BPF_R3 regs[BPF_REG_3]
42#define BPF_R4 regs[BPF_REG_4]
43#define BPF_R5 regs[BPF_REG_5]
44#define BPF_R6 regs[BPF_REG_6]
45#define BPF_R7 regs[BPF_REG_7]
46#define BPF_R8 regs[BPF_REG_8]
47#define BPF_R9 regs[BPF_REG_9]
48#define BPF_R10 regs[BPF_REG_10]
49
50/* Named registers */
51#define DST regs[insn->dst_reg]
52#define SRC regs[insn->src_reg]
53#define FP regs[BPF_REG_FP]
54#define ARG1 regs[BPF_REG_ARG1]
55#define CTX regs[BPF_REG_CTX]
56#define IMM insn->imm
57
58/* No hurry in this branch
59 *
60 * Exported for the bpf jit load helper.
61 */
62void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
63{
64 u8 *ptr = NULL;
65
66 if (k >= SKF_NET_OFF)
67 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
68 else if (k >= SKF_LL_OFF)
69 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
70
71 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
72 return ptr;
73
74 return NULL;
75}
76
77struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
78{
79 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
80 struct bpf_prog_aux *aux;
81 struct bpf_prog *fp;
82
83 size = round_up(size, PAGE_SIZE);
84 fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
85 if (fp == NULL)
86 return NULL;
87
88 aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
89 if (aux == NULL) {
90 vfree(fp);
91 return NULL;
92 }
93
94 fp->pages = size / PAGE_SIZE;
95 fp->aux = aux;
96 fp->aux->prog = fp;
97 fp->jit_requested = ebpf_jit_enabled();
98
99 INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
100
101 return fp;
102}
103EXPORT_SYMBOL_GPL(bpf_prog_alloc);
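
/* Illustrative sketch, not part of the original file: how a loader path
 * might use bpf_prog_alloc(). The wrapper name and the 'insn_cnt' parameter
 * are hypothetical; bpf_prog_size() accounts for the struct bpf_prog header
 * plus the instruction array.
 */
static struct bpf_prog * __maybe_unused bpf_prog_alloc_example(u32 insn_cnt)
{
	struct bpf_prog *fp;

	fp = bpf_prog_alloc(bpf_prog_size(insn_cnt), GFP_USER);
	if (!fp)
		return NULL;

	/* Caller still has to fill fp->insnsi before selecting a runtime. */
	fp->len = insn_cnt;
	return fp;
}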
104
105struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
106 gfp_t gfp_extra_flags)
107{
108 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
109 struct bpf_prog *fp;
110 u32 pages, delta;
111 int ret;
112
113 BUG_ON(fp_old == NULL);
114
115 size = round_up(size, PAGE_SIZE);
116 pages = size / PAGE_SIZE;
117 if (pages <= fp_old->pages)
118 return fp_old;
119
120 delta = pages - fp_old->pages;
121 ret = __bpf_prog_charge(fp_old->aux->user, delta);
122 if (ret)
123 return NULL;
124
125 fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
126 if (fp == NULL) {
127 __bpf_prog_uncharge(fp_old->aux->user, delta);
128 } else {
129 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
130 fp->pages = pages;
131 fp->aux->prog = fp;
132
133 /* We keep fp->aux from fp_old around in the new
134 * reallocated structure.
135 */
136 fp_old->aux = NULL;
137 __bpf_prog_free(fp_old);
138 }
139
140 return fp;
141}
142
143void __bpf_prog_free(struct bpf_prog *fp)
144{
145 kfree(fp->aux);
146 vfree(fp);
147}
148
149int bpf_prog_calc_tag(struct bpf_prog *fp)
150{
151 const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
152 u32 raw_size = bpf_prog_tag_scratch_size(fp);
153 u32 digest[SHA_DIGEST_WORDS];
154 u32 ws[SHA_WORKSPACE_WORDS];
155 u32 i, bsize, psize, blocks;
156 struct bpf_insn *dst;
157 bool was_ld_map;
158 u8 *raw, *todo;
159 __be32 *result;
160 __be64 *bits;
161
162 raw = vmalloc(raw_size);
163 if (!raw)
164 return -ENOMEM;
165
166 sha_init(digest);
167 memset(ws, 0, sizeof(ws));
168
169 /* We need to take out the map fds for the digest calculation
170 * since they are unstable from the user space side.
171 */
172 dst = (void *)raw;
173 for (i = 0, was_ld_map = false; i < fp->len; i++) {
174 dst[i] = fp->insnsi[i];
175 if (!was_ld_map &&
176 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
177 dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
178 was_ld_map = true;
179 dst[i].imm = 0;
180 } else if (was_ld_map &&
181 dst[i].code == 0 &&
182 dst[i].dst_reg == 0 &&
183 dst[i].src_reg == 0 &&
184 dst[i].off == 0) {
185 was_ld_map = false;
186 dst[i].imm = 0;
187 } else {
188 was_ld_map = false;
189 }
190 }
191
192 psize = bpf_prog_insn_size(fp);
193 memset(&raw[psize], 0, raw_size - psize);
194 raw[psize++] = 0x80;
195
196 bsize = round_up(psize, SHA_MESSAGE_BYTES);
197 blocks = bsize / SHA_MESSAGE_BYTES;
198 todo = raw;
199 if (bsize - psize >= sizeof(__be64)) {
200 bits = (__be64 *)(todo + bsize - sizeof(__be64));
201 } else {
202 bits = (__be64 *)(todo + bsize + bits_offset);
203 blocks++;
204 }
205 *bits = cpu_to_be64((psize - 1) << 3);
206
207 while (blocks--) {
208 sha_transform(digest, todo, ws);
209 todo += SHA_MESSAGE_BYTES;
210 }
211
212 result = (__force __be32 *)digest;
213 for (i = 0; i < SHA_DIGEST_WORDS; i++)
214 result[i] = cpu_to_be32(digest[i]);
215 memcpy(fp->tag, result, sizeof(fp->tag));
216
217 vfree(raw);
218 return 0;
219}
220
221static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
222 u32 curr, const bool probe_pass)
223{
224 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
225 s64 imm = insn->imm;
226
227 if (curr < pos && curr + imm + 1 > pos)
228 imm += delta;
229 else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
230 imm -= delta;
231 if (imm < imm_min || imm > imm_max)
232 return -ERANGE;
233 if (!probe_pass)
234 insn->imm = imm;
235 return 0;
236}
237
238static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
239 u32 curr, const bool probe_pass)
240{
241 const s32 off_min = S16_MIN, off_max = S16_MAX;
242 s32 off = insn->off;
243
244 if (curr < pos && curr + off + 1 > pos)
245 off += delta;
246 else if (curr > pos + delta && curr + off + 1 <= pos + delta)
247 off -= delta;
248 if (off < off_min || off > off_max)
249 return -ERANGE;
250 if (!probe_pass)
251 insn->off = off;
252 return 0;
253}
254
255static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
256 const bool probe_pass)
257{
258 u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
259 struct bpf_insn *insn = prog->insnsi;
260 int ret = 0;
261
262 for (i = 0; i < insn_cnt; i++, insn++) {
263 u8 code;
264
265 /* In the probing pass we still operate on the original,
266 * unpatched image in order to check overflows before we
267 * do any other adjustments. Therefore skip the patchlet.
268 */
269 if (probe_pass && i == pos) {
270 i += delta + 1;
271 insn++;
272 }
273 code = insn->code;
274 if (BPF_CLASS(code) != BPF_JMP ||
275 BPF_OP(code) == BPF_EXIT)
276 continue;
277 /* Adjust offset of jmps if we cross patch boundaries. */
278 if (BPF_OP(code) == BPF_CALL) {
279 if (insn->src_reg != BPF_PSEUDO_CALL)
280 continue;
281 ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
282 probe_pass);
283 } else {
284 ret = bpf_adj_delta_to_off(insn, pos, delta, i,
285 probe_pass);
286 }
287 if (ret)
288 break;
289 }
290
291 return ret;
292}
293
294struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
295 const struct bpf_insn *patch, u32 len)
296{
297 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
298 const u32 cnt_max = S16_MAX;
299 struct bpf_prog *prog_adj;
300
301 /* Since our patchlet doesn't expand the image, we're done. */
302 if (insn_delta == 0) {
303 memcpy(prog->insnsi + off, patch, sizeof(*patch));
304 return prog;
305 }
306
307 insn_adj_cnt = prog->len + insn_delta;
308
309 /* Reject anything that would potentially let the insn->off
310 * target overflow when we have excessive program expansions.
311 * We need to probe here before we do any reallocation, since
312 * afterwards we may no longer be able to fail.
313 */
314 if (insn_adj_cnt > cnt_max &&
315 bpf_adj_branches(prog, off, insn_delta, true))
316 return NULL;
317
318 /* Several new instructions need to be inserted. Make room
319 * for them. Likely, there's no need for a new allocation as
320 * the last page could have large enough tailroom.
321 */
322 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
323 GFP_USER);
324 if (!prog_adj)
325 return NULL;
326
327 prog_adj->len = insn_adj_cnt;
328
329 /* Patching happens in 3 steps:
330 *
331 * 1) Move over tail of insnsi from next instruction onwards,
332 * so we can patch the single target insn with one or more
333 * new ones (patching is always from 1 to n insns, n > 0).
334 * 2) Inject new instructions at the target location.
335 * 3) Adjust branch offsets if necessary.
336 */
337 insn_rest = insn_adj_cnt - off - len;
338
339 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
340 sizeof(*patch) * insn_rest);
341 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
342
343 /* We are guaranteed not to fail at this point; otherwise the ship
344 * has sailed and we cannot revert to the original state. An
345 * overflow cannot happen at this point.
346 */
347 BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
348
349 return prog_adj;
350}
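
/* Illustrative sketch, not part of the original file: expanding one insn
 * into a two-insn patchlet via bpf_patch_insn_single(), as e.g. verifier
 * rewrites do. The wrapper name and the use of 'off' are hypothetical.
 */
static struct bpf_prog * __maybe_unused
bpf_patch_insn_example(struct bpf_prog *prog, u32 off)
{
	/* Replace the insn at 'off' with "r0 = 0; exit" (1 -> 2 insns). */
	struct bpf_insn patch[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};

	/* On success a possibly reallocated prog is returned; on failure
	 * NULL is returned and the original prog is left untouched.
	 */
	return bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
}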
351
352#ifdef CONFIG_BPF_JIT
353/* All BPF JIT sysctl knobs here. */
354int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
355int bpf_jit_harden __read_mostly;
356int bpf_jit_kallsyms __read_mostly;
357
358static __always_inline void
359bpf_get_prog_addr_region(const struct bpf_prog *prog,
360 unsigned long *symbol_start,
361 unsigned long *symbol_end)
362{
363 const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
364 unsigned long addr = (unsigned long)hdr;
365
366 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
367
368 *symbol_start = addr;
369 *symbol_end = addr + hdr->pages * PAGE_SIZE;
370}
371
372static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
373{
374 const char *end = sym + KSYM_NAME_LEN;
375
376 BUILD_BUG_ON(sizeof("bpf_prog_") +
377 sizeof(prog->tag) * 2 +
378 /* name has been null terminated.
379 * We would need +1 for the '_' preceding
380 * the name. However, the null character
381 * is double counted between the name and the
382 * sizeof("bpf_prog_") above, so we omit
383 * the +1 here.
384 */
385 sizeof(prog->aux->name) > KSYM_NAME_LEN);
386
387 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
388 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
389 if (prog->aux->name[0])
390 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
391 else
392 *sym = 0;
393}
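
/* Illustrative example, not part of the original file: for a program named
 * "foo" with a (made up) tag of f1e2d3c4b5a69788, the symbol produced above
 * is "bpf_prog_f1e2d3c4b5a69788_foo"; with an empty name it is just
 * "bpf_prog_f1e2d3c4b5a69788".
 */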
394
395static __always_inline unsigned long
396bpf_get_prog_addr_start(struct latch_tree_node *n)
397{
398 unsigned long symbol_start, symbol_end;
399 const struct bpf_prog_aux *aux;
400
401 aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
402 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
403
404 return symbol_start;
405}
406
407static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
408 struct latch_tree_node *b)
409{
410 return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
411}
412
413static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
414{
415 unsigned long val = (unsigned long)key;
416 unsigned long symbol_start, symbol_end;
417 const struct bpf_prog_aux *aux;
418
419 aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
420 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
421
422 if (val < symbol_start)
423 return -1;
424 if (val >= symbol_end)
425 return 1;
426
427 return 0;
428}
429
430static const struct latch_tree_ops bpf_tree_ops = {
431 .less = bpf_tree_less,
432 .comp = bpf_tree_comp,
433};
434
435static DEFINE_SPINLOCK(bpf_lock);
436static LIST_HEAD(bpf_kallsyms);
437static struct latch_tree_root bpf_tree __cacheline_aligned;
438
439static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
440{
441 WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
442 list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
443 latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
444}
445
446static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
447{
448 if (list_empty(&aux->ksym_lnode))
449 return;
450
451 latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
452 list_del_rcu(&aux->ksym_lnode);
453}
454
455static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
456{
457 return fp->jited && !bpf_prog_was_classic(fp);
458}
459
460static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
461{
462 return list_empty(&fp->aux->ksym_lnode) ||
463 fp->aux->ksym_lnode.prev == LIST_POISON2;
464}
465
466void bpf_prog_kallsyms_add(struct bpf_prog *fp)
467{
468 if (!bpf_prog_kallsyms_candidate(fp) ||
469 !capable(CAP_SYS_ADMIN))
470 return;
471
472 spin_lock_bh(&bpf_lock);
473 bpf_prog_ksym_node_add(fp->aux);
474 spin_unlock_bh(&bpf_lock);
475}
476
477void bpf_prog_kallsyms_del(struct bpf_prog *fp)
478{
479 if (!bpf_prog_kallsyms_candidate(fp))
480 return;
481
482 spin_lock_bh(&bpf_lock);
483 bpf_prog_ksym_node_del(fp->aux);
484 spin_unlock_bh(&bpf_lock);
485}
486
487static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
488{
489 struct latch_tree_node *n;
490
491 if (!bpf_jit_kallsyms_enabled())
492 return NULL;
493
494 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
495 return n ?
496 container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
497 NULL;
498}
499
500const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
501 unsigned long *off, char *sym)
502{
503 unsigned long symbol_start, symbol_end;
504 struct bpf_prog *prog;
505 char *ret = NULL;
506
507 rcu_read_lock();
508 prog = bpf_prog_kallsyms_find(addr);
509 if (prog) {
510 bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
511 bpf_get_prog_name(prog, sym);
512
513 ret = sym;
514 if (size)
515 *size = symbol_end - symbol_start;
516 if (off)
517 *off = addr - symbol_start;
518 }
519 rcu_read_unlock();
520
521 return ret;
522}
523
524bool is_bpf_text_address(unsigned long addr)
525{
526 bool ret;
527
528 rcu_read_lock();
529 ret = bpf_prog_kallsyms_find(addr) != NULL;
530 rcu_read_unlock();
531
532 return ret;
533}
534
535int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
536 char *sym)
537{
538 unsigned long symbol_start, symbol_end;
539 struct bpf_prog_aux *aux;
540 unsigned int it = 0;
541 int ret = -ERANGE;
542
543 if (!bpf_jit_kallsyms_enabled())
544 return ret;
545
546 rcu_read_lock();
547 list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
548 if (it++ != symnum)
549 continue;
550
551 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
552 bpf_get_prog_name(aux->prog, sym);
553
554 *value = symbol_start;
555 *type = BPF_SYM_ELF_TYPE;
556
557 ret = 0;
558 break;
559 }
560 rcu_read_unlock();
561
562 return ret;
563}
564
565struct bpf_binary_header *
566bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
567 unsigned int alignment,
568 bpf_jit_fill_hole_t bpf_fill_ill_insns)
569{
570 struct bpf_binary_header *hdr;
571 unsigned int size, hole, start;
572
573 /* Most BPF filters are really small, but if some of them
574 * fill a page, allow at least 128 extra bytes to insert a
575 * random section of illegal instructions.
576 */
577 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
578 hdr = module_alloc(size);
579 if (hdr == NULL)
580 return NULL;
581
582 /* Fill space with illegal/arch-dep instructions. */
583 bpf_fill_ill_insns(hdr, size);
584
585 hdr->pages = size / PAGE_SIZE;
586 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
587 PAGE_SIZE - sizeof(*hdr));
588 start = (get_random_int() % hole) & ~(alignment - 1);
589
590 /* Leave a random number of instructions before BPF code. */
591 *image_ptr = &hdr->image[start];
592
593 return hdr;
594}
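
/* Illustrative sketch, not part of the original file: how an arch JIT might
 * use bpf_jit_binary_alloc(). The fill callback, the 0xcc padding byte and
 * the 16-byte alignment are made-up placeholders, not a real arch port.
 */
static void __maybe_unused example_fill_ill_insns(void *area, unsigned int size)
{
	/* Pad the whole region with a byte pattern that traps if executed. */
	memset(area, 0xcc, size);
}

static struct bpf_binary_header * __maybe_unused
example_jit_image_alloc(unsigned int proglen, u8 **image_ptr)
{
	return bpf_jit_binary_alloc(proglen, image_ptr, 16,
				    example_fill_ill_insns);
}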
595
596void bpf_jit_binary_free(struct bpf_binary_header *hdr)
597{
598 module_memfree(hdr);
599}
600
601/* This symbol is only overridden by archs that have different
602 * requirements than the usual eBPF JITs, f.e. when they only
603 * implement cBPF JIT, do not set images read-only, etc.
604 */
605void __weak bpf_jit_free(struct bpf_prog *fp)
606{
607 if (fp->jited) {
608 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
609
610 bpf_jit_binary_unlock_ro(hdr);
611 bpf_jit_binary_free(hdr);
612
613 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
614 }
615
616 bpf_prog_unlock_free(fp);
617}
618
619static int bpf_jit_blind_insn(const struct bpf_insn *from,
620 const struct bpf_insn *aux,
621 struct bpf_insn *to_buff)
622{
623 struct bpf_insn *to = to_buff;
624 u32 imm_rnd = get_random_int();
625 s16 off;
626
627 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
628 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
629
630 if (from->imm == 0 &&
631 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
632 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
633 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
634 goto out;
635 }
636
637 switch (from->code) {
638 case BPF_ALU | BPF_ADD | BPF_K:
639 case BPF_ALU | BPF_SUB | BPF_K:
640 case BPF_ALU | BPF_AND | BPF_K:
641 case BPF_ALU | BPF_OR | BPF_K:
642 case BPF_ALU | BPF_XOR | BPF_K:
643 case BPF_ALU | BPF_MUL | BPF_K:
644 case BPF_ALU | BPF_MOV | BPF_K:
645 case BPF_ALU | BPF_DIV | BPF_K:
646 case BPF_ALU | BPF_MOD | BPF_K:
647 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
648 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
649 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
650 break;
651
652 case BPF_ALU64 | BPF_ADD | BPF_K:
653 case BPF_ALU64 | BPF_SUB | BPF_K:
654 case BPF_ALU64 | BPF_AND | BPF_K:
655 case BPF_ALU64 | BPF_OR | BPF_K:
656 case BPF_ALU64 | BPF_XOR | BPF_K:
657 case BPF_ALU64 | BPF_MUL | BPF_K:
658 case BPF_ALU64 | BPF_MOV | BPF_K:
659 case BPF_ALU64 | BPF_DIV | BPF_K:
660 case BPF_ALU64 | BPF_MOD | BPF_K:
661 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
662 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
663 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
664 break;
665
666 case BPF_JMP | BPF_JEQ | BPF_K:
667 case BPF_JMP | BPF_JNE | BPF_K:
668 case BPF_JMP | BPF_JGT | BPF_K:
669 case BPF_JMP | BPF_JLT | BPF_K:
670 case BPF_JMP | BPF_JGE | BPF_K:
671 case BPF_JMP | BPF_JLE | BPF_K:
672 case BPF_JMP | BPF_JSGT | BPF_K:
673 case BPF_JMP | BPF_JSLT | BPF_K:
674 case BPF_JMP | BPF_JSGE | BPF_K:
675 case BPF_JMP | BPF_JSLE | BPF_K:
676 case BPF_JMP | BPF_JSET | BPF_K:
677 /* Accommodate the extra offset in case of a backjump. */
678 off = from->off;
679 if (off < 0)
680 off -= 2;
681 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
682 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
683 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
684 break;
685
686 case BPF_LD | BPF_ABS | BPF_W:
687 case BPF_LD | BPF_ABS | BPF_H:
688 case BPF_LD | BPF_ABS | BPF_B:
689 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
690 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
691 *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
692 break;
693
694 case BPF_LD | BPF_IND | BPF_W:
695 case BPF_LD | BPF_IND | BPF_H:
696 case BPF_LD | BPF_IND | BPF_B:
697 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
698 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
699 *to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
700 *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
701 break;
702
703 case BPF_LD | BPF_IMM | BPF_DW:
704 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
705 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
706 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
707 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
708 break;
709 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
710 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
711 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
712 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
713 break;
714
715 case BPF_ST | BPF_MEM | BPF_DW:
716 case BPF_ST | BPF_MEM | BPF_W:
717 case BPF_ST | BPF_MEM | BPF_H:
718 case BPF_ST | BPF_MEM | BPF_B:
719 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
720 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
721 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
722 break;
723 }
724out:
725 return to - to_buff;
726}
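
/* Illustrative sketch, not part of the original file: the rewrite performed
 * by bpf_jit_blind_insn() above for a single "r2 += 0x11223344". Both the
 * immediate and the 0x2f1c0d5a "random" value are made up for illustration.
 */
static const struct bpf_insn __maybe_unused bpf_blind_example[] = {
	/* BPF_REG_AX = rnd ^ imm */
	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x2f1c0d5a ^ 0x11223344),
	/* BPF_REG_AX ^= rnd, so BPF_REG_AX now holds imm again */
	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x2f1c0d5a),
	/* r2 += BPF_REG_AX; the raw constant never appears in the image */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_AX),
};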
727
728static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
729 gfp_t gfp_extra_flags)
730{
731 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
732 struct bpf_prog *fp;
733
734 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
735 if (fp != NULL) {
736 /* aux->prog still points to the fp_other one, so
737 * when promoting the clone to the real program,
738 * this still needs to be adapted.
739 */
740 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
741 }
742
743 return fp;
744}
745
746static void bpf_prog_clone_free(struct bpf_prog *fp)
747{
748 /* aux was stolen by the other clone, so we cannot free
749 * it from this path! It will be freed eventually by the
750 * other program on release.
751 *
752 * At this point, we don't need a deferred release since
753 * clone is guaranteed to not be locked.
754 */
755 fp->aux = NULL;
756 __bpf_prog_free(fp);
757}
758
759void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
760{
761 /* We have to repoint aux->prog to self, as we don't
762 * know whether fp here is the clone or the original.
763 */
764 fp->aux->prog = fp;
765 bpf_prog_clone_free(fp_other);
766}
767
768struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
769{
770 struct bpf_insn insn_buff[16], aux[2];
771 struct bpf_prog *clone, *tmp;
772 int insn_delta, insn_cnt;
773 struct bpf_insn *insn;
774 int i, rewritten;
775
776 if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
777 return prog;
778
779 clone = bpf_prog_clone_create(prog, GFP_USER);
780 if (!clone)
781 return ERR_PTR(-ENOMEM);
782
783 insn_cnt = clone->len;
784 insn = clone->insnsi;
785
786 for (i = 0; i < insn_cnt; i++, insn++) {
787 /* We temporarily need to hold the original ld64 insn
788 * so that we can still access the first part in the
789 * second blinding run.
790 */
791 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
792 insn[1].code == 0)
793 memcpy(aux, insn, sizeof(aux));
794
795 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
796 if (!rewritten)
797 continue;
798
799 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
800 if (!tmp) {
801 /* Patching may have repointed aux->prog during
802 * realloc from the original one, so we need to
803 * fix it up here on error.
804 */
805 bpf_jit_prog_release_other(prog, clone);
806 return ERR_PTR(-ENOMEM);
807 }
808
809 clone = tmp;
810 insn_delta = rewritten - 1;
811
812 /* Walk new program and skip insns we just inserted. */
813 insn = clone->insnsi + i + insn_delta;
814 insn_cnt += insn_delta;
815 i += insn_delta;
816 }
817
818 clone->blinded = 1;
819 return clone;
820}
821#endif /* CONFIG_BPF_JIT */
822
823/* Base function for offset calculation. Needs to go into .text section,
824 * therefore keeping it non-static as well; will also be used by JITs
825 * anyway later on, so do not let the compiler omit it. This also needs
826 * to go into kallsyms for correlation from e.g. bpftool, so naming
827 * must not change.
828 */
829noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
830{
831 return 0;
832}
833EXPORT_SYMBOL_GPL(__bpf_call_base);
834
835/* All UAPI available opcodes. */
836#define BPF_INSN_MAP(INSN_2, INSN_3) \
837 /* 32 bit ALU operations. */ \
838 /* Register based. */ \
839 INSN_3(ALU, ADD, X), \
840 INSN_3(ALU, SUB, X), \
841 INSN_3(ALU, AND, X), \
842 INSN_3(ALU, OR, X), \
843 INSN_3(ALU, LSH, X), \
844 INSN_3(ALU, RSH, X), \
845 INSN_3(ALU, XOR, X), \
846 INSN_3(ALU, MUL, X), \
847 INSN_3(ALU, MOV, X), \
848 INSN_3(ALU, DIV, X), \
849 INSN_3(ALU, MOD, X), \
850 INSN_2(ALU, NEG), \
851 INSN_3(ALU, END, TO_BE), \
852 INSN_3(ALU, END, TO_LE), \
853 /* Immediate based. */ \
854 INSN_3(ALU, ADD, K), \
855 INSN_3(ALU, SUB, K), \
856 INSN_3(ALU, AND, K), \
857 INSN_3(ALU, OR, K), \
858 INSN_3(ALU, LSH, K), \
859 INSN_3(ALU, RSH, K), \
860 INSN_3(ALU, XOR, K), \
861 INSN_3(ALU, MUL, K), \
862 INSN_3(ALU, MOV, K), \
863 INSN_3(ALU, DIV, K), \
864 INSN_3(ALU, MOD, K), \
865 /* 64 bit ALU operations. */ \
866 /* Register based. */ \
867 INSN_3(ALU64, ADD, X), \
868 INSN_3(ALU64, SUB, X), \
869 INSN_3(ALU64, AND, X), \
870 INSN_3(ALU64, OR, X), \
871 INSN_3(ALU64, LSH, X), \
872 INSN_3(ALU64, RSH, X), \
873 INSN_3(ALU64, XOR, X), \
874 INSN_3(ALU64, MUL, X), \
875 INSN_3(ALU64, MOV, X), \
876 INSN_3(ALU64, ARSH, X), \
877 INSN_3(ALU64, DIV, X), \
878 INSN_3(ALU64, MOD, X), \
879 INSN_2(ALU64, NEG), \
880 /* Immediate based. */ \
881 INSN_3(ALU64, ADD, K), \
882 INSN_3(ALU64, SUB, K), \
883 INSN_3(ALU64, AND, K), \
884 INSN_3(ALU64, OR, K), \
885 INSN_3(ALU64, LSH, K), \
886 INSN_3(ALU64, RSH, K), \
887 INSN_3(ALU64, XOR, K), \
888 INSN_3(ALU64, MUL, K), \
889 INSN_3(ALU64, MOV, K), \
890 INSN_3(ALU64, ARSH, K), \
891 INSN_3(ALU64, DIV, K), \
892 INSN_3(ALU64, MOD, K), \
893 /* Call instruction. */ \
894 INSN_2(JMP, CALL), \
895 /* Exit instruction. */ \
896 INSN_2(JMP, EXIT), \
897 /* Jump instructions. */ \
898 /* Register based. */ \
899 INSN_3(JMP, JEQ, X), \
900 INSN_3(JMP, JNE, X), \
901 INSN_3(JMP, JGT, X), \
902 INSN_3(JMP, JLT, X), \
903 INSN_3(JMP, JGE, X), \
904 INSN_3(JMP, JLE, X), \
905 INSN_3(JMP, JSGT, X), \
906 INSN_3(JMP, JSLT, X), \
907 INSN_3(JMP, JSGE, X), \
908 INSN_3(JMP, JSLE, X), \
909 INSN_3(JMP, JSET, X), \
910 /* Immediate based. */ \
911 INSN_3(JMP, JEQ, K), \
912 INSN_3(JMP, JNE, K), \
913 INSN_3(JMP, JGT, K), \
914 INSN_3(JMP, JLT, K), \
915 INSN_3(JMP, JGE, K), \
916 INSN_3(JMP, JLE, K), \
917 INSN_3(JMP, JSGT, K), \
918 INSN_3(JMP, JSLT, K), \
919 INSN_3(JMP, JSGE, K), \
920 INSN_3(JMP, JSLE, K), \
921 INSN_3(JMP, JSET, K), \
922 INSN_2(JMP, JA), \
923 /* Store instructions. */ \
924 /* Register based. */ \
925 INSN_3(STX, MEM, B), \
926 INSN_3(STX, MEM, H), \
927 INSN_3(STX, MEM, W), \
928 INSN_3(STX, MEM, DW), \
929 INSN_3(STX, XADD, W), \
930 INSN_3(STX, XADD, DW), \
931 /* Immediate based. */ \
932 INSN_3(ST, MEM, B), \
933 INSN_3(ST, MEM, H), \
934 INSN_3(ST, MEM, W), \
935 INSN_3(ST, MEM, DW), \
936 /* Load instructions. */ \
937 /* Register based. */ \
938 INSN_3(LDX, MEM, B), \
939 INSN_3(LDX, MEM, H), \
940 INSN_3(LDX, MEM, W), \
941 INSN_3(LDX, MEM, DW), \
942 /* Immediate based. */ \
943 INSN_3(LD, IMM, DW), \
944 /* Misc (old cBPF carry-over). */ \
945 INSN_3(LD, ABS, B), \
946 INSN_3(LD, ABS, H), \
947 INSN_3(LD, ABS, W), \
948 INSN_3(LD, IND, B), \
949 INSN_3(LD, IND, H), \
950 INSN_3(LD, IND, W)
951
952bool bpf_opcode_in_insntable(u8 code)
953{
954#define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
955#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
956 static const bool public_insntable[256] = {
957 [0 ... 255] = false,
958 /* Now overwrite non-defaults ... */
959 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
960 };
961#undef BPF_INSN_3_TBL
962#undef BPF_INSN_2_TBL
963 return public_insntable[code];
964}
965
966#ifndef CONFIG_BPF_JIT_ALWAYS_ON
967/**
968 * ___bpf_prog_run - run eBPF program on a given context
969 * @regs: is the array of MAX_BPF_REG eBPF pseudo-registers
970 * @insn: is the array of eBPF instructions
971 * @stack: is the eBPF storage stack
972 * Decode and execute eBPF instructions.
973 */
974static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
975{
976 u64 tmp;
977#define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
978#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
979 static const void *jumptable[256] = {
980 [0 ... 255] = &&default_label,
981 /* Now overwrite non-defaults ... */
982 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
983 /* Non-UAPI available opcodes. */
984 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
985 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
986 };
987#undef BPF_INSN_3_LBL
988#undef BPF_INSN_2_LBL
989 u32 tail_call_cnt = 0;
990 void *ptr;
991 int off;
992
993#define CONT ({ insn++; goto select_insn; })
994#define CONT_JMP ({ insn++; goto select_insn; })
995
996select_insn:
997 goto *jumptable[insn->code];
998
999 /* ALU */
1000#define ALU(OPCODE, OP) \
1001 ALU64_##OPCODE##_X: \
1002 DST = DST OP SRC; \
1003 CONT; \
1004 ALU_##OPCODE##_X: \
1005 DST = (u32) DST OP (u32) SRC; \
1006 CONT; \
1007 ALU64_##OPCODE##_K: \
1008 DST = DST OP IMM; \
1009 CONT; \
1010 ALU_##OPCODE##_K: \
1011 DST = (u32) DST OP (u32) IMM; \
1012 CONT;
1013
1014 ALU(ADD, +)
1015 ALU(SUB, -)
1016 ALU(AND, &)
1017 ALU(OR, |)
1018 ALU(LSH, <<)
1019 ALU(RSH, >>)
1020 ALU(XOR, ^)
1021 ALU(MUL, *)
1022#undef ALU
1023 ALU_NEG:
1024 DST = (u32) -DST;
1025 CONT;
1026 ALU64_NEG:
1027 DST = -DST;
1028 CONT;
1029 ALU_MOV_X:
1030 DST = (u32) SRC;
1031 CONT;
1032 ALU_MOV_K:
1033 DST = (u32) IMM;
1034 CONT;
1035 ALU64_MOV_X:
1036 DST = SRC;
1037 CONT;
1038 ALU64_MOV_K:
1039 DST = IMM;
1040 CONT;
1041 LD_IMM_DW:
1042 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1043 insn++;
1044 CONT;
1045 ALU64_ARSH_X:
1046 (*(s64 *) &DST) >>= SRC;
1047 CONT;
1048 ALU64_ARSH_K:
1049 (*(s64 *) &DST) >>= IMM;
1050 CONT;
1051 ALU64_MOD_X:
1052 div64_u64_rem(DST, SRC, &tmp);
1053 DST = tmp;
1054 CONT;
1055 ALU_MOD_X:
1056 tmp = (u32) DST;
1057 DST = do_div(tmp, (u32) SRC);
1058 CONT;
1059 ALU64_MOD_K:
1060 div64_u64_rem(DST, IMM, &tmp);
1061 DST = tmp;
1062 CONT;
1063 ALU_MOD_K:
1064 tmp = (u32) DST;
1065 DST = do_div(tmp, (u32) IMM);
1066 CONT;
1067 ALU64_DIV_X:
1068 DST = div64_u64(DST, SRC);
1069 CONT;
1070 ALU_DIV_X:
1071 tmp = (u32) DST;
1072 do_div(tmp, (u32) SRC);
1073 DST = (u32) tmp;
1074 CONT;
1075 ALU64_DIV_K:
1076 DST = div64_u64(DST, IMM);
1077 CONT;
1078 ALU_DIV_K:
1079 tmp = (u32) DST;
1080 do_div(tmp, (u32) IMM);
1081 DST = (u32) tmp;
1082 CONT;
1083 ALU_END_TO_BE:
1084 switch (IMM) {
1085 case 16:
1086 DST = (__force u16) cpu_to_be16(DST);
1087 break;
1088 case 32:
1089 DST = (__force u32) cpu_to_be32(DST);
1090 break;
1091 case 64:
1092 DST = (__force u64) cpu_to_be64(DST);
1093 break;
1094 }
1095 CONT;
1096 ALU_END_TO_LE:
1097 switch (IMM) {
1098 case 16:
1099 DST = (__force u16) cpu_to_le16(DST);
1100 break;
1101 case 32:
1102 DST = (__force u32) cpu_to_le32(DST);
1103 break;
1104 case 64:
1105 DST = (__force u64) cpu_to_le64(DST);
1106 break;
1107 }
1108 CONT;
1109
1110 /* CALL */
1111 JMP_CALL:
1112 /* Function call scratches BPF_R1-BPF_R5 registers,
1113 * preserves BPF_R6-BPF_R9, and stores return value
1114 * into BPF_R0.
1115 */
1116 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1117 BPF_R4, BPF_R5);
1118 CONT;
1119
1120 JMP_CALL_ARGS:
1121 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1122 BPF_R3, BPF_R4,
1123 BPF_R5,
1124 insn + insn->off + 1);
1125 CONT;
1126
1127 JMP_TAIL_CALL: {
1128 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1129 struct bpf_array *array = container_of(map, struct bpf_array, map);
1130 struct bpf_prog *prog;
1131 u32 index = BPF_R3;
1132
1133 if (unlikely(index >= array->map.max_entries))
1134 goto out;
1135 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1136 goto out;
1137
1138 tail_call_cnt++;
1139
1140 prog = READ_ONCE(array->ptrs[index]);
1141 if (!prog)
1142 goto out;
1143
1144 /* ARG1 at this point is guaranteed to point to CTX from
1145 * the verifier side due to the fact that the tail call is
1146 * handled like a helper, that is, bpf_tail_call_proto,
1147 * where arg1_type is ARG_PTR_TO_CTX.
1148 */
1149 insn = prog->insnsi;
1150 goto select_insn;
1151out:
1152 CONT;
1153 }
1154 /* JMP */
1155 JMP_JA:
1156 insn += insn->off;
1157 CONT;
1158 JMP_JEQ_X:
1159 if (DST == SRC) {
1160 insn += insn->off;
1161 CONT_JMP;
1162 }
1163 CONT;
1164 JMP_JEQ_K:
1165 if (DST == IMM) {
1166 insn += insn->off;
1167 CONT_JMP;
1168 }
1169 CONT;
1170 JMP_JNE_X:
1171 if (DST != SRC) {
1172 insn += insn->off;
1173 CONT_JMP;
1174 }
1175 CONT;
1176 JMP_JNE_K:
1177 if (DST != IMM) {
1178 insn += insn->off;
1179 CONT_JMP;
1180 }
1181 CONT;
1182 JMP_JGT_X:
1183 if (DST > SRC) {
1184 insn += insn->off;
1185 CONT_JMP;
1186 }
1187 CONT;
1188 JMP_JGT_K:
1189 if (DST > IMM) {
1190 insn += insn->off;
1191 CONT_JMP;
1192 }
1193 CONT;
1194 JMP_JLT_X:
1195 if (DST < SRC) {
1196 insn += insn->off;
1197 CONT_JMP;
1198 }
1199 CONT;
1200 JMP_JLT_K:
1201 if (DST < IMM) {
1202 insn += insn->off;
1203 CONT_JMP;
1204 }
1205 CONT;
1206 JMP_JGE_X:
1207 if (DST >= SRC) {
1208 insn += insn->off;
1209 CONT_JMP;
1210 }
1211 CONT;
1212 JMP_JGE_K:
1213 if (DST >= IMM) {
1214 insn += insn->off;
1215 CONT_JMP;
1216 }
1217 CONT;
1218 JMP_JLE_X:
1219 if (DST <= SRC) {
1220 insn += insn->off;
1221 CONT_JMP;
1222 }
1223 CONT;
1224 JMP_JLE_K:
1225 if (DST <= IMM) {
1226 insn += insn->off;
1227 CONT_JMP;
1228 }
1229 CONT;
1230 JMP_JSGT_X:
1231 if (((s64) DST) > ((s64) SRC)) {
1232 insn += insn->off;
1233 CONT_JMP;
1234 }
1235 CONT;
1236 JMP_JSGT_K:
1237 if (((s64) DST) > ((s64) IMM)) {
1238 insn += insn->off;
1239 CONT_JMP;
1240 }
1241 CONT;
1242 JMP_JSLT_X:
1243 if (((s64) DST) < ((s64) SRC)) {
1244 insn += insn->off;
1245 CONT_JMP;
1246 }
1247 CONT;
1248 JMP_JSLT_K:
1249 if (((s64) DST) < ((s64) IMM)) {
1250 insn += insn->off;
1251 CONT_JMP;
1252 }
1253 CONT;
1254 JMP_JSGE_X:
1255 if (((s64) DST) >= ((s64) SRC)) {
1256 insn += insn->off;
1257 CONT_JMP;
1258 }
1259 CONT;
1260 JMP_JSGE_K:
1261 if (((s64) DST) >= ((s64) IMM)) {
1262 insn += insn->off;
1263 CONT_JMP;
1264 }
1265 CONT;
1266 JMP_JSLE_X:
1267 if (((s64) DST) <= ((s64) SRC)) {
1268 insn += insn->off;
1269 CONT_JMP;
1270 }
1271 CONT;
1272 JMP_JSLE_K:
1273 if (((s64) DST) <= ((s64) IMM)) {
1274 insn += insn->off;
1275 CONT_JMP;
1276 }
1277 CONT;
1278 JMP_JSET_X:
1279 if (DST & SRC) {
1280 insn += insn->off;
1281 CONT_JMP;
1282 }
1283 CONT;
1284 JMP_JSET_K:
1285 if (DST & IMM) {
1286 insn += insn->off;
1287 CONT_JMP;
1288 }
1289 CONT;
1290 JMP_EXIT:
1291 return BPF_R0;
1292
1293 /* STX and ST and LDX */
1294#define LDST(SIZEOP, SIZE) \
1295 STX_MEM_##SIZEOP: \
1296 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1297 CONT; \
1298 ST_MEM_##SIZEOP: \
1299 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1300 CONT; \
1301 LDX_MEM_##SIZEOP: \
1302 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1303 CONT;
1304
1305 LDST(B, u8)
1306 LDST(H, u16)
1307 LDST(W, u32)
1308 LDST(DW, u64)
1309#undef LDST
1310 STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1311 atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1312 (DST + insn->off));
1313 CONT;
1314 STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1315 atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1316 (DST + insn->off));
1317 CONT;
1318 LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
1319 off = IMM;
1320load_word:
1321 /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
1322 * appear in programs where ctx == skb
1323 * (see may_access_skb() in the verifier). All programs
1324 * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
1325 * bpf_convert_filter() saves it in BPF_R6, and the internal BPF
1326 * verifier will check that BPF_R6 == ctx.
1327 *
1328 * BPF_ABS and BPF_IND are wrappers of function calls,
1329 * so they scratch BPF_R1-BPF_R5 registers, preserve
1330 * BPF_R6-BPF_R9, and store return value into BPF_R0.
1331 *
1332 * Implicit input:
1333 * ctx == skb == BPF_R6 == CTX
1334 *
1335 * Explicit input:
1336 * SRC == any register
1337 * IMM == 32-bit immediate
1338 *
1339 * Output:
1340 * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
1341 */
1342
1343 ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
1344 if (likely(ptr != NULL)) {
1345 BPF_R0 = get_unaligned_be32(ptr);
1346 CONT;
1347 }
1348
1349 return 0;
1350 LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
1351 off = IMM;
1352load_half:
1353 ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
1354 if (likely(ptr != NULL)) {
1355 BPF_R0 = get_unaligned_be16(ptr);
1356 CONT;
1357 }
1358
1359 return 0;
1360 LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
1361 off = IMM;
1362load_byte:
1363 ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
1364 if (likely(ptr != NULL)) {
1365 BPF_R0 = *(u8 *)ptr;
1366 CONT;
1367 }
1368
1369 return 0;
1370 LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
1371 off = IMM + SRC;
1372 goto load_word;
1373 LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
1374 off = IMM + SRC;
1375 goto load_half;
1376 LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
1377 off = IMM + SRC;
1378 goto load_byte;
1379
1380 default_label:
1381 /* If we ever reach this, we have a bug somewhere. Die hard here
1382 * instead of just returning 0; we could be somewhere in a subprog,
1383 * so execution could continue otherwise which we do /not/ want.
1384 *
1385 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1386 */
1387 pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1388 BUG_ON(1);
1389 return 0;
1390}
1391STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
1392
1393#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1394#define DEFINE_BPF_PROG_RUN(stack_size) \
1395static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1396{ \
1397 u64 stack[stack_size / sizeof(u64)]; \
1398 u64 regs[MAX_BPF_REG]; \
1399\
1400 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1401 ARG1 = (u64) (unsigned long) ctx; \
1402 return ___bpf_prog_run(regs, insn, stack); \
1403}
1404
1405#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1406#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1407static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1408 const struct bpf_insn *insn) \
1409{ \
1410 u64 stack[stack_size / sizeof(u64)]; \
1411 u64 regs[MAX_BPF_REG]; \
1412\
1413 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1414 BPF_R1 = r1; \
1415 BPF_R2 = r2; \
1416 BPF_R3 = r3; \
1417 BPF_R4 = r4; \
1418 BPF_R5 = r5; \
1419 return ___bpf_prog_run(regs, insn, stack); \
1420}
1421
1422#define EVAL1(FN, X) FN(X)
1423#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1424#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1425#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1426#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1427#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1428
1429EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1430EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1431EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1432
1433EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1434EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1435EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1436
1437#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1438
1439static unsigned int (*interpreters[])(const void *ctx,
1440 const struct bpf_insn *insn) = {
1441EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1442EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1443EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1444};
1445#undef PROG_NAME_LIST
1446#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1447static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1448 const struct bpf_insn *insn) = {
1449EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1450EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1451EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1452};
1453#undef PROG_NAME_LIST
1454
1455void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1456{
1457 stack_depth = max_t(u32, stack_depth, 1);
1458 insn->off = (s16) insn->imm;
1459 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1460 __bpf_call_base_args;
1461 insn->code = BPF_JMP | BPF_CALL_ARGS;
1462}
1463
1464#else
1465static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1466 const struct bpf_insn *insn)
1467{
1468 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1469 * is not working properly, so warn about it!
1470 */
1471 WARN_ON_ONCE(1);
1472 return 0;
1473}
1474#endif
1475
1476bool bpf_prog_array_compatible(struct bpf_array *array,
1477 const struct bpf_prog *fp)
1478{
1479 if (fp->kprobe_override)
1480 return false;
1481
1482 if (!array->owner_prog_type) {
1483 /* There's no owner yet where we could check for
1484 * compatibility.
1485 */
1486 array->owner_prog_type = fp->type;
1487 array->owner_jited = fp->jited;
1488
1489 return true;
1490 }
1491
1492 return array->owner_prog_type == fp->type &&
1493 array->owner_jited == fp->jited;
1494}
1495
1496static int bpf_check_tail_call(const struct bpf_prog *fp)
1497{
1498 struct bpf_prog_aux *aux = fp->aux;
1499 int i;
1500
1501 for (i = 0; i < aux->used_map_cnt; i++) {
1502 struct bpf_map *map = aux->used_maps[i];
1503 struct bpf_array *array;
1504
1505 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1506 continue;
1507
1508 array = container_of(map, struct bpf_array, map);
1509 if (!bpf_prog_array_compatible(array, fp))
1510 return -EINVAL;
1511 }
1512
1513 return 0;
1514}
1515
1516/**
1517 * bpf_prog_select_runtime - select exec runtime for BPF program
1518 * @fp: bpf_prog populated with internal BPF program
1519 * @err: pointer to error variable
1520 *
1521 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1522 * The BPF program will be executed via BPF_PROG_RUN() macro.
1523 */
1524struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1525{
1526#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1527 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1528
1529 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1530#else
1531 fp->bpf_func = __bpf_prog_ret0_warn;
1532#endif
1533
1534 /* eBPF JITs can rewrite the program in case constant
1535 * blinding is active. However, in case of error during
1536 * blinding, bpf_int_jit_compile() must always return a
1537 * valid program, which in this case would simply not
1538 * be JITed, but fall back to the interpreter.
1539 */
1540 if (!bpf_prog_is_dev_bound(fp->aux)) {
1541 fp = bpf_int_jit_compile(fp);
1542#ifdef CONFIG_BPF_JIT_ALWAYS_ON
1543 if (!fp->jited) {
1544 *err = -ENOTSUPP;
1545 return fp;
1546 }
1547#endif
1548 } else {
1549 *err = bpf_prog_offload_compile(fp);
1550 if (*err)
1551 return fp;
1552 }
1553 bpf_prog_lock_ro(fp);
1554
1555 /* The tail call compatibility check can only be done at
1556 * this late stage, as we need to determine whether we deal
1557 * with JITed or non-JITed program concatenations, and not
1558 * all eBPF JITs might immediately support all features.
1559 */
1560 *err = bpf_check_tail_call(fp);
1561
1562 return fp;
1563}
1564EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
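
/* Illustrative sketch, not part of the original file: the intended call
 * sequence from a loader's point of view. With the interpreter, a prog
 * whose stack_depth is e.g. 40 bytes rounds up to 64 and thus runs via
 * interpreters[1], i.e. __bpf_prog_run64(). The function name and 'ctx'
 * are hypothetical; error handling is reduced to the minimum.
 */
static void __maybe_unused bpf_select_runtime_example(struct bpf_prog *prog,
						      void *ctx)
{
	int err = 0;

	/* Picks JIT or interpreter; may return a rewritten prog. */
	prog = bpf_prog_select_runtime(prog, &err);
	if (!err)
		BPF_PROG_RUN(prog, ctx);
}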
1565
1566static unsigned int __bpf_prog_ret1(const void *ctx,
1567 const struct bpf_insn *insn)
1568{
1569 return 1;
1570}
1571
1572static struct bpf_prog_dummy {
1573 struct bpf_prog prog;
1574} dummy_bpf_prog = {
1575 .prog = {
1576 .bpf_func = __bpf_prog_ret1,
1577 },
1578};
1579
1580/* To avoid allocating an empty bpf_prog_array for cgroups that
1581 * don't have a bpf program attached, use one global 'empty_prog_array'.
1582 * It will not be modified by the caller of bpf_prog_array_alloc()
1583 * (since the caller requested prog_cnt == 0); that pointer should
1584 * still be 'freed' by bpf_prog_array_free().
1585 */
1586static struct {
1587 struct bpf_prog_array hdr;
1588 struct bpf_prog *null_prog;
1589} empty_prog_array = {
1590 .null_prog = NULL,
1591};
1592
1593struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1594{
1595 if (prog_cnt)
1596 return kzalloc(sizeof(struct bpf_prog_array) +
1597 sizeof(struct bpf_prog *) * (prog_cnt + 1),
1598 flags);
1599
1600 return &empty_prog_array.hdr;
1601}
1602
1603void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
1604{
1605 if (!progs ||
1606 progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
1607 return;
1608 kfree_rcu(progs, rcu);
1609}
1610
1611int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
1612{
1613 struct bpf_prog **prog;
1614 u32 cnt = 0;
1615
1616 rcu_read_lock();
1617 prog = rcu_dereference(progs)->progs;
1618 for (; *prog; prog++)
1619 if (*prog != &dummy_bpf_prog.prog)
1620 cnt++;
1621 rcu_read_unlock();
1622 return cnt;
1623}
1624
1625static bool bpf_prog_array_copy_core(struct bpf_prog **prog,
1626 u32 *prog_ids,
1627 u32 request_cnt)
1628{
1629 int i = 0;
1630
1631 for (; *prog; prog++) {
1632 if (*prog == &dummy_bpf_prog.prog)
1633 continue;
1634 prog_ids[i] = (*prog)->aux->id;
1635 if (++i == request_cnt) {
1636 prog++;
1637 break;
1638 }
1639 }
1640
1641 return !!(*prog);
1642}
1643
1644int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
1645 __u32 __user *prog_ids, u32 cnt)
1646{
1647 struct bpf_prog **prog;
1648 unsigned long err = 0;
1649 bool nospc;
1650 u32 *ids;
1651
1652 /* Users of this function are doing:
1653 *     cnt = bpf_prog_array_length();
1654 *     if (cnt > 0)
1655 *             bpf_prog_array_copy_to_user(..., cnt);
1656 * so the kcalloc below doesn't need an extra cnt > 0 check, but
1657 * bpf_prog_array_length() releases the rcu lock and the
1658 * prog array could have been swapped with an empty or larger array,
1659 * so always copy 'cnt' prog_ids to the user. In a rare race the user
1660 * will see zero prog_ids. (See the usage sketch after this function.)
1661 */
1662 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1663 if (!ids)
1664 return -ENOMEM;
1665 rcu_read_lock();
1666 prog = rcu_dereference(progs)->progs;
1667 nospc = bpf_prog_array_copy_core(prog, ids, cnt);
1668 rcu_read_unlock();
1669 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1670 kfree(ids);
1671 if (err)
1672 return -EFAULT;
1673 if (nospc)
1674 return -ENOSPC;
1675 return 0;
1676}
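
/* Illustrative usage sketch, not part of the original file: the caller
 * pattern described in the comment above. The function name, 'uprog_ids'
 * and 'ucnt' are hypothetical.
 */
static int __maybe_unused
bpf_prog_array_query_example(struct bpf_prog_array __rcu *progs,
			     __u32 __user *uprog_ids, u32 ucnt)
{
	u32 cnt = bpf_prog_array_length(progs);

	/* Nothing requested or nothing attached: report success. */
	if (!ucnt || !cnt)
		return 0;

	return bpf_prog_array_copy_to_user(progs, uprog_ids, min(cnt, ucnt));
}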
1677
1678void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
1679 struct bpf_prog *old_prog)
1680{
1681 struct bpf_prog **prog = progs->progs;
1682
1683 for (; *prog; prog++)
1684 if (*prog == old_prog) {
1685 WRITE_ONCE(*prog, &dummy_bpf_prog.prog);
1686 break;
1687 }
1688}
1689
1690int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1691 struct bpf_prog *exclude_prog,
1692 struct bpf_prog *include_prog,
1693 struct bpf_prog_array **new_array)
1694{
1695 int new_prog_cnt, carry_prog_cnt = 0;
1696 struct bpf_prog **existing_prog;
1697 struct bpf_prog_array *array;
1698 int new_prog_idx = 0;
1699
1700 /* Figure out how many existing progs we need to carry over to
1701 * the new array.
1702 */
1703 if (old_array) {
1704 existing_prog = old_array->progs;
1705 for (; *existing_prog; existing_prog++) {
1706 if (*existing_prog != exclude_prog &&
1707 *existing_prog != &dummy_bpf_prog.prog)
1708 carry_prog_cnt++;
1709 if (*existing_prog == include_prog)
1710 return -EEXIST;
1711 }
1712 }
1713
1714 /* How many progs (not NULL) will be in the new array? */
1715 new_prog_cnt = carry_prog_cnt;
1716 if (include_prog)
1717 new_prog_cnt += 1;
1718
1719 /* Do we have any prog (not NULL) in the new array? */
1720 if (!new_prog_cnt) {
1721 *new_array = NULL;
1722 return 0;
1723 }
1724
1725 /* +1 as the end of prog_array is marked with NULL */
1726 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1727 if (!array)
1728 return -ENOMEM;
1729
1730 /* Fill in the new prog array */
1731 if (carry_prog_cnt) {
1732 existing_prog = old_array->progs;
1733 for (; *existing_prog; existing_prog++)
1734 if (*existing_prog != exclude_prog &&
1735 *existing_prog != &dummy_bpf_prog.prog)
1736 array->progs[new_prog_idx++] = *existing_prog;
1737 }
1738 if (include_prog)
1739 array->progs[new_prog_idx++] = include_prog;
1740 array->progs[new_prog_idx] = NULL;
1741 *new_array = array;
1742 return 0;
1743}
1744
1745int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
1746 u32 *prog_ids, u32 request_cnt,
1747 u32 *prog_cnt)
1748{
1749 struct bpf_prog **prog;
1750 u32 cnt = 0;
1751
1752 if (array)
1753 cnt = bpf_prog_array_length(array);
1754
1755 *prog_cnt = cnt;
1756
1757 /* return early if user requested only program count or nothing to copy */
1758 if (!request_cnt || !cnt)
1759 return 0;
1760
1761 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1762 prog = rcu_dereference_check(array, 1)->progs;
1763 return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC
1764 : 0;
1765}
1766
1767static void bpf_prog_free_deferred(struct work_struct *work)
1768{
1769 struct bpf_prog_aux *aux;
1770 int i;
1771
1772 aux = container_of(work, struct bpf_prog_aux, work);
1773 if (bpf_prog_is_dev_bound(aux))
1774 bpf_prog_offload_destroy(aux->prog);
1775 for (i = 0; i < aux->func_cnt; i++)
1776 bpf_jit_free(aux->func[i]);
1777 if (aux->func_cnt) {
1778 kfree(aux->func);
1779 bpf_prog_unlock_free(aux->prog);
1780 } else {
1781 bpf_jit_free(aux->prog);
1782 }
1783}
1784
1785/* Free internal BPF program */
1786void bpf_prog_free(struct bpf_prog *fp)
1787{
1788 struct bpf_prog_aux *aux = fp->aux;
1789
1790 INIT_WORK(&aux->work, bpf_prog_free_deferred);
1791 schedule_work(&aux->work);
1792}
1793EXPORT_SYMBOL_GPL(bpf_prog_free);
1794
1795/* RNG for unprivileged user space with separated state from prandom_u32(). */
1796static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1797
1798void bpf_user_rnd_init_once(void)
1799{
1800 prandom_init_once(&bpf_user_rnd_state);
1801}
1802
1803BPF_CALL_0(bpf_user_rnd_u32)
1804{
1805 /* Should someone ever have the rather unwise idea to use some
1806 * of the registers passed into this function, then note that
1807 * this function is called from native eBPF and classic-to-eBPF
1808 * transformations. Register assignments from both sides are
1809 * different, f.e. classic always sets fn(ctx, A, X) here.
1810 */
1811 struct rnd_state *state;
1812 u32 res;
1813
1814 state = &get_cpu_var(bpf_user_rnd_state);
1815 res = prandom_u32_state(state);
1816 put_cpu_var(bpf_user_rnd_state);
1817
1818 return res;
1819}
1820
1821/* Weak definitions of helper functions in case we don't have bpf syscall. */
1822const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
1823const struct bpf_func_proto bpf_map_update_elem_proto __weak;
1824const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
1825
1826const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
1827const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
1828const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
1829const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
1830
1831const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
1832const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
1833const struct bpf_func_proto bpf_get_current_comm_proto __weak;
1834const struct bpf_func_proto bpf_sock_map_update_proto __weak;
1835
1836const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
1837{
1838 return NULL;
1839}
1840
1841u64 __weak
1842bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
1843 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
1844{
1845 return -ENOTSUPP;
1846}
1847
1848/* Always built-in helper functions. */
1849const struct bpf_func_proto bpf_tail_call_proto = {
1850 .func = NULL,
1851 .gpl_only = false,
1852 .ret_type = RET_VOID,
1853 .arg1_type = ARG_PTR_TO_CTX,
1854 .arg2_type = ARG_CONST_MAP_PTR,
1855 .arg3_type = ARG_ANYTHING,
1856};
1857
1858/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
1859 * It is encouraged to implement bpf_int_jit_compile() instead, so that
1860 * eBPF and implicitly also cBPF can get JITed!
1861 */
1862struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
1863{
1864 return prog;
1865}
1866
1867/* Stub for JITs that support eBPF. All cBPF code gets transformed into
1868 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
1869 */
1870void __weak bpf_jit_compile(struct bpf_prog *prog)
1871{
1872}
1873
1874bool __weak bpf_helper_changes_pkt_data(void *func)
1875{
1876 return false;
1877}
1878
1879/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
1880 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
1881 */
1882int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
1883 int len)
1884{
1885 return -EFAULT;
1886}
1887
1888/* All definitions of tracepoints related to BPF. */
1889#define CREATE_TRACE_POINTS
1890#include <linux/bpf_trace.h>
1891
1892EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
1893
1894/* These are only used within the BPF_SYSCALL code */
1895#ifdef CONFIG_BPF_SYSCALL
1896EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
1897EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);
1898#endif
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Linux Socket Filter - Kernel level socket filtering
4 *
5 * Based on the design of the Berkeley Packet Filter. The new
6 * internal format has been designed by PLUMgrid:
7 *
8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9 *
10 * Authors:
11 *
12 * Jay Schulist <jschlst@samba.org>
13 * Alexei Starovoitov <ast@plumgrid.com>
14 * Daniel Borkmann <dborkman@redhat.com>
15 *
16 * Andi Kleen - Fix a few bad bugs and races.
17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18 */
19
20#include <uapi/linux/btf.h>
21#include <linux/filter.h>
22#include <linux/skbuff.h>
23#include <linux/vmalloc.h>
24#include <linux/random.h>
25#include <linux/moduleloader.h>
26#include <linux/bpf.h>
27#include <linux/btf.h>
28#include <linux/objtool.h>
29#include <linux/rbtree_latch.h>
30#include <linux/kallsyms.h>
31#include <linux/rcupdate.h>
32#include <linux/perf_event.h>
33#include <linux/extable.h>
34#include <linux/log2.h>
35
36#include <asm/barrier.h>
37#include <asm/unaligned.h>
38
39/* Registers */
40#define BPF_R0 regs[BPF_REG_0]
41#define BPF_R1 regs[BPF_REG_1]
42#define BPF_R2 regs[BPF_REG_2]
43#define BPF_R3 regs[BPF_REG_3]
44#define BPF_R4 regs[BPF_REG_4]
45#define BPF_R5 regs[BPF_REG_5]
46#define BPF_R6 regs[BPF_REG_6]
47#define BPF_R7 regs[BPF_REG_7]
48#define BPF_R8 regs[BPF_REG_8]
49#define BPF_R9 regs[BPF_REG_9]
50#define BPF_R10 regs[BPF_REG_10]
51
52/* Named registers */
53#define DST regs[insn->dst_reg]
54#define SRC regs[insn->src_reg]
55#define FP regs[BPF_REG_FP]
56#define AX regs[BPF_REG_AX]
57#define ARG1 regs[BPF_REG_ARG1]
58#define CTX regs[BPF_REG_CTX]
59#define IMM insn->imm
60
61/* No hurry in this branch
62 *
63 * Exported for the bpf jit load helper.
64 */
65void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
66{
67 u8 *ptr = NULL;
68
69 if (k >= SKF_NET_OFF)
70 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
71 else if (k >= SKF_LL_OFF)
72 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
73
74 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
75 return ptr;
76
77 return NULL;
78}
79
80struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
81{
82 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
83 struct bpf_prog_aux *aux;
84 struct bpf_prog *fp;
85
86 size = round_up(size, PAGE_SIZE);
87 fp = __vmalloc(size, gfp_flags);
88 if (fp == NULL)
89 return NULL;
90
91 aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
92 if (aux == NULL) {
93 vfree(fp);
94 return NULL;
95 }
96 fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
97 if (!fp->active) {
98 vfree(fp);
99 kfree(aux);
100 return NULL;
101 }
102
103 fp->pages = size / PAGE_SIZE;
104 fp->aux = aux;
105 fp->aux->prog = fp;
106 fp->jit_requested = ebpf_jit_enabled();
107
108 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
109 mutex_init(&fp->aux->used_maps_mutex);
110 mutex_init(&fp->aux->dst_mutex);
111
112 return fp;
113}
114
115struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
116{
117 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
118 struct bpf_prog *prog;
119 int cpu;
120
121 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
122 if (!prog)
123 return NULL;
124
125 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
126 if (!prog->stats) {
127 free_percpu(prog->active);
128 kfree(prog->aux);
129 vfree(prog);
130 return NULL;
131 }
132
133 for_each_possible_cpu(cpu) {
134 struct bpf_prog_stats *pstats;
135
136 pstats = per_cpu_ptr(prog->stats, cpu);
137 u64_stats_init(&pstats->syncp);
138 }
139 return prog;
140}
141EXPORT_SYMBOL_GPL(bpf_prog_alloc);
142
143int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
144{
145 if (!prog->aux->nr_linfo || !prog->jit_requested)
146 return 0;
147
148 prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
149 sizeof(*prog->aux->jited_linfo),
150 GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
151 if (!prog->aux->jited_linfo)
152 return -ENOMEM;
153
154 return 0;
155}
156
157void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
158{
159 if (prog->aux->jited_linfo &&
160 (!prog->jited || !prog->aux->jited_linfo[0])) {
161 kvfree(prog->aux->jited_linfo);
162 prog->aux->jited_linfo = NULL;
163 }
164
165 kfree(prog->aux->kfunc_tab);
166 prog->aux->kfunc_tab = NULL;
167}
168
169/* The jit engine is responsible for providing an array
170 * for insn_off to the jited_off mapping (insn_to_jit_off).
171 *
172 * The idx to this array is the insn_off. Hence, the insn_off
173 * here is relative to the prog itself instead of the main prog.
174 * This array has one entry for each xlated bpf insn.
175 *
176 * jited_off is the byte off to the last byte of the jited insn.
177 *
178 * Hence, with
179 * insn_start:
180 * The first bpf insn off of the prog. The insn off
181 * here is relative to the main prog.
182 * e.g. if prog is a subprog, insn_start > 0
183 * linfo_idx:
184 * The prog's idx to prog->aux->linfo and jited_linfo
185 *
186 * jited_linfo[linfo_idx] = prog->bpf_func
187 *
188 * For i > linfo_idx,
189 *
190 * jited_linfo[i] = prog->bpf_func +
191 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
192 */
193void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
194 const u32 *insn_to_jit_off)
195{
196 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
197 const struct bpf_line_info *linfo;
198 void **jited_linfo;
199
200 if (!prog->aux->jited_linfo)
201 /* Userspace did not provide linfo */
202 return;
203
204 linfo_idx = prog->aux->linfo_idx;
205 linfo = &prog->aux->linfo[linfo_idx];
206 insn_start = linfo[0].insn_off;
207 insn_end = insn_start + prog->len;
208
209 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
210 jited_linfo[0] = prog->bpf_func;
211
212 nr_linfo = prog->aux->nr_linfo - linfo_idx;
213
214 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
215 /* The verifier ensures that linfo[i].insn_off is
216 * strictly increasing
217 */
218 jited_linfo[i] = prog->bpf_func +
219 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
220}
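/* Hedged worked example of the mapping above (all values made up):
 * take a subprog with insn_start = 10, prog->bpf_func = F and
 * insn_to_jit_off = { 4, 8, 16, ... }. A line info entry with
 * linfo[i].insn_off = 12 is then filled in as
 *
 *	jited_linfo[i] = F + insn_to_jit_off[12 - 10 - 1] = F + 8
 *
 * i.e. F plus the jited offset recorded for xlated insn 1 of this
 * subprog, which locates the jited code for insn_off 12 within the
 * image.
 */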
221
222struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
223 gfp_t gfp_extra_flags)
224{
225 gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
226 struct bpf_prog *fp;
227 u32 pages;
228
229 size = round_up(size, PAGE_SIZE);
230 pages = size / PAGE_SIZE;
231 if (pages <= fp_old->pages)
232 return fp_old;
233
234 fp = __vmalloc(size, gfp_flags);
235 if (fp) {
236 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
237 fp->pages = pages;
238 fp->aux->prog = fp;
239
240 /* We keep fp->aux from fp_old around in the new
241 * reallocated structure.
242 */
243 fp_old->aux = NULL;
244 fp_old->stats = NULL;
245 fp_old->active = NULL;
246 __bpf_prog_free(fp_old);
247 }
248
249 return fp;
250}
251
252void __bpf_prog_free(struct bpf_prog *fp)
253{
254 if (fp->aux) {
255 mutex_destroy(&fp->aux->used_maps_mutex);
256 mutex_destroy(&fp->aux->dst_mutex);
257 kfree(fp->aux->poke_tab);
258 kfree(fp->aux);
259 }
260 free_percpu(fp->stats);
261 free_percpu(fp->active);
262 vfree(fp);
263}
264
265int bpf_prog_calc_tag(struct bpf_prog *fp)
266{
267 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
268 u32 raw_size = bpf_prog_tag_scratch_size(fp);
269 u32 digest[SHA1_DIGEST_WORDS];
270 u32 ws[SHA1_WORKSPACE_WORDS];
271 u32 i, bsize, psize, blocks;
272 struct bpf_insn *dst;
273 bool was_ld_map;
274 u8 *raw, *todo;
275 __be32 *result;
276 __be64 *bits;
277
278 raw = vmalloc(raw_size);
279 if (!raw)
280 return -ENOMEM;
281
282 sha1_init(digest);
283 memset(ws, 0, sizeof(ws));
284
285	/* We need to take out the map fds for the digest calculation
286	 * since they are unstable from the user space side.
287 */
288 dst = (void *)raw;
289 for (i = 0, was_ld_map = false; i < fp->len; i++) {
290 dst[i] = fp->insnsi[i];
291 if (!was_ld_map &&
292 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
293 (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
294 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
295 was_ld_map = true;
296 dst[i].imm = 0;
297 } else if (was_ld_map &&
298 dst[i].code == 0 &&
299 dst[i].dst_reg == 0 &&
300 dst[i].src_reg == 0 &&
301 dst[i].off == 0) {
302 was_ld_map = false;
303 dst[i].imm = 0;
304 } else {
305 was_ld_map = false;
306 }
307 }
308
309 psize = bpf_prog_insn_size(fp);
310 memset(&raw[psize], 0, raw_size - psize);
311 raw[psize++] = 0x80;
312
313 bsize = round_up(psize, SHA1_BLOCK_SIZE);
314 blocks = bsize / SHA1_BLOCK_SIZE;
315 todo = raw;
316 if (bsize - psize >= sizeof(__be64)) {
317 bits = (__be64 *)(todo + bsize - sizeof(__be64));
318 } else {
319 bits = (__be64 *)(todo + bsize + bits_offset);
320 blocks++;
321 }
322 *bits = cpu_to_be64((psize - 1) << 3);
323
324 while (blocks--) {
325 sha1_transform(digest, todo, ws);
326 todo += SHA1_BLOCK_SIZE;
327 }
328
329 result = (__force __be32 *)digest;
330 for (i = 0; i < SHA1_DIGEST_WORDS; i++)
331 result[i] = cpu_to_be32(digest[i]);
332 memcpy(fp->tag, result, sizeof(fp->tag));
333
334 vfree(raw);
335 return 0;
336}
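/* A minimal worked example of the padding above (illustrative sizes):
 * a program with 8 instructions has psize = 8 * sizeof(struct bpf_insn)
 * = 64 message bytes. After appending the 0x80 terminator, psize = 65,
 * bsize = round_up(65, SHA1_BLOCK_SIZE) = 128 and blocks = 2. Since
 * bsize - psize = 63 >= sizeof(__be64), the big-endian bit count
 * (psize - 1) << 3 = 512 lands in the last 8 bytes of the second block,
 * matching standard SHA-1 length padding.
 */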
337
338static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
339 s32 end_new, s32 curr, const bool probe_pass)
340{
341 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
342 s32 delta = end_new - end_old;
343 s64 imm = insn->imm;
344
345 if (curr < pos && curr + imm + 1 >= end_old)
346 imm += delta;
347 else if (curr >= end_new && curr + imm + 1 < end_new)
348 imm -= delta;
349 if (imm < imm_min || imm > imm_max)
350 return -ERANGE;
351 if (!probe_pass)
352 insn->imm = imm;
353 return 0;
354}
355
356static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
357 s32 end_new, s32 curr, const bool probe_pass)
358{
359 const s32 off_min = S16_MIN, off_max = S16_MAX;
360 s32 delta = end_new - end_old;
361 s32 off = insn->off;
362
363 if (curr < pos && curr + off + 1 >= end_old)
364 off += delta;
365 else if (curr >= end_new && curr + off + 1 < end_new)
366 off -= delta;
367 if (off < off_min || off > off_max)
368 return -ERANGE;
369 if (!probe_pass)
370 insn->off = off;
371 return 0;
372}
373
374static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
375 s32 end_new, const bool probe_pass)
376{
377 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
378 struct bpf_insn *insn = prog->insnsi;
379 int ret = 0;
380
381 for (i = 0; i < insn_cnt; i++, insn++) {
382 u8 code;
383
384 /* In the probing pass we still operate on the original,
385	 * unpatched image in order to check for overflows before we
386 * do any other adjustments. Therefore skip the patchlet.
387 */
388 if (probe_pass && i == pos) {
389 i = end_new;
390 insn = prog->insnsi + end_old;
391 }
392 code = insn->code;
393 if ((BPF_CLASS(code) != BPF_JMP &&
394 BPF_CLASS(code) != BPF_JMP32) ||
395 BPF_OP(code) == BPF_EXIT)
396 continue;
397 /* Adjust offset of jmps if we cross patch boundaries. */
398 if (BPF_OP(code) == BPF_CALL) {
399 if (insn->src_reg != BPF_PSEUDO_CALL)
400 continue;
401 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
402 end_new, i, probe_pass);
403 } else {
404 ret = bpf_adj_delta_to_off(insn, pos, end_old,
405 end_new, i, probe_pass);
406 }
407 if (ret)
408 break;
409 }
410
411 return ret;
412}
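/* Hedged example of the adjustment above (hypothetical indices):
 * patching one insn at pos = 5 with a 3 insn patchlet gives
 * end_old = 6, end_new = 8 and delta = 2. A JMP at curr = 2 with
 * off = +4 targeted insn 7 (>= end_old), which moved to 9, so its
 * offset becomes +6. A JMP that sat at insn 10 before the patch
 * (curr = 12 in the patched image) with off = -7 targeted insn 4,
 * which did not move, so its offset becomes -9. Branches that stay
 * entirely on one side of the patchlet are left untouched.
 */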
413
414static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
415{
416 struct bpf_line_info *linfo;
417 u32 i, nr_linfo;
418
419 nr_linfo = prog->aux->nr_linfo;
420 if (!nr_linfo || !delta)
421 return;
422
423 linfo = prog->aux->linfo;
424
425 for (i = 0; i < nr_linfo; i++)
426 if (off < linfo[i].insn_off)
427 break;
428
429	/* Push insn_off of all entries with insn_off > off by delta */
430 for (; i < nr_linfo; i++)
431 linfo[i].insn_off += delta;
432}
433
434struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
435 const struct bpf_insn *patch, u32 len)
436{
437 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
438 const u32 cnt_max = S16_MAX;
439 struct bpf_prog *prog_adj;
440 int err;
441
442 /* Since our patchlet doesn't expand the image, we're done. */
443 if (insn_delta == 0) {
444 memcpy(prog->insnsi + off, patch, sizeof(*patch));
445 return prog;
446 }
447
448 insn_adj_cnt = prog->len + insn_delta;
449
450 /* Reject anything that would potentially let the insn->off
451 * target overflow when we have excessive program expansions.
452	 * We need to probe here before we do any reallocation, since
453	 * afterwards we may not fail anymore.
454 */
455 if (insn_adj_cnt > cnt_max &&
456 (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
457 return ERR_PTR(err);
458
459 /* Several new instructions need to be inserted. Make room
460 * for them. Likely, there's no need for a new allocation as
461 * last page could have large enough tailroom.
462 */
463 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
464 GFP_USER);
465 if (!prog_adj)
466 return ERR_PTR(-ENOMEM);
467
468 prog_adj->len = insn_adj_cnt;
469
470 /* Patching happens in 3 steps:
471 *
472 * 1) Move over tail of insnsi from next instruction onwards,
473 * so we can patch the single target insn with one or more
474 * new ones (patching is always from 1 to n insns, n > 0).
475 * 2) Inject new instructions at the target location.
476 * 3) Adjust branch offsets if necessary.
477 */
478 insn_rest = insn_adj_cnt - off - len;
479
480 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
481 sizeof(*patch) * insn_rest);
482 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
483
484	/* We are guaranteed not to fail at this point, otherwise
485	 * the ship has sailed and we cannot revert to the original
486	 * state. An overflow cannot happen at this point.
487 */
488 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
489
490 bpf_adj_linfo(prog_adj, off, insn_delta);
491
492 return prog_adj;
493}
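/* Minimal usage sketch (not a verbatim copy of any caller): since the
 * function may return a reallocated program, callers must always
 * switch over to the returned pointer and check for errors first:
 *
 *	new_prog = bpf_patch_insn_single(prog, off, patch, cnt);
 *	if (IS_ERR(new_prog))
 *		return PTR_ERR(new_prog);
 *	prog = new_prog;
 *
 * On success with reallocation the old program has already been freed
 * and must not be touched anymore.
 */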
494
495int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
496{
497 /* Branch offsets can't overflow when program is shrinking, no need
498 * to call bpf_adj_branches(..., true) here
499 */
500 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
501 sizeof(struct bpf_insn) * (prog->len - off - cnt));
502 prog->len -= cnt;
503
504 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
505}
506
507static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
508{
509 int i;
510
511 for (i = 0; i < fp->aux->func_cnt; i++)
512 bpf_prog_kallsyms_del(fp->aux->func[i]);
513}
514
515void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
516{
517 bpf_prog_kallsyms_del_subprogs(fp);
518 bpf_prog_kallsyms_del(fp);
519}
520
521#ifdef CONFIG_BPF_JIT
522/* All BPF JIT sysctl knobs here. */
523int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
524int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
525int bpf_jit_harden __read_mostly;
526long bpf_jit_limit __read_mostly;
527
528static void
529bpf_prog_ksym_set_addr(struct bpf_prog *prog)
530{
531 const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
532 unsigned long addr = (unsigned long)hdr;
533
534 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
535
536 prog->aux->ksym.start = (unsigned long) prog->bpf_func;
537 prog->aux->ksym.end = addr + hdr->pages * PAGE_SIZE;
538}
539
540static void
541bpf_prog_ksym_set_name(struct bpf_prog *prog)
542{
543 char *sym = prog->aux->ksym.name;
544 const char *end = sym + KSYM_NAME_LEN;
545 const struct btf_type *type;
546 const char *func_name;
547
548 BUILD_BUG_ON(sizeof("bpf_prog_") +
549 sizeof(prog->tag) * 2 +
550 /* name has been null terminated.
551		     * We would need +1 for the '_' preceding
552 * the name. However, the null character
553 * is double counted between the name and the
554 * sizeof("bpf_prog_") above, so we omit
555 * the +1 here.
556 */
557 sizeof(prog->aux->name) > KSYM_NAME_LEN);
558
559 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
560 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
561
562 /* prog->aux->name will be ignored if full btf name is available */
563 if (prog->aux->func_info_cnt) {
564 type = btf_type_by_id(prog->aux->btf,
565 prog->aux->func_info[prog->aux->func_idx].type_id);
566 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
567 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
568 return;
569 }
570
571 if (prog->aux->name[0])
572 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
573 else
574 *sym = 0;
575}
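/* The resulting symbol has the form bpf_prog_<tag>[_<name>], e.g.
 * (with a made-up tag) "bpf_prog_8937c9e2f9a3d6cf_my_prog" when a BTF
 * function name or prog->aux->name is available, and plain
 * "bpf_prog_8937c9e2f9a3d6cf" otherwise.
 */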
576
577static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
578{
579 return container_of(n, struct bpf_ksym, tnode)->start;
580}
581
582static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
583 struct latch_tree_node *b)
584{
585 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
586}
587
588static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
589{
590 unsigned long val = (unsigned long)key;
591 const struct bpf_ksym *ksym;
592
593 ksym = container_of(n, struct bpf_ksym, tnode);
594
595 if (val < ksym->start)
596 return -1;
597 if (val >= ksym->end)
598 return 1;
599
600 return 0;
601}
602
603static const struct latch_tree_ops bpf_tree_ops = {
604 .less = bpf_tree_less,
605 .comp = bpf_tree_comp,
606};
607
608static DEFINE_SPINLOCK(bpf_lock);
609static LIST_HEAD(bpf_kallsyms);
610static struct latch_tree_root bpf_tree __cacheline_aligned;
611
612void bpf_ksym_add(struct bpf_ksym *ksym)
613{
614 spin_lock_bh(&bpf_lock);
615 WARN_ON_ONCE(!list_empty(&ksym->lnode));
616 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
617 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
618 spin_unlock_bh(&bpf_lock);
619}
620
621static void __bpf_ksym_del(struct bpf_ksym *ksym)
622{
623 if (list_empty(&ksym->lnode))
624 return;
625
626 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
627 list_del_rcu(&ksym->lnode);
628}
629
630void bpf_ksym_del(struct bpf_ksym *ksym)
631{
632 spin_lock_bh(&bpf_lock);
633 __bpf_ksym_del(ksym);
634 spin_unlock_bh(&bpf_lock);
635}
636
637static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
638{
639 return fp->jited && !bpf_prog_was_classic(fp);
640}
641
642static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
643{
644 return list_empty(&fp->aux->ksym.lnode) ||
645 fp->aux->ksym.lnode.prev == LIST_POISON2;
646}
647
648void bpf_prog_kallsyms_add(struct bpf_prog *fp)
649{
650 if (!bpf_prog_kallsyms_candidate(fp) ||
651 !bpf_capable())
652 return;
653
654 bpf_prog_ksym_set_addr(fp);
655 bpf_prog_ksym_set_name(fp);
656 fp->aux->ksym.prog = true;
657
658 bpf_ksym_add(&fp->aux->ksym);
659}
660
661void bpf_prog_kallsyms_del(struct bpf_prog *fp)
662{
663 if (!bpf_prog_kallsyms_candidate(fp))
664 return;
665
666 bpf_ksym_del(&fp->aux->ksym);
667}
668
669static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
670{
671 struct latch_tree_node *n;
672
673 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
674 return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
675}
676
677const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
678 unsigned long *off, char *sym)
679{
680 struct bpf_ksym *ksym;
681 char *ret = NULL;
682
683 rcu_read_lock();
684 ksym = bpf_ksym_find(addr);
685 if (ksym) {
686 unsigned long symbol_start = ksym->start;
687 unsigned long symbol_end = ksym->end;
688
689 strncpy(sym, ksym->name, KSYM_NAME_LEN);
690
691 ret = sym;
692 if (size)
693 *size = symbol_end - symbol_start;
694 if (off)
695 *off = addr - symbol_start;
696 }
697 rcu_read_unlock();
698
699 return ret;
700}
701
702bool is_bpf_text_address(unsigned long addr)
703{
704 bool ret;
705
706 rcu_read_lock();
707 ret = bpf_ksym_find(addr) != NULL;
708 rcu_read_unlock();
709
710 return ret;
711}
712
713static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
714{
715 struct bpf_ksym *ksym = bpf_ksym_find(addr);
716
717 return ksym && ksym->prog ?
718 container_of(ksym, struct bpf_prog_aux, ksym)->prog :
719 NULL;
720}
721
722const struct exception_table_entry *search_bpf_extables(unsigned long addr)
723{
724 const struct exception_table_entry *e = NULL;
725 struct bpf_prog *prog;
726
727 rcu_read_lock();
728 prog = bpf_prog_ksym_find(addr);
729 if (!prog)
730 goto out;
731 if (!prog->aux->num_exentries)
732 goto out;
733
734 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
735out:
736 rcu_read_unlock();
737 return e;
738}
739
740int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
741 char *sym)
742{
743 struct bpf_ksym *ksym;
744 unsigned int it = 0;
745 int ret = -ERANGE;
746
747 if (!bpf_jit_kallsyms_enabled())
748 return ret;
749
750 rcu_read_lock();
751 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
752 if (it++ != symnum)
753 continue;
754
755 strncpy(sym, ksym->name, KSYM_NAME_LEN);
756
757 *value = ksym->start;
758 *type = BPF_SYM_ELF_TYPE;
759
760 ret = 0;
761 break;
762 }
763 rcu_read_unlock();
764
765 return ret;
766}
767
768int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
769 struct bpf_jit_poke_descriptor *poke)
770{
771 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
772 static const u32 poke_tab_max = 1024;
773 u32 slot = prog->aux->size_poke_tab;
774 u32 size = slot + 1;
775
776 if (size > poke_tab_max)
777 return -ENOSPC;
778 if (poke->tailcall_target || poke->tailcall_target_stable ||
779 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
780 return -EINVAL;
781
782 switch (poke->reason) {
783 case BPF_POKE_REASON_TAIL_CALL:
784 if (!poke->tail_call.map)
785 return -EINVAL;
786 break;
787 default:
788 return -EINVAL;
789 }
790
791 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
792 if (!tab)
793 return -ENOMEM;
794
795 memcpy(&tab[slot], poke, sizeof(*poke));
796 prog->aux->size_poke_tab = size;
797 prog->aux->poke_tab = tab;
798
799 return slot;
800}
801
802static atomic_long_t bpf_jit_current;
803
804/* Can be overridden by an arch's JIT compiler if it has a custom,
805 * dedicated BPF backend memory area, or if neither of the
806 * two regions below applies.
807 */
808u64 __weak bpf_jit_alloc_exec_limit(void)
809{
810#if defined(MODULES_VADDR)
811 return MODULES_END - MODULES_VADDR;
812#else
813 return VMALLOC_END - VMALLOC_START;
814#endif
815}
816
817static int __init bpf_jit_charge_init(void)
818{
819	/* Only used as a heuristic here to derive the limit. */
820 bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
821 PAGE_SIZE), LONG_MAX);
822 return 0;
823}
824pure_initcall(bpf_jit_charge_init);
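/* Illustrative default (hypothetical numbers): with a 1 GiB
 * bpf_jit_alloc_exec_limit() the heuristic above yields a
 * bpf_jit_limit of 256 MiB, i.e. JIT allocations charged via
 * bpf_jit_charge_modmem() below may consume at most a quarter of the
 * module/vmalloc area unless the caller is bpf_capable().
 */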
825
826int bpf_jit_charge_modmem(u32 pages)
827{
828 if (atomic_long_add_return(pages, &bpf_jit_current) >
829 (bpf_jit_limit >> PAGE_SHIFT)) {
830 if (!bpf_capable()) {
831 atomic_long_sub(pages, &bpf_jit_current);
832 return -EPERM;
833 }
834 }
835
836 return 0;
837}
838
839void bpf_jit_uncharge_modmem(u32 pages)
840{
841 atomic_long_sub(pages, &bpf_jit_current);
842}
843
844void *__weak bpf_jit_alloc_exec(unsigned long size)
845{
846 return module_alloc(size);
847}
848
849void __weak bpf_jit_free_exec(void *addr)
850{
851 module_memfree(addr);
852}
853
854struct bpf_binary_header *
855bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
856 unsigned int alignment,
857 bpf_jit_fill_hole_t bpf_fill_ill_insns)
858{
859 struct bpf_binary_header *hdr;
860 u32 size, hole, start, pages;
861
862 WARN_ON_ONCE(!is_power_of_2(alignment) ||
863 alignment > BPF_IMAGE_ALIGNMENT);
864
865	/* Most BPF filters are really small, but if some of them
866 * fill a page, allow at least 128 extra bytes to insert a
867 * random section of illegal instructions.
868 */
869 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
870 pages = size / PAGE_SIZE;
871
872 if (bpf_jit_charge_modmem(pages))
873 return NULL;
874 hdr = bpf_jit_alloc_exec(size);
875 if (!hdr) {
876 bpf_jit_uncharge_modmem(pages);
877 return NULL;
878 }
879
880 /* Fill space with illegal/arch-dep instructions. */
881 bpf_fill_ill_insns(hdr, size);
882
883 hdr->pages = pages;
884 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
885 PAGE_SIZE - sizeof(*hdr));
886 start = (get_random_int() % hole) & ~(alignment - 1);
887
888 /* Leave a random number of instructions before BPF code. */
889 *image_ptr = &hdr->image[start];
890
891 return hdr;
892}
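/* Rough layout of the allocation above (illustrative, not to scale):
 *
 *	+-------------------+------------+------------------+-----------+
 *	| bpf_binary_header | fill insns | JITed image      | fill insns|
 *	|                   | (random    | (proglen bytes,  |           |
 *	|                   |  start)    | *image_ptr here) |           |
 *	+-------------------+------------+------------------+-----------+
 *	|<------ round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE) ---->|
 *
 * The randomized start offset within the hole makes the image location
 * less predictable, and all bytes around the image hold the illegal or
 * arch-specific trap pattern written by bpf_fill_ill_insns().
 */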
893
894void bpf_jit_binary_free(struct bpf_binary_header *hdr)
895{
896 u32 pages = hdr->pages;
897
898 bpf_jit_free_exec(hdr);
899 bpf_jit_uncharge_modmem(pages);
900}
901
902/* This symbol is only overridden by archs that have different
903 * requirements than the usual eBPF JITs, e.g. when they only
904 * implement cBPF JIT, do not set images read-only, etc.
905 */
906void __weak bpf_jit_free(struct bpf_prog *fp)
907{
908 if (fp->jited) {
909 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
910
911 bpf_jit_binary_free(hdr);
912
913 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
914 }
915
916 bpf_prog_unlock_free(fp);
917}
918
919int bpf_jit_get_func_addr(const struct bpf_prog *prog,
920 const struct bpf_insn *insn, bool extra_pass,
921 u64 *func_addr, bool *func_addr_fixed)
922{
923 s16 off = insn->off;
924 s32 imm = insn->imm;
925 u8 *addr;
926
927 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
928 if (!*func_addr_fixed) {
929		/* Place-holder address until the last pass has collected
930		 * all addresses for JITed subprograms, in which case we
931 * can pick them up from prog->aux.
932 */
933 if (!extra_pass)
934 addr = NULL;
935 else if (prog->aux->func &&
936 off >= 0 && off < prog->aux->func_cnt)
937 addr = (u8 *)prog->aux->func[off]->bpf_func;
938 else
939 return -EINVAL;
940 } else {
941 /* Address of a BPF helper call. Since part of the core
942 * kernel, it's always at a fixed location. __bpf_call_base
943		 * and the helper with imm relative to it are both in the
944		 * core kernel.
945 */
946 addr = (u8 *)__bpf_call_base + imm;
947 }
948
949 *func_addr = (unsigned long)addr;
950 return 0;
951}
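/* Example of the two cases above (addresses and offsets made up): a
 * helper call with imm = 0x1120 resolves to __bpf_call_base + 0x1120
 * and is reported as a fixed address, while a BPF_PSEUDO_CALL with
 * off = 2 resolves, in the extra pass only, to
 * prog->aux->func[2]->bpf_func, i.e. the JITed entry of the third
 * subprogram.
 */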
952
953static int bpf_jit_blind_insn(const struct bpf_insn *from,
954 const struct bpf_insn *aux,
955 struct bpf_insn *to_buff,
956 bool emit_zext)
957{
958 struct bpf_insn *to = to_buff;
959 u32 imm_rnd = get_random_int();
960 s16 off;
961
962 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
963 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
964
965 /* Constraints on AX register:
966 *
967 * AX register is inaccessible from user space. It is mapped in
968 * all JITs, and used here for constant blinding rewrites. It is
969 * typically "stateless" meaning its contents are only valid within
970 * the executed instruction, but not across several instructions.
971 * There are a few exceptions however which are further detailed
972 * below.
973 *
974 * Constant blinding is only used by JITs, not in the interpreter.
975	 * The interpreter uses AX on some occasions as a local
976 * register e.g. in DIV or MOD instructions.
977 *
978 * In restricted circumstances, the verifier can also use the AX
979 * register for rewrites as long as they do not interfere with
980 * the above cases!
981 */
982 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
983 goto out;
984
985 if (from->imm == 0 &&
986 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
987 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
988 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
989 goto out;
990 }
991
992 switch (from->code) {
993 case BPF_ALU | BPF_ADD | BPF_K:
994 case BPF_ALU | BPF_SUB | BPF_K:
995 case BPF_ALU | BPF_AND | BPF_K:
996 case BPF_ALU | BPF_OR | BPF_K:
997 case BPF_ALU | BPF_XOR | BPF_K:
998 case BPF_ALU | BPF_MUL | BPF_K:
999 case BPF_ALU | BPF_MOV | BPF_K:
1000 case BPF_ALU | BPF_DIV | BPF_K:
1001 case BPF_ALU | BPF_MOD | BPF_K:
1002 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1003 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1004 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1005 break;
1006
1007 case BPF_ALU64 | BPF_ADD | BPF_K:
1008 case BPF_ALU64 | BPF_SUB | BPF_K:
1009 case BPF_ALU64 | BPF_AND | BPF_K:
1010 case BPF_ALU64 | BPF_OR | BPF_K:
1011 case BPF_ALU64 | BPF_XOR | BPF_K:
1012 case BPF_ALU64 | BPF_MUL | BPF_K:
1013 case BPF_ALU64 | BPF_MOV | BPF_K:
1014 case BPF_ALU64 | BPF_DIV | BPF_K:
1015 case BPF_ALU64 | BPF_MOD | BPF_K:
1016 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1017 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1018 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1019 break;
1020
1021 case BPF_JMP | BPF_JEQ | BPF_K:
1022 case BPF_JMP | BPF_JNE | BPF_K:
1023 case BPF_JMP | BPF_JGT | BPF_K:
1024 case BPF_JMP | BPF_JLT | BPF_K:
1025 case BPF_JMP | BPF_JGE | BPF_K:
1026 case BPF_JMP | BPF_JLE | BPF_K:
1027 case BPF_JMP | BPF_JSGT | BPF_K:
1028 case BPF_JMP | BPF_JSLT | BPF_K:
1029 case BPF_JMP | BPF_JSGE | BPF_K:
1030 case BPF_JMP | BPF_JSLE | BPF_K:
1031 case BPF_JMP | BPF_JSET | BPF_K:
1032		/* Account for the extra offset in case of a backjump. */
1033 off = from->off;
1034 if (off < 0)
1035 off -= 2;
1036 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1037 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1038 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1039 break;
1040
1041 case BPF_JMP32 | BPF_JEQ | BPF_K:
1042 case BPF_JMP32 | BPF_JNE | BPF_K:
1043 case BPF_JMP32 | BPF_JGT | BPF_K:
1044 case BPF_JMP32 | BPF_JLT | BPF_K:
1045 case BPF_JMP32 | BPF_JGE | BPF_K:
1046 case BPF_JMP32 | BPF_JLE | BPF_K:
1047 case BPF_JMP32 | BPF_JSGT | BPF_K:
1048 case BPF_JMP32 | BPF_JSLT | BPF_K:
1049 case BPF_JMP32 | BPF_JSGE | BPF_K:
1050 case BPF_JMP32 | BPF_JSLE | BPF_K:
1051 case BPF_JMP32 | BPF_JSET | BPF_K:
1052		/* Account for the extra offset in case of a backjump. */
1053 off = from->off;
1054 if (off < 0)
1055 off -= 2;
1056 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1057 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1058 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1059 off);
1060 break;
1061
1062 case BPF_LD | BPF_IMM | BPF_DW:
1063 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1064 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1065 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1066 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1067 break;
1068 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1069 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1070 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1071 if (emit_zext)
1072 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1073 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1074 break;
1075
1076 case BPF_ST | BPF_MEM | BPF_DW:
1077 case BPF_ST | BPF_MEM | BPF_W:
1078 case BPF_ST | BPF_MEM | BPF_H:
1079 case BPF_ST | BPF_MEM | BPF_B:
1080 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1081 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1082 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1083 break;
1084 }
1085out:
1086 return to - to_buff;
1087}
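/* Hedged example of a blinded rewrite (imm_rnd is random per insn,
 * the value below is made up): the single instruction
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 42)
 *
 * is rewritten into
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x1234abcd ^ 42)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x1234abcd)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_AX)
 *
 * so the attacker-controlled constant 42 never appears verbatim in the
 * JITed image, while AX still ends up holding exactly 42 at run time.
 */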
1088
1089static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1090 gfp_t gfp_extra_flags)
1091{
1092 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1093 struct bpf_prog *fp;
1094
1095 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1096 if (fp != NULL) {
1097 /* aux->prog still points to the fp_other one, so
1098 * when promoting the clone to the real program,
1099 * this still needs to be adapted.
1100 */
1101 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1102 }
1103
1104 return fp;
1105}
1106
1107static void bpf_prog_clone_free(struct bpf_prog *fp)
1108{
1109 /* aux was stolen by the other clone, so we cannot free
1110 * it from this path! It will be freed eventually by the
1111 * other program on release.
1112 *
1113 * At this point, we don't need a deferred release since
1114	 * the clone is guaranteed not to be locked.
1115 */
1116 fp->aux = NULL;
1117 fp->stats = NULL;
1118 fp->active = NULL;
1119 __bpf_prog_free(fp);
1120}
1121
1122void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1123{
1124 /* We have to repoint aux->prog to self, as we don't
1125 * know whether fp here is the clone or the original.
1126 */
1127 fp->aux->prog = fp;
1128 bpf_prog_clone_free(fp_other);
1129}
1130
1131struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1132{
1133 struct bpf_insn insn_buff[16], aux[2];
1134 struct bpf_prog *clone, *tmp;
1135 int insn_delta, insn_cnt;
1136 struct bpf_insn *insn;
1137 int i, rewritten;
1138
1139 if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
1140 return prog;
1141
1142 clone = bpf_prog_clone_create(prog, GFP_USER);
1143 if (!clone)
1144 return ERR_PTR(-ENOMEM);
1145
1146 insn_cnt = clone->len;
1147 insn = clone->insnsi;
1148
1149 for (i = 0; i < insn_cnt; i++, insn++) {
1150 /* We temporarily need to hold the original ld64 insn
1151 * so that we can still access the first part in the
1152 * second blinding run.
1153 */
1154 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1155 insn[1].code == 0)
1156 memcpy(aux, insn, sizeof(aux));
1157
1158 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1159 clone->aux->verifier_zext);
1160 if (!rewritten)
1161 continue;
1162
1163 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1164 if (IS_ERR(tmp)) {
1165 /* Patching may have repointed aux->prog during
1166 * realloc from the original one, so we need to
1167 * fix it up here on error.
1168 */
1169 bpf_jit_prog_release_other(prog, clone);
1170 return tmp;
1171 }
1172
1173 clone = tmp;
1174 insn_delta = rewritten - 1;
1175
1176 /* Walk new program and skip insns we just inserted. */
1177 insn = clone->insnsi + i + insn_delta;
1178 insn_cnt += insn_delta;
1179 i += insn_delta;
1180 }
1181
1182 clone->blinded = 1;
1183 return clone;
1184}
1185#endif /* CONFIG_BPF_JIT */
1186
1187/* Base function for offset calculation. Needs to go into .text section,
1188 * therefore keeping it non-static as well; will also be used by JITs
1189 * anyway later on, so do not let the compiler omit it. This also needs
1190 * to go into kallsyms for correlation from e.g. bpftool, so naming
1191 * must not change.
1192 */
1193noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1194{
1195 return 0;
1196}
1197EXPORT_SYMBOL_GPL(__bpf_call_base);
1198
1199/* All UAPI available opcodes. */
1200#define BPF_INSN_MAP(INSN_2, INSN_3) \
1201 /* 32 bit ALU operations. */ \
1202 /* Register based. */ \
1203 INSN_3(ALU, ADD, X), \
1204 INSN_3(ALU, SUB, X), \
1205 INSN_3(ALU, AND, X), \
1206 INSN_3(ALU, OR, X), \
1207 INSN_3(ALU, LSH, X), \
1208 INSN_3(ALU, RSH, X), \
1209 INSN_3(ALU, XOR, X), \
1210 INSN_3(ALU, MUL, X), \
1211 INSN_3(ALU, MOV, X), \
1212 INSN_3(ALU, ARSH, X), \
1213 INSN_3(ALU, DIV, X), \
1214 INSN_3(ALU, MOD, X), \
1215 INSN_2(ALU, NEG), \
1216 INSN_3(ALU, END, TO_BE), \
1217 INSN_3(ALU, END, TO_LE), \
1218 /* Immediate based. */ \
1219 INSN_3(ALU, ADD, K), \
1220 INSN_3(ALU, SUB, K), \
1221 INSN_3(ALU, AND, K), \
1222 INSN_3(ALU, OR, K), \
1223 INSN_3(ALU, LSH, K), \
1224 INSN_3(ALU, RSH, K), \
1225 INSN_3(ALU, XOR, K), \
1226 INSN_3(ALU, MUL, K), \
1227 INSN_3(ALU, MOV, K), \
1228 INSN_3(ALU, ARSH, K), \
1229 INSN_3(ALU, DIV, K), \
1230 INSN_3(ALU, MOD, K), \
1231 /* 64 bit ALU operations. */ \
1232 /* Register based. */ \
1233 INSN_3(ALU64, ADD, X), \
1234 INSN_3(ALU64, SUB, X), \
1235 INSN_3(ALU64, AND, X), \
1236 INSN_3(ALU64, OR, X), \
1237 INSN_3(ALU64, LSH, X), \
1238 INSN_3(ALU64, RSH, X), \
1239 INSN_3(ALU64, XOR, X), \
1240 INSN_3(ALU64, MUL, X), \
1241 INSN_3(ALU64, MOV, X), \
1242 INSN_3(ALU64, ARSH, X), \
1243 INSN_3(ALU64, DIV, X), \
1244 INSN_3(ALU64, MOD, X), \
1245 INSN_2(ALU64, NEG), \
1246 /* Immediate based. */ \
1247 INSN_3(ALU64, ADD, K), \
1248 INSN_3(ALU64, SUB, K), \
1249 INSN_3(ALU64, AND, K), \
1250 INSN_3(ALU64, OR, K), \
1251 INSN_3(ALU64, LSH, K), \
1252 INSN_3(ALU64, RSH, K), \
1253 INSN_3(ALU64, XOR, K), \
1254 INSN_3(ALU64, MUL, K), \
1255 INSN_3(ALU64, MOV, K), \
1256 INSN_3(ALU64, ARSH, K), \
1257 INSN_3(ALU64, DIV, K), \
1258 INSN_3(ALU64, MOD, K), \
1259 /* Call instruction. */ \
1260 INSN_2(JMP, CALL), \
1261 /* Exit instruction. */ \
1262 INSN_2(JMP, EXIT), \
1263 /* 32-bit Jump instructions. */ \
1264 /* Register based. */ \
1265 INSN_3(JMP32, JEQ, X), \
1266 INSN_3(JMP32, JNE, X), \
1267 INSN_3(JMP32, JGT, X), \
1268 INSN_3(JMP32, JLT, X), \
1269 INSN_3(JMP32, JGE, X), \
1270 INSN_3(JMP32, JLE, X), \
1271 INSN_3(JMP32, JSGT, X), \
1272 INSN_3(JMP32, JSLT, X), \
1273 INSN_3(JMP32, JSGE, X), \
1274 INSN_3(JMP32, JSLE, X), \
1275 INSN_3(JMP32, JSET, X), \
1276 /* Immediate based. */ \
1277 INSN_3(JMP32, JEQ, K), \
1278 INSN_3(JMP32, JNE, K), \
1279 INSN_3(JMP32, JGT, K), \
1280 INSN_3(JMP32, JLT, K), \
1281 INSN_3(JMP32, JGE, K), \
1282 INSN_3(JMP32, JLE, K), \
1283 INSN_3(JMP32, JSGT, K), \
1284 INSN_3(JMP32, JSLT, K), \
1285 INSN_3(JMP32, JSGE, K), \
1286 INSN_3(JMP32, JSLE, K), \
1287 INSN_3(JMP32, JSET, K), \
1288 /* Jump instructions. */ \
1289 /* Register based. */ \
1290 INSN_3(JMP, JEQ, X), \
1291 INSN_3(JMP, JNE, X), \
1292 INSN_3(JMP, JGT, X), \
1293 INSN_3(JMP, JLT, X), \
1294 INSN_3(JMP, JGE, X), \
1295 INSN_3(JMP, JLE, X), \
1296 INSN_3(JMP, JSGT, X), \
1297 INSN_3(JMP, JSLT, X), \
1298 INSN_3(JMP, JSGE, X), \
1299 INSN_3(JMP, JSLE, X), \
1300 INSN_3(JMP, JSET, X), \
1301 /* Immediate based. */ \
1302 INSN_3(JMP, JEQ, K), \
1303 INSN_3(JMP, JNE, K), \
1304 INSN_3(JMP, JGT, K), \
1305 INSN_3(JMP, JLT, K), \
1306 INSN_3(JMP, JGE, K), \
1307 INSN_3(JMP, JLE, K), \
1308 INSN_3(JMP, JSGT, K), \
1309 INSN_3(JMP, JSLT, K), \
1310 INSN_3(JMP, JSGE, K), \
1311 INSN_3(JMP, JSLE, K), \
1312 INSN_3(JMP, JSET, K), \
1313 INSN_2(JMP, JA), \
1314 /* Store instructions. */ \
1315 /* Register based. */ \
1316 INSN_3(STX, MEM, B), \
1317 INSN_3(STX, MEM, H), \
1318 INSN_3(STX, MEM, W), \
1319 INSN_3(STX, MEM, DW), \
1320 INSN_3(STX, ATOMIC, W), \
1321 INSN_3(STX, ATOMIC, DW), \
1322 /* Immediate based. */ \
1323 INSN_3(ST, MEM, B), \
1324 INSN_3(ST, MEM, H), \
1325 INSN_3(ST, MEM, W), \
1326 INSN_3(ST, MEM, DW), \
1327 /* Load instructions. */ \
1328 /* Register based. */ \
1329 INSN_3(LDX, MEM, B), \
1330 INSN_3(LDX, MEM, H), \
1331 INSN_3(LDX, MEM, W), \
1332 INSN_3(LDX, MEM, DW), \
1333 /* Immediate based. */ \
1334 INSN_3(LD, IMM, DW)
1335
1336bool bpf_opcode_in_insntable(u8 code)
1337{
1338#define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1339#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1340 static const bool public_insntable[256] = {
1341 [0 ... 255] = false,
1342 /* Now overwrite non-defaults ... */
1343 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1344 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1345 [BPF_LD | BPF_ABS | BPF_B] = true,
1346 [BPF_LD | BPF_ABS | BPF_H] = true,
1347 [BPF_LD | BPF_ABS | BPF_W] = true,
1348 [BPF_LD | BPF_IND | BPF_B] = true,
1349 [BPF_LD | BPF_IND | BPF_H] = true,
1350 [BPF_LD | BPF_IND | BPF_W] = true,
1351 };
1352#undef BPF_INSN_3_TBL
1353#undef BPF_INSN_2_TBL
1354 return public_insntable[code];
1355}
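/* Example of how an opcode byte decomposes into the table index above
 * (per the eBPF instruction encoding): BPF_ALU64 | BPF_MOV | BPF_K is
 * 0x07 | 0xb0 | 0x00 = 0xb7, so public_insntable[0xb7] is true, while
 * a kernel-internal opcode such as BPF_JMP | BPF_TAIL_CALL is not part
 * of BPF_INSN_MAP and therefore stays false.
 */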
1356
1357#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1358u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1359{
1360 memset(dst, 0, size);
1361 return -EFAULT;
1362}
1363
1364/**
1365 * ___bpf_prog_run - run eBPF program on a given context
1366 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1367 * @insn: is the array of eBPF instructions
1368 *
1369 * Decode and execute eBPF instructions.
1370 *
1371 * Return: whatever value is in %BPF_R0 at program exit
1372 */
1373static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1374{
1375#define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1376#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1377 static const void * const jumptable[256] __annotate_jump_table = {
1378 [0 ... 255] = &&default_label,
1379 /* Now overwrite non-defaults ... */
1380 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1381 /* Non-UAPI available opcodes. */
1382 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1383 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1384 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
1385 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1386 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1387 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1388 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1389 };
1390#undef BPF_INSN_3_LBL
1391#undef BPF_INSN_2_LBL
1392 u32 tail_call_cnt = 0;
1393
1394#define CONT ({ insn++; goto select_insn; })
1395#define CONT_JMP ({ insn++; goto select_insn; })
1396
1397select_insn:
1398 goto *jumptable[insn->code];
1399
1400 /* Explicitly mask the register-based shift amounts with 63 or 31
1401 * to avoid undefined behavior. Normally this won't affect the
1402 * generated code, for example, in case of native 64 bit archs such
1403 * as x86-64 or arm64, the compiler is optimizing the AND away for
1404 * the interpreter. In case of JITs, each of the JIT backends compiles
1405 * the BPF shift operations to machine instructions which produce
1406 * implementation-defined results in such a case; the resulting
1407 * contents of the register may be arbitrary, but program behaviour
1408 * as a whole remains defined. In other words, in case of JIT backends,
1409 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1410 */
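	/* E.g. (illustrative value) a 64-bit LSH with SRC = 70 shifts
	 * DST by 70 & 63 = 6 in the interpreter; shifting by the full
	 * 70 would be undefined behavior in C, whereas the JITs emit
	 * the native shift instruction and let the hardware define the
	 * result.
	 */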
1411 /* ALU (shifts) */
1412#define SHT(OPCODE, OP) \
1413 ALU64_##OPCODE##_X: \
1414 DST = DST OP (SRC & 63); \
1415 CONT; \
1416 ALU_##OPCODE##_X: \
1417 DST = (u32) DST OP ((u32) SRC & 31); \
1418 CONT; \
1419 ALU64_##OPCODE##_K: \
1420 DST = DST OP IMM; \
1421 CONT; \
1422 ALU_##OPCODE##_K: \
1423 DST = (u32) DST OP (u32) IMM; \
1424 CONT;
1425 /* ALU (rest) */
1426#define ALU(OPCODE, OP) \
1427 ALU64_##OPCODE##_X: \
1428 DST = DST OP SRC; \
1429 CONT; \
1430 ALU_##OPCODE##_X: \
1431 DST = (u32) DST OP (u32) SRC; \
1432 CONT; \
1433 ALU64_##OPCODE##_K: \
1434 DST = DST OP IMM; \
1435 CONT; \
1436 ALU_##OPCODE##_K: \
1437 DST = (u32) DST OP (u32) IMM; \
1438 CONT;
1439 ALU(ADD, +)
1440 ALU(SUB, -)
1441 ALU(AND, &)
1442 ALU(OR, |)
1443 ALU(XOR, ^)
1444 ALU(MUL, *)
1445 SHT(LSH, <<)
1446 SHT(RSH, >>)
1447#undef SHT
1448#undef ALU
1449 ALU_NEG:
1450 DST = (u32) -DST;
1451 CONT;
1452 ALU64_NEG:
1453 DST = -DST;
1454 CONT;
1455 ALU_MOV_X:
1456 DST = (u32) SRC;
1457 CONT;
1458 ALU_MOV_K:
1459 DST = (u32) IMM;
1460 CONT;
1461 ALU64_MOV_X:
1462 DST = SRC;
1463 CONT;
1464 ALU64_MOV_K:
1465 DST = IMM;
1466 CONT;
1467 LD_IMM_DW:
1468 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1469 insn++;
1470 CONT;
1471 ALU_ARSH_X:
1472 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1473 CONT;
1474 ALU_ARSH_K:
1475 DST = (u64) (u32) (((s32) DST) >> IMM);
1476 CONT;
1477 ALU64_ARSH_X:
1478 (*(s64 *) &DST) >>= (SRC & 63);
1479 CONT;
1480 ALU64_ARSH_K:
1481 (*(s64 *) &DST) >>= IMM;
1482 CONT;
1483 ALU64_MOD_X:
1484 div64_u64_rem(DST, SRC, &AX);
1485 DST = AX;
1486 CONT;
1487 ALU_MOD_X:
1488 AX = (u32) DST;
1489 DST = do_div(AX, (u32) SRC);
1490 CONT;
1491 ALU64_MOD_K:
1492 div64_u64_rem(DST, IMM, &AX);
1493 DST = AX;
1494 CONT;
1495 ALU_MOD_K:
1496 AX = (u32) DST;
1497 DST = do_div(AX, (u32) IMM);
1498 CONT;
1499 ALU64_DIV_X:
1500 DST = div64_u64(DST, SRC);
1501 CONT;
1502 ALU_DIV_X:
1503 AX = (u32) DST;
1504 do_div(AX, (u32) SRC);
1505 DST = (u32) AX;
1506 CONT;
1507 ALU64_DIV_K:
1508 DST = div64_u64(DST, IMM);
1509 CONT;
1510 ALU_DIV_K:
1511 AX = (u32) DST;
1512 do_div(AX, (u32) IMM);
1513 DST = (u32) AX;
1514 CONT;
1515 ALU_END_TO_BE:
1516 switch (IMM) {
1517 case 16:
1518 DST = (__force u16) cpu_to_be16(DST);
1519 break;
1520 case 32:
1521 DST = (__force u32) cpu_to_be32(DST);
1522 break;
1523 case 64:
1524 DST = (__force u64) cpu_to_be64(DST);
1525 break;
1526 }
1527 CONT;
1528 ALU_END_TO_LE:
1529 switch (IMM) {
1530 case 16:
1531 DST = (__force u16) cpu_to_le16(DST);
1532 break;
1533 case 32:
1534 DST = (__force u32) cpu_to_le32(DST);
1535 break;
1536 case 64:
1537 DST = (__force u64) cpu_to_le64(DST);
1538 break;
1539 }
1540 CONT;
1541
1542 /* CALL */
1543 JMP_CALL:
1544 /* Function call scratches BPF_R1-BPF_R5 registers,
1545 * preserves BPF_R6-BPF_R9, and stores return value
1546 * into BPF_R0.
1547 */
1548 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1549 BPF_R4, BPF_R5);
1550 CONT;
1551
1552 JMP_CALL_ARGS:
1553 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1554 BPF_R3, BPF_R4,
1555 BPF_R5,
1556 insn + insn->off + 1);
1557 CONT;
1558
1559 JMP_TAIL_CALL: {
1560 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1561 struct bpf_array *array = container_of(map, struct bpf_array, map);
1562 struct bpf_prog *prog;
1563 u32 index = BPF_R3;
1564
1565 if (unlikely(index >= array->map.max_entries))
1566 goto out;
1567 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1568 goto out;
1569
1570 tail_call_cnt++;
1571
1572 prog = READ_ONCE(array->ptrs[index]);
1573 if (!prog)
1574 goto out;
1575
1576 /* ARG1 at this point is guaranteed to point to CTX from
1577		 * the verifier side because the tail call is
1578 * handled like a helper, that is, bpf_tail_call_proto,
1579 * where arg1_type is ARG_PTR_TO_CTX.
1580 */
1581 insn = prog->insnsi;
1582 goto select_insn;
1583out:
1584 CONT;
1585 }
1586 JMP_JA:
1587 insn += insn->off;
1588 CONT;
1589 JMP_EXIT:
1590 return BPF_R0;
1591 /* JMP */
1592#define COND_JMP(SIGN, OPCODE, CMP_OP) \
1593 JMP_##OPCODE##_X: \
1594 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
1595 insn += insn->off; \
1596 CONT_JMP; \
1597 } \
1598 CONT; \
1599 JMP32_##OPCODE##_X: \
1600 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
1601 insn += insn->off; \
1602 CONT_JMP; \
1603 } \
1604 CONT; \
1605 JMP_##OPCODE##_K: \
1606 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
1607 insn += insn->off; \
1608 CONT_JMP; \
1609 } \
1610 CONT; \
1611 JMP32_##OPCODE##_K: \
1612 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
1613 insn += insn->off; \
1614 CONT_JMP; \
1615 } \
1616 CONT;
1617 COND_JMP(u, JEQ, ==)
1618 COND_JMP(u, JNE, !=)
1619 COND_JMP(u, JGT, >)
1620 COND_JMP(u, JLT, <)
1621 COND_JMP(u, JGE, >=)
1622 COND_JMP(u, JLE, <=)
1623 COND_JMP(u, JSET, &)
1624 COND_JMP(s, JSGT, >)
1625 COND_JMP(s, JSLT, <)
1626 COND_JMP(s, JSGE, >=)
1627 COND_JMP(s, JSLE, <=)
1628#undef COND_JMP
1629	/* ST, STX and LDX */
1630 ST_NOSPEC:
1631 /* Speculation barrier for mitigating Speculative Store Bypass.
1632 * In case of arm64, we rely on the firmware mitigation as
1633 * controlled via the ssbd kernel parameter. Whenever the
1634 * mitigation is enabled, it works for all of the kernel code
1635 * with no need to provide any additional instructions here.
1636 * In case of x86, we use 'lfence' insn for mitigation. We
1637 * reuse preexisting logic from Spectre v1 mitigation that
1638 * happens to produce the required code on x86 for v4 as well.
1639 */
1640#ifdef CONFIG_X86
1641 barrier_nospec();
1642#endif
1643 CONT;
1644#define LDST(SIZEOP, SIZE) \
1645 STX_MEM_##SIZEOP: \
1646 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1647 CONT; \
1648 ST_MEM_##SIZEOP: \
1649 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1650 CONT; \
1651 LDX_MEM_##SIZEOP: \
1652 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1653 CONT;
1654
1655 LDST(B, u8)
1656 LDST(H, u16)
1657 LDST(W, u32)
1658 LDST(DW, u64)
1659#undef LDST
1660#define LDX_PROBE(SIZEOP, SIZE) \
1661 LDX_PROBE_MEM_##SIZEOP: \
1662 bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off)); \
1663 CONT;
1664 LDX_PROBE(B, 1)
1665 LDX_PROBE(H, 2)
1666 LDX_PROBE(W, 4)
1667 LDX_PROBE(DW, 8)
1668#undef LDX_PROBE
1669
1670#define ATOMIC_ALU_OP(BOP, KOP) \
1671 case BOP: \
1672 if (BPF_SIZE(insn->code) == BPF_W) \
1673 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
1674 (DST + insn->off)); \
1675 else \
1676 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
1677 (DST + insn->off)); \
1678 break; \
1679 case BOP | BPF_FETCH: \
1680 if (BPF_SIZE(insn->code) == BPF_W) \
1681 SRC = (u32) atomic_fetch_##KOP( \
1682 (u32) SRC, \
1683 (atomic_t *)(unsigned long) (DST + insn->off)); \
1684 else \
1685 SRC = (u64) atomic64_fetch_##KOP( \
1686 (u64) SRC, \
1687 (atomic64_t *)(unsigned long) (DST + insn->off)); \
1688 break;
1689
1690 STX_ATOMIC_DW:
1691 STX_ATOMIC_W:
1692 switch (IMM) {
1693 ATOMIC_ALU_OP(BPF_ADD, add)
1694 ATOMIC_ALU_OP(BPF_AND, and)
1695 ATOMIC_ALU_OP(BPF_OR, or)
1696 ATOMIC_ALU_OP(BPF_XOR, xor)
1697#undef ATOMIC_ALU_OP
1698
1699 case BPF_XCHG:
1700 if (BPF_SIZE(insn->code) == BPF_W)
1701 SRC = (u32) atomic_xchg(
1702 (atomic_t *)(unsigned long) (DST + insn->off),
1703 (u32) SRC);
1704 else
1705 SRC = (u64) atomic64_xchg(
1706 (atomic64_t *)(unsigned long) (DST + insn->off),
1707 (u64) SRC);
1708 break;
1709 case BPF_CMPXCHG:
1710 if (BPF_SIZE(insn->code) == BPF_W)
1711 BPF_R0 = (u32) atomic_cmpxchg(
1712 (atomic_t *)(unsigned long) (DST + insn->off),
1713 (u32) BPF_R0, (u32) SRC);
1714 else
1715 BPF_R0 = (u64) atomic64_cmpxchg(
1716 (atomic64_t *)(unsigned long) (DST + insn->off),
1717 (u64) BPF_R0, (u64) SRC);
1718 break;
1719
1720 default:
1721 goto default_label;
1722 }
1723 CONT;
1724
1725 default_label:
1726 /* If we ever reach this, we have a bug somewhere. Die hard here
1727 * instead of just returning 0; we could be somewhere in a subprog,
1728	 * so execution could otherwise continue, which we do /not/ want.
1729 *
1730 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1731 */
1732 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
1733 insn->code, insn->imm);
1734 BUG_ON(1);
1735 return 0;
1736}
1737
1738#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1739#define DEFINE_BPF_PROG_RUN(stack_size) \
1740static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1741{ \
1742 u64 stack[stack_size / sizeof(u64)]; \
1743 u64 regs[MAX_BPF_EXT_REG]; \
1744\
1745 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1746 ARG1 = (u64) (unsigned long) ctx; \
1747 return ___bpf_prog_run(regs, insn); \
1748}
1749
1750#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1751#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1752static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1753 const struct bpf_insn *insn) \
1754{ \
1755 u64 stack[stack_size / sizeof(u64)]; \
1756 u64 regs[MAX_BPF_EXT_REG]; \
1757\
1758 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1759 BPF_R1 = r1; \
1760 BPF_R2 = r2; \
1761 BPF_R3 = r3; \
1762 BPF_R4 = r4; \
1763 BPF_R5 = r5; \
1764 return ___bpf_prog_run(regs, insn); \
1765}
1766
1767#define EVAL1(FN, X) FN(X)
1768#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1769#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1770#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1771#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1772#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1773
1774EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1775EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1776EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1777
1778EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1779EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1780EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1781
1782#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1783
1784static unsigned int (*interpreters[])(const void *ctx,
1785 const struct bpf_insn *insn) = {
1786EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1787EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1788EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1789};
1790#undef PROG_NAME_LIST
1791#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1792static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1793 const struct bpf_insn *insn) = {
1794EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1795EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1796EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1797};
1798#undef PROG_NAME_LIST
1799
1800void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1801{
1802 stack_depth = max_t(u32, stack_depth, 1);
1803 insn->off = (s16) insn->imm;
1804 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1805 __bpf_call_base_args;
1806 insn->code = BPF_JMP | BPF_CALL_ARGS;
1807}
1808
1809#else
1810static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1811 const struct bpf_insn *insn)
1812{
1813 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1814 * is not working properly, so warn about it!
1815 */
1816 WARN_ON_ONCE(1);
1817 return 0;
1818}
1819#endif
1820
1821bool bpf_prog_array_compatible(struct bpf_array *array,
1822 const struct bpf_prog *fp)
1823{
1824 if (fp->kprobe_override)
1825 return false;
1826
1827 if (!array->aux->type) {
1828 /* There's no owner yet where we could check for
1829 * compatibility.
1830 */
1831 array->aux->type = fp->type;
1832 array->aux->jited = fp->jited;
1833 return true;
1834 }
1835
1836 return array->aux->type == fp->type &&
1837 array->aux->jited == fp->jited;
1838}
1839
1840static int bpf_check_tail_call(const struct bpf_prog *fp)
1841{
1842 struct bpf_prog_aux *aux = fp->aux;
1843 int i, ret = 0;
1844
1845 mutex_lock(&aux->used_maps_mutex);
1846 for (i = 0; i < aux->used_map_cnt; i++) {
1847 struct bpf_map *map = aux->used_maps[i];
1848 struct bpf_array *array;
1849
1850 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1851 continue;
1852
1853 array = container_of(map, struct bpf_array, map);
1854 if (!bpf_prog_array_compatible(array, fp)) {
1855 ret = -EINVAL;
1856 goto out;
1857 }
1858 }
1859
1860out:
1861 mutex_unlock(&aux->used_maps_mutex);
1862 return ret;
1863}
1864
1865static void bpf_prog_select_func(struct bpf_prog *fp)
1866{
1867#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1868 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1869
1870 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1871#else
1872 fp->bpf_func = __bpf_prog_ret0_warn;
1873#endif
1874}
1875
1876/**
1877 * bpf_prog_select_runtime - select exec runtime for BPF program
1878 * @fp: bpf_prog populated with internal BPF program
1879 * @err: pointer to error variable
1880 *
1881 * Try to JIT the eBPF program; if the JIT is not available, use the interpreter.
1882 * The BPF program will be executed via the BPF_PROG_RUN() macro.
1883 *
1884 * Return: the &fp argument along with &err set to 0 for success or
1885 * a negative errno code on failure
1886 */
1887struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1888{
1889	/* In case of BPF to BPF calls, the verifier did all the prep
1890	 * work with regard to JITing, etc.
1891 */
1892 bool jit_needed = false;
1893
1894 if (fp->bpf_func)
1895 goto finalize;
1896
1897 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
1898 bpf_prog_has_kfunc_call(fp))
1899 jit_needed = true;
1900
1901 bpf_prog_select_func(fp);
1902
1903 /* eBPF JITs can rewrite the program in case constant
1904 * blinding is active. However, in case of error during
1905 * blinding, bpf_int_jit_compile() must always return a
1906 * valid program, which in this case would simply not
1907	 * be JITed, but fall back to the interpreter instead.
1908 */
1909 if (!bpf_prog_is_dev_bound(fp->aux)) {
1910 *err = bpf_prog_alloc_jited_linfo(fp);
1911 if (*err)
1912 return fp;
1913
1914 fp = bpf_int_jit_compile(fp);
1915 bpf_prog_jit_attempt_done(fp);
1916 if (!fp->jited && jit_needed) {
1917 *err = -ENOTSUPP;
1918 return fp;
1919 }
1920 } else {
1921 *err = bpf_prog_offload_compile(fp);
1922 if (*err)
1923 return fp;
1924 }
1925
1926finalize:
1927 bpf_prog_lock_ro(fp);
1928
1929 /* The tail call compatibility check can only be done at
1930	 * this late stage, as we need to determine whether we deal
1931	 * with JITed or non-JITed program concatenations, and not
1932 * all eBPF JITs might immediately support all features.
1933 */
1934 *err = bpf_check_tail_call(fp);
1935
1936 return fp;
1937}
1938EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
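/* Typical call pattern (a sketch of how the load path is expected to
 * use this, not a verbatim copy of any caller):
 *
 *	fp = bpf_prog_select_runtime(fp, &err);
 *	if (err < 0)
 *		goto free_prog;
 *
 * fp stays valid even on error and must still be released by the
 * caller; on success fp->bpf_func points either at the JITed image or
 * at one of the stack-size specific interpreter entry points.
 */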
1939
1940static unsigned int __bpf_prog_ret1(const void *ctx,
1941 const struct bpf_insn *insn)
1942{
1943 return 1;
1944}
1945
1946static struct bpf_prog_dummy {
1947 struct bpf_prog prog;
1948} dummy_bpf_prog = {
1949 .prog = {
1950 .bpf_func = __bpf_prog_ret1,
1951 },
1952};
1953
1954/* To avoid allocating an empty bpf_prog_array for cgroups that
1955 * don't have a bpf program attached, use one global 'empty_prog_array'.
1956 * It will not be modified by the caller of bpf_prog_array_alloc()
1957 * (since the caller requested prog_cnt == 0), and that pointer
1958 * should still be 'freed' by bpf_prog_array_free().
1959 */
1960static struct {
1961 struct bpf_prog_array hdr;
1962 struct bpf_prog *null_prog;
1963} empty_prog_array = {
1964 .null_prog = NULL,
1965};
1966
1967struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1968{
1969 if (prog_cnt)
1970 return kzalloc(sizeof(struct bpf_prog_array) +
1971 sizeof(struct bpf_prog_array_item) *
1972 (prog_cnt + 1),
1973 flags);
1974
1975 return &empty_prog_array.hdr;
1976}
1977
1978void bpf_prog_array_free(struct bpf_prog_array *progs)
1979{
1980 if (!progs || progs == &empty_prog_array.hdr)
1981 return;
1982 kfree_rcu(progs, rcu);
1983}
1984
1985int bpf_prog_array_length(struct bpf_prog_array *array)
1986{
1987 struct bpf_prog_array_item *item;
1988 u32 cnt = 0;
1989
1990 for (item = array->items; item->prog; item++)
1991 if (item->prog != &dummy_bpf_prog.prog)
1992 cnt++;
1993 return cnt;
1994}
1995
1996bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
1997{
1998 struct bpf_prog_array_item *item;
1999
2000 for (item = array->items; item->prog; item++)
2001 if (item->prog != &dummy_bpf_prog.prog)
2002 return false;
2003 return true;
2004}
2005
2006static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2007 u32 *prog_ids,
2008 u32 request_cnt)
2009{
2010 struct bpf_prog_array_item *item;
2011 int i = 0;
2012
2013 for (item = array->items; item->prog; item++) {
2014 if (item->prog == &dummy_bpf_prog.prog)
2015 continue;
2016 prog_ids[i] = item->prog->aux->id;
2017 if (++i == request_cnt) {
2018 item++;
2019 break;
2020 }
2021 }
2022
2023 return !!(item->prog);
2024}
2025
2026int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2027 __u32 __user *prog_ids, u32 cnt)
2028{
2029 unsigned long err = 0;
2030 bool nospc;
2031 u32 *ids;
2032
2033 /* users of this function are doing:
2034 * cnt = bpf_prog_array_length();
2035 * if (cnt > 0)
2036 * bpf_prog_array_copy_to_user(..., cnt);
2037	 * so the kcalloc below doesn't need an extra cnt > 0 check.
2038 */
2039 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2040 if (!ids)
2041 return -ENOMEM;
2042 nospc = bpf_prog_array_copy_core(array, ids, cnt);
2043 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2044 kfree(ids);
2045 if (err)
2046 return -EFAULT;
2047 if (nospc)
2048 return -ENOSPC;
2049 return 0;
2050}

void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
				struct bpf_prog *old_prog)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog == old_prog) {
			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
			break;
		}
}

/**
 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
 *                                   index in the program array with
 *                                   a dummy no-op program.
 * @array: a bpf_prog_array
 * @index: the index of the program to replace
 *
 * Skips over dummy programs (they are not counted) when calculating
 * the position of the program to replace.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- Index out of range
 */
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
{
	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
}

/**
 * bpf_prog_array_update_at() - Updates the program at the given index
 *                              in the program array.
 * @array: a bpf_prog_array
 * @index: the index of the program to update
 * @prog: the program to insert into the array
 *
 * Skips over dummy programs (they are not counted) when calculating
 * the position of the program to update.
 *
 * Return:
 * * 0		- Success
 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
 * * -ENOENT	- Index out of range
 */
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog)
{
	struct bpf_prog_array_item *item;

	if (unlikely(index < 0))
		return -EINVAL;

	for (item = array->items; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		if (!index) {
			WRITE_ONCE(item->prog, prog);
			return 0;
		}
		index--;
	}
	return -ENOENT;
}
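/* Illustrative example (progA/progB/progC are hypothetical): if the array's
 * ->prog entries are { &progA, &dummy_bpf_prog.prog, &progB, NULL }, dummy
 * entries are not counted, so index 0 names progA and index 1 names progB:
 *
 *	bpf_prog_array_update_at(array, 1, &progC);	// replaces progB
 *	bpf_prog_array_delete_safe_at(array, 0);	// progA slot becomes a dummy
 *	bpf_prog_array_update_at(array, 2, &progC);	// fails with -ENOENT
 */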

int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog_array_item *existing;
	struct bpf_prog_array *array;
	bool found_exclude = false;
	int new_prog_idx = 0;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog) {
				found_exclude = true;
				continue;
			}
			if (existing->prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (existing->prog == include_prog)
				return -EEXIST;
		}
	}

	if (exclude_prog && !found_exclude)
		return -ENOENT;

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing = old_array->items;
		for (; existing->prog; existing++)
			if (existing->prog != exclude_prog &&
			    existing->prog != &dummy_bpf_prog.prog) {
				array->items[new_prog_idx++].prog =
					existing->prog;
			}
	}
	if (include_prog)
		array->items[new_prog_idx++].prog = include_prog;
	array->items[new_prog_idx].prog = NULL;
	*new_array = array;
	return 0;
}
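/* Sketch of a typical attach-side use (hypothetical caller; attach_point and
 * new_prog are illustrative, and old_array would normally be read under the
 * lock protecting the attach point):
 *
 *	struct bpf_prog_array *new_array;
 *	int err;
 *
 *	err = bpf_prog_array_copy(old_array, NULL, new_prog, &new_array);
 *	if (err)
 *		return err;
 *	rcu_assign_pointer(attach_point->progs, new_array);
 *	bpf_prog_array_free(old_array);
 */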

int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
								       : 0;
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len)
{
	struct bpf_map *map;
	u32 i;

	for (i = 0; i < len; i++) {
		map = used_maps[i];
		if (map->ops->map_poke_untrack)
			map->ops->map_poke_untrack(map, aux);
		bpf_map_put(map);
	}
}

static void bpf_free_used_maps(struct bpf_prog_aux *aux)
{
	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
	kfree(aux->used_maps);
}

void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
			  struct btf_mod_pair *used_btfs, u32 len)
{
#ifdef CONFIG_BPF_SYSCALL
	struct btf_mod_pair *btf_mod;
	u32 i;

	for (i = 0; i < len; i++) {
		btf_mod = &used_btfs[i];
		if (btf_mod->module)
			module_put(btf_mod->module);
		btf_put(btf_mod->btf);
	}
#endif
}

static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
{
	__bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
	kfree(aux->used_btfs);
}

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_free_used_maps(aux);
	bpf_free_used_btfs(aux);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
	if (aux->prog->has_callchain_buf)
		put_callchain_buffers();
#endif
	if (aux->dst_trampoline)
		bpf_trampoline_put(aux->dst_trampoline);
	for (i = 0; i < aux->func_cnt; i++) {
		/* We can just unlink the subprog poke descriptor table as
		 * it was originally linked to the main program and is also
		 * released along with it.
		 */
		aux->func[i]->aux->poke_tab = NULL;
		bpf_jit_free(aux->func[i]);
	}
	if (aux->func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	if (aux->dst_prog)
		bpf_prog_put(aux->dst_prog);
	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);
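/* Note: the actual teardown is pushed to a workqueue rather than done inline
 * so that bpf_prog_free_deferred(), parts of which may sleep (JIT image and
 * map/BTF releases), runs in process context even when the last reference is
 * dropped from a context that must not sleep.
 */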

/* RNG for unprivileged user space, with state separate from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea of using some
	 * of the registers passed into this function, note that it is
	 * called both from native eBPF and from classic-to-eBPF
	 * transformations, and the register assignments differ between
	 * the two, e.g. classic BPF always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}
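/* Program-side view (sketch): bpf_get_prandom_u32_proto wires its .func to
 * bpf_user_rnd_u32(), so a program's
 *
 *	u32 r = bpf_get_prandom_u32();
 *
 * ends up in the function above, drawing from the per-CPU state rather than
 * from the shared prandom_u32() state.
 */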

BPF_CALL_0(bpf_get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
const struct bpf_func_proto bpf_jiffies64_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
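/* These weak, zero-initialized protos only exist so that this file still
 * links when the bpf syscall (and with it the real helper definitions, e.g.
 * in kernel/bpf/helpers.c) is not built in; otherwise the strong definitions
 * override them at link time.
 */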

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
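/* .func is deliberately NULL: tail calls are never dispatched through a
 * helper function pointer, the interpreter and the JITs implement them as a
 * special jump. Program-side sketch (map and index are hypothetical):
 *
 *	bpf_tail_call(ctx, &jmp_table, slot);
 *	// execution only continues here if the tail call failed
 *	// (empty slot or tail-call limit exceeded)
 */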

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}
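/* Sketch of what an arch backend's strong override typically looks like
 * (simplified and hypothetical; real implementations live in
 * arch/<arch>/net/bpf_jit_comp.c):
 *
 *	struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 *	{
 *		// emit native code for prog->insnsi
 *		prog->bpf_func = (void *)emitted_image;
 *		prog->jited = 1;
 *		return prog;
 *	}
 */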

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* Return TRUE if the JIT backend wants the verifier to enable sub-register
 * usage analysis code and wants explicit zero extension inserted by the
 * verifier. Otherwise, return FALSE.
 *
 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
 * you don't override this. JITs that don't want these extra insns can detect
 * them using insn_is_zext().
 */
bool __weak bpf_jit_needs_zext(void)
{
	return false;
}
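/* Sketch (hypothetical 64-bit JIT): a backend that returns true above but
 * wants to elide the zero extension the verifier adds after BPF_CMPXCHG can
 * spot it while emitting:
 *
 *	if (insn_is_zext(&insn[1]))
 *		... skip the next insn, the destination is already zeroed ...
 */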

bool __weak bpf_jit_supports_kfunc_call(void)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
			      void *addr1, void *addr2)
{
	return -ENOTSUPP;
}

DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);