// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 * Jay Schulist <jschlst@samba.org>
 * Alexei Starovoitov <ast@plumgrid.com>
 * Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

59/* No hurry in this branch
60 *
61 * Exported for the bpf jit load helper.
62 */
63void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
64{
65 u8 *ptr = NULL;
66
67 if (k >= SKF_NET_OFF)
68 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
69 else if (k >= SKF_LL_OFF)
70 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
71
72 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
73 return ptr;
74
75 return NULL;
76}
77
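/* Allocate the program image in whole pages via vmalloc and attach a
 * zeroed aux structure; per-CPU statistics are set up separately in
 * bpf_prog_alloc().
 */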
78struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
79{
80 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
81 struct bpf_prog_aux *aux;
82 struct bpf_prog *fp;
83
84 size = round_up(size, PAGE_SIZE);
85 fp = __vmalloc(size, gfp_flags);
86 if (fp == NULL)
87 return NULL;
88
89 aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
90 if (aux == NULL) {
91 vfree(fp);
92 return NULL;
93 }
94
95 fp->pages = size / PAGE_SIZE;
96 fp->aux = aux;
97 fp->aux->prog = fp;
98 fp->jit_requested = ebpf_jit_enabled();
99
100 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
101
102 return fp;
103}
104
105struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
106{
107 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
108 struct bpf_prog *prog;
109 int cpu;
110
111 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
112 if (!prog)
113 return NULL;
114
115 prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
116 if (!prog->aux->stats) {
117 kfree(prog->aux);
118 vfree(prog);
119 return NULL;
120 }
121
122 for_each_possible_cpu(cpu) {
123 struct bpf_prog_stats *pstats;
124
125 pstats = per_cpu_ptr(prog->aux->stats, cpu);
126 u64_stats_init(&pstats->syncp);
127 }
128 return prog;
129}
130EXPORT_SYMBOL_GPL(bpf_prog_alloc);
131
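/* Pre-allocate the array that will map each line info entry to a JITed
 * address. Only needed when line info is present and a JIT is requested.
 */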
132int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
133{
134 if (!prog->aux->nr_linfo || !prog->jit_requested)
135 return 0;
136
137 prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
138 sizeof(*prog->aux->jited_linfo),
139 GFP_KERNEL | __GFP_NOWARN);
140 if (!prog->aux->jited_linfo)
141 return -ENOMEM;
142
143 return 0;
144}
145
146void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
147{
148 kfree(prog->aux->jited_linfo);
149 prog->aux->jited_linfo = NULL;
150}
151
152void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
153{
154 if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
155 bpf_prog_free_jited_linfo(prog);
156}
157
158/* The jit engine is responsible to provide an array
159 * for insn_off to the jited_off mapping (insn_to_jit_off).
160 *
161 * The idx to this array is the insn_off. Hence, the insn_off
162 * here is relative to the prog itself instead of the main prog.
163 * This array has one entry for each xlated bpf insn.
164 *
165 * jited_off is the byte off to the last byte of the jited insn.
166 *
167 * Hence, with
168 * insn_start:
169 * The first bpf insn off of the prog. The insn off
170 * here is relative to the main prog.
171 * e.g. if prog is a subprog, insn_start > 0
172 * linfo_idx:
173 * The prog's idx to prog->aux->linfo and jited_linfo
174 *
175 * jited_linfo[linfo_idx] = prog->bpf_func
176 *
177 * For i > linfo_idx,
178 *
179 * jited_linfo[i] = prog->bpf_func +
180 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
181 */
182void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
183 const u32 *insn_to_jit_off)
184{
185 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
186 const struct bpf_line_info *linfo;
187 void **jited_linfo;
188
189 if (!prog->aux->jited_linfo)
190 /* Userspace did not provide linfo */
191 return;
192
193 linfo_idx = prog->aux->linfo_idx;
194 linfo = &prog->aux->linfo[linfo_idx];
195 insn_start = linfo[0].insn_off;
196 insn_end = insn_start + prog->len;
197
198 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
199 jited_linfo[0] = prog->bpf_func;
200
201 nr_linfo = prog->aux->nr_linfo - linfo_idx;
202
203 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
204 /* The verifier ensures that linfo[i].insn_off is
205 * strictly increasing
206 */
207 jited_linfo[i] = prog->bpf_func +
208 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
209}
210
211void bpf_prog_free_linfo(struct bpf_prog *prog)
212{
213 bpf_prog_free_jited_linfo(prog);
214 kvfree(prog->aux->linfo);
215}
216
217struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
218 gfp_t gfp_extra_flags)
219{
220 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
221 struct bpf_prog *fp;
222 u32 pages, delta;
223 int ret;
224
225 size = round_up(size, PAGE_SIZE);
226 pages = size / PAGE_SIZE;
227 if (pages <= fp_old->pages)
228 return fp_old;
229
230 delta = pages - fp_old->pages;
231 ret = __bpf_prog_charge(fp_old->aux->user, delta);
232 if (ret)
233 return NULL;
234
235 fp = __vmalloc(size, gfp_flags);
236 if (fp == NULL) {
237 __bpf_prog_uncharge(fp_old->aux->user, delta);
238 } else {
239 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
240 fp->pages = pages;
241 fp->aux->prog = fp;
242
243 /* We keep fp->aux from fp_old around in the new
244 * reallocated structure.
245 */
246 fp_old->aux = NULL;
247 __bpf_prog_free(fp_old);
248 }
249
250 return fp;
251}
252
253void __bpf_prog_free(struct bpf_prog *fp)
254{
255 if (fp->aux) {
256 free_percpu(fp->aux->stats);
257 kfree(fp->aux->poke_tab);
258 kfree(fp->aux);
259 }
260 vfree(fp);
261}
262
263int bpf_prog_calc_tag(struct bpf_prog *fp)
264{
265 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
266 u32 raw_size = bpf_prog_tag_scratch_size(fp);
267 u32 digest[SHA1_DIGEST_WORDS];
268 u32 ws[SHA1_WORKSPACE_WORDS];
269 u32 i, bsize, psize, blocks;
270 struct bpf_insn *dst;
271 bool was_ld_map;
272 u8 *raw, *todo;
273 __be32 *result;
274 __be64 *bits;
275
276 raw = vmalloc(raw_size);
277 if (!raw)
278 return -ENOMEM;
279
280 sha1_init(digest);
281 memset(ws, 0, sizeof(ws));
282
	/* We need to take out the map fd for the digest calculation
	 * since map fds are unstable from the user space side.
	 */
286 dst = (void *)raw;
287 for (i = 0, was_ld_map = false; i < fp->len; i++) {
288 dst[i] = fp->insnsi[i];
289 if (!was_ld_map &&
290 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
291 (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
292 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
293 was_ld_map = true;
294 dst[i].imm = 0;
295 } else if (was_ld_map &&
296 dst[i].code == 0 &&
297 dst[i].dst_reg == 0 &&
298 dst[i].src_reg == 0 &&
299 dst[i].off == 0) {
300 was_ld_map = false;
301 dst[i].imm = 0;
302 } else {
303 was_ld_map = false;
304 }
305 }
306
307 psize = bpf_prog_insn_size(fp);
308 memset(&raw[psize], 0, raw_size - psize);
309 raw[psize++] = 0x80;
310
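	/* Standard SHA-1 padding: the 0x80 terminator was appended above;
	 * now round up to a whole block and store the message length in
	 * bits as a big-endian 64-bit value at the very end, spilling
	 * into an extra block if there is not enough room left.
	 */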
311 bsize = round_up(psize, SHA1_BLOCK_SIZE);
312 blocks = bsize / SHA1_BLOCK_SIZE;
313 todo = raw;
314 if (bsize - psize >= sizeof(__be64)) {
315 bits = (__be64 *)(todo + bsize - sizeof(__be64));
316 } else {
317 bits = (__be64 *)(todo + bsize + bits_offset);
318 blocks++;
319 }
320 *bits = cpu_to_be64((psize - 1) << 3);
321
322 while (blocks--) {
323 sha1_transform(digest, todo, ws);
324 todo += SHA1_BLOCK_SIZE;
325 }
326
327 result = (__force __be32 *)digest;
328 for (i = 0; i < SHA1_DIGEST_WORDS; i++)
329 result[i] = cpu_to_be32(digest[i]);
330 memcpy(fp->tag, result, sizeof(fp->tag));
331
332 vfree(raw);
333 return 0;
334}
335
336static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
337 s32 end_new, s32 curr, const bool probe_pass)
338{
339 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
340 s32 delta = end_new - end_old;
341 s64 imm = insn->imm;
342
343 if (curr < pos && curr + imm + 1 >= end_old)
344 imm += delta;
345 else if (curr >= end_new && curr + imm + 1 < end_new)
346 imm -= delta;
347 if (imm < imm_min || imm > imm_max)
348 return -ERANGE;
349 if (!probe_pass)
350 insn->imm = imm;
351 return 0;
352}
353
354static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
355 s32 end_new, s32 curr, const bool probe_pass)
356{
357 const s32 off_min = S16_MIN, off_max = S16_MAX;
358 s32 delta = end_new - end_old;
359 s32 off = insn->off;
360
361 if (curr < pos && curr + off + 1 >= end_old)
362 off += delta;
363 else if (curr >= end_new && curr + off + 1 < end_new)
364 off -= delta;
365 if (off < off_min || off > off_max)
366 return -ERANGE;
367 if (!probe_pass)
368 insn->off = off;
369 return 0;
370}
371
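/* Walk the whole program and fix up jump offsets and pseudo-call
 * immediates that cross the patched region. In the probe pass only the
 * overflow checks are performed and the instructions are left untouched.
 */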
372static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
373 s32 end_new, const bool probe_pass)
374{
375 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
376 struct bpf_insn *insn = prog->insnsi;
377 int ret = 0;
378
379 for (i = 0; i < insn_cnt; i++, insn++) {
380 u8 code;
381
382 /* In the probing pass we still operate on the original,
383 * unpatched image in order to check overflows before we
384 * do any other adjustments. Therefore skip the patchlet.
385 */
386 if (probe_pass && i == pos) {
387 i = end_new;
388 insn = prog->insnsi + end_old;
389 }
390 code = insn->code;
391 if ((BPF_CLASS(code) != BPF_JMP &&
392 BPF_CLASS(code) != BPF_JMP32) ||
393 BPF_OP(code) == BPF_EXIT)
394 continue;
395 /* Adjust offset of jmps if we cross patch boundaries. */
396 if (BPF_OP(code) == BPF_CALL) {
397 if (insn->src_reg != BPF_PSEUDO_CALL)
398 continue;
399 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
400 end_new, i, probe_pass);
401 } else {
402 ret = bpf_adj_delta_to_off(insn, pos, end_old,
403 end_new, i, probe_pass);
404 }
405 if (ret)
406 break;
407 }
408
409 return ret;
410}
411
412static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
413{
414 struct bpf_line_info *linfo;
415 u32 i, nr_linfo;
416
417 nr_linfo = prog->aux->nr_linfo;
418 if (!nr_linfo || !delta)
419 return;
420
421 linfo = prog->aux->linfo;
422
423 for (i = 0; i < nr_linfo; i++)
424 if (off < linfo[i].insn_off)
425 break;
426
427 /* Push all off < linfo[i].insn_off by delta */
428 for (; i < nr_linfo; i++)
429 linfo[i].insn_off += delta;
430}
431
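/* Replace the single instruction at @off with the @len instructions in
 * @patch, growing the program if needed and adjusting branch targets and
 * line info afterwards.
 */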
432struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
433 const struct bpf_insn *patch, u32 len)
434{
435 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
436 const u32 cnt_max = S16_MAX;
437 struct bpf_prog *prog_adj;
438 int err;
439
440 /* Since our patchlet doesn't expand the image, we're done. */
441 if (insn_delta == 0) {
442 memcpy(prog->insnsi + off, patch, sizeof(*patch));
443 return prog;
444 }
445
446 insn_adj_cnt = prog->len + insn_delta;
447
448 /* Reject anything that would potentially let the insn->off
449 * target overflow when we have excessive program expansions.
450 * We need to probe here before we do any reallocation where
451 * we afterwards may not fail anymore.
452 */
453 if (insn_adj_cnt > cnt_max &&
454 (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
455 return ERR_PTR(err);
456
457 /* Several new instructions need to be inserted. Make room
458 * for them. Likely, there's no need for a new allocation as
459 * last page could have large enough tailroom.
460 */
461 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
462 GFP_USER);
463 if (!prog_adj)
464 return ERR_PTR(-ENOMEM);
465
466 prog_adj->len = insn_adj_cnt;
467
468 /* Patching happens in 3 steps:
469 *
470 * 1) Move over tail of insnsi from next instruction onwards,
471 * so we can patch the single target insn with one or more
472 * new ones (patching is always from 1 to n insns, n > 0).
473 * 2) Inject new instructions at the target location.
474 * 3) Adjust branch offsets if necessary.
475 */
476 insn_rest = insn_adj_cnt - off - len;
477
478 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
479 sizeof(*patch) * insn_rest);
480 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
481
	/* We are guaranteed not to fail at this point; otherwise the
	 * ship has sailed and there is no way back to the original
	 * state. An overflow cannot happen at this point.
	 */
486 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
487
488 bpf_adj_linfo(prog_adj, off, insn_delta);
489
490 return prog_adj;
491}
492
493int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
494{
	/* Branch offsets can't overflow when the program is shrinking, so
	 * there is no need to call bpf_adj_branches(..., true) here.
	 */
498 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
499 sizeof(struct bpf_insn) * (prog->len - off - cnt));
500 prog->len -= cnt;
501
502 return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
503}
504
505static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
506{
507 int i;
508
509 for (i = 0; i < fp->aux->func_cnt; i++)
510 bpf_prog_kallsyms_del(fp->aux->func[i]);
511}
512
513void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
514{
515 bpf_prog_kallsyms_del_subprogs(fp);
516 bpf_prog_kallsyms_del(fp);
517}
518
519#ifdef CONFIG_BPF_JIT
520/* All BPF JIT sysctl knobs here. */
521int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
522int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
523int bpf_jit_harden __read_mostly;
524long bpf_jit_limit __read_mostly;
525
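/* Record the address range covered by the JITed image for kallsyms:
 * from the program entry point to the end of the binary header's pages.
 */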
526static void
527bpf_prog_ksym_set_addr(struct bpf_prog *prog)
528{
529 const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
530 unsigned long addr = (unsigned long)hdr;
531
532 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
533
534 prog->aux->ksym.start = (unsigned long) prog->bpf_func;
535 prog->aux->ksym.end = addr + hdr->pages * PAGE_SIZE;
536}
537
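/* Build the kallsyms name: "bpf_prog_" followed by the program tag in
 * hex and, if available, '_' plus the BTF function name or prog name.
 */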
538static void
539bpf_prog_ksym_set_name(struct bpf_prog *prog)
540{
541 char *sym = prog->aux->ksym.name;
542 const char *end = sym + KSYM_NAME_LEN;
543 const struct btf_type *type;
544 const char *func_name;
545
546 BUILD_BUG_ON(sizeof("bpf_prog_") +
547 sizeof(prog->tag) * 2 +
548 /* name has been null terminated.
549 * We should need +1 for the '_' preceding
550 * the name. However, the null character
551 * is double counted between the name and the
552 * sizeof("bpf_prog_") above, so we omit
553 * the +1 here.
554 */
555 sizeof(prog->aux->name) > KSYM_NAME_LEN);
556
557 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
558 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
559
560 /* prog->aux->name will be ignored if full btf name is available */
561 if (prog->aux->func_info_cnt) {
562 type = btf_type_by_id(prog->aux->btf,
563 prog->aux->func_info[prog->aux->func_idx].type_id);
564 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
565 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
566 return;
567 }
568
569 if (prog->aux->name[0])
570 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
571 else
572 *sym = 0;
573}
574
575static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
576{
577 return container_of(n, struct bpf_ksym, tnode)->start;
578}
579
580static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
581 struct latch_tree_node *b)
582{
583 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
584}
585
586static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
587{
588 unsigned long val = (unsigned long)key;
589 const struct bpf_ksym *ksym;
590
591 ksym = container_of(n, struct bpf_ksym, tnode);
592
593 if (val < ksym->start)
594 return -1;
595 if (val >= ksym->end)
596 return 1;
597
598 return 0;
599}
600
601static const struct latch_tree_ops bpf_tree_ops = {
602 .less = bpf_tree_less,
603 .comp = bpf_tree_comp,
604};
605
606static DEFINE_SPINLOCK(bpf_lock);
607static LIST_HEAD(bpf_kallsyms);
608static struct latch_tree_root bpf_tree __cacheline_aligned;
609
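/* Publish a symbol on both the list walked by bpf_get_kallsym() and the
 * latch tree used for address lookups, under bpf_lock.
 */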
610void bpf_ksym_add(struct bpf_ksym *ksym)
611{
612 spin_lock_bh(&bpf_lock);
613 WARN_ON_ONCE(!list_empty(&ksym->lnode));
614 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
615 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
616 spin_unlock_bh(&bpf_lock);
617}
618
619static void __bpf_ksym_del(struct bpf_ksym *ksym)
620{
621 if (list_empty(&ksym->lnode))
622 return;
623
624 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
625 list_del_rcu(&ksym->lnode);
626}
627
628void bpf_ksym_del(struct bpf_ksym *ksym)
629{
630 spin_lock_bh(&bpf_lock);
631 __bpf_ksym_del(ksym);
632 spin_unlock_bh(&bpf_lock);
633}
634
635static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
636{
637 return fp->jited && !bpf_prog_was_classic(fp);
638}
639
640static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
641{
642 return list_empty(&fp->aux->ksym.lnode) ||
643 fp->aux->ksym.lnode.prev == LIST_POISON2;
644}
645
646void bpf_prog_kallsyms_add(struct bpf_prog *fp)
647{
648 if (!bpf_prog_kallsyms_candidate(fp) ||
649 !bpf_capable())
650 return;
651
652 bpf_prog_ksym_set_addr(fp);
653 bpf_prog_ksym_set_name(fp);
654 fp->aux->ksym.prog = true;
655
656 bpf_ksym_add(&fp->aux->ksym);
657}
658
659void bpf_prog_kallsyms_del(struct bpf_prog *fp)
660{
661 if (!bpf_prog_kallsyms_candidate(fp))
662 return;
663
664 bpf_ksym_del(&fp->aux->ksym);
665}
666
667static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
668{
669 struct latch_tree_node *n;
670
671 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
672 return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
673}
674
675const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
676 unsigned long *off, char *sym)
677{
678 struct bpf_ksym *ksym;
679 char *ret = NULL;
680
681 rcu_read_lock();
682 ksym = bpf_ksym_find(addr);
683 if (ksym) {
684 unsigned long symbol_start = ksym->start;
685 unsigned long symbol_end = ksym->end;
686
687 strncpy(sym, ksym->name, KSYM_NAME_LEN);
688
689 ret = sym;
690 if (size)
691 *size = symbol_end - symbol_start;
692 if (off)
693 *off = addr - symbol_start;
694 }
695 rcu_read_unlock();
696
697 return ret;
698}
699
700bool is_bpf_text_address(unsigned long addr)
701{
702 bool ret;
703
704 rcu_read_lock();
705 ret = bpf_ksym_find(addr) != NULL;
706 rcu_read_unlock();
707
708 return ret;
709}
710
711static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
712{
713 struct bpf_ksym *ksym = bpf_ksym_find(addr);
714
715 return ksym && ksym->prog ?
716 container_of(ksym, struct bpf_prog_aux, ksym)->prog :
717 NULL;
718}
719
720const struct exception_table_entry *search_bpf_extables(unsigned long addr)
721{
722 const struct exception_table_entry *e = NULL;
723 struct bpf_prog *prog;
724
725 rcu_read_lock();
726 prog = bpf_prog_ksym_find(addr);
727 if (!prog)
728 goto out;
729 if (!prog->aux->num_exentries)
730 goto out;
731
732 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
733out:
734 rcu_read_unlock();
735 return e;
736}
737
738int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
739 char *sym)
740{
741 struct bpf_ksym *ksym;
742 unsigned int it = 0;
743 int ret = -ERANGE;
744
745 if (!bpf_jit_kallsyms_enabled())
746 return ret;
747
748 rcu_read_lock();
749 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
750 if (it++ != symnum)
751 continue;
752
753 strncpy(sym, ksym->name, KSYM_NAME_LEN);
754
755 *value = ksym->start;
756 *type = BPF_SYM_ELF_TYPE;
757
758 ret = 0;
759 break;
760 }
761 rcu_read_unlock();
762
763 return ret;
764}
765
766int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
767 struct bpf_jit_poke_descriptor *poke)
768{
769 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
770 static const u32 poke_tab_max = 1024;
771 u32 slot = prog->aux->size_poke_tab;
772 u32 size = slot + 1;
773
774 if (size > poke_tab_max)
775 return -ENOSPC;
776 if (poke->ip || poke->ip_stable || poke->adj_off)
777 return -EINVAL;
778
779 switch (poke->reason) {
780 case BPF_POKE_REASON_TAIL_CALL:
781 if (!poke->tail_call.map)
782 return -EINVAL;
783 break;
784 default:
785 return -EINVAL;
786 }
787
788 tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
789 if (!tab)
790 return -ENOMEM;
791
792 memcpy(&tab[slot], poke, sizeof(*poke));
793 prog->aux->size_poke_tab = size;
794 prog->aux->poke_tab = tab;
795
796 return slot;
797}
798
799static atomic_long_t bpf_jit_current;
800
801/* Can be overridden by an arch's JIT compiler if it has a custom,
802 * dedicated BPF backend memory area, or if neither of the two
803 * below apply.
804 */
805u64 __weak bpf_jit_alloc_exec_limit(void)
806{
807#if defined(MODULES_VADDR)
808 return MODULES_END - MODULES_VADDR;
809#else
810 return VMALLOC_END - VMALLOC_START;
811#endif
812}
813
814static int __init bpf_jit_charge_init(void)
815{
816 /* Only used as heuristic here to derive limit. */
817 bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
818 PAGE_SIZE), LONG_MAX);
819 return 0;
820}
821pure_initcall(bpf_jit_charge_init);
822
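/* Charge @pages against the global JIT allocation limit. Unprivileged
 * callers are rejected once the limit would be exceeded.
 */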
823static int bpf_jit_charge_modmem(u32 pages)
824{
825 if (atomic_long_add_return(pages, &bpf_jit_current) >
826 (bpf_jit_limit >> PAGE_SHIFT)) {
827 if (!capable(CAP_SYS_ADMIN)) {
828 atomic_long_sub(pages, &bpf_jit_current);
829 return -EPERM;
830 }
831 }
832
833 return 0;
834}
835
836static void bpf_jit_uncharge_modmem(u32 pages)
837{
838 atomic_long_sub(pages, &bpf_jit_current);
839}
840
841void *__weak bpf_jit_alloc_exec(unsigned long size)
842{
843 return module_alloc(size);
844}
845
846void __weak bpf_jit_free_exec(void *addr)
847{
848 module_memfree(addr);
849}
850
851struct bpf_binary_header *
852bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
853 unsigned int alignment,
854 bpf_jit_fill_hole_t bpf_fill_ill_insns)
855{
856 struct bpf_binary_header *hdr;
857 u32 size, hole, start, pages;
858
859 WARN_ON_ONCE(!is_power_of_2(alignment) ||
860 alignment > BPF_IMAGE_ALIGNMENT);
861
	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
866 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
867 pages = size / PAGE_SIZE;
868
869 if (bpf_jit_charge_modmem(pages))
870 return NULL;
871 hdr = bpf_jit_alloc_exec(size);
872 if (!hdr) {
873 bpf_jit_uncharge_modmem(pages);
874 return NULL;
875 }
876
877 /* Fill space with illegal/arch-dep instructions. */
878 bpf_fill_ill_insns(hdr, size);
879
880 hdr->pages = pages;
881 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
882 PAGE_SIZE - sizeof(*hdr));
883 start = (get_random_int() % hole) & ~(alignment - 1);
884
885 /* Leave a random number of instructions before BPF code. */
886 *image_ptr = &hdr->image[start];
887
888 return hdr;
889}
890
891void bpf_jit_binary_free(struct bpf_binary_header *hdr)
892{
893 u32 pages = hdr->pages;
894
895 bpf_jit_free_exec(hdr);
896 bpf_jit_uncharge_modmem(pages);
897}
898
/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, e.g. when they only
 * implement a cBPF JIT, do not set images read-only, etc.
 */
903void __weak bpf_jit_free(struct bpf_prog *fp)
904{
905 if (fp->jited) {
906 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
907
908 bpf_jit_binary_free(hdr);
909
910 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
911 }
912
913 bpf_prog_unlock_free(fp);
914}
915
916int bpf_jit_get_func_addr(const struct bpf_prog *prog,
917 const struct bpf_insn *insn, bool extra_pass,
918 u64 *func_addr, bool *func_addr_fixed)
919{
920 s16 off = insn->off;
921 s32 imm = insn->imm;
922 u8 *addr;
923
924 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
925 if (!*func_addr_fixed) {
926 /* Place-holder address till the last pass has collected
927 * all addresses for JITed subprograms in which case we
928 * can pick them up from prog->aux.
929 */
930 if (!extra_pass)
931 addr = NULL;
932 else if (prog->aux->func &&
933 off >= 0 && off < prog->aux->func_cnt)
934 addr = (u8 *)prog->aux->func[off]->bpf_func;
935 else
936 return -EINVAL;
937 } else {
938 /* Address of a BPF helper call. Since part of the core
939 * kernel, it's always at a fixed location. __bpf_call_base
940 * and the helper with imm relative to it are both in core
941 * kernel.
942 */
943 addr = (u8 *)__bpf_call_base + imm;
944 }
945
946 *func_addr = (unsigned long)addr;
947 return 0;
948}
949
950static int bpf_jit_blind_insn(const struct bpf_insn *from,
951 const struct bpf_insn *aux,
952 struct bpf_insn *to_buff,
953 bool emit_zext)
954{
955 struct bpf_insn *to = to_buff;
956 u32 imm_rnd = get_random_int();
957 s16 off;
958
959 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
960 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
961
962 /* Constraints on AX register:
963 *
964 * AX register is inaccessible from user space. It is mapped in
965 * all JITs, and used here for constant blinding rewrites. It is
966 * typically "stateless" meaning its contents are only valid within
967 * the executed instruction, but not across several instructions.
968 * There are a few exceptions however which are further detailed
969 * below.
970 *
971 * Constant blinding is only used by JITs, not in the interpreter.
972 * The interpreter uses AX in some occasions as a local temporary
973 * register e.g. in DIV or MOD instructions.
974 *
975 * In restricted circumstances, the verifier can also use the AX
976 * register for rewrites as long as they do not interfere with
977 * the above cases!
978 */
979 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
980 goto out;
981
982 if (from->imm == 0 &&
983 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
984 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
985 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
986 goto out;
987 }
988
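	/* For the immediate-carrying instructions below, load imm ^ imm_rnd
	 * into AX, XOR it with imm_rnd again at run time to recover the
	 * original value, and then use AX as a register operand so the
	 * constant never appears literally in the image.
	 */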
989 switch (from->code) {
990 case BPF_ALU | BPF_ADD | BPF_K:
991 case BPF_ALU | BPF_SUB | BPF_K:
992 case BPF_ALU | BPF_AND | BPF_K:
993 case BPF_ALU | BPF_OR | BPF_K:
994 case BPF_ALU | BPF_XOR | BPF_K:
995 case BPF_ALU | BPF_MUL | BPF_K:
996 case BPF_ALU | BPF_MOV | BPF_K:
997 case BPF_ALU | BPF_DIV | BPF_K:
998 case BPF_ALU | BPF_MOD | BPF_K:
999 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1000 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1001 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1002 break;
1003
1004 case BPF_ALU64 | BPF_ADD | BPF_K:
1005 case BPF_ALU64 | BPF_SUB | BPF_K:
1006 case BPF_ALU64 | BPF_AND | BPF_K:
1007 case BPF_ALU64 | BPF_OR | BPF_K:
1008 case BPF_ALU64 | BPF_XOR | BPF_K:
1009 case BPF_ALU64 | BPF_MUL | BPF_K:
1010 case BPF_ALU64 | BPF_MOV | BPF_K:
1011 case BPF_ALU64 | BPF_DIV | BPF_K:
1012 case BPF_ALU64 | BPF_MOD | BPF_K:
1013 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1014 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1015 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1016 break;
1017
1018 case BPF_JMP | BPF_JEQ | BPF_K:
1019 case BPF_JMP | BPF_JNE | BPF_K:
1020 case BPF_JMP | BPF_JGT | BPF_K:
1021 case BPF_JMP | BPF_JLT | BPF_K:
1022 case BPF_JMP | BPF_JGE | BPF_K:
1023 case BPF_JMP | BPF_JLE | BPF_K:
1024 case BPF_JMP | BPF_JSGT | BPF_K:
1025 case BPF_JMP | BPF_JSLT | BPF_K:
1026 case BPF_JMP | BPF_JSGE | BPF_K:
1027 case BPF_JMP | BPF_JSLE | BPF_K:
1028 case BPF_JMP | BPF_JSET | BPF_K:
		/* Account for the extra offset in case of a backjump. */
1030 off = from->off;
1031 if (off < 0)
1032 off -= 2;
1033 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1034 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1035 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1036 break;
1037
1038 case BPF_JMP32 | BPF_JEQ | BPF_K:
1039 case BPF_JMP32 | BPF_JNE | BPF_K:
1040 case BPF_JMP32 | BPF_JGT | BPF_K:
1041 case BPF_JMP32 | BPF_JLT | BPF_K:
1042 case BPF_JMP32 | BPF_JGE | BPF_K:
1043 case BPF_JMP32 | BPF_JLE | BPF_K:
1044 case BPF_JMP32 | BPF_JSGT | BPF_K:
1045 case BPF_JMP32 | BPF_JSLT | BPF_K:
1046 case BPF_JMP32 | BPF_JSGE | BPF_K:
1047 case BPF_JMP32 | BPF_JSLE | BPF_K:
1048 case BPF_JMP32 | BPF_JSET | BPF_K:
		/* Account for the extra offset in case of a backjump. */
1050 off = from->off;
1051 if (off < 0)
1052 off -= 2;
1053 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1054 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1055 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1056 off);
1057 break;
1058
1059 case BPF_LD | BPF_IMM | BPF_DW:
1060 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1061 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1062 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1063 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1064 break;
1065 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1066 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1067 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1068 if (emit_zext)
1069 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1070 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1071 break;
1072
1073 case BPF_ST | BPF_MEM | BPF_DW:
1074 case BPF_ST | BPF_MEM | BPF_W:
1075 case BPF_ST | BPF_MEM | BPF_H:
1076 case BPF_ST | BPF_MEM | BPF_B:
1077 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1078 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1079 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1080 break;
1081 }
1082out:
1083 return to - to_buff;
1084}
1085
1086static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1087 gfp_t gfp_extra_flags)
1088{
1089 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1090 struct bpf_prog *fp;
1091
1092 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1093 if (fp != NULL) {
1094 /* aux->prog still points to the fp_other one, so
1095 * when promoting the clone to the real program,
1096 * this still needs to be adapted.
1097 */
1098 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1099 }
1100
1101 return fp;
1102}
1103
1104static void bpf_prog_clone_free(struct bpf_prog *fp)
1105{
1106 /* aux was stolen by the other clone, so we cannot free
1107 * it from this path! It will be freed eventually by the
1108 * other program on release.
1109 *
1110 * At this point, we don't need a deferred release since
1111 * clone is guaranteed to not be locked.
1112 */
1113 fp->aux = NULL;
1114 __bpf_prog_free(fp);
1115}
1116
1117void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1118{
1119 /* We have to repoint aux->prog to self, as we don't
1120 * know whether fp here is the clone or the original.
1121 */
1122 fp->aux->prog = fp;
1123 bpf_prog_clone_free(fp_other);
1124}
1125
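/* Blind all constants in a clone of @prog; the clone shares its aux data
 * with the original program and replaces it for JITing purposes.
 */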
1126struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1127{
1128 struct bpf_insn insn_buff[16], aux[2];
1129 struct bpf_prog *clone, *tmp;
1130 int insn_delta, insn_cnt;
1131 struct bpf_insn *insn;
1132 int i, rewritten;
1133
1134 if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
1135 return prog;
1136
1137 clone = bpf_prog_clone_create(prog, GFP_USER);
1138 if (!clone)
1139 return ERR_PTR(-ENOMEM);
1140
1141 insn_cnt = clone->len;
1142 insn = clone->insnsi;
1143
1144 for (i = 0; i < insn_cnt; i++, insn++) {
1145 /* We temporarily need to hold the original ld64 insn
1146 * so that we can still access the first part in the
1147 * second blinding run.
1148 */
1149 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1150 insn[1].code == 0)
1151 memcpy(aux, insn, sizeof(aux));
1152
1153 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1154 clone->aux->verifier_zext);
1155 if (!rewritten)
1156 continue;
1157
1158 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1159 if (IS_ERR(tmp)) {
1160 /* Patching may have repointed aux->prog during
1161 * realloc from the original one, so we need to
1162 * fix it up here on error.
1163 */
1164 bpf_jit_prog_release_other(prog, clone);
1165 return tmp;
1166 }
1167
1168 clone = tmp;
1169 insn_delta = rewritten - 1;
1170
1171 /* Walk new program and skip insns we just inserted. */
1172 insn = clone->insnsi + i + insn_delta;
1173 insn_cnt += insn_delta;
1174 i += insn_delta;
1175 }
1176
1177 clone->blinded = 1;
1178 return clone;
1179}
1180#endif /* CONFIG_BPF_JIT */
1181
1182/* Base function for offset calculation. Needs to go into .text section,
1183 * therefore keeping it non-static as well; will also be used by JITs
1184 * anyway later on, so do not let the compiler omit it. This also needs
1185 * to go into kallsyms for correlation from e.g. bpftool, so naming
1186 * must not change.
1187 */
1188noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1189{
1190 return 0;
1191}
1192EXPORT_SYMBOL_GPL(__bpf_call_base);
1193
1194/* All UAPI available opcodes. */
1195#define BPF_INSN_MAP(INSN_2, INSN_3) \
1196 /* 32 bit ALU operations. */ \
1197 /* Register based. */ \
1198 INSN_3(ALU, ADD, X), \
1199 INSN_3(ALU, SUB, X), \
1200 INSN_3(ALU, AND, X), \
1201 INSN_3(ALU, OR, X), \
1202 INSN_3(ALU, LSH, X), \
1203 INSN_3(ALU, RSH, X), \
1204 INSN_3(ALU, XOR, X), \
1205 INSN_3(ALU, MUL, X), \
1206 INSN_3(ALU, MOV, X), \
1207 INSN_3(ALU, ARSH, X), \
1208 INSN_3(ALU, DIV, X), \
1209 INSN_3(ALU, MOD, X), \
1210 INSN_2(ALU, NEG), \
1211 INSN_3(ALU, END, TO_BE), \
1212 INSN_3(ALU, END, TO_LE), \
1213 /* Immediate based. */ \
1214 INSN_3(ALU, ADD, K), \
1215 INSN_3(ALU, SUB, K), \
1216 INSN_3(ALU, AND, K), \
1217 INSN_3(ALU, OR, K), \
1218 INSN_3(ALU, LSH, K), \
1219 INSN_3(ALU, RSH, K), \
1220 INSN_3(ALU, XOR, K), \
1221 INSN_3(ALU, MUL, K), \
1222 INSN_3(ALU, MOV, K), \
1223 INSN_3(ALU, ARSH, K), \
1224 INSN_3(ALU, DIV, K), \
1225 INSN_3(ALU, MOD, K), \
1226 /* 64 bit ALU operations. */ \
1227 /* Register based. */ \
1228 INSN_3(ALU64, ADD, X), \
1229 INSN_3(ALU64, SUB, X), \
1230 INSN_3(ALU64, AND, X), \
1231 INSN_3(ALU64, OR, X), \
1232 INSN_3(ALU64, LSH, X), \
1233 INSN_3(ALU64, RSH, X), \
1234 INSN_3(ALU64, XOR, X), \
1235 INSN_3(ALU64, MUL, X), \
1236 INSN_3(ALU64, MOV, X), \
1237 INSN_3(ALU64, ARSH, X), \
1238 INSN_3(ALU64, DIV, X), \
1239 INSN_3(ALU64, MOD, X), \
1240 INSN_2(ALU64, NEG), \
1241 /* Immediate based. */ \
1242 INSN_3(ALU64, ADD, K), \
1243 INSN_3(ALU64, SUB, K), \
1244 INSN_3(ALU64, AND, K), \
1245 INSN_3(ALU64, OR, K), \
1246 INSN_3(ALU64, LSH, K), \
1247 INSN_3(ALU64, RSH, K), \
1248 INSN_3(ALU64, XOR, K), \
1249 INSN_3(ALU64, MUL, K), \
1250 INSN_3(ALU64, MOV, K), \
1251 INSN_3(ALU64, ARSH, K), \
1252 INSN_3(ALU64, DIV, K), \
1253 INSN_3(ALU64, MOD, K), \
1254 /* Call instruction. */ \
1255 INSN_2(JMP, CALL), \
1256 /* Exit instruction. */ \
1257 INSN_2(JMP, EXIT), \
1258 /* 32-bit Jump instructions. */ \
1259 /* Register based. */ \
1260 INSN_3(JMP32, JEQ, X), \
1261 INSN_3(JMP32, JNE, X), \
1262 INSN_3(JMP32, JGT, X), \
1263 INSN_3(JMP32, JLT, X), \
1264 INSN_3(JMP32, JGE, X), \
1265 INSN_3(JMP32, JLE, X), \
1266 INSN_3(JMP32, JSGT, X), \
1267 INSN_3(JMP32, JSLT, X), \
1268 INSN_3(JMP32, JSGE, X), \
1269 INSN_3(JMP32, JSLE, X), \
1270 INSN_3(JMP32, JSET, X), \
1271 /* Immediate based. */ \
1272 INSN_3(JMP32, JEQ, K), \
1273 INSN_3(JMP32, JNE, K), \
1274 INSN_3(JMP32, JGT, K), \
1275 INSN_3(JMP32, JLT, K), \
1276 INSN_3(JMP32, JGE, K), \
1277 INSN_3(JMP32, JLE, K), \
1278 INSN_3(JMP32, JSGT, K), \
1279 INSN_3(JMP32, JSLT, K), \
1280 INSN_3(JMP32, JSGE, K), \
1281 INSN_3(JMP32, JSLE, K), \
1282 INSN_3(JMP32, JSET, K), \
1283 /* Jump instructions. */ \
1284 /* Register based. */ \
1285 INSN_3(JMP, JEQ, X), \
1286 INSN_3(JMP, JNE, X), \
1287 INSN_3(JMP, JGT, X), \
1288 INSN_3(JMP, JLT, X), \
1289 INSN_3(JMP, JGE, X), \
1290 INSN_3(JMP, JLE, X), \
1291 INSN_3(JMP, JSGT, X), \
1292 INSN_3(JMP, JSLT, X), \
1293 INSN_3(JMP, JSGE, X), \
1294 INSN_3(JMP, JSLE, X), \
1295 INSN_3(JMP, JSET, X), \
1296 /* Immediate based. */ \
1297 INSN_3(JMP, JEQ, K), \
1298 INSN_3(JMP, JNE, K), \
1299 INSN_3(JMP, JGT, K), \
1300 INSN_3(JMP, JLT, K), \
1301 INSN_3(JMP, JGE, K), \
1302 INSN_3(JMP, JLE, K), \
1303 INSN_3(JMP, JSGT, K), \
1304 INSN_3(JMP, JSLT, K), \
1305 INSN_3(JMP, JSGE, K), \
1306 INSN_3(JMP, JSLE, K), \
1307 INSN_3(JMP, JSET, K), \
1308 INSN_2(JMP, JA), \
1309 /* Store instructions. */ \
1310 /* Register based. */ \
1311 INSN_3(STX, MEM, B), \
1312 INSN_3(STX, MEM, H), \
1313 INSN_3(STX, MEM, W), \
1314 INSN_3(STX, MEM, DW), \
1315 INSN_3(STX, XADD, W), \
1316 INSN_3(STX, XADD, DW), \
1317 /* Immediate based. */ \
1318 INSN_3(ST, MEM, B), \
1319 INSN_3(ST, MEM, H), \
1320 INSN_3(ST, MEM, W), \
1321 INSN_3(ST, MEM, DW), \
1322 /* Load instructions. */ \
1323 /* Register based. */ \
1324 INSN_3(LDX, MEM, B), \
1325 INSN_3(LDX, MEM, H), \
1326 INSN_3(LDX, MEM, W), \
1327 INSN_3(LDX, MEM, DW), \
1328 /* Immediate based. */ \
1329 INSN_3(LD, IMM, DW)
1330
1331bool bpf_opcode_in_insntable(u8 code)
1332{
1333#define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1334#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1335 static const bool public_insntable[256] = {
1336 [0 ... 255] = false,
1337 /* Now overwrite non-defaults ... */
1338 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1339 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1340 [BPF_LD | BPF_ABS | BPF_B] = true,
1341 [BPF_LD | BPF_ABS | BPF_H] = true,
1342 [BPF_LD | BPF_ABS | BPF_W] = true,
1343 [BPF_LD | BPF_IND | BPF_B] = true,
1344 [BPF_LD | BPF_IND | BPF_H] = true,
1345 [BPF_LD | BPF_IND | BPF_W] = true,
1346 };
1347#undef BPF_INSN_3_TBL
1348#undef BPF_INSN_2_TBL
1349 return public_insntable[code];
1350}
1351
1352#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1353u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1354{
1355 memset(dst, 0, size);
1356 return -EFAULT;
1357}
1358
1359/**
1360 * __bpf_prog_run - run eBPF program on a given context
1361 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1362 * @insn: is the array of eBPF instructions
1363 * @stack: is the eBPF storage stack
1364 *
1365 * Decode and execute eBPF instructions.
1366 */
1367static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
1368{
1369#define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1370#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1371 static const void * const jumptable[256] __annotate_jump_table = {
1372 [0 ... 255] = &&default_label,
1373 /* Now overwrite non-defaults ... */
1374 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1375 /* Non-UAPI available opcodes. */
1376 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1377 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1378 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1379 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1380 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1381 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1382 };
1383#undef BPF_INSN_3_LBL
1384#undef BPF_INSN_2_LBL
1385 u32 tail_call_cnt = 0;
1386
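	/* Both CONT and CONT_JMP advance to the next instruction and
	 * re-dispatch through the jump table above.
	 */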
1387#define CONT ({ insn++; goto select_insn; })
1388#define CONT_JMP ({ insn++; goto select_insn; })
1389
1390select_insn:
1391 goto *jumptable[insn->code];
1392
1393 /* ALU */
1394#define ALU(OPCODE, OP) \
1395 ALU64_##OPCODE##_X: \
1396 DST = DST OP SRC; \
1397 CONT; \
1398 ALU_##OPCODE##_X: \
1399 DST = (u32) DST OP (u32) SRC; \
1400 CONT; \
1401 ALU64_##OPCODE##_K: \
1402 DST = DST OP IMM; \
1403 CONT; \
1404 ALU_##OPCODE##_K: \
1405 DST = (u32) DST OP (u32) IMM; \
1406 CONT;
1407
1408 ALU(ADD, +)
1409 ALU(SUB, -)
1410 ALU(AND, &)
1411 ALU(OR, |)
1412 ALU(LSH, <<)
1413 ALU(RSH, >>)
1414 ALU(XOR, ^)
1415 ALU(MUL, *)
1416#undef ALU
1417 ALU_NEG:
1418 DST = (u32) -DST;
1419 CONT;
1420 ALU64_NEG:
1421 DST = -DST;
1422 CONT;
1423 ALU_MOV_X:
1424 DST = (u32) SRC;
1425 CONT;
1426 ALU_MOV_K:
1427 DST = (u32) IMM;
1428 CONT;
1429 ALU64_MOV_X:
1430 DST = SRC;
1431 CONT;
1432 ALU64_MOV_K:
1433 DST = IMM;
1434 CONT;
1435 LD_IMM_DW:
1436 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1437 insn++;
1438 CONT;
1439 ALU_ARSH_X:
1440 DST = (u64) (u32) (((s32) DST) >> SRC);
1441 CONT;
1442 ALU_ARSH_K:
1443 DST = (u64) (u32) (((s32) DST) >> IMM);
1444 CONT;
1445 ALU64_ARSH_X:
1446 (*(s64 *) &DST) >>= SRC;
1447 CONT;
1448 ALU64_ARSH_K:
1449 (*(s64 *) &DST) >>= IMM;
1450 CONT;
1451 ALU64_MOD_X:
1452 div64_u64_rem(DST, SRC, &AX);
1453 DST = AX;
1454 CONT;
1455 ALU_MOD_X:
1456 AX = (u32) DST;
1457 DST = do_div(AX, (u32) SRC);
1458 CONT;
1459 ALU64_MOD_K:
1460 div64_u64_rem(DST, IMM, &AX);
1461 DST = AX;
1462 CONT;
1463 ALU_MOD_K:
1464 AX = (u32) DST;
1465 DST = do_div(AX, (u32) IMM);
1466 CONT;
1467 ALU64_DIV_X:
1468 DST = div64_u64(DST, SRC);
1469 CONT;
1470 ALU_DIV_X:
1471 AX = (u32) DST;
1472 do_div(AX, (u32) SRC);
1473 DST = (u32) AX;
1474 CONT;
1475 ALU64_DIV_K:
1476 DST = div64_u64(DST, IMM);
1477 CONT;
1478 ALU_DIV_K:
1479 AX = (u32) DST;
1480 do_div(AX, (u32) IMM);
1481 DST = (u32) AX;
1482 CONT;
1483 ALU_END_TO_BE:
1484 switch (IMM) {
1485 case 16:
1486 DST = (__force u16) cpu_to_be16(DST);
1487 break;
1488 case 32:
1489 DST = (__force u32) cpu_to_be32(DST);
1490 break;
1491 case 64:
1492 DST = (__force u64) cpu_to_be64(DST);
1493 break;
1494 }
1495 CONT;
1496 ALU_END_TO_LE:
1497 switch (IMM) {
1498 case 16:
1499 DST = (__force u16) cpu_to_le16(DST);
1500 break;
1501 case 32:
1502 DST = (__force u32) cpu_to_le32(DST);
1503 break;
1504 case 64:
1505 DST = (__force u64) cpu_to_le64(DST);
1506 break;
1507 }
1508 CONT;
1509
1510 /* CALL */
1511 JMP_CALL:
1512 /* Function call scratches BPF_R1-BPF_R5 registers,
1513 * preserves BPF_R6-BPF_R9, and stores return value
1514 * into BPF_R0.
1515 */
1516 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1517 BPF_R4, BPF_R5);
1518 CONT;
1519
1520 JMP_CALL_ARGS:
1521 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1522 BPF_R3, BPF_R4,
1523 BPF_R5,
1524 insn + insn->off + 1);
1525 CONT;
1526
1527 JMP_TAIL_CALL: {
1528 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1529 struct bpf_array *array = container_of(map, struct bpf_array, map);
1530 struct bpf_prog *prog;
1531 u32 index = BPF_R3;
1532
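		/* Bound the index and the tail call chain length; on any
		 * failure fall through to the insn after the tail call.
		 */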
1533 if (unlikely(index >= array->map.max_entries))
1534 goto out;
1535 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1536 goto out;
1537
1538 tail_call_cnt++;
1539
1540 prog = READ_ONCE(array->ptrs[index]);
1541 if (!prog)
1542 goto out;
1543
1544 /* ARG1 at this point is guaranteed to point to CTX from
1545 * the verifier side due to the fact that the tail call is
1546 * handled like a helper, that is, bpf_tail_call_proto,
1547 * where arg1_type is ARG_PTR_TO_CTX.
1548 */
1549 insn = prog->insnsi;
1550 goto select_insn;
1551out:
1552 CONT;
1553 }
1554 JMP_JA:
1555 insn += insn->off;
1556 CONT;
1557 JMP_EXIT:
1558 return BPF_R0;
1559 /* JMP */
1560#define COND_JMP(SIGN, OPCODE, CMP_OP) \
1561 JMP_##OPCODE##_X: \
1562 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
1563 insn += insn->off; \
1564 CONT_JMP; \
1565 } \
1566 CONT; \
1567 JMP32_##OPCODE##_X: \
1568 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
1569 insn += insn->off; \
1570 CONT_JMP; \
1571 } \
1572 CONT; \
1573 JMP_##OPCODE##_K: \
1574 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
1575 insn += insn->off; \
1576 CONT_JMP; \
1577 } \
1578 CONT; \
1579 JMP32_##OPCODE##_K: \
1580 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
1581 insn += insn->off; \
1582 CONT_JMP; \
1583 } \
1584 CONT;
1585 COND_JMP(u, JEQ, ==)
1586 COND_JMP(u, JNE, !=)
1587 COND_JMP(u, JGT, >)
1588 COND_JMP(u, JLT, <)
1589 COND_JMP(u, JGE, >=)
1590 COND_JMP(u, JLE, <=)
1591 COND_JMP(u, JSET, &)
1592 COND_JMP(s, JSGT, >)
1593 COND_JMP(s, JSLT, <)
1594 COND_JMP(s, JSGE, >=)
1595 COND_JMP(s, JSLE, <=)
1596#undef COND_JMP
1597 /* STX and ST and LDX*/
1598#define LDST(SIZEOP, SIZE) \
1599 STX_MEM_##SIZEOP: \
1600 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1601 CONT; \
1602 ST_MEM_##SIZEOP: \
1603 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1604 CONT; \
1605 LDX_MEM_##SIZEOP: \
1606 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1607 CONT;
1608
1609 LDST(B, u8)
1610 LDST(H, u16)
1611 LDST(W, u32)
1612 LDST(DW, u64)
1613#undef LDST
1614#define LDX_PROBE(SIZEOP, SIZE) \
1615 LDX_PROBE_MEM_##SIZEOP: \
1616 bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off)); \
1617 CONT;
1618 LDX_PROBE(B, 1)
1619 LDX_PROBE(H, 2)
1620 LDX_PROBE(W, 4)
1621 LDX_PROBE(DW, 8)
1622#undef LDX_PROBE
1623
1624 STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1625 atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1626 (DST + insn->off));
1627 CONT;
1628 STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1629 atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1630 (DST + insn->off));
1631 CONT;
1632
1633 default_label:
1634 /* If we ever reach this, we have a bug somewhere. Die hard here
1635 * instead of just returning 0; we could be somewhere in a subprog,
1636 * so execution could continue otherwise which we do /not/ want.
1637 *
1638 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1639 */
1640 pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1641 BUG_ON(1);
1642 return 0;
1643}
1644
1645#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1646#define DEFINE_BPF_PROG_RUN(stack_size) \
1647static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1648{ \
1649 u64 stack[stack_size / sizeof(u64)]; \
1650 u64 regs[MAX_BPF_EXT_REG]; \
1651\
1652 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1653 ARG1 = (u64) (unsigned long) ctx; \
1654 return ___bpf_prog_run(regs, insn, stack); \
1655}
1656
1657#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1658#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1659static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1660 const struct bpf_insn *insn) \
1661{ \
1662 u64 stack[stack_size / sizeof(u64)]; \
1663 u64 regs[MAX_BPF_EXT_REG]; \
1664\
1665 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1666 BPF_R1 = r1; \
1667 BPF_R2 = r2; \
1668 BPF_R3 = r3; \
1669 BPF_R4 = r4; \
1670 BPF_R5 = r5; \
1671 return ___bpf_prog_run(regs, insn, stack); \
1672}
1673
1674#define EVAL1(FN, X) FN(X)
1675#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1676#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1677#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1678#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1679#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1680
1681EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1682EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1683EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1684
1685EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1686EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1687EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1688
1689#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1690
1691static unsigned int (*interpreters[])(const void *ctx,
1692 const struct bpf_insn *insn) = {
1693EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1694EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1695EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1696};
1697#undef PROG_NAME_LIST
1698#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1699static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1700 const struct bpf_insn *insn) = {
1701EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1702EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1703EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1704};
1705#undef PROG_NAME_LIST
1706
1707void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1708{
1709 stack_depth = max_t(u32, stack_depth, 1);
1710 insn->off = (s16) insn->imm;
1711 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1712 __bpf_call_base_args;
1713 insn->code = BPF_JMP | BPF_CALL_ARGS;
1714}
1715
1716#else
1717static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1718 const struct bpf_insn *insn)
1719{
1720 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1721 * is not working properly, so warn about it!
1722 */
1723 WARN_ON_ONCE(1);
1724 return 0;
1725}
1726#endif
1727
1728bool bpf_prog_array_compatible(struct bpf_array *array,
1729 const struct bpf_prog *fp)
1730{
1731 if (fp->kprobe_override)
1732 return false;
1733
1734 if (!array->aux->type) {
1735 /* There's no owner yet where we could check for
1736 * compatibility.
1737 */
1738 array->aux->type = fp->type;
1739 array->aux->jited = fp->jited;
1740 return true;
1741 }
1742
1743 return array->aux->type == fp->type &&
1744 array->aux->jited == fp->jited;
1745}
1746
1747static int bpf_check_tail_call(const struct bpf_prog *fp)
1748{
1749 struct bpf_prog_aux *aux = fp->aux;
1750 int i;
1751
1752 for (i = 0; i < aux->used_map_cnt; i++) {
1753 struct bpf_map *map = aux->used_maps[i];
1754 struct bpf_array *array;
1755
1756 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1757 continue;
1758
1759 array = container_of(map, struct bpf_array, map);
1760 if (!bpf_prog_array_compatible(array, fp))
1761 return -EINVAL;
1762 }
1763
1764 return 0;
1765}
1766
1767static void bpf_prog_select_func(struct bpf_prog *fp)
1768{
1769#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1770 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1771
1772 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1773#else
1774 fp->bpf_func = __bpf_prog_ret0_warn;
1775#endif
1776}
1777
1778/**
1779 * bpf_prog_select_runtime - select exec runtime for BPF program
1780 * @fp: bpf_prog populated with internal BPF program
1781 * @err: pointer to error variable
1782 *
1783 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1784 * The BPF program will be executed via BPF_PROG_RUN() macro.
1785 */
1786struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1787{
	/* In case of BPF to BPF calls, the verifier did all the prep
	 * work with regard to JITing, etc.
	 */
1791 if (fp->bpf_func)
1792 goto finalize;
1793
1794 bpf_prog_select_func(fp);
1795
	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of an error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not be
	 * JITed, but fall back to the interpreter.
	 */
1802 if (!bpf_prog_is_dev_bound(fp->aux)) {
1803 *err = bpf_prog_alloc_jited_linfo(fp);
1804 if (*err)
1805 return fp;
1806
1807 fp = bpf_int_jit_compile(fp);
1808 if (!fp->jited) {
1809 bpf_prog_free_jited_linfo(fp);
1810#ifdef CONFIG_BPF_JIT_ALWAYS_ON
1811 *err = -ENOTSUPP;
1812 return fp;
1813#endif
1814 } else {
1815 bpf_prog_free_unused_jited_linfo(fp);
1816 }
1817 } else {
1818 *err = bpf_prog_offload_compile(fp);
1819 if (*err)
1820 return fp;
1821 }
1822
1823finalize:
1824 bpf_prog_lock_ro(fp);
1825
	/* The tail call compatibility check can only be done at
	 * this late stage, as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
1831 *err = bpf_check_tail_call(fp);
1832
1833 return fp;
1834}
1835EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
1836
1837static unsigned int __bpf_prog_ret1(const void *ctx,
1838 const struct bpf_insn *insn)
1839{
1840 return 1;
1841}
1842
1843static struct bpf_prog_dummy {
1844 struct bpf_prog prog;
1845} dummy_bpf_prog = {
1846 .prog = {
1847 .bpf_func = __bpf_prog_ret1,
1848 },
1849};
1850
/* To avoid allocating an empty bpf_prog_array for cgroups that don't have
 * a bpf program attached, use one global 'empty_prog_array'. It will not be
 * modified by the caller of bpf_prog_array_alloc() (since the caller
 * requested prog_cnt == 0); that pointer should be 'freed' by
 * bpf_prog_array_free().
 */
1857static struct {
1858 struct bpf_prog_array hdr;
1859 struct bpf_prog *null_prog;
1860} empty_prog_array = {
1861 .null_prog = NULL,
1862};
1863
1864struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1865{
1866 if (prog_cnt)
1867 return kzalloc(sizeof(struct bpf_prog_array) +
1868 sizeof(struct bpf_prog_array_item) *
1869 (prog_cnt + 1),
1870 flags);
1871
1872 return &empty_prog_array.hdr;
1873}
1874
1875void bpf_prog_array_free(struct bpf_prog_array *progs)
1876{
1877 if (!progs || progs == &empty_prog_array.hdr)
1878 return;
1879 kfree_rcu(progs, rcu);
1880}
1881
1882int bpf_prog_array_length(struct bpf_prog_array *array)
1883{
1884 struct bpf_prog_array_item *item;
1885 u32 cnt = 0;
1886
1887 for (item = array->items; item->prog; item++)
1888 if (item->prog != &dummy_bpf_prog.prog)
1889 cnt++;
1890 return cnt;
1891}
1892
1893bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
1894{
1895 struct bpf_prog_array_item *item;
1896
1897 for (item = array->items; item->prog; item++)
1898 if (item->prog != &dummy_bpf_prog.prog)
1899 return false;
1900 return true;
1901}
1902
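/* Copy up to @request_cnt program ids into @prog_ids, skipping dummy
 * entries. Returns true if programs remain beyond the requested count.
 */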
1903static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
1904 u32 *prog_ids,
1905 u32 request_cnt)
1906{
1907 struct bpf_prog_array_item *item;
1908 int i = 0;
1909
1910 for (item = array->items; item->prog; item++) {
1911 if (item->prog == &dummy_bpf_prog.prog)
1912 continue;
1913 prog_ids[i] = item->prog->aux->id;
1914 if (++i == request_cnt) {
1915 item++;
1916 break;
1917 }
1918 }
1919
1920 return !!(item->prog);
1921}
1922
1923int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
1924 __u32 __user *prog_ids, u32 cnt)
1925{
1926 unsigned long err = 0;
1927 bool nospc;
1928 u32 *ids;
1929
1930 /* users of this function are doing:
1931 * cnt = bpf_prog_array_length();
1932 * if (cnt > 0)
1933 * bpf_prog_array_copy_to_user(..., cnt);
1934 * so below kcalloc doesn't need extra cnt > 0 check.
1935 */
1936 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1937 if (!ids)
1938 return -ENOMEM;
1939 nospc = bpf_prog_array_copy_core(array, ids, cnt);
1940 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1941 kfree(ids);
1942 if (err)
1943 return -EFAULT;
1944 if (nospc)
1945 return -ENOSPC;
1946 return 0;
1947}
1948
1949void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
1950 struct bpf_prog *old_prog)
1951{
1952 struct bpf_prog_array_item *item;
1953
1954 for (item = array->items; item->prog; item++)
1955 if (item->prog == old_prog) {
1956 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
1957 break;
1958 }
1959}
1960
1961/**
1962 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
1963 * index into the program array with
1964 * a dummy no-op program.
1965 * @array: a bpf_prog_array
1966 * @index: the index of the program to replace
1967 *
1968 * Skips over dummy programs, by not counting them, when calculating
1969 * the position of the program to replace.
1970 *
1971 * Return:
1972 * * 0 - Success
1973 * * -EINVAL - Invalid index value. Must be a non-negative integer.
1974 * * -ENOENT - Index out of range
1975 */
1976int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
1977{
1978 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
1979}
1980
1981/**
1982 * bpf_prog_array_update_at() - Updates the program at the given index
1983 * into the program array.
1984 * @array: a bpf_prog_array
1985 * @index: the index of the program to update
1986 * @prog: the program to insert into the array
1987 *
1988 * Skips over dummy programs, by not counting them, when calculating
1989 * the position of the program to update.
1990 *
1991 * Return:
1992 * * 0 - Success
1993 * * -EINVAL - Invalid index value. Must be a non-negative integer.
1994 * * -ENOENT - Index out of range
1995 */
1996int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
1997 struct bpf_prog *prog)
1998{
1999 struct bpf_prog_array_item *item;
2000
2001 if (unlikely(index < 0))
2002 return -EINVAL;
2003
2004 for (item = array->items; item->prog; item++) {
2005 if (item->prog == &dummy_bpf_prog.prog)
2006 continue;
2007 if (!index) {
2008 WRITE_ONCE(item->prog, prog);
2009 return 0;
2010 }
2011 index--;
2012 }
2013 return -ENOENT;
2014}
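/* Worked example (illustrative only): for an array laid out as
 * { A, dummy, B, C, NULL }, index 0 refers to A, index 1 to B and
 * index 2 to C, since dummy entries are not counted. Hence
 * bpf_prog_array_update_at(array, 1, P) replaces B with P, and
 * bpf_prog_array_delete_safe_at(array, 2) turns C into a dummy.
 */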
2015
2016int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2017 struct bpf_prog *exclude_prog,
2018 struct bpf_prog *include_prog,
2019 struct bpf_prog_array **new_array)
2020{
2021 int new_prog_cnt, carry_prog_cnt = 0;
2022 struct bpf_prog_array_item *existing;
2023 struct bpf_prog_array *array;
2024 bool found_exclude = false;
2025 int new_prog_idx = 0;
2026
2027 /* Figure out how many existing progs we need to carry over to
2028 * the new array.
2029 */
2030 if (old_array) {
2031 existing = old_array->items;
2032 for (; existing->prog; existing++) {
2033 if (existing->prog == exclude_prog) {
2034 found_exclude = true;
2035 continue;
2036 }
2037 if (existing->prog != &dummy_bpf_prog.prog)
2038 carry_prog_cnt++;
2039 if (existing->prog == include_prog)
2040 return -EEXIST;
2041 }
2042 }
2043
2044 if (exclude_prog && !found_exclude)
2045 return -ENOENT;
2046
2047 /* How many progs (not NULL) will be in the new array? */
2048 new_prog_cnt = carry_prog_cnt;
2049 if (include_prog)
2050 new_prog_cnt += 1;
2051
2052 /* Do we have any prog (not NULL) in the new array? */
2053 if (!new_prog_cnt) {
2054 *new_array = NULL;
2055 return 0;
2056 }
2057
2058 /* +1 as the end of prog_array is marked with NULL */
2059 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2060 if (!array)
2061 return -ENOMEM;
2062
2063 /* Fill in the new prog array */
2064 if (carry_prog_cnt) {
2065 existing = old_array->items;
2066 for (; existing->prog; existing++)
2067 if (existing->prog != exclude_prog &&
2068 existing->prog != &dummy_bpf_prog.prog) {
2069 array->items[new_prog_idx++].prog =
2070 existing->prog;
2071 }
2072 }
2073 if (include_prog)
2074 array->items[new_prog_idx++].prog = include_prog;
2075 array->items[new_prog_idx].prog = NULL;
2076 *new_array = array;
2077 return 0;
2078}
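/* Worked example (illustrative only): with old_array = { A, dummy, B, NULL },
 * exclude_prog = A and include_prog = C, the resulting *new_array is
 * { B, C, NULL }: A and the dummy entry are dropped, B is carried over and
 * C is appended before the terminating NULL. Passing an exclude_prog that is
 * not in old_array yields -ENOENT, and an include_prog that is already
 * present yields -EEXIST.
 */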
2079
2080int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2081 u32 *prog_ids, u32 request_cnt,
2082 u32 *prog_cnt)
2083{
2084 u32 cnt = 0;
2085
2086 if (array)
2087 cnt = bpf_prog_array_length(array);
2088
2089 *prog_cnt = cnt;
2090
2091 /* return early if user requested only program count or nothing to copy */
2092 if (!request_cnt || !cnt)
2093 return 0;
2094
2095 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2096 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2097 : 0;
2098}
2099
2100void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2101 struct bpf_map **used_maps, u32 len)
2102{
2103 struct bpf_map *map;
2104 u32 i;
2105
2106 for (i = 0; i < len; i++) {
2107 map = used_maps[i];
2108 if (map->ops->map_poke_untrack)
2109 map->ops->map_poke_untrack(map, aux);
2110 bpf_map_put(map);
2111 }
2112}
2113
2114static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2115{
2116 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2117 kfree(aux->used_maps);
2118}
2119
2120static void bpf_prog_free_deferred(struct work_struct *work)
2121{
2122 struct bpf_prog_aux *aux;
2123 int i;
2124
2125 aux = container_of(work, struct bpf_prog_aux, work);
2126 bpf_free_used_maps(aux);
2127 if (bpf_prog_is_dev_bound(aux))
2128 bpf_prog_offload_destroy(aux->prog);
2129#ifdef CONFIG_PERF_EVENTS
2130 if (aux->prog->has_callchain_buf)
2131 put_callchain_buffers();
2132#endif
2133 bpf_trampoline_put(aux->trampoline);
2134 for (i = 0; i < aux->func_cnt; i++)
2135 bpf_jit_free(aux->func[i]);
2136 if (aux->func_cnt) {
2137 kfree(aux->func);
2138 bpf_prog_unlock_free(aux->prog);
2139 } else {
2140 bpf_jit_free(aux->prog);
2141 }
2142}
2143
2144/* Free internal BPF program */
2145void bpf_prog_free(struct bpf_prog *fp)
2146{
2147 struct bpf_prog_aux *aux = fp->aux;
2148
2149 if (aux->linked_prog)
2150 bpf_prog_put(aux->linked_prog);
2151 INIT_WORK(&aux->work, bpf_prog_free_deferred);
2152 schedule_work(&aux->work);
2153}
2154EXPORT_SYMBOL_GPL(bpf_prog_free);
2155
2156/* RNG for unprivileged user space, with state separated from prandom_u32(). */
2157static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2158
2159void bpf_user_rnd_init_once(void)
2160{
2161 prandom_init_once(&bpf_user_rnd_state);
2162}
2163
2164BPF_CALL_0(bpf_user_rnd_u32)
2165{
2166	/* Should someone ever have the rather unwise idea to use some
2167	 * of the registers passed into this function, note that this
2168	 * function is called from both native eBPF and classic-to-eBPF
2169	 * transformations. Register assignments differ between the two,
2170	 * e.g. classic BPF always sets up fn(ctx, A, X) here.
2171 */
2172 struct rnd_state *state;
2173 u32 res;
2174
2175 state = &get_cpu_var(bpf_user_rnd_state);
2176 res = prandom_u32_state(state);
2177 put_cpu_var(bpf_user_rnd_state);
2178
2179 return res;
2180}
2181
2182BPF_CALL_0(bpf_get_raw_cpu_id)
2183{
2184 return raw_smp_processor_id();
2185}
2186
2187/* Weak definitions of helper functions in case we don't have bpf syscall. */
2188const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2189const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2190const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2191const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2192const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2193const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2194const struct bpf_func_proto bpf_spin_lock_proto __weak;
2195const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2196const struct bpf_func_proto bpf_jiffies64_proto __weak;
2197
2198const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2199const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2200const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2201const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2202const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2203
2204const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2205const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2206const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2207const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2208const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2209const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2210const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2211
2212const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2213{
2214 return NULL;
2215}
2216
2217u64 __weak
2218bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2219 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2220{
2221 return -ENOTSUPP;
2222}
2223EXPORT_SYMBOL_GPL(bpf_event_output);
2224
2225/* Always built-in helper functions. */
2226const struct bpf_func_proto bpf_tail_call_proto = {
2227 .func = NULL,
2228 .gpl_only = false,
2229 .ret_type = RET_VOID,
2230 .arg1_type = ARG_PTR_TO_CTX,
2231 .arg2_type = ARG_CONST_MAP_PTR,
2232 .arg3_type = ARG_ANYTHING,
2233};
2234
2235/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2236 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2237 * eBPF and implicitly also cBPF can get JITed!
2238 */
2239struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2240{
2241 return prog;
2242}
2243
2244/* Stub for JITs that support eBPF. All cBPF code gets transformed into
2245 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2246 */
2247void __weak bpf_jit_compile(struct bpf_prog *prog)
2248{
2249}
2250
2251bool __weak bpf_helper_changes_pkt_data(void *func)
2252{
2253 return false;
2254}
2255
2256/* Return TRUE if the JIT backend wants the verifier to enable sub-register
2257 * usage analysis and wants explicit zero extension inserted by the verifier.
2258 * Otherwise, return FALSE.
2259 */
2260bool __weak bpf_jit_needs_zext(void)
2261{
2262 return false;
2263}
2264
2265/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2266 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2267 */
2268int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2269 int len)
2270{
2271 return -EFAULT;
2272}
2273
2274int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2275 void *addr1, void *addr2)
2276{
2277 return -ENOTSUPP;
2278}
2279
2280DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2281EXPORT_SYMBOL(bpf_stats_enabled_key);
2282
2283/* All definitions of tracepoints related to BPF. */
2284#define CREATE_TRACE_POINTS
2285#include <linux/bpf_trace.h>
2286
2287EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2288EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Linux Socket Filter - Kernel level socket filtering
4 *
5 * Based on the design of the Berkeley Packet Filter. The new
6 * internal format has been designed by PLUMgrid:
7 *
8 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9 *
10 * Authors:
11 *
12 * Jay Schulist <jschlst@samba.org>
13 * Alexei Starovoitov <ast@plumgrid.com>
14 * Daniel Borkmann <dborkman@redhat.com>
15 *
16 * Andi Kleen - Fix a few bad bugs and races.
17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18 */
19
20#include <uapi/linux/btf.h>
21#include <linux/filter.h>
22#include <linux/skbuff.h>
23#include <linux/vmalloc.h>
24#include <linux/prandom.h>
25#include <linux/bpf.h>
26#include <linux/btf.h>
27#include <linux/objtool.h>
28#include <linux/overflow.h>
29#include <linux/rbtree_latch.h>
30#include <linux/kallsyms.h>
31#include <linux/rcupdate.h>
32#include <linux/perf_event.h>
33#include <linux/extable.h>
34#include <linux/log2.h>
35#include <linux/bpf_verifier.h>
36#include <linux/nodemask.h>
37#include <linux/nospec.h>
38#include <linux/bpf_mem_alloc.h>
39#include <linux/memcontrol.h>
40#include <linux/execmem.h>
41
42#include <asm/barrier.h>
43#include <linux/unaligned.h>
44
45/* Registers */
46#define BPF_R0 regs[BPF_REG_0]
47#define BPF_R1 regs[BPF_REG_1]
48#define BPF_R2 regs[BPF_REG_2]
49#define BPF_R3 regs[BPF_REG_3]
50#define BPF_R4 regs[BPF_REG_4]
51#define BPF_R5 regs[BPF_REG_5]
52#define BPF_R6 regs[BPF_REG_6]
53#define BPF_R7 regs[BPF_REG_7]
54#define BPF_R8 regs[BPF_REG_8]
55#define BPF_R9 regs[BPF_REG_9]
56#define BPF_R10 regs[BPF_REG_10]
57
58/* Named registers */
59#define DST regs[insn->dst_reg]
60#define SRC regs[insn->src_reg]
61#define FP regs[BPF_REG_FP]
62#define AX regs[BPF_REG_AX]
63#define ARG1 regs[BPF_REG_ARG1]
64#define CTX regs[BPF_REG_CTX]
65#define OFF insn->off
66#define IMM insn->imm
67
68struct bpf_mem_alloc bpf_global_ma;
69bool bpf_global_ma_set;
70
71/* No hurry in this branch
72 *
73 * Exported for the bpf jit load helper.
74 */
75void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
76{
77 u8 *ptr = NULL;
78
79 if (k >= SKF_NET_OFF) {
80 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
81 } else if (k >= SKF_LL_OFF) {
82 if (unlikely(!skb_mac_header_was_set(skb)))
83 return NULL;
84 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
85 }
86 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
87 return ptr;
88
89 return NULL;
90}
91
92/* Tell BPF programs that include vmlinux.h the kernel's PAGE_SIZE. */
93enum page_size_enum {
94 __PAGE_SIZE = PAGE_SIZE
95};
96
97struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
98{
99 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
100 struct bpf_prog_aux *aux;
101 struct bpf_prog *fp;
102
103 size = round_up(size, __PAGE_SIZE);
104 fp = __vmalloc(size, gfp_flags);
105 if (fp == NULL)
106 return NULL;
107
108 aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
109 if (aux == NULL) {
110 vfree(fp);
111 return NULL;
112 }
113 fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
114 if (!fp->active) {
115 vfree(fp);
116 kfree(aux);
117 return NULL;
118 }
119
120 fp->pages = size / PAGE_SIZE;
121 fp->aux = aux;
122 fp->aux->prog = fp;
123 fp->jit_requested = ebpf_jit_enabled();
124 fp->blinding_requested = bpf_jit_blinding_enabled(fp);
125#ifdef CONFIG_CGROUP_BPF
126 aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
127#endif
128
129 INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
130#ifdef CONFIG_FINEIBT
131 INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode);
132#endif
133 mutex_init(&fp->aux->used_maps_mutex);
134 mutex_init(&fp->aux->ext_mutex);
135 mutex_init(&fp->aux->dst_mutex);
136
137 return fp;
138}
139
140struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
141{
142 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
143 struct bpf_prog *prog;
144 int cpu;
145
146 prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
147 if (!prog)
148 return NULL;
149
150 prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
151 if (!prog->stats) {
152 free_percpu(prog->active);
153 kfree(prog->aux);
154 vfree(prog);
155 return NULL;
156 }
157
158 for_each_possible_cpu(cpu) {
159 struct bpf_prog_stats *pstats;
160
161 pstats = per_cpu_ptr(prog->stats, cpu);
162 u64_stats_init(&pstats->syncp);
163 }
164 return prog;
165}
166EXPORT_SYMBOL_GPL(bpf_prog_alloc);
167
168int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
169{
170 if (!prog->aux->nr_linfo || !prog->jit_requested)
171 return 0;
172
173 prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
174 sizeof(*prog->aux->jited_linfo),
175 bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
176 if (!prog->aux->jited_linfo)
177 return -ENOMEM;
178
179 return 0;
180}
181
182void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
183{
184 if (prog->aux->jited_linfo &&
185 (!prog->jited || !prog->aux->jited_linfo[0])) {
186 kvfree(prog->aux->jited_linfo);
187 prog->aux->jited_linfo = NULL;
188 }
189
190 kfree(prog->aux->kfunc_tab);
191 prog->aux->kfunc_tab = NULL;
192}
193
194/* The JIT engine is responsible for providing an array
195 * for the insn_off to jited_off mapping (insn_to_jit_off).
196 *
197 * The index into this array is the insn_off. Hence, the insn_off
198 * here is relative to the prog itself instead of the main prog.
199 * This array has one entry for each xlated bpf insn.
200 *
201 * jited_off is the byte off to the end of the jited insn.
202 *
203 * Hence, with
204 * insn_start:
205 * The first bpf insn off of the prog. The insn off
206 * here is relative to the main prog.
207 * e.g. if prog is a subprog, insn_start > 0
208 * linfo_idx:
209 * The prog's idx to prog->aux->linfo and jited_linfo
210 *
211 * jited_linfo[linfo_idx] = prog->bpf_func
212 *
213 * For i > linfo_idx,
214 *
215 * jited_linfo[i] = prog->bpf_func +
216 * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
217 */
218void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
219 const u32 *insn_to_jit_off)
220{
221 u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
222 const struct bpf_line_info *linfo;
223 void **jited_linfo;
224
225 if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt)
226 /* Userspace did not provide linfo */
227 return;
228
229 linfo_idx = prog->aux->linfo_idx;
230 linfo = &prog->aux->linfo[linfo_idx];
231 insn_start = linfo[0].insn_off;
232 insn_end = insn_start + prog->len;
233
234 jited_linfo = &prog->aux->jited_linfo[linfo_idx];
235 jited_linfo[0] = prog->bpf_func;
236
237 nr_linfo = prog->aux->nr_linfo - linfo_idx;
238
239 for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
240 /* The verifier ensures that linfo[i].insn_off is
241 * strictly increasing
242 */
243 jited_linfo[i] = prog->bpf_func +
244 insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
245}
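/* Worked example (illustrative numbers only): for a subprog with
 * insn_start = linfo[0].insn_off = 10 and linfo[1].insn_off = 14,
 * jited_linfo[0] = prog->bpf_func and
 * jited_linfo[1] = prog->bpf_func + insn_to_jit_off[14 - 10 - 1],
 * i.e. the byte offset just past the jited image of relative insn 3,
 * which is exactly where the jited code for insn_off 14 begins.
 */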
246
247struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
248 gfp_t gfp_extra_flags)
249{
250 gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
251 struct bpf_prog *fp;
252 u32 pages;
253
254 size = round_up(size, PAGE_SIZE);
255 pages = size / PAGE_SIZE;
256 if (pages <= fp_old->pages)
257 return fp_old;
258
259 fp = __vmalloc(size, gfp_flags);
260 if (fp) {
261 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
262 fp->pages = pages;
263 fp->aux->prog = fp;
264
265 /* We keep fp->aux from fp_old around in the new
266 * reallocated structure.
267 */
268 fp_old->aux = NULL;
269 fp_old->stats = NULL;
270 fp_old->active = NULL;
271 __bpf_prog_free(fp_old);
272 }
273
274 return fp;
275}
276
277void __bpf_prog_free(struct bpf_prog *fp)
278{
279 if (fp->aux) {
280 mutex_destroy(&fp->aux->used_maps_mutex);
281 mutex_destroy(&fp->aux->dst_mutex);
282 kfree(fp->aux->poke_tab);
283 kfree(fp->aux);
284 }
285 free_percpu(fp->stats);
286 free_percpu(fp->active);
287 vfree(fp);
288}
289
290int bpf_prog_calc_tag(struct bpf_prog *fp)
291{
292 const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
293 u32 raw_size = bpf_prog_tag_scratch_size(fp);
294 u32 digest[SHA1_DIGEST_WORDS];
295 u32 ws[SHA1_WORKSPACE_WORDS];
296 u32 i, bsize, psize, blocks;
297 struct bpf_insn *dst;
298 bool was_ld_map;
299 u8 *raw, *todo;
300 __be32 *result;
301 __be64 *bits;
302
303 raw = vmalloc(raw_size);
304 if (!raw)
305 return -ENOMEM;
306
307 sha1_init(digest);
308 memset(ws, 0, sizeof(ws));
309
310	/* We need to take the map fds out of the digest calculation
311	 * since they are not stable from the user-space side.
312	 */
313 dst = (void *)raw;
314 for (i = 0, was_ld_map = false; i < fp->len; i++) {
315 dst[i] = fp->insnsi[i];
316 if (!was_ld_map &&
317 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
318 (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
319 dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
320 was_ld_map = true;
321 dst[i].imm = 0;
322 } else if (was_ld_map &&
323 dst[i].code == 0 &&
324 dst[i].dst_reg == 0 &&
325 dst[i].src_reg == 0 &&
326 dst[i].off == 0) {
327 was_ld_map = false;
328 dst[i].imm = 0;
329 } else {
330 was_ld_map = false;
331 }
332 }
333
334 psize = bpf_prog_insn_size(fp);
335 memset(&raw[psize], 0, raw_size - psize);
336 raw[psize++] = 0x80;
337
338 bsize = round_up(psize, SHA1_BLOCK_SIZE);
339 blocks = bsize / SHA1_BLOCK_SIZE;
340 todo = raw;
341 if (bsize - psize >= sizeof(__be64)) {
342 bits = (__be64 *)(todo + bsize - sizeof(__be64));
343 } else {
344 bits = (__be64 *)(todo + bsize + bits_offset);
345 blocks++;
346 }
347 *bits = cpu_to_be64((psize - 1) << 3);
348
349 while (blocks--) {
350 sha1_transform(digest, todo, ws);
351 todo += SHA1_BLOCK_SIZE;
352 }
353
354 result = (__force __be32 *)digest;
355 for (i = 0; i < SHA1_DIGEST_WORDS; i++)
356 result[i] = cpu_to_be32(digest[i]);
357 memcpy(fp->tag, result, sizeof(fp->tag));
358
359 vfree(raw);
360 return 0;
361}
362
363static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
364 s32 end_new, s32 curr, const bool probe_pass)
365{
366 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
367 s32 delta = end_new - end_old;
368 s64 imm = insn->imm;
369
370 if (curr < pos && curr + imm + 1 >= end_old)
371 imm += delta;
372 else if (curr >= end_new && curr + imm + 1 < end_new)
373 imm -= delta;
374 if (imm < imm_min || imm > imm_max)
375 return -ERANGE;
376 if (!probe_pass)
377 insn->imm = imm;
378 return 0;
379}
380
381static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
382 s32 end_new, s32 curr, const bool probe_pass)
383{
384 s64 off_min, off_max, off;
385 s32 delta = end_new - end_old;
386
387 if (insn->code == (BPF_JMP32 | BPF_JA)) {
388 off = insn->imm;
389 off_min = S32_MIN;
390 off_max = S32_MAX;
391 } else {
392 off = insn->off;
393 off_min = S16_MIN;
394 off_max = S16_MAX;
395 }
396
397 if (curr < pos && curr + off + 1 >= end_old)
398 off += delta;
399 else if (curr >= end_new && curr + off + 1 < end_new)
400 off -= delta;
401 if (off < off_min || off > off_max)
402 return -ERANGE;
403 if (!probe_pass) {
404 if (insn->code == (BPF_JMP32 | BPF_JA))
405 insn->imm = off;
406 else
407 insn->off = off;
408 }
409 return 0;
410}
411
412static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
413 s32 end_new, const bool probe_pass)
414{
415 u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
416 struct bpf_insn *insn = prog->insnsi;
417 int ret = 0;
418
419 for (i = 0; i < insn_cnt; i++, insn++) {
420 u8 code;
421
422 /* In the probing pass we still operate on the original,
423 * unpatched image in order to check overflows before we
424 * do any other adjustments. Therefore skip the patchlet.
425 */
426 if (probe_pass && i == pos) {
427 i = end_new;
428 insn = prog->insnsi + end_old;
429 }
430 if (bpf_pseudo_func(insn)) {
431 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
432 end_new, i, probe_pass);
433 if (ret)
434 return ret;
435 continue;
436 }
437 code = insn->code;
438 if ((BPF_CLASS(code) != BPF_JMP &&
439 BPF_CLASS(code) != BPF_JMP32) ||
440 BPF_OP(code) == BPF_EXIT)
441 continue;
442 /* Adjust offset of jmps if we cross patch boundaries. */
443 if (BPF_OP(code) == BPF_CALL) {
444 if (insn->src_reg != BPF_PSEUDO_CALL)
445 continue;
446 ret = bpf_adj_delta_to_imm(insn, pos, end_old,
447 end_new, i, probe_pass);
448 } else {
449 ret = bpf_adj_delta_to_off(insn, pos, end_old,
450 end_new, i, probe_pass);
451 }
452 if (ret)
453 break;
454 }
455
456 return ret;
457}
458
459static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
460{
461 struct bpf_line_info *linfo;
462 u32 i, nr_linfo;
463
464 nr_linfo = prog->aux->nr_linfo;
465 if (!nr_linfo || !delta)
466 return;
467
468 linfo = prog->aux->linfo;
469
470 for (i = 0; i < nr_linfo; i++)
471 if (off < linfo[i].insn_off)
472 break;
473
474 /* Push all off < linfo[i].insn_off by delta */
475 for (; i < nr_linfo; i++)
476 linfo[i].insn_off += delta;
477}
478
479struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
480 const struct bpf_insn *patch, u32 len)
481{
482 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
483 const u32 cnt_max = S16_MAX;
484 struct bpf_prog *prog_adj;
485 int err;
486
487 /* Since our patchlet doesn't expand the image, we're done. */
488 if (insn_delta == 0) {
489 memcpy(prog->insnsi + off, patch, sizeof(*patch));
490 return prog;
491 }
492
493 insn_adj_cnt = prog->len + insn_delta;
494
495 /* Reject anything that would potentially let the insn->off
496 * target overflow when we have excessive program expansions.
497	 * We need to probe for this here, before any reallocation,
498	 * since afterwards we may no longer be able to fail.
499 */
500 if (insn_adj_cnt > cnt_max &&
501 (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
502 return ERR_PTR(err);
503
504 /* Several new instructions need to be inserted. Make room
505	 * for them. Likely, there is no need for a new allocation as
506	 * the last page could have enough tailroom.
507 */
508 prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
509 GFP_USER);
510 if (!prog_adj)
511 return ERR_PTR(-ENOMEM);
512
513 prog_adj->len = insn_adj_cnt;
514
515 /* Patching happens in 3 steps:
516 *
517 * 1) Move over tail of insnsi from next instruction onwards,
518 * so we can patch the single target insn with one or more
519 * new ones (patching is always from 1 to n insns, n > 0).
520 * 2) Inject new instructions at the target location.
521 * 3) Adjust branch offsets if necessary.
522 */
523 insn_rest = insn_adj_cnt - off - len;
524
525 memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
526 sizeof(*patch) * insn_rest);
527 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
528
529	/* We are guaranteed not to fail at this point; otherwise it
530	 * would be too late to revert to the original state. An
531	 * overflow cannot happen here.
532	 */
533 BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
534
535 bpf_adj_linfo(prog_adj, off, insn_delta);
536
537 return prog_adj;
538}
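/* Worked example (illustrative only): patching the single insn at off = 5
 * with a 3-insn patchlet gives insn_delta = 2, so the program grows by two
 * instructions, every jump or pseudo call whose target crosses the patch
 * boundary has its off/imm adjusted by +2 via bpf_adj_branches(), and line
 * info records past the patch point are shifted by the same delta via
 * bpf_adj_linfo().
 */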
539
540int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
541{
542 int err;
543
544	/* Branch offsets can't overflow when the program is shrinking, so
545	 * there is no need to call bpf_adj_branches(..., true) here.
546	 */
547 memmove(prog->insnsi + off, prog->insnsi + off + cnt,
548 sizeof(struct bpf_insn) * (prog->len - off - cnt));
549 prog->len -= cnt;
550
551 err = bpf_adj_branches(prog, off, off + cnt, off, false);
552 WARN_ON_ONCE(err);
553 return err;
554}
555
556static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
557{
558 int i;
559
560 for (i = 0; i < fp->aux->real_func_cnt; i++)
561 bpf_prog_kallsyms_del(fp->aux->func[i]);
562}
563
564void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
565{
566 bpf_prog_kallsyms_del_subprogs(fp);
567 bpf_prog_kallsyms_del(fp);
568}
569
570#ifdef CONFIG_BPF_JIT
571/* All BPF JIT sysctl knobs here. */
572int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
573int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
574int bpf_jit_harden __read_mostly;
575long bpf_jit_limit __read_mostly;
576long bpf_jit_limit_max __read_mostly;
577
578static void
579bpf_prog_ksym_set_addr(struct bpf_prog *prog)
580{
581 WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
582
583 prog->aux->ksym.start = (unsigned long) prog->bpf_func;
584 prog->aux->ksym.end = prog->aux->ksym.start + prog->jited_len;
585}
586
587static void
588bpf_prog_ksym_set_name(struct bpf_prog *prog)
589{
590 char *sym = prog->aux->ksym.name;
591 const char *end = sym + KSYM_NAME_LEN;
592 const struct btf_type *type;
593 const char *func_name;
594
595 BUILD_BUG_ON(sizeof("bpf_prog_") +
596 sizeof(prog->tag) * 2 +
597 /* name has been null terminated.
598		      * We would need +1 for the '_' preceding
599 * the name. However, the null character
600 * is double counted between the name and the
601 * sizeof("bpf_prog_") above, so we omit
602 * the +1 here.
603 */
604 sizeof(prog->aux->name) > KSYM_NAME_LEN);
605
606 sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
607 sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
608
609 /* prog->aux->name will be ignored if full btf name is available */
610 if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) {
611 type = btf_type_by_id(prog->aux->btf,
612 prog->aux->func_info[prog->aux->func_idx].type_id);
613 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
614 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
615 return;
616 }
617
618 if (prog->aux->name[0])
619 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
620 else
621 *sym = 0;
622}
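/* For example (illustrative values only): a program loaded with the name
 * "xdp_main" and the tag bytes 01 23 45 67 89 ab cd ef ends up with the
 * kallsyms name "bpf_prog_0123456789abcdef_xdp_main", or, when BTF func
 * info is available, "bpf_prog_0123456789abcdef_<btf function name>".
 */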
623
624static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
625{
626 return container_of(n, struct bpf_ksym, tnode)->start;
627}
628
629static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
630 struct latch_tree_node *b)
631{
632 return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
633}
634
635static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
636{
637 unsigned long val = (unsigned long)key;
638 const struct bpf_ksym *ksym;
639
640 ksym = container_of(n, struct bpf_ksym, tnode);
641
642 if (val < ksym->start)
643 return -1;
644	/* Ensure that return addresses are detected as part of the program
645	 * when the final instruction is a call and the program is part of a
646	 * stack trace. Therefore, use val > ksym->end instead of val >= ksym->end.
647	 */
648 if (val > ksym->end)
649 return 1;
650
651 return 0;
652}
653
654static const struct latch_tree_ops bpf_tree_ops = {
655 .less = bpf_tree_less,
656 .comp = bpf_tree_comp,
657};
658
659static DEFINE_SPINLOCK(bpf_lock);
660static LIST_HEAD(bpf_kallsyms);
661static struct latch_tree_root bpf_tree __cacheline_aligned;
662
663void bpf_ksym_add(struct bpf_ksym *ksym)
664{
665 spin_lock_bh(&bpf_lock);
666 WARN_ON_ONCE(!list_empty(&ksym->lnode));
667 list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
668 latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
669 spin_unlock_bh(&bpf_lock);
670}
671
672static void __bpf_ksym_del(struct bpf_ksym *ksym)
673{
674 if (list_empty(&ksym->lnode))
675 return;
676
677 latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
678 list_del_rcu(&ksym->lnode);
679}
680
681void bpf_ksym_del(struct bpf_ksym *ksym)
682{
683 spin_lock_bh(&bpf_lock);
684 __bpf_ksym_del(ksym);
685 spin_unlock_bh(&bpf_lock);
686}
687
688static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
689{
690 return fp->jited && !bpf_prog_was_classic(fp);
691}
692
693void bpf_prog_kallsyms_add(struct bpf_prog *fp)
694{
695 if (!bpf_prog_kallsyms_candidate(fp) ||
696 !bpf_token_capable(fp->aux->token, CAP_BPF))
697 return;
698
699 bpf_prog_ksym_set_addr(fp);
700 bpf_prog_ksym_set_name(fp);
701 fp->aux->ksym.prog = true;
702
703 bpf_ksym_add(&fp->aux->ksym);
704
705#ifdef CONFIG_FINEIBT
706 /*
707	 * When FineIBT is enabled, code in the __cfi_foo() symbols can get
708	 * executed and hence the unwinder needs help.
709 */
710 if (cfi_mode != CFI_FINEIBT)
711 return;
712
713 snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN,
714 "__cfi_%s", fp->aux->ksym.name);
715
716 fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16;
717 fp->aux->ksym_prefix.end = (unsigned long) fp->bpf_func;
718
719 bpf_ksym_add(&fp->aux->ksym_prefix);
720#endif
721}
722
723void bpf_prog_kallsyms_del(struct bpf_prog *fp)
724{
725 if (!bpf_prog_kallsyms_candidate(fp))
726 return;
727
728 bpf_ksym_del(&fp->aux->ksym);
729#ifdef CONFIG_FINEIBT
730 if (cfi_mode != CFI_FINEIBT)
731 return;
732 bpf_ksym_del(&fp->aux->ksym_prefix);
733#endif
734}
735
736static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
737{
738 struct latch_tree_node *n;
739
740 n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
741 return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
742}
743
744int __bpf_address_lookup(unsigned long addr, unsigned long *size,
745 unsigned long *off, char *sym)
746{
747 struct bpf_ksym *ksym;
748 int ret = 0;
749
750 rcu_read_lock();
751 ksym = bpf_ksym_find(addr);
752 if (ksym) {
753 unsigned long symbol_start = ksym->start;
754 unsigned long symbol_end = ksym->end;
755
756 ret = strscpy(sym, ksym->name, KSYM_NAME_LEN);
757
758 if (size)
759 *size = symbol_end - symbol_start;
760 if (off)
761 *off = addr - symbol_start;
762 }
763 rcu_read_unlock();
764
765 return ret;
766}
767
768bool is_bpf_text_address(unsigned long addr)
769{
770 bool ret;
771
772 rcu_read_lock();
773 ret = bpf_ksym_find(addr) != NULL;
774 rcu_read_unlock();
775
776 return ret;
777}
778
779struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
780{
781 struct bpf_ksym *ksym = bpf_ksym_find(addr);
782
783 return ksym && ksym->prog ?
784 container_of(ksym, struct bpf_prog_aux, ksym)->prog :
785 NULL;
786}
787
788const struct exception_table_entry *search_bpf_extables(unsigned long addr)
789{
790 const struct exception_table_entry *e = NULL;
791 struct bpf_prog *prog;
792
793 rcu_read_lock();
794 prog = bpf_prog_ksym_find(addr);
795 if (!prog)
796 goto out;
797 if (!prog->aux->num_exentries)
798 goto out;
799
800 e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
801out:
802 rcu_read_unlock();
803 return e;
804}
805
806int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
807 char *sym)
808{
809 struct bpf_ksym *ksym;
810 unsigned int it = 0;
811 int ret = -ERANGE;
812
813 if (!bpf_jit_kallsyms_enabled())
814 return ret;
815
816 rcu_read_lock();
817 list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
818 if (it++ != symnum)
819 continue;
820
821 strscpy(sym, ksym->name, KSYM_NAME_LEN);
822
823 *value = ksym->start;
824 *type = BPF_SYM_ELF_TYPE;
825
826 ret = 0;
827 break;
828 }
829 rcu_read_unlock();
830
831 return ret;
832}
833
834int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
835 struct bpf_jit_poke_descriptor *poke)
836{
837 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
838 static const u32 poke_tab_max = 1024;
839 u32 slot = prog->aux->size_poke_tab;
840 u32 size = slot + 1;
841
842 if (size > poke_tab_max)
843 return -ENOSPC;
844 if (poke->tailcall_target || poke->tailcall_target_stable ||
845 poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
846 return -EINVAL;
847
848 switch (poke->reason) {
849 case BPF_POKE_REASON_TAIL_CALL:
850 if (!poke->tail_call.map)
851 return -EINVAL;
852 break;
853 default:
854 return -EINVAL;
855 }
856
857 tab = krealloc_array(tab, size, sizeof(*poke), GFP_KERNEL);
858 if (!tab)
859 return -ENOMEM;
860
861 memcpy(&tab[slot], poke, sizeof(*poke));
862 prog->aux->size_poke_tab = size;
863 prog->aux->poke_tab = tab;
864
865 return slot;
866}
867
868/*
869 * BPF program pack allocator.
870 *
871 * Most BPF programs are pretty small. Allocating a whole page for each
872 * program is sometimes a waste. Many small BPF programs also add pressure
873 * to the instruction TLB. To solve this issue, we introduce a BPF program
874 * pack allocator. The prog_pack allocator uses a HPAGE_PMD_SIZE page (2MB
875 * on x86) to host multiple BPF programs.
876 */
877#define BPF_PROG_CHUNK_SHIFT 6
878#define BPF_PROG_CHUNK_SIZE (1 << BPF_PROG_CHUNK_SHIFT)
879#define BPF_PROG_CHUNK_MASK (~(BPF_PROG_CHUNK_SIZE - 1))
880
881struct bpf_prog_pack {
882 struct list_head list;
883 void *ptr;
884 unsigned long bitmap[];
885};
886
887void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
888{
889 memset(area, 0, size);
890}
891
892#define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
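/* For example, a 200 byte image needs BPF_PROG_SIZE_TO_NBITS(200) =
 * round_up(200, 64) / 64 = 4 chunks, i.e. 4 bits in a pack's bitmap.
 */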
893
894static DEFINE_MUTEX(pack_mutex);
895static LIST_HEAD(pack_list);
896
897/* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
898 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
899 */
900#ifdef PMD_SIZE
901/* PMD_SIZE is really big for some archs. It doesn't make sense to
902 * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
903 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
904 * greater than or equal to 2MB.
905 */
906#define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes())
907#else
908#define BPF_PROG_PACK_SIZE PAGE_SIZE
909#endif
910
911#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
912
913static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
914{
915 struct bpf_prog_pack *pack;
916 int err;
917
918 pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
919 GFP_KERNEL);
920 if (!pack)
921 return NULL;
922 pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
923 if (!pack->ptr)
924 goto out;
925 bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
926 bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
927
928 set_vm_flush_reset_perms(pack->ptr);
929 err = set_memory_rox((unsigned long)pack->ptr,
930 BPF_PROG_PACK_SIZE / PAGE_SIZE);
931 if (err)
932 goto out;
933 list_add_tail(&pack->list, &pack_list);
934 return pack;
935
936out:
937 bpf_jit_free_exec(pack->ptr);
938 kfree(pack);
939 return NULL;
940}
941
942void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
943{
944 unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
945 struct bpf_prog_pack *pack;
946 unsigned long pos;
947 void *ptr = NULL;
948
949 mutex_lock(&pack_mutex);
950 if (size > BPF_PROG_PACK_SIZE) {
951 size = round_up(size, PAGE_SIZE);
952 ptr = bpf_jit_alloc_exec(size);
953 if (ptr) {
954 int err;
955
956 bpf_fill_ill_insns(ptr, size);
957 set_vm_flush_reset_perms(ptr);
958 err = set_memory_rox((unsigned long)ptr,
959 size / PAGE_SIZE);
960 if (err) {
961 bpf_jit_free_exec(ptr);
962 ptr = NULL;
963 }
964 }
965 goto out;
966 }
967 list_for_each_entry(pack, &pack_list, list) {
968 pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
969 nbits, 0);
970 if (pos < BPF_PROG_CHUNK_COUNT)
971 goto found_free_area;
972 }
973
974 pack = alloc_new_pack(bpf_fill_ill_insns);
975 if (!pack)
976 goto out;
977
978 pos = 0;
979
980found_free_area:
981 bitmap_set(pack->bitmap, pos, nbits);
982 ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);
983
984out:
985 mutex_unlock(&pack_mutex);
986 return ptr;
987}
988
989void bpf_prog_pack_free(void *ptr, u32 size)
990{
991 struct bpf_prog_pack *pack = NULL, *tmp;
992 unsigned int nbits;
993 unsigned long pos;
994
995 mutex_lock(&pack_mutex);
996 if (size > BPF_PROG_PACK_SIZE) {
997 bpf_jit_free_exec(ptr);
998 goto out;
999 }
1000
1001 list_for_each_entry(tmp, &pack_list, list) {
1002 if (ptr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > ptr) {
1003 pack = tmp;
1004 break;
1005 }
1006 }
1007
1008 if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
1009 goto out;
1010
1011 nbits = BPF_PROG_SIZE_TO_NBITS(size);
1012 pos = ((unsigned long)ptr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
1013
1014 WARN_ONCE(bpf_arch_text_invalidate(ptr, size),
1015 "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
1016
1017 bitmap_clear(pack->bitmap, pos, nbits);
1018 if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
1019 BPF_PROG_CHUNK_COUNT, 0) == 0) {
1020 list_del(&pack->list);
1021 bpf_jit_free_exec(pack->ptr);
1022 kfree(pack);
1023 }
1024out:
1025 mutex_unlock(&pack_mutex);
1026}
1027
1028static atomic_long_t bpf_jit_current;
1029
1030/* Can be overridden by an arch's JIT compiler if it has a custom,
1031 * dedicated BPF backend memory area, or if neither of the two
1032 * below apply.
1033 */
1034u64 __weak bpf_jit_alloc_exec_limit(void)
1035{
1036#if defined(MODULES_VADDR)
1037 return MODULES_END - MODULES_VADDR;
1038#else
1039 return VMALLOC_END - VMALLOC_START;
1040#endif
1041}
1042
1043static int __init bpf_jit_charge_init(void)
1044{
1045 /* Only used as heuristic here to derive limit. */
1046 bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
1047 bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
1048 PAGE_SIZE), LONG_MAX);
1049 return 0;
1050}
1051pure_initcall(bpf_jit_charge_init);
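/* For example (illustrative numbers only): with a 1 GiB JIT memory area,
 * bpf_jit_limit_max = 1 GiB and the default bpf_jit_limit becomes 512 MiB,
 * i.e. half of the area rounded up to a page and capped at LONG_MAX.
 */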
1052
1053int bpf_jit_charge_modmem(u32 size)
1054{
1055 if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
1056 if (!bpf_capable()) {
1057 atomic_long_sub(size, &bpf_jit_current);
1058 return -EPERM;
1059 }
1060 }
1061
1062 return 0;
1063}
1064
1065void bpf_jit_uncharge_modmem(u32 size)
1066{
1067 atomic_long_sub(size, &bpf_jit_current);
1068}
1069
1070void *__weak bpf_jit_alloc_exec(unsigned long size)
1071{
1072 return execmem_alloc(EXECMEM_BPF, size);
1073}
1074
1075void __weak bpf_jit_free_exec(void *addr)
1076{
1077 execmem_free(addr);
1078}
1079
1080struct bpf_binary_header *
1081bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1082 unsigned int alignment,
1083 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1084{
1085 struct bpf_binary_header *hdr;
1086 u32 size, hole, start;
1087
1088 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1089 alignment > BPF_IMAGE_ALIGNMENT);
1090
1091	/* Most BPF filters are really small, but if some of them
1092 * fill a page, allow at least 128 extra bytes to insert a
1093 * random section of illegal instructions.
1094 */
1095 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1096
1097 if (bpf_jit_charge_modmem(size))
1098 return NULL;
1099 hdr = bpf_jit_alloc_exec(size);
1100 if (!hdr) {
1101 bpf_jit_uncharge_modmem(size);
1102 return NULL;
1103 }
1104
1105 /* Fill space with illegal/arch-dep instructions. */
1106 bpf_fill_ill_insns(hdr, size);
1107
1108 hdr->size = size;
1109 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1110 PAGE_SIZE - sizeof(*hdr));
1111 start = get_random_u32_below(hole) & ~(alignment - 1);
1112
1113 /* Leave a random number of instructions before BPF code. */
1114 *image_ptr = &hdr->image[start];
1115
1116 return hdr;
1117}
1118
1119void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1120{
1121 u32 size = hdr->size;
1122
1123 bpf_jit_free_exec(hdr);
1124 bpf_jit_uncharge_modmem(size);
1125}
1126
1127/* Allocate the JIT binary from the bpf_prog_pack allocator.
1128 * Since the allocated memory is RO+X, the JIT engine cannot write directly
1129 * to it. To solve this problem, an RW buffer is also allocated at the same
1130 * time. The JIT engine should calculate offsets based on the RO memory
1131 * address, but write the JITed program to the RW buffer. Once the JIT
1132 * engine finishes, it calls bpf_jit_binary_pack_finalize(), which copies
1133 * the JITed program to the RO memory.
1134 */
1135struct bpf_binary_header *
1136bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
1137 unsigned int alignment,
1138 struct bpf_binary_header **rw_header,
1139 u8 **rw_image,
1140 bpf_jit_fill_hole_t bpf_fill_ill_insns)
1141{
1142 struct bpf_binary_header *ro_header;
1143 u32 size, hole, start;
1144
1145 WARN_ON_ONCE(!is_power_of_2(alignment) ||
1146 alignment > BPF_IMAGE_ALIGNMENT);
1147
1148 /* add 16 bytes for a random section of illegal instructions */
1149 size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
1150
1151 if (bpf_jit_charge_modmem(size))
1152 return NULL;
1153 ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
1154 if (!ro_header) {
1155 bpf_jit_uncharge_modmem(size);
1156 return NULL;
1157 }
1158
1159 *rw_header = kvmalloc(size, GFP_KERNEL);
1160 if (!*rw_header) {
1161 bpf_prog_pack_free(ro_header, size);
1162 bpf_jit_uncharge_modmem(size);
1163 return NULL;
1164 }
1165
1166 /* Fill space with illegal/arch-dep instructions. */
1167 bpf_fill_ill_insns(*rw_header, size);
1168 (*rw_header)->size = size;
1169
1170 hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
1171 BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
1172 start = get_random_u32_below(hole) & ~(alignment - 1);
1173
1174 *image_ptr = &ro_header->image[start];
1175 *rw_image = &(*rw_header)->image[start];
1176
1177 return ro_header;
1178}
1179
1180/* Copy JITed text from rw_header to its final location, the ro_header. */
1181int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
1182 struct bpf_binary_header *rw_header)
1183{
1184 void *ptr;
1185
1186 ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
1187
1188 kvfree(rw_header);
1189
1190 if (IS_ERR(ptr)) {
1191 bpf_prog_pack_free(ro_header, ro_header->size);
1192 return PTR_ERR(ptr);
1193 }
1194 return 0;
1195}
1196
1197/* bpf_jit_binary_pack_free is called in two different scenarios:
1198 * 1) when the program is freed after a successful bpf_jit_binary_pack_finalize();
1199 * 2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
1200 * For case 2), we need to free both the RO memory and the RW buffer.
1201 *
1202 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1203 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1204 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1205 * bpf_arch_text_copy (when jit fails).
1206 */
1207void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1208 struct bpf_binary_header *rw_header)
1209{
1210 u32 size = ro_header->size;
1211
1212 bpf_prog_pack_free(ro_header, size);
1213 kvfree(rw_header);
1214 bpf_jit_uncharge_modmem(size);
1215}
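/* Illustrative sketch (not part of the kernel sources) of the normal path an
 * arch JIT would follow with the pack API above; "align", "emit_insns",
 * "jit_fill_hole" and "orig_prog" are hypothetical placeholders:
 *
 *	header = bpf_jit_binary_pack_alloc(proglen, &image, align, &rw_header,
 *					   &rw_image, jit_fill_hole);
 *	if (!header)
 *		return orig_prog;
 *	emit_insns(rw_image);			/- write into the RW buffer
 *	if (!bpf_jit_binary_pack_finalize(header, rw_header))
 *		prog->bpf_func = (void *)image;	/- execute from the RO+X copy
 */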
1216
1217struct bpf_binary_header *
1218bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
1219{
1220 unsigned long real_start = (unsigned long)fp->bpf_func;
1221 unsigned long addr;
1222
1223 addr = real_start & BPF_PROG_CHUNK_MASK;
1224 return (void *)addr;
1225}
1226
1227static inline struct bpf_binary_header *
1228bpf_jit_binary_hdr(const struct bpf_prog *fp)
1229{
1230 unsigned long real_start = (unsigned long)fp->bpf_func;
1231 unsigned long addr;
1232
1233 addr = real_start & PAGE_MASK;
1234 return (void *)addr;
1235}
1236
1237/* This symbol is only overridden by archs that have different
1238 * requirements than the usual eBPF JITs, e.g. when they only
1239 * implement a cBPF JIT, do not set images read-only, etc.
1240 */
1241void __weak bpf_jit_free(struct bpf_prog *fp)
1242{
1243 if (fp->jited) {
1244 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
1245
1246 bpf_jit_binary_free(hdr);
1247 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
1248 }
1249
1250 bpf_prog_unlock_free(fp);
1251}
1252
1253int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1254 const struct bpf_insn *insn, bool extra_pass,
1255 u64 *func_addr, bool *func_addr_fixed)
1256{
1257 s16 off = insn->off;
1258 s32 imm = insn->imm;
1259 u8 *addr;
1260 int err;
1261
1262 *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1263 if (!*func_addr_fixed) {
1264 /* Place-holder address till the last pass has collected
1265 * all addresses for JITed subprograms in which case we
1266 * can pick them up from prog->aux.
1267 */
1268 if (!extra_pass)
1269 addr = NULL;
1270 else if (prog->aux->func &&
1271 off >= 0 && off < prog->aux->real_func_cnt)
1272 addr = (u8 *)prog->aux->func[off]->bpf_func;
1273 else
1274 return -EINVAL;
1275 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
1276 bpf_jit_supports_far_kfunc_call()) {
1277 err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
1278 if (err)
1279 return err;
1280 } else {
1281		/* Address of a BPF helper call. Since it is part of the core
1282		 * kernel, it is always at a fixed location. __bpf_call_base
1283		 * and the helper at imm relative to it are both in the core
1284		 * kernel.
1285 */
1286 addr = (u8 *)__bpf_call_base + imm;
1287 }
1288
1289 *func_addr = (unsigned long)addr;
1290 return 0;
1291}
1292
1293static int bpf_jit_blind_insn(const struct bpf_insn *from,
1294 const struct bpf_insn *aux,
1295 struct bpf_insn *to_buff,
1296 bool emit_zext)
1297{
1298 struct bpf_insn *to = to_buff;
1299 u32 imm_rnd = get_random_u32();
1300 s16 off;
1301
1302 BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
1303 BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
1304
1305 /* Constraints on AX register:
1306 *
1307 * AX register is inaccessible from user space. It is mapped in
1308 * all JITs, and used here for constant blinding rewrites. It is
1309 * typically "stateless" meaning its contents are only valid within
1310 * the executed instruction, but not across several instructions.
1311 * There are a few exceptions however which are further detailed
1312 * below.
1313 *
1314 * Constant blinding is only used by JITs, not in the interpreter.
1315	 * The interpreter uses AX on some occasions as a local temporary
1316	 * register, e.g. in DIV or MOD instructions.
1317 *
1318 * In restricted circumstances, the verifier can also use the AX
1319 * register for rewrites as long as they do not interfere with
1320 * the above cases!
1321 */
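	/* Worked example (illustrative only): with a random value imm_rnd,
	 * an immediate insn such as
	 *
	 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1234)
	 *
	 * is rewritten by the switch below into the equivalent sequence
	 *
	 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 0x1234)
	 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd)
	 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
	 *
	 * so the attacker-controllable constant never appears verbatim in
	 * the JITed image.
	 */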
1322 if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
1323 goto out;
1324
1325 if (from->imm == 0 &&
1326 (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
1327 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
1328 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
1329 goto out;
1330 }
1331
1332 switch (from->code) {
1333 case BPF_ALU | BPF_ADD | BPF_K:
1334 case BPF_ALU | BPF_SUB | BPF_K:
1335 case BPF_ALU | BPF_AND | BPF_K:
1336 case BPF_ALU | BPF_OR | BPF_K:
1337 case BPF_ALU | BPF_XOR | BPF_K:
1338 case BPF_ALU | BPF_MUL | BPF_K:
1339 case BPF_ALU | BPF_MOV | BPF_K:
1340 case BPF_ALU | BPF_DIV | BPF_K:
1341 case BPF_ALU | BPF_MOD | BPF_K:
1342 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1343 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1344 *to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
1345 break;
1346
1347 case BPF_ALU64 | BPF_ADD | BPF_K:
1348 case BPF_ALU64 | BPF_SUB | BPF_K:
1349 case BPF_ALU64 | BPF_AND | BPF_K:
1350 case BPF_ALU64 | BPF_OR | BPF_K:
1351 case BPF_ALU64 | BPF_XOR | BPF_K:
1352 case BPF_ALU64 | BPF_MUL | BPF_K:
1353 case BPF_ALU64 | BPF_MOV | BPF_K:
1354 case BPF_ALU64 | BPF_DIV | BPF_K:
1355 case BPF_ALU64 | BPF_MOD | BPF_K:
1356 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1357 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1358 *to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
1359 break;
1360
1361 case BPF_JMP | BPF_JEQ | BPF_K:
1362 case BPF_JMP | BPF_JNE | BPF_K:
1363 case BPF_JMP | BPF_JGT | BPF_K:
1364 case BPF_JMP | BPF_JLT | BPF_K:
1365 case BPF_JMP | BPF_JGE | BPF_K:
1366 case BPF_JMP | BPF_JLE | BPF_K:
1367 case BPF_JMP | BPF_JSGT | BPF_K:
1368 case BPF_JMP | BPF_JSLT | BPF_K:
1369 case BPF_JMP | BPF_JSGE | BPF_K:
1370 case BPF_JMP | BPF_JSLE | BPF_K:
1371 case BPF_JMP | BPF_JSET | BPF_K:
1372		/* Account for the extra offset in case of a backjump. */
1373 off = from->off;
1374 if (off < 0)
1375 off -= 2;
1376 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1377 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1378 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1379 break;
1380
1381 case BPF_JMP32 | BPF_JEQ | BPF_K:
1382 case BPF_JMP32 | BPF_JNE | BPF_K:
1383 case BPF_JMP32 | BPF_JGT | BPF_K:
1384 case BPF_JMP32 | BPF_JLT | BPF_K:
1385 case BPF_JMP32 | BPF_JGE | BPF_K:
1386 case BPF_JMP32 | BPF_JLE | BPF_K:
1387 case BPF_JMP32 | BPF_JSGT | BPF_K:
1388 case BPF_JMP32 | BPF_JSLT | BPF_K:
1389 case BPF_JMP32 | BPF_JSGE | BPF_K:
1390 case BPF_JMP32 | BPF_JSLE | BPF_K:
1391 case BPF_JMP32 | BPF_JSET | BPF_K:
1392		/* Account for the extra offset in case of a backjump. */
1393 off = from->off;
1394 if (off < 0)
1395 off -= 2;
1396 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1397 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1398 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1399 off);
1400 break;
1401
1402 case BPF_LD | BPF_IMM | BPF_DW:
1403 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1404 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1405 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1406 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1407 break;
1408 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1409 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1410 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1411 if (emit_zext)
1412 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1413 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1414 break;
1415
1416 case BPF_ST | BPF_MEM | BPF_DW:
1417 case BPF_ST | BPF_MEM | BPF_W:
1418 case BPF_ST | BPF_MEM | BPF_H:
1419 case BPF_ST | BPF_MEM | BPF_B:
1420 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1421 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1422 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1423 break;
1424 }
1425out:
1426 return to - to_buff;
1427}
1428
1429static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1430 gfp_t gfp_extra_flags)
1431{
1432 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1433 struct bpf_prog *fp;
1434
1435 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1436 if (fp != NULL) {
1437 /* aux->prog still points to the fp_other one, so
1438 * when promoting the clone to the real program,
1439 * this still needs to be adapted.
1440 */
1441 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1442 }
1443
1444 return fp;
1445}
1446
1447static void bpf_prog_clone_free(struct bpf_prog *fp)
1448{
1449 /* aux was stolen by the other clone, so we cannot free
1450 * it from this path! It will be freed eventually by the
1451 * other program on release.
1452 *
1453 * At this point, we don't need a deferred release since
1454 * clone is guaranteed to not be locked.
1455 */
1456 fp->aux = NULL;
1457 fp->stats = NULL;
1458 fp->active = NULL;
1459 __bpf_prog_free(fp);
1460}
1461
1462void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1463{
1464 /* We have to repoint aux->prog to self, as we don't
1465 * know whether fp here is the clone or the original.
1466 */
1467 fp->aux->prog = fp;
1468 bpf_prog_clone_free(fp_other);
1469}
1470
1471struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1472{
1473 struct bpf_insn insn_buff[16], aux[2];
1474 struct bpf_prog *clone, *tmp;
1475 int insn_delta, insn_cnt;
1476 struct bpf_insn *insn;
1477 int i, rewritten;
1478
1479 if (!prog->blinding_requested || prog->blinded)
1480 return prog;
1481
1482 clone = bpf_prog_clone_create(prog, GFP_USER);
1483 if (!clone)
1484 return ERR_PTR(-ENOMEM);
1485
1486 insn_cnt = clone->len;
1487 insn = clone->insnsi;
1488
1489 for (i = 0; i < insn_cnt; i++, insn++) {
1490 if (bpf_pseudo_func(insn)) {
1491 /* ld_imm64 with an address of bpf subprog is not
1492 * a user controlled constant. Don't randomize it,
1493 * since it will conflict with jit_subprogs() logic.
1494 */
1495 insn++;
1496 i++;
1497 continue;
1498 }
1499
1500 /* We temporarily need to hold the original ld64 insn
1501 * so that we can still access the first part in the
1502 * second blinding run.
1503 */
1504 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1505 insn[1].code == 0)
1506 memcpy(aux, insn, sizeof(aux));
1507
1508 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1509 clone->aux->verifier_zext);
1510 if (!rewritten)
1511 continue;
1512
1513 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1514 if (IS_ERR(tmp)) {
1515 /* Patching may have repointed aux->prog during
1516 * realloc from the original one, so we need to
1517 * fix it up here on error.
1518 */
1519 bpf_jit_prog_release_other(prog, clone);
1520 return tmp;
1521 }
1522
1523 clone = tmp;
1524 insn_delta = rewritten - 1;
1525
1526 /* Walk new program and skip insns we just inserted. */
1527 insn = clone->insnsi + i + insn_delta;
1528 insn_cnt += insn_delta;
1529 i += insn_delta;
1530 }
1531
1532 clone->blinded = 1;
1533 return clone;
1534}
1535#endif /* CONFIG_BPF_JIT */
1536
1537/* Base function for offset calculation. Needs to go into .text section,
1538 * therefore keeping it non-static as well; will also be used by JITs
1539 * anyway later on, so do not let the compiler omit it. This also needs
1540 * to go into kallsyms for correlation from e.g. bpftool, so naming
1541 * must not change.
1542 */
1543noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1544{
1545 return 0;
1546}
1547EXPORT_SYMBOL_GPL(__bpf_call_base);
1548
1549/* All UAPI available opcodes. */
1550#define BPF_INSN_MAP(INSN_2, INSN_3) \
1551 /* 32 bit ALU operations. */ \
1552 /* Register based. */ \
1553 INSN_3(ALU, ADD, X), \
1554 INSN_3(ALU, SUB, X), \
1555 INSN_3(ALU, AND, X), \
1556 INSN_3(ALU, OR, X), \
1557 INSN_3(ALU, LSH, X), \
1558 INSN_3(ALU, RSH, X), \
1559 INSN_3(ALU, XOR, X), \
1560 INSN_3(ALU, MUL, X), \
1561 INSN_3(ALU, MOV, X), \
1562 INSN_3(ALU, ARSH, X), \
1563 INSN_3(ALU, DIV, X), \
1564 INSN_3(ALU, MOD, X), \
1565 INSN_2(ALU, NEG), \
1566 INSN_3(ALU, END, TO_BE), \
1567 INSN_3(ALU, END, TO_LE), \
1568 /* Immediate based. */ \
1569 INSN_3(ALU, ADD, K), \
1570 INSN_3(ALU, SUB, K), \
1571 INSN_3(ALU, AND, K), \
1572 INSN_3(ALU, OR, K), \
1573 INSN_3(ALU, LSH, K), \
1574 INSN_3(ALU, RSH, K), \
1575 INSN_3(ALU, XOR, K), \
1576 INSN_3(ALU, MUL, K), \
1577 INSN_3(ALU, MOV, K), \
1578 INSN_3(ALU, ARSH, K), \
1579 INSN_3(ALU, DIV, K), \
1580 INSN_3(ALU, MOD, K), \
1581 /* 64 bit ALU operations. */ \
1582 /* Register based. */ \
1583 INSN_3(ALU64, ADD, X), \
1584 INSN_3(ALU64, SUB, X), \
1585 INSN_3(ALU64, AND, X), \
1586 INSN_3(ALU64, OR, X), \
1587 INSN_3(ALU64, LSH, X), \
1588 INSN_3(ALU64, RSH, X), \
1589 INSN_3(ALU64, XOR, X), \
1590 INSN_3(ALU64, MUL, X), \
1591 INSN_3(ALU64, MOV, X), \
1592 INSN_3(ALU64, ARSH, X), \
1593 INSN_3(ALU64, DIV, X), \
1594 INSN_3(ALU64, MOD, X), \
1595 INSN_2(ALU64, NEG), \
1596 INSN_3(ALU64, END, TO_LE), \
1597 /* Immediate based. */ \
1598 INSN_3(ALU64, ADD, K), \
1599 INSN_3(ALU64, SUB, K), \
1600 INSN_3(ALU64, AND, K), \
1601 INSN_3(ALU64, OR, K), \
1602 INSN_3(ALU64, LSH, K), \
1603 INSN_3(ALU64, RSH, K), \
1604 INSN_3(ALU64, XOR, K), \
1605 INSN_3(ALU64, MUL, K), \
1606 INSN_3(ALU64, MOV, K), \
1607 INSN_3(ALU64, ARSH, K), \
1608 INSN_3(ALU64, DIV, K), \
1609 INSN_3(ALU64, MOD, K), \
1610 /* Call instruction. */ \
1611 INSN_2(JMP, CALL), \
1612 /* Exit instruction. */ \
1613 INSN_2(JMP, EXIT), \
1614 /* 32-bit Jump instructions. */ \
1615 /* Register based. */ \
1616 INSN_3(JMP32, JEQ, X), \
1617 INSN_3(JMP32, JNE, X), \
1618 INSN_3(JMP32, JGT, X), \
1619 INSN_3(JMP32, JLT, X), \
1620 INSN_3(JMP32, JGE, X), \
1621 INSN_3(JMP32, JLE, X), \
1622 INSN_3(JMP32, JSGT, X), \
1623 INSN_3(JMP32, JSLT, X), \
1624 INSN_3(JMP32, JSGE, X), \
1625 INSN_3(JMP32, JSLE, X), \
1626 INSN_3(JMP32, JSET, X), \
1627 /* Immediate based. */ \
1628 INSN_3(JMP32, JEQ, K), \
1629 INSN_3(JMP32, JNE, K), \
1630 INSN_3(JMP32, JGT, K), \
1631 INSN_3(JMP32, JLT, K), \
1632 INSN_3(JMP32, JGE, K), \
1633 INSN_3(JMP32, JLE, K), \
1634 INSN_3(JMP32, JSGT, K), \
1635 INSN_3(JMP32, JSLT, K), \
1636 INSN_3(JMP32, JSGE, K), \
1637 INSN_3(JMP32, JSLE, K), \
1638 INSN_3(JMP32, JSET, K), \
1639 /* Jump instructions. */ \
1640 /* Register based. */ \
1641 INSN_3(JMP, JEQ, X), \
1642 INSN_3(JMP, JNE, X), \
1643 INSN_3(JMP, JGT, X), \
1644 INSN_3(JMP, JLT, X), \
1645 INSN_3(JMP, JGE, X), \
1646 INSN_3(JMP, JLE, X), \
1647 INSN_3(JMP, JSGT, X), \
1648 INSN_3(JMP, JSLT, X), \
1649 INSN_3(JMP, JSGE, X), \
1650 INSN_3(JMP, JSLE, X), \
1651 INSN_3(JMP, JSET, X), \
1652 /* Immediate based. */ \
1653 INSN_3(JMP, JEQ, K), \
1654 INSN_3(JMP, JNE, K), \
1655 INSN_3(JMP, JGT, K), \
1656 INSN_3(JMP, JLT, K), \
1657 INSN_3(JMP, JGE, K), \
1658 INSN_3(JMP, JLE, K), \
1659 INSN_3(JMP, JSGT, K), \
1660 INSN_3(JMP, JSLT, K), \
1661 INSN_3(JMP, JSGE, K), \
1662 INSN_3(JMP, JSLE, K), \
1663 INSN_3(JMP, JSET, K), \
1664 INSN_2(JMP, JA), \
1665 INSN_2(JMP32, JA), \
1666 /* Store instructions. */ \
1667 /* Register based. */ \
1668 INSN_3(STX, MEM, B), \
1669 INSN_3(STX, MEM, H), \
1670 INSN_3(STX, MEM, W), \
1671 INSN_3(STX, MEM, DW), \
1672 INSN_3(STX, ATOMIC, W), \
1673 INSN_3(STX, ATOMIC, DW), \
1674 /* Immediate based. */ \
1675 INSN_3(ST, MEM, B), \
1676 INSN_3(ST, MEM, H), \
1677 INSN_3(ST, MEM, W), \
1678 INSN_3(ST, MEM, DW), \
1679 /* Load instructions. */ \
1680 /* Register based. */ \
1681 INSN_3(LDX, MEM, B), \
1682 INSN_3(LDX, MEM, H), \
1683 INSN_3(LDX, MEM, W), \
1684 INSN_3(LDX, MEM, DW), \
1685 INSN_3(LDX, MEMSX, B), \
1686 INSN_3(LDX, MEMSX, H), \
1687 INSN_3(LDX, MEMSX, W), \
1688 /* Immediate based. */ \
1689 INSN_3(LD, IMM, DW)
1690
1691bool bpf_opcode_in_insntable(u8 code)
1692{
1693#define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true
1694#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1695 static const bool public_insntable[256] = {
1696 [0 ... 255] = false,
1697 /* Now overwrite non-defaults ... */
1698 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1699 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1700 [BPF_LD | BPF_ABS | BPF_B] = true,
1701 [BPF_LD | BPF_ABS | BPF_H] = true,
1702 [BPF_LD | BPF_ABS | BPF_W] = true,
1703 [BPF_LD | BPF_IND | BPF_B] = true,
1704 [BPF_LD | BPF_IND | BPF_H] = true,
1705 [BPF_LD | BPF_IND | BPF_W] = true,
1706 [BPF_JMP | BPF_JCOND] = true,
1707 };
1708#undef BPF_INSN_3_TBL
1709#undef BPF_INSN_2_TBL
1710 return public_insntable[code];
1711}
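/* Sketch of how the table is consumed: the verifier rejects any instruction
 * whose opcode is not listed here (compare the default_label comment in the
 * interpreter below), roughly along the lines of
 *
 *	if (!bpf_opcode_in_insntable(insn->code))
 *		return -EINVAL;
 */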
1712
1713#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1714/**
1715 * ___bpf_prog_run - run eBPF program on a given context
 * @regs: the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 * @insn: the array of eBPF instructions to execute
1718 *
1719 * Decode and execute eBPF instructions.
1720 *
1721 * Return: whatever value is in %BPF_R0 at program exit
1722 */
1723static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1724{
1725#define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
1726#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1727 static const void * const jumptable[256] __annotate_jump_table = {
1728 [0 ... 255] = &&default_label,
1729 /* Now overwrite non-defaults ... */
1730 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1731 /* Non-UAPI available opcodes. */
1732 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1733 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1734 [BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
1735 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1736 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1737 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1738 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1739 [BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
1740 [BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
1741 [BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
1742 };
1743#undef BPF_INSN_3_LBL
1744#undef BPF_INSN_2_LBL
1745 u32 tail_call_cnt = 0;
1746
1747#define CONT ({ insn++; goto select_insn; })
1748#define CONT_JMP ({ insn++; goto select_insn; })
1749
1750select_insn:
1751 goto *jumptable[insn->code];
1752
	/* Explicitly mask the register-based shift amounts with 63 or 31
	 * to avoid undefined behavior. Normally this won't affect the
	 * generated code: on native 64 bit archs such as x86-64 or arm64,
	 * for example, the compiler optimizes the AND away for the
	 * interpreter. The JIT backends, in contrast, compile the BPF
	 * shift operations to machine instructions which produce
	 * implementation-defined results in such a case; the resulting
	 * contents of the register may be arbitrary, but program behaviour
	 * as a whole remains defined. In other words, in case of JIT
	 * backends, the AND must /not/ be added to the emitted LSH/RSH/ARSH
	 * translation.
	 */
1764 /* ALU (shifts) */
1765#define SHT(OPCODE, OP) \
1766 ALU64_##OPCODE##_X: \
1767 DST = DST OP (SRC & 63); \
1768 CONT; \
1769 ALU_##OPCODE##_X: \
1770 DST = (u32) DST OP ((u32) SRC & 31); \
1771 CONT; \
1772 ALU64_##OPCODE##_K: \
1773 DST = DST OP IMM; \
1774 CONT; \
1775 ALU_##OPCODE##_K: \
1776 DST = (u32) DST OP (u32) IMM; \
1777 CONT;
1778 /* ALU (rest) */
1779#define ALU(OPCODE, OP) \
1780 ALU64_##OPCODE##_X: \
1781 DST = DST OP SRC; \
1782 CONT; \
1783 ALU_##OPCODE##_X: \
1784 DST = (u32) DST OP (u32) SRC; \
1785 CONT; \
1786 ALU64_##OPCODE##_K: \
1787 DST = DST OP IMM; \
1788 CONT; \
1789 ALU_##OPCODE##_K: \
1790 DST = (u32) DST OP (u32) IMM; \
1791 CONT;
1792 ALU(ADD, +)
1793 ALU(SUB, -)
1794 ALU(AND, &)
1795 ALU(OR, |)
1796 ALU(XOR, ^)
1797 ALU(MUL, *)
1798 SHT(LSH, <<)
1799 SHT(RSH, >>)
1800#undef SHT
1801#undef ALU
1802 ALU_NEG:
1803 DST = (u32) -DST;
1804 CONT;
1805 ALU64_NEG:
1806 DST = -DST;
1807 CONT;
1808 ALU_MOV_X:
1809 switch (OFF) {
1810 case 0:
1811 DST = (u32) SRC;
1812 break;
1813 case 8:
1814 DST = (u32)(s8) SRC;
1815 break;
1816 case 16:
1817 DST = (u32)(s16) SRC;
1818 break;
1819 }
1820 CONT;
1821 ALU_MOV_K:
1822 DST = (u32) IMM;
1823 CONT;
1824 ALU64_MOV_X:
1825 switch (OFF) {
1826 case 0:
1827 DST = SRC;
1828 break;
1829 case 8:
1830 DST = (s8) SRC;
1831 break;
1832 case 16:
1833 DST = (s16) SRC;
1834 break;
1835 case 32:
1836 DST = (s32) SRC;
1837 break;
1838 }
1839 CONT;
1840 ALU64_MOV_K:
1841 DST = IMM;
1842 CONT;
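	/* BPF_LD | BPF_IMM | BPF_DW is the one instruction spanning two
	 * struct bpf_insn slots: insn[0].imm carries the low 32 bits of the
	 * constant and insn[1].imm the high 32 bits, hence the extra insn++
	 * below to step over the second half.
	 */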
1843 LD_IMM_DW:
1844 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1845 insn++;
1846 CONT;
1847 ALU_ARSH_X:
1848 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1849 CONT;
1850 ALU_ARSH_K:
1851 DST = (u64) (u32) (((s32) DST) >> IMM);
1852 CONT;
1853 ALU64_ARSH_X:
1854 (*(s64 *) &DST) >>= (SRC & 63);
1855 CONT;
1856 ALU64_ARSH_K:
1857 (*(s64 *) &DST) >>= IMM;
1858 CONT;
1859 ALU64_MOD_X:
1860 switch (OFF) {
1861 case 0:
1862 div64_u64_rem(DST, SRC, &AX);
1863 DST = AX;
1864 break;
1865 case 1:
1866 AX = div64_s64(DST, SRC);
1867 DST = DST - AX * SRC;
1868 break;
1869 }
1870 CONT;
1871 ALU_MOD_X:
1872 switch (OFF) {
1873 case 0:
1874 AX = (u32) DST;
1875 DST = do_div(AX, (u32) SRC);
1876 break;
1877 case 1:
1878 AX = abs((s32)DST);
1879 AX = do_div(AX, abs((s32)SRC));
1880 if ((s32)DST < 0)
1881 DST = (u32)-AX;
1882 else
1883 DST = (u32)AX;
1884 break;
1885 }
1886 CONT;
1887 ALU64_MOD_K:
1888 switch (OFF) {
1889 case 0:
1890 div64_u64_rem(DST, IMM, &AX);
1891 DST = AX;
1892 break;
1893 case 1:
1894 AX = div64_s64(DST, IMM);
1895 DST = DST - AX * IMM;
1896 break;
1897 }
1898 CONT;
1899 ALU_MOD_K:
1900 switch (OFF) {
1901 case 0:
1902 AX = (u32) DST;
1903 DST = do_div(AX, (u32) IMM);
1904 break;
1905 case 1:
1906 AX = abs((s32)DST);
1907 AX = do_div(AX, abs((s32)IMM));
1908 if ((s32)DST < 0)
1909 DST = (u32)-AX;
1910 else
1911 DST = (u32)AX;
1912 break;
1913 }
1914 CONT;
1915 ALU64_DIV_X:
1916 switch (OFF) {
1917 case 0:
1918 DST = div64_u64(DST, SRC);
1919 break;
1920 case 1:
1921 DST = div64_s64(DST, SRC);
1922 break;
1923 }
1924 CONT;
1925 ALU_DIV_X:
1926 switch (OFF) {
1927 case 0:
1928 AX = (u32) DST;
1929 do_div(AX, (u32) SRC);
1930 DST = (u32) AX;
1931 break;
1932 case 1:
1933 AX = abs((s32)DST);
1934 do_div(AX, abs((s32)SRC));
1935 if (((s32)DST < 0) == ((s32)SRC < 0))
1936 DST = (u32)AX;
1937 else
1938 DST = (u32)-AX;
1939 break;
1940 }
1941 CONT;
1942 ALU64_DIV_K:
1943 switch (OFF) {
1944 case 0:
1945 DST = div64_u64(DST, IMM);
1946 break;
1947 case 1:
1948 DST = div64_s64(DST, IMM);
1949 break;
1950 }
1951 CONT;
1952 ALU_DIV_K:
1953 switch (OFF) {
1954 case 0:
1955 AX = (u32) DST;
1956 do_div(AX, (u32) IMM);
1957 DST = (u32) AX;
1958 break;
1959 case 1:
1960 AX = abs((s32)DST);
1961 do_div(AX, abs((s32)IMM));
1962 if (((s32)DST < 0) == ((s32)IMM < 0))
1963 DST = (u32)AX;
1964 else
1965 DST = (u32)-AX;
1966 break;
1967 }
1968 CONT;
1969 ALU_END_TO_BE:
1970 switch (IMM) {
1971 case 16:
1972 DST = (__force u16) cpu_to_be16(DST);
1973 break;
1974 case 32:
1975 DST = (__force u32) cpu_to_be32(DST);
1976 break;
1977 case 64:
1978 DST = (__force u64) cpu_to_be64(DST);
1979 break;
1980 }
1981 CONT;
1982 ALU_END_TO_LE:
1983 switch (IMM) {
1984 case 16:
1985 DST = (__force u16) cpu_to_le16(DST);
1986 break;
1987 case 32:
1988 DST = (__force u32) cpu_to_le32(DST);
1989 break;
1990 case 64:
1991 DST = (__force u64) cpu_to_le64(DST);
1992 break;
1993 }
1994 CONT;
1995 ALU64_END_TO_LE:
1996 switch (IMM) {
1997 case 16:
1998 DST = (__force u16) __swab16(DST);
1999 break;
2000 case 32:
2001 DST = (__force u32) __swab32(DST);
2002 break;
2003 case 64:
2004 DST = (__force u64) __swab64(DST);
2005 break;
2006 }
2007 CONT;
2008
2009 /* CALL */
2010 JMP_CALL:
2011 /* Function call scratches BPF_R1-BPF_R5 registers,
2012 * preserves BPF_R6-BPF_R9, and stores return value
2013 * into BPF_R0.
2014 */
2015 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
2016 BPF_R4, BPF_R5);
2017 CONT;
2018
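	/* BPF_CALL_ARGS is not exposed via UAPI: bpf_patch_call_args() below
	 * rewrites bpf-to-bpf pseudo calls into it when subprogs have to run
	 * in the interpreter. The original call offset is stashed in
	 * insn->off so the callee's instructions (insn + insn->off + 1) can
	 * be handed to the interpreter variant selected via insn->imm.
	 */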
2019 JMP_CALL_ARGS:
2020 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
2021 BPF_R3, BPF_R4,
2022 BPF_R5,
2023 insn + insn->off + 1);
2024 CONT;
2025
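	/* Interpreter side of the bpf_tail_call(ctx, &prog_array, index)
	 * helper: per the helper's calling convention BPF_R2 carries the
	 * prog array map and BPF_R3 the index, which is why both are
	 * consumed directly below.
	 */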
2026 JMP_TAIL_CALL: {
2027 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
2028 struct bpf_array *array = container_of(map, struct bpf_array, map);
2029 struct bpf_prog *prog;
2030 u32 index = BPF_R3;
2031
2032 if (unlikely(index >= array->map.max_entries))
2033 goto out;
2034
2035 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
2036 goto out;
2037
2038 tail_call_cnt++;
2039
2040 prog = READ_ONCE(array->ptrs[index]);
2041 if (!prog)
2042 goto out;
2043
2044 /* ARG1 at this point is guaranteed to point to CTX from
2045 * the verifier side due to the fact that the tail call is
2046 * handled like a helper, that is, bpf_tail_call_proto,
2047 * where arg1_type is ARG_PTR_TO_CTX.
2048 */
2049 insn = prog->insnsi;
2050 goto select_insn;
2051out:
2052 CONT;
2053 }
2054 JMP_JA:
2055 insn += insn->off;
2056 CONT;
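	/* JMP32 | JA is the long-jump ("gotol") form: the 16-bit off field
	 * is too small for some jumps, so the displacement is taken from
	 * the 32-bit imm field instead.
	 */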
2057 JMP32_JA:
2058 insn += insn->imm;
2059 CONT;
2060 JMP_EXIT:
2061 return BPF_R0;
2062 /* JMP */
2063#define COND_JMP(SIGN, OPCODE, CMP_OP) \
2064 JMP_##OPCODE##_X: \
2065 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
2066 insn += insn->off; \
2067 CONT_JMP; \
2068 } \
2069 CONT; \
2070 JMP32_##OPCODE##_X: \
2071 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
2072 insn += insn->off; \
2073 CONT_JMP; \
2074 } \
2075 CONT; \
2076 JMP_##OPCODE##_K: \
2077 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
2078 insn += insn->off; \
2079 CONT_JMP; \
2080 } \
2081 CONT; \
2082 JMP32_##OPCODE##_K: \
2083 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
2084 insn += insn->off; \
2085 CONT_JMP; \
2086 } \
2087 CONT;
2088 COND_JMP(u, JEQ, ==)
2089 COND_JMP(u, JNE, !=)
2090 COND_JMP(u, JGT, >)
2091 COND_JMP(u, JLT, <)
2092 COND_JMP(u, JGE, >=)
2093 COND_JMP(u, JLE, <=)
2094 COND_JMP(u, JSET, &)
2095 COND_JMP(s, JSGT, >)
2096 COND_JMP(s, JSLT, <)
2097 COND_JMP(s, JSGE, >=)
2098 COND_JMP(s, JSLE, <=)
2099#undef COND_JMP
	/* ST, STX and LDX */
2101 ST_NOSPEC:
2102 /* Speculation barrier for mitigating Speculative Store Bypass.
2103 * In case of arm64, we rely on the firmware mitigation as
2104 * controlled via the ssbd kernel parameter. Whenever the
2105 * mitigation is enabled, it works for all of the kernel code
2106 * with no need to provide any additional instructions here.
2107 * In case of x86, we use 'lfence' insn for mitigation. We
2108 * reuse preexisting logic from Spectre v1 mitigation that
2109 * happens to produce the required code on x86 for v4 as well.
2110 */
2111 barrier_nospec();
2112 CONT;
2113#define LDST(SIZEOP, SIZE) \
2114 STX_MEM_##SIZEOP: \
2115 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
2116 CONT; \
2117 ST_MEM_##SIZEOP: \
2118 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
2119 CONT; \
2120 LDX_MEM_##SIZEOP: \
2121 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
2122 CONT; \
2123 LDX_PROBE_MEM_##SIZEOP: \
2124 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \
2125 (const void *)(long) (SRC + insn->off)); \
2126 DST = *((SIZE *)&DST); \
2127 CONT;
2128
2129 LDST(B, u8)
2130 LDST(H, u16)
2131 LDST(W, u32)
2132 LDST(DW, u64)
2133#undef LDST
2134
2135#define LDSX(SIZEOP, SIZE) \
2136 LDX_MEMSX_##SIZEOP: \
2137 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
2138 CONT; \
2139 LDX_PROBE_MEMSX_##SIZEOP: \
2140 bpf_probe_read_kernel_common(&DST, sizeof(SIZE), \
2141 (const void *)(long) (SRC + insn->off)); \
2142 DST = *((SIZE *)&DST); \
2143 CONT;
2144
2145 LDSX(B, s8)
2146 LDSX(H, s16)
2147 LDSX(W, s32)
2148#undef LDSX
2149
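	/* Atomic read-modify-write ops on DST + off: the plain form only
	 * updates memory, while the BPF_FETCH variant additionally returns
	 * the old value in the source register, as the SRC =
	 * atomic*_fetch_*() assignments below show.
	 */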
2150#define ATOMIC_ALU_OP(BOP, KOP) \
2151 case BOP: \
2152 if (BPF_SIZE(insn->code) == BPF_W) \
2153 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
2154 (DST + insn->off)); \
2155 else \
2156 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
2157 (DST + insn->off)); \
2158 break; \
2159 case BOP | BPF_FETCH: \
2160 if (BPF_SIZE(insn->code) == BPF_W) \
2161 SRC = (u32) atomic_fetch_##KOP( \
2162 (u32) SRC, \
2163 (atomic_t *)(unsigned long) (DST + insn->off)); \
2164 else \
2165 SRC = (u64) atomic64_fetch_##KOP( \
2166 (u64) SRC, \
2167 (atomic64_t *)(unsigned long) (DST + insn->off)); \
2168 break;
2169
2170 STX_ATOMIC_DW:
2171 STX_ATOMIC_W:
2172 switch (IMM) {
2173 ATOMIC_ALU_OP(BPF_ADD, add)
2174 ATOMIC_ALU_OP(BPF_AND, and)
2175 ATOMIC_ALU_OP(BPF_OR, or)
2176 ATOMIC_ALU_OP(BPF_XOR, xor)
2177#undef ATOMIC_ALU_OP
2178
2179 case BPF_XCHG:
2180 if (BPF_SIZE(insn->code) == BPF_W)
2181 SRC = (u32) atomic_xchg(
2182 (atomic_t *)(unsigned long) (DST + insn->off),
2183 (u32) SRC);
2184 else
2185 SRC = (u64) atomic64_xchg(
2186 (atomic64_t *)(unsigned long) (DST + insn->off),
2187 (u64) SRC);
2188 break;
2189 case BPF_CMPXCHG:
2190 if (BPF_SIZE(insn->code) == BPF_W)
2191 BPF_R0 = (u32) atomic_cmpxchg(
2192 (atomic_t *)(unsigned long) (DST + insn->off),
2193 (u32) BPF_R0, (u32) SRC);
2194 else
2195 BPF_R0 = (u64) atomic64_cmpxchg(
2196 (atomic64_t *)(unsigned long) (DST + insn->off),
2197 (u64) BPF_R0, (u64) SRC);
2198 break;
2199
2200 default:
2201 goto default_label;
2202 }
2203 CONT;
2204
2205 default_label:
2206 /* If we ever reach this, we have a bug somewhere. Die hard here
2207 * instead of just returning 0; we could be somewhere in a subprog,
2208 * so execution could continue otherwise which we do /not/ want.
2209 *
2210 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
2211 */
2212 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2213 insn->code, insn->imm);
2214 BUG_ON(1);
2215 return 0;
2216}
2217
2218#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2219#define DEFINE_BPF_PROG_RUN(stack_size) \
2220static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2221{ \
2222 u64 stack[stack_size / sizeof(u64)]; \
2223 u64 regs[MAX_BPF_EXT_REG] = {}; \
2224\
2225 kmsan_unpoison_memory(stack, sizeof(stack)); \
2226 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2227 ARG1 = (u64) (unsigned long) ctx; \
2228 return ___bpf_prog_run(regs, insn); \
2229}
2230
2231#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2232#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2233static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2234 const struct bpf_insn *insn) \
2235{ \
2236 u64 stack[stack_size / sizeof(u64)]; \
2237 u64 regs[MAX_BPF_EXT_REG]; \
2238\
2239 kmsan_unpoison_memory(stack, sizeof(stack)); \
2240 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2241 BPF_R1 = r1; \
2242 BPF_R2 = r2; \
2243 BPF_R3 = r3; \
2244 BPF_R4 = r4; \
2245 BPF_R5 = r5; \
2246 return ___bpf_prog_run(regs, insn); \
2247}
2248
2249#define EVAL1(FN, X) FN(X)
2250#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2251#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2252#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2253#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2254#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2255
2256EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2257EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2258EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2259
2260EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2261EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2262EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
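/* The EVAL chains above stamp out one interpreter per supported stack size,
 * i.e. __bpf_prog_run32 ... __bpf_prog_run512 (plus the _args variants) in
 * 32-byte steps. A program's entry point is picked by rounding its stack
 * depth up to the next multiple of 32: e.g. a hypothetical stack_depth of
 * 100 rounds up to 128 and selects index 128 / 32 - 1 = 3, that is
 * __bpf_prog_run128 (see bpf_prog_select_func() further below).
 */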
2263
2264#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2265
2266static unsigned int (*interpreters[])(const void *ctx,
2267 const struct bpf_insn *insn) = {
2268EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2269EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2270EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2271};
2272#undef PROG_NAME_LIST
2273#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2274static __maybe_unused
2275u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2276 const struct bpf_insn *insn) = {
2277EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2278EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2279EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2280};
2281#undef PROG_NAME_LIST
2282
2283#ifdef CONFIG_BPF_SYSCALL
2284void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2285{
2286 stack_depth = max_t(u32, stack_depth, 1);
2287 insn->off = (s16) insn->imm;
2288 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2289 __bpf_call_base_args;
2290 insn->code = BPF_JMP | BPF_CALL_ARGS;
2291}
2292#endif
2293#else
2294static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2295 const struct bpf_insn *insn)
2296{
2297 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2298 * is not working properly, so warn about it!
2299 */
2300 WARN_ON_ONCE(1);
2301 return 0;
2302}
2303#endif
2304
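/* Programs placed into maps that can hold other programs (prog arrays for
 * tail calls, devmap/cpumap entries) must be mutually compatible: the first
 * program inserted fixes the map's "owner" characteristics (program type,
 * JITed or not, frags support), and every later insertion has to match
 * them, since e.g. a tail call jumps between programs without re-checking
 * any of this at run time.
 */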
2305bool bpf_prog_map_compatible(struct bpf_map *map,
2306 const struct bpf_prog *fp)
2307{
2308 enum bpf_prog_type prog_type = resolve_prog_type(fp);
2309 bool ret;
2310 struct bpf_prog_aux *aux = fp->aux;
2311
2312 if (fp->kprobe_override)
2313 return false;
2314
2315 /* XDP programs inserted into maps are not guaranteed to run on
2316 * a particular netdev (and can run outside driver context entirely
2317 * in the case of devmap and cpumap). Until device checks
2318 * are implemented, prohibit adding dev-bound programs to program maps.
2319 */
2320 if (bpf_prog_is_dev_bound(aux))
2321 return false;
2322
2323 spin_lock(&map->owner.lock);
2324 if (!map->owner.type) {
2325 /* There's no owner yet where we could check for
2326 * compatibility.
2327 */
2328 map->owner.type = prog_type;
2329 map->owner.jited = fp->jited;
2330 map->owner.xdp_has_frags = aux->xdp_has_frags;
2331 map->owner.attach_func_proto = aux->attach_func_proto;
2332 ret = true;
2333 } else {
2334 ret = map->owner.type == prog_type &&
2335 map->owner.jited == fp->jited &&
2336 map->owner.xdp_has_frags == aux->xdp_has_frags;
2337 if (ret &&
2338 map->owner.attach_func_proto != aux->attach_func_proto) {
2339 switch (prog_type) {
2340 case BPF_PROG_TYPE_TRACING:
2341 case BPF_PROG_TYPE_LSM:
2342 case BPF_PROG_TYPE_EXT:
2343 case BPF_PROG_TYPE_STRUCT_OPS:
2344 ret = false;
2345 break;
2346 default:
2347 break;
2348 }
2349 }
2350 }
2351 spin_unlock(&map->owner.lock);
2352
2353 return ret;
2354}
2355
2356static int bpf_check_tail_call(const struct bpf_prog *fp)
2357{
2358 struct bpf_prog_aux *aux = fp->aux;
2359 int i, ret = 0;
2360
2361 mutex_lock(&aux->used_maps_mutex);
2362 for (i = 0; i < aux->used_map_cnt; i++) {
2363 struct bpf_map *map = aux->used_maps[i];
2364
2365 if (!map_type_contains_progs(map))
2366 continue;
2367
2368 if (!bpf_prog_map_compatible(map, fp)) {
2369 ret = -EINVAL;
2370 goto out;
2371 }
2372 }
2373
2374out:
2375 mutex_unlock(&aux->used_maps_mutex);
2376 return ret;
2377}
2378
2379static void bpf_prog_select_func(struct bpf_prog *fp)
2380{
2381#ifndef CONFIG_BPF_JIT_ALWAYS_ON
2382 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2383
2384 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
2385#else
2386 fp->bpf_func = __bpf_prog_ret0_warn;
2387#endif
2388}
2389
2390/**
2391 * bpf_prog_select_runtime - select exec runtime for BPF program
2392 * @fp: bpf_prog populated with BPF program
2393 * @err: pointer to error variable
2394 *
2395 * Try to JIT eBPF program, if JIT is not available, use interpreter.
2396 * The BPF program will be executed via bpf_prog_run() function.
2397 *
2398 * Return: the &fp argument along with &err set to 0 for success or
2399 * a negative errno code on failure
2400 */
2401struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2402{
	/* In case of BPF to BPF calls, the verifier has already done all
	 * the preparatory work with regard to JITing, etc.
	 */
2406 bool jit_needed = false;
2407
2408 if (fp->bpf_func)
2409 goto finalize;
2410
2411 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2412 bpf_prog_has_kfunc_call(fp))
2413 jit_needed = true;
2414
2415 bpf_prog_select_func(fp);
2416
	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but would fall back to the interpreter.
	 */
2423 if (!bpf_prog_is_offloaded(fp->aux)) {
2424 *err = bpf_prog_alloc_jited_linfo(fp);
2425 if (*err)
2426 return fp;
2427
2428 fp = bpf_int_jit_compile(fp);
2429 bpf_prog_jit_attempt_done(fp);
2430 if (!fp->jited && jit_needed) {
2431 *err = -ENOTSUPP;
2432 return fp;
2433 }
2434 } else {
2435 *err = bpf_prog_offload_compile(fp);
2436 if (*err)
2437 return fp;
2438 }
2439
2440finalize:
2441 *err = bpf_prog_lock_ro(fp);
2442 if (*err)
2443 return fp;
2444
	/* The tail call compatibility check can only be done at
	 * this late stage as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
2450 *err = bpf_check_tail_call(fp);
2451
2452 return fp;
2453}
2454EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
2455
2456static unsigned int __bpf_prog_ret1(const void *ctx,
2457 const struct bpf_insn *insn)
2458{
2459 return 1;
2460}
2461
2462static struct bpf_prog_dummy {
2463 struct bpf_prog prog;
2464} dummy_bpf_prog = {
2465 .prog = {
2466 .bpf_func = __bpf_prog_ret1,
2467 },
2468};
2469
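/* Shared, permanently empty prog array: bpf_prog_array_alloc() hands this
 * out whenever prog_cnt is 0, so callers without any attached programs do
 * not need a dedicated allocation, and bpf_prog_array_free() knows to skip
 * freeing it.
 */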
2470struct bpf_empty_prog_array bpf_empty_prog_array = {
2471 .null_prog = NULL,
2472};
2473EXPORT_SYMBOL(bpf_empty_prog_array);
2474
2475struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2476{
2477 struct bpf_prog_array *p;
2478
2479 if (prog_cnt)
2480 p = kzalloc(struct_size(p, items, prog_cnt + 1), flags);
2481 else
2482 p = &bpf_empty_prog_array.hdr;
2483
2484 return p;
2485}
2486
2487void bpf_prog_array_free(struct bpf_prog_array *progs)
2488{
2489 if (!progs || progs == &bpf_empty_prog_array.hdr)
2490 return;
2491 kfree_rcu(progs, rcu);
2492}
2493
2494static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2495{
2496 struct bpf_prog_array *progs;
2497
2498 /* If RCU Tasks Trace grace period implies RCU grace period, there is
2499 * no need to call kfree_rcu(), just call kfree() directly.
2500 */
2501 progs = container_of(rcu, struct bpf_prog_array, rcu);
2502 if (rcu_trace_implies_rcu_gp())
2503 kfree(progs);
2504 else
2505 kfree_rcu(progs, rcu);
2506}
2507
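/* Variant of bpf_prog_array_free() for arrays that may still be walked by
 * sleepable BPF programs under rcu_read_lock_trace(): the free is deferred
 * until an RCU Tasks Trace grace period has elapsed, and the callback above
 * additionally waits for a regular RCU grace period unless the former
 * already implies the latter.
 */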
2508void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2509{
2510 if (!progs || progs == &bpf_empty_prog_array.hdr)
2511 return;
2512 call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2513}
2514
2515int bpf_prog_array_length(struct bpf_prog_array *array)
2516{
2517 struct bpf_prog_array_item *item;
2518 u32 cnt = 0;
2519
2520 for (item = array->items; item->prog; item++)
2521 if (item->prog != &dummy_bpf_prog.prog)
2522 cnt++;
2523 return cnt;
2524}
2525
2526bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2527{
2528 struct bpf_prog_array_item *item;
2529
2530 for (item = array->items; item->prog; item++)
2531 if (item->prog != &dummy_bpf_prog.prog)
2532 return false;
2533 return true;
2534}
2535
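/* Copy up to request_cnt program ids into prog_ids, skipping dummy entries.
 * Returns true if the array holds more (non-dummy) programs than requested,
 * which callers translate into -ENOSPC.
 */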
2536static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2537 u32 *prog_ids,
2538 u32 request_cnt)
2539{
2540 struct bpf_prog_array_item *item;
2541 int i = 0;
2542
2543 for (item = array->items; item->prog; item++) {
2544 if (item->prog == &dummy_bpf_prog.prog)
2545 continue;
2546 prog_ids[i] = item->prog->aux->id;
2547 if (++i == request_cnt) {
2548 item++;
2549 break;
2550 }
2551 }
2552
2553 return !!(item->prog);
2554}
2555
2556int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2557 __u32 __user *prog_ids, u32 cnt)
2558{
2559 unsigned long err = 0;
2560 bool nospc;
2561 u32 *ids;
2562
2563 /* users of this function are doing:
2564 * cnt = bpf_prog_array_length();
2565 * if (cnt > 0)
2566 * bpf_prog_array_copy_to_user(..., cnt);
	 * so the kcalloc below doesn't need an extra cnt > 0 check.
2568 */
2569 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2570 if (!ids)
2571 return -ENOMEM;
2572 nospc = bpf_prog_array_copy_core(array, ids, cnt);
2573 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2574 kfree(ids);
2575 if (err)
2576 return -EFAULT;
2577 if (nospc)
2578 return -ENOSPC;
2579 return 0;
2580}
2581
2582void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2583 struct bpf_prog *old_prog)
2584{
2585 struct bpf_prog_array_item *item;
2586
2587 for (item = array->items; item->prog; item++)
2588 if (item->prog == old_prog) {
2589 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2590 break;
2591 }
2592}
2593
2594/**
2595 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2596 * index into the program array with
2597 * a dummy no-op program.
2598 * @array: a bpf_prog_array
2599 * @index: the index of the program to replace
2600 *
 * Skips over dummy programs by not counting them when calculating
2602 * the position of the program to replace.
2603 *
2604 * Return:
2605 * * 0 - Success
2606 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2607 * * -ENOENT - Index out of range
2608 */
2609int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2610{
2611 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2612}
2613
2614/**
2615 * bpf_prog_array_update_at() - Updates the program at the given index
2616 * into the program array.
2617 * @array: a bpf_prog_array
2618 * @index: the index of the program to update
2619 * @prog: the program to insert into the array
2620 *
 * Skips over dummy programs by not counting them when calculating
2622 * the position of the program to update.
2623 *
2624 * Return:
2625 * * 0 - Success
2626 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2627 * * -ENOENT - Index out of range
2628 */
2629int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2630 struct bpf_prog *prog)
2631{
2632 struct bpf_prog_array_item *item;
2633
2634 if (unlikely(index < 0))
2635 return -EINVAL;
2636
2637 for (item = array->items; item->prog; item++) {
2638 if (item->prog == &dummy_bpf_prog.prog)
2639 continue;
2640 if (!index) {
2641 WRITE_ONCE(item->prog, prog);
2642 return 0;
2643 }
2644 index--;
2645 }
2646 return -ENOENT;
2647}
2648
2649int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2650 struct bpf_prog *exclude_prog,
2651 struct bpf_prog *include_prog,
2652 u64 bpf_cookie,
2653 struct bpf_prog_array **new_array)
2654{
2655 int new_prog_cnt, carry_prog_cnt = 0;
2656 struct bpf_prog_array_item *existing, *new;
2657 struct bpf_prog_array *array;
2658 bool found_exclude = false;
2659
2660 /* Figure out how many existing progs we need to carry over to
2661 * the new array.
2662 */
2663 if (old_array) {
2664 existing = old_array->items;
2665 for (; existing->prog; existing++) {
2666 if (existing->prog == exclude_prog) {
2667 found_exclude = true;
2668 continue;
2669 }
2670 if (existing->prog != &dummy_bpf_prog.prog)
2671 carry_prog_cnt++;
2672 if (existing->prog == include_prog)
2673 return -EEXIST;
2674 }
2675 }
2676
2677 if (exclude_prog && !found_exclude)
2678 return -ENOENT;
2679
2680 /* How many progs (not NULL) will be in the new array? */
2681 new_prog_cnt = carry_prog_cnt;
2682 if (include_prog)
2683 new_prog_cnt += 1;
2684
2685 /* Do we have any prog (not NULL) in the new array? */
2686 if (!new_prog_cnt) {
2687 *new_array = NULL;
2688 return 0;
2689 }
2690
2691 /* +1 as the end of prog_array is marked with NULL */
2692 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2693 if (!array)
2694 return -ENOMEM;
2695 new = array->items;
2696
2697 /* Fill in the new prog array */
2698 if (carry_prog_cnt) {
2699 existing = old_array->items;
2700 for (; existing->prog; existing++) {
2701 if (existing->prog == exclude_prog ||
2702 existing->prog == &dummy_bpf_prog.prog)
2703 continue;
2704
2705 new->prog = existing->prog;
2706 new->bpf_cookie = existing->bpf_cookie;
2707 new++;
2708 }
2709 }
2710 if (include_prog) {
2711 new->prog = include_prog;
2712 new->bpf_cookie = bpf_cookie;
2713 new++;
2714 }
2715 new->prog = NULL;
2716 *new_array = array;
2717 return 0;
2718}
2719
2720int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2721 u32 *prog_ids, u32 request_cnt,
2722 u32 *prog_cnt)
2723{
2724 u32 cnt = 0;
2725
2726 if (array)
2727 cnt = bpf_prog_array_length(array);
2728
2729 *prog_cnt = cnt;
2730
2731 /* return early if user requested only program count or nothing to copy */
2732 if (!request_cnt || !cnt)
2733 return 0;
2734
2735 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2736 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2737 : 0;
2738}
2739
2740void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2741 struct bpf_map **used_maps, u32 len)
2742{
2743 struct bpf_map *map;
2744 bool sleepable;
2745 u32 i;
2746
2747 sleepable = aux->prog->sleepable;
2748 for (i = 0; i < len; i++) {
2749 map = used_maps[i];
2750 if (map->ops->map_poke_untrack)
2751 map->ops->map_poke_untrack(map, aux);
2752 if (sleepable)
2753 atomic64_dec(&map->sleepable_refcnt);
2754 bpf_map_put(map);
2755 }
2756}
2757
2758static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2759{
2760 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2761 kfree(aux->used_maps);
2762}
2763
2764void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len)
2765{
2766#ifdef CONFIG_BPF_SYSCALL
2767 struct btf_mod_pair *btf_mod;
2768 u32 i;
2769
2770 for (i = 0; i < len; i++) {
2771 btf_mod = &used_btfs[i];
2772 if (btf_mod->module)
2773 module_put(btf_mod->module);
2774 btf_put(btf_mod->btf);
2775 }
2776#endif
2777}
2778
2779static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2780{
2781 __bpf_free_used_btfs(aux->used_btfs, aux->used_btf_cnt);
2782 kfree(aux->used_btfs);
2783}
2784
2785static void bpf_prog_free_deferred(struct work_struct *work)
2786{
2787 struct bpf_prog_aux *aux;
2788 int i;
2789
2790 aux = container_of(work, struct bpf_prog_aux, work);
2791#ifdef CONFIG_BPF_SYSCALL
2792 bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2793#endif
2794#ifdef CONFIG_CGROUP_BPF
2795 if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2796 bpf_cgroup_atype_put(aux->cgroup_atype);
2797#endif
2798 bpf_free_used_maps(aux);
2799 bpf_free_used_btfs(aux);
2800 if (bpf_prog_is_dev_bound(aux))
2801 bpf_prog_dev_bound_destroy(aux->prog);
2802#ifdef CONFIG_PERF_EVENTS
2803 if (aux->prog->has_callchain_buf)
2804 put_callchain_buffers();
2805#endif
2806 if (aux->dst_trampoline)
2807 bpf_trampoline_put(aux->dst_trampoline);
2808 for (i = 0; i < aux->real_func_cnt; i++) {
2809 /* We can just unlink the subprog poke descriptor table as
2810 * it was originally linked to the main program and is also
2811 * released along with it.
2812 */
2813 aux->func[i]->aux->poke_tab = NULL;
2814 bpf_jit_free(aux->func[i]);
2815 }
2816 if (aux->real_func_cnt) {
2817 kfree(aux->func);
2818 bpf_prog_unlock_free(aux->prog);
2819 } else {
2820 bpf_jit_free(aux->prog);
2821 }
2822}
2823
2824void bpf_prog_free(struct bpf_prog *fp)
2825{
2826 struct bpf_prog_aux *aux = fp->aux;
2827
2828 if (aux->dst_prog)
2829 bpf_prog_put(aux->dst_prog);
2830 bpf_token_put(aux->token);
2831 INIT_WORK(&aux->work, bpf_prog_free_deferred);
2832 schedule_work(&aux->work);
2833}
2834EXPORT_SYMBOL_GPL(bpf_prog_free);
2835
2836/* RNG for unprivileged user space with separated state from prandom_u32(). */
2837static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2838
2839void bpf_user_rnd_init_once(void)
2840{
2841 prandom_init_once(&bpf_user_rnd_state);
2842}
2843
2844BPF_CALL_0(bpf_user_rnd_u32)
2845{
2846 /* Should someone ever have the rather unwise idea to use some
2847 * of the registers passed into this function, then note that
2848 * this function is called from native eBPF and classic-to-eBPF
2849 * transformations. Register assignments from both sides are
2850 * different, f.e. classic always sets fn(ctx, A, X) here.
2851 */
2852 struct rnd_state *state;
2853 u32 res;
2854
2855 state = &get_cpu_var(bpf_user_rnd_state);
2856 res = prandom_u32_state(state);
2857 put_cpu_var(bpf_user_rnd_state);
2858
2859 return res;
2860}
2861
2862BPF_CALL_0(bpf_get_raw_cpu_id)
2863{
2864 return raw_smp_processor_id();
2865}
2866
2867/* Weak definitions of helper functions in case we don't have bpf syscall. */
2868const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2869const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2870const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2871const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2872const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2873const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2874const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
2875const struct bpf_func_proto bpf_spin_lock_proto __weak;
2876const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2877const struct bpf_func_proto bpf_jiffies64_proto __weak;
2878
2879const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2880const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2881const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2882const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2883const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2884const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2885const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;
2886
2887const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2888const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2889const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2890const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2891const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2892const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2893const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2894const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2895const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2896const struct bpf_func_proto bpf_set_retval_proto __weak;
2897const struct bpf_func_proto bpf_get_retval_proto __weak;
2898
2899const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2900{
2901 return NULL;
2902}
2903
2904const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
2905{
2906 return NULL;
2907}
2908
2909u64 __weak
2910bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2911 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2912{
2913 return -ENOTSUPP;
2914}
2915EXPORT_SYMBOL_GPL(bpf_event_output);
2916
2917/* Always built-in helper functions. */
2918const struct bpf_func_proto bpf_tail_call_proto = {
2919 .func = NULL,
2920 .gpl_only = false,
2921 .ret_type = RET_VOID,
2922 .arg1_type = ARG_PTR_TO_CTX,
2923 .arg2_type = ARG_CONST_MAP_PTR,
2924 .arg3_type = ARG_ANYTHING,
2925};
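/* Note that .func is intentionally NULL: bpf_tail_call() is never dispatched
 * through a helper address. The verifier rewrites calls to it into the
 * internal BPF_TAIL_CALL instruction (handled by JMP_TAIL_CALL in the
 * interpreter above), and JITs emit their own jump sequence for it.
 */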
2926
2927/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2928 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2929 * eBPF and implicitly also cBPF can get JITed!
2930 */
2931struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2932{
2933 return prog;
2934}
2935
2936/* Stub for JITs that support eBPF. All cBPF code gets transformed into
2937 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2938 */
2939void __weak bpf_jit_compile(struct bpf_prog *prog)
2940{
2941}
2942
2943bool __weak bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
2944{
2945 return false;
2946}
2947
2948/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
2949 * analysis code and wants explicit zero extension inserted by verifier.
2950 * Otherwise, return FALSE.
2951 *
2952 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2953 * you don't override this. JITs that don't want these extra insns can detect
2954 * them using insn_is_zext.
2955 */
2956bool __weak bpf_jit_needs_zext(void)
2957{
2958 return false;
2959}
2960
2961/* Return true if the JIT inlines the call to the helper corresponding to
2962 * the imm.
2963 *
2964 * The verifier will not patch the insn->imm for the call to the helper if
2965 * this returns true.
2966 */
2967bool __weak bpf_jit_inlines_helper_call(s32 imm)
2968{
2969 return false;
2970}
2971
2972/* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
2973bool __weak bpf_jit_supports_subprog_tailcalls(void)
2974{
2975 return false;
2976}
2977
2978bool __weak bpf_jit_supports_percpu_insn(void)
2979{
2980 return false;
2981}
2982
2983bool __weak bpf_jit_supports_kfunc_call(void)
2984{
2985 return false;
2986}
2987
2988bool __weak bpf_jit_supports_far_kfunc_call(void)
2989{
2990 return false;
2991}
2992
2993bool __weak bpf_jit_supports_arena(void)
2994{
2995 return false;
2996}
2997
2998bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
2999{
3000 return false;
3001}
3002
3003u64 __weak bpf_arch_uaddress_limit(void)
3004{
3005#if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE)
3006 return TASK_SIZE;
3007#else
3008 return 0;
3009#endif
3010}
3011
3012/* Return TRUE if the JIT backend satisfies the following two conditions:
3013 * 1) JIT backend supports atomic_xchg() on pointer-sized words.
3014 * 2) Under the specific arch, the implementation of xchg() is the same
3015 * as atomic_xchg() on pointer-sized words.
3016 */
3017bool __weak bpf_jit_supports_ptr_xchg(void)
3018{
3019 return false;
3020}
3021
3022/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
3023 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
3024 */
3025int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
3026 int len)
3027{
3028 return -EFAULT;
3029}
3030
3031int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
3032 void *addr1, void *addr2)
3033{
3034 return -ENOTSUPP;
3035}
3036
3037void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
3038{
3039 return ERR_PTR(-ENOTSUPP);
3040}
3041
3042int __weak bpf_arch_text_invalidate(void *dst, size_t len)
3043{
3044 return -ENOTSUPP;
3045}
3046
3047bool __weak bpf_jit_supports_exceptions(void)
3048{
3049 return false;
3050}
3051
3052bool __weak bpf_jit_supports_private_stack(void)
3053{
3054 return false;
3055}
3056
3057void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3058{
3059}
3060
/* Weak stubs for configs without an MMU or for 32-bit kernels. */
3062__weak const struct bpf_map_ops arena_map_ops;
3063__weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
3064{
3065 return 0;
3066}
3067__weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
3068{
3069 return 0;
3070}
3071
3072#ifdef CONFIG_BPF_SYSCALL
3073static int __init bpf_global_ma_init(void)
3074{
3075 int ret;
3076
3077 ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
3078 bpf_global_ma_set = !ret;
3079 return ret;
3080}
3081late_initcall(bpf_global_ma_init);
3082#endif
3083
3084DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
3085EXPORT_SYMBOL(bpf_stats_enabled_key);
3086
3087/* All definitions of tracepoints related to BPF. */
3088#define CREATE_TRACE_POINTS
3089#include <linux/bpf_trace.h>
3090
3091EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
3092EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);