// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>

#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
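
/* Illustration (not part of the original source): with the classic BPF
 * ancillary offsets above, k = SKF_NET_OFF + 12 on an IPv4 packet
 * resolves to offset 12 into the network header, i.e. the IPv4 source
 * address, while k = SKF_LL_OFF + 0 points at the first byte of the
 * link-layer (MAC) header. Accesses outside [skb->head, tail) fall
 * through to the NULL return above.
 */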

struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();

	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

	return fp;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *prog;
	int cpu;

	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
	if (!prog)
		return NULL;

	prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
	if (!prog->aux->stats) {
		kfree(prog->aux);
		vfree(prog);
		return NULL;
	}

	for_each_possible_cpu(cpu) {
		struct bpf_prog_stats *pstats;

		pstats = per_cpu_ptr(prog->aux->stats, cpu);
		u64_stats_init(&pstats->syncp);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
	if (!prog->aux->nr_linfo || !prog->jit_requested)
		return 0;

	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
					 sizeof(*prog->aux->jited_linfo),
					 GFP_KERNEL | __GFP_NOWARN);
	if (!prog->aux->jited_linfo)
		return -ENOMEM;

	return 0;
}

void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
{
	kfree(prog->aux->jited_linfo);
	prog->aux->jited_linfo = NULL;
}

void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
{
	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
		bpf_prog_free_jited_linfo(prog);
}

/* The jit engine is responsible for providing an array
 * for the insn_off to jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off. Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the last byte of the jited insn.
 *
 * Hence, with
 * insn_start:
 *	The first bpf insn off of the prog. The insn off
 *	here is relative to the main prog.
 *	e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *	The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo)
		/* Userspace did not provide linfo */
		return;

	linfo_idx = prog->aux->linfo_idx;
	linfo = &prog->aux->linfo[linfo_idx];
	insn_start = linfo[0].insn_off;
	insn_end = insn_start + prog->len;

	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	jited_linfo[0] = prog->bpf_func;

	nr_linfo = prog->aux->nr_linfo - linfo_idx;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}
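
/* Worked example (illustrative numbers, not from the original source):
 * for a subprog starting at main-prog insn 10 (insn_start = 10) whose
 * linfo[2] has insn_off = 13 (subprog-relative insn 3), the entry is
 * jited_linfo[2] = prog->bpf_func + insn_to_jit_off[13 - 10 - 1], i.e.
 * the end of the jited code for subprog insn 2, which is where the
 * jited code for insn 3 begins.
 */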

void bpf_prog_free_linfo(struct bpf_prog *prog)
{
	bpf_prog_free_jited_linfo(prog);
	kvfree(prog->aux->linfo);
}

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages, delta;
	int ret;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	delta = pages - fp_old->pages;
	ret = __bpf_prog_charge(fp_old->aux->user, delta);
	if (ret)
		return NULL;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL) {
		__bpf_prog_uncharge(fp_old->aux->user, delta);
	} else {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	if (fp->aux) {
		free_percpu(fp->aux->stats);
		kfree(fp->aux);
	}
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA_DIGEST_WORDS];
	u32 ws[SHA_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fd for the digest calculation
	 * since map fds are unstable from the user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize = round_up(psize, SHA_MESSAGE_BYTES);
	blocks = bsize / SHA_MESSAGE_BYTES;
	todo = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha_transform(digest, todo, ws);
		todo += SHA_MESSAGE_BYTES;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}
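
/* Note on the scheme above (summary, not in the original source): this
 * is standard SHA-1 message padding. A single 0x80 byte follows the
 * instruction image, the buffer is zero-padded to a 64-byte block
 * boundary, and the message length in bits, (psize - 1) << 3, goes
 * big-endian into the final 8 bytes; if fewer than 8 bytes of padding
 * remain in the last block, one extra block is consumed.
 */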

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s32 delta = end_new - end_old;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 >= end_old)
		imm += delta;
	else if (curr >= end_new && curr + imm + 1 < end_new)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s32 off_min = S16_MIN, off_max = S16_MAX;
	s32 delta = end_new - end_old;
	s32 off = insn->off;

	if (curr < pos && curr + off + 1 >= end_old)
		off += delta;
	else if (curr >= end_new && curr + off + 1 < end_new)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass)
		insn->off = off;
	return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
			    s32 end_new, const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i = end_new;
			insn = prog->insnsi + end_old;
		}
		code = insn->code;
		if ((BPF_CLASS(code) != BPF_JMP &&
		     BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, end_old,
						   end_new, i, probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}
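
/* Example of the adjustment above (illustrative numbers): if a 3-insn
 * patchlet replaces insn 5 (end_old = 6, end_new = 8, delta = 2), a
 * jump at insn 2 with off = 4 originally targeting insn 7
 * (curr + off + 1 >= end_old) gets off += 2, so it still reaches the
 * same logical insn after the image grew by two insns.
 */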

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
	struct bpf_line_info *linfo;
	u32 i, nr_linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo || !delta)
		return;

	linfo = prog->aux->linfo;

	for (i = 0; i < nr_linfo; i++)
		if (off < linfo[i].insn_off)
			break;

	/* Push all off < linfo[i].insn_off by delta */
	for (; i < nr_linfo; i++)
		linfo[i].insn_off += delta;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;
	int err;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
		return ERR_PTR(err);

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * the last page could have enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return ERR_PTR(-ENOMEM);

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point, otherwise
	 * the ship has sailed: there is no way to revert to the
	 * original state. An overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));

	bpf_adj_linfo(prog_adj, off, insn_delta);

	return prog_adj;
}
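
/* Example flow (illustrative numbers): patching insn 5 of a 10-insn
 * prog with a 3-insn patchlet gives insn_adj_cnt = 12; insns 6..9 move
 * to 8..11 (insn_rest = 4), the patchlet lands on insns 5..7, and
 * bpf_adj_branches() then rewrites any jump or call whose target
 * crosses the patched region, as sketched above.
 */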

int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
	/* Branch offsets can't overflow when program is shrinking, no need
	 * to call bpf_adj_branches(..., true) here
	 */
	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
		sizeof(struct bpf_insn) * (prog->len - off - cnt));
	prog->len -= cnt;

	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
}

static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden   __read_mostly;
int bpf_jit_kallsyms __read_mostly;
long bpf_jit_limit   __read_mostly;

static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
			 unsigned long *symbol_start,
			 unsigned long *symbol_end)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
	unsigned long addr = (unsigned long)hdr;

	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	*symbol_start = addr;
	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
}

void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We would need +1 for the '_' preceding
		      * the name. However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}
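
/* Resulting ksym format (derived from the code above): "bpf_prog_"
 * followed by 16 hex chars of the 8-byte tag, optionally "_<name>".
 * E.g. a prog named "parser" might appear in /proc/kallsyms as
 * "bpf_prog_5a7a0d0b1f8c2e3d_parser" (tag value made up here).
 */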

static __always_inline unsigned long
bpf_get_prog_addr_start(struct latch_tree_node *n)
{
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	return symbol_start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long symbol_start, symbol_end;
	const struct bpf_prog_aux *aux;

	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);

	if (val < symbol_start)
		return -1;
	if (val >= symbol_end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
{
	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
}

static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
{
	if (list_empty(&aux->ksym_lnode))
		return;

	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&aux->ksym_lnode);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym_lnode) ||
	       fp->aux->ksym_lnode.prev == LIST_POISON2;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !capable(CAP_SYS_ADMIN))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_add(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	spin_lock_bh(&bpf_lock);
	bpf_prog_ksym_node_del(fp->aux);
	spin_unlock_bh(&bpf_lock);
}

static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
{
	struct latch_tree_node *n;

	if (!bpf_jit_kallsyms_enabled())
		return NULL;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ?
	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
	       NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	unsigned long symbol_start, symbol_end;
	struct bpf_prog *prog;
	char *ret = NULL;

	rcu_read_lock();
	prog = bpf_prog_kallsyms_find(addr);
	if (prog) {
		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
		bpf_get_prog_name(prog, sym);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_prog_kallsyms_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_prog_aux *aux;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
		if (it++ != symnum)
			continue;

		bpf_get_prog_name(aux->prog, sym);

		*value = (unsigned long)aux->prog->bpf_func;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);
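
/* For intuition (illustrative numbers only): on an arch whose exec
 * area spans 1 GiB, the default bpf_jit_limit derived above comes to
 * roughly a quarter of it, i.e. 256 MiB rounded up to PAGE_SIZE,
 * shared across all JITed images and enforced by
 * bpf_jit_charge_modmem() below.
 */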

static int bpf_jit_charge_modmem(u32 pages)
{
	if (atomic_long_add_return(pages, &bpf_jit_current) >
	    (bpf_jit_limit >> PAGE_SHIFT)) {
		if (!capable(CAP_SYS_ADMIN)) {
			atomic_long_sub(pages, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

static void bpf_jit_uncharge_modmem(u32 pages)
{
	atomic_long_sub(pages, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
	return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
	module_memfree(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start, pages;

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	pages = size / PAGE_SIZE;

	if (bpf_jit_charge_modmem(pages))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(pages);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = pages;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}
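
/* Resulting layout of a JIT allocation (sketch based on the code
 * above):
 *
 *	[ struct bpf_binary_header | random hole | JITed image | slack ]
 *	^ hdr                        ^ *image_ptr == &hdr->image[start]
 *
 * start is randomized within the hole and masked down to the requested
 * alignment, and every non-program byte was pre-filled with illegal
 * instructions by bpf_fill_ill_insns() above.
 */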

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 pages = hdr->pages;

	bpf_jit_free_exec(hdr);
	bpf_jit_uncharge_modmem(pages);
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_free(hdr);

		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff,
			      bool emit_zext)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_int();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * The interpreter uses AX on some occasions as a local temporary
	 * register e.g. in DIV or MOD instructions.
	 *
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
		goto out;

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Account for the extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_JMP32 | BPF_JEQ  | BPF_K:
	case BPF_JMP32 | BPF_JNE  | BPF_K:
	case BPF_JMP32 | BPF_JGT  | BPF_K:
	case BPF_JMP32 | BPF_JLT  | BPF_K:
	case BPF_JMP32 | BPF_JGE  | BPF_K:
	case BPF_JMP32 | BPF_JLE  | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		/* Account for the extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
				      off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		if (emit_zext)
			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
		*to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}
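
/* Blinding example (illustrative values): BPF_ALU64_IMM(BPF_ADD, R1,
 * 0x1234) with imm_rnd = 0xdeadbeef is rewritten to:
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0xdeadbeef ^ 0x1234)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0xdeadbeef)
 *	BPF_ALU64_REG(BPF_ADD, R1, BPF_REG_AX)
 *
 * so the original constant never appears verbatim in the JITed image,
 * which hinders encoding attacker-chosen payloads in immediates.
 */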

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
					       clone->aux->verifier_zext);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (IS_ERR(tmp)) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return tmp;
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i        += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)		\
	/* 32 bit ALU operations. */		\
	/*   Register based. */			\
	INSN_3(ALU, ADD,  X),			\
	INSN_3(ALU, SUB,  X),			\
	INSN_3(ALU, AND,  X),			\
	INSN_3(ALU, OR,   X),			\
	INSN_3(ALU, LSH,  X),			\
	INSN_3(ALU, RSH,  X),			\
	INSN_3(ALU, XOR,  X),			\
	INSN_3(ALU, MUL,  X),			\
	INSN_3(ALU, MOV,  X),			\
	INSN_3(ALU, ARSH, X),			\
	INSN_3(ALU, DIV,  X),			\
	INSN_3(ALU, MOD,  X),			\
	INSN_2(ALU, NEG),			\
	INSN_3(ALU, END, TO_BE),		\
	INSN_3(ALU, END, TO_LE),		\
	/*   Immediate based. */		\
	INSN_3(ALU, ADD,  K),			\
	INSN_3(ALU, SUB,  K),			\
	INSN_3(ALU, AND,  K),			\
	INSN_3(ALU, OR,   K),			\
	INSN_3(ALU, LSH,  K),			\
	INSN_3(ALU, RSH,  K),			\
	INSN_3(ALU, XOR,  K),			\
	INSN_3(ALU, MUL,  K),			\
	INSN_3(ALU, MOV,  K),			\
	INSN_3(ALU, ARSH, K),			\
	INSN_3(ALU, DIV,  K),			\
	INSN_3(ALU, MOD,  K),			\
	/* 64 bit ALU operations. */		\
	/*   Register based. */			\
	INSN_3(ALU64, ADD,  X),			\
	INSN_3(ALU64, SUB,  X),			\
	INSN_3(ALU64, AND,  X),			\
	INSN_3(ALU64, OR,   X),			\
	INSN_3(ALU64, LSH,  X),			\
	INSN_3(ALU64, RSH,  X),			\
	INSN_3(ALU64, XOR,  X),			\
	INSN_3(ALU64, MUL,  X),			\
	INSN_3(ALU64, MOV,  X),			\
	INSN_3(ALU64, ARSH, X),			\
	INSN_3(ALU64, DIV,  X),			\
	INSN_3(ALU64, MOD,  X),			\
	INSN_2(ALU64, NEG),			\
	/*   Immediate based. */		\
	INSN_3(ALU64, ADD,  K),			\
	INSN_3(ALU64, SUB,  K),			\
	INSN_3(ALU64, AND,  K),			\
	INSN_3(ALU64, OR,   K),			\
	INSN_3(ALU64, LSH,  K),			\
	INSN_3(ALU64, RSH,  K),			\
	INSN_3(ALU64, XOR,  K),			\
	INSN_3(ALU64, MUL,  K),			\
	INSN_3(ALU64, MOV,  K),			\
	INSN_3(ALU64, ARSH, K),			\
	INSN_3(ALU64, DIV,  K),			\
	INSN_3(ALU64, MOD,  K),			\
	/* Call instruction. */			\
	INSN_2(JMP, CALL),			\
	/* Exit instruction. */			\
	INSN_2(JMP, EXIT),			\
	/* 32-bit Jump instructions. */		\
	/*   Register based. */			\
	INSN_3(JMP32, JEQ,  X),			\
	INSN_3(JMP32, JNE,  X),			\
	INSN_3(JMP32, JGT,  X),			\
	INSN_3(JMP32, JLT,  X),			\
	INSN_3(JMP32, JGE,  X),			\
	INSN_3(JMP32, JLE,  X),			\
	INSN_3(JMP32, JSGT, X),			\
	INSN_3(JMP32, JSLT, X),			\
	INSN_3(JMP32, JSGE, X),			\
	INSN_3(JMP32, JSLE, X),			\
	INSN_3(JMP32, JSET, X),			\
	/*   Immediate based. */		\
	INSN_3(JMP32, JEQ,  K),			\
	INSN_3(JMP32, JNE,  K),			\
	INSN_3(JMP32, JGT,  K),			\
	INSN_3(JMP32, JLT,  K),			\
	INSN_3(JMP32, JGE,  K),			\
	INSN_3(JMP32, JLE,  K),			\
	INSN_3(JMP32, JSGT, K),			\
	INSN_3(JMP32, JSLT, K),			\
	INSN_3(JMP32, JSGE, K),			\
	INSN_3(JMP32, JSLE, K),			\
	INSN_3(JMP32, JSET, K),			\
	/* Jump instructions. */		\
	/*   Register based. */			\
	INSN_3(JMP, JEQ,  X),			\
	INSN_3(JMP, JNE,  X),			\
	INSN_3(JMP, JGT,  X),			\
	INSN_3(JMP, JLT,  X),			\
	INSN_3(JMP, JGE,  X),			\
	INSN_3(JMP, JLE,  X),			\
	INSN_3(JMP, JSGT, X),			\
	INSN_3(JMP, JSLT, X),			\
	INSN_3(JMP, JSGE, X),			\
	INSN_3(JMP, JSLE, X),			\
	INSN_3(JMP, JSET, X),			\
	/*   Immediate based. */		\
	INSN_3(JMP, JEQ,  K),			\
	INSN_3(JMP, JNE,  K),			\
	INSN_3(JMP, JGT,  K),			\
	INSN_3(JMP, JLT,  K),			\
	INSN_3(JMP, JGE,  K),			\
	INSN_3(JMP, JLE,  K),			\
	INSN_3(JMP, JSGT, K),			\
	INSN_3(JMP, JSLT, K),			\
	INSN_3(JMP, JSGE, K),			\
	INSN_3(JMP, JSLE, K),			\
	INSN_3(JMP, JSET, K),			\
	INSN_2(JMP, JA),			\
	/* Store instructions. */		\
	/*   Register based. */			\
	INSN_3(STX, MEM,  B),			\
	INSN_3(STX, MEM,  H),			\
	INSN_3(STX, MEM,  W),			\
	INSN_3(STX, MEM,  DW),			\
	INSN_3(STX, XADD, W),			\
	INSN_3(STX, XADD, DW),			\
	/*   Immediate based. */		\
	INSN_3(ST, MEM, B),			\
	INSN_3(ST, MEM, H),			\
	INSN_3(ST, MEM, W),			\
	INSN_3(ST, MEM, DW),			\
	/* Load instructions. */		\
	/*   Register based. */			\
	INSN_3(LDX, MEM, B),			\
	INSN_3(LDX, MEM, H),			\
	INSN_3(LDX, MEM, W),			\
	INSN_3(LDX, MEM, DW),			\
	/*   Immediate based. */		\
	INSN_3(LD, IMM, DW)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
/**
 * __bpf_prog_run - run eBPF program on a given context
 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 * @insn: is the array of eBPF instructions
 * @stack: is the eBPF storage stack
 *
 * Decode and execute eBPF instructions.
 */
static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
{
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void * const jumptable[256] __annotate_jump_table = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR,  ^)
	ALU(MUL,  *)
#undef ALU
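	/* For reference (expansion sketch, not in the original source),
	 * ALU(ADD, +) above generates the following four label bodies:
	 *
	 *	ALU64_ADD_X: DST = DST + SRC; CONT;
	 *	ALU_ADD_X:   DST = (u32) DST + (u32) SRC; CONT;
	 *	ALU64_ADD_K: DST = DST + IMM; CONT;
	 *	ALU_ADD_K:   DST = (u32) DST + (u32) IMM; CONT;
	 */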
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU_ARSH_X:
		DST = (u64) (u32) (((s32) DST) >> SRC);
		CONT;
	ALU_ARSH_K:
		DST = (u64) (u32) (((s32) DST) >> IMM);
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		div64_u64_rem(DST, SRC, &AX);
		DST = AX;
		CONT;
	ALU_MOD_X:
		AX = (u32) DST;
		DST = do_div(AX, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &AX);
		DST = AX;
		CONT;
	ALU_MOD_K:
		AX = (u32) DST;
		DST = do_div(AX, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		AX = (u32) DST;
		do_div(AX, (u32) SRC);
		DST = (u32) AX;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		AX = (u32) DST;
		do_div(AX, (u32) IMM);
		DST = (u32) AX;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	JMP_CALL_ARGS:
		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
							    BPF_R3, BPF_R4,
							    BPF_R5,
							    insn + insn->off + 1);
		CONT;

	JMP_TAIL_CALL: {
		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_prog *prog;
		u32 index = BPF_R3;

		if (unlikely(index >= array->map.max_entries))
			goto out;
		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
			goto out;

		tail_call_cnt++;

		prog = READ_ONCE(array->ptrs[index]);
		if (!prog)
			goto out;

		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side due to the fact that the tail call is
		 * handled like a helper, that is, bpf_tail_call_proto,
		 * where arg1_type is ARG_PTR_TO_CTX.
		 */
		insn = prog->insnsi;
		goto select_insn;
out:
		CONT;
	}
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_EXIT:
		return BPF_R0;
	/* JMP */
#define COND_JMP(SIGN, OPCODE, CMP_OP)				\
	JMP_##OPCODE##_X:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_X:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP_##OPCODE##_K:					\
		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;						\
	JMP32_##OPCODE##_K:					\
		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
			insn += insn->off;			\
			CONT_JMP;				\
		}						\
		CONT;
	COND_JMP(u,  JEQ,  ==)
	COND_JMP(u,  JNE,  !=)
	COND_JMP(u,  JGT,  >)
	COND_JMP(u,  JLT,  <)
	COND_JMP(u,  JGE,  >=)
	COND_JMP(u,  JLE,  <=)
	COND_JMP(u,  JSET, &)
	COND_JMP(s,  JSGT, >)
	COND_JMP(s,  JSLT, <)
	COND_JMP(s,  JSGE, >=)
	COND_JMP(s,  JSLE, <=)
#undef COND_JMP
	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B,   u8)
	LDST(H,  u16)
	LDST(W,  u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;

	default_label:
		/* If we ever reach this, we have a bug somewhere. Die hard here
		 * instead of just returning 0; we could be somewhere in a subprog,
		 * so execution could continue otherwise which we do /not/ want.
		 *
		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
		 */
		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
		BUG_ON(1);
		return 0;
}

#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
#define DEFINE_BPF_PROG_RUN(stack_size) \
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	ARG1 = (u64) (unsigned long) ctx; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_EXT_REG]; \
\
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn, stack); \
}

#define EVAL1(FN, X) FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)

EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);

#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

static unsigned int (*interpreters[])(const void *ctx,
				      const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST
#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
				  const struct bpf_insn *insn) = {
EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
};
#undef PROG_NAME_LIST

void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
{
	stack_depth = max_t(u32, stack_depth, 1);
	insn->off = (s16) insn->imm;
	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
		__bpf_call_base_args;
	insn->code = BPF_JMP | BPF_CALL_ARGS;
}
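
/* Example of the bucketing above (illustrative): a callee with
 * stack_depth = 40 rounds up to 64 and selects interpreters_args[1],
 * i.e. __bpf_prog_run_args64 with a 64-byte on-stack scratch area;
 * a depth of 0 is clamped to 1 and thus uses the 32-byte variant.
 */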

#else
static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
{
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, so warn about it!
	 */
	WARN_ON_ONCE(1);
	return 0;
}
#endif

bool bpf_prog_array_compatible(struct bpf_array *array,
			       const struct bpf_prog *fp)
{
	if (fp->kprobe_override)
		return false;

	if (!array->owner_prog_type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
		array->owner_prog_type = fp->type;
		array->owner_jited = fp->jited;

		return true;
	}

	return array->owner_prog_type == fp->type &&
	       array->owner_jited == fp->jited;
}

static int bpf_check_tail_call(const struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;
	int i;

	for (i = 0; i < aux->used_map_cnt; i++) {
		struct bpf_map *map = aux->used_maps[i];
		struct bpf_array *array;

		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			continue;

		array = container_of(map, struct bpf_array, map);
		if (!bpf_prog_array_compatible(array, fp))
			return -EINVAL;
	}

	return 0;
}

static void bpf_prog_select_func(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif
}

/**
 * bpf_prog_select_runtime - select exec runtime for BPF program
 * @fp: bpf_prog populated with internal BPF program
 * @err: pointer to error variable
 *
 * Try to JIT eBPF program, if JIT is not available, use interpreter.
 * The BPF program will be executed via BPF_PROG_RUN() macro.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	/* In case of BPF to BPF calls, the verifier did all the prep
	 * work with regard to JITing, etc.
	 */
	if (fp->bpf_func)
		goto finalize;

	bpf_prog_select_func(fp);

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but falls back to the interpreter.
	 */
	if (!bpf_prog_is_dev_bound(fp->aux)) {
		*err = bpf_prog_alloc_jited_linfo(fp);
		if (*err)
			return fp;

		fp = bpf_int_jit_compile(fp);
		if (!fp->jited) {
			bpf_prog_free_jited_linfo(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
			*err = -ENOTSUPP;
			return fp;
#endif
		} else {
			bpf_prog_free_unused_jited_linfo(fp);
		}
	} else {
		*err = bpf_prog_offload_compile(fp);
		if (*err)
			return fp;
	}

finalize:
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
	 * this late stage, as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

static unsigned int __bpf_prog_ret1(const void *ctx,
				    const struct bpf_insn *insn)
{
	return 1;
}

static struct bpf_prog_dummy {
	struct bpf_prog prog;
} dummy_bpf_prog = {
	.prog = {
		.bpf_func = __bpf_prog_ret1,
	},
};

/* To avoid allocating an empty bpf_prog_array for cgroups that don't
 * have a bpf program attached, use one global 'empty_prog_array'. It
 * will not be modified by the caller of bpf_prog_array_alloc() (since
 * the caller requested prog_cnt == 0); that pointer should be 'freed'
 * by bpf_prog_array_free().
 */
static struct {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
} empty_prog_array = {
	.null_prog = NULL,
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
{
	if (prog_cnt)
		return kzalloc(sizeof(struct bpf_prog_array) +
			       sizeof(struct bpf_prog_array_item) *
			       (prog_cnt + 1),
			       flags);

	return &empty_prog_array.hdr;
}

void bpf_prog_array_free(struct bpf_prog_array *progs)
{
	if (!progs || progs == &empty_prog_array.hdr)
		return;
	kfree_rcu(progs, rcu);
}

int bpf_prog_array_length(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;
	u32 cnt = 0;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			cnt++;
	return cnt;
}

bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog != &dummy_bpf_prog.prog)
			return false;
	return true;
}

static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
				     u32 *prog_ids,
				     u32 request_cnt)
{
	struct bpf_prog_array_item *item;
	int i = 0;

	for (item = array->items; item->prog; item++) {
		if (item->prog == &dummy_bpf_prog.prog)
			continue;
		prog_ids[i] = item->prog->aux->id;
		if (++i == request_cnt) {
			item++;
			break;
		}
	}

	return !!(item->prog);
}

int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
				__u32 __user *prog_ids, u32 cnt)
{
	unsigned long err = 0;
	bool nospc;
	u32 *ids;

	/* users of this function are doing:
	 * cnt = bpf_prog_array_length();
	 * if (cnt > 0)
	 *	bpf_prog_array_copy_to_user(..., cnt);
	 * so below kcalloc doesn't need extra cnt > 0 check.
	 */
	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	nospc = bpf_prog_array_copy_core(array, ids, cnt);
	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
	kfree(ids);
	if (err)
		return -EFAULT;
	if (nospc)
		return -ENOSPC;
	return 0;
}

void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
				struct bpf_prog *old_prog)
{
	struct bpf_prog_array_item *item;

	for (item = array->items; item->prog; item++)
		if (item->prog == old_prog) {
			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
			break;
		}
}

int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array)
{
	int new_prog_cnt, carry_prog_cnt = 0;
	struct bpf_prog_array_item *existing;
	struct bpf_prog_array *array;
	bool found_exclude = false;
	int new_prog_idx = 0;

	/* Figure out how many existing progs we need to carry over to
	 * the new array.
	 */
	if (old_array) {
		existing = old_array->items;
		for (; existing->prog; existing++) {
			if (existing->prog == exclude_prog) {
				found_exclude = true;
				continue;
			}
			if (existing->prog != &dummy_bpf_prog.prog)
				carry_prog_cnt++;
			if (existing->prog == include_prog)
				return -EEXIST;
		}
	}

	if (exclude_prog && !found_exclude)
		return -ENOENT;

	/* How many progs (not NULL) will be in the new array? */
	new_prog_cnt = carry_prog_cnt;
	if (include_prog)
		new_prog_cnt += 1;

	/* Do we have any prog (not NULL) in the new array? */
	if (!new_prog_cnt) {
		*new_array = NULL;
		return 0;
	}

	/* +1 as the end of prog_array is marked with NULL */
	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* Fill in the new prog array */
	if (carry_prog_cnt) {
		existing = old_array->items;
		for (; existing->prog; existing++)
			if (existing->prog != exclude_prog &&
			    existing->prog != &dummy_bpf_prog.prog) {
				array->items[new_prog_idx++].prog =
					existing->prog;
			}
	}
	if (include_prog)
		array->items[new_prog_idx++].prog = include_prog;
	array->items[new_prog_idx].prog = NULL;
	*new_array = array;
	return 0;
}

int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt)
{
	u32 cnt = 0;

	if (array)
		cnt = bpf_prog_array_length(array);

	*prog_cnt = cnt;

	/* return early if user requested only program count or nothing to copy */
	if (!request_cnt || !cnt)
		return 0;

	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
								     : 0;
}

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;

	aux = container_of(work, struct bpf_prog_aux, work);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
	if (aux->prog->has_callchain_buf)
		put_callchain_buffers();
#endif
	for (i = 0; i < aux->func_cnt; i++)
		bpf_jit_free(aux->func[i]);
	if (aux->func_cnt) {
		kfree(aux->func);
		bpf_prog_unlock_free(aux->prog);
	} else {
		bpf_jit_free(aux->prog);
	}
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* RNG for unprivileged user space with separated state from prandom_u32(). */
static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);

void bpf_user_rnd_init_once(void)
{
	prandom_init_once(&bpf_user_rnd_state);
}

BPF_CALL_0(bpf_user_rnd_u32)
{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different, f.e. classic always sets fn(ctx, A, X) here.
	 */
	struct rnd_state *state;
	u32 res;

	state = &get_cpu_var(bpf_user_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(bpf_user_rnd_state);

	return res;
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;

const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
const struct bpf_func_proto bpf_get_current_comm_proto __weak;
const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;

const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
	return NULL;
}

u64 __weak
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func		= NULL,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
 * It is encouraged to implement bpf_int_jit_compile() instead, so that
 * eBPF and implicitly also cBPF can get JITed!
 */
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
	return prog;
}

/* Stub for JITs that support eBPF. All cBPF code gets transformed into
 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
 */
void __weak bpf_jit_compile(struct bpf_prog *prog)
{
}

bool __weak bpf_helper_changes_pkt_data(void *func)
{
	return false;
}

/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
 * analysis code and wants explicit zero extension inserted by verifier.
 * Otherwise, return FALSE.
 */
bool __weak bpf_jit_needs_zext(void)
{
	return false;
}

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}

DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);

/* All definitions of tracepoints related to BPF. */
#define CREATE_TRACE_POINTS
#include <linux/bpf_trace.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/objtool.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/bpf_verifier.h>
#include <linux/nodemask.h>
#include <linux/bpf_mem_alloc.h>

#include <asm/barrier.h>
#include <asm/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

struct bpf_mem_alloc bpf_global_ma;
bool bpf_global_ma_set;

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF) {
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	} else if (k >= SKF_LL_OFF) {
		if (unlikely(!skb_mac_header_was_set(skb)))
			return NULL;
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
	}
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
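
/* Usage sketch (hypothetical offsets): a classic BPF filter asking for
 * k = SKF_NET_OFF + 12 reads byte 12 of the network header (the saddr
 * field of an IPv4 packet), while k = SKF_LL_OFF + 0 reads the first
 * byte of the link-layer header. Anything that falls outside
 * [skb->head, skb_tail_pointer()) hits the NULL return instead.
 */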

struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}
	fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
	if (!fp->active) {
		vfree(fp);
		kfree(aux);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();
	fp->blinding_requested = bpf_jit_blinding_enabled(fp);
#ifdef CONFIG_CGROUP_BPF
	aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
#endif

	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
	mutex_init(&fp->aux->used_maps_mutex);
	mutex_init(&fp->aux->dst_mutex);

	return fp;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *prog;
	int cpu;

	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
	if (!prog)
		return NULL;

	prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
	if (!prog->stats) {
		free_percpu(prog->active);
		kfree(prog->aux);
		vfree(prog);
		return NULL;
	}

	for_each_possible_cpu(cpu) {
		struct bpf_prog_stats *pstats;

		pstats = per_cpu_ptr(prog->stats, cpu);
		u64_stats_init(&pstats->syncp);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
	if (!prog->aux->nr_linfo || !prog->jit_requested)
		return 0;

	prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
					  sizeof(*prog->aux->jited_linfo),
					  GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
	if (!prog->aux->jited_linfo)
		return -ENOMEM;

	return 0;
}

void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
{
	if (prog->aux->jited_linfo &&
	    (!prog->jited || !prog->aux->jited_linfo[0])) {
		kvfree(prog->aux->jited_linfo);
		prog->aux->jited_linfo = NULL;
	}

	kfree(prog->aux->kfunc_tab);
	prog->aux->kfunc_tab = NULL;
}

/* The jit engine is responsible to provide an array
 * for insn_off to the jited_off mapping (insn_to_jit_off).
 *
 * The idx to this array is the insn_off.  Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the end of the jited insn.
 *
 * Hence, with
 * insn_start:
 *      The first bpf insn off of the prog.  The insn off
 *      here is relative to the main prog.
 *      e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *      The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *      insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo)
		/* Userspace did not provide linfo */
		return;

	linfo_idx = prog->aux->linfo_idx;
	linfo = &prog->aux->linfo[linfo_idx];
	insn_start = linfo[0].insn_off;
	insn_end = insn_start + prog->len;

	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	jited_linfo[0] = prog->bpf_func;

	nr_linfo = prog->aux->nr_linfo - linfo_idx;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}
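
/* Worked example with assumed values: for a subprog with linfo_idx = 3
 * and insn_start = linfo[0].insn_off = 20, jited_linfo[0] is set to
 * prog->bpf_func; an entry with linfo[1].insn_off = 22 then maps to
 * prog->bpf_func + insn_to_jit_off[22 - 20 - 1], i.e. the jited end
 * offset of the xlated insn just before insn 22.
 */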

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;
	u32 pages;

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	fp = __vmalloc(size, gfp_flags);
	if (fp) {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		fp_old->stats = NULL;
		fp_old->active = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	if (fp->aux) {
		mutex_destroy(&fp->aux->used_maps_mutex);
		mutex_destroy(&fp->aux->dst_mutex);
		kfree(fp->aux->poke_tab);
		kfree(fp->aux);
	}
	free_percpu(fp->stats);
	free_percpu(fp->active);
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA1_DIGEST_WORDS];
	u32 ws[SHA1_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha1_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fd for the digest calculation
	 * since map fds are unstable from the user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA1_BLOCK_SIZE);
	blocks = bsize / SHA1_BLOCK_SIZE;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha1_transform(digest, todo, ws);
		todo += SHA1_BLOCK_SIZE;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA1_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}
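
/* Worked padding example (illustrative): a prog of 3 insns has
 * psize = 3 * sizeof(struct bpf_insn) = 24; appending the 0x80 marker
 * makes psize 25. bsize rounds up to one 64 byte SHA1 block, and since
 * 64 - 25 = 39 >= sizeof(__be64), the encoded bit length
 * ((25 - 1) << 3 = 192) fits at the end of the same block, so a single
 * sha1_transform() round digests the whole image.
 */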

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s32 delta = end_new - end_old;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 >= end_old)
		imm += delta;
	else if (curr >= end_new && curr + imm + 1 < end_new)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s32 off_min = S16_MIN, off_max = S16_MAX;
	s32 delta = end_new - end_old;
	s32 off = insn->off;

	if (curr < pos && curr + off + 1 >= end_old)
		off += delta;
	else if (curr >= end_new && curr + off + 1 < end_new)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass)
		insn->off = off;
	return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
			    s32 end_new, const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i = end_new;
			insn = prog->insnsi + end_old;
		}
		if (bpf_pseudo_func(insn)) {
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
			if (ret)
				return ret;
			continue;
		}
		code = insn->code;
		if ((BPF_CLASS(code) != BPF_JMP &&
		     BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, end_old,
						   end_new, i, probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}
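
/* Adjustment example (hypothetical numbers): patching one insn at
 * pos = 10 into 3 insns gives end_old = 11, end_new = 13 and thus
 * delta = 2. A jump at curr = 5 with off = 7 targets insn 13, i.e.
 * at or beyond end_old, so it becomes off = 9; jumps located after the
 * patchlet that target insns before it shrink by the same delta.
 */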

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
	struct bpf_line_info *linfo;
	u32 i, nr_linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo || !delta)
		return;

	linfo = prog->aux->linfo;

	for (i = 0; i < nr_linfo; i++)
		if (off < linfo[i].insn_off)
			break;

	/* Push all off < linfo[i].insn_off by delta */
	for (; i < nr_linfo; i++)
		linfo[i].insn_off += delta;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;
	int err;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before any reallocation, after which
	 * we may no longer be able to fail.
	 */
	if (insn_adj_cnt > cnt_max &&
	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
		return ERR_PTR(err);

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return ERR_PTR(-ENOMEM);

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed not to fail at this point: the ship has
	 * sailed and we can no longer revert to the original state.
	 * An overflow cannot happen here either.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));

	bpf_adj_linfo(prog_adj, off, insn_delta);

	return prog_adj;
}
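
/* Usage sketch (hypothetical patchlet): replacing insn 5 with two insns
 *
 *	struct bpf_insn patch[] = {
 *		BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0),
 *		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_AX),
 *	};
 *	prog = bpf_patch_insn_single(prog, 5, patch, ARRAY_SIZE(patch));
 *
 * grows the image by one insn and fixes up branches and line info that
 * cross the patch site; callers must check the result with IS_ERR().
 */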

int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
	/* Branch offsets can't overflow when program is shrinking, no need
	 * to call bpf_adj_branches(..., true) here
	 */
	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
		sizeof(struct bpf_insn) * (prog->len - off - cnt));
	prog->len -= cnt;

	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
}

static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden   __read_mostly;
long bpf_jit_limit   __read_mostly;
long bpf_jit_limit_max __read_mostly;

static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
{
	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	prog->aux->ksym.start = (unsigned long) prog->bpf_func;
	prog->aux->ksym.end   = prog->aux->ksym.start + prog->jited_len;
}

static void
bpf_prog_ksym_set_name(struct bpf_prog *prog)
{
	char *sym = prog->aux->ksym.name;
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We should need +1 for the '_' preceding
		      * the name.  However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}
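
/* The resulting kallsyms entry looks like (tag made up for illustration)
 *
 *	bpf_prog_8937c734a41b98a4_my_prog
 *
 * i.e. "bpf_prog_" + 16 hex chars of prog->tag + "_" + the BTF function
 * name when available, else prog->aux->name, else nothing after the tag.
 */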

static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
{
	return container_of(n, struct bpf_ksym, tnode)->start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	const struct bpf_ksym *ksym;

	ksym = container_of(n, struct bpf_ksym, tnode);

	if (val < ksym->start)
		return -1;
	if (val >= ksym->end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

void bpf_ksym_add(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	WARN_ON_ONCE(!list_empty(&ksym->lnode));
	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	spin_unlock_bh(&bpf_lock);
}

static void __bpf_ksym_del(struct bpf_ksym *ksym)
{
	if (list_empty(&ksym->lnode))
		return;

	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&ksym->lnode);
}

void bpf_ksym_del(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	__bpf_ksym_del(ksym);
	spin_unlock_bh(&bpf_lock);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !bpf_capable())
		return;

	bpf_prog_ksym_set_addr(fp);
	bpf_prog_ksym_set_name(fp);
	fp->aux->ksym.prog = true;

	bpf_ksym_add(&fp->aux->ksym);
}

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	bpf_ksym_del(&fp->aux->ksym);
}

static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	struct bpf_ksym *ksym;
	char *ret = NULL;

	rcu_read_lock();
	ksym = bpf_ksym_find(addr);
	if (ksym) {
		unsigned long symbol_start = ksym->start;
		unsigned long symbol_end = ksym->end;

		strncpy(sym, ksym->name, KSYM_NAME_LEN);

		ret = sym;
		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_ksym_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
{
	struct bpf_ksym *ksym = bpf_ksym_find(addr);

	return ksym && ksym->prog ?
	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :
	       NULL;
}

const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct bpf_prog *prog;

	rcu_read_lock();
	prog = bpf_prog_ksym_find(addr);
	if (!prog)
		goto out;
	if (!prog->aux->num_exentries)
		goto out;

	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
out:
	rcu_read_unlock();
	return e;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_ksym *ksym;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
		if (it++ != symnum)
			continue;

		strncpy(sym, ksym->name, KSYM_NAME_LEN);

		*value = ksym->start;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
				struct bpf_jit_poke_descriptor *poke)
{
	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
	static const u32 poke_tab_max = 1024;
	u32 slot = prog->aux->size_poke_tab;
	u32 size = slot + 1;

	if (size > poke_tab_max)
		return -ENOSPC;
	if (poke->tailcall_target || poke->tailcall_target_stable ||
	    poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
		return -EINVAL;

	switch (poke->reason) {
	case BPF_POKE_REASON_TAIL_CALL:
		if (!poke->tail_call.map)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
	if (!tab)
		return -ENOMEM;

	memcpy(&tab[slot], poke, sizeof(*poke));
	prog->aux->size_poke_tab = size;
	prog->aux->poke_tab = tab;

	return slot;
}

/*
 * BPF program pack allocator.
 *
 * Most BPF programs are pretty small. Allocating a whole page for each
 * program is sometimes a waste. Many small BPF programs also add pressure
 * to the instruction TLB. To solve this issue, we introduce a BPF program
 * pack allocator. The prog_pack allocator uses HPAGE_PMD_SIZE pages (2MB
 * on x86) to host BPF programs.
 */
#define BPF_PROG_CHUNK_SHIFT	6
#define BPF_PROG_CHUNK_SIZE	(1 << BPF_PROG_CHUNK_SHIFT)
#define BPF_PROG_CHUNK_MASK	(~(BPF_PROG_CHUNK_SIZE - 1))

struct bpf_prog_pack {
	struct list_head list;
	void *ptr;
	unsigned long bitmap[];
};

void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
{
	memset(area, 0, size);
}

#define BPF_PROG_SIZE_TO_NBITS(size)	(round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
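
/* Chunk math example: with BPF_PROG_CHUNK_SIZE = 64, a 300 byte image
 * needs BPF_PROG_SIZE_TO_NBITS(300) = round_up(300, 64) / 64 = 5
 * chunks, i.e. 5 consecutive bits in pack->bitmap.
 */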

static DEFINE_MUTEX(pack_mutex);
static LIST_HEAD(pack_list);

/* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
 */
#ifdef PMD_SIZE
#define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes())
#else
#define BPF_PROG_PACK_SIZE PAGE_SIZE
#endif

#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)

static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_prog_pack *pack;

	pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
		       GFP_KERNEL);
	if (!pack)
		return NULL;
	pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
	if (!pack->ptr) {
		kfree(pack);
		return NULL;
	}
	bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
	list_add_tail(&pack->list, &pack_list);

	set_vm_flush_reset_perms(pack->ptr);
	set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
	return pack;
}

void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
	struct bpf_prog_pack *pack;
	unsigned long pos;
	void *ptr = NULL;

	mutex_lock(&pack_mutex);
	if (size > BPF_PROG_PACK_SIZE) {
		size = round_up(size, PAGE_SIZE);
		ptr = module_alloc(size);
		if (ptr) {
			bpf_fill_ill_insns(ptr, size);
			set_vm_flush_reset_perms(ptr);
			set_memory_rox((unsigned long)ptr, size / PAGE_SIZE);
		}
		goto out;
	}
	list_for_each_entry(pack, &pack_list, list) {
		pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
						 nbits, 0);
		if (pos < BPF_PROG_CHUNK_COUNT)
			goto found_free_area;
	}

	pack = alloc_new_pack(bpf_fill_ill_insns);
	if (!pack)
		goto out;

	pos = 0;

found_free_area:
	bitmap_set(pack->bitmap, pos, nbits);
	ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);

out:
	mutex_unlock(&pack_mutex);
	return ptr;
}
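
/* Allocation flow sketch (illustrative request): a 1 KiB image needs
 * 16 free consecutive chunks; each pack in pack_list is scanned first,
 * and only if none has room is a fresh BPF_PROG_PACK_SIZE pack carved
 * out. Requests larger than a whole pack bypass the bitmap entirely
 * and get a private module_alloc() mapping instead.
 */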

void bpf_prog_pack_free(struct bpf_binary_header *hdr)
{
	struct bpf_prog_pack *pack = NULL, *tmp;
	unsigned int nbits;
	unsigned long pos;

	mutex_lock(&pack_mutex);
	if (hdr->size > BPF_PROG_PACK_SIZE) {
		module_memfree(hdr);
		goto out;
	}

	list_for_each_entry(tmp, &pack_list, list) {
		if ((void *)hdr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > (void *)hdr) {
			pack = tmp;
			break;
		}
	}

	if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
		goto out;

	nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
	pos = ((unsigned long)hdr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;

	WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size),
		  "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");

	bitmap_clear(pack->bitmap, pos, nbits);
	if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
				       BPF_PROG_CHUNK_COUNT, 0) == 0) {
		list_del(&pack->list);
		module_memfree(pack->ptr);
		kfree(pack);
	}
out:
	mutex_unlock(&pack_mutex);
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);
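
/* Worked example (window size depends on the arch layout): with a
 * 1 GiB MODULES_VADDR..MODULES_END region, bpf_jit_limit_max is 1 GiB
 * and the default limit becomes round_up(1 GiB >> 2, PAGE_SIZE), i.e.
 * 256 MiB of chargeable JIT image memory.
 */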

int bpf_jit_charge_modmem(u32 size)
{
	if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
		if (!bpf_capable()) {
			atomic_long_sub(size, &bpf_jit_current);
			return -EPERM;
		}
	}

	return 0;
}

void bpf_jit_uncharge_modmem(u32 size)
{
	atomic_long_sub(size, &bpf_jit_current);
}

void *__weak bpf_jit_alloc_exec(unsigned long size)
{
	return module_alloc(size);
}

void __weak bpf_jit_free_exec(void *addr)
{
	module_memfree(addr);
}

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	u32 size, hole, start;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

	/* Most BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);

	if (bpf_jit_charge_modmem(size))
		return NULL;
	hdr = bpf_jit_alloc_exec(size);
	if (!hdr) {
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->size = size;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = get_random_u32_below(hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}
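
/* Layout example (illustrative sizes): with proglen = 100 and 4 KiB
 * pages, size rounds up to a single page; hole is then
 * 4096 - (100 + sizeof(*hdr)) and the image starts at a random,
 * alignment-masked offset within it, so identical programs land at
 * different addresses in the JIT image.
 */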

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	u32 size = hdr->size;

	bpf_jit_free_exec(hdr);
	bpf_jit_uncharge_modmem(size);
}

/* Allocate jit binary from bpf_prog_pack allocator.
 * Since the allocated memory is RO+X, the JIT engine cannot write directly
 * to the memory. To solve this problem, an RW buffer is also allocated
 * at the same time. The JIT engine should calculate offsets based on the
 * RO memory address, but write the JITed program to the RW buffer. Once the
 * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
 * the JITed program to the RO memory.
 */
struct bpf_binary_header *
bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
			  unsigned int alignment,
			  struct bpf_binary_header **rw_header,
			  u8 **rw_image,
			  bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *ro_header;
	u32 size, hole, start;

	WARN_ON_ONCE(!is_power_of_2(alignment) ||
		     alignment > BPF_IMAGE_ALIGNMENT);

	/* add 16 bytes for a random section of illegal instructions */
	size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);

	if (bpf_jit_charge_modmem(size))
		return NULL;
	ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
	if (!ro_header) {
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	*rw_header = kvmalloc(size, GFP_KERNEL);
	if (!*rw_header) {
		bpf_arch_text_copy(&ro_header->size, &size, sizeof(size));
		bpf_prog_pack_free(ro_header);
		bpf_jit_uncharge_modmem(size);
		return NULL;
	}

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(*rw_header, size);
	(*rw_header)->size = size;

	hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
		     BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
	start = get_random_u32_below(hole) & ~(alignment - 1);

	*image_ptr = &ro_header->image[start];
	*rw_image = &(*rw_header)->image[start];

	return ro_header;
}

/* Copy JITed text from rw_header to its final location, the ro_header. */
int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
				 struct bpf_binary_header *ro_header,
				 struct bpf_binary_header *rw_header)
{
	void *ptr;

	ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);

	kvfree(rw_header);

	if (IS_ERR(ptr)) {
		bpf_prog_pack_free(ro_header);
		return PTR_ERR(ptr);
	}
	return 0;
}

/* bpf_jit_binary_pack_free is called in two different scenarios:
 *   1) when the program is freed after being JITed, i.e. after
 *      bpf_jit_binary_pack_finalize();
 *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
 * For case 2), we need to free both the RO memory and the RW buffer.
 *
 * bpf_jit_binary_pack_free requires proper ro_header->size. However,
 * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
 * must be set with either bpf_jit_binary_pack_finalize (normal path) or
 * bpf_arch_text_copy (when the JIT fails).
 */
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
			      struct bpf_binary_header *rw_header)
{
	u32 size = ro_header->size;

	bpf_prog_pack_free(ro_header);
	kvfree(rw_header);
	bpf_jit_uncharge_modmem(size);
}

struct bpf_binary_header *
bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
{
	unsigned long real_start = (unsigned long)fp->bpf_func;
	unsigned long addr;

	addr = real_start & BPF_PROG_CHUNK_MASK;
	return (void *)addr;
}

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
	unsigned long real_start = (unsigned long)fp->bpf_func;
	unsigned long addr;

	addr = real_start & PAGE_MASK;
	return (void *)addr;
}

/* This symbol is only overridden by archs that have different
 * requirements than the usual eBPF JITs, f.e. when they only
 * implement cBPF JIT, do not set images read-only, etc.
 */
void __weak bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);

		bpf_jit_binary_free(hdr);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed)
{
	s16 off = insn->off;
	s32 imm = insn->imm;
	u8 *addr;

	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
	if (!*func_addr_fixed) {
		/* Place-holder address till the last pass has collected
		 * all addresses for JITed subprograms in which case we
		 * can pick them up from prog->aux.
		 */
		if (!extra_pass)
			addr = NULL;
		else if (prog->aux->func &&
			 off >= 0 && off < prog->aux->func_cnt)
			addr = (u8 *)prog->aux->func[off]->bpf_func;
		else
			return -EINVAL;
	} else {
		/* Address of a BPF helper call. Since part of the core
		 * kernel, it's always at a fixed location. __bpf_call_base
		 * and the helper with imm relative to it are both in core
		 * kernel.
		 */
		addr = (u8 *)__bpf_call_base + imm;
	}

	*func_addr = (unsigned long)addr;
	return 0;
}

static int bpf_jit_blind_insn(const struct bpf_insn *from,
			      const struct bpf_insn *aux,
			      struct bpf_insn *to_buff,
			      bool emit_zext)
{
	struct bpf_insn *to = to_buff;
	u32 imm_rnd = get_random_u32();
	s16 off;

	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);

	/* Constraints on AX register:
	 *
	 * AX register is inaccessible from user space. It is mapped in
	 * all JITs, and used here for constant blinding rewrites. It is
	 * typically "stateless" meaning its contents are only valid within
	 * the executed instruction, but not across several instructions.
	 * There are a few exceptions however which are further detailed
	 * below.
	 *
	 * Constant blinding is only used by JITs, not in the interpreter.
	 * The interpreter uses AX in some occasions as a local temporary
	 * register e.g. in DIV or MOD instructions.
	 *
	 * In restricted circumstances, the verifier can also use the AX
	 * register for rewrites as long as they do not interfere with
	 * the above cases!
	 */
	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
		goto out;

	if (from->imm == 0 &&
	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
		goto out;
	}

	switch (from->code) {
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_OR  | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR  | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
		break;

	case BPF_JMP | BPF_JEQ  | BPF_K:
	case BPF_JMP | BPF_JNE  | BPF_K:
	case BPF_JMP | BPF_JGT  | BPF_K:
	case BPF_JMP | BPF_JLT  | BPF_K:
	case BPF_JMP | BPF_JGE  | BPF_K:
	case BPF_JMP | BPF_JLE  | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
		/* Accommodate the extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
		break;

	case BPF_JMP32 | BPF_JEQ  | BPF_K:
	case BPF_JMP32 | BPF_JNE  | BPF_K:
	case BPF_JMP32 | BPF_JGT  | BPF_K:
	case BPF_JMP32 | BPF_JLT  | BPF_K:
	case BPF_JMP32 | BPF_JGE  | BPF_K:
	case BPF_JMP32 | BPF_JLE  | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		/* Accommodate the extra offset in case of a backjump. */
		off = from->off;
		if (off < 0)
			off -= 2;
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
				      off);
		break;

	case BPF_LD | BPF_IMM | BPF_DW:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
		break;
	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		if (emit_zext)
			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
		*to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
		break;

	case BPF_ST | BPF_MEM | BPF_DW:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
		break;
	}
out:
	return to - to_buff;
}
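
/* Blinding example (imm_rnd made up): BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
 * 0x1234) with imm_rnd = 0xdeadbeef is rewritten to
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0xdeadbeef ^ 0x1234);
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0xdeadbeef);
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX);
 *
 * so the user-chosen constant never appears verbatim in the image, yet
 * AX holds exactly 0x1234 by the time the ADD executes.
 */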

static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
					      gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
	struct bpf_prog *fp;

	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
	if (fp != NULL) {
		/* aux->prog still points to the fp_other one, so
		 * when promoting the clone to the real program,
		 * this still needs to be adapted.
		 */
		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
	}

	return fp;
}

static void bpf_prog_clone_free(struct bpf_prog *fp)
{
	/* aux was stolen by the other clone, so we cannot free
	 * it from this path! It will be freed eventually by the
	 * other program on release.
	 *
	 * At this point, we don't need a deferred release since
	 * clone is guaranteed to not be locked.
	 */
	fp->aux = NULL;
	fp->stats = NULL;
	fp->active = NULL;
	__bpf_prog_free(fp);
}

void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
{
	/* We have to repoint aux->prog to self, as we don't
	 * know whether fp here is the clone or the original.
	 */
	fp->aux->prog = fp;
	bpf_prog_clone_free(fp_other);
}

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
{
	struct bpf_insn insn_buff[16], aux[2];
	struct bpf_prog *clone, *tmp;
	int insn_delta, insn_cnt;
	struct bpf_insn *insn;
	int i, rewritten;

	if (!prog->blinding_requested || prog->blinded)
		return prog;

	clone = bpf_prog_clone_create(prog, GFP_USER);
	if (!clone)
		return ERR_PTR(-ENOMEM);

	insn_cnt = clone->len;
	insn = clone->insnsi;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (bpf_pseudo_func(insn)) {
			/* ld_imm64 with an address of bpf subprog is not
			 * a user controlled constant. Don't randomize it,
			 * since it will conflict with jit_subprogs() logic.
			 */
			insn++;
			i++;
			continue;
		}

		/* We temporarily need to hold the original ld64 insn
		 * so that we can still access the first part in the
		 * second blinding run.
		 */
		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    insn[1].code == 0)
			memcpy(aux, insn, sizeof(aux));

		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
					       clone->aux->verifier_zext);
		if (!rewritten)
			continue;

		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
		if (IS_ERR(tmp)) {
			/* Patching may have repointed aux->prog during
			 * realloc from the original one, so we need to
			 * fix it up here on error.
			 */
			bpf_jit_prog_release_other(prog, clone);
			return tmp;
		}

		clone = tmp;
		insn_delta = rewritten - 1;

		/* Walk new program and skip insns we just inserted. */
		insn = clone->insnsi + i + insn_delta;
		insn_cnt += insn_delta;
		i += insn_delta;
	}

	clone->blinded = 1;
	return clone;
}
#endif /* CONFIG_BPF_JIT */

/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it. This also needs
 * to go into kallsyms for correlation from e.g. bpftool, so naming
 * must not change.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__bpf_call_base);

/* All UAPI available opcodes. */
#define BPF_INSN_MAP(INSN_2, INSN_3)	\
	/* 32 bit ALU operations. */	\
	/* Register based. */		\
	INSN_3(ALU, ADD,  X),		\
	INSN_3(ALU, SUB,  X),		\
	INSN_3(ALU, AND,  X),		\
	INSN_3(ALU, OR,   X),		\
	INSN_3(ALU, LSH,  X),		\
	INSN_3(ALU, RSH,  X),		\
	INSN_3(ALU, XOR,  X),		\
	INSN_3(ALU, MUL,  X),		\
	INSN_3(ALU, MOV,  X),		\
	INSN_3(ALU, ARSH, X),		\
	INSN_3(ALU, DIV,  X),		\
	INSN_3(ALU, MOD,  X),		\
	INSN_2(ALU, NEG),		\
	INSN_3(ALU, END, TO_BE),	\
	INSN_3(ALU, END, TO_LE),	\
	/* Immediate based. */		\
	INSN_3(ALU, ADD,  K),		\
	INSN_3(ALU, SUB,  K),		\
	INSN_3(ALU, AND,  K),		\
	INSN_3(ALU, OR,   K),		\
	INSN_3(ALU, LSH,  K),		\
	INSN_3(ALU, RSH,  K),		\
	INSN_3(ALU, XOR,  K),		\
	INSN_3(ALU, MUL,  K),		\
	INSN_3(ALU, MOV,  K),		\
	INSN_3(ALU, ARSH, K),		\
	INSN_3(ALU, DIV,  K),		\
	INSN_3(ALU, MOD,  K),		\
	/* 64 bit ALU operations. */	\
	/* Register based. */		\
	INSN_3(ALU64, ADD,  X),		\
	INSN_3(ALU64, SUB,  X),		\
	INSN_3(ALU64, AND,  X),		\
	INSN_3(ALU64, OR,   X),		\
	INSN_3(ALU64, LSH,  X),		\
	INSN_3(ALU64, RSH,  X),		\
	INSN_3(ALU64, XOR,  X),		\
	INSN_3(ALU64, MUL,  X),		\
	INSN_3(ALU64, MOV,  X),		\
	INSN_3(ALU64, ARSH, X),		\
	INSN_3(ALU64, DIV,  X),		\
	INSN_3(ALU64, MOD,  X),		\
	INSN_2(ALU64, NEG),		\
	/* Immediate based. */		\
	INSN_3(ALU64, ADD,  K),		\
	INSN_3(ALU64, SUB,  K),		\
	INSN_3(ALU64, AND,  K),		\
	INSN_3(ALU64, OR,   K),		\
	INSN_3(ALU64, LSH,  K),		\
	INSN_3(ALU64, RSH,  K),		\
	INSN_3(ALU64, XOR,  K),		\
	INSN_3(ALU64, MUL,  K),		\
	INSN_3(ALU64, MOV,  K),		\
	INSN_3(ALU64, ARSH, K),		\
	INSN_3(ALU64, DIV,  K),		\
	INSN_3(ALU64, MOD,  K),		\
	/* Call instruction. */		\
	INSN_2(JMP, CALL),		\
	/* Exit instruction. */		\
	INSN_2(JMP, EXIT),		\
	/* 32-bit Jump instructions. */	\
	/* Register based. */		\
	INSN_3(JMP32, JEQ,  X),		\
	INSN_3(JMP32, JNE,  X),		\
	INSN_3(JMP32, JGT,  X),		\
	INSN_3(JMP32, JLT,  X),		\
	INSN_3(JMP32, JGE,  X),		\
	INSN_3(JMP32, JLE,  X),		\
	INSN_3(JMP32, JSGT, X),		\
	INSN_3(JMP32, JSLT, X),		\
	INSN_3(JMP32, JSGE, X),		\
	INSN_3(JMP32, JSLE, X),		\
	INSN_3(JMP32, JSET, X),		\
	/* Immediate based. */		\
	INSN_3(JMP32, JEQ,  K),		\
	INSN_3(JMP32, JNE,  K),		\
	INSN_3(JMP32, JGT,  K),		\
	INSN_3(JMP32, JLT,  K),		\
	INSN_3(JMP32, JGE,  K),		\
	INSN_3(JMP32, JLE,  K),		\
	INSN_3(JMP32, JSGT, K),		\
	INSN_3(JMP32, JSLT, K),		\
	INSN_3(JMP32, JSGE, K),		\
	INSN_3(JMP32, JSLE, K),		\
	INSN_3(JMP32, JSET, K),		\
	/* Jump instructions. */	\
	/* Register based. */		\
	INSN_3(JMP, JEQ,  X),		\
	INSN_3(JMP, JNE,  X),		\
	INSN_3(JMP, JGT,  X),		\
	INSN_3(JMP, JLT,  X),		\
	INSN_3(JMP, JGE,  X),		\
	INSN_3(JMP, JLE,  X),		\
	INSN_3(JMP, JSGT, X),		\
	INSN_3(JMP, JSLT, X),		\
	INSN_3(JMP, JSGE, X),		\
	INSN_3(JMP, JSLE, X),		\
	INSN_3(JMP, JSET, X),		\
	/* Immediate based. */		\
	INSN_3(JMP, JEQ,  K),		\
	INSN_3(JMP, JNE,  K),		\
	INSN_3(JMP, JGT,  K),		\
	INSN_3(JMP, JLT,  K),		\
	INSN_3(JMP, JGE,  K),		\
	INSN_3(JMP, JLE,  K),		\
	INSN_3(JMP, JSGT, K),		\
	INSN_3(JMP, JSLT, K),		\
	INSN_3(JMP, JSGE, K),		\
	INSN_3(JMP, JSLE, K),		\
	INSN_3(JMP, JSET, K),		\
	INSN_2(JMP, JA),		\
	/* Store instructions. */	\
	/* Register based. */		\
	INSN_3(STX, MEM,  B),		\
	INSN_3(STX, MEM,  H),		\
	INSN_3(STX, MEM,  W),		\
	INSN_3(STX, MEM,  DW),		\
	INSN_3(STX, ATOMIC, W),		\
	INSN_3(STX, ATOMIC, DW),	\
	/* Immediate based. */		\
	INSN_3(ST, MEM, B),		\
	INSN_3(ST, MEM, H),		\
	INSN_3(ST, MEM, W),		\
	INSN_3(ST, MEM, DW),		\
	/* Load instructions. */	\
	/* Register based. */		\
	INSN_3(LDX, MEM, B),		\
	INSN_3(LDX, MEM, H),		\
	INSN_3(LDX, MEM, W),		\
	INSN_3(LDX, MEM, DW),		\
	/* Immediate based. */		\
	INSN_3(LD, IMM, DW)

bool bpf_opcode_in_insntable(u8 code)
{
#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
	static const bool public_insntable[256] = {
		[0 ... 255] = false,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
		[BPF_LD | BPF_ABS | BPF_B] = true,
		[BPF_LD | BPF_ABS | BPF_H] = true,
		[BPF_LD | BPF_ABS | BPF_W] = true,
		[BPF_LD | BPF_IND | BPF_B] = true,
		[BPF_LD | BPF_IND | BPF_H] = true,
		[BPF_LD | BPF_IND | BPF_W] = true,
	};
#undef BPF_INSN_3_TBL
#undef BPF_INSN_2_TBL
	return public_insntable[code];
}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
{
	memset(dst, 0, size);
	return -EFAULT;
}

/**
 * ___bpf_prog_run - run eBPF program on a given context
 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
 * @insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 *
 * Return: whatever value is in %BPF_R0 at program exit
 */
static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
{
#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
	static const void * const jumptable[256] __annotate_jump_table = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
		/* Non-UAPI available opcodes. */
		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
		[BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
	};
#undef BPF_INSN_3_LBL
#undef BPF_INSN_2_LBL
	u32 tail_call_cnt = 0;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];

	/* Explicitly mask the register-based shift amounts with 63 or 31
	 * to avoid undefined behavior. Normally this won't affect the
	 * generated code, for example, in case of native 64 bit archs such
	 * as x86-64 or arm64, the compiler is optimizing the AND away for
	 * the interpreter. In case of JITs, each of the JIT backends compiles
	 * the BPF shift operations to machine instructions which produce
	 * implementation-defined results in such a case; the resulting
	 * contents of the register may be arbitrary, but program behaviour
	 * as a whole remains defined. In other words, in case of JIT backends,
	 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
	 */
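	/* Masking example: executing r0 <<= r1 with r1 == 70 still yields
	 * a defined result here, since the interpreter folds the shift
	 * amount to 70 & 63 = 6 for ALU64 (and would use & 31 for the
	 * 32 bit variant).
	 */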
	/* ALU (shifts) */
#define SHT(OPCODE, OP)					\
	ALU64_##OPCODE##_X:				\
		DST = DST OP (SRC & 63);		\
		CONT;					\
	ALU_##OPCODE##_X:				\
		DST = (u32) DST OP ((u32) SRC & 31);	\
		CONT;					\
	ALU64_##OPCODE##_K:				\
		DST = DST OP IMM;			\
		CONT;					\
	ALU_##OPCODE##_K:				\
		DST = (u32) DST OP (u32) IMM;		\
		CONT;
	/* ALU (rest) */
#define ALU(OPCODE, OP)					\
	ALU64_##OPCODE##_X:				\
		DST = DST OP SRC;			\
		CONT;					\
	ALU_##OPCODE##_X:				\
		DST = (u32) DST OP (u32) SRC;		\
		CONT;					\
	ALU64_##OPCODE##_K:				\
		DST = DST OP IMM;			\
		CONT;					\
	ALU_##OPCODE##_K:				\
		DST = (u32) DST OP (u32) IMM;		\
		CONT;
	ALU(ADD,  +)
	ALU(SUB,  -)
	ALU(AND,  &)
	ALU(OR,   |)
	ALU(XOR,  ^)
	ALU(MUL,  *)
	SHT(LSH, <<)
	SHT(RSH, >>)
#undef SHT
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU_ARSH_X:
		DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
		CONT;
	ALU_ARSH_K:
		DST = (u64) (u32) (((s32) DST) >> IMM);
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= (SRC & 63);
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		div64_u64_rem(DST, SRC, &AX);
		DST = AX;
		CONT;
	ALU_MOD_X:
		AX = (u32) DST;
		DST = do_div(AX, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &AX);
		DST = AX;
		CONT;
	ALU_MOD_K:
		AX = (u32) DST;
		DST = do_div(AX, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		AX = (u32) DST;
		do_div(AX, (u32) SRC);
		DST = (u32) AX;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		AX = (u32) DST;
		do_div(AX, (u32) IMM);
		DST = (u32) AX;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

1814 /* CALL */
1815 JMP_CALL:
1816 /* Function call scratches BPF_R1-BPF_R5 registers,
1817 * preserves BPF_R6-BPF_R9, and stores return value
1818 * into BPF_R0.
1819 */
1820 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1821 BPF_R4, BPF_R5);
1822 CONT;
1823
1824 JMP_CALL_ARGS:
1825 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1826 BPF_R3, BPF_R4,
1827 BPF_R5,
1828 insn + insn->off + 1);
1829 CONT;
1830
1831 JMP_TAIL_CALL: {
1832 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1833 struct bpf_array *array = container_of(map, struct bpf_array, map);
1834 struct bpf_prog *prog;
1835 u32 index = BPF_R3;
1836
1837 if (unlikely(index >= array->map.max_entries))
1838 goto out;
1839
1840 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
1841 goto out;
1842
1843 tail_call_cnt++;
1844
1845 prog = READ_ONCE(array->ptrs[index]);
1846 if (!prog)
1847 goto out;
1848
		/* ARG1 at this point is guaranteed to point to CTX from
		 * the verifier side, because the tail call is handled like
		 * a helper, that is, via bpf_tail_call_proto, whose
		 * arg1_type is ARG_PTR_TO_CTX.
		 */
1854 insn = prog->insnsi;
1855 goto select_insn;
1856out:
1857 CONT;
1858 }
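	/* Hedged sketch of the program side (illustrative; prog_map and
	 * index are hypothetical names): a BPF program reaches the handler
	 * above via the tail call helper:
	 *
	 *	bpf_tail_call(ctx, &prog_map, index);
	 *	// execution continues here only if index is out of range,
	 *	// the slot is empty, or tail_call_cnt already hit
	 *	// MAX_TAIL_CALL_CNT
	 */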
1859 JMP_JA:
1860 insn += insn->off;
1861 CONT;
1862 JMP_EXIT:
1863 return BPF_R0;
1864 /* JMP */
1865#define COND_JMP(SIGN, OPCODE, CMP_OP) \
1866 JMP_##OPCODE##_X: \
1867 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \
1868 insn += insn->off; \
1869 CONT_JMP; \
1870 } \
1871 CONT; \
1872 JMP32_##OPCODE##_X: \
1873 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \
1874 insn += insn->off; \
1875 CONT_JMP; \
1876 } \
1877 CONT; \
1878 JMP_##OPCODE##_K: \
1879 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \
1880 insn += insn->off; \
1881 CONT_JMP; \
1882 } \
1883 CONT; \
1884 JMP32_##OPCODE##_K: \
1885 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \
1886 insn += insn->off; \
1887 CONT_JMP; \
1888 } \
1889 CONT;
1890 COND_JMP(u, JEQ, ==)
1891 COND_JMP(u, JNE, !=)
1892 COND_JMP(u, JGT, >)
1893 COND_JMP(u, JLT, <)
1894 COND_JMP(u, JGE, >=)
1895 COND_JMP(u, JLE, <=)
1896 COND_JMP(u, JSET, &)
1897 COND_JMP(s, JSGT, >)
1898 COND_JMP(s, JSLT, <)
1899 COND_JMP(s, JSGE, >=)
1900 COND_JMP(s, JSLE, <=)
1901#undef COND_JMP
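	/* Illustrative example: for JMP32_JLT_K with
	 * DST == 0xffffffff00000001, only the low 32 bits participate,
	 * so the comparison is (u32) 0x00000001 < (u32) IMM, whereas
	 * JMP_JLT_K compares the full 64 bit value.
	 */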
	/* ST, STX and LDX */
1903 ST_NOSPEC:
	/* Speculation barrier for mitigating Speculative Store Bypass.
	 * In case of arm64, we rely on the firmware mitigation as
	 * controlled via the ssbd kernel parameter. Whenever the
	 * mitigation is enabled, it works for all of the kernel code
	 * with no need to provide any additional instructions here.
	 * In case of x86, we use the 'lfence' insn for mitigation. We
	 * reuse preexisting logic from Spectre v1 mitigation that
	 * happens to produce the required code on x86 for v4 as well.
	 */
1913#ifdef CONFIG_X86
1914 barrier_nospec();
1915#endif
1916 CONT;
1917#define LDST(SIZEOP, SIZE) \
1918 STX_MEM_##SIZEOP: \
1919 *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \
1920 CONT; \
1921 ST_MEM_##SIZEOP: \
1922 *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \
1923 CONT; \
1924 LDX_MEM_##SIZEOP: \
1925 DST = *(SIZE *)(unsigned long) (SRC + insn->off); \
1926 CONT; \
1927 LDX_PROBE_MEM_##SIZEOP: \
1928 bpf_probe_read_kernel(&DST, sizeof(SIZE), \
1929 (const void *)(long) (SRC + insn->off)); \
1930 DST = *((SIZE *)&DST); \
1931 CONT;
1932
1933 LDST(B, u8)
1934 LDST(H, u16)
1935 LDST(W, u32)
1936 LDST(DW, u64)
1937#undef LDST
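	/* For reference, LDST(W, u32) above expands (modulo line breaks) to:
	 *
	 *	STX_MEM_W: *(u32 *)(unsigned long) (DST + insn->off) = SRC; CONT;
	 *	ST_MEM_W:  *(u32 *)(unsigned long) (DST + insn->off) = IMM; CONT;
	 *	LDX_MEM_W: DST = *(u32 *)(unsigned long) (SRC + insn->off); CONT;
	 *	LDX_PROBE_MEM_W: ... via bpf_probe_read_kernel() ...
	 */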
1938
1939#define ATOMIC_ALU_OP(BOP, KOP) \
1940 case BOP: \
1941 if (BPF_SIZE(insn->code) == BPF_W) \
1942 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
1943 (DST + insn->off)); \
1944 else \
1945 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
1946 (DST + insn->off)); \
1947 break; \
1948 case BOP | BPF_FETCH: \
1949 if (BPF_SIZE(insn->code) == BPF_W) \
1950 SRC = (u32) atomic_fetch_##KOP( \
1951 (u32) SRC, \
1952 (atomic_t *)(unsigned long) (DST + insn->off)); \
1953 else \
1954 SRC = (u64) atomic64_fetch_##KOP( \
1955 (u64) SRC, \
1956 (atomic64_t *)(unsigned long) (DST + insn->off)); \
1957 break;
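	/* Semantics sketch (illustrative only): for insn->imm ==
	 * BPF_ADD | BPF_FETCH at BPF_DW size, the case above behaves,
	 * atomically, as if it did:
	 *
	 *	u64 old = *(u64 *)(unsigned long) (DST + insn->off);
	 *	*(u64 *)(unsigned long) (DST + insn->off) = old + SRC;
	 *	SRC = old;	// fetched value lands in the source register
	 */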
1958
1959 STX_ATOMIC_DW:
1960 STX_ATOMIC_W:
1961 switch (IMM) {
1962 ATOMIC_ALU_OP(BPF_ADD, add)
1963 ATOMIC_ALU_OP(BPF_AND, and)
1964 ATOMIC_ALU_OP(BPF_OR, or)
1965 ATOMIC_ALU_OP(BPF_XOR, xor)
1966#undef ATOMIC_ALU_OP
1967
1968 case BPF_XCHG:
1969 if (BPF_SIZE(insn->code) == BPF_W)
1970 SRC = (u32) atomic_xchg(
1971 (atomic_t *)(unsigned long) (DST + insn->off),
1972 (u32) SRC);
1973 else
1974 SRC = (u64) atomic64_xchg(
1975 (atomic64_t *)(unsigned long) (DST + insn->off),
1976 (u64) SRC);
1977 break;
1978 case BPF_CMPXCHG:
1979 if (BPF_SIZE(insn->code) == BPF_W)
1980 BPF_R0 = (u32) atomic_cmpxchg(
1981 (atomic_t *)(unsigned long) (DST + insn->off),
1982 (u32) BPF_R0, (u32) SRC);
1983 else
1984 BPF_R0 = (u64) atomic64_cmpxchg(
1985 (atomic64_t *)(unsigned long) (DST + insn->off),
1986 (u64) BPF_R0, (u64) SRC);
1987 break;
1988
1989 default:
1990 goto default_label;
1991 }
1992 CONT;
1993
1994 default_label:
	/* If we ever reach this, we have a bug somewhere. Die hard here
	 * instead of just returning 0; we could be somewhere in a subprog,
	 * so execution could otherwise continue, which we do /not/ want.
	 *
	 * Note, the verifier whitelists all opcodes in bpf_opcode_in_insntable().
	 */
2001 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2002 insn->code, insn->imm);
2003 BUG_ON(1);
2004 return 0;
2005}
2006
2007#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2008#define DEFINE_BPF_PROG_RUN(stack_size) \
2009static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2010{ \
2011 u64 stack[stack_size / sizeof(u64)]; \
2012 u64 regs[MAX_BPF_EXT_REG] = {}; \
2013\
2014 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2015 ARG1 = (u64) (unsigned long) ctx; \
2016 return ___bpf_prog_run(regs, insn); \
2017}
2018
2019#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2020#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2021static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2022 const struct bpf_insn *insn) \
2023{ \
2024 u64 stack[stack_size / sizeof(u64)]; \
2025 u64 regs[MAX_BPF_EXT_REG]; \
2026\
2027 FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2028 BPF_R1 = r1; \
2029 BPF_R2 = r2; \
2030 BPF_R3 = r3; \
2031 BPF_R4 = r4; \
2032 BPF_R5 = r5; \
2033 return ___bpf_prog_run(regs, insn); \
2034}
2035
2036#define EVAL1(FN, X) FN(X)
2037#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2038#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2039#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2040#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2041#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2042
2043EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2044EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2045EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2046
2047EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2048EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2049EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
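/* Illustrative example of the bucketing: a program with a stack depth
 * of 65 bytes rounds up to 96 and thus runs via __bpf_prog_run96(),
 * i.e. interpreters[(96 / 32) - 1] == interpreters[2] below.
 */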
2050
2051#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2052
2053static unsigned int (*interpreters[])(const void *ctx,
2054 const struct bpf_insn *insn) = {
2055EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2056EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2057EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2058};
2059#undef PROG_NAME_LIST
2060#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2061static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2062 const struct bpf_insn *insn) = {
2063EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2064EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2065EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2066};
2067#undef PROG_NAME_LIST
2068
2069void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2070{
2071 stack_depth = max_t(u32, stack_depth, 1);
2072 insn->off = (s16) insn->imm;
2073 insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2074 __bpf_call_base_args;
2075 insn->code = BPF_JMP | BPF_CALL_ARGS;
2076}
2077
2078#else
2079static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2080 const struct bpf_insn *insn)
2081{
2082 /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2083 * is not working properly, so warn about it!
2084 */
2085 WARN_ON_ONCE(1);
2086 return 0;
2087}
2088#endif
2089
2090bool bpf_prog_map_compatible(struct bpf_map *map,
2091 const struct bpf_prog *fp)
2092{
2093 enum bpf_prog_type prog_type = resolve_prog_type(fp);
2094 bool ret;
2095
2096 if (fp->kprobe_override)
2097 return false;
2098
2099 spin_lock(&map->owner.lock);
2100 if (!map->owner.type) {
2101 /* There's no owner yet where we could check for
2102 * compatibility.
2103 */
2104 map->owner.type = prog_type;
2105 map->owner.jited = fp->jited;
2106 map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
2107 ret = true;
2108 } else {
2109 ret = map->owner.type == prog_type &&
2110 map->owner.jited == fp->jited &&
2111 map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
2112 }
2113 spin_unlock(&map->owner.lock);
2114
2115 return ret;
2116}
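/* Illustrative consequence of the owner check above: if the first
 * program attached to a prog array map was JITed, an interpreted
 * program is rejected later on, since mixing both across tail calls
 * is not supported:
 *
 *	map->owner.jited == true && fp->jited == false
 *		=> bpf_prog_map_compatible() == false
 */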
2117
2118static int bpf_check_tail_call(const struct bpf_prog *fp)
2119{
2120 struct bpf_prog_aux *aux = fp->aux;
2121 int i, ret = 0;
2122
2123 mutex_lock(&aux->used_maps_mutex);
2124 for (i = 0; i < aux->used_map_cnt; i++) {
2125 struct bpf_map *map = aux->used_maps[i];
2126
2127 if (!map_type_contains_progs(map))
2128 continue;
2129
2130 if (!bpf_prog_map_compatible(map, fp)) {
2131 ret = -EINVAL;
2132 goto out;
2133 }
2134 }
2135
2136out:
2137 mutex_unlock(&aux->used_maps_mutex);
2138 return ret;
2139}
2140
2141static void bpf_prog_select_func(struct bpf_prog *fp)
2142{
2143#ifndef CONFIG_BPF_JIT_ALWAYS_ON
2144 u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2145
2146 fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
2147#else
2148 fp->bpf_func = __bpf_prog_ret0_warn;
2149#endif
2150}
2151
2152/**
2153 * bpf_prog_select_runtime - select exec runtime for BPF program
2154 * @fp: bpf_prog populated with BPF program
2155 * @err: pointer to error variable
2156 *
2157 * Try to JIT eBPF program, if JIT is not available, use interpreter.
2158 * The BPF program will be executed via bpf_prog_run() function.
2159 *
2160 * Return: the &fp argument along with &err set to 0 for success or
2161 * a negative errno code on failure
2162 */
2163struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2164{
	/* In case of BPF to BPF calls, the verifier did all the prep
	 * work with regard to JITing, etc.
	 */
2168 bool jit_needed = false;
2169
2170 if (fp->bpf_func)
2171 goto finalize;
2172
2173 if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2174 bpf_prog_has_kfunc_call(fp))
2175 jit_needed = true;
2176
2177 bpf_prog_select_func(fp);
2178
	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of an error during
	 * blinding, bpf_int_jit_compile() must always return a
	 * valid program, which in this case would simply not
	 * be JITed, but fall back to the interpreter.
	 */
2185 if (!bpf_prog_is_dev_bound(fp->aux)) {
2186 *err = bpf_prog_alloc_jited_linfo(fp);
2187 if (*err)
2188 return fp;
2189
2190 fp = bpf_int_jit_compile(fp);
2191 bpf_prog_jit_attempt_done(fp);
2192 if (!fp->jited && jit_needed) {
2193 *err = -ENOTSUPP;
2194 return fp;
2195 }
2196 } else {
2197 *err = bpf_prog_offload_compile(fp);
2198 if (*err)
2199 return fp;
2200 }
2201
2202finalize:
2203 bpf_prog_lock_ro(fp);
2204
	/* The tail call compatibility check can only be done at
	 * this late stage, as we need to determine whether we deal
	 * with JITed or non-JITed program concatenations, and not
	 * all eBPF JITs might immediately support all features.
	 */
2210 *err = bpf_check_tail_call(fp);
2211
2212 return fp;
2213}
2214EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
2215
2216static unsigned int __bpf_prog_ret1(const void *ctx,
2217 const struct bpf_insn *insn)
2218{
2219 return 1;
2220}
2221
2222static struct bpf_prog_dummy {
2223 struct bpf_prog prog;
2224} dummy_bpf_prog = {
2225 .prog = {
2226 .bpf_func = __bpf_prog_ret1,
2227 },
2228};
2229
2230struct bpf_empty_prog_array bpf_empty_prog_array = {
2231 .null_prog = NULL,
2232};
2233EXPORT_SYMBOL(bpf_empty_prog_array);
2234
2235struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2236{
2237 if (prog_cnt)
2238 return kzalloc(sizeof(struct bpf_prog_array) +
2239 sizeof(struct bpf_prog_array_item) *
2240 (prog_cnt + 1),
2241 flags);
2242
2243 return &bpf_empty_prog_array.hdr;
2244}
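/* Minimal usage sketch (illustrative; prog0/prog1 are hypothetical):
 *
 *	struct bpf_prog_array *arr = bpf_prog_array_alloc(2, GFP_KERNEL);
 *
 *	if (arr) {
 *		arr->items[0].prog = prog0;
 *		arr->items[1].prog = prog1;
 *		// the trailing item stays NULL (kzalloc'ed) and acts
 *		// as the array terminator
 *	}
 */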
2245
2246void bpf_prog_array_free(struct bpf_prog_array *progs)
2247{
2248 if (!progs || progs == &bpf_empty_prog_array.hdr)
2249 return;
2250 kfree_rcu(progs, rcu);
2251}
2252
2253static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
2254{
2255 struct bpf_prog_array *progs;
2256
	/* If the RCU Tasks Trace grace period implies the RCU grace
	 * period, there is no need to call kfree_rcu(); just call
	 * kfree() directly.
	 */
2260 progs = container_of(rcu, struct bpf_prog_array, rcu);
2261 if (rcu_trace_implies_rcu_gp())
2262 kfree(progs);
2263 else
2264 kfree_rcu(progs, rcu);
2265}
2266
2267void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
2268{
2269 if (!progs || progs == &bpf_empty_prog_array.hdr)
2270 return;
2271 call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
2272}
2273
2274int bpf_prog_array_length(struct bpf_prog_array *array)
2275{
2276 struct bpf_prog_array_item *item;
2277 u32 cnt = 0;
2278
2279 for (item = array->items; item->prog; item++)
2280 if (item->prog != &dummy_bpf_prog.prog)
2281 cnt++;
2282 return cnt;
2283}
2284
2285bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2286{
2287 struct bpf_prog_array_item *item;
2288
2289 for (item = array->items; item->prog; item++)
2290 if (item->prog != &dummy_bpf_prog.prog)
2291 return false;
2292 return true;
2293}
2294
2295static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2296 u32 *prog_ids,
2297 u32 request_cnt)
2298{
2299 struct bpf_prog_array_item *item;
2300 int i = 0;
2301
2302 for (item = array->items; item->prog; item++) {
2303 if (item->prog == &dummy_bpf_prog.prog)
2304 continue;
2305 prog_ids[i] = item->prog->aux->id;
2306 if (++i == request_cnt) {
2307 item++;
2308 break;
2309 }
2310 }
2311
2312 return !!(item->prog);
2313}
2314
2315int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2316 __u32 __user *prog_ids, u32 cnt)
2317{
2318 unsigned long err = 0;
2319 bool nospc;
2320 u32 *ids;
2321
	/* Users of this function are doing:
	 *	cnt = bpf_prog_array_length();
	 *	if (cnt > 0)
	 *		bpf_prog_array_copy_to_user(..., cnt);
	 * so the kcalloc below doesn't need an extra cnt > 0 check.
	 */
2328 ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2329 if (!ids)
2330 return -ENOMEM;
2331 nospc = bpf_prog_array_copy_core(array, ids, cnt);
2332 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2333 kfree(ids);
2334 if (err)
2335 return -EFAULT;
2336 if (nospc)
2337 return -ENOSPC;
2338 return 0;
2339}
2340
2341void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2342 struct bpf_prog *old_prog)
2343{
2344 struct bpf_prog_array_item *item;
2345
2346 for (item = array->items; item->prog; item++)
2347 if (item->prog == old_prog) {
2348 WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2349 break;
2350 }
2351}
2352
2353/**
2354 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2355 * index into the program array with
2356 * a dummy no-op program.
2357 * @array: a bpf_prog_array
2358 * @index: the index of the program to replace
2359 *
 * Skips over dummy programs (by not counting them) when calculating
 * the position of the program to replace.
2362 *
2363 * Return:
2364 * * 0 - Success
2365 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2366 * * -ENOENT - Index out of range
2367 */
2368int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2369{
2370 return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2371}
2372
2373/**
2374 * bpf_prog_array_update_at() - Updates the program at the given index
2375 * into the program array.
2376 * @array: a bpf_prog_array
2377 * @index: the index of the program to update
2378 * @prog: the program to insert into the array
2379 *
 * Skips over dummy programs (by not counting them) when calculating
 * the position of the program to update.
2382 *
2383 * Return:
2384 * * 0 - Success
2385 * * -EINVAL - Invalid index value. Must be a non-negative integer.
2386 * * -ENOENT - Index out of range
2387 */
2388int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2389 struct bpf_prog *prog)
2390{
2391 struct bpf_prog_array_item *item;
2392
2393 if (unlikely(index < 0))
2394 return -EINVAL;
2395
2396 for (item = array->items; item->prog; item++) {
2397 if (item->prog == &dummy_bpf_prog.prog)
2398 continue;
2399 if (!index) {
2400 WRITE_ONCE(item->prog, prog);
2401 return 0;
2402 }
2403 index--;
2404 }
2405 return -ENOENT;
2406}
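/* Illustrative example: with items == { dummy, progA, progB, NULL },
 * bpf_prog_array_update_at(array, 1, progC) replaces progB, since the
 * dummy entry is skipped and progA occupies index 0.
 */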
2407
2408int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2409 struct bpf_prog *exclude_prog,
2410 struct bpf_prog *include_prog,
2411 u64 bpf_cookie,
2412 struct bpf_prog_array **new_array)
2413{
2414 int new_prog_cnt, carry_prog_cnt = 0;
2415 struct bpf_prog_array_item *existing, *new;
2416 struct bpf_prog_array *array;
2417 bool found_exclude = false;
2418
2419 /* Figure out how many existing progs we need to carry over to
2420 * the new array.
2421 */
2422 if (old_array) {
2423 existing = old_array->items;
2424 for (; existing->prog; existing++) {
2425 if (existing->prog == exclude_prog) {
2426 found_exclude = true;
2427 continue;
2428 }
2429 if (existing->prog != &dummy_bpf_prog.prog)
2430 carry_prog_cnt++;
2431 if (existing->prog == include_prog)
2432 return -EEXIST;
2433 }
2434 }
2435
2436 if (exclude_prog && !found_exclude)
2437 return -ENOENT;
2438
2439 /* How many progs (not NULL) will be in the new array? */
2440 new_prog_cnt = carry_prog_cnt;
2441 if (include_prog)
2442 new_prog_cnt += 1;
2443
2444 /* Do we have any prog (not NULL) in the new array? */
2445 if (!new_prog_cnt) {
2446 *new_array = NULL;
2447 return 0;
2448 }
2449
2450 /* +1 as the end of prog_array is marked with NULL */
2451 array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2452 if (!array)
2453 return -ENOMEM;
2454 new = array->items;
2455
2456 /* Fill in the new prog array */
2457 if (carry_prog_cnt) {
2458 existing = old_array->items;
2459 for (; existing->prog; existing++) {
2460 if (existing->prog == exclude_prog ||
2461 existing->prog == &dummy_bpf_prog.prog)
2462 continue;
2463
2464 new->prog = existing->prog;
2465 new->bpf_cookie = existing->bpf_cookie;
2466 new++;
2467 }
2468 }
2469 if (include_prog) {
2470 new->prog = include_prog;
2471 new->bpf_cookie = bpf_cookie;
2472 new++;
2473 }
2474 new->prog = NULL;
2475 *new_array = array;
2476 return 0;
2477}
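/* Illustrative walk-through: with old_array == { A, dummy, B, NULL },
 * exclude_prog == A and include_prog == C, the code above computes
 * carry_prog_cnt == 1 (only B carries over), new_prog_cnt == 2, and
 * produces *new_array == { B, C, NULL }.
 */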
2478
2479int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2480 u32 *prog_ids, u32 request_cnt,
2481 u32 *prog_cnt)
2482{
2483 u32 cnt = 0;
2484
2485 if (array)
2486 cnt = bpf_prog_array_length(array);
2487
2488 *prog_cnt = cnt;
2489
2490 /* return early if user requested only program count or nothing to copy */
2491 if (!request_cnt || !cnt)
2492 return 0;
2493
2494 /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2495 return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2496 : 0;
2497}
2498
2499void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2500 struct bpf_map **used_maps, u32 len)
2501{
2502 struct bpf_map *map;
2503 u32 i;
2504
2505 for (i = 0; i < len; i++) {
2506 map = used_maps[i];
2507 if (map->ops->map_poke_untrack)
2508 map->ops->map_poke_untrack(map, aux);
2509 bpf_map_put(map);
2510 }
2511}
2512
2513static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2514{
2515 __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2516 kfree(aux->used_maps);
2517}
2518
2519void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2520 struct btf_mod_pair *used_btfs, u32 len)
2521{
2522#ifdef CONFIG_BPF_SYSCALL
2523 struct btf_mod_pair *btf_mod;
2524 u32 i;
2525
2526 for (i = 0; i < len; i++) {
2527 btf_mod = &used_btfs[i];
2528 if (btf_mod->module)
2529 module_put(btf_mod->module);
2530 btf_put(btf_mod->btf);
2531 }
2532#endif
2533}
2534
2535static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2536{
2537 __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2538 kfree(aux->used_btfs);
2539}
2540
2541static void bpf_prog_free_deferred(struct work_struct *work)
2542{
2543 struct bpf_prog_aux *aux;
2544 int i;
2545
2546 aux = container_of(work, struct bpf_prog_aux, work);
2547#ifdef CONFIG_BPF_SYSCALL
2548 bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2549#endif
2550#ifdef CONFIG_CGROUP_BPF
2551 if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2552 bpf_cgroup_atype_put(aux->cgroup_atype);
2553#endif
2554 bpf_free_used_maps(aux);
2555 bpf_free_used_btfs(aux);
2556 if (bpf_prog_is_dev_bound(aux))
2557 bpf_prog_offload_destroy(aux->prog);
2558#ifdef CONFIG_PERF_EVENTS
2559 if (aux->prog->has_callchain_buf)
2560 put_callchain_buffers();
2561#endif
2562 if (aux->dst_trampoline)
2563 bpf_trampoline_put(aux->dst_trampoline);
2564 for (i = 0; i < aux->func_cnt; i++) {
2565 /* We can just unlink the subprog poke descriptor table as
2566 * it was originally linked to the main program and is also
2567 * released along with it.
2568 */
2569 aux->func[i]->aux->poke_tab = NULL;
2570 bpf_jit_free(aux->func[i]);
2571 }
2572 if (aux->func_cnt) {
2573 kfree(aux->func);
2574 bpf_prog_unlock_free(aux->prog);
2575 } else {
2576 bpf_jit_free(aux->prog);
2577 }
2578}
2579
2580void bpf_prog_free(struct bpf_prog *fp)
2581{
2582 struct bpf_prog_aux *aux = fp->aux;
2583
2584 if (aux->dst_prog)
2585 bpf_prog_put(aux->dst_prog);
2586 INIT_WORK(&aux->work, bpf_prog_free_deferred);
2587 schedule_work(&aux->work);
2588}
2589EXPORT_SYMBOL_GPL(bpf_prog_free);
2590
/* RNG for unprivileged user space with separated state from prandom_u32(). */
2592static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2593
2594void bpf_user_rnd_init_once(void)
2595{
2596 prandom_init_once(&bpf_user_rnd_state);
2597}
2598
2599BPF_CALL_0(bpf_user_rnd_u32)
2600{
	/* Should someone ever have the rather unwise idea to use some
	 * of the registers passed into this function, then note that
	 * this function is called from native eBPF and classic-to-eBPF
	 * transformations. Register assignments from both sides are
	 * different; e.g. classic BPF always sets fn(ctx, A, X) here.
	 */
2607 struct rnd_state *state;
2608 u32 res;
2609
2610 state = &get_cpu_var(bpf_user_rnd_state);
2611 res = prandom_u32_state(state);
2612 put_cpu_var(bpf_user_rnd_state);
2613
2614 return res;
2615}
2616
2617BPF_CALL_0(bpf_get_raw_cpu_id)
2618{
2619 return raw_smp_processor_id();
2620}
2621
2622/* Weak definitions of helper functions in case we don't have bpf syscall. */
2623const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2624const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2625const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2626const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2627const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2628const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2629const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
2630const struct bpf_func_proto bpf_spin_lock_proto __weak;
2631const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2632const struct bpf_func_proto bpf_jiffies64_proto __weak;
2633
2634const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2635const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2636const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2637const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2638const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2639const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2640const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;
2641
2642const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2643const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2644const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2645const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2646const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2647const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2648const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2649const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2650const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2651const struct bpf_func_proto bpf_set_retval_proto __weak;
2652const struct bpf_func_proto bpf_get_retval_proto __weak;
2653
2654const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2655{
2656 return NULL;
2657}
2658
2659const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
2660{
2661 return NULL;
2662}
2663
2664u64 __weak
2665bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2666 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2667{
2668 return -ENOTSUPP;
2669}
2670EXPORT_SYMBOL_GPL(bpf_event_output);
2671
2672/* Always built-in helper functions. */
2673const struct bpf_func_proto bpf_tail_call_proto = {
2674 .func = NULL,
2675 .gpl_only = false,
2676 .ret_type = RET_VOID,
2677 .arg1_type = ARG_PTR_TO_CTX,
2678 .arg2_type = ARG_CONST_MAP_PTR,
2679 .arg3_type = ARG_ANYTHING,
2680};
2681
2682/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2683 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2684 * eBPF and implicitly also cBPF can get JITed!
2685 */
2686struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2687{
2688 return prog;
2689}
2690
2691/* Stub for JITs that support eBPF. All cBPF code gets transformed into
2692 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2693 */
2694void __weak bpf_jit_compile(struct bpf_prog *prog)
2695{
2696}
2697
2698bool __weak bpf_helper_changes_pkt_data(void *func)
2699{
2700 return false;
2701}
2702
/* Return TRUE if the JIT backend wants the verifier to enable sub-register
 * usage analysis and wants explicit zero extension inserted by the
 * verifier. Otherwise, return FALSE.
 *
 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
 * you don't override this. JITs that don't want these extra insns can detect
 * them using insn_is_zext.
 */
2711bool __weak bpf_jit_needs_zext(void)
2712{
2713 return false;
2714}
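/* Example of the extra insn in question (illustrative): after a 32 bit
 * ALU op such as w1 += w2, the verifier may patch in
 *
 *	w1 = w1		// the special zero-extending mov32
 *
 * which zero-extends the sub-register into the full 64 bit register;
 * JITs can recognize such patched insns via insn_is_zext().
 */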
2715
2716/* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
2717bool __weak bpf_jit_supports_subprog_tailcalls(void)
2718{
2719 return false;
2720}
2721
2722bool __weak bpf_jit_supports_kfunc_call(void)
2723{
2724 return false;
2725}
2726
/* To execute LD_ABS/LD_IND instructions, __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
2730int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2731 int len)
2732{
2733 return -EFAULT;
2734}
2735
2736int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2737 void *addr1, void *addr2)
2738{
2739 return -ENOTSUPP;
2740}
2741
2742void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
2743{
2744 return ERR_PTR(-ENOTSUPP);
2745}
2746
2747int __weak bpf_arch_text_invalidate(void *dst, size_t len)
2748{
2749 return -ENOTSUPP;
2750}
2751
2752#ifdef CONFIG_BPF_SYSCALL
2753static int __init bpf_global_ma_init(void)
2754{
2755 int ret;
2756
2757 ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
2758 bpf_global_ma_set = !ret;
2759 return ret;
2760}
2761late_initcall(bpf_global_ma_init);
2762#endif
2763
2764DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2765EXPORT_SYMBOL(bpf_stats_enabled_key);
2766
2767/* All definitions of tracepoints related to BPF. */
2768#define CREATE_TRACE_POINTS
2769#include <linux/bpf_trace.h>
2770
2771EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2772EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);