   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Linux Socket Filter - Kernel level socket filtering
   4 *
   5 * Based on the design of the Berkeley Packet Filter. The new
   6 * internal format has been designed by PLUMgrid:
   7 *
   8 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
   9 *
  10 * Authors:
  11 *
  12 *	Jay Schulist <jschlst@samba.org>
  13 *	Alexei Starovoitov <ast@plumgrid.com>
  14 *	Daniel Borkmann <dborkman@redhat.com>
  15 *
  16 * Andi Kleen - Fix a few bad bugs and races.
  17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
  18 */
  19
  20#include <uapi/linux/btf.h>
  21#include <linux/filter.h>
  22#include <linux/skbuff.h>
  23#include <linux/vmalloc.h>
  24#include <linux/random.h>
  25#include <linux/moduleloader.h>
  26#include <linux/bpf.h>
  27#include <linux/btf.h>
  28#include <linux/frame.h>
  29#include <linux/rbtree_latch.h>
  30#include <linux/kallsyms.h>
  31#include <linux/rcupdate.h>
  32#include <linux/perf_event.h>
  33
  34#include <asm/unaligned.h>
  35
  36/* Registers */
  37#define BPF_R0	regs[BPF_REG_0]
  38#define BPF_R1	regs[BPF_REG_1]
  39#define BPF_R2	regs[BPF_REG_2]
  40#define BPF_R3	regs[BPF_REG_3]
  41#define BPF_R4	regs[BPF_REG_4]
  42#define BPF_R5	regs[BPF_REG_5]
  43#define BPF_R6	regs[BPF_REG_6]
  44#define BPF_R7	regs[BPF_REG_7]
  45#define BPF_R8	regs[BPF_REG_8]
  46#define BPF_R9	regs[BPF_REG_9]
  47#define BPF_R10	regs[BPF_REG_10]
  48
  49/* Named registers */
  50#define DST	regs[insn->dst_reg]
  51#define SRC	regs[insn->src_reg]
  52#define FP	regs[BPF_REG_FP]
  53#define AX	regs[BPF_REG_AX]
  54#define ARG1	regs[BPF_REG_ARG1]
  55#define CTX	regs[BPF_REG_CTX]
  56#define IMM	insn->imm
  57
  58/* No hurry in this branch
  59 *
  60 * Exported for the bpf jit load helper.
  61 */
  62void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
  63{
  64	u8 *ptr = NULL;
  65
  66	if (k >= SKF_NET_OFF)
  67		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
  68	else if (k >= SKF_LL_OFF)
  69		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
  70
  71	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
  72		return ptr;
  73
  74	return NULL;
  75}
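    /* Example: with the classic BPF ancillary offsets, a load of
     * k = SKF_NET_OFF + 9 resolves to byte 9 past the network header,
     * i.e. the protocol field of an IPv4 header. A minimal sketch,
     * assuming an IPv4 skb:
     *
     *	u8 *proto = bpf_internal_load_pointer_neg_helper(skb,
     *					SKF_NET_OFF + 9, sizeof(u8));
     *	if (proto && *proto == IPPROTO_TCP)
     *		...
     */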
  76
  77struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
  78{
  79	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
  80	struct bpf_prog_aux *aux;
  81	struct bpf_prog *fp;
  82
  83	size = round_up(size, PAGE_SIZE);
  84	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
  85	if (fp == NULL)
  86		return NULL;
  87
  88	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
  89	if (aux == NULL) {
  90		vfree(fp);
  91		return NULL;
  92	}
  93
  94	fp->pages = size / PAGE_SIZE;
  95	fp->aux = aux;
  96	fp->aux->prog = fp;
  97	fp->jit_requested = ebpf_jit_enabled();
  98
  99	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
 100
 101	return fp;
 102}
 103
 104struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 105{
 106	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
 107	struct bpf_prog *prog;
 108	int cpu;
 109
 110	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
 111	if (!prog)
 112		return NULL;
 113
 114	prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
 115	if (!prog->aux->stats) {
 116		kfree(prog->aux);
 117		vfree(prog);
 118		return NULL;
 119	}
 120
 121	for_each_possible_cpu(cpu) {
 122		struct bpf_prog_stats *pstats;
 123
 124		pstats = per_cpu_ptr(prog->aux->stats, cpu);
 125		u64_stats_init(&pstats->syncp);
 126	}
 127	return prog;
 128}
 129EXPORT_SYMBOL_GPL(bpf_prog_alloc);
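    /* A minimal usage sketch; 'insns' and 'insn_cnt' are assumed to
     * come from the caller:
     *
     *	struct bpf_prog *fp;
     *
     *	fp = bpf_prog_alloc(bpf_prog_size(insn_cnt), GFP_USER);
     *	if (!fp)
     *		return -ENOMEM;
     *	fp->len = insn_cnt;
     *	memcpy(fp->insnsi, insns, bpf_prog_insn_size(fp));
     */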
 130
 131int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
 132{
 133	if (!prog->aux->nr_linfo || !prog->jit_requested)
 134		return 0;
 135
 136	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
 137					 sizeof(*prog->aux->jited_linfo),
 138					 GFP_KERNEL | __GFP_NOWARN);
 139	if (!prog->aux->jited_linfo)
 140		return -ENOMEM;
 141
 142	return 0;
 143}
 144
 145void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
 146{
 147	kfree(prog->aux->jited_linfo);
 148	prog->aux->jited_linfo = NULL;
 149}
 150
 151void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
 152{
 153	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
 154		bpf_prog_free_jited_linfo(prog);
 155}
 156
 157/* The JIT engine is responsible for providing an array
 158 * for the insn_off to jited_off mapping (insn_to_jit_off).
 159 *
 160 * The idx to this array is the insn_off.  Hence, the insn_off
 161 * here is relative to the prog itself instead of the main prog.
 162 * This array has one entry for each xlated bpf insn.
 163 *
 164 * jited_off is the byte off to the last byte of the jited insn.
 165 *
 166 * Hence, with
 167 * insn_start:
 168 *      The first bpf insn off of the prog.  The insn off
 169 *      here is relative to the main prog.
 170 *      e.g. if prog is a subprog, insn_start > 0
 171 * linfo_idx:
 172 *      The prog's idx to prog->aux->linfo and jited_linfo
 173 *
 174 * jited_linfo[linfo_idx] = prog->bpf_func
 175 *
 176 * For i > linfo_idx,
 177 *
 178 * jited_linfo[i] = prog->bpf_func +
 179 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 180 */
 181void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
 182			       const u32 *insn_to_jit_off)
 183{
 184	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
 185	const struct bpf_line_info *linfo;
 186	void **jited_linfo;
 187
 188	if (!prog->aux->jited_linfo)
 189		/* Userspace did not provide linfo */
 190		return;
 191
 192	linfo_idx = prog->aux->linfo_idx;
 193	linfo = &prog->aux->linfo[linfo_idx];
 194	insn_start = linfo[0].insn_off;
 195	insn_end = insn_start + prog->len;
 196
 197	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
 198	jited_linfo[0] = prog->bpf_func;
 199
 200	nr_linfo = prog->aux->nr_linfo - linfo_idx;
 201
 202	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
 203		/* The verifier ensures that linfo[i].insn_off is
 204		 * strictly increasing
 205		 */
 206		jited_linfo[i] = prog->bpf_func +
 207			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
 208}
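    /* Worked example with illustrative numbers: a subprog with
     * insn_start = 10 and line info at insn_off 10, 12 and 14, with
     * insn_to_jit_off[] = { 4, 8, 16, 20, 24 } (one entry per xlated
     * insn, each the end offset of its jited code), fills in:
     *
     *	jited_linfo[0] = bpf_func			(insn_off 10)
     *	jited_linfo[1] = bpf_func + insn_to_jit_off[1]	(insn_off 12)
     *	jited_linfo[2] = bpf_func + insn_to_jit_off[3]	(insn_off 14)
     *
     * i.e. index (insn_off - insn_start - 1) yields the end of the
     * previous jited insn, which is where insn_off's jited code begins.
     */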
 209
 210void bpf_prog_free_linfo(struct bpf_prog *prog)
 211{
 212	bpf_prog_free_jited_linfo(prog);
 213	kvfree(prog->aux->linfo);
 214}
 215
 216struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 217				  gfp_t gfp_extra_flags)
 218{
 219	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
 220	struct bpf_prog *fp;
 221	u32 pages, delta;
 222	int ret;
 223
 224	BUG_ON(fp_old == NULL);
 225
 226	size = round_up(size, PAGE_SIZE);
 227	pages = size / PAGE_SIZE;
 228	if (pages <= fp_old->pages)
 229		return fp_old;
 230
 231	delta = pages - fp_old->pages;
 232	ret = __bpf_prog_charge(fp_old->aux->user, delta);
 233	if (ret)
 234		return NULL;
 235
 236	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
 237	if (fp == NULL) {
 238		__bpf_prog_uncharge(fp_old->aux->user, delta);
 239	} else {
 240		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
 241		fp->pages = pages;
 242		fp->aux->prog = fp;
 243
 244		/* We keep fp->aux from fp_old around in the new
 245		 * reallocated structure.
 246		 */
 247		fp_old->aux = NULL;
 248		__bpf_prog_free(fp_old);
 249	}
 250
 251	return fp;
 252}
 253
 254void __bpf_prog_free(struct bpf_prog *fp)
 255{
 256	if (fp->aux) {
 257		free_percpu(fp->aux->stats);
 258		kfree(fp->aux);
 259	}
 260	vfree(fp);
 261}
 262
 263int bpf_prog_calc_tag(struct bpf_prog *fp)
 264{
 265	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
 266	u32 raw_size = bpf_prog_tag_scratch_size(fp);
 267	u32 digest[SHA_DIGEST_WORDS];
 268	u32 ws[SHA_WORKSPACE_WORDS];
 269	u32 i, bsize, psize, blocks;
 270	struct bpf_insn *dst;
 271	bool was_ld_map;
 272	u8 *raw, *todo;
 273	__be32 *result;
 274	__be64 *bits;
 275
 276	raw = vmalloc(raw_size);
 277	if (!raw)
 278		return -ENOMEM;
 279
 280	sha_init(digest);
 281	memset(ws, 0, sizeof(ws));
 282
 283	/* We need to take the map fds out of the digest calculation
 284	 * since they are unstable from the user space side.
 285	 */
 286	dst = (void *)raw;
 287	for (i = 0, was_ld_map = false; i < fp->len; i++) {
 288		dst[i] = fp->insnsi[i];
 289		if (!was_ld_map &&
 290		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
 291		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
 292		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
 293			was_ld_map = true;
 294			dst[i].imm = 0;
 295		} else if (was_ld_map &&
 296			   dst[i].code == 0 &&
 297			   dst[i].dst_reg == 0 &&
 298			   dst[i].src_reg == 0 &&
 299			   dst[i].off == 0) {
 300			was_ld_map = false;
 301			dst[i].imm = 0;
 302		} else {
 303			was_ld_map = false;
 304		}
 305	}
 306
 307	psize = bpf_prog_insn_size(fp);
 308	memset(&raw[psize], 0, raw_size - psize);
 309	raw[psize++] = 0x80;
 310
 311	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
 312	blocks = bsize / SHA_MESSAGE_BYTES;
 313	todo   = raw;
 314	if (bsize - psize >= sizeof(__be64)) {
 315		bits = (__be64 *)(todo + bsize - sizeof(__be64));
 316	} else {
 317		bits = (__be64 *)(todo + bsize + bits_offset);
 318		blocks++;
 319	}
 320	*bits = cpu_to_be64((psize - 1) << 3);
 321
 322	while (blocks--) {
 323		sha_transform(digest, todo, ws);
 324		todo += SHA_MESSAGE_BYTES;
 325	}
 326
 327	result = (__force __be32 *)digest;
 328	for (i = 0; i < SHA_DIGEST_WORDS; i++)
 329		result[i] = cpu_to_be32(digest[i]);
 330	memcpy(fp->tag, result, sizeof(fp->tag));
 331
 332	vfree(raw);
 333	return 0;
 334}
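    /* Worked example of the padding math above (illustrative): a prog
     * of fp->len = 3 insns has psize = 24 bytes. After appending the
     * 0x80 terminator psize = 25, so bsize = round_up(25, 64) = 64 and
     * blocks = 1. Since bsize - psize = 39 >= sizeof(__be64), the
     * message length (25 - 1) * 8 = 192 bits fits into the last eight
     * bytes of the same block, as in a standard SHA-1 finalization.
     */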
 335
 336static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
 337				s32 end_new, s32 curr, const bool probe_pass)
 338{
 339	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
 340	s32 delta = end_new - end_old;
 341	s64 imm = insn->imm;
 342
 343	if (curr < pos && curr + imm + 1 >= end_old)
 344		imm += delta;
 345	else if (curr >= end_new && curr + imm + 1 < end_new)
 346		imm -= delta;
 347	if (imm < imm_min || imm > imm_max)
 348		return -ERANGE;
 349	if (!probe_pass)
 350		insn->imm = imm;
 351	return 0;
 352}
 353
 354static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
 355				s32 end_new, s32 curr, const bool probe_pass)
 356{
 357	const s32 off_min = S16_MIN, off_max = S16_MAX;
 358	s32 delta = end_new - end_old;
 359	s32 off = insn->off;
 360
 361	if (curr < pos && curr + off + 1 >= end_old)
 362		off += delta;
 363	else if (curr >= end_new && curr + off + 1 < end_new)
 364		off -= delta;
 365	if (off < off_min || off > off_max)
 366		return -ERANGE;
 367	if (!probe_pass)
 368		insn->off = off;
 369	return 0;
 370}
 371
 372static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
 373			    s32 end_new, const bool probe_pass)
 374{
 375	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
 376	struct bpf_insn *insn = prog->insnsi;
 377	int ret = 0;
 378
 379	for (i = 0; i < insn_cnt; i++, insn++) {
 380		u8 code;
 381
 382		/* In the probing pass we still operate on the original,
 383		 * unpatched image in order to check overflows before we
 384		 * do any other adjustments. Therefore skip the patchlet.
 385		 */
 386		if (probe_pass && i == pos) {
 387			i = end_new;
 388			insn = prog->insnsi + end_old;
 389		}
 390		code = insn->code;
 391		if ((BPF_CLASS(code) != BPF_JMP &&
 392		     BPF_CLASS(code) != BPF_JMP32) ||
 393		    BPF_OP(code) == BPF_EXIT)
 394			continue;
 395		/* Adjust offset of jmps if we cross patch boundaries. */
 396		if (BPF_OP(code) == BPF_CALL) {
 397			if (insn->src_reg != BPF_PSEUDO_CALL)
 398				continue;
 399			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
 400						   end_new, i, probe_pass);
 401		} else {
 402			ret = bpf_adj_delta_to_off(insn, pos, end_old,
 403						   end_new, i, probe_pass);
 404		}
 405		if (ret)
 406			break;
 407	}
 408
 409	return ret;
 410}
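    /* Worked example (illustrative): patching insn 3 into a 3-insn
     * sequence gives pos = 3, end_old = 4, end_new = 6, delta = 2. A
     * jump at insn 1 with off = 3 targets insn 5 (curr + off + 1),
     * which lies at/after end_old, so it becomes off = 5. Jumps that
     * do not cross the patched region keep their offset.
     */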
 411
 412static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
 413{
 414	struct bpf_line_info *linfo;
 415	u32 i, nr_linfo;
 416
 417	nr_linfo = prog->aux->nr_linfo;
 418	if (!nr_linfo || !delta)
 419		return;
 420
 421	linfo = prog->aux->linfo;
 422
 423	for (i = 0; i < nr_linfo; i++)
 424		if (off < linfo[i].insn_off)
 425			break;
 426
 427	/* Push insn_off of all entries from i onwards by delta */
 428	for (; i < nr_linfo; i++)
 429		linfo[i].insn_off += delta;
 430}
 431
 432struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 433				       const struct bpf_insn *patch, u32 len)
 434{
 435	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
 436	const u32 cnt_max = S16_MAX;
 437	struct bpf_prog *prog_adj;
 438	int err;
 439
 440	/* Since our patchlet doesn't expand the image, we're done. */
 441	if (insn_delta == 0) {
 442		memcpy(prog->insnsi + off, patch, sizeof(*patch));
 443		return prog;
 444	}
 445
 446	insn_adj_cnt = prog->len + insn_delta;
 447
 448	/* Reject anything that would potentially let the insn->off
 449	 * target overflow when we have excessive program expansions.
 450	 * We need to probe here before we do any reallocation where
 451	 * we afterwards may not fail anymore.
 452	 */
 453	if (insn_adj_cnt > cnt_max &&
 454	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
 455		return ERR_PTR(err);
 456
 457	/* Several new instructions need to be inserted. Make room
 458	 * for them. Likely, there's no need for a new allocation as
 459	 * the last page could have large enough tailroom.
 460	 */
 461	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
 462				    GFP_USER);
 463	if (!prog_adj)
 464		return ERR_PTR(-ENOMEM);
 465
 466	prog_adj->len = insn_adj_cnt;
 467
 468	/* Patching happens in 3 steps:
 469	 *
 470	 * 1) Move over tail of insnsi from next instruction onwards,
 471	 *    so we can patch the single target insn with one or more
 472	 *    new ones (patching is always from 1 to n insns, n > 0).
 473	 * 2) Inject new instructions at the target location.
 474	 * 3) Adjust branch offsets if necessary.
 475	 */
 476	insn_rest = insn_adj_cnt - off - len;
 477
 478	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
 479		sizeof(*patch) * insn_rest);
 480	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
 481
 482	/* We are guaranteed not to fail at this point; otherwise
 483	 * the ship has sailed and we cannot revert to the original
 484	 * state. An overflow cannot happen at this point.
 485	 */
 486	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
 487
 488	bpf_adj_linfo(prog_adj, off, insn_delta);
 489
 490	return prog_adj;
 491}
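    /* A minimal usage sketch of a verifier-style rewrite; 'prog' and
     * 'off' are assumed to be managed by the caller, and the old prog
     * must not be used afterwards since it may have been reallocated:
     *
     *	struct bpf_insn patch[] = {
     *		BPF_MOV64_REG(BPF_REG_AX, BPF_REG_1),
     *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, 8),
     *	};
     *	struct bpf_prog *new_prog;
     *
     *	new_prog = bpf_patch_insn_single(prog, off, patch,
     *					 ARRAY_SIZE(patch));
     *	if (IS_ERR(new_prog))
     *		return PTR_ERR(new_prog);
     *	prog = new_prog;
     */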
 492
 493int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
 494{
 495	/* Branch offsets can't overflow when program is shrinking, no need
 496	 * to call bpf_adj_branches(..., true) here
 497	 */
 498	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
 499		sizeof(struct bpf_insn) * (prog->len - off - cnt));
 500	prog->len -= cnt;
 501
 502	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
 503}
 504
 505static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
 506{
 507	int i;
 508
 509	for (i = 0; i < fp->aux->func_cnt; i++)
 510		bpf_prog_kallsyms_del(fp->aux->func[i]);
 511}
 512
 513void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
 514{
 515	bpf_prog_kallsyms_del_subprogs(fp);
 516	bpf_prog_kallsyms_del(fp);
 517}
 518
 519#ifdef CONFIG_BPF_JIT
 520/* All BPF JIT sysctl knobs here. */
 521int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
 522int bpf_jit_harden   __read_mostly;
 523int bpf_jit_kallsyms __read_mostly;
 524long bpf_jit_limit   __read_mostly;
 525
 526static __always_inline void
 527bpf_get_prog_addr_region(const struct bpf_prog *prog,
 528			 unsigned long *symbol_start,
 529			 unsigned long *symbol_end)
 530{
 531	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
 532	unsigned long addr = (unsigned long)hdr;
 533
 534	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
 535
 536	*symbol_start = addr;
 537	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
 538}
 539
 540void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
 541{
 542	const char *end = sym + KSYM_NAME_LEN;
 543	const struct btf_type *type;
 544	const char *func_name;
 545
 546	BUILD_BUG_ON(sizeof("bpf_prog_") +
 547		     sizeof(prog->tag) * 2 +
 548		     /* name has been null terminated.
 549		      * We would need +1 for the '_' preceding
 550		      * the name.  However, the null character
 551		      * is double counted between the name and the
 552		      * sizeof("bpf_prog_") above, so we omit
 553		      * the +1 here.
 554		      */
 555		     sizeof(prog->aux->name) > KSYM_NAME_LEN);
 556
 557	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
 558	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
 559
 560	/* prog->aux->name will be ignored if full btf name is available */
 561	if (prog->aux->func_info_cnt) {
 562		type = btf_type_by_id(prog->aux->btf,
 563				      prog->aux->func_info[prog->aux->func_idx].type_id);
 564		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
 565		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
 566		return;
 567	}
 568
 569	if (prog->aux->name[0])
 570		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
 571	else
 572		*sym = 0;
 573}
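    /* The resulting symbol has the form bpf_prog_<tag>[_<name>], e.g.
     * with a hypothetical tag and name:
     *
     *	bpf_prog_6deef7357e7b4530_sys_enter
     *
     * where the 16 hex digits are the tag from bpf_prog_calc_tag() and
     * the suffix comes from BTF func info or prog->aux->name, if any.
     */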
 574
 575static __always_inline unsigned long
 576bpf_get_prog_addr_start(struct latch_tree_node *n)
 577{
 578	unsigned long symbol_start, symbol_end;
 579	const struct bpf_prog_aux *aux;
 580
 581	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
 582	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
 583
 584	return symbol_start;
 585}
 586
 587static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
 588					  struct latch_tree_node *b)
 589{
 590	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
 591}
 592
 593static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
 594{
 595	unsigned long val = (unsigned long)key;
 596	unsigned long symbol_start, symbol_end;
 597	const struct bpf_prog_aux *aux;
 598
 599	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
 600	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
 601
 602	if (val < symbol_start)
 603		return -1;
 604	if (val >= symbol_end)
 605		return  1;
 606
 607	return 0;
 608}
 609
 610static const struct latch_tree_ops bpf_tree_ops = {
 611	.less	= bpf_tree_less,
 612	.comp	= bpf_tree_comp,
 613};
 614
 615static DEFINE_SPINLOCK(bpf_lock);
 616static LIST_HEAD(bpf_kallsyms);
 617static struct latch_tree_root bpf_tree __cacheline_aligned;
 618
 619static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
 620{
 621	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
 622	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
 623	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
 624}
 625
 626static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
 627{
 628	if (list_empty(&aux->ksym_lnode))
 629		return;
 630
 631	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
 632	list_del_rcu(&aux->ksym_lnode);
 633}
 634
 635static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
 636{
 637	return fp->jited && !bpf_prog_was_classic(fp);
 638}
 639
 640static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
 641{
 642	return list_empty(&fp->aux->ksym_lnode) ||
 643	       fp->aux->ksym_lnode.prev == LIST_POISON2;
 644}
 645
 646void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 647{
 648	if (!bpf_prog_kallsyms_candidate(fp) ||
 649	    !capable(CAP_SYS_ADMIN))
 650		return;
 651
 652	spin_lock_bh(&bpf_lock);
 653	bpf_prog_ksym_node_add(fp->aux);
 654	spin_unlock_bh(&bpf_lock);
 655}
 656
 657void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 658{
 659	if (!bpf_prog_kallsyms_candidate(fp))
 660		return;
 661
 662	spin_lock_bh(&bpf_lock);
 663	bpf_prog_ksym_node_del(fp->aux);
 664	spin_unlock_bh(&bpf_lock);
 665}
 666
 667static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
 668{
 669	struct latch_tree_node *n;
 670
 671	if (!bpf_jit_kallsyms_enabled())
 672		return NULL;
 673
 674	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
 675	return n ?
 676	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
 677	       NULL;
 678}
 679
 680const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
 681				 unsigned long *off, char *sym)
 682{
 683	unsigned long symbol_start, symbol_end;
 684	struct bpf_prog *prog;
 685	char *ret = NULL;
 686
 687	rcu_read_lock();
 688	prog = bpf_prog_kallsyms_find(addr);
 689	if (prog) {
 690		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
 691		bpf_get_prog_name(prog, sym);
 692
 693		ret = sym;
 694		if (size)
 695			*size = symbol_end - symbol_start;
 696		if (off)
 697			*off  = addr - symbol_start;
 698	}
 699	rcu_read_unlock();
 700
 701	return ret;
 702}
 703
 704bool is_bpf_text_address(unsigned long addr)
 705{
 706	bool ret;
 707
 708	rcu_read_lock();
 709	ret = bpf_prog_kallsyms_find(addr) != NULL;
 710	rcu_read_unlock();
 711
 712	return ret;
 713}
 714
 715int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 716		    char *sym)
 717{
 718	struct bpf_prog_aux *aux;
 719	unsigned int it = 0;
 720	int ret = -ERANGE;
 721
 722	if (!bpf_jit_kallsyms_enabled())
 723		return ret;
 724
 725	rcu_read_lock();
 726	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
 727		if (it++ != symnum)
 728			continue;
 729
 730		bpf_get_prog_name(aux->prog, sym);
 731
 732		*value = (unsigned long)aux->prog->bpf_func;
 733		*type  = BPF_SYM_ELF_TYPE;
 734
 735		ret = 0;
 736		break;
 737	}
 738	rcu_read_unlock();
 739
 740	return ret;
 741}
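    /* With the bpf_jit_kallsyms sysctl enabled, these symbols show up
     * in /proc/kallsyms like regular text symbols, e.g. (address and
     * tag hypothetical):
     *
     *	ffffffffc0002000 t bpf_prog_6deef7357e7b4530_sys_enter
     *
     * which lets perf and stack dumpers attribute JITed BPF code.
     */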
 742
 743static atomic_long_t bpf_jit_current;
 744
 745/* Can be overridden by an arch's JIT compiler if it has a custom,
 746 * dedicated BPF backend memory area, or if neither of the two
 747 * below apply.
 748 */
 749u64 __weak bpf_jit_alloc_exec_limit(void)
 750{
 751#if defined(MODULES_VADDR)
 752	return MODULES_END - MODULES_VADDR;
 753#else
 754	return VMALLOC_END - VMALLOC_START;
 755#endif
 756}
 757
 758static int __init bpf_jit_charge_init(void)
 759{
 760	/* Only used as heuristic here to derive limit. */
 761	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
 762					    PAGE_SIZE), LONG_MAX);
 763	return 0;
 764}
 765pure_initcall(bpf_jit_charge_init);
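    /* E.g. (illustrative): with a 1 GiB JIT memory area, the default
     * limit becomes min(round_up((1 GiB) >> 2, PAGE_SIZE), LONG_MAX),
     * i.e. 256 MiB of JITed images for unprivileged users system-wide.
     */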
 766
 767static int bpf_jit_charge_modmem(u32 pages)
 768{
 769	if (atomic_long_add_return(pages, &bpf_jit_current) >
 770	    (bpf_jit_limit >> PAGE_SHIFT)) {
 771		if (!capable(CAP_SYS_ADMIN)) {
 772			atomic_long_sub(pages, &bpf_jit_current);
 773			return -EPERM;
 774		}
 775	}
 776
 777	return 0;
 778}
 779
 780static void bpf_jit_uncharge_modmem(u32 pages)
 781{
 782	atomic_long_sub(pages, &bpf_jit_current);
 783}
 784
 785void *__weak bpf_jit_alloc_exec(unsigned long size)
 786{
 787	return module_alloc(size);
 788}
 789
 790void __weak bpf_jit_free_exec(void *addr)
 791{
 792	module_memfree(addr);
 793}
 794
 795struct bpf_binary_header *
 796bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 797		     unsigned int alignment,
 798		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
 799{
 800	struct bpf_binary_header *hdr;
 801	u32 size, hole, start, pages;
 802	u32 size, hole, start, pages;
 803	/* Most BPF filters are really small, but if some of them
 804	 * fill a page, allow at least 128 extra bytes to insert a
 805	 * random section of illegal instructions.
 806	 */
 807	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
 808	pages = size / PAGE_SIZE;
 809
 810	if (bpf_jit_charge_modmem(pages))
 811		return NULL;
 812	hdr = bpf_jit_alloc_exec(size);
 813	if (!hdr) {
 814		bpf_jit_uncharge_modmem(pages);
 815		return NULL;
 816	}
 817
 818	/* Fill space with illegal/arch-dep instructions. */
 819	bpf_fill_ill_insns(hdr, size);
 820
 821	hdr->pages = pages;
 822	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 823		     PAGE_SIZE - sizeof(*hdr));
 824	start = (get_random_int() % hole) & ~(alignment - 1);
 825
 826	/* Leave a random number of instructions before BPF code. */
 827	*image_ptr = &hdr->image[start];
 828
 829	return hdr;
 830}
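    /* Resulting layout (sketch): the JITed image starts at a random,
     * alignment-masked offset inside the page-sized allocation:
     *
     *	hdr			struct bpf_binary_header
     *	hdr->image[0..start-1]	random illegal-insn padding
     *	hdr->image[start]	start of JITed image (*image_ptr)
     *	trailing space		illegal insns up to pages * PAGE_SIZE
     */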
 831
 832void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 833{
 834	u32 pages = hdr->pages;
 835
 836	bpf_jit_free_exec(hdr);
 837	bpf_jit_uncharge_modmem(pages);
 838}
 839
 840/* This symbol is only overridden by archs that have different
 841 * requirements than the usual eBPF JITs, f.e. when they only
 842 * implement cBPF JIT, do not set images read-only, etc.
 843 */
 844void __weak bpf_jit_free(struct bpf_prog *fp)
 845{
 846	if (fp->jited) {
 847		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
 848
 849		bpf_jit_binary_free(hdr);
 850
 851		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
 852	}
 853
 854	bpf_prog_unlock_free(fp);
 855}
 856
 857int bpf_jit_get_func_addr(const struct bpf_prog *prog,
 858			  const struct bpf_insn *insn, bool extra_pass,
 859			  u64 *func_addr, bool *func_addr_fixed)
 860{
 861	s16 off = insn->off;
 862	s32 imm = insn->imm;
 863	u8 *addr;
 864
 865	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
 866	if (!*func_addr_fixed) {
 867		/* Place-holder address until the last pass has collected
 868		 * all addresses for JITed subprograms in which case we
 869		 * can pick them up from prog->aux.
 870		 */
 871		if (!extra_pass)
 872			addr = NULL;
 873		else if (prog->aux->func &&
 874			 off >= 0 && off < prog->aux->func_cnt)
 875			addr = (u8 *)prog->aux->func[off]->bpf_func;
 876		else
 877			return -EINVAL;
 878	} else {
 879		/* Address of a BPF helper call. Since part of the core
 880		 * kernel, it's always at a fixed location. __bpf_call_base
 881		 * and the helper with imm relative to it are both in core
 882		 * kernel.
 883		 */
 884		addr = (u8 *)__bpf_call_base + imm;
 885	}
 886
 887	*func_addr = (unsigned long)addr;
 888	return 0;
 889}
 890
 891static int bpf_jit_blind_insn(const struct bpf_insn *from,
 892			      const struct bpf_insn *aux,
 893			      struct bpf_insn *to_buff,
 894			      bool emit_zext)
 895{
 896	struct bpf_insn *to = to_buff;
 897	u32 imm_rnd = get_random_int();
 898	s16 off;
 899
 900	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
 901	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
 902
 903	/* Constraints on AX register:
 904	 *
 905	 * AX register is inaccessible from user space. It is mapped in
 906	 * all JITs, and used here for constant blinding rewrites. It is
 907	 * typically "stateless" meaning its contents are only valid within
 908	 * the executed instruction, but not across several instructions.
 909	 * There are a few exceptions however which are further detailed
 910	 * below.
 911	 *
 912	 * Constant blinding is only used by JITs, not in the interpreter.
 913	 * The interpreter uses AX in some occasions as a local temporary
 914	 * register e.g. in DIV or MOD instructions.
 915	 *
 916	 * In restricted circumstances, the verifier can also use the AX
 917	 * register for rewrites as long as they do not interfere with
 918	 * the above cases!
 919	 */
 920	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
 921		goto out;
 922
 923	if (from->imm == 0 &&
 924	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
 925	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
 926		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
 927		goto out;
 928	}
 929
 930	switch (from->code) {
 931	case BPF_ALU | BPF_ADD | BPF_K:
 932	case BPF_ALU | BPF_SUB | BPF_K:
 933	case BPF_ALU | BPF_AND | BPF_K:
 934	case BPF_ALU | BPF_OR  | BPF_K:
 935	case BPF_ALU | BPF_XOR | BPF_K:
 936	case BPF_ALU | BPF_MUL | BPF_K:
 937	case BPF_ALU | BPF_MOV | BPF_K:
 938	case BPF_ALU | BPF_DIV | BPF_K:
 939	case BPF_ALU | BPF_MOD | BPF_K:
 940		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 941		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 942		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
 943		break;
 944
 945	case BPF_ALU64 | BPF_ADD | BPF_K:
 946	case BPF_ALU64 | BPF_SUB | BPF_K:
 947	case BPF_ALU64 | BPF_AND | BPF_K:
 948	case BPF_ALU64 | BPF_OR  | BPF_K:
 949	case BPF_ALU64 | BPF_XOR | BPF_K:
 950	case BPF_ALU64 | BPF_MUL | BPF_K:
 951	case BPF_ALU64 | BPF_MOV | BPF_K:
 952	case BPF_ALU64 | BPF_DIV | BPF_K:
 953	case BPF_ALU64 | BPF_MOD | BPF_K:
 954		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 955		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 956		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
 957		break;
 958
 959	case BPF_JMP | BPF_JEQ  | BPF_K:
 960	case BPF_JMP | BPF_JNE  | BPF_K:
 961	case BPF_JMP | BPF_JGT  | BPF_K:
 962	case BPF_JMP | BPF_JLT  | BPF_K:
 963	case BPF_JMP | BPF_JGE  | BPF_K:
 964	case BPF_JMP | BPF_JLE  | BPF_K:
 965	case BPF_JMP | BPF_JSGT | BPF_K:
 966	case BPF_JMP | BPF_JSLT | BPF_K:
 967	case BPF_JMP | BPF_JSGE | BPF_K:
 968	case BPF_JMP | BPF_JSLE | BPF_K:
 969	case BPF_JMP | BPF_JSET | BPF_K:
 970		/* Account for the extra offset in case of a backjump. */
 971		off = from->off;
 972		if (off < 0)
 973			off -= 2;
 974		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 975		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 976		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
 977		break;
 978
 979	case BPF_JMP32 | BPF_JEQ  | BPF_K:
 980	case BPF_JMP32 | BPF_JNE  | BPF_K:
 981	case BPF_JMP32 | BPF_JGT  | BPF_K:
 982	case BPF_JMP32 | BPF_JLT  | BPF_K:
 983	case BPF_JMP32 | BPF_JGE  | BPF_K:
 984	case BPF_JMP32 | BPF_JLE  | BPF_K:
 985	case BPF_JMP32 | BPF_JSGT | BPF_K:
 986	case BPF_JMP32 | BPF_JSLT | BPF_K:
 987	case BPF_JMP32 | BPF_JSGE | BPF_K:
 988	case BPF_JMP32 | BPF_JSLE | BPF_K:
 989	case BPF_JMP32 | BPF_JSET | BPF_K:
 990		/* Account for the extra offset in case of a backjump. */
 991		off = from->off;
 992		if (off < 0)
 993			off -= 2;
 994		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 995		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 996		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
 997				      off);
 998		break;
 999
1000	case BPF_LD | BPF_IMM | BPF_DW:
1001		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1002		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1003		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1004		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1005		break;
1006	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1007		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1008		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1009		if (emit_zext)
1010			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
1011		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
1012		break;
1013
1014	case BPF_ST | BPF_MEM | BPF_DW:
1015	case BPF_ST | BPF_MEM | BPF_W:
1016	case BPF_ST | BPF_MEM | BPF_H:
1017	case BPF_ST | BPF_MEM | BPF_B:
1018		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1019		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1020		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1021		break;
1022	}
1023out:
1024	return to - to_buff;
1025}
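    /* Worked example: blinding BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 42)
     * with a (per-invocation random) imm_rnd = R rewrites it into:
     *
     *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, R ^ 42)
     *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, R)
     *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
     *
     * so the attacker-chosen constant 42 never appears verbatim in the
     * JITed image and cannot be used to smuggle in gadget bytes.
     */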
1026
1027static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1028					      gfp_t gfp_extra_flags)
1029{
1030	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1031	struct bpf_prog *fp;
1032
1033	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
1034	if (fp != NULL) {
1035		/* aux->prog still points to the fp_other one, so
1036		 * when promoting the clone to the real program,
1037		 * this still needs to be adapted.
1038		 */
1039		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1040	}
1041
1042	return fp;
1043}
1044
1045static void bpf_prog_clone_free(struct bpf_prog *fp)
1046{
1047	/* aux was stolen by the other clone, so we cannot free
1048	 * it from this path! It will be freed eventually by the
1049	 * other program on release.
1050	 *
1051	 * At this point, we don't need a deferred release since
1052	 * clone is guaranteed to not be locked.
1053	 */
1054	fp->aux = NULL;
1055	__bpf_prog_free(fp);
1056}
1057
1058void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1059{
1060	/* We have to repoint aux->prog to self, as we don't
1061	 * know whether fp here is the clone or the original.
1062	 */
1063	fp->aux->prog = fp;
1064	bpf_prog_clone_free(fp_other);
1065}
1066
1067struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1068{
1069	struct bpf_insn insn_buff[16], aux[2];
1070	struct bpf_prog *clone, *tmp;
1071	int insn_delta, insn_cnt;
1072	struct bpf_insn *insn;
1073	int i, rewritten;
1074
1075	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
1076		return prog;
1077
1078	clone = bpf_prog_clone_create(prog, GFP_USER);
1079	if (!clone)
1080		return ERR_PTR(-ENOMEM);
1081
1082	insn_cnt = clone->len;
1083	insn = clone->insnsi;
1084
1085	for (i = 0; i < insn_cnt; i++, insn++) {
1086		/* We temporarily need to hold the original ld64 insn
1087		 * so that we can still access the first part in the
1088		 * second blinding run.
1089		 */
1090		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1091		    insn[1].code == 0)
1092			memcpy(aux, insn, sizeof(aux));
1093
1094		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1095						clone->aux->verifier_zext);
1096		if (!rewritten)
1097			continue;
1098
1099		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1100		if (IS_ERR(tmp)) {
1101			/* Patching may have repointed aux->prog during
1102			 * realloc from the original one, so we need to
1103			 * fix it up here on error.
1104			 */
1105			bpf_jit_prog_release_other(prog, clone);
1106			return tmp;
1107		}
1108
1109		clone = tmp;
1110		insn_delta = rewritten - 1;
1111
1112		/* Walk new program and skip insns we just inserted. */
1113		insn = clone->insnsi + i + insn_delta;
1114		insn_cnt += insn_delta;
1115		i        += insn_delta;
1116	}
1117
1118	clone->blinded = 1;
1119	return clone;
1120}
1121#endif /* CONFIG_BPF_JIT */
1122
1123/* Base function for offset calculation. Needs to go into .text section,
1124 * therefore keeping it non-static as well; will also be used by JITs
1125 * anyway later on, so do not let the compiler omit it. This also needs
1126 * to go into kallsyms for correlation from e.g. bpftool, so naming
1127 * must not change.
1128 */
1129noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1130{
1131	return 0;
1132}
1133EXPORT_SYMBOL_GPL(__bpf_call_base);
1134
1135/* All UAPI available opcodes. */
1136#define BPF_INSN_MAP(INSN_2, INSN_3)		\
1137	/* 32 bit ALU operations. */		\
1138	/*   Register based. */			\
1139	INSN_3(ALU, ADD,  X),			\
1140	INSN_3(ALU, SUB,  X),			\
1141	INSN_3(ALU, AND,  X),			\
1142	INSN_3(ALU, OR,   X),			\
1143	INSN_3(ALU, LSH,  X),			\
1144	INSN_3(ALU, RSH,  X),			\
1145	INSN_3(ALU, XOR,  X),			\
1146	INSN_3(ALU, MUL,  X),			\
1147	INSN_3(ALU, MOV,  X),			\
1148	INSN_3(ALU, ARSH, X),			\
1149	INSN_3(ALU, DIV,  X),			\
1150	INSN_3(ALU, MOD,  X),			\
1151	INSN_2(ALU, NEG),			\
1152	INSN_3(ALU, END, TO_BE),		\
1153	INSN_3(ALU, END, TO_LE),		\
1154	/*   Immediate based. */		\
1155	INSN_3(ALU, ADD,  K),			\
1156	INSN_3(ALU, SUB,  K),			\
1157	INSN_3(ALU, AND,  K),			\
1158	INSN_3(ALU, OR,   K),			\
1159	INSN_3(ALU, LSH,  K),			\
1160	INSN_3(ALU, RSH,  K),			\
1161	INSN_3(ALU, XOR,  K),			\
1162	INSN_3(ALU, MUL,  K),			\
1163	INSN_3(ALU, MOV,  K),			\
1164	INSN_3(ALU, ARSH, K),			\
1165	INSN_3(ALU, DIV,  K),			\
1166	INSN_3(ALU, MOD,  K),			\
1167	/* 64 bit ALU operations. */		\
1168	/*   Register based. */			\
1169	INSN_3(ALU64, ADD,  X),			\
1170	INSN_3(ALU64, SUB,  X),			\
1171	INSN_3(ALU64, AND,  X),			\
1172	INSN_3(ALU64, OR,   X),			\
1173	INSN_3(ALU64, LSH,  X),			\
1174	INSN_3(ALU64, RSH,  X),			\
1175	INSN_3(ALU64, XOR,  X),			\
1176	INSN_3(ALU64, MUL,  X),			\
1177	INSN_3(ALU64, MOV,  X),			\
1178	INSN_3(ALU64, ARSH, X),			\
1179	INSN_3(ALU64, DIV,  X),			\
1180	INSN_3(ALU64, MOD,  X),			\
1181	INSN_2(ALU64, NEG),			\
1182	/*   Immediate based. */		\
1183	INSN_3(ALU64, ADD,  K),			\
1184	INSN_3(ALU64, SUB,  K),			\
1185	INSN_3(ALU64, AND,  K),			\
1186	INSN_3(ALU64, OR,   K),			\
1187	INSN_3(ALU64, LSH,  K),			\
1188	INSN_3(ALU64, RSH,  K),			\
1189	INSN_3(ALU64, XOR,  K),			\
1190	INSN_3(ALU64, MUL,  K),			\
1191	INSN_3(ALU64, MOV,  K),			\
1192	INSN_3(ALU64, ARSH, K),			\
1193	INSN_3(ALU64, DIV,  K),			\
1194	INSN_3(ALU64, MOD,  K),			\
1195	/* Call instruction. */			\
1196	INSN_2(JMP, CALL),			\
1197	/* Exit instruction. */			\
1198	INSN_2(JMP, EXIT),			\
1199	/* 32-bit Jump instructions. */		\
1200	/*   Register based. */			\
1201	INSN_3(JMP32, JEQ,  X),			\
1202	INSN_3(JMP32, JNE,  X),			\
1203	INSN_3(JMP32, JGT,  X),			\
1204	INSN_3(JMP32, JLT,  X),			\
1205	INSN_3(JMP32, JGE,  X),			\
1206	INSN_3(JMP32, JLE,  X),			\
1207	INSN_3(JMP32, JSGT, X),			\
1208	INSN_3(JMP32, JSLT, X),			\
1209	INSN_3(JMP32, JSGE, X),			\
1210	INSN_3(JMP32, JSLE, X),			\
1211	INSN_3(JMP32, JSET, X),			\
1212	/*   Immediate based. */		\
1213	INSN_3(JMP32, JEQ,  K),			\
1214	INSN_3(JMP32, JNE,  K),			\
1215	INSN_3(JMP32, JGT,  K),			\
1216	INSN_3(JMP32, JLT,  K),			\
1217	INSN_3(JMP32, JGE,  K),			\
1218	INSN_3(JMP32, JLE,  K),			\
1219	INSN_3(JMP32, JSGT, K),			\
1220	INSN_3(JMP32, JSLT, K),			\
1221	INSN_3(JMP32, JSGE, K),			\
1222	INSN_3(JMP32, JSLE, K),			\
1223	INSN_3(JMP32, JSET, K),			\
1224	/* Jump instructions. */		\
1225	/*   Register based. */			\
1226	INSN_3(JMP, JEQ,  X),			\
1227	INSN_3(JMP, JNE,  X),			\
1228	INSN_3(JMP, JGT,  X),			\
1229	INSN_3(JMP, JLT,  X),			\
1230	INSN_3(JMP, JGE,  X),			\
1231	INSN_3(JMP, JLE,  X),			\
1232	INSN_3(JMP, JSGT, X),			\
1233	INSN_3(JMP, JSLT, X),			\
1234	INSN_3(JMP, JSGE, X),			\
1235	INSN_3(JMP, JSLE, X),			\
1236	INSN_3(JMP, JSET, X),			\
1237	/*   Immediate based. */		\
1238	INSN_3(JMP, JEQ,  K),			\
1239	INSN_3(JMP, JNE,  K),			\
1240	INSN_3(JMP, JGT,  K),			\
1241	INSN_3(JMP, JLT,  K),			\
1242	INSN_3(JMP, JGE,  K),			\
1243	INSN_3(JMP, JLE,  K),			\
1244	INSN_3(JMP, JSGT, K),			\
1245	INSN_3(JMP, JSLT, K),			\
1246	INSN_3(JMP, JSGE, K),			\
1247	INSN_3(JMP, JSLE, K),			\
1248	INSN_3(JMP, JSET, K),			\
1249	INSN_2(JMP, JA),			\
1250	/* Store instructions. */		\
1251	/*   Register based. */			\
1252	INSN_3(STX, MEM,  B),			\
1253	INSN_3(STX, MEM,  H),			\
1254	INSN_3(STX, MEM,  W),			\
1255	INSN_3(STX, MEM,  DW),			\
1256	INSN_3(STX, XADD, W),			\
1257	INSN_3(STX, XADD, DW),			\
1258	/*   Immediate based. */		\
1259	INSN_3(ST, MEM, B),			\
1260	INSN_3(ST, MEM, H),			\
1261	INSN_3(ST, MEM, W),			\
1262	INSN_3(ST, MEM, DW),			\
1263	/* Load instructions. */		\
1264	/*   Register based. */			\
1265	INSN_3(LDX, MEM, B),			\
1266	INSN_3(LDX, MEM, H),			\
1267	INSN_3(LDX, MEM, W),			\
1268	INSN_3(LDX, MEM, DW),			\
1269	/*   Immediate based. */		\
1270	INSN_3(LD, IMM, DW)
1271
1272bool bpf_opcode_in_insntable(u8 code)
1273{
1274#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1275#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1276	static const bool public_insntable[256] = {
1277		[0 ... 255] = false,
1278		/* Now overwrite non-defaults ... */
1279		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1280		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1281		[BPF_LD | BPF_ABS | BPF_B] = true,
1282		[BPF_LD | BPF_ABS | BPF_H] = true,
1283		[BPF_LD | BPF_ABS | BPF_W] = true,
1284		[BPF_LD | BPF_IND | BPF_B] = true,
1285		[BPF_LD | BPF_IND | BPF_H] = true,
1286		[BPF_LD | BPF_IND | BPF_W] = true,
1287	};
1288#undef BPF_INSN_3_TBL
1289#undef BPF_INSN_2_TBL
1290	return public_insntable[code];
1291}
1292
1293#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1294/**
1295 *	__bpf_prog_run - run eBPF program on a given context
1296 *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1297 *	@insn: is the array of eBPF instructions
1298 *	@stack: is the eBPF storage stack
1299 *
1300 * Decode and execute eBPF instructions.
1301 */
1302static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
1303{
1304#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1305#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1306	static const void * const jumptable[256] __annotate_jump_table = {
1307		[0 ... 255] = &&default_label,
1308		/* Now overwrite non-defaults ... */
1309		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1310		/* Non-UAPI available opcodes. */
1311		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1312		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1313	};
1314#undef BPF_INSN_3_LBL
1315#undef BPF_INSN_2_LBL
1316	u32 tail_call_cnt = 0;
1317
1318#define CONT	 ({ insn++; goto select_insn; })
1319#define CONT_JMP ({ insn++; goto select_insn; })
1320
1321select_insn:
1322	goto *jumptable[insn->code];
1323
1324	/* ALU */
1325#define ALU(OPCODE, OP)			\
1326	ALU64_##OPCODE##_X:		\
1327		DST = DST OP SRC;	\
1328		CONT;			\
1329	ALU_##OPCODE##_X:		\
1330		DST = (u32) DST OP (u32) SRC;	\
1331		CONT;			\
1332	ALU64_##OPCODE##_K:		\
1333		DST = DST OP IMM;		\
1334		CONT;			\
1335	ALU_##OPCODE##_K:		\
1336		DST = (u32) DST OP (u32) IMM;	\
1337		CONT;
1338
1339	ALU(ADD,  +)
1340	ALU(SUB,  -)
1341	ALU(AND,  &)
1342	ALU(OR,   |)
1343	ALU(LSH, <<)
1344	ALU(RSH, >>)
1345	ALU(XOR,  ^)
1346	ALU(MUL,  *)
1347#undef ALU
1348	ALU_NEG:
1349		DST = (u32) -DST;
1350		CONT;
1351	ALU64_NEG:
1352		DST = -DST;
1353		CONT;
1354	ALU_MOV_X:
1355		DST = (u32) SRC;
1356		CONT;
1357	ALU_MOV_K:
1358		DST = (u32) IMM;
1359		CONT;
1360	ALU64_MOV_X:
1361		DST = SRC;
1362		CONT;
1363	ALU64_MOV_K:
1364		DST = IMM;
1365		CONT;
1366	LD_IMM_DW:
1367		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1368		insn++;
1369		CONT;
1370	ALU_ARSH_X:
1371		DST = (u64) (u32) (((s32) DST) >> SRC);
1372		CONT;
1373	ALU_ARSH_K:
1374		DST = (u64) (u32) (((s32) DST) >> IMM);
1375		CONT;
1376	ALU64_ARSH_X:
1377		(*(s64 *) &DST) >>= SRC;
1378		CONT;
1379	ALU64_ARSH_K:
1380		(*(s64 *) &DST) >>= IMM;
1381		CONT;
1382	ALU64_MOD_X:
1383		div64_u64_rem(DST, SRC, &AX);
1384		DST = AX;
1385		CONT;
1386	ALU_MOD_X:
1387		AX = (u32) DST;
1388		DST = do_div(AX, (u32) SRC);
1389		CONT;
1390	ALU64_MOD_K:
1391		div64_u64_rem(DST, IMM, &AX);
1392		DST = AX;
1393		CONT;
1394	ALU_MOD_K:
1395		AX = (u32) DST;
1396		DST = do_div(AX, (u32) IMM);
1397		CONT;
1398	ALU64_DIV_X:
1399		DST = div64_u64(DST, SRC);
1400		CONT;
1401	ALU_DIV_X:
1402		AX = (u32) DST;
1403		do_div(AX, (u32) SRC);
1404		DST = (u32) AX;
1405		CONT;
1406	ALU64_DIV_K:
1407		DST = div64_u64(DST, IMM);
1408		CONT;
1409	ALU_DIV_K:
1410		AX = (u32) DST;
1411		do_div(AX, (u32) IMM);
1412		DST = (u32) AX;
1413		CONT;
1414	ALU_END_TO_BE:
1415		switch (IMM) {
1416		case 16:
1417			DST = (__force u16) cpu_to_be16(DST);
1418			break;
1419		case 32:
1420			DST = (__force u32) cpu_to_be32(DST);
1421			break;
1422		case 64:
1423			DST = (__force u64) cpu_to_be64(DST);
1424			break;
1425		}
1426		CONT;
1427	ALU_END_TO_LE:
1428		switch (IMM) {
1429		case 16:
1430			DST = (__force u16) cpu_to_le16(DST);
1431			break;
1432		case 32:
1433			DST = (__force u32) cpu_to_le32(DST);
1434			break;
1435		case 64:
1436			DST = (__force u64) cpu_to_le64(DST);
1437			break;
1438		}
1439		CONT;
1440
1441	/* CALL */
1442	JMP_CALL:
1443		/* Function call scratches BPF_R1-BPF_R5 registers,
1444		 * preserves BPF_R6-BPF_R9, and stores return value
1445		 * into BPF_R0.
1446		 */
1447		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1448						       BPF_R4, BPF_R5);
1449		CONT;
1450
1451	JMP_CALL_ARGS:
1452		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1453							    BPF_R3, BPF_R4,
1454							    BPF_R5,
1455							    insn + insn->off + 1);
1456		CONT;
1457
1458	JMP_TAIL_CALL: {
1459		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1460		struct bpf_array *array = container_of(map, struct bpf_array, map);
1461		struct bpf_prog *prog;
1462		u32 index = BPF_R3;
1463
1464		if (unlikely(index >= array->map.max_entries))
1465			goto out;
1466		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1467			goto out;
1468
1469		tail_call_cnt++;
1470
1471		prog = READ_ONCE(array->ptrs[index]);
1472		if (!prog)
1473			goto out;
1474
1475		/* ARG1 at this point is guaranteed to point to CTX from
1476		 * the verifier side due to the fact that the tail call is
1477		 * handled like a helper, that is, bpf_tail_call_proto,
1478		 * where arg1_type is ARG_PTR_TO_CTX.
1479		 */
1480		insn = prog->insnsi;
1481		goto select_insn;
1482out:
1483		CONT;
1484	}
1485	JMP_JA:
1486		insn += insn->off;
1487		CONT;
1488	JMP_EXIT:
1489		return BPF_R0;
1490	/* JMP */
1491#define COND_JMP(SIGN, OPCODE, CMP_OP)				\
1492	JMP_##OPCODE##_X:					\
1493		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
1494			insn += insn->off;			\
1495			CONT_JMP;				\
1496		}						\
1497		CONT;						\
1498	JMP32_##OPCODE##_X:					\
1499		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
1500			insn += insn->off;			\
1501			CONT_JMP;				\
1502		}						\
1503		CONT;						\
1504	JMP_##OPCODE##_K:					\
1505		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
1506			insn += insn->off;			\
1507			CONT_JMP;				\
1508		}						\
1509		CONT;						\
1510	JMP32_##OPCODE##_K:					\
1511		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
1512			insn += insn->off;			\
1513			CONT_JMP;				\
1514		}						\
1515		CONT;
1516	COND_JMP(u, JEQ, ==)
1517	COND_JMP(u, JNE, !=)
1518	COND_JMP(u, JGT, >)
1519	COND_JMP(u, JLT, <)
1520	COND_JMP(u, JGE, >=)
1521	COND_JMP(u, JLE, <=)
1522	COND_JMP(u, JSET, &)
1523	COND_JMP(s, JSGT, >)
1524	COND_JMP(s, JSLT, <)
1525	COND_JMP(s, JSGE, >=)
1526	COND_JMP(s, JSLE, <=)
1527#undef COND_JMP
1528	/* STX and ST and LDX */
1529#define LDST(SIZEOP, SIZE)						\
1530	STX_MEM_##SIZEOP:						\
1531		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
1532		CONT;							\
1533	ST_MEM_##SIZEOP:						\
1534		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
1535		CONT;							\
1536	LDX_MEM_##SIZEOP:						\
1537		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
1538		CONT;
1539
1540	LDST(B,   u8)
1541	LDST(H,  u16)
1542	LDST(W,  u32)
1543	LDST(DW, u64)
1544#undef LDST
1545	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1546		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1547			   (DST + insn->off));
1548		CONT;
1549	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1550		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1551			     (DST + insn->off));
1552		CONT;
1553
1554	default_label:
1555		/* If we ever reach this, we have a bug somewhere. Die hard here
1556		 * instead of just returning 0; we could be somewhere in a subprog,
1557		 * so execution could continue otherwise which we do /not/ want.
1558		 *
1559		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1560		 */
1561		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1562		BUG_ON(1);
1563		return 0;
1564}
1565
1566#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1567#define DEFINE_BPF_PROG_RUN(stack_size) \
1568static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1569{ \
1570	u64 stack[stack_size / sizeof(u64)]; \
1571	u64 regs[MAX_BPF_EXT_REG]; \
1572\
1573	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1574	ARG1 = (u64) (unsigned long) ctx; \
1575	return ___bpf_prog_run(regs, insn, stack); \
1576}
1577
1578#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1579#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1580static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1581				      const struct bpf_insn *insn) \
1582{ \
1583	u64 stack[stack_size / sizeof(u64)]; \
1584	u64 regs[MAX_BPF_EXT_REG]; \
1585\
1586	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1587	BPF_R1 = r1; \
1588	BPF_R2 = r2; \
1589	BPF_R3 = r3; \
1590	BPF_R4 = r4; \
1591	BPF_R5 = r5; \
1592	return ___bpf_prog_run(regs, insn, stack); \
1593}
1594
1595#define EVAL1(FN, X) FN(X)
1596#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1597#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1598#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1599#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1600#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1601
1602EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1603EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1604EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1605
1606EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1607EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1608EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1609
1610#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1611
1612static unsigned int (*interpreters[])(const void *ctx,
1613				      const struct bpf_insn *insn) = {
1614EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1615EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1616EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1617};
1618#undef PROG_NAME_LIST
1619#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1620static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1621				  const struct bpf_insn *insn) = {
1622EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1623EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1624EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1625};
1626#undef PROG_NAME_LIST
1627
1628void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1629{
1630	stack_depth = max_t(u32, stack_depth, 1);
1631	insn->off = (s16) insn->imm;
1632	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1633		__bpf_call_base_args;
1634	insn->code = BPF_JMP | BPF_CALL_ARGS;
1635}
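    /* E.g. (illustrative): a prog with stack_depth = 40 rounds up to
     * 64, so index (64 / 32) - 1 = 1 selects __bpf_prog_run_args64(),
     * the variant with a 64-byte on-stack BPF stack. The same indexing
     * scheme picks fp->bpf_func in bpf_prog_select_func() below.
     */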
1636
1637#else
1638static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1639					 const struct bpf_insn *insn)
1640{
1641	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1642	 * is not working properly, so warn about it!
1643	 */
1644	WARN_ON_ONCE(1);
1645	return 0;
1646}
1647#endif
1648
1649bool bpf_prog_array_compatible(struct bpf_array *array,
1650			       const struct bpf_prog *fp)
1651{
1652	if (fp->kprobe_override)
1653		return false;
1654
1655	if (!array->owner_prog_type) {
1656		/* There's no owner yet where we could check for
1657		 * compatibility.
1658		 */
1659		array->owner_prog_type = fp->type;
1660		array->owner_jited = fp->jited;
1661
1662		return true;
1663	}
1664
1665	return array->owner_prog_type == fp->type &&
1666	       array->owner_jited == fp->jited;
1667}
1668
1669static int bpf_check_tail_call(const struct bpf_prog *fp)
1670{
1671	struct bpf_prog_aux *aux = fp->aux;
1672	int i;
1673
1674	for (i = 0; i < aux->used_map_cnt; i++) {
1675		struct bpf_map *map = aux->used_maps[i];
1676		struct bpf_array *array;
1677
1678		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1679			continue;
1680
1681		array = container_of(map, struct bpf_array, map);
1682		if (!bpf_prog_array_compatible(array, fp))
1683			return -EINVAL;
1684	}
1685
1686	return 0;
1687}
1688
1689static void bpf_prog_select_func(struct bpf_prog *fp)
1690{
1691#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1692	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1693
1694	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1695#else
1696	fp->bpf_func = __bpf_prog_ret0_warn;
1697#endif
1698}
1699
1700/**
1701 *	bpf_prog_select_runtime - select exec runtime for BPF program
1702 *	@fp: bpf_prog populated with internal BPF program
1703 *	@err: pointer to error variable
1704 *
1705 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1706 * The BPF program will be executed via BPF_PROG_RUN() macro.
1707 */
1708struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1709{
1710	/* In case of BPF to BPF calls, verifier did all the prep
1711	 * work with regards to JITing, etc.
1712	 */
1713	if (fp->bpf_func)
1714		goto finalize;
1715
1716	bpf_prog_select_func(fp);
1717
1718	/* eBPF JITs can rewrite the program in case constant
1719	 * blinding is active. However, in case of error during
1720	 * blinding, bpf_int_jit_compile() must always return a
1721	 * valid program, which in this case would simply not
1722	 * be JITed, but falls back to the interpreter.
1723	 */
1724	if (!bpf_prog_is_dev_bound(fp->aux)) {
1725		*err = bpf_prog_alloc_jited_linfo(fp);
1726		if (*err)
1727			return fp;
1728
1729		fp = bpf_int_jit_compile(fp);
1730		if (!fp->jited) {
1731			bpf_prog_free_jited_linfo(fp);
1732#ifdef CONFIG_BPF_JIT_ALWAYS_ON
1733			*err = -ENOTSUPP;
1734			return fp;
1735#endif
1736		} else {
1737			bpf_prog_free_unused_jited_linfo(fp);
1738		}
1739	} else {
1740		*err = bpf_prog_offload_compile(fp);
1741		if (*err)
1742			return fp;
1743	}
1744
1745finalize:
1746	bpf_prog_lock_ro(fp);
1747
1748	/* The tail call compatibility check can only be done at
1749	 * this late stage as we need to determine whether we deal
1750	 * with JITed or non-JITed program concatenations, and not
1751	 * all eBPF JITs might immediately support all features.
1752	 */
1753	*err = bpf_check_tail_call(fp);
1754
1755	return fp;
1756}
1757EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
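    /* A minimal usage sketch from a loader path; 'fp' is assumed to be
     * verified already and the cleanup label is hypothetical:
     *
     *	int err = 0;
     *
     *	fp = bpf_prog_select_runtime(fp, &err);
     *	if (err)
     *		goto free_prog;
     *
     * On success, fp->bpf_func points either at the JITed image or at
     * one of the interpreters[] entries as fallback.
     */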
1758
1759static unsigned int __bpf_prog_ret1(const void *ctx,
1760				    const struct bpf_insn *insn)
1761{
1762	return 1;
1763}
1764
1765static struct bpf_prog_dummy {
1766	struct bpf_prog prog;
1767} dummy_bpf_prog = {
1768	.prog = {
1769		.bpf_func = __bpf_prog_ret1,
1770	},
1771};
1772
1773/* To avoid allocating an empty bpf_prog_array for cgroups that
1774 * don't have a bpf program attached, use one global 'empty_prog_array'.
1775 * It will not be modified by the caller of bpf_prog_array_alloc()
1776 * (since the caller requested prog_cnt == 0), and that pointer
1777 * should be 'freed' by bpf_prog_array_free().
1778 */
1779static struct {
1780	struct bpf_prog_array hdr;
1781	struct bpf_prog *null_prog;
1782} empty_prog_array = {
1783	.null_prog = NULL,
1784};
1785
1786struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1787{
1788	if (prog_cnt)
1789		return kzalloc(sizeof(struct bpf_prog_array) +
1790			       sizeof(struct bpf_prog_array_item) *
1791			       (prog_cnt + 1),
1792			       flags);
1793
1794	return &empty_prog_array.hdr;
1795}
1796
1797void bpf_prog_array_free(struct bpf_prog_array *progs)
1798{
1799	if (!progs || progs == &empty_prog_array.hdr)
1800		return;
1801	kfree_rcu(progs, rcu);
1802}
1803
1804int bpf_prog_array_length(struct bpf_prog_array *array)
1805{
1806	struct bpf_prog_array_item *item;
1807	u32 cnt = 0;
1808
1809	for (item = array->items; item->prog; item++)
1810		if (item->prog != &dummy_bpf_prog.prog)
1811			cnt++;
1812	return cnt;
1813}
1814
1815bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
1816{
1817	struct bpf_prog_array_item *item;
1818
1819	for (item = array->items; item->prog; item++)
1820		if (item->prog != &dummy_bpf_prog.prog)
1821			return false;
1822	return true;
1823}
1824
1825static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
1826				     u32 *prog_ids,
1827				     u32 request_cnt)
1828{
1829	struct bpf_prog_array_item *item;
1830	int i = 0;
1831
1832	for (item = array->items; item->prog; item++) {
1833		if (item->prog == &dummy_bpf_prog.prog)
1834			continue;
1835		prog_ids[i] = item->prog->aux->id;
1836		if (++i == request_cnt) {
1837			item++;
1838			break;
1839		}
1840	}
1841
1842	return !!(item->prog);
1843}
1844
1845int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
1846				__u32 __user *prog_ids, u32 cnt)
1847{
1848	unsigned long err = 0;
1849	bool nospc;
1850	u32 *ids;
1851
1852	/* users of this function are doing:
1853	 * cnt = bpf_prog_array_length();
1854	 * if (cnt > 0)
1855	 *     bpf_prog_array_copy_to_user(..., cnt);
1856	 * so below kcalloc doesn't need extra cnt > 0 check.
1857	 */
1858	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1859	if (!ids)
1860		return -ENOMEM;
1861	nospc = bpf_prog_array_copy_core(array, ids, cnt);
1862	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1863	kfree(ids);
1864	if (err)
1865		return -EFAULT;
1866	if (nospc)
1867		return -ENOSPC;
1868	return 0;
1869}
1870
1871void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
1872				struct bpf_prog *old_prog)
1873{
1874	struct bpf_prog_array_item *item;
1875
1876	for (item = array->items; item->prog; item++)
1877		if (item->prog == old_prog) {
1878			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
1879			break;
1880		}
1881}
1882
1883int bpf_prog_array_copy(struct bpf_prog_array *old_array,
1884			struct bpf_prog *exclude_prog,
1885			struct bpf_prog *include_prog,
1886			struct bpf_prog_array **new_array)
1887{
1888	int new_prog_cnt, carry_prog_cnt = 0;
1889	struct bpf_prog_array_item *existing;
1890	struct bpf_prog_array *array;
1891	bool found_exclude = false;
1892	int new_prog_idx = 0;
1893
1894	/* Figure out how many existing progs we need to carry over to
1895	 * the new array.
1896	 */
1897	if (old_array) {
1898		existing = old_array->items;
1899		for (; existing->prog; existing++) {
1900			if (existing->prog == exclude_prog) {
1901				found_exclude = true;
1902				continue;
1903			}
1904			if (existing->prog != &dummy_bpf_prog.prog)
1905				carry_prog_cnt++;
1906			if (existing->prog == include_prog)
1907				return -EEXIST;
1908		}
1909	}
1910
1911	if (exclude_prog && !found_exclude)
1912		return -ENOENT;
1913
1914	/* How many progs (not NULL) will be in the new array? */
1915	new_prog_cnt = carry_prog_cnt;
1916	if (include_prog)
1917		new_prog_cnt += 1;
1918
1919	/* Do we have any prog (not NULL) in the new array? */
1920	if (!new_prog_cnt) {
1921		*new_array = NULL;
1922		return 0;
1923	}
1924
1925	/* +1 as the end of prog_array is marked with NULL */
1926	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1927	if (!array)
1928		return -ENOMEM;
1929
1930	/* Fill in the new prog array */
1931	if (carry_prog_cnt) {
1932		existing = old_array->items;
1933		for (; existing->prog; existing++)
1934			if (existing->prog != exclude_prog &&
1935			    existing->prog != &dummy_bpf_prog.prog) {
1936				array->items[new_prog_idx++].prog =
1937					existing->prog;
1938			}
1939	}
1940	if (include_prog)
1941		array->items[new_prog_idx++].prog = include_prog;
1942	array->items[new_prog_idx].prog = NULL;
1943	*new_array = array;
1944	return 0;
1945}
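/* Editor's note (not part of the kernel source): a user-space model of
 * the exclude/include semantics implemented above. Excluding a program
 * that is not present fails with ENOENT; including one that is already
 * present fails with EEXIST. All ex_* names are invented.
 */
#include <errno.h>
#include <stdio.h>

struct ex_prog { int id; };

static int ex_copy_check(struct ex_prog **old, struct ex_prog *excl,
			 struct ex_prog *incl)
{
	int found_excl = 0;

	for (; *old; old++) {
		if (*old == excl) {
			found_excl = 1;
			continue;
		}
		if (*old == incl)
			return -EEXIST;
	}
	return (excl && !found_excl) ? -ENOENT : 0;
}

int main(void)
{
	struct ex_prog a = { 1 }, b = { 2 };
	struct ex_prog *arr[] = { &a, NULL };

	printf("%d %d\n",
	       ex_copy_check(arr, &b, NULL),	/* -ENOENT: b not attached */
	       ex_copy_check(arr, NULL, &a));	/* -EEXIST: a already there */
	return 0;
}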
1946
1947int bpf_prog_array_copy_info(struct bpf_prog_array *array,
1948			     u32 *prog_ids, u32 request_cnt,
1949			     u32 *prog_cnt)
1950{
1951	u32 cnt = 0;
1952
1953	if (array)
1954		cnt = bpf_prog_array_length(array);
1955
1956	*prog_cnt = cnt;
1957
1958	/* return early if user requested only program count or nothing to copy */
1959	if (!request_cnt || !cnt)
1960		return 0;
1961
1962	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1963	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
1964								     : 0;
1965}
1966
1967static void bpf_prog_free_deferred(struct work_struct *work)
1968{
1969	struct bpf_prog_aux *aux;
1970	int i;
1971
1972	aux = container_of(work, struct bpf_prog_aux, work);
1973	if (bpf_prog_is_dev_bound(aux))
1974		bpf_prog_offload_destroy(aux->prog);
1975#ifdef CONFIG_PERF_EVENTS
1976	if (aux->prog->has_callchain_buf)
1977		put_callchain_buffers();
1978#endif
1979	for (i = 0; i < aux->func_cnt; i++)
1980		bpf_jit_free(aux->func[i]);
1981	if (aux->func_cnt) {
1982		kfree(aux->func);
1983		bpf_prog_unlock_free(aux->prog);
1984	} else {
1985		bpf_jit_free(aux->prog);
1986	}
1987}
1988
1989/* Free internal BPF program */
1990void bpf_prog_free(struct bpf_prog *fp)
1991{
1992	struct bpf_prog_aux *aux = fp->aux;
1993
1994	INIT_WORK(&aux->work, bpf_prog_free_deferred);
1995	schedule_work(&aux->work);
1996}
1997EXPORT_SYMBOL_GPL(bpf_prog_free);
1998
1999/* RNG for unprivileged user space, with state separate from prandom_u32(). */
2000static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2001
2002void bpf_user_rnd_init_once(void)
2003{
2004	prandom_init_once(&bpf_user_rnd_state);
2005}
2006
2007BPF_CALL_0(bpf_user_rnd_u32)
2008{
2009	/* Should someone ever have the rather unwise idea to use some
2010	 * of the registers passed into this function, then note that
2011	 * this function is called from native eBPF and classic-to-eBPF
2012	 * transformations. Register assignments from both sides are
2013	 * different, e.g. classic always sets fn(ctx, A, X) here.
2014	 */
2015	struct rnd_state *state;
2016	u32 res;
2017
2018	state = &get_cpu_var(bpf_user_rnd_state);
2019	res = prandom_u32_state(state);
2020	put_cpu_var(bpf_user_rnd_state);
2021
2022	return res;
2023}
2024
2025/* Weak definitions of helper functions in case we don't have bpf syscall. */
2026const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2027const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2028const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2029const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2030const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2031const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2032const struct bpf_func_proto bpf_spin_lock_proto __weak;
2033const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2034
2035const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2036const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2037const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2038const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2039
2040const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2041const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2042const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2043const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2044const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2045
2046const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2047{
2048	return NULL;
2049}
2050
2051u64 __weak
2052bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2053		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2054{
2055	return -ENOTSUPP;
2056}
2057EXPORT_SYMBOL_GPL(bpf_event_output);
2058
2059/* Always built-in helper functions. */
2060const struct bpf_func_proto bpf_tail_call_proto = {
2061	.func		= NULL,
2062	.gpl_only	= false,
2063	.ret_type	= RET_VOID,
2064	.arg1_type	= ARG_PTR_TO_CTX,
2065	.arg2_type	= ARG_CONST_MAP_PTR,
2066	.arg3_type	= ARG_ANYTHING,
2067};
2068
2069/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2070 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2071 * eBPF and implicitly also cBPF can get JITed!
2072 */
2073struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2074{
2075	return prog;
2076}
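/* Editor's note (not part of the kernel source): a standalone sketch of
 * the weak-symbol pattern used by the stubs above. The weak definition
 * is the generic fallback; a strong definition of the same symbol in
 * another object file overrides it at link time, which is how an arch
 * JIT replaces bpf_int_jit_compile(). The jit_supported() name is
 * invented for illustration.
 */
#include <stdio.h>

int __attribute__((weak)) jit_supported(void)
{
	return 0;	/* generic fallback, used unless overridden */
}

int main(void)
{
	printf("jit: %d\n", jit_supported());	/* 0 unless overridden */
	return 0;
}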
2077
2078/* Stub for JITs that support eBPF. All cBPF code gets transformed into
2079 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2080 */
2081void __weak bpf_jit_compile(struct bpf_prog *prog)
2082{
2083}
2084
2085bool __weak bpf_helper_changes_pkt_data(void *func)
2086{
2087	return false;
2088}
2089
2090/* Return TRUE if the JIT backend wants the verifier to enable sub-register
2091 * usage analysis and to insert explicit zero extensions. Otherwise,
2092 * return FALSE.
2093 */
2094bool __weak bpf_jit_needs_zext(void)
2095{
2096	return false;
2097}
2098
2099/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2100 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2101 */
2102int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2103			 int len)
2104{
2105	return -EFAULT;
2106}
2107
2108DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2109EXPORT_SYMBOL(bpf_stats_enabled_key);
2110
2111/* All definitions of tracepoints related to BPF. */
2112#define CREATE_TRACE_POINTS
2113#include <linux/bpf_trace.h>
2114
2115EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2116EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Linux Socket Filter - Kernel level socket filtering
   4 *
   5 * Based on the design of the Berkeley Packet Filter. The new
   6 * internal format has been designed by PLUMgrid:
   7 *
   8 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
   9 *
  10 * Authors:
  11 *
  12 *	Jay Schulist <jschlst@samba.org>
  13 *	Alexei Starovoitov <ast@plumgrid.com>
  14 *	Daniel Borkmann <dborkman@redhat.com>
  15 *
  16 * Andi Kleen - Fix a few bad bugs and races.
  17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
  18 */
  19
  20#include <uapi/linux/btf.h>
  21#include <linux/filter.h>
  22#include <linux/skbuff.h>
  23#include <linux/vmalloc.h>
  24#include <linux/random.h>
  25#include <linux/moduleloader.h>
  26#include <linux/bpf.h>
  27#include <linux/btf.h>
  28#include <linux/frame.h>
  29#include <linux/rbtree_latch.h>
  30#include <linux/kallsyms.h>
  31#include <linux/rcupdate.h>
  32#include <linux/perf_event.h>
  33#include <linux/extable.h>
  34#include <linux/log2.h>
  35#include <asm/unaligned.h>
  36
  37/* Registers */
  38#define BPF_R0	regs[BPF_REG_0]
  39#define BPF_R1	regs[BPF_REG_1]
  40#define BPF_R2	regs[BPF_REG_2]
  41#define BPF_R3	regs[BPF_REG_3]
  42#define BPF_R4	regs[BPF_REG_4]
  43#define BPF_R5	regs[BPF_REG_5]
  44#define BPF_R6	regs[BPF_REG_6]
  45#define BPF_R7	regs[BPF_REG_7]
  46#define BPF_R8	regs[BPF_REG_8]
  47#define BPF_R9	regs[BPF_REG_9]
  48#define BPF_R10	regs[BPF_REG_10]
  49
  50/* Named registers */
  51#define DST	regs[insn->dst_reg]
  52#define SRC	regs[insn->src_reg]
  53#define FP	regs[BPF_REG_FP]
  54#define AX	regs[BPF_REG_AX]
  55#define ARG1	regs[BPF_REG_ARG1]
  56#define CTX	regs[BPF_REG_CTX]
  57#define IMM	insn->imm
  58
  59/* No hurry in this branch
  60 *
  61 * Exported for the bpf jit load helper.
  62 */
  63void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
  64{
  65	u8 *ptr = NULL;
  66
  67	if (k >= SKF_NET_OFF)
  68		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
  69	else if (k >= SKF_LL_OFF)
  70		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
  71
  72	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
  73		return ptr;
  74
  75	return NULL;
  76}
  77
  78struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
  79{
  80	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
  81	struct bpf_prog_aux *aux;
  82	struct bpf_prog *fp;
  83
  84	size = round_up(size, PAGE_SIZE);
  85	fp = __vmalloc(size, gfp_flags);
  86	if (fp == NULL)
  87		return NULL;
  88
  89	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
  90	if (aux == NULL) {
  91		vfree(fp);
  92		return NULL;
  93	}
  94
  95	fp->pages = size / PAGE_SIZE;
  96	fp->aux = aux;
  97	fp->aux->prog = fp;
  98	fp->jit_requested = ebpf_jit_enabled();
  99
 100	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
 101
 102	return fp;
 103}
 104
 105struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 106{
 107	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
 108	struct bpf_prog *prog;
 109	int cpu;
 110
 111	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
 112	if (!prog)
 113		return NULL;
 114
 115	prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
 116	if (!prog->aux->stats) {
 117		kfree(prog->aux);
 118		vfree(prog);
 119		return NULL;
 120	}
 121
 122	for_each_possible_cpu(cpu) {
 123		struct bpf_prog_stats *pstats;
 124
 125		pstats = per_cpu_ptr(prog->aux->stats, cpu);
 126		u64_stats_init(&pstats->syncp);
 127	}
 128	return prog;
 129}
 130EXPORT_SYMBOL_GPL(bpf_prog_alloc);
 131
 132int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
 133{
 134	if (!prog->aux->nr_linfo || !prog->jit_requested)
 135		return 0;
 136
 137	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
 138					 sizeof(*prog->aux->jited_linfo),
 139					 GFP_KERNEL | __GFP_NOWARN);
 140	if (!prog->aux->jited_linfo)
 141		return -ENOMEM;
 142
 143	return 0;
 144}
 145
 146void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
 147{
 148	kfree(prog->aux->jited_linfo);
 149	prog->aux->jited_linfo = NULL;
 150}
 151
 152void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
 153{
 154	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
 155		bpf_prog_free_jited_linfo(prog);
 156}
 157
 158/* The JIT engine is responsible for providing an array
 159 * for the insn_off to jited_off mapping (insn_to_jit_off).
 160 *
 161 * The index into this array is the insn_off.  Hence, the insn_off
 162 * here is relative to the prog itself instead of the main prog.
 163 * This array has one entry for each xlated bpf insn.
 164 *
 165 * jited_off is the byte off to the last byte of the jited insn.
 166 *
 167 * Hence, with
 168 * insn_start:
 169 *      The first bpf insn off of the prog.  The insn off
 170 *      here is relative to the main prog.
 171 *      e.g. if prog is a subprog, insn_start > 0
 172 * linfo_idx:
 173 *      The prog's idx to prog->aux->linfo and jited_linfo
 174 *
 175 * jited_linfo[linfo_idx] = prog->bpf_func
 176 *
 177 * For i > linfo_idx,
 178 *
 179 * jited_linfo[i] = prog->bpf_func +
 180 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 181 */
 182void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
 183			       const u32 *insn_to_jit_off)
 184{
 185	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
 186	const struct bpf_line_info *linfo;
 187	void **jited_linfo;
 188
 189	if (!prog->aux->jited_linfo)
 190		/* Userspace did not provide linfo */
 191		return;
 192
 193	linfo_idx = prog->aux->linfo_idx;
 194	linfo = &prog->aux->linfo[linfo_idx];
 195	insn_start = linfo[0].insn_off;
 196	insn_end = insn_start + prog->len;
 197
 198	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
 199	jited_linfo[0] = prog->bpf_func;
 200
 201	nr_linfo = prog->aux->nr_linfo - linfo_idx;
 202
 203	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
 204		/* The verifier ensures that linfo[i].insn_off is
 205		 * strictly increasing
 206		 */
 207		jited_linfo[i] = prog->bpf_func +
 208			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
 209}
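/* Editor's note (not part of the kernel source): a user-space model of
 * the insn_off -> jited address mapping described above, with invented
 * numbers: a subprog whose first insn sits at main-prog offset 10, line
 * info at insns 10, 12 and 15, and insn_to_jit_off[] holding the byte
 * offset of the *end* of each jited insn, so the address of insn k is
 * the end of insn k - 1.
 */
#include <stdio.h>

int main(void)
{
	unsigned long bpf_func = 0x1000;		/* hypothetical JIT start */
	unsigned int linfo_off[] = { 10, 12, 15 };	/* insn offsets */
	unsigned int insn_to_jit_off[] = { 4, 8, 12, 16, 20, 24 };
	unsigned int insn_start = linfo_off[0];
	unsigned long jited_linfo[3];
	unsigned int i;

	jited_linfo[0] = bpf_func;
	for (i = 1; i < 3; i++)
		jited_linfo[i] = bpf_func +
			insn_to_jit_off[linfo_off[i] - insn_start - 1];

	for (i = 0; i < 3; i++)		/* 0x1000, 0x1008, 0x1014 */
		printf("insn %u -> %#lx\n", linfo_off[i], jited_linfo[i]);
	return 0;
}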
 210
 211void bpf_prog_free_linfo(struct bpf_prog *prog)
 212{
 213	bpf_prog_free_jited_linfo(prog);
 214	kvfree(prog->aux->linfo);
 215}
 216
 217struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 218				  gfp_t gfp_extra_flags)
 219{
 220	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
 221	struct bpf_prog *fp;
 222	u32 pages, delta;
 223	int ret;
 224
 225	size = round_up(size, PAGE_SIZE);
 226	pages = size / PAGE_SIZE;
 227	if (pages <= fp_old->pages)
 228		return fp_old;
 229
 230	delta = pages - fp_old->pages;
 231	ret = __bpf_prog_charge(fp_old->aux->user, delta);
 232	if (ret)
 233		return NULL;
 234
 235	fp = __vmalloc(size, gfp_flags);
 236	if (fp == NULL) {
 237		__bpf_prog_uncharge(fp_old->aux->user, delta);
 238	} else {
 239		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
 240		fp->pages = pages;
 241		fp->aux->prog = fp;
 242
 243		/* We keep fp->aux from fp_old around in the new
 244		 * reallocated structure.
 245		 */
 246		fp_old->aux = NULL;
 247		__bpf_prog_free(fp_old);
 248	}
 249
 250	return fp;
 251}
 252
 253void __bpf_prog_free(struct bpf_prog *fp)
 254{
 255	if (fp->aux) {
 256		free_percpu(fp->aux->stats);
 257		kfree(fp->aux->poke_tab);
 258		kfree(fp->aux);
 259	}
 260	vfree(fp);
 261}
 262
 263int bpf_prog_calc_tag(struct bpf_prog *fp)
 264{
 265	const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
 266	u32 raw_size = bpf_prog_tag_scratch_size(fp);
 267	u32 digest[SHA1_DIGEST_WORDS];
 268	u32 ws[SHA1_WORKSPACE_WORDS];
 269	u32 i, bsize, psize, blocks;
 270	struct bpf_insn *dst;
 271	bool was_ld_map;
 272	u8 *raw, *todo;
 273	__be32 *result;
 274	__be64 *bits;
 275
 276	raw = vmalloc(raw_size);
 277	if (!raw)
 278		return -ENOMEM;
 279
 280	sha1_init(digest);
 281	memset(ws, 0, sizeof(ws));
 282
 283	/* We need to take out the map fd for the digest calculation
 284	 * since map fds are unstable from the user-space side.
 285	 */
 286	dst = (void *)raw;
 287	for (i = 0, was_ld_map = false; i < fp->len; i++) {
 288		dst[i] = fp->insnsi[i];
 289		if (!was_ld_map &&
 290		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
 291		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
 292		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
 293			was_ld_map = true;
 294			dst[i].imm = 0;
 295		} else if (was_ld_map &&
 296			   dst[i].code == 0 &&
 297			   dst[i].dst_reg == 0 &&
 298			   dst[i].src_reg == 0 &&
 299			   dst[i].off == 0) {
 300			was_ld_map = false;
 301			dst[i].imm = 0;
 302		} else {
 303			was_ld_map = false;
 304		}
 305	}
 306
 307	psize = bpf_prog_insn_size(fp);
 308	memset(&raw[psize], 0, raw_size - psize);
 309	raw[psize++] = 0x80;
 310
 311	bsize  = round_up(psize, SHA1_BLOCK_SIZE);
 312	blocks = bsize / SHA1_BLOCK_SIZE;
 313	todo   = raw;
 314	if (bsize - psize >= sizeof(__be64)) {
 315		bits = (__be64 *)(todo + bsize - sizeof(__be64));
 316	} else {
 317		bits = (__be64 *)(todo + bsize + bits_offset);
 318		blocks++;
 319	}
 320	*bits = cpu_to_be64((psize - 1) << 3);
 321
 322	while (blocks--) {
 323		sha1_transform(digest, todo, ws);
 324		todo += SHA1_BLOCK_SIZE;
 325	}
 326
 327	result = (__force __be32 *)digest;
 328	for (i = 0; i < SHA1_DIGEST_WORDS; i++)
 329		result[i] = cpu_to_be32(digest[i]);
 330	memcpy(fp->tag, result, sizeof(fp->tag));
 331
 332	vfree(raw);
 333	return 0;
 334}
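/* Editor's note (not part of the kernel source): a standalone model of
 * the SHA-1 padding arithmetic above. A 0x80 byte is appended, the
 * buffer is zero-filled, and the message bit length occupies the final
 * 8 bytes; when the 0x80 byte leaves fewer than 8 bytes of room, one
 * more 64-byte block is needed. The boundary is at a 55-byte message.
 */
#include <stdio.h>

#define EX_BLOCK 64

int main(void)
{
	unsigned int psize;

	for (psize = 55; psize <= 57; psize++) {
		unsigned int p = psize + 1;	/* after the 0x80 byte */
		unsigned int bsize  = (p + EX_BLOCK - 1) / EX_BLOCK * EX_BLOCK;
		unsigned int blocks = bsize / EX_BLOCK;

		if (bsize - p < 8)		/* no room for the length */
			blocks++;
		printf("msg %u bytes -> %u block(s)\n", psize, blocks);
	}
	return 0;
}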
 335
 336static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
 337				s32 end_new, s32 curr, const bool probe_pass)
 338{
 339	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
 340	s32 delta = end_new - end_old;
 341	s64 imm = insn->imm;
 342
 343	if (curr < pos && curr + imm + 1 >= end_old)
 344		imm += delta;
 345	else if (curr >= end_new && curr + imm + 1 < end_new)
 346		imm -= delta;
 347	if (imm < imm_min || imm > imm_max)
 348		return -ERANGE;
 349	if (!probe_pass)
 350		insn->imm = imm;
 351	return 0;
 352}
 353
 354static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
 355				s32 end_new, s32 curr, const bool probe_pass)
 356{
 357	const s32 off_min = S16_MIN, off_max = S16_MAX;
 358	s32 delta = end_new - end_old;
 359	s32 off = insn->off;
 360
 361	if (curr < pos && curr + off + 1 >= end_old)
 362		off += delta;
 363	else if (curr >= end_new && curr + off + 1 < end_new)
 364		off -= delta;
 365	if (off < off_min || off > off_max)
 366		return -ERANGE;
 367	if (!probe_pass)
 368		insn->off = off;
 369	return 0;
 370}
 371
 372static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
 373			    s32 end_new, const bool probe_pass)
 374{
 375	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
 376	struct bpf_insn *insn = prog->insnsi;
 377	int ret = 0;
 378
 379	for (i = 0; i < insn_cnt; i++, insn++) {
 380		u8 code;
 381
 382		/* In the probing pass we still operate on the original,
 383		 * unpatched image in order to check overflows before we
 384		 * do any other adjustments. Therefore skip the patchlet.
 385		 */
 386		if (probe_pass && i == pos) {
 387			i = end_new;
 388			insn = prog->insnsi + end_old;
 389		}
 390		code = insn->code;
 391		if ((BPF_CLASS(code) != BPF_JMP &&
 392		     BPF_CLASS(code) != BPF_JMP32) ||
 393		    BPF_OP(code) == BPF_EXIT)
 394			continue;
 395		/* Adjust offset of jmps if we cross patch boundaries. */
 396		if (BPF_OP(code) == BPF_CALL) {
 397			if (insn->src_reg != BPF_PSEUDO_CALL)
 398				continue;
 399			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
 400						   end_new, i, probe_pass);
 401		} else {
 402			ret = bpf_adj_delta_to_off(insn, pos, end_old,
 403						   end_new, i, probe_pass);
 404		}
 405		if (ret)
 406			break;
 407	}
 408
 409	return ret;
 410}
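/* Editor's note (not part of the kernel source): a user-space model of
 * the offset fix-up above. A jump that sits before the patched insn and
 * targets an insn at or after it must grow by the number of inserted
 * instructions (delta = end_new - end_old). Here insn 5 is replaced by
 * 4 insns, and insn 2 jumps forward to insn 7 (off = 4, since the
 * target is curr + off + 1).
 */
#include <stdio.h>

int main(void)
{
	int pos = 5, end_old = 6, end_new = 9;	/* 1 insn became 4 */
	int delta = end_new - end_old;		/* 3 extra insns */
	int curr = 2, off = 4;			/* jump from insn 2 to insn 7 */

	if (curr < pos && curr + off + 1 >= end_old)
		off += delta;			/* jump crosses the patch */

	printf("new off = %d\n", off);	/* 7: target moved to insn 10 */
	return 0;
}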
 411
 412static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
 413{
 414	struct bpf_line_info *linfo;
 415	u32 i, nr_linfo;
 416
 417	nr_linfo = prog->aux->nr_linfo;
 418	if (!nr_linfo || !delta)
 419		return;
 420
 421	linfo = prog->aux->linfo;
 422
 423	for (i = 0; i < nr_linfo; i++)
 424		if (off < linfo[i].insn_off)
 425			break;
 426
 427	/* Push all off < linfo[i].insn_off by delta */
 428	for (; i < nr_linfo; i++)
 429		linfo[i].insn_off += delta;
 430}
 431
 432struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 433				       const struct bpf_insn *patch, u32 len)
 434{
 435	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
 436	const u32 cnt_max = S16_MAX;
 437	struct bpf_prog *prog_adj;
 438	int err;
 439
 440	/* Since our patchlet doesn't expand the image, we're done. */
 441	if (insn_delta == 0) {
 442		memcpy(prog->insnsi + off, patch, sizeof(*patch));
 443		return prog;
 444	}
 445
 446	insn_adj_cnt = prog->len + insn_delta;
 447
 448	/* Reject anything that would potentially let the insn->off
 449	 * target overflow when we have excessive program expansions.
 450	 * We need to probe here before we do any reallocation where
 451	 * we afterwards may not fail anymore.
 452	 */
 453	if (insn_adj_cnt > cnt_max &&
 454	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
 455		return ERR_PTR(err);
 456
 457	/* Several new instructions need to be inserted. Make room
 458	 * for them. Likely, there's no need for a new allocation as
 459	 * the last page could have large enough tailroom.
 460	 */
 461	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
 462				    GFP_USER);
 463	if (!prog_adj)
 464		return ERR_PTR(-ENOMEM);
 465
 466	prog_adj->len = insn_adj_cnt;
 467
 468	/* Patching happens in 3 steps:
 469	 *
 470	 * 1) Move over tail of insnsi from next instruction onwards,
 471	 *    so we can patch the single target insn with one or more
 472	 *    new ones (patching is always from 1 to n insns, n > 0).
 473	 * 2) Inject new instructions at the target location.
 474	 * 3) Adjust branch offsets if necessary.
 475	 */
 476	insn_rest = insn_adj_cnt - off - len;
 477
 478	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
 479		sizeof(*patch) * insn_rest);
 480	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
 481
 482	/* We are guaranteed not to fail at this point; otherwise the
 483	 * ship has sailed and there is no way back to the original
 484	 * state. An overflow cannot happen at this point.
 485	 */
 486	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
 487
 488	bpf_adj_linfo(prog_adj, off, insn_delta);
 489
 490	return prog_adj;
 491}
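/* Editor's note (not part of the kernel source): a user-space model of
 * the three patching steps above: memmove() the tail to make room,
 * memcpy() the patch into place, then (in the kernel) adjust branches.
 * Here insn 2 of a 5-insn program is replaced by 3 new insns.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	int prog[8]  = { 10, 11, 12, 13, 14 };	/* len 5, tailroom for 8 */
	int patch[3] = { 120, 121, 122 };
	int off = 2, len = 3, old_len = 5;
	int rest = old_len + len - 1 - off - len;	/* insns after target */
	int i;

	memmove(prog + off + len, prog + off + 1, sizeof(int) * rest);
	memcpy(prog + off, patch, sizeof(int) * len);

	for (i = 0; i < old_len + len - 1; i++)
		printf("%d ", prog[i]);	/* 10 11 120 121 122 13 14 */
	printf("\n");
	return 0;
}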
 492
 493int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
 494{
 495	/* Branch offsets can't overflow when the program is shrinking, so
 496	 * there is no need to call bpf_adj_branches(..., true) here.
 497	 */
 498	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
 499		sizeof(struct bpf_insn) * (prog->len - off - cnt));
 500	prog->len -= cnt;
 501
 502	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
 503}
 504
 505static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
 506{
 507	int i;
 508
 509	for (i = 0; i < fp->aux->func_cnt; i++)
 510		bpf_prog_kallsyms_del(fp->aux->func[i]);
 511}
 512
 513void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
 514{
 515	bpf_prog_kallsyms_del_subprogs(fp);
 516	bpf_prog_kallsyms_del(fp);
 517}
 518
 519#ifdef CONFIG_BPF_JIT
 520/* All BPF JIT sysctl knobs here. */
 521int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
 522int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
 523int bpf_jit_harden   __read_mostly;
 524long bpf_jit_limit   __read_mostly;
 525
 526static void
 527bpf_prog_ksym_set_addr(struct bpf_prog *prog)
 528{
 529	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
 530	unsigned long addr = (unsigned long)hdr;
 531
 532	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
 533
 534	prog->aux->ksym.start = (unsigned long) prog->bpf_func;
 535	prog->aux->ksym.end   = addr + hdr->pages * PAGE_SIZE;
 536}
 537
 538static void
 539bpf_prog_ksym_set_name(struct bpf_prog *prog)
 540{
 541	char *sym = prog->aux->ksym.name;
 542	const char *end = sym + KSYM_NAME_LEN;
 543	const struct btf_type *type;
 544	const char *func_name;
 545
 546	BUILD_BUG_ON(sizeof("bpf_prog_") +
 547		     sizeof(prog->tag) * 2 +
 548		     /* name has been null-terminated.
 549		      * We would need +1 for the '_' preceding
 550		      * the name.  However, the null character
 551		      * is double counted between the name and the
 552		      * sizeof("bpf_prog_") above, so we omit
 553		      * the +1 here.
 554		      */
 555		     sizeof(prog->aux->name) > KSYM_NAME_LEN);
 556
 557	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
 558	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
 559
 560	/* prog->aux->name will be ignored if full btf name is available */
 561	if (prog->aux->func_info_cnt) {
 562		type = btf_type_by_id(prog->aux->btf,
 563				      prog->aux->func_info[prog->aux->func_idx].type_id);
 564		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
 565		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
 566		return;
 567	}
 568
 569	if (prog->aux->name[0])
 570		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
 571	else
 572		*sym = 0;
 573}
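/* Editor's note (not part of the kernel source): a user-space sketch of
 * the symbol naming scheme built above: "bpf_prog_" + the 8-byte tag in
 * hex + an optional "_<name>" suffix. The tag bytes and name here are
 * invented.
 */
#include <stdio.h>

int main(void)
{
	unsigned char tag[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33 };
	char sym[128];
	int n, i;

	n = snprintf(sym, sizeof(sym), "bpf_prog_");
	for (i = 0; i < 8; i++)
		n += snprintf(sym + n, sizeof(sym) - n, "%02x", tag[i]);
	snprintf(sym + n, sizeof(sym) - n, "_%s", "my_prog");

	printf("%s\n", sym);	/* bpf_prog_deadbeef00112233_my_prog */
	return 0;
}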
 574
 575static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
 576{
 577	return container_of(n, struct bpf_ksym, tnode)->start;
 578}
 579
 580static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
 581					  struct latch_tree_node *b)
 582{
 583	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
 584}
 585
 586static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
 587{
 588	unsigned long val = (unsigned long)key;
 589	const struct bpf_ksym *ksym;
 590
 591	ksym = container_of(n, struct bpf_ksym, tnode);
 592
 593	if (val < ksym->start)
 594		return -1;
 595	if (val >= ksym->end)
 596		return  1;
 597
 598	return 0;
 599}
 600
 601static const struct latch_tree_ops bpf_tree_ops = {
 602	.less	= bpf_tree_less,
 603	.comp	= bpf_tree_comp,
 604};
 605
 606static DEFINE_SPINLOCK(bpf_lock);
 607static LIST_HEAD(bpf_kallsyms);
 608static struct latch_tree_root bpf_tree __cacheline_aligned;
 609
 610void bpf_ksym_add(struct bpf_ksym *ksym)
 611{
 612	spin_lock_bh(&bpf_lock);
 613	WARN_ON_ONCE(!list_empty(&ksym->lnode));
 614	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
 615	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
 616	spin_unlock_bh(&bpf_lock);
 617}
 618
 619static void __bpf_ksym_del(struct bpf_ksym *ksym)
 620{
 621	if (list_empty(&ksym->lnode))
 622		return;
 623
 624	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
 625	list_del_rcu(&ksym->lnode);
 626}
 627
 628void bpf_ksym_del(struct bpf_ksym *ksym)
 629{
 630	spin_lock_bh(&bpf_lock);
 631	__bpf_ksym_del(ksym);
 632	spin_unlock_bh(&bpf_lock);
 633}
 634
 635static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
 636{
 637	return fp->jited && !bpf_prog_was_classic(fp);
 638}
 639
 640static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
 641{
 642	return list_empty(&fp->aux->ksym.lnode) ||
 643	       fp->aux->ksym.lnode.prev == LIST_POISON2;
 644}
 645
 646void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 647{
 648	if (!bpf_prog_kallsyms_candidate(fp) ||
 649	    !bpf_capable())
 650		return;
 651
 652	bpf_prog_ksym_set_addr(fp);
 653	bpf_prog_ksym_set_name(fp);
 654	fp->aux->ksym.prog = true;
 655
 656	bpf_ksym_add(&fp->aux->ksym);
 657}
 658
 659void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 660{
 661	if (!bpf_prog_kallsyms_candidate(fp))
 662		return;
 663
 664	bpf_ksym_del(&fp->aux->ksym);
 665}
 666
 667static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
 668{
 669	struct latch_tree_node *n;
 670
 671	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
 672	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
 673}
 674
 675const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
 676				 unsigned long *off, char *sym)
 677{
 678	struct bpf_ksym *ksym;
 679	char *ret = NULL;
 680
 681	rcu_read_lock();
 682	ksym = bpf_ksym_find(addr);
 683	if (ksym) {
 684		unsigned long symbol_start = ksym->start;
 685		unsigned long symbol_end = ksym->end;
 686
 687		strncpy(sym, ksym->name, KSYM_NAME_LEN);
 688
 689		ret = sym;
 690		if (size)
 691			*size = symbol_end - symbol_start;
 692		if (off)
 693			*off  = addr - symbol_start;
 694	}
 695	rcu_read_unlock();
 696
 697	return ret;
 698}
 699
 700bool is_bpf_text_address(unsigned long addr)
 701{
 702	bool ret;
 703
 704	rcu_read_lock();
 705	ret = bpf_ksym_find(addr) != NULL;
 706	rcu_read_unlock();
 707
 708	return ret;
 709}
 710
 711static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
 712{
 713	struct bpf_ksym *ksym = bpf_ksym_find(addr);
 714
 715	return ksym && ksym->prog ?
 716	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :
 717	       NULL;
 718}
 719
 720const struct exception_table_entry *search_bpf_extables(unsigned long addr)
 721{
 722	const struct exception_table_entry *e = NULL;
 723	struct bpf_prog *prog;
 724
 725	rcu_read_lock();
 726	prog = bpf_prog_ksym_find(addr);
 727	if (!prog)
 728		goto out;
 729	if (!prog->aux->num_exentries)
 730		goto out;
 731
 732	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
 733out:
 734	rcu_read_unlock();
 735	return e;
 736}
 737
 738int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 739		    char *sym)
 740{
 741	struct bpf_ksym *ksym;
 742	unsigned int it = 0;
 743	int ret = -ERANGE;
 744
 745	if (!bpf_jit_kallsyms_enabled())
 746		return ret;
 747
 748	rcu_read_lock();
 749	list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
 750		if (it++ != symnum)
 751			continue;
 752
 753		strncpy(sym, ksym->name, KSYM_NAME_LEN);
 754
 755		*value = ksym->start;
 756		*type  = BPF_SYM_ELF_TYPE;
 757
 758		ret = 0;
 759		break;
 760	}
 761	rcu_read_unlock();
 762
 763	return ret;
 764}
 765
 766int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
 767				struct bpf_jit_poke_descriptor *poke)
 768{
 769	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
 770	static const u32 poke_tab_max = 1024;
 771	u32 slot = prog->aux->size_poke_tab;
 772	u32 size = slot + 1;
 773
 774	if (size > poke_tab_max)
 775		return -ENOSPC;
 776	if (poke->ip || poke->ip_stable || poke->adj_off)
 777		return -EINVAL;
 778
 779	switch (poke->reason) {
 780	case BPF_POKE_REASON_TAIL_CALL:
 781		if (!poke->tail_call.map)
 782			return -EINVAL;
 783		break;
 784	default:
 785		return -EINVAL;
 786	}
 787
 788	tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
 789	if (!tab)
 790		return -ENOMEM;
 791
 792	memcpy(&tab[slot], poke, sizeof(*poke));
 793	prog->aux->size_poke_tab = size;
 794	prog->aux->poke_tab = tab;
 795
 796	return slot;
 797}
 798
 799static atomic_long_t bpf_jit_current;
 800
 801/* Can be overridden by an arch's JIT compiler if it has a custom,
 802 * dedicated BPF backend memory area, or if neither of the two
 803 * below apply.
 804 */
 805u64 __weak bpf_jit_alloc_exec_limit(void)
 806{
 807#if defined(MODULES_VADDR)
 808	return MODULES_END - MODULES_VADDR;
 809#else
 810	return VMALLOC_END - VMALLOC_START;
 811#endif
 812}
 813
 814static int __init bpf_jit_charge_init(void)
 815{
 816	/* Only used as a heuristic here to derive the limit. */
 817	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
 818					    PAGE_SIZE), LONG_MAX);
 819	return 0;
 820}
 821pure_initcall(bpf_jit_charge_init);
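/* Editor's note (not part of the kernel source): a standalone check of
 * the default limit heuristic above, assuming a hypothetical 1 GiB
 * executable arena and 4 KiB pages: one quarter of the arena, rounded
 * up to a page boundary.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long arena = 1ULL << 30;	/* 1 GiB, hypothetical */
	unsigned long long page  = 4096;
	unsigned long long limit = ((arena >> 2) + page - 1) / page * page;

	printf("bpf_jit_limit = %llu bytes\n", limit);	/* 268435456 */
	return 0;
}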
 822
 823static int bpf_jit_charge_modmem(u32 pages)
 824{
 825	if (atomic_long_add_return(pages, &bpf_jit_current) >
 826	    (bpf_jit_limit >> PAGE_SHIFT)) {
 827		if (!capable(CAP_SYS_ADMIN)) {
 828			atomic_long_sub(pages, &bpf_jit_current);
 829			return -EPERM;
 830		}
 831	}
 832
 833	return 0;
 834}
 835
 836static void bpf_jit_uncharge_modmem(u32 pages)
 837{
 838	atomic_long_sub(pages, &bpf_jit_current);
 839}
 840
 841void *__weak bpf_jit_alloc_exec(unsigned long size)
 842{
 843	return module_alloc(size);
 844}
 845
 846void __weak bpf_jit_free_exec(void *addr)
 847{
 848	module_memfree(addr);
 849}
 850
 851struct bpf_binary_header *
 852bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 853		     unsigned int alignment,
 854		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
 855{
 856	struct bpf_binary_header *hdr;
 857	u32 size, hole, start, pages;
 858
 859	WARN_ON_ONCE(!is_power_of_2(alignment) ||
 860		     alignment > BPF_IMAGE_ALIGNMENT);
 861
 862	/* Most BPF filters are really small, but if some of them
 863	 * fill a page, allow at least 128 extra bytes to insert a
 864	 * random section of illegal instructions.
 865	 */
 866	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
 867	pages = size / PAGE_SIZE;
 868
 869	if (bpf_jit_charge_modmem(pages))
 870		return NULL;
 871	hdr = bpf_jit_alloc_exec(size);
 872	if (!hdr) {
 873		bpf_jit_uncharge_modmem(pages);
 874		return NULL;
 875	}
 876
 877	/* Fill space with illegal/arch-dep instructions. */
 878	bpf_fill_ill_insns(hdr, size);
 879
 880	hdr->pages = pages;
 881	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 882		     PAGE_SIZE - sizeof(*hdr));
 883	start = (get_random_int() % hole) & ~(alignment - 1);
 884
 885	/* Leave a random number of instructions before BPF code. */
 886	*image_ptr = &hdr->image[start];
 887
 888	return hdr;
 889}
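/* Editor's note (not part of the kernel source): a user-space model of
 * the sizing and random-start computation above, with an invented
 * 600-byte program, a 16-byte header stand-in, 4 KiB pages and 16-byte
 * alignment.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int proglen = 600, hdrlen = 16, alignment = 16;
	unsigned int size, hole, start;

	size = (proglen + hdrlen + 128 + 4095) / 4096 * 4096;	/* 4096 */
	hole = size - (proglen + hdrlen);			/* 3480 */
	if (hole > 4096 - hdrlen)
		hole = 4096 - hdrlen;	/* cap at one page minus header */
	start = ((unsigned int)rand() % hole) & ~(alignment - 1);

	printf("size=%u hole=%u start=%u\n", size, hole, start);
	return 0;
}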
 890
 891void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 892{
 893	u32 pages = hdr->pages;
 894
 895	bpf_jit_free_exec(hdr);
 896	bpf_jit_uncharge_modmem(pages);
 897}
 898
 899/* This symbol is only overridden by archs that have different
 900 * requirements from the usual eBPF JITs, e.g. when they only
 901 * implement a cBPF JIT or do not set images read-only, etc.
 902 */
 903void __weak bpf_jit_free(struct bpf_prog *fp)
 904{
 905	if (fp->jited) {
 906		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
 907
 908		bpf_jit_binary_free(hdr);
 909
 910		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
 911	}
 912
 913	bpf_prog_unlock_free(fp);
 914}
 915
 916int bpf_jit_get_func_addr(const struct bpf_prog *prog,
 917			  const struct bpf_insn *insn, bool extra_pass,
 918			  u64 *func_addr, bool *func_addr_fixed)
 919{
 920	s16 off = insn->off;
 921	s32 imm = insn->imm;
 922	u8 *addr;
 923
 924	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
 925	if (!*func_addr_fixed) {
 926		/* Placeholder address until the last pass has collected
 927		 * all addresses for JITed subprograms, at which point we
 928		 * can pick them up from prog->aux.
 929		 */
 930		if (!extra_pass)
 931			addr = NULL;
 932		else if (prog->aux->func &&
 933			 off >= 0 && off < prog->aux->func_cnt)
 934			addr = (u8 *)prog->aux->func[off]->bpf_func;
 935		else
 936			return -EINVAL;
 937	} else {
 938		/* Address of a BPF helper call. Since it is part of the
 939		 * core kernel, it's always at a fixed location. __bpf_call_base
 940		 * and the helper at offset imm from it are both in the
 941		 * core kernel.
 942		 */
 943		addr = (u8 *)__bpf_call_base + imm;
 944	}
 945
 946	*func_addr = (unsigned long)addr;
 947	return 0;
 948}
 949
 950static int bpf_jit_blind_insn(const struct bpf_insn *from,
 951			      const struct bpf_insn *aux,
 952			      struct bpf_insn *to_buff,
 953			      bool emit_zext)
 954{
 955	struct bpf_insn *to = to_buff;
 956	u32 imm_rnd = get_random_int();
 957	s16 off;
 958
 959	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
 960	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
 961
 962	/* Constraints on AX register:
 963	 *
 964	 * AX register is inaccessible from user space. It is mapped in
 965	 * all JITs, and used here for constant blinding rewrites. It is
 966	 * typically "stateless" meaning its contents are only valid within
 967	 * the executed instruction, but not across several instructions.
 968	 * There are a few exceptions however which are further detailed
 969	 * below.
 970	 *
 971	 * Constant blinding is only used by JITs, not in the interpreter.
 972	 * The interpreter uses AX in some occasions as a local temporary
 973	 * register e.g. in DIV or MOD instructions.
 974	 *
 975	 * In restricted circumstances, the verifier can also use the AX
 976	 * register for rewrites as long as they do not interfere with
 977	 * the above cases!
 978	 */
 979	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
 980		goto out;
 981
 982	if (from->imm == 0 &&
 983	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
 984	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
 985		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
 986		goto out;
 987	}
 988
 989	switch (from->code) {
 990	case BPF_ALU | BPF_ADD | BPF_K:
 991	case BPF_ALU | BPF_SUB | BPF_K:
 992	case BPF_ALU | BPF_AND | BPF_K:
 993	case BPF_ALU | BPF_OR  | BPF_K:
 994	case BPF_ALU | BPF_XOR | BPF_K:
 995	case BPF_ALU | BPF_MUL | BPF_K:
 996	case BPF_ALU | BPF_MOV | BPF_K:
 997	case BPF_ALU | BPF_DIV | BPF_K:
 998	case BPF_ALU | BPF_MOD | BPF_K:
 999		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1000		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1001		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1002		break;
1003
1004	case BPF_ALU64 | BPF_ADD | BPF_K:
1005	case BPF_ALU64 | BPF_SUB | BPF_K:
1006	case BPF_ALU64 | BPF_AND | BPF_K:
1007	case BPF_ALU64 | BPF_OR  | BPF_K:
1008	case BPF_ALU64 | BPF_XOR | BPF_K:
1009	case BPF_ALU64 | BPF_MUL | BPF_K:
1010	case BPF_ALU64 | BPF_MOV | BPF_K:
1011	case BPF_ALU64 | BPF_DIV | BPF_K:
1012	case BPF_ALU64 | BPF_MOD | BPF_K:
1013		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1014		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1015		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1016		break;
1017
1018	case BPF_JMP | BPF_JEQ  | BPF_K:
1019	case BPF_JMP | BPF_JNE  | BPF_K:
1020	case BPF_JMP | BPF_JGT  | BPF_K:
1021	case BPF_JMP | BPF_JLT  | BPF_K:
1022	case BPF_JMP | BPF_JGE  | BPF_K:
1023	case BPF_JMP | BPF_JLE  | BPF_K:
1024	case BPF_JMP | BPF_JSGT | BPF_K:
1025	case BPF_JMP | BPF_JSLT | BPF_K:
1026	case BPF_JMP | BPF_JSGE | BPF_K:
1027	case BPF_JMP | BPF_JSLE | BPF_K:
1028	case BPF_JMP | BPF_JSET | BPF_K:
1029		/* Account for the extra offset in case of a backjump. */
1030		off = from->off;
1031		if (off < 0)
1032			off -= 2;
1033		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1034		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1035		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1036		break;
1037
1038	case BPF_JMP32 | BPF_JEQ  | BPF_K:
1039	case BPF_JMP32 | BPF_JNE  | BPF_K:
1040	case BPF_JMP32 | BPF_JGT  | BPF_K:
1041	case BPF_JMP32 | BPF_JLT  | BPF_K:
1042	case BPF_JMP32 | BPF_JGE  | BPF_K:
1043	case BPF_JMP32 | BPF_JLE  | BPF_K:
1044	case BPF_JMP32 | BPF_JSGT | BPF_K:
1045	case BPF_JMP32 | BPF_JSLT | BPF_K:
1046	case BPF_JMP32 | BPF_JSGE | BPF_K:
1047	case BPF_JMP32 | BPF_JSLE | BPF_K:
1048	case BPF_JMP32 | BPF_JSET | BPF_K:
1049		/* Account for the extra offset in case of a backjump. */
1050		off = from->off;
1051		if (off < 0)
1052			off -= 2;
1053		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1054		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1055		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1056				      off);
1057		break;
1058
1059	case BPF_LD | BPF_IMM | BPF_DW:
1060		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1061		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1062		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1063		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1064		break;
1065	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1066		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1067		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1068		if (emit_zext)
1069			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
1070		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
1071		break;
1072
1073	case BPF_ST | BPF_MEM | BPF_DW:
1074	case BPF_ST | BPF_MEM | BPF_W:
1075	case BPF_ST | BPF_MEM | BPF_H:
1076	case BPF_ST | BPF_MEM | BPF_B:
1077		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1078		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1079		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1080		break;
1081	}
1082out:
1083	return to - to_buff;
1084}
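/* Editor's note (not part of the kernel source): a standalone check
 * that the blinding rewrite above preserves semantics. For any random
 * value rnd, (rnd ^ imm) ^ rnd == imm, so "MOV dst, imm" can become
 * "MOV AX, rnd ^ imm; XOR AX, rnd; <op> dst, AX" without the constant
 * ever appearing literally in the image.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int imm = 0x12345678;
	unsigned int rnd = (unsigned int)rand();	/* plays imm_rnd */
	unsigned int ax;

	ax = rnd ^ imm;		/* MOV AX, imm_rnd ^ imm */
	ax ^= rnd;		/* XOR AX, imm_rnd */

	printf("%s\n", ax == imm ? "imm recovered" : "bug");
	return 0;
}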
1085
1086static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1087					      gfp_t gfp_extra_flags)
1088{
1089	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1090	struct bpf_prog *fp;
1091
1092	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1093	if (fp != NULL) {
1094		/* aux->prog still points to the fp_other one, so
1095		 * when promoting the clone to the real program,
1096		 * this still needs to be adapted.
1097		 */
1098		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1099	}
1100
1101	return fp;
1102}
1103
1104static void bpf_prog_clone_free(struct bpf_prog *fp)
1105{
1106	/* aux was stolen by the other clone, so we cannot free
1107	 * it from this path! It will be freed eventually by the
1108	 * other program on release.
1109	 *
1110	 * At this point, we don't need a deferred release since
1111	 * clone is guaranteed to not be locked.
1112	 */
1113	fp->aux = NULL;
1114	__bpf_prog_free(fp);
1115}
1116
1117void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1118{
1119	/* We have to repoint aux->prog to self, as we don't
1120	 * know whether fp here is the clone or the original.
1121	 */
1122	fp->aux->prog = fp;
1123	bpf_prog_clone_free(fp_other);
1124}
1125
1126struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1127{
1128	struct bpf_insn insn_buff[16], aux[2];
1129	struct bpf_prog *clone, *tmp;
1130	int insn_delta, insn_cnt;
1131	struct bpf_insn *insn;
1132	int i, rewritten;
1133
1134	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
1135		return prog;
1136
1137	clone = bpf_prog_clone_create(prog, GFP_USER);
1138	if (!clone)
1139		return ERR_PTR(-ENOMEM);
1140
1141	insn_cnt = clone->len;
1142	insn = clone->insnsi;
1143
1144	for (i = 0; i < insn_cnt; i++, insn++) {
1145		/* We temporarily need to hold the original ld64 insn
1146		 * so that we can still access the first part in the
1147		 * second blinding run.
1148		 */
1149		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1150		    insn[1].code == 0)
1151			memcpy(aux, insn, sizeof(aux));
1152
1153		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1154						clone->aux->verifier_zext);
1155		if (!rewritten)
1156			continue;
1157
1158		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1159		if (IS_ERR(tmp)) {
1160			/* Patching may have repointed aux->prog during
1161			 * realloc from the original one, so we need to
1162			 * fix it up here on error.
1163			 */
1164			bpf_jit_prog_release_other(prog, clone);
1165			return tmp;
1166		}
1167
1168		clone = tmp;
1169		insn_delta = rewritten - 1;
1170
1171		/* Walk new program and skip insns we just inserted. */
1172		insn = clone->insnsi + i + insn_delta;
1173		insn_cnt += insn_delta;
1174		i        += insn_delta;
1175	}
1176
1177	clone->blinded = 1;
1178	return clone;
1179}
1180#endif /* CONFIG_BPF_JIT */
1181
1182/* Base function for offset calculation. Needs to go into .text section,
1183 * therefore keeping it non-static as well; will also be used by JITs
1184 * anyway later on, so do not let the compiler omit it. This also needs
1185 * to go into kallsyms for correlation from e.g. bpftool, so naming
1186 * must not change.
1187 */
1188noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1189{
1190	return 0;
1191}
1192EXPORT_SYMBOL_GPL(__bpf_call_base);
1193
1194/* All UAPI available opcodes. */
1195#define BPF_INSN_MAP(INSN_2, INSN_3)		\
1196	/* 32 bit ALU operations. */		\
1197	/*   Register based. */			\
1198	INSN_3(ALU, ADD,  X),			\
1199	INSN_3(ALU, SUB,  X),			\
1200	INSN_3(ALU, AND,  X),			\
1201	INSN_3(ALU, OR,   X),			\
1202	INSN_3(ALU, LSH,  X),			\
1203	INSN_3(ALU, RSH,  X),			\
1204	INSN_3(ALU, XOR,  X),			\
1205	INSN_3(ALU, MUL,  X),			\
1206	INSN_3(ALU, MOV,  X),			\
1207	INSN_3(ALU, ARSH, X),			\
1208	INSN_3(ALU, DIV,  X),			\
1209	INSN_3(ALU, MOD,  X),			\
1210	INSN_2(ALU, NEG),			\
1211	INSN_3(ALU, END, TO_BE),		\
1212	INSN_3(ALU, END, TO_LE),		\
1213	/*   Immediate based. */		\
1214	INSN_3(ALU, ADD,  K),			\
1215	INSN_3(ALU, SUB,  K),			\
1216	INSN_3(ALU, AND,  K),			\
1217	INSN_3(ALU, OR,   K),			\
1218	INSN_3(ALU, LSH,  K),			\
1219	INSN_3(ALU, RSH,  K),			\
1220	INSN_3(ALU, XOR,  K),			\
1221	INSN_3(ALU, MUL,  K),			\
1222	INSN_3(ALU, MOV,  K),			\
1223	INSN_3(ALU, ARSH, K),			\
1224	INSN_3(ALU, DIV,  K),			\
1225	INSN_3(ALU, MOD,  K),			\
1226	/* 64 bit ALU operations. */		\
1227	/*   Register based. */			\
1228	INSN_3(ALU64, ADD,  X),			\
1229	INSN_3(ALU64, SUB,  X),			\
1230	INSN_3(ALU64, AND,  X),			\
1231	INSN_3(ALU64, OR,   X),			\
1232	INSN_3(ALU64, LSH,  X),			\
1233	INSN_3(ALU64, RSH,  X),			\
1234	INSN_3(ALU64, XOR,  X),			\
1235	INSN_3(ALU64, MUL,  X),			\
1236	INSN_3(ALU64, MOV,  X),			\
1237	INSN_3(ALU64, ARSH, X),			\
1238	INSN_3(ALU64, DIV,  X),			\
1239	INSN_3(ALU64, MOD,  X),			\
1240	INSN_2(ALU64, NEG),			\
1241	/*   Immediate based. */		\
1242	INSN_3(ALU64, ADD,  K),			\
1243	INSN_3(ALU64, SUB,  K),			\
1244	INSN_3(ALU64, AND,  K),			\
1245	INSN_3(ALU64, OR,   K),			\
1246	INSN_3(ALU64, LSH,  K),			\
1247	INSN_3(ALU64, RSH,  K),			\
1248	INSN_3(ALU64, XOR,  K),			\
1249	INSN_3(ALU64, MUL,  K),			\
1250	INSN_3(ALU64, MOV,  K),			\
1251	INSN_3(ALU64, ARSH, K),			\
1252	INSN_3(ALU64, DIV,  K),			\
1253	INSN_3(ALU64, MOD,  K),			\
1254	/* Call instruction. */			\
1255	INSN_2(JMP, CALL),			\
1256	/* Exit instruction. */			\
1257	INSN_2(JMP, EXIT),			\
1258	/* 32-bit Jump instructions. */		\
1259	/*   Register based. */			\
1260	INSN_3(JMP32, JEQ,  X),			\
1261	INSN_3(JMP32, JNE,  X),			\
1262	INSN_3(JMP32, JGT,  X),			\
1263	INSN_3(JMP32, JLT,  X),			\
1264	INSN_3(JMP32, JGE,  X),			\
1265	INSN_3(JMP32, JLE,  X),			\
1266	INSN_3(JMP32, JSGT, X),			\
1267	INSN_3(JMP32, JSLT, X),			\
1268	INSN_3(JMP32, JSGE, X),			\
1269	INSN_3(JMP32, JSLE, X),			\
1270	INSN_3(JMP32, JSET, X),			\
1271	/*   Immediate based. */		\
1272	INSN_3(JMP32, JEQ,  K),			\
1273	INSN_3(JMP32, JNE,  K),			\
1274	INSN_3(JMP32, JGT,  K),			\
1275	INSN_3(JMP32, JLT,  K),			\
1276	INSN_3(JMP32, JGE,  K),			\
1277	INSN_3(JMP32, JLE,  K),			\
1278	INSN_3(JMP32, JSGT, K),			\
1279	INSN_3(JMP32, JSLT, K),			\
1280	INSN_3(JMP32, JSGE, K),			\
1281	INSN_3(JMP32, JSLE, K),			\
1282	INSN_3(JMP32, JSET, K),			\
1283	/* Jump instructions. */		\
1284	/*   Register based. */			\
1285	INSN_3(JMP, JEQ,  X),			\
1286	INSN_3(JMP, JNE,  X),			\
1287	INSN_3(JMP, JGT,  X),			\
1288	INSN_3(JMP, JLT,  X),			\
1289	INSN_3(JMP, JGE,  X),			\
1290	INSN_3(JMP, JLE,  X),			\
1291	INSN_3(JMP, JSGT, X),			\
1292	INSN_3(JMP, JSLT, X),			\
1293	INSN_3(JMP, JSGE, X),			\
1294	INSN_3(JMP, JSLE, X),			\
1295	INSN_3(JMP, JSET, X),			\
1296	/*   Immediate based. */		\
1297	INSN_3(JMP, JEQ,  K),			\
1298	INSN_3(JMP, JNE,  K),			\
1299	INSN_3(JMP, JGT,  K),			\
1300	INSN_3(JMP, JLT,  K),			\
1301	INSN_3(JMP, JGE,  K),			\
1302	INSN_3(JMP, JLE,  K),			\
1303	INSN_3(JMP, JSGT, K),			\
1304	INSN_3(JMP, JSLT, K),			\
1305	INSN_3(JMP, JSGE, K),			\
1306	INSN_3(JMP, JSLE, K),			\
1307	INSN_3(JMP, JSET, K),			\
1308	INSN_2(JMP, JA),			\
1309	/* Store instructions. */		\
1310	/*   Register based. */			\
1311	INSN_3(STX, MEM,  B),			\
1312	INSN_3(STX, MEM,  H),			\
1313	INSN_3(STX, MEM,  W),			\
1314	INSN_3(STX, MEM,  DW),			\
1315	INSN_3(STX, XADD, W),			\
1316	INSN_3(STX, XADD, DW),			\
1317	/*   Immediate based. */		\
1318	INSN_3(ST, MEM, B),			\
1319	INSN_3(ST, MEM, H),			\
1320	INSN_3(ST, MEM, W),			\
1321	INSN_3(ST, MEM, DW),			\
1322	/* Load instructions. */		\
1323	/*   Register based. */			\
1324	INSN_3(LDX, MEM, B),			\
1325	INSN_3(LDX, MEM, H),			\
1326	INSN_3(LDX, MEM, W),			\
1327	INSN_3(LDX, MEM, DW),			\
1328	/*   Immediate based. */		\
1329	INSN_3(LD, IMM, DW)
1330
1331bool bpf_opcode_in_insntable(u8 code)
1332{
1333#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1334#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1335	static const bool public_insntable[256] = {
1336		[0 ... 255] = false,
1337		/* Now overwrite non-defaults ... */
1338		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1339		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1340		[BPF_LD | BPF_ABS | BPF_B] = true,
1341		[BPF_LD | BPF_ABS | BPF_H] = true,
1342		[BPF_LD | BPF_ABS | BPF_W] = true,
1343		[BPF_LD | BPF_IND | BPF_B] = true,
1344		[BPF_LD | BPF_IND | BPF_H] = true,
1345		[BPF_LD | BPF_IND | BPF_W] = true,
1346	};
1347#undef BPF_INSN_3_TBL
1348#undef BPF_INSN_2_TBL
1349	return public_insntable[code];
1350}
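/* Editor's note (not part of the kernel source): a standalone
 * illustration of how an opcode byte decomposes into the fields the
 * table above is indexed by, mirroring the BPF_CLASS()/BPF_OP()/
 * BPF_SRC() masks (class in bits 0-2, source in bit 3, operation in
 * bits 4-7 for the ALU/JMP classes). The EX_* macros repeat the UAPI
 * values for BPF_ALU, BPF_ADD and BPF_X.
 */
#include <stdio.h>

#define EX_ALU	0x04
#define EX_ADD	0x00
#define EX_X	0x08

int main(void)
{
	unsigned char code = EX_ALU | EX_ADD | EX_X;	/* 0x0c: ALU_ADD_X */

	printf("class=%#x op=%#x src=%#x\n",
	       code & 0x07, code & 0xf0, code & 0x08);
	return 0;
}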
1351
1352#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1353u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1354{
1355	memset(dst, 0, size);
1356	return -EFAULT;
1357}
1358
1359/**
1360 *	__bpf_prog_run - run eBPF program on a given context
1361 *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1362 *	@insn: is the array of eBPF instructions
1363 *	@stack: is the eBPF storage stack
1364 *
1365 * Decode and execute eBPF instructions.
1366 */
1367static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
1368{
1369#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1370#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1371	static const void * const jumptable[256] __annotate_jump_table = {
1372		[0 ... 255] = &&default_label,
1373		/* Now overwrite non-defaults ... */
1374		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1375		/* Non-UAPI available opcodes. */
1376		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1377		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1378		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1379		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1380		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1381		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1382	};
1383#undef BPF_INSN_3_LBL
1384#undef BPF_INSN_2_LBL
1385	u32 tail_call_cnt = 0;
1386
1387#define CONT	 ({ insn++; goto select_insn; })
1388#define CONT_JMP ({ insn++; goto select_insn; })
1389
1390select_insn:
1391	goto *jumptable[insn->code];
1392
1393	/* ALU */
1394#define ALU(OPCODE, OP)			\
1395	ALU64_##OPCODE##_X:		\
1396		DST = DST OP SRC;	\
1397		CONT;			\
1398	ALU_##OPCODE##_X:		\
1399		DST = (u32) DST OP (u32) SRC;	\
1400		CONT;			\
1401	ALU64_##OPCODE##_K:		\
1402		DST = DST OP IMM;		\
1403		CONT;			\
1404	ALU_##OPCODE##_K:		\
1405		DST = (u32) DST OP (u32) IMM;	\
1406		CONT;
1407
1408	ALU(ADD,  +)
1409	ALU(SUB,  -)
1410	ALU(AND,  &)
1411	ALU(OR,   |)
1412	ALU(LSH, <<)
1413	ALU(RSH, >>)
1414	ALU(XOR,  ^)
1415	ALU(MUL,  *)
1416#undef ALU
1417	ALU_NEG:
1418		DST = (u32) -DST;
1419		CONT;
1420	ALU64_NEG:
1421		DST = -DST;
1422		CONT;
1423	ALU_MOV_X:
1424		DST = (u32) SRC;
1425		CONT;
1426	ALU_MOV_K:
1427		DST = (u32) IMM;
1428		CONT;
1429	ALU64_MOV_X:
1430		DST = SRC;
1431		CONT;
1432	ALU64_MOV_K:
1433		DST = IMM;
1434		CONT;
1435	LD_IMM_DW:
1436		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1437		insn++;
1438		CONT;
1439	ALU_ARSH_X:
1440		DST = (u64) (u32) (((s32) DST) >> SRC);
1441		CONT;
1442	ALU_ARSH_K:
1443		DST = (u64) (u32) (((s32) DST) >> IMM);
1444		CONT;
1445	ALU64_ARSH_X:
1446		(*(s64 *) &DST) >>= SRC;
1447		CONT;
1448	ALU64_ARSH_K:
1449		(*(s64 *) &DST) >>= IMM;
1450		CONT;
1451	ALU64_MOD_X:
1452		div64_u64_rem(DST, SRC, &AX);
1453		DST = AX;
1454		CONT;
1455	ALU_MOD_X:
1456		AX = (u32) DST;
1457		DST = do_div(AX, (u32) SRC);
1458		CONT;
1459	ALU64_MOD_K:
1460		div64_u64_rem(DST, IMM, &AX);
1461		DST = AX;
1462		CONT;
1463	ALU_MOD_K:
1464		AX = (u32) DST;
1465		DST = do_div(AX, (u32) IMM);
1466		CONT;
1467	ALU64_DIV_X:
1468		DST = div64_u64(DST, SRC);
1469		CONT;
1470	ALU_DIV_X:
1471		AX = (u32) DST;
1472		do_div(AX, (u32) SRC);
1473		DST = (u32) AX;
1474		CONT;
1475	ALU64_DIV_K:
1476		DST = div64_u64(DST, IMM);
1477		CONT;
1478	ALU_DIV_K:
1479		AX = (u32) DST;
1480		do_div(AX, (u32) IMM);
1481		DST = (u32) AX;
1482		CONT;
1483	ALU_END_TO_BE:
1484		switch (IMM) {
1485		case 16:
1486			DST = (__force u16) cpu_to_be16(DST);
1487			break;
1488		case 32:
1489			DST = (__force u32) cpu_to_be32(DST);
1490			break;
1491		case 64:
1492			DST = (__force u64) cpu_to_be64(DST);
1493			break;
1494		}
1495		CONT;
1496	ALU_END_TO_LE:
1497		switch (IMM) {
1498		case 16:
1499			DST = (__force u16) cpu_to_le16(DST);
1500			break;
1501		case 32:
1502			DST = (__force u32) cpu_to_le32(DST);
1503			break;
1504		case 64:
1505			DST = (__force u64) cpu_to_le64(DST);
1506			break;
1507		}
1508		CONT;
1509
1510	/* CALL */
1511	JMP_CALL:
1512		/* Function call scratches BPF_R1-BPF_R5 registers,
1513		 * preserves BPF_R6-BPF_R9, and stores return value
1514		 * into BPF_R0.
1515		 */
1516		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1517						       BPF_R4, BPF_R5);
1518		CONT;
1519
1520	JMP_CALL_ARGS:
1521		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1522							    BPF_R3, BPF_R4,
1523							    BPF_R5,
1524							    insn + insn->off + 1);
1525		CONT;
1526
1527	JMP_TAIL_CALL: {
1528		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1529		struct bpf_array *array = container_of(map, struct bpf_array, map);
1530		struct bpf_prog *prog;
1531		u32 index = BPF_R3;
1532
1533		if (unlikely(index >= array->map.max_entries))
1534			goto out;
1535		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1536			goto out;
1537
1538		tail_call_cnt++;
1539
1540		prog = READ_ONCE(array->ptrs[index]);
1541		if (!prog)
1542			goto out;
1543
1544		/* ARG1 at this point is guaranteed to point to CTX from
1545		 * the verifier side, because the tail call is
1546		 * handled like a helper, that is, bpf_tail_call_proto,
1547		 * where arg1_type is ARG_PTR_TO_CTX.
1548		 */
1549		insn = prog->insnsi;
1550		goto select_insn;
1551out:
1552		CONT;
1553	}
1554	JMP_JA:
1555		insn += insn->off;
1556		CONT;
1557	JMP_EXIT:
1558		return BPF_R0;
1559	/* JMP */
1560#define COND_JMP(SIGN, OPCODE, CMP_OP)				\
1561	JMP_##OPCODE##_X:					\
1562		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
1563			insn += insn->off;			\
1564			CONT_JMP;				\
1565		}						\
1566		CONT;						\
1567	JMP32_##OPCODE##_X:					\
1568		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
1569			insn += insn->off;			\
1570			CONT_JMP;				\
1571		}						\
1572		CONT;						\
1573	JMP_##OPCODE##_K:					\
1574		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
1575			insn += insn->off;			\
1576			CONT_JMP;				\
1577		}						\
1578		CONT;						\
1579	JMP32_##OPCODE##_K:					\
1580		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
1581			insn += insn->off;			\
1582			CONT_JMP;				\
1583		}						\
1584		CONT;
1585	COND_JMP(u, JEQ, ==)
1586	COND_JMP(u, JNE, !=)
1587	COND_JMP(u, JGT, >)
1588	COND_JMP(u, JLT, <)
1589	COND_JMP(u, JGE, >=)
1590	COND_JMP(u, JLE, <=)
1591	COND_JMP(u, JSET, &)
1592	COND_JMP(s, JSGT, >)
1593	COND_JMP(s, JSLT, <)
1594	COND_JMP(s, JSGE, >=)
1595	COND_JMP(s, JSLE, <=)
1596#undef COND_JMP
1597	/* STX, ST and LDX */
1598#define LDST(SIZEOP, SIZE)						\
1599	STX_MEM_##SIZEOP:						\
1600		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
1601		CONT;							\
1602	ST_MEM_##SIZEOP:						\
1603		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
1604		CONT;							\
1605	LDX_MEM_##SIZEOP:						\
1606		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
1607		CONT;
1608
1609	LDST(B,   u8)
1610	LDST(H,  u16)
1611	LDST(W,  u32)
1612	LDST(DW, u64)
1613#undef LDST
1614#define LDX_PROBE(SIZEOP, SIZE)							\
1615	LDX_PROBE_MEM_##SIZEOP:							\
1616		bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off));	\
1617		CONT;
1618	LDX_PROBE(B,  1)
1619	LDX_PROBE(H,  2)
1620	LDX_PROBE(W,  4)
1621	LDX_PROBE(DW, 8)
1622#undef LDX_PROBE
1623
1624	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1625		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1626			   (DST + insn->off));
1627		CONT;
1628	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1629		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1630			     (DST + insn->off));
1631		CONT;
1632
1633	default_label:
1634		/* If we ever reach this, we have a bug somewhere. Die hard here
1635		 * instead of just returning 0; we could be somewhere in a subprog,
1636		 * so execution could otherwise continue, which we do /not/ want.
1637		 *
1638		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1639		 */
1640		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1641		BUG_ON(1);
1642		return 0;
1643}
1644
1645#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1646#define DEFINE_BPF_PROG_RUN(stack_size) \
1647static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1648{ \
1649	u64 stack[stack_size / sizeof(u64)]; \
1650	u64 regs[MAX_BPF_EXT_REG]; \
1651\
1652	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1653	ARG1 = (u64) (unsigned long) ctx; \
1654	return ___bpf_prog_run(regs, insn, stack); \
1655}
1656
1657#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1658#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1659static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1660				      const struct bpf_insn *insn) \
1661{ \
1662	u64 stack[stack_size / sizeof(u64)]; \
1663	u64 regs[MAX_BPF_EXT_REG]; \
1664\
1665	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1666	BPF_R1 = r1; \
1667	BPF_R2 = r2; \
1668	BPF_R3 = r3; \
1669	BPF_R4 = r4; \
1670	BPF_R5 = r5; \
1671	return ___bpf_prog_run(regs, insn, stack); \
1672}
1673
1674#define EVAL1(FN, X) FN(X)
1675#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1676#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1677#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1678#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1679#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
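/* The EVALn() helpers apply FN to each argument in turn, e.g. EVAL3(FN, a,
 * b, c) expands to FN(a) FN(b) FN(c), so the EVAL6/EVAL4 chains below stamp
 * out one interpreter per supported stack size.
 */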
1680
1681EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1682EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1683EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1684
1685EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1686EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1687EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1688
1689#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1690
1691static unsigned int (*interpreters[])(const void *ctx,
1692				      const struct bpf_insn *insn) = {
1693EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1694EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1695EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1696};
1697#undef PROG_NAME_LIST
1698#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1699static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1700				  const struct bpf_insn *insn) = {
1701EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1702EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1703EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1704};
1705#undef PROG_NAME_LIST
1706
1707void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1708{
1709	stack_depth = max_t(u32, stack_depth, 1);
1710	insn->off = (s16) insn->imm;
1711	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1712		__bpf_call_base_args;
1713	insn->code = BPF_JMP | BPF_CALL_ARGS;
1714}
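/* Sketch of the rewrite above: a verifier-prepared BPF_JMP | BPF_CALL
 * pseudo call carries the callee offset in imm; it becomes
 * BPF_JMP | BPF_CALL_ARGS with that offset moved to off, while imm now
 * holds the stack-size-matched interpreter entry relative to
 * __bpf_call_base_args.
 */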
1715
1716#else
1717static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1718					 const struct bpf_insn *insn)
1719{
1720	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1721	 * is not working properly, so warn about it!
1722	 */
1723	WARN_ON_ONCE(1);
1724	return 0;
1725}
1726#endif
1727
1728bool bpf_prog_array_compatible(struct bpf_array *array,
1729			       const struct bpf_prog *fp)
1730{
1731	if (fp->kprobe_override)
1732		return false;
1733
1734	if (!array->aux->type) {
1735		/* There's no owner yet where we could check for
1736		 * compatibility.
1737		 */
1738		array->aux->type  = fp->type;
1739		array->aux->jited = fp->jited;
1740		return true;
1741	}
1742
1743	return array->aux->type  == fp->type &&
1744	       array->aux->jited == fp->jited;
1745}
1746
1747static int bpf_check_tail_call(const struct bpf_prog *fp)
1748{
1749	struct bpf_prog_aux *aux = fp->aux;
1750	int i;
1751
1752	for (i = 0; i < aux->used_map_cnt; i++) {
1753		struct bpf_map *map = aux->used_maps[i];
1754		struct bpf_array *array;
1755
1756		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1757			continue;
1758
1759		array = container_of(map, struct bpf_array, map);
1760		if (!bpf_prog_array_compatible(array, fp))
1761			return -EINVAL;
1762	}
1763
1764	return 0;
1765}
1766
1767static void bpf_prog_select_func(struct bpf_prog *fp)
1768{
1769#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1770	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1771
1772	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1773#else
1774	fp->bpf_func = __bpf_prog_ret0_warn;
1775#endif
1776}
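/* Worked example: for a stack_depth of 100, round_up(100, 32) == 128 and
 * 128 / 32 - 1 == 3, so interpreters[3] (the 128-byte stack variant) is
 * picked; a stack_depth of 0 is clamped to 1 and gets the smallest,
 * 32-byte variant.
 */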
1777
1778/**
1779 *	bpf_prog_select_runtime - select exec runtime for BPF program
1780 *	@fp: bpf_prog populated with internal BPF program
1781 *	@err: pointer to error variable
1782 *
1783 * Try to JIT the eBPF program; if no JIT is available, use the interpreter.
1784 * The BPF program will be executed via BPF_PROG_RUN() macro.
1785 */
1786struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1787{
1788	/* In case of BPF to BPF calls, verifier did all the prep
1789	 * work with regards to JITing, etc.
1790	 */
1791	if (fp->bpf_func)
1792		goto finalize;
1793
1794	bpf_prog_select_func(fp);
1795
1796	/* eBPF JITs can rewrite the program in case constant
1797	 * blinding is active. However, in case of an error during
1798	 * blinding, bpf_int_jit_compile() must always return a
1799	 * valid program, which in this case would simply not
1800	 * be JITed and instead falls back to the interpreter.
1801	 */
1802	if (!bpf_prog_is_dev_bound(fp->aux)) {
1803		*err = bpf_prog_alloc_jited_linfo(fp);
1804		if (*err)
1805			return fp;
1806
1807		fp = bpf_int_jit_compile(fp);
1808		if (!fp->jited) {
1809			bpf_prog_free_jited_linfo(fp);
1810#ifdef CONFIG_BPF_JIT_ALWAYS_ON
1811			*err = -ENOTSUPP;
1812			return fp;
1813#endif
1814		} else {
1815			bpf_prog_free_unused_jited_linfo(fp);
1816		}
1817	} else {
1818		*err = bpf_prog_offload_compile(fp);
1819		if (*err)
1820			return fp;
1821	}
1822
1823finalize:
1824	bpf_prog_lock_ro(fp);
1825
1826	/* The tail call compatibility check can only be done at
1827	 * this late stage, as we need to determine whether we deal
1828	 * with JITed or non-JITed program concatenations, and not
1829	 * all eBPF JITs might immediately support all features.
1830	 */
1831	*err = bpf_check_tail_call(fp);
1832
1833	return fp;
1834}
1835EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
1836
1837static unsigned int __bpf_prog_ret1(const void *ctx,
1838				    const struct bpf_insn *insn)
1839{
1840	return 1;
1841}
1842
1843static struct bpf_prog_dummy {
1844	struct bpf_prog prog;
1845} dummy_bpf_prog = {
1846	.prog = {
1847		.bpf_func = __bpf_prog_ret1,
1848	},
1849};
1850
1851 /* To avoid allocating an empty bpf_prog_array for cgroups that
1852  * don't have a bpf program attached, use one global 'empty_prog_array'.
1853  * It will not be modified by the caller of bpf_prog_array_alloc()
1854  * (since the caller requested prog_cnt == 0), and that pointer
1855  * should be 'freed' by bpf_prog_array_free().
1856  */
1857static struct {
1858	struct bpf_prog_array hdr;
1859	struct bpf_prog *null_prog;
1860} empty_prog_array = {
1861	.null_prog = NULL,
1862};
1863
1864struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1865{
1866	if (prog_cnt)
1867		return kzalloc(sizeof(struct bpf_prog_array) +
1868			       sizeof(struct bpf_prog_array_item) *
1869			       (prog_cnt + 1),
1870			       flags);
1871
1872	return &empty_prog_array.hdr;
1873}
1874
1875void bpf_prog_array_free(struct bpf_prog_array *progs)
1876{
1877	if (!progs || progs == &empty_prog_array.hdr)
1878		return;
1879	kfree_rcu(progs, rcu);
1880}
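/* Minimal usage sketch (hypothetical caller):
 *
 *	struct bpf_prog_array *arr = bpf_prog_array_alloc(2, GFP_KERNEL);
 *
 *	if (!arr)
 *		return -ENOMEM;
 *	...
 *	bpf_prog_array_free(arr);
 *
 * For prog_cnt == 0 the shared &empty_prog_array.hdr is handed out, which
 * bpf_prog_array_free() recognizes and never kfree_rcu()s.
 */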
1881
1882int bpf_prog_array_length(struct bpf_prog_array *array)
1883{
1884	struct bpf_prog_array_item *item;
1885	u32 cnt = 0;
1886
1887	for (item = array->items; item->prog; item++)
1888		if (item->prog != &dummy_bpf_prog.prog)
1889			cnt++;
1890	return cnt;
1891}
1892
1893bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
1894{
1895	struct bpf_prog_array_item *item;
1896
1897	for (item = array->items; item->prog; item++)
1898		if (item->prog != &dummy_bpf_prog.prog)
1899			return false;
1900	return true;
1901}
1902
1903static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
1904				     u32 *prog_ids,
1905				     u32 request_cnt)
1906{
1907	struct bpf_prog_array_item *item;
1908	int i = 0;
1909
1910	for (item = array->items; item->prog; item++) {
1911		if (item->prog == &dummy_bpf_prog.prog)
1912			continue;
1913		prog_ids[i] = item->prog->aux->id;
1914		if (++i == request_cnt) {
1915			item++;
1916			break;
1917		}
1918	}
1919
1920	return !!(item->prog);
1921}
1922
1923int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
1924				__u32 __user *prog_ids, u32 cnt)
1925{
1926	unsigned long err = 0;
1927	bool nospc;
1928	u32 *ids;
1929
1930	/* users of this function are doing:
1931	 * cnt = bpf_prog_array_length();
1932	 * if (cnt > 0)
1933	 *     bpf_prog_array_copy_to_user(..., cnt);
1934	 * so the kcalloc below doesn't need an extra cnt > 0 check.
1935	 */
1936	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1937	if (!ids)
1938		return -ENOMEM;
1939	nospc = bpf_prog_array_copy_core(array, ids, cnt);
1940	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1941	kfree(ids);
1942	if (err)
1943		return -EFAULT;
1944	if (nospc)
1945		return -ENOSPC;
1946	return 0;
1947}
1948
1949void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
1950				struct bpf_prog *old_prog)
1951{
1952	struct bpf_prog_array_item *item;
1953
1954	for (item = array->items; item->prog; item++)
1955		if (item->prog == old_prog) {
1956			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
1957			break;
1958		}
1959}
1960
1961/**
1962 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
1963 *                                   index into the program array with
1964 *                                   a dummy no-op program.
1965 * @array: a bpf_prog_array
1966 * @index: the index of the program to replace
1967 *
1968 * Skips over dummy programs (i.e. does not count them) when calculating
1969 * the position of the program to replace.
1970 *
1971 * Return:
1972 * * 0		- Success
1973 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
1974 * * -ENOENT	- Index out of range
1975 */
1976int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
1977{
1978	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
1979}
1980
1981/**
1982 * bpf_prog_array_update_at() - Updates the program at the given index
1983 *                              into the program array.
1984 * @array: a bpf_prog_array
1985 * @index: the index of the program to update
1986 * @prog: the program to insert into the array
1987 *
1988 * Skips over dummy programs (i.e. does not count them) when calculating
1989 * the position of the program to update.
1990 *
1991 * Return:
1992 * * 0		- Success
1993 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
1994 * * -ENOENT	- Index out of range
1995 */
1996int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
1997			     struct bpf_prog *prog)
1998{
1999	struct bpf_prog_array_item *item;
2000
2001	if (unlikely(index < 0))
2002		return -EINVAL;
2003
2004	for (item = array->items; item->prog; item++) {
2005		if (item->prog == &dummy_bpf_prog.prog)
2006			continue;
2007		if (!index) {
2008			WRITE_ONCE(item->prog, prog);
2009			return 0;
2010		}
2011		index--;
2012	}
2013	return -ENOENT;
2014}
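/* Worked example: with items = { dummy, A, dummy, B, NULL }, index 0
 * addresses A and index 1 addresses B, since the dummy slots left behind
 * by bpf_prog_array_delete_safe() are skipped.
 */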
2015
2016int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2017			struct bpf_prog *exclude_prog,
2018			struct bpf_prog *include_prog,
2019			struct bpf_prog_array **new_array)
2020{
2021	int new_prog_cnt, carry_prog_cnt = 0;
2022	struct bpf_prog_array_item *existing;
2023	struct bpf_prog_array *array;
2024	bool found_exclude = false;
2025	int new_prog_idx = 0;
2026
2027	/* Figure out how many existing progs we need to carry over to
2028	 * the new array.
2029	 */
2030	if (old_array) {
2031		existing = old_array->items;
2032		for (; existing->prog; existing++) {
2033			if (existing->prog == exclude_prog) {
2034				found_exclude = true;
2035				continue;
2036			}
2037			if (existing->prog != &dummy_bpf_prog.prog)
2038				carry_prog_cnt++;
2039			if (existing->prog == include_prog)
2040				return -EEXIST;
2041		}
2042	}
2043
2044	if (exclude_prog && !found_exclude)
2045		return -ENOENT;
2046
2047	/* How many progs (not NULL) will be in the new array? */
2048	new_prog_cnt = carry_prog_cnt;
2049	if (include_prog)
2050		new_prog_cnt += 1;
2051
2052	/* Do we have any prog (not NULL) in the new array? */
2053	if (!new_prog_cnt) {
2054		*new_array = NULL;
2055		return 0;
2056	}
2057
2058	/* +1 as the end of prog_array is marked with NULL */
2059	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2060	if (!array)
2061		return -ENOMEM;
2062
2063	/* Fill in the new prog array */
2064	if (carry_prog_cnt) {
2065		existing = old_array->items;
2066		for (; existing->prog; existing++)
2067			if (existing->prog != exclude_prog &&
2068			    existing->prog != &dummy_bpf_prog.prog) {
2069				array->items[new_prog_idx++].prog =
2070					existing->prog;
2071			}
2072	}
2073	if (include_prog)
2074		array->items[new_prog_idx++].prog = include_prog;
2075	array->items[new_prog_idx].prog = NULL;
2076	*new_array = array;
2077	return 0;
2078}
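/* Example outcomes (sketch): copying { A, B, dummy, NULL } with
 * exclude_prog == B and include_prog == C yields { A, C, NULL };
 * excluding a prog that is not present fails with -ENOENT, and
 * including one that is already present fails with -EEXIST.
 */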
2079
2080int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2081			     u32 *prog_ids, u32 request_cnt,
2082			     u32 *prog_cnt)
2083{
2084	u32 cnt = 0;
2085
2086	if (array)
2087		cnt = bpf_prog_array_length(array);
2088
2089	*prog_cnt = cnt;
2090
2091	/* return early if user requested only program count or nothing to copy */
2092	if (!request_cnt || !cnt)
2093		return 0;
2094
2095	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2096	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2097								     : 0;
2098}
2099
2100void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2101			  struct bpf_map **used_maps, u32 len)
2102{
2103	struct bpf_map *map;
2104	u32 i;
2105
2106	for (i = 0; i < len; i++) {
2107		map = used_maps[i];
2108		if (map->ops->map_poke_untrack)
2109			map->ops->map_poke_untrack(map, aux);
2110		bpf_map_put(map);
2111	}
2112}
2113
2114static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2115{
2116	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2117	kfree(aux->used_maps);
2118}
2119
2120static void bpf_prog_free_deferred(struct work_struct *work)
2121{
2122	struct bpf_prog_aux *aux;
2123	int i;
2124
2125	aux = container_of(work, struct bpf_prog_aux, work);
2126	bpf_free_used_maps(aux);
2127	if (bpf_prog_is_dev_bound(aux))
2128		bpf_prog_offload_destroy(aux->prog);
2129#ifdef CONFIG_PERF_EVENTS
2130	if (aux->prog->has_callchain_buf)
2131		put_callchain_buffers();
2132#endif
2133	bpf_trampoline_put(aux->trampoline);
2134	for (i = 0; i < aux->func_cnt; i++)
2135		bpf_jit_free(aux->func[i]);
2136	if (aux->func_cnt) {
2137		kfree(aux->func);
2138		bpf_prog_unlock_free(aux->prog);
2139	} else {
2140		bpf_jit_free(aux->prog);
2141	}
2142}
2143
2144/* Free internal BPF program */
2145void bpf_prog_free(struct bpf_prog *fp)
2146{
2147	struct bpf_prog_aux *aux = fp->aux;
2148
2149	if (aux->linked_prog)
2150		bpf_prog_put(aux->linked_prog);
2151	INIT_WORK(&aux->work, bpf_prog_free_deferred);
2152	schedule_work(&aux->work);
2153}
2154EXPORT_SYMBOL_GPL(bpf_prog_free);
2155
2156 /* RNG for unprivileged user space with state separated from prandom_u32(). */
2157static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2158
2159void bpf_user_rnd_init_once(void)
2160{
2161	prandom_init_once(&bpf_user_rnd_state);
2162}
2163
2164BPF_CALL_0(bpf_user_rnd_u32)
2165{
2166	/* Should someone ever have the rather unwise idea to use some
2167	 * of the registers passed into this function, then note that
2168	 * this function is called from native eBPF and classic-to-eBPF
2169	 * transformations. Register assignments from both sides are
2170	 * different, f.e. classic always sets fn(ctx, A, X) here.
2171	 */
2172	struct rnd_state *state;
2173	u32 res;
2174
2175	state = &get_cpu_var(bpf_user_rnd_state);
2176	res = prandom_u32_state(state);
2177	put_cpu_var(bpf_user_rnd_state);
2178
2179	return res;
2180}
2181
2182BPF_CALL_0(bpf_get_raw_cpu_id)
2183{
2184	return raw_smp_processor_id();
2185}
2186
2187/* Weak definitions of helper functions in case we don't have bpf syscall. */
2188const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2189const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2190const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2191const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2192const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2193const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2194const struct bpf_func_proto bpf_spin_lock_proto __weak;
2195const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2196const struct bpf_func_proto bpf_jiffies64_proto __weak;
2197
2198const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2199const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2200const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2201const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2202const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2203
2204const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2205const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2206const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2207const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2208const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2209const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2210const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2211
2212const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2213{
2214	return NULL;
2215}
2216
2217u64 __weak
2218bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2219		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2220{
2221	return -ENOTSUPP;
2222}
2223EXPORT_SYMBOL_GPL(bpf_event_output);
2224
2225/* Always built-in helper functions. */
2226const struct bpf_func_proto bpf_tail_call_proto = {
2227	.func		= NULL,
2228	.gpl_only	= false,
2229	.ret_type	= RET_VOID,
2230	.arg1_type	= ARG_PTR_TO_CTX,
2231	.arg2_type	= ARG_CONST_MAP_PTR,
2232	.arg3_type	= ARG_ANYTHING,
2233};
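/* Note that .func is intentionally NULL: tail calls are not dispatched
 * through a helper call but are handled inline, by the interpreter's
 * tail_call path above and by the JITs.
 */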
2234
2235/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2236 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2237 * eBPF and implicitly also cBPF can get JITed!
2238 */
2239struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2240{
2241	return prog;
2242}
2243
2244/* Stub for JITs that support eBPF. All cBPF code gets transformed into
2245 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2246 */
2247void __weak bpf_jit_compile(struct bpf_prog *prog)
2248{
2249}
2250
2251bool __weak bpf_helper_changes_pkt_data(void *func)
2252{
2253	return false;
2254}
2255
2256 /* Return TRUE if the JIT backend wants the verifier to enable sub-register usage
2257  * analysis code and wants explicit zero extension inserted by the verifier.
2258 * Otherwise, return FALSE.
2259 */
2260bool __weak bpf_jit_needs_zext(void)
2261{
2262	return false;
2263}
2264
2265/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2266 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2267 */
2268int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2269			 int len)
2270{
2271	return -EFAULT;
2272}
2273
2274int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2275			      void *addr1, void *addr2)
2276{
2277	return -ENOTSUPP;
2278}
2279
2280DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2281EXPORT_SYMBOL(bpf_stats_enabled_key);
2282
2283/* All definitions of tracepoints related to BPF. */
2284#define CREATE_TRACE_POINTS
2285#include <linux/bpf_trace.h>
2286
2287EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2288EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);