Linux v5.4: kernel/bpf/core.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Linux Socket Filter - Kernel level socket filtering
   4 *
   5 * Based on the design of the Berkeley Packet Filter. The new
   6 * internal format has been designed by PLUMgrid:
   7 *
   8 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
   9 *
  10 * Authors:
  11 *
  12 *	Jay Schulist <jschlst@samba.org>
  13 *	Alexei Starovoitov <ast@plumgrid.com>
  14 *	Daniel Borkmann <dborkman@redhat.com>
  15 *
  16 * Andi Kleen - Fix a few bad bugs and races.
  17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
  18 */
  19
  20#include <uapi/linux/btf.h>
  21#include <linux/filter.h>
  22#include <linux/skbuff.h>
  23#include <linux/vmalloc.h>
  24#include <linux/random.h>
  25#include <linux/moduleloader.h>
  26#include <linux/bpf.h>
  27#include <linux/btf.h>
  28#include <linux/frame.h>
  29#include <linux/rbtree_latch.h>
  30#include <linux/kallsyms.h>
  31#include <linux/rcupdate.h>
  32#include <linux/perf_event.h>
  33
  34#include <asm/unaligned.h>
  35
  36/* Registers */
  37#define BPF_R0	regs[BPF_REG_0]
  38#define BPF_R1	regs[BPF_REG_1]
  39#define BPF_R2	regs[BPF_REG_2]
  40#define BPF_R3	regs[BPF_REG_3]
  41#define BPF_R4	regs[BPF_REG_4]
  42#define BPF_R5	regs[BPF_REG_5]
  43#define BPF_R6	regs[BPF_REG_6]
  44#define BPF_R7	regs[BPF_REG_7]
  45#define BPF_R8	regs[BPF_REG_8]
  46#define BPF_R9	regs[BPF_REG_9]
  47#define BPF_R10	regs[BPF_REG_10]
  48
  49/* Named registers */
  50#define DST	regs[insn->dst_reg]
  51#define SRC	regs[insn->src_reg]
  52#define FP	regs[BPF_REG_FP]
  53#define AX	regs[BPF_REG_AX]
  54#define ARG1	regs[BPF_REG_ARG1]
  55#define CTX	regs[BPF_REG_CTX]
  56#define IMM	insn->imm
  57
  58/* No hurry in this branch
  59 *
  60 * Exported for the bpf jit load helper.
  61 */
  62void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
  63{
  64	u8 *ptr = NULL;
  65
  66	if (k >= SKF_NET_OFF)
  67		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
  68	else if (k >= SKF_LL_OFF)
  69		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
  70
  71	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
  72		return ptr;
  73
  74	return NULL;
  75}
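/* Editorial aside, not part of the original kernel source: a worked example
 * of the negative-offset convention handled above. A classic cBPF filter
 * that wants the IPv4 protocol byte regardless of link-layer framing loads
 * from k = SKF_NET_OFF + 9; for such a k this helper resolves to
 * skb_network_header(skb) + 9, provided that byte lies within the linear
 * skb data, and returns NULL otherwise.
 */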
  76
  77struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
  78{
  79	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
  80	struct bpf_prog_aux *aux;
  81	struct bpf_prog *fp;
  82
  83	size = round_up(size, PAGE_SIZE);
  84	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
  85	if (fp == NULL)
  86		return NULL;
  87
  88	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
  89	if (aux == NULL) {
  90		vfree(fp);
  91		return NULL;
  92	}
  93
  94	fp->pages = size / PAGE_SIZE;
  95	fp->aux = aux;
  96	fp->aux->prog = fp;
  97	fp->jit_requested = ebpf_jit_enabled();
  98
  99	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
 100
 101	return fp;
 102}
 103
 104struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 105{
 106	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
 107	struct bpf_prog *prog;
 108	int cpu;
 109
 110	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
 111	if (!prog)
 112		return NULL;
 113
 114	prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
 115	if (!prog->aux->stats) {
 116		kfree(prog->aux);
 117		vfree(prog);
 118		return NULL;
 119	}
 120
 121	for_each_possible_cpu(cpu) {
 122		struct bpf_prog_stats *pstats;
 123
 124		pstats = per_cpu_ptr(prog->aux->stats, cpu);
 125		u64_stats_init(&pstats->syncp);
 126	}
 127	return prog;
 128}
 129EXPORT_SYMBOL_GPL(bpf_prog_alloc);
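/* Editorial sketch, not part of the original kernel source: roughly how a
 * loader such as the bpf(2) syscall path uses this allocator (insn_cnt and
 * insns here stand in for caller-supplied values):
 *
 *	prog = bpf_prog_alloc(bpf_prog_size(insn_cnt), GFP_USER);
 *	if (!prog)
 *		return -ENOMEM;
 *	prog->len = insn_cnt;
 *	then copy insn_cnt instructions into prog->insnsi
 *
 * bpf_prog_size() accounts for the struct bpf_prog header plus the
 * instruction array, and bpf_prog_alloc() additionally sets up the per-CPU
 * stats counters.
 */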
 130
 131int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
 132{
 133	if (!prog->aux->nr_linfo || !prog->jit_requested)
 134		return 0;
 135
 136	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
 137					 sizeof(*prog->aux->jited_linfo),
 138					 GFP_KERNEL | __GFP_NOWARN);
 139	if (!prog->aux->jited_linfo)
 140		return -ENOMEM;
 141
 142	return 0;
 143}
 144
 145void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
 146{
 147	kfree(prog->aux->jited_linfo);
 148	prog->aux->jited_linfo = NULL;
 149}
 150
 151void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
 152{
 153	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
 154		bpf_prog_free_jited_linfo(prog);
 155}
 156
 157/* The jit engine is responsible to provide an array
 158 * for insn_off to the jited_off mapping (insn_to_jit_off).
 159 *
 160 * The idx to this array is the insn_off.  Hence, the insn_off
 161 * here is relative to the prog itself instead of the main prog.
 162 * This array has one entry for each xlated bpf insn.
 163 *
 164 * jited_off is the byte off to the last byte of the jited insn.
 165 *
 166 * Hence, with
 167 * insn_start:
 168 *      The first bpf insn off of the prog.  The insn off
 169 *      here is relative to the main prog.
 170 *      e.g. if prog is a subprog, insn_start > 0
 171 * linfo_idx:
 172 *      The prog's idx to prog->aux->linfo and jited_linfo
 173 *
 174 * jited_linfo[linfo_idx] = prog->bpf_func
 175 *
 176 * For i > linfo_idx,
 177 *
 178 * jited_linfo[i] = prog->bpf_func +
 179 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 180 */
 181void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
 182			       const u32 *insn_to_jit_off)
 183{
 184	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
 185	const struct bpf_line_info *linfo;
 186	void **jited_linfo;
 187
 188	if (!prog->aux->jited_linfo)
 189		/* Userspace did not provide linfo */
 190		return;
 191
 192	linfo_idx = prog->aux->linfo_idx;
 193	linfo = &prog->aux->linfo[linfo_idx];
 194	insn_start = linfo[0].insn_off;
 195	insn_end = insn_start + prog->len;
 196
 197	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
 198	jited_linfo[0] = prog->bpf_func;
 199
 200	nr_linfo = prog->aux->nr_linfo - linfo_idx;
 201
 202	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
 203		/* The verifier ensures that linfo[i].insn_off is
 204		 * strictly increasing
 205		 */
 206		jited_linfo[i] = prog->bpf_func +
 207			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
 208}
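/* Editorial worked example, not part of the original kernel source,
 * following the mapping described above: for a subprog with insn_start = 10
 * whose linfo[2].insn_off = 14, the index used is 14 - 10 - 1 = 3, so
 * jited_linfo[2] = prog->bpf_func + insn_to_jit_off[3], i.e. the byte just
 * past the jited code of the previous instruction, which is where the jited
 * code for insn_off 14 begins.
 */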
 209
 210void bpf_prog_free_linfo(struct bpf_prog *prog)
 211{
 212	bpf_prog_free_jited_linfo(prog);
 213	kvfree(prog->aux->linfo);
 214}
 215
 216struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 217				  gfp_t gfp_extra_flags)
 218{
 219	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
 220	struct bpf_prog *fp;
 221	u32 pages, delta;
 222	int ret;
 223
 224	BUG_ON(fp_old == NULL);
 225
 226	size = round_up(size, PAGE_SIZE);
 227	pages = size / PAGE_SIZE;
 228	if (pages <= fp_old->pages)
 229		return fp_old;
 230
 231	delta = pages - fp_old->pages;
 232	ret = __bpf_prog_charge(fp_old->aux->user, delta);
 233	if (ret)
 234		return NULL;
 235
 236	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
 237	if (fp == NULL) {
 238		__bpf_prog_uncharge(fp_old->aux->user, delta);
 239	} else {
 240		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
 241		fp->pages = pages;
 242		fp->aux->prog = fp;
 243
 244		/* We keep fp->aux from fp_old around in the new
 245		 * reallocated structure.
 246		 */
 247		fp_old->aux = NULL;
 248		__bpf_prog_free(fp_old);
 249	}
 250
 251	return fp;
 252}
 253
 254void __bpf_prog_free(struct bpf_prog *fp)
 255{
 256	if (fp->aux) {
 257		free_percpu(fp->aux->stats);
 258		kfree(fp->aux);
 259	}
 260	vfree(fp);
 261}
 262
 263int bpf_prog_calc_tag(struct bpf_prog *fp)
 264{
 265	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
 266	u32 raw_size = bpf_prog_tag_scratch_size(fp);
 267	u32 digest[SHA_DIGEST_WORDS];
 268	u32 ws[SHA_WORKSPACE_WORDS];
 269	u32 i, bsize, psize, blocks;
 270	struct bpf_insn *dst;
 271	bool was_ld_map;
 272	u8 *raw, *todo;
 273	__be32 *result;
 274	__be64 *bits;
 275
 276	raw = vmalloc(raw_size);
 277	if (!raw)
 278		return -ENOMEM;
 279
 280	sha_init(digest);
 281	memset(ws, 0, sizeof(ws));
 282
 283	/* We need to take out the map fd for the digest calculation
 284	 * since they are unstable from user space side.
 285	 */
 286	dst = (void *)raw;
 287	for (i = 0, was_ld_map = false; i < fp->len; i++) {
 288		dst[i] = fp->insnsi[i];
 289		if (!was_ld_map &&
 290		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
 291		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
 292		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
 293			was_ld_map = true;
 294			dst[i].imm = 0;
 295		} else if (was_ld_map &&
 296			   dst[i].code == 0 &&
 297			   dst[i].dst_reg == 0 &&
 298			   dst[i].src_reg == 0 &&
 299			   dst[i].off == 0) {
 300			was_ld_map = false;
 301			dst[i].imm = 0;
 302		} else {
 303			was_ld_map = false;
 304		}
 305	}
 306
 307	psize = bpf_prog_insn_size(fp);
 308	memset(&raw[psize], 0, raw_size - psize);
 309	raw[psize++] = 0x80;
 310
 311	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
 312	blocks = bsize / SHA_MESSAGE_BYTES;
 313	todo   = raw;
 314	if (bsize - psize >= sizeof(__be64)) {
 315		bits = (__be64 *)(todo + bsize - sizeof(__be64));
 316	} else {
 317		bits = (__be64 *)(todo + bsize + bits_offset);
 318		blocks++;
 319	}
 320	*bits = cpu_to_be64((psize - 1) << 3);
 321
 322	while (blocks--) {
 323		sha_transform(digest, todo, ws);
 324		todo += SHA_MESSAGE_BYTES;
 325	}
 326
 327	result = (__force __be32 *)digest;
 328	for (i = 0; i < SHA_DIGEST_WORDS; i++)
 329		result[i] = cpu_to_be32(digest[i]);
 330	memcpy(fp->tag, result, sizeof(fp->tag));
 331
 332	vfree(raw);
 333	return 0;
 334}
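/* Editorial worked example, not part of the original kernel source, of the
 * SHA-1 padding arithmetic above: for a 16-insn program, psize starts at
 * 16 * 8 = 128 bytes, becomes 129 after the 0x80 pad byte, and
 * bsize = round_up(129, 64) = 192, so blocks = 3. Since bsize - psize = 63
 * is >= 8, the big-endian length word lands at raw + 192 - 8 and holds
 * (129 - 1) * 8 = 1024, the original message length in bits. Map fd
 * immediates are zeroed beforehand so the resulting tag is stable across
 * loads.
 */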
 335
 336static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
 337				s32 end_new, s32 curr, const bool probe_pass)
 338{
 339	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
 340	s32 delta = end_new - end_old;
 341	s64 imm = insn->imm;
 342
 343	if (curr < pos && curr + imm + 1 >= end_old)
 344		imm += delta;
 345	else if (curr >= end_new && curr + imm + 1 < end_new)
 346		imm -= delta;
 347	if (imm < imm_min || imm > imm_max)
 348		return -ERANGE;
 349	if (!probe_pass)
 350		insn->imm = imm;
 351	return 0;
 352}
 353
 354static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
 355				s32 end_new, s32 curr, const bool probe_pass)
 356{
 357	const s32 off_min = S16_MIN, off_max = S16_MAX;
 358	s32 delta = end_new - end_old;
 359	s32 off = insn->off;
 360
 361	if (curr < pos && curr + off + 1 >= end_old)
 362		off += delta;
 363	else if (curr >= end_new && curr + off + 1 < end_new)
 364		off -= delta;
 365	if (off < off_min || off > off_max)
 366		return -ERANGE;
 367	if (!probe_pass)
 368		insn->off = off;
 369	return 0;
 370}
 371
 372static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
 373			    s32 end_new, const bool probe_pass)
 374{
 375	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
 376	struct bpf_insn *insn = prog->insnsi;
 377	int ret = 0;
 378
 379	for (i = 0; i < insn_cnt; i++, insn++) {
 380		u8 code;
 381
 382		/* In the probing pass we still operate on the original,
 383		 * unpatched image in order to check overflows before we
 384		 * do any other adjustments. Therefore skip the patchlet.
 385		 */
 386		if (probe_pass && i == pos) {
 387			i = end_new;
 388			insn = prog->insnsi + end_old;
 389		}
 390		code = insn->code;
 391		if ((BPF_CLASS(code) != BPF_JMP &&
 392		     BPF_CLASS(code) != BPF_JMP32) ||
 393		    BPF_OP(code) == BPF_EXIT)
 394			continue;
 395		/* Adjust offset of jmps if we cross patch boundaries. */
 396		if (BPF_OP(code) == BPF_CALL) {
 397			if (insn->src_reg != BPF_PSEUDO_CALL)
 398				continue;
 399			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
 400						   end_new, i, probe_pass);
 401		} else {
 402			ret = bpf_adj_delta_to_off(insn, pos, end_old,
 403						   end_new, i, probe_pass);
 404		}
 405		if (ret)
 406			break;
 407	}
 408
 409	return ret;
 410}
 411
 412static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
 413{
 414	struct bpf_line_info *linfo;
 415	u32 i, nr_linfo;
 416
 417	nr_linfo = prog->aux->nr_linfo;
 418	if (!nr_linfo || !delta)
 419		return;
 420
 421	linfo = prog->aux->linfo;
 422
 423	for (i = 0; i < nr_linfo; i++)
 424		if (off < linfo[i].insn_off)
 425			break;
 426
 427	/* Push all off < linfo[i].insn_off by delta */
 428	for (; i < nr_linfo; i++)
 429		linfo[i].insn_off += delta;
 430}
 431
 432struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 433				       const struct bpf_insn *patch, u32 len)
 434{
 435	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
 436	const u32 cnt_max = S16_MAX;
 437	struct bpf_prog *prog_adj;
 438	int err;
 439
 440	/* Since our patchlet doesn't expand the image, we're done. */
 441	if (insn_delta == 0) {
 442		memcpy(prog->insnsi + off, patch, sizeof(*patch));
 443		return prog;
 444	}
 445
 446	insn_adj_cnt = prog->len + insn_delta;
 447
 448	/* Reject anything that would potentially let the insn->off
 449	 * target overflow when we have excessive program expansions.
 450	 * We need to probe here before we do any reallocation where
 451	 * we afterwards may not fail anymore.
 452	 */
 453	if (insn_adj_cnt > cnt_max &&
 454	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
 455		return ERR_PTR(err);
 456
 457	/* Several new instructions need to be inserted. Make room
 458	 * for them. Likely, there's no need for a new allocation as
 459	 * last page could have large enough tailroom.
 460	 */
 461	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
 462				    GFP_USER);
 463	if (!prog_adj)
 464		return ERR_PTR(-ENOMEM);
 465
 466	prog_adj->len = insn_adj_cnt;
 467
 468	/* Patching happens in 3 steps:
 469	 *
 470	 * 1) Move over tail of insnsi from next instruction onwards,
 471	 *    so we can patch the single target insn with one or more
 472	 *    new ones (patching is always from 1 to n insns, n > 0).
 473	 * 2) Inject new instructions at the target location.
 474	 * 3) Adjust branch offsets if necessary.
 475	 */
 476	insn_rest = insn_adj_cnt - off - len;
 477
 478	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
 479		sizeof(*patch) * insn_rest);
 480	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
 481
 482	/* We are guaranteed to not fail at this point, otherwise
 483	 * the ship has sailed to reverse to the original state. An
 484	 * overflow cannot happen at this point.
 485	 */
 486	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
 487
 488	bpf_adj_linfo(prog_adj, off, insn_delta);
 489
 490	return prog_adj;
 491}
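/* Editorial worked example, not part of the original kernel source:
 * patching the single insn at off = 5 with a len = 3 patchlet gives
 * insn_delta = 2, so the program grows by two instructions, everything from
 * off + 1 onwards moves up by two slots, and any jump whose span crosses
 * the patched location gets its off (or imm, for BPF-to-BPF pseudo calls)
 * adjusted by +2 via bpf_adj_branches().
 */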
 492
 493int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
 494{
 495	/* Branch offsets can't overflow when program is shrinking, no need
 496	 * to call bpf_adj_branches(..., true) here
 497	 */
 498	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
 499		sizeof(struct bpf_insn) * (prog->len - off - cnt));
 500	prog->len -= cnt;
 501
 502	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
 503}
 504
 505static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
 506{
 507	int i;
 508
 509	for (i = 0; i < fp->aux->func_cnt; i++)
 510		bpf_prog_kallsyms_del(fp->aux->func[i]);
 511}
 512
 513void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
 514{
 515	bpf_prog_kallsyms_del_subprogs(fp);
 516	bpf_prog_kallsyms_del(fp);
 517}
 518
 519#ifdef CONFIG_BPF_JIT
 520/* All BPF JIT sysctl knobs here. */
 521int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
 522int bpf_jit_harden   __read_mostly;
 523int bpf_jit_kallsyms __read_mostly;
 524long bpf_jit_limit   __read_mostly;
 525
 526static __always_inline void
 527bpf_get_prog_addr_region(const struct bpf_prog *prog,
 528			 unsigned long *symbol_start,
 529			 unsigned long *symbol_end)
 530{
 531	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
 532	unsigned long addr = (unsigned long)hdr;
 533
 534	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
 535
 536	*symbol_start = addr;
 537	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
 538}
 539
 540void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
 541{
 542	const char *end = sym + KSYM_NAME_LEN;
 543	const struct btf_type *type;
 544	const char *func_name;
 545
 546	BUILD_BUG_ON(sizeof("bpf_prog_") +
 547		     sizeof(prog->tag) * 2 +
 548		     /* name has been null terminated.
 549		      * We would need +1 for the '_' preceding
 550		      * the name.  However, the null character
 551		      * is double counted between the name and the
 552		      * sizeof("bpf_prog_") above, so we omit
 553		      * the +1 here.
 554		      */
 555		     sizeof(prog->aux->name) > KSYM_NAME_LEN);
 556
 557	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
 558	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
 559
 560	/* prog->aux->name will be ignored if full btf name is available */
 561	if (prog->aux->func_info_cnt) {
 562		type = btf_type_by_id(prog->aux->btf,
 563				      prog->aux->func_info[prog->aux->func_idx].type_id);
 564		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
 565		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
 566		return;
 567	}
 568
 569	if (prog->aux->name[0])
 570		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
 571	else
 572		*sym = 0;
 573}
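/* Editorial example, not part of the original kernel source: the resulting
 * kallsyms entry is "bpf_prog_" plus the 16 hex digits of the 8-byte tag,
 * optionally followed by "_" and the BTF function name (when func_info is
 * present) or prog->aux->name, e.g. a made-up
 * "bpf_prog_8937c9e2d9a325cc_my_filter".
 */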
 574
 575static __always_inline unsigned long
 576bpf_get_prog_addr_start(struct latch_tree_node *n)
 577{
 578	unsigned long symbol_start, symbol_end;
 579	const struct bpf_prog_aux *aux;
 580
 581	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
 582	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
 583
 584	return symbol_start;
 585}
 586
 587static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
 588					  struct latch_tree_node *b)
 589{
 590	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
 591}
 592
 593static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
 594{
 595	unsigned long val = (unsigned long)key;
 596	unsigned long symbol_start, symbol_end;
 597	const struct bpf_prog_aux *aux;
 598
 599	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
 600	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
 601
 602	if (val < symbol_start)
 603		return -1;
 604	if (val >= symbol_end)
 605		return  1;
 606
 607	return 0;
 608}
 609
 610static const struct latch_tree_ops bpf_tree_ops = {
 611	.less	= bpf_tree_less,
 612	.comp	= bpf_tree_comp,
 613};
 614
 615static DEFINE_SPINLOCK(bpf_lock);
 616static LIST_HEAD(bpf_kallsyms);
 617static struct latch_tree_root bpf_tree __cacheline_aligned;
 618
 619static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
 620{
 621	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
 622	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
 623	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
 624}
 625
 626static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
 627{
 628	if (list_empty(&aux->ksym_lnode))
 629		return;
 630
 631	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
 632	list_del_rcu(&aux->ksym_lnode);
 633}
 634
 635static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
 636{
 637	return fp->jited && !bpf_prog_was_classic(fp);
 638}
 639
 640static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
 641{
 642	return list_empty(&fp->aux->ksym_lnode) ||
 643	       fp->aux->ksym_lnode.prev == LIST_POISON2;
 644}
 645
 646void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 647{
 648	if (!bpf_prog_kallsyms_candidate(fp) ||
 649	    !capable(CAP_SYS_ADMIN))
 650		return;
 651
 652	spin_lock_bh(&bpf_lock);
 653	bpf_prog_ksym_node_add(fp->aux);
 654	spin_unlock_bh(&bpf_lock);
 655}
 656
 657void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 658{
 659	if (!bpf_prog_kallsyms_candidate(fp))
 660		return;
 661
 662	spin_lock_bh(&bpf_lock);
 663	bpf_prog_ksym_node_del(fp->aux);
 664	spin_unlock_bh(&bpf_lock);
 665}
 666
 667static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
 668{
 669	struct latch_tree_node *n;
 670
 671	if (!bpf_jit_kallsyms_enabled())
 672		return NULL;
 673
 674	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
 675	return n ?
 676	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
 677	       NULL;
 678}
 679
 680const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
 681				 unsigned long *off, char *sym)
 682{
 683	unsigned long symbol_start, symbol_end;
 684	struct bpf_prog *prog;
 685	char *ret = NULL;
 686
 687	rcu_read_lock();
 688	prog = bpf_prog_kallsyms_find(addr);
 689	if (prog) {
 690		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
 691		bpf_get_prog_name(prog, sym);
 692
 693		ret = sym;
 694		if (size)
 695			*size = symbol_end - symbol_start;
 696		if (off)
 697			*off  = addr - symbol_start;
 698	}
 699	rcu_read_unlock();
 700
 701	return ret;
 702}
 703
 704bool is_bpf_text_address(unsigned long addr)
 705{
 706	bool ret;
 707
 708	rcu_read_lock();
 709	ret = bpf_prog_kallsyms_find(addr) != NULL;
 710	rcu_read_unlock();
 711
 712	return ret;
 713}
 714
 715int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 716		    char *sym)
 717{
 718	struct bpf_prog_aux *aux;
 719	unsigned int it = 0;
 720	int ret = -ERANGE;
 721
 722	if (!bpf_jit_kallsyms_enabled())
 723		return ret;
 724
 725	rcu_read_lock();
 726	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
 727		if (it++ != symnum)
 728			continue;
 729
 730		bpf_get_prog_name(aux->prog, sym);
 731
 732		*value = (unsigned long)aux->prog->bpf_func;
 733		*type  = BPF_SYM_ELF_TYPE;
 734
 735		ret = 0;
 736		break;
 737	}
 738	rcu_read_unlock();
 739
 740	return ret;
 741}
 742
 743static atomic_long_t bpf_jit_current;
 744
 745/* Can be overridden by an arch's JIT compiler if it has a custom,
 746 * dedicated BPF backend memory area, or if neither of the two
 747 * below apply.
 748 */
 749u64 __weak bpf_jit_alloc_exec_limit(void)
 750{
 751#if defined(MODULES_VADDR)
 752	return MODULES_END - MODULES_VADDR;
 753#else
 754	return VMALLOC_END - VMALLOC_START;
 755#endif
 756}
 757
 758static int __init bpf_jit_charge_init(void)
 759{
 760	/* Only used as heuristic here to derive limit. */
 761	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
 762					    PAGE_SIZE), LONG_MAX);
 763	return 0;
 764}
 765pure_initcall(bpf_jit_charge_init);
 766
 767static int bpf_jit_charge_modmem(u32 pages)
 768{
 769	if (atomic_long_add_return(pages, &bpf_jit_current) >
 770	    (bpf_jit_limit >> PAGE_SHIFT)) {
 771		if (!capable(CAP_SYS_ADMIN)) {
 772			atomic_long_sub(pages, &bpf_jit_current);
 773			return -EPERM;
 774		}
 775	}
 776
 777	return 0;
 778}
 779
 780static void bpf_jit_uncharge_modmem(u32 pages)
 781{
 782	atomic_long_sub(pages, &bpf_jit_current);
 783}
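/* Editorial note, not part of the original kernel source: with the default
 * from bpf_jit_charge_init() the limit is a quarter of the JIT memory area,
 * rounded up to a page; on x86-64, where the module area spans roughly
 * 1 GiB, that works out to about 256 MiB. bpf_jit_charge_modmem() compares
 * page counts against bpf_jit_limit >> PAGE_SHIFT and only lets
 * CAP_SYS_ADMIN callers exceed the limit.
 */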
 784
 785void *__weak bpf_jit_alloc_exec(unsigned long size)
 786{
 787	return module_alloc(size);
 788}
 789
 790void __weak bpf_jit_free_exec(void *addr)
 791{
 792	module_memfree(addr);
 793}
 794
 795struct bpf_binary_header *
 796bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 797		     unsigned int alignment,
 798		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
 799{
 800	struct bpf_binary_header *hdr;
 801	u32 size, hole, start, pages;
 802
 803	/* Most of BPF filters are really small, but if some of them
 804	 * fill a page, allow at least 128 extra bytes to insert a
 805	 * random section of illegal instructions.
 806	 */
 807	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
 808	pages = size / PAGE_SIZE;
 809
 810	if (bpf_jit_charge_modmem(pages))
 811		return NULL;
 812	hdr = bpf_jit_alloc_exec(size);
 813	if (!hdr) {
 814		bpf_jit_uncharge_modmem(pages);
 815		return NULL;
 816	}
 817
 818	/* Fill space with illegal/arch-dep instructions. */
 819	bpf_fill_ill_insns(hdr, size);
 820
 821	hdr->pages = pages;
 822	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 823		     PAGE_SIZE - sizeof(*hdr));
 824	start = (get_random_int() % hole) & ~(alignment - 1);
 825
 826	/* Leave a random number of instructions before BPF code. */
 827	*image_ptr = &hdr->image[start];
 828
 829	return hdr;
 830}
 831
 832void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 833{
 834	u32 pages = hdr->pages;
 835
 836	bpf_jit_free_exec(hdr);
 837	bpf_jit_uncharge_modmem(pages);
 838}
 839
 840/* This symbol is only overridden by archs that have different
 841 * requirements than the usual eBPF JITs, f.e. when they only
 842 * implement cBPF JIT, do not set images read-only, etc.
 843 */
 844void __weak bpf_jit_free(struct bpf_prog *fp)
 845{
 846	if (fp->jited) {
 847		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
 848
 849		bpf_jit_binary_free(hdr);
 850
 851		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
 852	}
 853
 854	bpf_prog_unlock_free(fp);
 855}
 856
 857int bpf_jit_get_func_addr(const struct bpf_prog *prog,
 858			  const struct bpf_insn *insn, bool extra_pass,
 859			  u64 *func_addr, bool *func_addr_fixed)
 860{
 861	s16 off = insn->off;
 862	s32 imm = insn->imm;
 863	u8 *addr;
 864
 865	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
 866	if (!*func_addr_fixed) {
 867		/* Place-holder address till the last pass has collected
 868		 * all addresses for JITed subprograms in which case we
 869		 * can pick them up from prog->aux.
 870		 */
 871		if (!extra_pass)
 872			addr = NULL;
 873		else if (prog->aux->func &&
 874			 off >= 0 && off < prog->aux->func_cnt)
 875			addr = (u8 *)prog->aux->func[off]->bpf_func;
 876		else
 877			return -EINVAL;
 878	} else {
 879		/* Address of a BPF helper call. Since part of the core
 880		 * kernel, it's always at a fixed location. __bpf_call_base
 881		 * and the helper with imm relative to it are both in core
 882		 * kernel.
 883		 */
 884		addr = (u8 *)__bpf_call_base + imm;
 885	}
 886
 887	*func_addr = (unsigned long)addr;
 888	return 0;
 889}
 890
 891static int bpf_jit_blind_insn(const struct bpf_insn *from,
 892			      const struct bpf_insn *aux,
 893			      struct bpf_insn *to_buff,
 894			      bool emit_zext)
 895{
 896	struct bpf_insn *to = to_buff;
 897	u32 imm_rnd = get_random_int();
 898	s16 off;
 899
 900	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
 901	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
 902
 903	/* Constraints on AX register:
 904	 *
 905	 * AX register is inaccessible from user space. It is mapped in
 906	 * all JITs, and used here for constant blinding rewrites. It is
 907	 * typically "stateless" meaning its contents are only valid within
 908	 * the executed instruction, but not across several instructions.
 909	 * There are a few exceptions however which are further detailed
 910	 * below.
 911	 *
 912	 * Constant blinding is only used by JITs, not in the interpreter.
 913	 * The interpreter uses AX in some occasions as a local temporary
 914	 * register e.g. in DIV or MOD instructions.
 915	 *
 916	 * In restricted circumstances, the verifier can also use the AX
 917	 * register for rewrites as long as they do not interfere with
 918	 * the above cases!
 919	 */
 920	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
 921		goto out;
 922
 923	if (from->imm == 0 &&
 924	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
 925	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
 926		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
 927		goto out;
 928	}
 929
 930	switch (from->code) {
 931	case BPF_ALU | BPF_ADD | BPF_K:
 932	case BPF_ALU | BPF_SUB | BPF_K:
 933	case BPF_ALU | BPF_AND | BPF_K:
 934	case BPF_ALU | BPF_OR  | BPF_K:
 935	case BPF_ALU | BPF_XOR | BPF_K:
 936	case BPF_ALU | BPF_MUL | BPF_K:
 937	case BPF_ALU | BPF_MOV | BPF_K:
 938	case BPF_ALU | BPF_DIV | BPF_K:
 939	case BPF_ALU | BPF_MOD | BPF_K:
 940		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 941		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 942		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
 943		break;
 944
 945	case BPF_ALU64 | BPF_ADD | BPF_K:
 946	case BPF_ALU64 | BPF_SUB | BPF_K:
 947	case BPF_ALU64 | BPF_AND | BPF_K:
 948	case BPF_ALU64 | BPF_OR  | BPF_K:
 949	case BPF_ALU64 | BPF_XOR | BPF_K:
 950	case BPF_ALU64 | BPF_MUL | BPF_K:
 951	case BPF_ALU64 | BPF_MOV | BPF_K:
 952	case BPF_ALU64 | BPF_DIV | BPF_K:
 953	case BPF_ALU64 | BPF_MOD | BPF_K:
 954		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 955		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 956		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
 957		break;
 958
 959	case BPF_JMP | BPF_JEQ  | BPF_K:
 960	case BPF_JMP | BPF_JNE  | BPF_K:
 961	case BPF_JMP | BPF_JGT  | BPF_K:
 962	case BPF_JMP | BPF_JLT  | BPF_K:
 963	case BPF_JMP | BPF_JGE  | BPF_K:
 964	case BPF_JMP | BPF_JLE  | BPF_K:
 965	case BPF_JMP | BPF_JSGT | BPF_K:
 966	case BPF_JMP | BPF_JSLT | BPF_K:
 967	case BPF_JMP | BPF_JSGE | BPF_K:
 968	case BPF_JMP | BPF_JSLE | BPF_K:
 969	case BPF_JMP | BPF_JSET | BPF_K:
 970		/* Accommodate for extra offset in case of a backjump. */
 971		off = from->off;
 972		if (off < 0)
 973			off -= 2;
 974		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 975		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 976		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
 977		break;
 978
 979	case BPF_JMP32 | BPF_JEQ  | BPF_K:
 980	case BPF_JMP32 | BPF_JNE  | BPF_K:
 981	case BPF_JMP32 | BPF_JGT  | BPF_K:
 982	case BPF_JMP32 | BPF_JLT  | BPF_K:
 983	case BPF_JMP32 | BPF_JGE  | BPF_K:
 984	case BPF_JMP32 | BPF_JLE  | BPF_K:
 985	case BPF_JMP32 | BPF_JSGT | BPF_K:
 986	case BPF_JMP32 | BPF_JSLT | BPF_K:
 987	case BPF_JMP32 | BPF_JSGE | BPF_K:
 988	case BPF_JMP32 | BPF_JSLE | BPF_K:
 989	case BPF_JMP32 | BPF_JSET | BPF_K:
 990		/* Accommodate for extra offset in case of a backjump. */
 991		off = from->off;
 992		if (off < 0)
 993			off -= 2;
 994		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
 995		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
 996		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
 997				      off);
 998		break;
 999
1000	case BPF_LD | BPF_IMM | BPF_DW:
1001		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1002		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1003		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1004		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1005		break;
1006	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1007		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1008		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1009		if (emit_zext)
1010			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
1011		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
1012		break;
1013
1014	case BPF_ST | BPF_MEM | BPF_DW:
1015	case BPF_ST | BPF_MEM | BPF_W:
1016	case BPF_ST | BPF_MEM | BPF_H:
1017	case BPF_ST | BPF_MEM | BPF_B:
1018		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1019		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1020		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1021		break;
1022	}
1023out:
1024	return to - to_buff;
1025}
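/* Editorial worked example, not part of the original kernel source: under
 * constant blinding, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 42) is in effect
 * rewritten to
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 42)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * so the attacker-chosen constant 42 never appears verbatim in the JITed
 * image, yet AX still holds 42 before the final add.
 */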
1026
1027static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1028					      gfp_t gfp_extra_flags)
1029{
1030	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1031	struct bpf_prog *fp;
1032
1033	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
1034	if (fp != NULL) {
1035		/* aux->prog still points to the fp_other one, so
1036		 * when promoting the clone to the real program,
1037		 * this still needs to be adapted.
1038		 */
1039		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1040	}
1041
1042	return fp;
1043}
1044
1045static void bpf_prog_clone_free(struct bpf_prog *fp)
1046{
1047	/* aux was stolen by the other clone, so we cannot free
1048	 * it from this path! It will be freed eventually by the
1049	 * other program on release.
1050	 *
1051	 * At this point, we don't need a deferred release since
1052	 * clone is guaranteed to not be locked.
1053	 */
1054	fp->aux = NULL;
1055	__bpf_prog_free(fp);
1056}
1057
1058void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1059{
1060	/* We have to repoint aux->prog to self, as we don't
1061	 * know whether fp here is the clone or the original.
1062	 */
1063	fp->aux->prog = fp;
1064	bpf_prog_clone_free(fp_other);
1065}
1066
1067struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1068{
1069	struct bpf_insn insn_buff[16], aux[2];
1070	struct bpf_prog *clone, *tmp;
1071	int insn_delta, insn_cnt;
1072	struct bpf_insn *insn;
1073	int i, rewritten;
1074
1075	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
1076		return prog;
1077
1078	clone = bpf_prog_clone_create(prog, GFP_USER);
1079	if (!clone)
1080		return ERR_PTR(-ENOMEM);
1081
1082	insn_cnt = clone->len;
1083	insn = clone->insnsi;
1084
1085	for (i = 0; i < insn_cnt; i++, insn++) {
1086		/* We temporarily need to hold the original ld64 insn
1087		 * so that we can still access the first part in the
1088		 * second blinding run.
1089		 */
1090		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1091		    insn[1].code == 0)
1092			memcpy(aux, insn, sizeof(aux));
1093
1094		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1095						clone->aux->verifier_zext);
1096		if (!rewritten)
1097			continue;
1098
1099		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1100		if (IS_ERR(tmp)) {
1101			/* Patching may have repointed aux->prog during
1102			 * realloc from the original one, so we need to
1103			 * fix it up here on error.
1104			 */
1105			bpf_jit_prog_release_other(prog, clone);
1106			return tmp;
1107		}
1108
1109		clone = tmp;
1110		insn_delta = rewritten - 1;
1111
1112		/* Walk new program and skip insns we just inserted. */
1113		insn = clone->insnsi + i + insn_delta;
1114		insn_cnt += insn_delta;
1115		i        += insn_delta;
1116	}
1117
1118	clone->blinded = 1;
1119	return clone;
1120}
1121#endif /* CONFIG_BPF_JIT */
1122
1123/* Base function for offset calculation. Needs to go into .text section,
1124 * therefore keeping it non-static as well; will also be used by JITs
1125 * anyway later on, so do not let the compiler omit it. This also needs
1126 * to go into kallsyms for correlation from e.g. bpftool, so naming
1127 * must not change.
1128 */
1129noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1130{
1131	return 0;
1132}
1133EXPORT_SYMBOL_GPL(__bpf_call_base);
1134
1135/* All UAPI available opcodes. */
1136#define BPF_INSN_MAP(INSN_2, INSN_3)		\
1137	/* 32 bit ALU operations. */		\
1138	/*   Register based. */			\
1139	INSN_3(ALU, ADD,  X),			\
1140	INSN_3(ALU, SUB,  X),			\
1141	INSN_3(ALU, AND,  X),			\
1142	INSN_3(ALU, OR,   X),			\
1143	INSN_3(ALU, LSH,  X),			\
1144	INSN_3(ALU, RSH,  X),			\
1145	INSN_3(ALU, XOR,  X),			\
1146	INSN_3(ALU, MUL,  X),			\
1147	INSN_3(ALU, MOV,  X),			\
1148	INSN_3(ALU, ARSH, X),			\
1149	INSN_3(ALU, DIV,  X),			\
1150	INSN_3(ALU, MOD,  X),			\
1151	INSN_2(ALU, NEG),			\
1152	INSN_3(ALU, END, TO_BE),		\
1153	INSN_3(ALU, END, TO_LE),		\
1154	/*   Immediate based. */		\
1155	INSN_3(ALU, ADD,  K),			\
1156	INSN_3(ALU, SUB,  K),			\
1157	INSN_3(ALU, AND,  K),			\
1158	INSN_3(ALU, OR,   K),			\
1159	INSN_3(ALU, LSH,  K),			\
1160	INSN_3(ALU, RSH,  K),			\
1161	INSN_3(ALU, XOR,  K),			\
1162	INSN_3(ALU, MUL,  K),			\
1163	INSN_3(ALU, MOV,  K),			\
1164	INSN_3(ALU, ARSH, K),			\
1165	INSN_3(ALU, DIV,  K),			\
1166	INSN_3(ALU, MOD,  K),			\
1167	/* 64 bit ALU operations. */		\
1168	/*   Register based. */			\
1169	INSN_3(ALU64, ADD,  X),			\
1170	INSN_3(ALU64, SUB,  X),			\
1171	INSN_3(ALU64, AND,  X),			\
1172	INSN_3(ALU64, OR,   X),			\
1173	INSN_3(ALU64, LSH,  X),			\
1174	INSN_3(ALU64, RSH,  X),			\
1175	INSN_3(ALU64, XOR,  X),			\
1176	INSN_3(ALU64, MUL,  X),			\
1177	INSN_3(ALU64, MOV,  X),			\
1178	INSN_3(ALU64, ARSH, X),			\
1179	INSN_3(ALU64, DIV,  X),			\
1180	INSN_3(ALU64, MOD,  X),			\
1181	INSN_2(ALU64, NEG),			\
1182	/*   Immediate based. */		\
1183	INSN_3(ALU64, ADD,  K),			\
1184	INSN_3(ALU64, SUB,  K),			\
1185	INSN_3(ALU64, AND,  K),			\
1186	INSN_3(ALU64, OR,   K),			\
1187	INSN_3(ALU64, LSH,  K),			\
1188	INSN_3(ALU64, RSH,  K),			\
1189	INSN_3(ALU64, XOR,  K),			\
1190	INSN_3(ALU64, MUL,  K),			\
1191	INSN_3(ALU64, MOV,  K),			\
1192	INSN_3(ALU64, ARSH, K),			\
1193	INSN_3(ALU64, DIV,  K),			\
1194	INSN_3(ALU64, MOD,  K),			\
1195	/* Call instruction. */			\
1196	INSN_2(JMP, CALL),			\
1197	/* Exit instruction. */			\
1198	INSN_2(JMP, EXIT),			\
1199	/* 32-bit Jump instructions. */		\
1200	/*   Register based. */			\
1201	INSN_3(JMP32, JEQ,  X),			\
1202	INSN_3(JMP32, JNE,  X),			\
1203	INSN_3(JMP32, JGT,  X),			\
1204	INSN_3(JMP32, JLT,  X),			\
1205	INSN_3(JMP32, JGE,  X),			\
1206	INSN_3(JMP32, JLE,  X),			\
1207	INSN_3(JMP32, JSGT, X),			\
1208	INSN_3(JMP32, JSLT, X),			\
1209	INSN_3(JMP32, JSGE, X),			\
1210	INSN_3(JMP32, JSLE, X),			\
1211	INSN_3(JMP32, JSET, X),			\
1212	/*   Immediate based. */		\
1213	INSN_3(JMP32, JEQ,  K),			\
1214	INSN_3(JMP32, JNE,  K),			\
1215	INSN_3(JMP32, JGT,  K),			\
1216	INSN_3(JMP32, JLT,  K),			\
1217	INSN_3(JMP32, JGE,  K),			\
1218	INSN_3(JMP32, JLE,  K),			\
1219	INSN_3(JMP32, JSGT, K),			\
1220	INSN_3(JMP32, JSLT, K),			\
1221	INSN_3(JMP32, JSGE, K),			\
1222	INSN_3(JMP32, JSLE, K),			\
1223	INSN_3(JMP32, JSET, K),			\
1224	/* Jump instructions. */		\
1225	/*   Register based. */			\
1226	INSN_3(JMP, JEQ,  X),			\
1227	INSN_3(JMP, JNE,  X),			\
1228	INSN_3(JMP, JGT,  X),			\
1229	INSN_3(JMP, JLT,  X),			\
1230	INSN_3(JMP, JGE,  X),			\
1231	INSN_3(JMP, JLE,  X),			\
1232	INSN_3(JMP, JSGT, X),			\
1233	INSN_3(JMP, JSLT, X),			\
1234	INSN_3(JMP, JSGE, X),			\
1235	INSN_3(JMP, JSLE, X),			\
1236	INSN_3(JMP, JSET, X),			\
1237	/*   Immediate based. */		\
1238	INSN_3(JMP, JEQ,  K),			\
1239	INSN_3(JMP, JNE,  K),			\
1240	INSN_3(JMP, JGT,  K),			\
1241	INSN_3(JMP, JLT,  K),			\
1242	INSN_3(JMP, JGE,  K),			\
1243	INSN_3(JMP, JLE,  K),			\
1244	INSN_3(JMP, JSGT, K),			\
1245	INSN_3(JMP, JSLT, K),			\
1246	INSN_3(JMP, JSGE, K),			\
1247	INSN_3(JMP, JSLE, K),			\
1248	INSN_3(JMP, JSET, K),			\
1249	INSN_2(JMP, JA),			\
1250	/* Store instructions. */		\
1251	/*   Register based. */			\
1252	INSN_3(STX, MEM,  B),			\
1253	INSN_3(STX, MEM,  H),			\
1254	INSN_3(STX, MEM,  W),			\
1255	INSN_3(STX, MEM,  DW),			\
1256	INSN_3(STX, XADD, W),			\
1257	INSN_3(STX, XADD, DW),			\
1258	/*   Immediate based. */		\
1259	INSN_3(ST, MEM, B),			\
1260	INSN_3(ST, MEM, H),			\
1261	INSN_3(ST, MEM, W),			\
1262	INSN_3(ST, MEM, DW),			\
1263	/* Load instructions. */		\
1264	/*   Register based. */			\
1265	INSN_3(LDX, MEM, B),			\
1266	INSN_3(LDX, MEM, H),			\
1267	INSN_3(LDX, MEM, W),			\
1268	INSN_3(LDX, MEM, DW),			\
1269	/*   Immediate based. */		\
1270	INSN_3(LD, IMM, DW)
1271
1272bool bpf_opcode_in_insntable(u8 code)
1273{
1274#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1275#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1276	static const bool public_insntable[256] = {
1277		[0 ... 255] = false,
1278		/* Now overwrite non-defaults ... */
1279		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1280		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1281		[BPF_LD | BPF_ABS | BPF_B] = true,
1282		[BPF_LD | BPF_ABS | BPF_H] = true,
1283		[BPF_LD | BPF_ABS | BPF_W] = true,
1284		[BPF_LD | BPF_IND | BPF_B] = true,
1285		[BPF_LD | BPF_IND | BPF_H] = true,
1286		[BPF_LD | BPF_IND | BPF_W] = true,
1287	};
1288#undef BPF_INSN_3_TBL
1289#undef BPF_INSN_2_TBL
1290	return public_insntable[code];
1291}
1292
1293#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1294/**
1295 *	__bpf_prog_run - run eBPF program on a given context
1296 *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1297 *	@insn: is the array of eBPF instructions
1298 *	@stack: is the eBPF storage stack
1299 *
1300 * Decode and execute eBPF instructions.
1301 */
1302static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
1303{
1304#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1305#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1306	static const void * const jumptable[256] __annotate_jump_table = {
1307		[0 ... 255] = &&default_label,
1308		/* Now overwrite non-defaults ... */
1309		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1310		/* Non-UAPI available opcodes. */
1311		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1312		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1313	};
1314#undef BPF_INSN_3_LBL
1315#undef BPF_INSN_2_LBL
1316	u32 tail_call_cnt = 0;
1317
1318#define CONT	 ({ insn++; goto select_insn; })
1319#define CONT_JMP ({ insn++; goto select_insn; })
1320
1321select_insn:
1322	goto *jumptable[insn->code];
1323
1324	/* ALU */
1325#define ALU(OPCODE, OP)			\
1326	ALU64_##OPCODE##_X:		\
1327		DST = DST OP SRC;	\
1328		CONT;			\
1329	ALU_##OPCODE##_X:		\
1330		DST = (u32) DST OP (u32) SRC;	\
1331		CONT;			\
1332	ALU64_##OPCODE##_K:		\
1333		DST = DST OP IMM;		\
1334		CONT;			\
1335	ALU_##OPCODE##_K:		\
1336		DST = (u32) DST OP (u32) IMM;	\
1337		CONT;
1338
1339	ALU(ADD,  +)
1340	ALU(SUB,  -)
1341	ALU(AND,  &)
1342	ALU(OR,   |)
1343	ALU(LSH, <<)
1344	ALU(RSH, >>)
1345	ALU(XOR,  ^)
1346	ALU(MUL,  *)
1347#undef ALU
1348	ALU_NEG:
1349		DST = (u32) -DST;
1350		CONT;
1351	ALU64_NEG:
1352		DST = -DST;
1353		CONT;
1354	ALU_MOV_X:
1355		DST = (u32) SRC;
1356		CONT;
1357	ALU_MOV_K:
1358		DST = (u32) IMM;
1359		CONT;
1360	ALU64_MOV_X:
1361		DST = SRC;
1362		CONT;
1363	ALU64_MOV_K:
1364		DST = IMM;
1365		CONT;
1366	LD_IMM_DW:
1367		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1368		insn++;
1369		CONT;
1370	ALU_ARSH_X:
1371		DST = (u64) (u32) (((s32) DST) >> SRC);
1372		CONT;
1373	ALU_ARSH_K:
1374		DST = (u64) (u32) (((s32) DST) >> IMM);
1375		CONT;
1376	ALU64_ARSH_X:
1377		(*(s64 *) &DST) >>= SRC;
1378		CONT;
1379	ALU64_ARSH_K:
1380		(*(s64 *) &DST) >>= IMM;
1381		CONT;
1382	ALU64_MOD_X:
1383		div64_u64_rem(DST, SRC, &AX);
1384		DST = AX;
1385		CONT;
1386	ALU_MOD_X:
1387		AX = (u32) DST;
1388		DST = do_div(AX, (u32) SRC);
1389		CONT;
1390	ALU64_MOD_K:
1391		div64_u64_rem(DST, IMM, &AX);
1392		DST = AX;
1393		CONT;
1394	ALU_MOD_K:
1395		AX = (u32) DST;
1396		DST = do_div(AX, (u32) IMM);
1397		CONT;
1398	ALU64_DIV_X:
1399		DST = div64_u64(DST, SRC);
1400		CONT;
1401	ALU_DIV_X:
1402		AX = (u32) DST;
1403		do_div(AX, (u32) SRC);
1404		DST = (u32) AX;
1405		CONT;
1406	ALU64_DIV_K:
1407		DST = div64_u64(DST, IMM);
1408		CONT;
1409	ALU_DIV_K:
1410		AX = (u32) DST;
1411		do_div(AX, (u32) IMM);
1412		DST = (u32) AX;
1413		CONT;
1414	ALU_END_TO_BE:
1415		switch (IMM) {
1416		case 16:
1417			DST = (__force u16) cpu_to_be16(DST);
1418			break;
1419		case 32:
1420			DST = (__force u32) cpu_to_be32(DST);
1421			break;
1422		case 64:
1423			DST = (__force u64) cpu_to_be64(DST);
1424			break;
1425		}
1426		CONT;
1427	ALU_END_TO_LE:
1428		switch (IMM) {
1429		case 16:
1430			DST = (__force u16) cpu_to_le16(DST);
1431			break;
1432		case 32:
1433			DST = (__force u32) cpu_to_le32(DST);
1434			break;
1435		case 64:
1436			DST = (__force u64) cpu_to_le64(DST);
1437			break;
1438		}
1439		CONT;
1440
1441	/* CALL */
1442	JMP_CALL:
1443		/* Function call scratches BPF_R1-BPF_R5 registers,
1444		 * preserves BPF_R6-BPF_R9, and stores return value
1445		 * into BPF_R0.
1446		 */
1447		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1448						       BPF_R4, BPF_R5);
1449		CONT;
1450
1451	JMP_CALL_ARGS:
1452		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1453							    BPF_R3, BPF_R4,
1454							    BPF_R5,
1455							    insn + insn->off + 1);
1456		CONT;
1457
1458	JMP_TAIL_CALL: {
1459		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1460		struct bpf_array *array = container_of(map, struct bpf_array, map);
1461		struct bpf_prog *prog;
1462		u32 index = BPF_R3;
1463
1464		if (unlikely(index >= array->map.max_entries))
1465			goto out;
1466		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1467			goto out;
1468
1469		tail_call_cnt++;
1470
1471		prog = READ_ONCE(array->ptrs[index]);
1472		if (!prog)
1473			goto out;
1474
1475		/* ARG1 at this point is guaranteed to point to CTX from
1476		 * the verifier side due to the fact that the tail call is
1477		 * handled like a helper, that is, bpf_tail_call_proto,
1478		 * where arg1_type is ARG_PTR_TO_CTX.
1479		 */
1480		insn = prog->insnsi;
1481		goto select_insn;
1482out:
1483		CONT;
1484	}
1485	JMP_JA:
1486		insn += insn->off;
1487		CONT;
1488	JMP_EXIT:
1489		return BPF_R0;
1490	/* JMP */
1491#define COND_JMP(SIGN, OPCODE, CMP_OP)				\
1492	JMP_##OPCODE##_X:					\
1493		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
1494			insn += insn->off;			\
1495			CONT_JMP;				\
1496		}						\
1497		CONT;						\
1498	JMP32_##OPCODE##_X:					\
1499		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
1500			insn += insn->off;			\
1501			CONT_JMP;				\
1502		}						\
1503		CONT;						\
1504	JMP_##OPCODE##_K:					\
1505		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
1506			insn += insn->off;			\
1507			CONT_JMP;				\
1508		}						\
1509		CONT;						\
1510	JMP32_##OPCODE##_K:					\
1511		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
1512			insn += insn->off;			\
1513			CONT_JMP;				\
1514		}						\
1515		CONT;
1516	COND_JMP(u, JEQ, ==)
1517	COND_JMP(u, JNE, !=)
1518	COND_JMP(u, JGT, >)
1519	COND_JMP(u, JLT, <)
1520	COND_JMP(u, JGE, >=)
1521	COND_JMP(u, JLE, <=)
1522	COND_JMP(u, JSET, &)
1523	COND_JMP(s, JSGT, >)
1524	COND_JMP(s, JSLT, <)
1525	COND_JMP(s, JSGE, >=)
1526	COND_JMP(s, JSLE, <=)
1527#undef COND_JMP
1528	/* STX and ST and LDX*/
1529#define LDST(SIZEOP, SIZE)						\
1530	STX_MEM_##SIZEOP:						\
1531		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
1532		CONT;							\
1533	ST_MEM_##SIZEOP:						\
1534		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
1535		CONT;							\
1536	LDX_MEM_##SIZEOP:						\
1537		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
1538		CONT;
1539
1540	LDST(B,   u8)
1541	LDST(H,  u16)
1542	LDST(W,  u32)
1543	LDST(DW, u64)
1544#undef LDST
1545	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1546		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1547			   (DST + insn->off));
1548		CONT;
1549	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1550		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1551			     (DST + insn->off));
1552		CONT;
1553
1554	default_label:
1555		/* If we ever reach this, we have a bug somewhere. Die hard here
1556		 * instead of just returning 0; we could be somewhere in a subprog,
1557		 * so execution could continue otherwise which we do /not/ want.
1558		 *
1559		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1560		 */
1561		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
1562		BUG_ON(1);
1563		return 0;
1564}
1565
1566#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1567#define DEFINE_BPF_PROG_RUN(stack_size) \
1568static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1569{ \
1570	u64 stack[stack_size / sizeof(u64)]; \
1571	u64 regs[MAX_BPF_EXT_REG]; \
1572\
1573	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1574	ARG1 = (u64) (unsigned long) ctx; \
1575	return ___bpf_prog_run(regs, insn, stack); \
1576}
1577
1578#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1579#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1580static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1581				      const struct bpf_insn *insn) \
1582{ \
1583	u64 stack[stack_size / sizeof(u64)]; \
1584	u64 regs[MAX_BPF_EXT_REG]; \
1585\
1586	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1587	BPF_R1 = r1; \
1588	BPF_R2 = r2; \
1589	BPF_R3 = r3; \
1590	BPF_R4 = r4; \
1591	BPF_R5 = r5; \
1592	return ___bpf_prog_run(regs, insn, stack); \
1593}
1594
1595#define EVAL1(FN, X) FN(X)
1596#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1597#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1598#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1599#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1600#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1601
1602EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1603EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1604EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1605
1606EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1607EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1608EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
1609
1610#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1611
1612static unsigned int (*interpreters[])(const void *ctx,
1613				      const struct bpf_insn *insn) = {
1614EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1615EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1616EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1617};
1618#undef PROG_NAME_LIST
1619#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1620static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1621				  const struct bpf_insn *insn) = {
1622EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1623EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1624EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1625};
1626#undef PROG_NAME_LIST
1627
1628void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1629{
1630	stack_depth = max_t(u32, stack_depth, 1);
1631	insn->off = (s16) insn->imm;
1632	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1633		__bpf_call_base_args;
1634	insn->code = BPF_JMP | BPF_CALL_ARGS;
1635}
1636
1637#else
1638static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1639					 const struct bpf_insn *insn)
1640{
1641	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1642	 * is not working properly, so warn about it!
1643	 */
1644	WARN_ON_ONCE(1);
1645	return 0;
1646}
1647#endif
1648
1649bool bpf_prog_array_compatible(struct bpf_array *array,
1650			       const struct bpf_prog *fp)
1651{
1652	if (fp->kprobe_override)
1653		return false;
1654
1655	if (!array->owner_prog_type) {
1656		/* There's no owner yet where we could check for
1657		 * compatibility.
1658		 */
1659		array->owner_prog_type = fp->type;
1660		array->owner_jited = fp->jited;
1661
1662		return true;
1663	}
1664
1665	return array->owner_prog_type == fp->type &&
1666	       array->owner_jited == fp->jited;
1667}
1668
1669static int bpf_check_tail_call(const struct bpf_prog *fp)
1670{
1671	struct bpf_prog_aux *aux = fp->aux;
1672	int i;
1673
1674	for (i = 0; i < aux->used_map_cnt; i++) {
1675		struct bpf_map *map = aux->used_maps[i];
1676		struct bpf_array *array;
1677
1678		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1679			continue;
1680
1681		array = container_of(map, struct bpf_array, map);
1682		if (!bpf_prog_array_compatible(array, fp))
1683			return -EINVAL;
1684	}
1685
1686	return 0;
1687}
1688
1689static void bpf_prog_select_func(struct bpf_prog *fp)
1690{
1691#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1692	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1693
1694	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1695#else
1696	fp->bpf_func = __bpf_prog_ret0_warn;
1697#endif
1698}
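/* Editorial worked example, not part of the original kernel source: for a
 * verified stack_depth of 100 bytes, round_up(100, 32) = 128, so index
 * 128/32 - 1 = 3 selects __bpf_prog_run128(), the interpreter variant that
 * reserves a 128-byte (16 x u64) stack frame.
 */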
1699
1700/**
1701 *	bpf_prog_select_runtime - select exec runtime for BPF program
1702 *	@fp: bpf_prog populated with internal BPF program
1703 *	@err: pointer to error variable
1704 *
1705 * Try to JIT eBPF program, if JIT is not available, use interpreter.
1706 * The BPF program will be executed via BPF_PROG_RUN() macro.
1707 */
1708struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1709{
1710	/* In case of BPF to BPF calls, verifier did all the prep
1711	 * work with regards to JITing, etc.
1712	 */
1713	if (fp->bpf_func)
1714		goto finalize;
1715
1716	bpf_prog_select_func(fp);
1717
1718	/* eBPF JITs can rewrite the program in case constant
1719	 * blinding is active. However, in case of error during
1720	 * blinding, bpf_int_jit_compile() must always return a
1721	 * valid program, which in this case would simply not
1722	 * be JITed, but falls back to the interpreter.
1723	 */
1724	if (!bpf_prog_is_dev_bound(fp->aux)) {
1725		*err = bpf_prog_alloc_jited_linfo(fp);
1726		if (*err)
1727			return fp;
1728
1729		fp = bpf_int_jit_compile(fp);
1730		if (!fp->jited) {
1731			bpf_prog_free_jited_linfo(fp);
1732#ifdef CONFIG_BPF_JIT_ALWAYS_ON
1733			*err = -ENOTSUPP;
1734			return fp;
1735#endif
1736		} else {
1737			bpf_prog_free_unused_jited_linfo(fp);
1738		}
1739	} else {
1740		*err = bpf_prog_offload_compile(fp);
1741		if (*err)
1742			return fp;
1743	}
1744
1745finalize:
1746	bpf_prog_lock_ro(fp);
1747
1748	/* The tail call compatibility check can only be done at
1749	 * this late stage as we need to determine, if we deal
1750	 * with JITed or non JITed program concatenations and not
1751	 * all eBPF JITs might immediately support all features.
1752	 */
1753	*err = bpf_check_tail_call(fp);
1754
1755	return fp;
1756}
1757EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
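/* Editorial sketch, not part of the original kernel source: the
 * program-load path calls this right after verification, roughly:
 *
 *	fp = bpf_prog_select_runtime(fp, &err);
 *	if (err < 0)
 *		goto free;
 *
 * On success fp->bpf_func points at the JITed image or at one of the
 * interpreter variants selected above (offloaded programs aside), and the
 * program has been locked read-only via bpf_prog_lock_ro().
 */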
1758
1759static unsigned int __bpf_prog_ret1(const void *ctx,
1760				    const struct bpf_insn *insn)
1761{
1762	return 1;
1763}
1764
1765static struct bpf_prog_dummy {
1766	struct bpf_prog prog;
1767} dummy_bpf_prog = {
1768	.prog = {
1769		.bpf_func = __bpf_prog_ret1,
1770	},
1771};
1772
1773/* To avoid allocating an empty bpf_prog_array for cgroups that don't have
1774 * any bpf program attached, use one global 'empty_prog_array'. It will not
1775 * be modified by the caller of bpf_prog_array_alloc() (since the caller
1776 * requested prog_cnt == 0), and that pointer should still be 'freed' via
1777 * bpf_prog_array_free().
1778 */
1779static struct {
1780	struct bpf_prog_array hdr;
1781	struct bpf_prog *null_prog;
1782} empty_prog_array = {
1783	.null_prog = NULL,
1784};
1785
1786struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1787{
1788	if (prog_cnt)
1789		return kzalloc(sizeof(struct bpf_prog_array) +
1790			       sizeof(struct bpf_prog_array_item) *
1791			       (prog_cnt + 1),
1792			       flags);
1793
1794	return &empty_prog_array.hdr;
1795}
1796
1797void bpf_prog_array_free(struct bpf_prog_array *progs)
1798{
1799	if (!progs || progs == &empty_prog_array.hdr)
1800		return;
1801	kfree_rcu(progs, rcu);
1802}
1803
1804int bpf_prog_array_length(struct bpf_prog_array *array)
1805{
1806	struct bpf_prog_array_item *item;
1807	u32 cnt = 0;
1808
1809	for (item = array->items; item->prog; item++)
1810		if (item->prog != &dummy_bpf_prog.prog)
1811			cnt++;
1812	return cnt;
1813}
1814
1815bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
1816{
1817	struct bpf_prog_array_item *item;
1818
1819	for (item = array->items; item->prog; item++)
1820		if (item->prog != &dummy_bpf_prog.prog)
1821			return false;
1822	return true;
1823}
1824
1825static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
1826				     u32 *prog_ids,
1827				     u32 request_cnt)
1828{
1829	struct bpf_prog_array_item *item;
1830	int i = 0;
1831
1832	for (item = array->items; item->prog; item++) {
1833		if (item->prog == &dummy_bpf_prog.prog)
1834			continue;
1835		prog_ids[i] = item->prog->aux->id;
1836		if (++i == request_cnt) {
1837			item++;
1838			break;
1839		}
1840	}
1841
1842	return !!(item->prog);
1843}
1844
1845int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
1846				__u32 __user *prog_ids, u32 cnt)
1847{
1848	unsigned long err = 0;
1849	bool nospc;
1850	u32 *ids;
1851
1852	/* users of this function are doing:
1853	 * cnt = bpf_prog_array_length();
1854	 * if (cnt > 0)
1855	 *     bpf_prog_array_copy_to_user(..., cnt);
1856	 * so below kcalloc doesn't need extra cnt > 0 check.
1857	 */
1858	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
1859	if (!ids)
1860		return -ENOMEM;
1861	nospc = bpf_prog_array_copy_core(array, ids, cnt);
1862	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1863	kfree(ids);
1864	if (err)
1865		return -EFAULT;
1866	if (nospc)
1867		return -ENOSPC;
1868	return 0;
1869}
1870
1871void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
1872				struct bpf_prog *old_prog)
1873{
1874	struct bpf_prog_array_item *item;
1875
1876	for (item = array->items; item->prog; item++)
1877		if (item->prog == old_prog) {
1878			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
1879			break;
1880		}
1881}
1882
1883int bpf_prog_array_copy(struct bpf_prog_array *old_array,
1884			struct bpf_prog *exclude_prog,
1885			struct bpf_prog *include_prog,
1886			struct bpf_prog_array **new_array)
1887{
1888	int new_prog_cnt, carry_prog_cnt = 0;
1889	struct bpf_prog_array_item *existing;
1890	struct bpf_prog_array *array;
1891	bool found_exclude = false;
1892	int new_prog_idx = 0;
1893
1894	/* Figure out how many existing progs we need to carry over to
1895	 * the new array.
1896	 */
1897	if (old_array) {
1898		existing = old_array->items;
1899		for (; existing->prog; existing++) {
1900			if (existing->prog == exclude_prog) {
1901				found_exclude = true;
1902				continue;
1903			}
1904			if (existing->prog != &dummy_bpf_prog.prog)
1905				carry_prog_cnt++;
1906			if (existing->prog == include_prog)
1907				return -EEXIST;
1908		}
1909	}
1910
1911	if (exclude_prog && !found_exclude)
1912		return -ENOENT;
1913
1914	/* How many progs (not NULL) will be in the new array? */
1915	new_prog_cnt = carry_prog_cnt;
1916	if (include_prog)
1917		new_prog_cnt += 1;
1918
1919	/* Do we have any prog (not NULL) in the new array? */
1920	if (!new_prog_cnt) {
1921		*new_array = NULL;
1922		return 0;
1923	}
1924
1925	/* +1 as the end of prog_array is marked with NULL */
1926	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
1927	if (!array)
1928		return -ENOMEM;
1929
1930	/* Fill in the new prog array */
1931	if (carry_prog_cnt) {
1932		existing = old_array->items;
1933		for (; existing->prog; existing++)
1934			if (existing->prog != exclude_prog &&
1935			    existing->prog != &dummy_bpf_prog.prog) {
1936				array->items[new_prog_idx++].prog =
1937					existing->prog;
1938			}
1939	}
1940	if (include_prog)
1941		array->items[new_prog_idx++].prog = include_prog;
1942	array->items[new_prog_idx].prog = NULL;
1943	*new_array = array;
1944	return 0;
1945}
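/* Editor's note: a hypothetical attach helper, not kernel code, sketching how
 * a caller might use bpf_prog_array_copy() to append a program. Real users
 * (e.g. the cgroup and perf attach paths) publish the new array with
 * rcu_assign_pointer() and free the old one after a grace period.
 */
static int __maybe_unused example_append_prog(struct bpf_prog_array *old_array,
					      struct bpf_prog *prog,
					      struct bpf_prog_array **new_array)
{
	/* NULL exclude_prog: nothing is dropped, prog is appended at the end */
	return bpf_prog_array_copy(old_array, NULL, prog, new_array);
}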
1946
1947int bpf_prog_array_copy_info(struct bpf_prog_array *array,
1948			     u32 *prog_ids, u32 request_cnt,
1949			     u32 *prog_cnt)
1950{
1951	u32 cnt = 0;
1952
1953	if (array)
1954		cnt = bpf_prog_array_length(array);
1955
1956	*prog_cnt = cnt;
1957
1958	/* return early if user requested only program count or nothing to copy */
1959	if (!request_cnt || !cnt)
1960		return 0;
1961
1962	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1963	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
1964								     : 0;
1965}
1966
1967static void bpf_prog_free_deferred(struct work_struct *work)
1968{
1969	struct bpf_prog_aux *aux;
1970	int i;
1971
1972	aux = container_of(work, struct bpf_prog_aux, work);
1973	if (bpf_prog_is_dev_bound(aux))
1974		bpf_prog_offload_destroy(aux->prog);
1975#ifdef CONFIG_PERF_EVENTS
1976	if (aux->prog->has_callchain_buf)
1977		put_callchain_buffers();
1978#endif
1979	for (i = 0; i < aux->func_cnt; i++)
1980		bpf_jit_free(aux->func[i]);
1981	if (aux->func_cnt) {
1982		kfree(aux->func);
1983		bpf_prog_unlock_free(aux->prog);
1984	} else {
1985		bpf_jit_free(aux->prog);
1986	}
1987}
1988
1989/* Free internal BPF program */
1990void bpf_prog_free(struct bpf_prog *fp)
1991{
1992	struct bpf_prog_aux *aux = fp->aux;
1993
1994	INIT_WORK(&aux->work, bpf_prog_free_deferred);
1995	schedule_work(&aux->work);
1996}
1997EXPORT_SYMBOL_GPL(bpf_prog_free);
1998
1999/* RNG for unprivileged user space with separated state from prandom_u32(). */
2000static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2001
2002void bpf_user_rnd_init_once(void)
2003{
2004	prandom_init_once(&bpf_user_rnd_state);
2005}
2006
2007BPF_CALL_0(bpf_user_rnd_u32)
2008{
2009	/* Should someone ever have the rather unwise idea to use some
2010	 * of the registers passed into this function, then note that
2011	 * this function is called from native eBPF and classic-to-eBPF
2012	 * transformations. Register assignments from both sides are
2013	 * different, f.e. classic always sets fn(ctx, A, X) here.
2014	 */
2015	struct rnd_state *state;
2016	u32 res;
2017
2018	state = &get_cpu_var(bpf_user_rnd_state);
2019	res = prandom_u32_state(state);
2020	put_cpu_var(bpf_user_rnd_state);
2021
2022	return res;
2023}
2024
2025/* Weak definitions of helper functions in case we don't have bpf syscall. */
2026const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2027const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2028const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2029const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2030const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2031const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2032const struct bpf_func_proto bpf_spin_lock_proto __weak;
2033const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2034
2035const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2036const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2037const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2038const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2039
2040const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2041const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2042const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2043const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2044const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2045
2046const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2047{
2048	return NULL;
2049}
2050
2051u64 __weak
2052bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2053		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2054{
2055	return -ENOTSUPP;
2056}
2057EXPORT_SYMBOL_GPL(bpf_event_output);
2058
2059/* Always built-in helper functions. */
2060const struct bpf_func_proto bpf_tail_call_proto = {
2061	.func		= NULL,
2062	.gpl_only	= false,
2063	.ret_type	= RET_VOID,
2064	.arg1_type	= ARG_PTR_TO_CTX,
2065	.arg2_type	= ARG_CONST_MAP_PTR,
2066	.arg3_type	= ARG_ANYTHING,
2067};
2068
2069/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2070 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2071 * eBPF and implicitly also cBPF can get JITed!
2072 */
2073struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2074{
2075	return prog;
2076}
2077
2078/* Stub for JITs that support eBPF. All cBPF code gets transformed into
2079 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2080 */
2081void __weak bpf_jit_compile(struct bpf_prog *prog)
2082{
2083}
2084
2085bool __weak bpf_helper_changes_pkt_data(void *func)
2086{
2087	return false;
2088}
2089
2090/* Return TRUE if the JIT backend wants the verifier to enable sub-register usage
2091 * analysis code and wants explicit zero extension inserted by the verifier.
2092 * Otherwise, return FALSE.
2093 */
2094bool __weak bpf_jit_needs_zext(void)
2095{
2096	return false;
2097}
2098
2099/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2100 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2101 */
2102int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2103			 int len)
2104{
2105	return -EFAULT;
2106}
2107
2108DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2109EXPORT_SYMBOL(bpf_stats_enabled_key);
2110
2111/* All definitions of tracepoints related to BPF. */
2112#define CREATE_TRACE_POINTS
2113#include <linux/bpf_trace.h>
2114
2115EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2116EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Linux Socket Filter - Kernel level socket filtering
   4 *
   5 * Based on the design of the Berkeley Packet Filter. The new
   6 * internal format has been designed by PLUMgrid:
   7 *
   8 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
   9 *
  10 * Authors:
  11 *
  12 *	Jay Schulist <jschlst@samba.org>
  13 *	Alexei Starovoitov <ast@plumgrid.com>
  14 *	Daniel Borkmann <dborkman@redhat.com>
  15 *
  16 * Andi Kleen - Fix a few bad bugs and races.
  17 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
  18 */
  19
  20#include <uapi/linux/btf.h>
  21#include <linux/filter.h>
  22#include <linux/skbuff.h>
  23#include <linux/vmalloc.h>
  24#include <linux/random.h>
  25#include <linux/moduleloader.h>
  26#include <linux/bpf.h>
  27#include <linux/btf.h>
  28#include <linux/objtool.h>
  29#include <linux/rbtree_latch.h>
  30#include <linux/kallsyms.h>
  31#include <linux/rcupdate.h>
  32#include <linux/perf_event.h>
  33#include <linux/extable.h>
  34#include <linux/log2.h>
  35
  36#include <asm/barrier.h>
  37#include <asm/unaligned.h>
  38
  39/* Registers */
  40#define BPF_R0	regs[BPF_REG_0]
  41#define BPF_R1	regs[BPF_REG_1]
  42#define BPF_R2	regs[BPF_REG_2]
  43#define BPF_R3	regs[BPF_REG_3]
  44#define BPF_R4	regs[BPF_REG_4]
  45#define BPF_R5	regs[BPF_REG_5]
  46#define BPF_R6	regs[BPF_REG_6]
  47#define BPF_R7	regs[BPF_REG_7]
  48#define BPF_R8	regs[BPF_REG_8]
  49#define BPF_R9	regs[BPF_REG_9]
  50#define BPF_R10	regs[BPF_REG_10]
  51
  52/* Named registers */
  53#define DST	regs[insn->dst_reg]
  54#define SRC	regs[insn->src_reg]
  55#define FP	regs[BPF_REG_FP]
  56#define AX	regs[BPF_REG_AX]
  57#define ARG1	regs[BPF_REG_ARG1]
  58#define CTX	regs[BPF_REG_CTX]
  59#define IMM	insn->imm
  60
  61/* No hurry in this branch
  62 *
  63 * Exported for the bpf jit load helper.
  64 */
  65void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
  66{
  67	u8 *ptr = NULL;
  68
  69	if (k >= SKF_NET_OFF)
  70		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
  71	else if (k >= SKF_LL_OFF)
  72		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
  73
  74	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
  75		return ptr;
  76
  77	return NULL;
  78}
  79
  80struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
  81{
  82	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
  83	struct bpf_prog_aux *aux;
  84	struct bpf_prog *fp;
  85
  86	size = round_up(size, PAGE_SIZE);
  87	fp = __vmalloc(size, gfp_flags);
  88	if (fp == NULL)
  89		return NULL;
  90
  91	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
  92	if (aux == NULL) {
  93		vfree(fp);
  94		return NULL;
  95	}
  96	fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
  97	if (!fp->active) {
  98		vfree(fp);
  99		kfree(aux);
 100		return NULL;
 101	}
 102
 103	fp->pages = size / PAGE_SIZE;
 104	fp->aux = aux;
 105	fp->aux->prog = fp;
 106	fp->jit_requested = ebpf_jit_enabled();
 107
 108	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
 109	mutex_init(&fp->aux->used_maps_mutex);
 110	mutex_init(&fp->aux->dst_mutex);
 111
 112	return fp;
 113}
 114
 115struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 116{
 117	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
 118	struct bpf_prog *prog;
 119	int cpu;
 120
 121	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
 122	if (!prog)
 123		return NULL;
 124
 125	prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
 126	if (!prog->stats) {
 127		free_percpu(prog->active);
 128		kfree(prog->aux);
 129		vfree(prog);
 130		return NULL;
 131	}
 132
 133	for_each_possible_cpu(cpu) {
 134		struct bpf_prog_stats *pstats;
 135
 136		pstats = per_cpu_ptr(prog->stats, cpu);
 137		u64_stats_init(&pstats->syncp);
 138	}
 139	return prog;
 140}
 141EXPORT_SYMBOL_GPL(bpf_prog_alloc);
 142
 143int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
 144{
 145	if (!prog->aux->nr_linfo || !prog->jit_requested)
 146		return 0;
 147
 148	prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
 149					  sizeof(*prog->aux->jited_linfo),
 150					  GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
 151	if (!prog->aux->jited_linfo)
 152		return -ENOMEM;
 153
 154	return 0;
 155}
 156
 157void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
 158{
 159	if (prog->aux->jited_linfo &&
 160	    (!prog->jited || !prog->aux->jited_linfo[0])) {
 161		kvfree(prog->aux->jited_linfo);
 162		prog->aux->jited_linfo = NULL;
 163	}
 164
 165	kfree(prog->aux->kfunc_tab);
 166	prog->aux->kfunc_tab = NULL;
 167}
 168
 169/* The jit engine is responsible to provide an array
 170 * for insn_off to the jited_off mapping (insn_to_jit_off).
 171 *
 172 * The idx to this array is the insn_off.  Hence, the insn_off
 173 * here is relative to the prog itself instead of the main prog.
 174 * This array has one entry for each xlated bpf insn.
 175 *
 176 * jited_off is the byte off to the last byte of the jited insn.
 177 *
 178 * Hence, with
 179 * insn_start:
 180 *      The first bpf insn off of the prog.  The insn off
 181 *      here is relative to the main prog.
 182 *      e.g. if prog is a subprog, insn_start > 0
 183 * linfo_idx:
 184 *      The prog's idx to prog->aux->linfo and jited_linfo
 185 *
 186 * jited_linfo[linfo_idx] = prog->bpf_func
 187 *
 188 * For i > linfo_idx,
 189 *
 190 * jited_linfo[i] = prog->bpf_func +
 191 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 192 */
 193void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
 194			       const u32 *insn_to_jit_off)
 195{
 196	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
 197	const struct bpf_line_info *linfo;
 198	void **jited_linfo;
 199
 200	if (!prog->aux->jited_linfo)
 201		/* Userspace did not provide linfo */
 202		return;
 203
 204	linfo_idx = prog->aux->linfo_idx;
 205	linfo = &prog->aux->linfo[linfo_idx];
 206	insn_start = linfo[0].insn_off;
 207	insn_end = insn_start + prog->len;
 208
 209	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
 210	jited_linfo[0] = prog->bpf_func;
 211
 212	nr_linfo = prog->aux->nr_linfo - linfo_idx;
 213
 214	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
 215		/* The verifier ensures that linfo[i].insn_off is
 216		 * strictly increasing
 217		 */
 218		jited_linfo[i] = prog->bpf_func +
 219			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
 220}
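/* Editor's worked example for the mapping above (hypothetical numbers): with
 * insn_start = 100 and linfo[i].insn_off = 103, the prog-relative offset of
 * that insn is 3, so
 *
 *   jited_linfo[i] = prog->bpf_func + insn_to_jit_off[103 - 100 - 1]
 *                  = prog->bpf_func + insn_to_jit_off[2]
 *
 * i.e. the byte just past the jited image of prog-relative insn 2, which is
 * where the jited code for insn_off 103 begins.
 */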
 221
 222struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 223				  gfp_t gfp_extra_flags)
 224{
 225	gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
 226	struct bpf_prog *fp;
 227	u32 pages;
 228
 229	size = round_up(size, PAGE_SIZE);
 230	pages = size / PAGE_SIZE;
 231	if (pages <= fp_old->pages)
 232		return fp_old;
 233
 234	fp = __vmalloc(size, gfp_flags);
 235	if (fp) {
 236		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
 237		fp->pages = pages;
 238		fp->aux->prog = fp;
 239
 240		/* We keep fp->aux from fp_old around in the new
 241		 * reallocated structure.
 242		 */
 243		fp_old->aux = NULL;
 244		fp_old->stats = NULL;
 245		fp_old->active = NULL;
 246		__bpf_prog_free(fp_old);
 247	}
 248
 249	return fp;
 250}
 251
 252void __bpf_prog_free(struct bpf_prog *fp)
 253{
 254	if (fp->aux) {
 255		mutex_destroy(&fp->aux->used_maps_mutex);
 256		mutex_destroy(&fp->aux->dst_mutex);
 257		kfree(fp->aux->poke_tab);
 258		kfree(fp->aux);
 259	}
 260	free_percpu(fp->stats);
 261	free_percpu(fp->active);
 262	vfree(fp);
 263}
 264
 265int bpf_prog_calc_tag(struct bpf_prog *fp)
 266{
 267	const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
 268	u32 raw_size = bpf_prog_tag_scratch_size(fp);
 269	u32 digest[SHA1_DIGEST_WORDS];
 270	u32 ws[SHA1_WORKSPACE_WORDS];
 271	u32 i, bsize, psize, blocks;
 272	struct bpf_insn *dst;
 273	bool was_ld_map;
 274	u8 *raw, *todo;
 275	__be32 *result;
 276	__be64 *bits;
 277
 278	raw = vmalloc(raw_size);
 279	if (!raw)
 280		return -ENOMEM;
 281
 282	sha1_init(digest);
 283	memset(ws, 0, sizeof(ws));
 284
 285	/* We need to take out the map fds for the digest calculation
 286	 * since they are unstable from the user space side.
 287	 */
 288	dst = (void *)raw;
 289	for (i = 0, was_ld_map = false; i < fp->len; i++) {
 290		dst[i] = fp->insnsi[i];
 291		if (!was_ld_map &&
 292		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
 293		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
 294		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
 295			was_ld_map = true;
 296			dst[i].imm = 0;
 297		} else if (was_ld_map &&
 298			   dst[i].code == 0 &&
 299			   dst[i].dst_reg == 0 &&
 300			   dst[i].src_reg == 0 &&
 301			   dst[i].off == 0) {
 302			was_ld_map = false;
 303			dst[i].imm = 0;
 304		} else {
 305			was_ld_map = false;
 306		}
 307	}
 308
 309	psize = bpf_prog_insn_size(fp);
 310	memset(&raw[psize], 0, raw_size - psize);
 311	raw[psize++] = 0x80;
 312
 313	bsize  = round_up(psize, SHA1_BLOCK_SIZE);
 314	blocks = bsize / SHA1_BLOCK_SIZE;
 315	todo   = raw;
 316	if (bsize - psize >= sizeof(__be64)) {
 317		bits = (__be64 *)(todo + bsize - sizeof(__be64));
 318	} else {
 319		bits = (__be64 *)(todo + bsize + bits_offset);
 320		blocks++;
 321	}
 322	*bits = cpu_to_be64((psize - 1) << 3);
 323
 324	while (blocks--) {
 325		sha1_transform(digest, todo, ws);
 326		todo += SHA1_BLOCK_SIZE;
 327	}
 328
 329	result = (__force __be32 *)digest;
 330	for (i = 0; i < SHA1_DIGEST_WORDS; i++)
 331		result[i] = cpu_to_be32(digest[i]);
 332	memcpy(fp->tag, result, sizeof(fp->tag));
 333
 334	vfree(raw);
 335	return 0;
 336}
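/* Editor's worked example of the padding above (hypothetical program size):
 * a 10-insn program has psize = 80 bytes. After appending the 0x80 marker,
 * psize = 81 and bsize = round_up(81, 64) = 128, so bsize - psize = 47 >= 8
 * and the 64-bit length lands at the tail of the second block, blocks = 2,
 * with *bits = cpu_to_be64(80 << 3) recording the original 640-bit message
 * length as SHA-1 padding requires.
 */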
 337
 338static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
 339				s32 end_new, s32 curr, const bool probe_pass)
 340{
 341	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
 342	s32 delta = end_new - end_old;
 343	s64 imm = insn->imm;
 344
 345	if (curr < pos && curr + imm + 1 >= end_old)
 346		imm += delta;
 347	else if (curr >= end_new && curr + imm + 1 < end_new)
 348		imm -= delta;
 349	if (imm < imm_min || imm > imm_max)
 350		return -ERANGE;
 351	if (!probe_pass)
 352		insn->imm = imm;
 353	return 0;
 354}
 355
 356static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
 357				s32 end_new, s32 curr, const bool probe_pass)
 358{
 359	const s32 off_min = S16_MIN, off_max = S16_MAX;
 360	s32 delta = end_new - end_old;
 361	s32 off = insn->off;
 362
 363	if (curr < pos && curr + off + 1 >= end_old)
 364		off += delta;
 365	else if (curr >= end_new && curr + off + 1 < end_new)
 366		off -= delta;
 367	if (off < off_min || off > off_max)
 368		return -ERANGE;
 369	if (!probe_pass)
 370		insn->off = off;
 371	return 0;
 372}
 373
 374static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
 375			    s32 end_new, const bool probe_pass)
 376{
 377	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
 378	struct bpf_insn *insn = prog->insnsi;
 379	int ret = 0;
 380
 381	for (i = 0; i < insn_cnt; i++, insn++) {
 382		u8 code;
 383
 384		/* In the probing pass we still operate on the original,
 385		 * unpatched image in order to check overflows before we
 386		 * do any other adjustments. Therefore skip the patchlet.
 387		 */
 388		if (probe_pass && i == pos) {
 389			i = end_new;
 390			insn = prog->insnsi + end_old;
 391		}
 392		code = insn->code;
 393		if ((BPF_CLASS(code) != BPF_JMP &&
 394		     BPF_CLASS(code) != BPF_JMP32) ||
 395		    BPF_OP(code) == BPF_EXIT)
 396			continue;
 397		/* Adjust offset of jmps if we cross patch boundaries. */
 398		if (BPF_OP(code) == BPF_CALL) {
 399			if (insn->src_reg != BPF_PSEUDO_CALL)
 400				continue;
 401			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
 402						   end_new, i, probe_pass);
 403		} else {
 404			ret = bpf_adj_delta_to_off(insn, pos, end_old,
 405						   end_new, i, probe_pass);
 406		}
 407		if (ret)
 408			break;
 409	}
 410
 411	return ret;
 412}
 413
 414static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
 415{
 416	struct bpf_line_info *linfo;
 417	u32 i, nr_linfo;
 418
 419	nr_linfo = prog->aux->nr_linfo;
 420	if (!nr_linfo || !delta)
 421		return;
 422
 423	linfo = prog->aux->linfo;
 424
 425	for (i = 0; i < nr_linfo; i++)
 426		if (off < linfo[i].insn_off)
 427			break;
 428
 429	/* Push all off < linfo[i].insn_off by delta */
 430	for (; i < nr_linfo; i++)
 431		linfo[i].insn_off += delta;
 432}
 433
 434struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 435				       const struct bpf_insn *patch, u32 len)
 436{
 437	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
 438	const u32 cnt_max = S16_MAX;
 439	struct bpf_prog *prog_adj;
 440	int err;
 441
 442	/* Since our patchlet doesn't expand the image, we're done. */
 443	if (insn_delta == 0) {
 444		memcpy(prog->insnsi + off, patch, sizeof(*patch));
 445		return prog;
 446	}
 447
 448	insn_adj_cnt = prog->len + insn_delta;
 449
 450	/* Reject anything that would potentially let the insn->off
 451	 * target overflow when we have excessive program expansions.
 452	 * We need to probe here before we do any reallocation where
 453	 * we afterwards may not fail anymore.
 454	 */
 455	if (insn_adj_cnt > cnt_max &&
 456	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
 457		return ERR_PTR(err);
 458
 459	/* Several new instructions need to be inserted. Make room
 460	 * for them. Likely, there's no need for a new allocation as
 461	 * the last page could have large enough tailroom.
 462	 */
 463	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
 464				    GFP_USER);
 465	if (!prog_adj)
 466		return ERR_PTR(-ENOMEM);
 467
 468	prog_adj->len = insn_adj_cnt;
 469
 470	/* Patching happens in 3 steps:
 471	 *
 472	 * 1) Move over tail of insnsi from next instruction onwards,
 473	 *    so we can patch the single target insn with one or more
 474	 *    new ones (patching is always from 1 to n insns, n > 0).
 475	 * 2) Inject new instructions at the target location.
 476	 * 3) Adjust branch offsets if necessary.
 477	 */
 478	insn_rest = insn_adj_cnt - off - len;
 479
 480	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
 481		sizeof(*patch) * insn_rest);
 482	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
 483
 484	/* We are guaranteed to not fail at this point; otherwise
 485	 * the ship has sailed and we cannot revert to the original
 486	 * state. An overflow cannot happen at this point.
 487	 */
 488	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
 489
 490	bpf_adj_linfo(prog_adj, off, insn_delta);
 491
 492	return prog_adj;
 493}
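/* Editor's note: a hypothetical rewrite helper, not kernel code, sketching
 * how one insn can be replaced by a two-insn patchlet. On success the old
 * prog pointer must not be used again since the image may have been
 * reallocated; on failure an ERR_PTR() is returned.
 */
static struct bpf_prog * __maybe_unused
example_patch_one_insn(struct bpf_prog *prog, u32 off)
{
	struct bpf_insn patch[2] = {
		BPF_MOV64_IMM(BPF_REG_AX, 0),	/* new insn injected first  */
		prog->insnsi[off],		/* then the original insn   */
	};

	return bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
}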
 494
 495int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
 496{
 497	/* Branch offsets can't overflow when program is shrinking, no need
 498	 * to call bpf_adj_branches(..., true) here
 499	 */
 500	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
 501		sizeof(struct bpf_insn) * (prog->len - off - cnt));
 502	prog->len -= cnt;
 503
 504	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
 505}
 506
 507static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
 508{
 509	int i;
 510
 511	for (i = 0; i < fp->aux->func_cnt; i++)
 512		bpf_prog_kallsyms_del(fp->aux->func[i]);
 513}
 514
 515void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
 516{
 517	bpf_prog_kallsyms_del_subprogs(fp);
 518	bpf_prog_kallsyms_del(fp);
 519}
 520
 521#ifdef CONFIG_BPF_JIT
 522/* All BPF JIT sysctl knobs here. */
 523int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
 524int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
 525int bpf_jit_harden   __read_mostly;
 526long bpf_jit_limit   __read_mostly;
 527
 528static void
 529bpf_prog_ksym_set_addr(struct bpf_prog *prog)
 530{
 531	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
 532	unsigned long addr = (unsigned long)hdr;
 533
 534	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
 535
 536	prog->aux->ksym.start = (unsigned long) prog->bpf_func;
 537	prog->aux->ksym.end   = addr + hdr->pages * PAGE_SIZE;
 538}
 539
 540static void
 541bpf_prog_ksym_set_name(struct bpf_prog *prog)
 542{
 543	char *sym = prog->aux->ksym.name;
 544	const char *end = sym + KSYM_NAME_LEN;
 545	const struct btf_type *type;
 546	const char *func_name;
 547
 548	BUILD_BUG_ON(sizeof("bpf_prog_") +
 549		     sizeof(prog->tag) * 2 +
 550		     /* name has been null terminated.
 551		      * We would need +1 for the '_' preceding
 552		      * the name.  However, the null character
 553		      * is double counted between the name and the
 554		      * sizeof("bpf_prog_") above, so we omit
 555		      * the +1 here.
 556		      */
 557		     sizeof(prog->aux->name) > KSYM_NAME_LEN);
 558
 559	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
 560	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
 561
 562	/* prog->aux->name will be ignored if full btf name is available */
 563	if (prog->aux->func_info_cnt) {
 564		type = btf_type_by_id(prog->aux->btf,
 565				      prog->aux->func_info[prog->aux->func_idx].type_id);
 566		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
 567		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
 568		return;
 569	}
 570
 571	if (prog->aux->name[0])
 572		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
 573	else
 574		*sym = 0;
 575}
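/* Editor's example of the resulting layout (hypothetical tag and name): a
 * program loaded with name "xdp_pass" and tag 8d48358b3d68f9aa appears in
 * /proc/kallsyms as
 *
 *   bpf_prog_8d48358b3d68f9aa_xdp_pass
 *
 * while an anonymous program keeps just the bpf_prog_<tag> prefix.
 */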
 576
 577static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
 578{
 579	return container_of(n, struct bpf_ksym, tnode)->start;
 580}
 581
 582static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
 583					  struct latch_tree_node *b)
 584{
 585	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
 586}
 587
 588static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
 589{
 590	unsigned long val = (unsigned long)key;
 591	const struct bpf_ksym *ksym;
 592
 593	ksym = container_of(n, struct bpf_ksym, tnode);
 594
 595	if (val < ksym->start)
 596		return -1;
 597	if (val >= ksym->end)
 598		return  1;
 599
 600	return 0;
 601}
 602
 603static const struct latch_tree_ops bpf_tree_ops = {
 604	.less	= bpf_tree_less,
 605	.comp	= bpf_tree_comp,
 606};
 607
 608static DEFINE_SPINLOCK(bpf_lock);
 609static LIST_HEAD(bpf_kallsyms);
 610static struct latch_tree_root bpf_tree __cacheline_aligned;
 611
 612void bpf_ksym_add(struct bpf_ksym *ksym)
 613{
 614	spin_lock_bh(&bpf_lock);
 615	WARN_ON_ONCE(!list_empty(&ksym->lnode));
 616	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
 617	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
 618	spin_unlock_bh(&bpf_lock);
 619}
 620
 621static void __bpf_ksym_del(struct bpf_ksym *ksym)
 622{
 623	if (list_empty(&ksym->lnode))
 624		return;
 625
 626	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
 627	list_del_rcu(&ksym->lnode);
 628}
 629
 630void bpf_ksym_del(struct bpf_ksym *ksym)
 631{
 632	spin_lock_bh(&bpf_lock);
 633	__bpf_ksym_del(ksym);
 634	spin_unlock_bh(&bpf_lock);
 635}
 636
 637static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
 638{
 639	return fp->jited && !bpf_prog_was_classic(fp);
 640}
 641
 642static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
 643{
 644	return list_empty(&fp->aux->ksym.lnode) ||
 645	       fp->aux->ksym.lnode.prev == LIST_POISON2;
 646}
 647
 648void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 649{
 650	if (!bpf_prog_kallsyms_candidate(fp) ||
 651	    !bpf_capable())
 652		return;
 653
 654	bpf_prog_ksym_set_addr(fp);
 655	bpf_prog_ksym_set_name(fp);
 656	fp->aux->ksym.prog = true;
 657
 658	bpf_ksym_add(&fp->aux->ksym);
 659}
 660
 661void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 662{
 663	if (!bpf_prog_kallsyms_candidate(fp))
 664		return;
 665
 666	bpf_ksym_del(&fp->aux->ksym);
 667}
 668
 669static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
 670{
 671	struct latch_tree_node *n;
 672
 673	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
 674	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
 675}
 676
 677const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
 678				 unsigned long *off, char *sym)
 679{
 680	struct bpf_ksym *ksym;
 681	char *ret = NULL;
 682
 683	rcu_read_lock();
 684	ksym = bpf_ksym_find(addr);
 685	if (ksym) {
 686		unsigned long symbol_start = ksym->start;
 687		unsigned long symbol_end = ksym->end;
 688
 689		strncpy(sym, ksym->name, KSYM_NAME_LEN);
 690
 691		ret = sym;
 692		if (size)
 693			*size = symbol_end - symbol_start;
 694		if (off)
 695			*off  = addr - symbol_start;
 696	}
 697	rcu_read_unlock();
 698
 699	return ret;
 700}
 701
 702bool is_bpf_text_address(unsigned long addr)
 703{
 704	bool ret;
 705
 706	rcu_read_lock();
 707	ret = bpf_ksym_find(addr) != NULL;
 708	rcu_read_unlock();
 709
 710	return ret;
 711}
 712
 713static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
 714{
 715	struct bpf_ksym *ksym = bpf_ksym_find(addr);
 716
 717	return ksym && ksym->prog ?
 718	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :
 719	       NULL;
 720}
 721
 722const struct exception_table_entry *search_bpf_extables(unsigned long addr)
 723{
 724	const struct exception_table_entry *e = NULL;
 725	struct bpf_prog *prog;
 726
 727	rcu_read_lock();
 728	prog = bpf_prog_ksym_find(addr);
 729	if (!prog)
 730		goto out;
 731	if (!prog->aux->num_exentries)
 732		goto out;
 733
 734	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
 735out:
 736	rcu_read_unlock();
 737	return e;
 738}
 739
 740int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 741		    char *sym)
 742{
 743	struct bpf_ksym *ksym;
 744	unsigned int it = 0;
 745	int ret = -ERANGE;
 746
 747	if (!bpf_jit_kallsyms_enabled())
 748		return ret;
 749
 750	rcu_read_lock();
 751	list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
 752		if (it++ != symnum)
 753			continue;
 754
 755		strncpy(sym, ksym->name, KSYM_NAME_LEN);
 756
 757		*value = ksym->start;
 758		*type  = BPF_SYM_ELF_TYPE;
 759
 760		ret = 0;
 761		break;
 762	}
 763	rcu_read_unlock();
 764
 765	return ret;
 766}
 767
 768int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
 769				struct bpf_jit_poke_descriptor *poke)
 770{
 771	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
 772	static const u32 poke_tab_max = 1024;
 773	u32 slot = prog->aux->size_poke_tab;
 774	u32 size = slot + 1;
 775
 776	if (size > poke_tab_max)
 777		return -ENOSPC;
 778	if (poke->tailcall_target || poke->tailcall_target_stable ||
 779	    poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
 780		return -EINVAL;
 781
 782	switch (poke->reason) {
 783	case BPF_POKE_REASON_TAIL_CALL:
 784		if (!poke->tail_call.map)
 785			return -EINVAL;
 786		break;
 787	default:
 788		return -EINVAL;
 789	}
 790
 791	tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
 792	if (!tab)
 793		return -ENOMEM;
 794
 795	memcpy(&tab[slot], poke, sizeof(*poke));
 796	prog->aux->size_poke_tab = size;
 797	prog->aux->poke_tab = tab;
 798
 799	return slot;
 800}
 801
 802static atomic_long_t bpf_jit_current;
 803
 804/* Can be overridden by an arch's JIT compiler if it has a custom,
 805 * dedicated BPF backend memory area, or if neither of the two
 806 * below apply.
 807 */
 808u64 __weak bpf_jit_alloc_exec_limit(void)
 809{
 810#if defined(MODULES_VADDR)
 811	return MODULES_END - MODULES_VADDR;
 812#else
 813	return VMALLOC_END - VMALLOC_START;
 814#endif
 815}
 816
 817static int __init bpf_jit_charge_init(void)
 818{
 819	/* Only used as heuristic here to derive limit. */
 820	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
 821					    PAGE_SIZE), LONG_MAX);
 822	return 0;
 823}
 824pure_initcall(bpf_jit_charge_init);
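/* Editor's worked example (hypothetical arch numbers): with a 1 GiB module
 * area, bpf_jit_alloc_exec_limit() returns 1 GiB and the default limit
 * becomes round_up(1 GiB >> 2, PAGE_SIZE) = 256 MiB of charged JIT image
 * memory before unprivileged allocations start failing with -EPERM.
 */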
 825
 826int bpf_jit_charge_modmem(u32 pages)
 827{
 828	if (atomic_long_add_return(pages, &bpf_jit_current) >
 829	    (bpf_jit_limit >> PAGE_SHIFT)) {
 830		if (!bpf_capable()) {
 831			atomic_long_sub(pages, &bpf_jit_current);
 832			return -EPERM;
 833		}
 834	}
 835
 836	return 0;
 837}
 838
 839void bpf_jit_uncharge_modmem(u32 pages)
 840{
 841	atomic_long_sub(pages, &bpf_jit_current);
 842}
 843
 844void *__weak bpf_jit_alloc_exec(unsigned long size)
 845{
 846	return module_alloc(size);
 847}
 848
 849void __weak bpf_jit_free_exec(void *addr)
 850{
 851	module_memfree(addr);
 852}
 853
 854struct bpf_binary_header *
 855bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 856		     unsigned int alignment,
 857		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
 858{
 859	struct bpf_binary_header *hdr;
 860	u32 size, hole, start, pages;
 861
 862	WARN_ON_ONCE(!is_power_of_2(alignment) ||
 863		     alignment > BPF_IMAGE_ALIGNMENT);
 864
 865	/* Most BPF filters are really small, but if some of them
 866	 * fill a page, allow at least 128 extra bytes to insert a
 867	 * random section of illegal instructions.
 868	 */
 869	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
 870	pages = size / PAGE_SIZE;
 871
 872	if (bpf_jit_charge_modmem(pages))
 873		return NULL;
 874	hdr = bpf_jit_alloc_exec(size);
 875	if (!hdr) {
 876		bpf_jit_uncharge_modmem(pages);
 877		return NULL;
 878	}
 879
 880	/* Fill space with illegal/arch-dep instructions. */
 881	bpf_fill_ill_insns(hdr, size);
 882
 883	hdr->pages = pages;
 884	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 885		     PAGE_SIZE - sizeof(*hdr));
 886	start = (get_random_int() % hole) & ~(alignment - 1);
 887
 888	/* Leave a random number of instructions before BPF code. */
 889	*image_ptr = &hdr->image[start];
 890
 891	return hdr;
 892}
 893
 894void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 895{
 896	u32 pages = hdr->pages;
 897
 898	bpf_jit_free_exec(hdr);
 899	bpf_jit_uncharge_modmem(pages);
 900}
 901
 902/* This symbol is only overridden by archs that have different
 903 * requirements than the usual eBPF JITs, f.e. when they only
 904 * implement cBPF JIT, do not set images read-only, etc.
 905 */
 906void __weak bpf_jit_free(struct bpf_prog *fp)
 907{
 908	if (fp->jited) {
 909		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
 910
 911		bpf_jit_binary_free(hdr);
 912
 913		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
 914	}
 915
 916	bpf_prog_unlock_free(fp);
 917}
 918
 919int bpf_jit_get_func_addr(const struct bpf_prog *prog,
 920			  const struct bpf_insn *insn, bool extra_pass,
 921			  u64 *func_addr, bool *func_addr_fixed)
 922{
 923	s16 off = insn->off;
 924	s32 imm = insn->imm;
 925	u8 *addr;
 926
 927	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
 928	if (!*func_addr_fixed) {
 929		/* Place-holder address till the last pass has collected
 930		 * all addresses for JITed subprograms in which case we
 931		 * can pick them up from prog->aux.
 932		 */
 933		if (!extra_pass)
 934			addr = NULL;
 935		else if (prog->aux->func &&
 936			 off >= 0 && off < prog->aux->func_cnt)
 937			addr = (u8 *)prog->aux->func[off]->bpf_func;
 938		else
 939			return -EINVAL;
 940	} else {
 941		/* Address of a BPF helper call. Since part of the core
 942		 * kernel, it's always at a fixed location. __bpf_call_base
 943		 * and the helper with imm relative to it are both in core
 944		 * kernel.
 945		 */
 946		addr = (u8 *)__bpf_call_base + imm;
 947	}
 948
 949	*func_addr = (unsigned long)addr;
 950	return 0;
 951}
 952
 953static int bpf_jit_blind_insn(const struct bpf_insn *from,
 954			      const struct bpf_insn *aux,
 955			      struct bpf_insn *to_buff,
 956			      bool emit_zext)
 957{
 958	struct bpf_insn *to = to_buff;
 959	u32 imm_rnd = get_random_int();
 960	s16 off;
 961
 962	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
 963	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
 964
 965	/* Constraints on AX register:
 966	 *
 967	 * AX register is inaccessible from user space. It is mapped in
 968	 * all JITs, and used here for constant blinding rewrites. It is
 969	 * typically "stateless" meaning its contents are only valid within
 970	 * the executed instruction, but not across several instructions.
 971	 * There are a few exceptions however which are further detailed
 972	 * below.
 973	 *
 974	 * Constant blinding is only used by JITs, not in the interpreter.
 975	 * The interpreter uses AX in some occasions as a local temporary
 976	 * register e.g. in DIV or MOD instructions.
 977	 *
 978	 * In restricted circumstances, the verifier can also use the AX
 979	 * register for rewrites as long as they do not interfere with
 980	 * the above cases!
 981	 */
 982	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
 983		goto out;
 984
 985	if (from->imm == 0 &&
 986	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
 987	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
 988		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
 989		goto out;
 990	}
 991
 992	switch (from->code) {
 993	case BPF_ALU | BPF_ADD | BPF_K:
 994	case BPF_ALU | BPF_SUB | BPF_K:
 995	case BPF_ALU | BPF_AND | BPF_K:
 996	case BPF_ALU | BPF_OR  | BPF_K:
 997	case BPF_ALU | BPF_XOR | BPF_K:
 998	case BPF_ALU | BPF_MUL | BPF_K:
 999	case BPF_ALU | BPF_MOV | BPF_K:
1000	case BPF_ALU | BPF_DIV | BPF_K:
1001	case BPF_ALU | BPF_MOD | BPF_K:
1002		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1003		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1004		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1005		break;
1006
1007	case BPF_ALU64 | BPF_ADD | BPF_K:
1008	case BPF_ALU64 | BPF_SUB | BPF_K:
1009	case BPF_ALU64 | BPF_AND | BPF_K:
1010	case BPF_ALU64 | BPF_OR  | BPF_K:
1011	case BPF_ALU64 | BPF_XOR | BPF_K:
1012	case BPF_ALU64 | BPF_MUL | BPF_K:
1013	case BPF_ALU64 | BPF_MOV | BPF_K:
1014	case BPF_ALU64 | BPF_DIV | BPF_K:
1015	case BPF_ALU64 | BPF_MOD | BPF_K:
1016		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1017		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1018		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1019		break;
1020
1021	case BPF_JMP | BPF_JEQ  | BPF_K:
1022	case BPF_JMP | BPF_JNE  | BPF_K:
1023	case BPF_JMP | BPF_JGT  | BPF_K:
1024	case BPF_JMP | BPF_JLT  | BPF_K:
1025	case BPF_JMP | BPF_JGE  | BPF_K:
1026	case BPF_JMP | BPF_JLE  | BPF_K:
1027	case BPF_JMP | BPF_JSGT | BPF_K:
1028	case BPF_JMP | BPF_JSLT | BPF_K:
1029	case BPF_JMP | BPF_JSGE | BPF_K:
1030	case BPF_JMP | BPF_JSLE | BPF_K:
1031	case BPF_JMP | BPF_JSET | BPF_K:
1032		/* Account for the extra offset in case of a backjump. */
1033		off = from->off;
1034		if (off < 0)
1035			off -= 2;
1036		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1037		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1038		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1039		break;
1040
1041	case BPF_JMP32 | BPF_JEQ  | BPF_K:
1042	case BPF_JMP32 | BPF_JNE  | BPF_K:
1043	case BPF_JMP32 | BPF_JGT  | BPF_K:
1044	case BPF_JMP32 | BPF_JLT  | BPF_K:
1045	case BPF_JMP32 | BPF_JGE  | BPF_K:
1046	case BPF_JMP32 | BPF_JLE  | BPF_K:
1047	case BPF_JMP32 | BPF_JSGT | BPF_K:
1048	case BPF_JMP32 | BPF_JSLT | BPF_K:
1049	case BPF_JMP32 | BPF_JSGE | BPF_K:
1050	case BPF_JMP32 | BPF_JSLE | BPF_K:
1051	case BPF_JMP32 | BPF_JSET | BPF_K:
1052		/* Account for the extra offset in case of a backjump. */
1053		off = from->off;
1054		if (off < 0)
1055			off -= 2;
1056		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1057		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1058		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1059				      off);
1060		break;
1061
1062	case BPF_LD | BPF_IMM | BPF_DW:
1063		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1064		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1065		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1066		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1067		break;
1068	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1069		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1070		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1071		if (emit_zext)
1072			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
1073		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
1074		break;
1075
1076	case BPF_ST | BPF_MEM | BPF_DW:
1077	case BPF_ST | BPF_MEM | BPF_W:
1078	case BPF_ST | BPF_MEM | BPF_H:
1079	case BPF_ST | BPF_MEM | BPF_B:
1080		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1081		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1082		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1083		break;
1084	}
1085out:
1086	return to - to_buff;
1087}
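/* Editor's worked example (hypothetical random value): with
 * imm_rnd = 0x12345678, the single insn
 *
 *   BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 42)
 *
 * is rewritten into the three-insn sequence
 *
 *   BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0x12345678 ^ 42)
 *   BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, 0x12345678)
 *   BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * so the user-controlled constant 42 never appears verbatim in the JITed
 * image.
 */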
1088
1089static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1090					      gfp_t gfp_extra_flags)
1091{
1092	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1093	struct bpf_prog *fp;
1094
1095	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1096	if (fp != NULL) {
1097		/* aux->prog still points to the fp_other one, so
1098		 * when promoting the clone to the real program,
1099		 * this still needs to be adapted.
1100		 */
1101		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1102	}
1103
1104	return fp;
1105}
1106
1107static void bpf_prog_clone_free(struct bpf_prog *fp)
1108{
1109	/* aux was stolen by the other clone, so we cannot free
1110	 * it from this path! It will be freed eventually by the
1111	 * other program on release.
1112	 *
1113	 * At this point, we don't need a deferred release since
1114	 * clone is guaranteed to not be locked.
1115	 */
1116	fp->aux = NULL;
1117	fp->stats = NULL;
1118	fp->active = NULL;
1119	__bpf_prog_free(fp);
1120}
1121
1122void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1123{
1124	/* We have to repoint aux->prog to self, as we don't
1125	 * know whether fp here is the clone or the original.
1126	 */
1127	fp->aux->prog = fp;
1128	bpf_prog_clone_free(fp_other);
1129}
1130
1131struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1132{
1133	struct bpf_insn insn_buff[16], aux[2];
1134	struct bpf_prog *clone, *tmp;
1135	int insn_delta, insn_cnt;
1136	struct bpf_insn *insn;
1137	int i, rewritten;
1138
1139	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
1140		return prog;
1141
1142	clone = bpf_prog_clone_create(prog, GFP_USER);
1143	if (!clone)
1144		return ERR_PTR(-ENOMEM);
1145
1146	insn_cnt = clone->len;
1147	insn = clone->insnsi;
1148
1149	for (i = 0; i < insn_cnt; i++, insn++) {
1150		/* We temporarily need to hold the original ld64 insn
1151		 * so that we can still access the first part in the
1152		 * second blinding run.
1153		 */
1154		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1155		    insn[1].code == 0)
1156			memcpy(aux, insn, sizeof(aux));
1157
1158		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1159						clone->aux->verifier_zext);
1160		if (!rewritten)
1161			continue;
1162
1163		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1164		if (IS_ERR(tmp)) {
1165			/* Patching may have repointed aux->prog during
1166			 * realloc from the original one, so we need to
1167			 * fix it up here on error.
1168			 */
1169			bpf_jit_prog_release_other(prog, clone);
1170			return tmp;
1171		}
1172
1173		clone = tmp;
1174		insn_delta = rewritten - 1;
1175
1176		/* Walk new program and skip insns we just inserted. */
1177		insn = clone->insnsi + i + insn_delta;
1178		insn_cnt += insn_delta;
1179		i        += insn_delta;
1180	}
1181
1182	clone->blinded = 1;
1183	return clone;
1184}
1185#endif /* CONFIG_BPF_JIT */
1186
1187/* Base function for offset calculation. Needs to go into .text section,
1188 * therefore keeping it non-static as well; will also be used by JITs
1189 * anyway later on, so do not let the compiler omit it. This also needs
1190 * to go into kallsyms for correlation from e.g. bpftool, so naming
1191 * must not change.
1192 */
1193noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1194{
1195	return 0;
1196}
1197EXPORT_SYMBOL_GPL(__bpf_call_base);
1198
1199/* All UAPI available opcodes. */
1200#define BPF_INSN_MAP(INSN_2, INSN_3)		\
1201	/* 32 bit ALU operations. */		\
1202	/*   Register based. */			\
1203	INSN_3(ALU, ADD,  X),			\
1204	INSN_3(ALU, SUB,  X),			\
1205	INSN_3(ALU, AND,  X),			\
1206	INSN_3(ALU, OR,   X),			\
1207	INSN_3(ALU, LSH,  X),			\
1208	INSN_3(ALU, RSH,  X),			\
1209	INSN_3(ALU, XOR,  X),			\
1210	INSN_3(ALU, MUL,  X),			\
1211	INSN_3(ALU, MOV,  X),			\
1212	INSN_3(ALU, ARSH, X),			\
1213	INSN_3(ALU, DIV,  X),			\
1214	INSN_3(ALU, MOD,  X),			\
1215	INSN_2(ALU, NEG),			\
1216	INSN_3(ALU, END, TO_BE),		\
1217	INSN_3(ALU, END, TO_LE),		\
1218	/*   Immediate based. */		\
1219	INSN_3(ALU, ADD,  K),			\
1220	INSN_3(ALU, SUB,  K),			\
1221	INSN_3(ALU, AND,  K),			\
1222	INSN_3(ALU, OR,   K),			\
1223	INSN_3(ALU, LSH,  K),			\
1224	INSN_3(ALU, RSH,  K),			\
1225	INSN_3(ALU, XOR,  K),			\
1226	INSN_3(ALU, MUL,  K),			\
1227	INSN_3(ALU, MOV,  K),			\
1228	INSN_3(ALU, ARSH, K),			\
1229	INSN_3(ALU, DIV,  K),			\
1230	INSN_3(ALU, MOD,  K),			\
1231	/* 64 bit ALU operations. */		\
1232	/*   Register based. */			\
1233	INSN_3(ALU64, ADD,  X),			\
1234	INSN_3(ALU64, SUB,  X),			\
1235	INSN_3(ALU64, AND,  X),			\
1236	INSN_3(ALU64, OR,   X),			\
1237	INSN_3(ALU64, LSH,  X),			\
1238	INSN_3(ALU64, RSH,  X),			\
1239	INSN_3(ALU64, XOR,  X),			\
1240	INSN_3(ALU64, MUL,  X),			\
1241	INSN_3(ALU64, MOV,  X),			\
1242	INSN_3(ALU64, ARSH, X),			\
1243	INSN_3(ALU64, DIV,  X),			\
1244	INSN_3(ALU64, MOD,  X),			\
1245	INSN_2(ALU64, NEG),			\
1246	/*   Immediate based. */		\
1247	INSN_3(ALU64, ADD,  K),			\
1248	INSN_3(ALU64, SUB,  K),			\
1249	INSN_3(ALU64, AND,  K),			\
1250	INSN_3(ALU64, OR,   K),			\
1251	INSN_3(ALU64, LSH,  K),			\
1252	INSN_3(ALU64, RSH,  K),			\
1253	INSN_3(ALU64, XOR,  K),			\
1254	INSN_3(ALU64, MUL,  K),			\
1255	INSN_3(ALU64, MOV,  K),			\
1256	INSN_3(ALU64, ARSH, K),			\
1257	INSN_3(ALU64, DIV,  K),			\
1258	INSN_3(ALU64, MOD,  K),			\
1259	/* Call instruction. */			\
1260	INSN_2(JMP, CALL),			\
1261	/* Exit instruction. */			\
1262	INSN_2(JMP, EXIT),			\
1263	/* 32-bit Jump instructions. */		\
1264	/*   Register based. */			\
1265	INSN_3(JMP32, JEQ,  X),			\
1266	INSN_3(JMP32, JNE,  X),			\
1267	INSN_3(JMP32, JGT,  X),			\
1268	INSN_3(JMP32, JLT,  X),			\
1269	INSN_3(JMP32, JGE,  X),			\
1270	INSN_3(JMP32, JLE,  X),			\
1271	INSN_3(JMP32, JSGT, X),			\
1272	INSN_3(JMP32, JSLT, X),			\
1273	INSN_3(JMP32, JSGE, X),			\
1274	INSN_3(JMP32, JSLE, X),			\
1275	INSN_3(JMP32, JSET, X),			\
1276	/*   Immediate based. */		\
1277	INSN_3(JMP32, JEQ,  K),			\
1278	INSN_3(JMP32, JNE,  K),			\
1279	INSN_3(JMP32, JGT,  K),			\
1280	INSN_3(JMP32, JLT,  K),			\
1281	INSN_3(JMP32, JGE,  K),			\
1282	INSN_3(JMP32, JLE,  K),			\
1283	INSN_3(JMP32, JSGT, K),			\
1284	INSN_3(JMP32, JSLT, K),			\
1285	INSN_3(JMP32, JSGE, K),			\
1286	INSN_3(JMP32, JSLE, K),			\
1287	INSN_3(JMP32, JSET, K),			\
1288	/* Jump instructions. */		\
1289	/*   Register based. */			\
1290	INSN_3(JMP, JEQ,  X),			\
1291	INSN_3(JMP, JNE,  X),			\
1292	INSN_3(JMP, JGT,  X),			\
1293	INSN_3(JMP, JLT,  X),			\
1294	INSN_3(JMP, JGE,  X),			\
1295	INSN_3(JMP, JLE,  X),			\
1296	INSN_3(JMP, JSGT, X),			\
1297	INSN_3(JMP, JSLT, X),			\
1298	INSN_3(JMP, JSGE, X),			\
1299	INSN_3(JMP, JSLE, X),			\
1300	INSN_3(JMP, JSET, X),			\
1301	/*   Immediate based. */		\
1302	INSN_3(JMP, JEQ,  K),			\
1303	INSN_3(JMP, JNE,  K),			\
1304	INSN_3(JMP, JGT,  K),			\
1305	INSN_3(JMP, JLT,  K),			\
1306	INSN_3(JMP, JGE,  K),			\
1307	INSN_3(JMP, JLE,  K),			\
1308	INSN_3(JMP, JSGT, K),			\
1309	INSN_3(JMP, JSLT, K),			\
1310	INSN_3(JMP, JSGE, K),			\
1311	INSN_3(JMP, JSLE, K),			\
1312	INSN_3(JMP, JSET, K),			\
1313	INSN_2(JMP, JA),			\
1314	/* Store instructions. */		\
1315	/*   Register based. */			\
1316	INSN_3(STX, MEM,  B),			\
1317	INSN_3(STX, MEM,  H),			\
1318	INSN_3(STX, MEM,  W),			\
1319	INSN_3(STX, MEM,  DW),			\
1320	INSN_3(STX, ATOMIC, W),			\
1321	INSN_3(STX, ATOMIC, DW),		\
1322	/*   Immediate based. */		\
1323	INSN_3(ST, MEM, B),			\
1324	INSN_3(ST, MEM, H),			\
1325	INSN_3(ST, MEM, W),			\
1326	INSN_3(ST, MEM, DW),			\
1327	/* Load instructions. */		\
1328	/*   Register based. */			\
1329	INSN_3(LDX, MEM, B),			\
1330	INSN_3(LDX, MEM, H),			\
1331	INSN_3(LDX, MEM, W),			\
1332	INSN_3(LDX, MEM, DW),			\
1333	/*   Immediate based. */		\
1334	INSN_3(LD, IMM, DW)
1335
1336bool bpf_opcode_in_insntable(u8 code)
1337{
1338#define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1339#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1340	static const bool public_insntable[256] = {
1341		[0 ... 255] = false,
1342		/* Now overwrite non-defaults ... */
1343		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1344		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1345		[BPF_LD | BPF_ABS | BPF_B] = true,
1346		[BPF_LD | BPF_ABS | BPF_H] = true,
1347		[BPF_LD | BPF_ABS | BPF_W] = true,
1348		[BPF_LD | BPF_IND | BPF_B] = true,
1349		[BPF_LD | BPF_IND | BPF_H] = true,
1350		[BPF_LD | BPF_IND | BPF_W] = true,
1351	};
1352#undef BPF_INSN_3_TBL
1353#undef BPF_INSN_2_TBL
1354	return public_insntable[code];
1355}
1356
1357#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1358u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1359{
1360	memset(dst, 0, size);
1361	return -EFAULT;
1362}
1363
1364/**
1365 *	___bpf_prog_run - run eBPF program on a given context
1366 *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1367 *	@insn: is the array of eBPF instructions
1368 *
1369 * Decode and execute eBPF instructions.
1370 *
1371 * Return: whatever value is in %BPF_R0 at program exit
1372 */
1373static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1374{
1375#define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1376#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1377	static const void * const jumptable[256] __annotate_jump_table = {
1378		[0 ... 255] = &&default_label,
1379		/* Now overwrite non-defaults ... */
1380		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1381		/* Non-UAPI available opcodes. */
1382		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1383		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1384		[BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
1385		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1386		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1387		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1388		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1389	};
1390#undef BPF_INSN_3_LBL
1391#undef BPF_INSN_2_LBL
1392	u32 tail_call_cnt = 0;
1393
1394#define CONT	 ({ insn++; goto select_insn; })
1395#define CONT_JMP ({ insn++; goto select_insn; })
1396
1397select_insn:
1398	goto *jumptable[insn->code];
1399
1400	/* Explicitly mask the register-based shift amounts with 63 or 31
1401	 * to avoid undefined behavior. Normally this won't affect the
1402	 * generated code, for example, in case of native 64 bit archs such
1403	 * as x86-64 or arm64, the compiler is optimizing the AND away for
1404	 * the interpreter. In case of JITs, each of the JIT backends compiles
1405	 * the BPF shift operations to machine instructions which produce
1406	 * implementation-defined results in such a case; the resulting
1407	 * contents of the register may be arbitrary, but program behaviour
1408	 * as a whole remains defined. In other words, in case of JIT backends,
1409	 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1410	 */
1411	/* ALU (shifts) */
1412#define SHT(OPCODE, OP)					\
1413	ALU64_##OPCODE##_X:				\
1414		DST = DST OP (SRC & 63);		\
1415		CONT;					\
1416	ALU_##OPCODE##_X:				\
1417		DST = (u32) DST OP ((u32) SRC & 31);	\
1418		CONT;					\
1419	ALU64_##OPCODE##_K:				\
1420		DST = DST OP IMM;			\
1421		CONT;					\
1422	ALU_##OPCODE##_K:				\
1423		DST = (u32) DST OP (u32) IMM;		\
1424		CONT;
1425	/* ALU (rest) */
1426#define ALU(OPCODE, OP)					\
1427	ALU64_##OPCODE##_X:				\
1428		DST = DST OP SRC;			\
1429		CONT;					\
1430	ALU_##OPCODE##_X:				\
1431		DST = (u32) DST OP (u32) SRC;		\
1432		CONT;					\
1433	ALU64_##OPCODE##_K:				\
1434		DST = DST OP IMM;			\
1435		CONT;					\
1436	ALU_##OPCODE##_K:				\
1437		DST = (u32) DST OP (u32) IMM;		\
1438		CONT;
1439	ALU(ADD,  +)
1440	ALU(SUB,  -)
1441	ALU(AND,  &)
1442	ALU(OR,   |)
1443	ALU(XOR,  ^)
1444	ALU(MUL,  *)
1445	SHT(LSH, <<)
1446	SHT(RSH, >>)
1447#undef SHT
1448#undef ALU
1449	ALU_NEG:
1450		DST = (u32) -DST;
1451		CONT;
1452	ALU64_NEG:
1453		DST = -DST;
1454		CONT;
1455	ALU_MOV_X:
1456		DST = (u32) SRC;
1457		CONT;
1458	ALU_MOV_K:
1459		DST = (u32) IMM;
1460		CONT;
1461	ALU64_MOV_X:
1462		DST = SRC;
1463		CONT;
1464	ALU64_MOV_K:
1465		DST = IMM;
1466		CONT;
1467	LD_IMM_DW:
1468		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1469		insn++;
1470		CONT;
1471	ALU_ARSH_X:
1472		DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1473		CONT;
1474	ALU_ARSH_K:
1475		DST = (u64) (u32) (((s32) DST) >> IMM);
1476		CONT;
1477	ALU64_ARSH_X:
1478		(*(s64 *) &DST) >>= (SRC & 63);
1479		CONT;
1480	ALU64_ARSH_K:
1481		(*(s64 *) &DST) >>= IMM;
1482		CONT;
1483	ALU64_MOD_X:
1484		div64_u64_rem(DST, SRC, &AX);
1485		DST = AX;
1486		CONT;
1487	ALU_MOD_X:
1488		AX = (u32) DST;
1489		DST = do_div(AX, (u32) SRC);
1490		CONT;
1491	ALU64_MOD_K:
1492		div64_u64_rem(DST, IMM, &AX);
1493		DST = AX;
1494		CONT;
1495	ALU_MOD_K:
1496		AX = (u32) DST;
1497		DST = do_div(AX, (u32) IMM);
1498		CONT;
1499	ALU64_DIV_X:
1500		DST = div64_u64(DST, SRC);
1501		CONT;
1502	ALU_DIV_X:
1503		AX = (u32) DST;
1504		do_div(AX, (u32) SRC);
1505		DST = (u32) AX;
1506		CONT;
1507	ALU64_DIV_K:
1508		DST = div64_u64(DST, IMM);
1509		CONT;
1510	ALU_DIV_K:
1511		AX = (u32) DST;
1512		do_div(AX, (u32) IMM);
1513		DST = (u32) AX;
1514		CONT;
1515	ALU_END_TO_BE:
1516		switch (IMM) {
1517		case 16:
1518			DST = (__force u16) cpu_to_be16(DST);
1519			break;
1520		case 32:
1521			DST = (__force u32) cpu_to_be32(DST);
1522			break;
1523		case 64:
1524			DST = (__force u64) cpu_to_be64(DST);
1525			break;
1526		}
1527		CONT;
1528	ALU_END_TO_LE:
1529		switch (IMM) {
1530		case 16:
1531			DST = (__force u16) cpu_to_le16(DST);
1532			break;
1533		case 32:
1534			DST = (__force u32) cpu_to_le32(DST);
1535			break;
1536		case 64:
1537			DST = (__force u64) cpu_to_le64(DST);
1538			break;
1539		}
1540		CONT;
1541
1542	/* CALL */
1543	JMP_CALL:
1544		/* Function call scratches BPF_R1-BPF_R5 registers,
1545		 * preserves BPF_R6-BPF_R9, and stores return value
1546		 * into BPF_R0.
1547		 */
1548		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1549						       BPF_R4, BPF_R5);
1550		CONT;
1551
1552	JMP_CALL_ARGS:
1553		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1554							    BPF_R3, BPF_R4,
1555							    BPF_R5,
1556							    insn + insn->off + 1);
1557		CONT;
1558
1559	JMP_TAIL_CALL: {
1560		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1561		struct bpf_array *array = container_of(map, struct bpf_array, map);
1562		struct bpf_prog *prog;
1563		u32 index = BPF_R3;
1564
1565		if (unlikely(index >= array->map.max_entries))
1566			goto out;
1567		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1568			goto out;
1569
1570		tail_call_cnt++;
1571
1572		prog = READ_ONCE(array->ptrs[index]);
1573		if (!prog)
1574			goto out;
1575
1576		/* ARG1 at this point is guaranteed to point to CTX from
1577		 * the verifier side due to the fact that the tail call is
1578		 * handled like a helper, that is, bpf_tail_call_proto,
1579		 * where arg1_type is ARG_PTR_TO_CTX.
1580		 */
1581		insn = prog->insnsi;
1582		goto select_insn;
1583out:
1584		CONT;
1585	}
1586	JMP_JA:
1587		insn += insn->off;
1588		CONT;
1589	JMP_EXIT:
1590		return BPF_R0;
1591	/* JMP */
1592#define COND_JMP(SIGN, OPCODE, CMP_OP)				\
1593	JMP_##OPCODE##_X:					\
1594		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
1595			insn += insn->off;			\
1596			CONT_JMP;				\
1597		}						\
1598		CONT;						\
1599	JMP32_##OPCODE##_X:					\
1600		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
1601			insn += insn->off;			\
1602			CONT_JMP;				\
1603		}						\
1604		CONT;						\
1605	JMP_##OPCODE##_K:					\
1606		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
1607			insn += insn->off;			\
1608			CONT_JMP;				\
1609		}						\
1610		CONT;						\
1611	JMP32_##OPCODE##_K:					\
1612		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
1613			insn += insn->off;			\
1614			CONT_JMP;				\
1615		}						\
1616		CONT;
1617	COND_JMP(u, JEQ, ==)
1618	COND_JMP(u, JNE, !=)
1619	COND_JMP(u, JGT, >)
1620	COND_JMP(u, JLT, <)
1621	COND_JMP(u, JGE, >=)
1622	COND_JMP(u, JLE, <=)
1623	COND_JMP(u, JSET, &)
1624	COND_JMP(s, JSGT, >)
1625	COND_JMP(s, JSLT, <)
1626	COND_JMP(s, JSGE, >=)
1627	COND_JMP(s, JSLE, <=)
1628#undef COND_JMP
1629	/* ST, STX and LDX */
1630	ST_NOSPEC:
1631		/* Speculation barrier for mitigating Speculative Store Bypass.
1632		 * In case of arm64, we rely on the firmware mitigation as
1633		 * controlled via the ssbd kernel parameter. Whenever the
1634		 * mitigation is enabled, it works for all of the kernel code
1635		 * with no need to provide any additional instructions here.
1636		 * In case of x86, we use 'lfence' insn for mitigation. We
1637		 * reuse preexisting logic from Spectre v1 mitigation that
1638		 * happens to produce the required code on x86 for v4 as well.
1639		 */
1640#ifdef CONFIG_X86
1641		barrier_nospec();
1642#endif
1643		CONT;
1644#define LDST(SIZEOP, SIZE)						\
1645	STX_MEM_##SIZEOP:						\
1646		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
1647		CONT;							\
1648	ST_MEM_##SIZEOP:						\
1649		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
1650		CONT;							\
1651	LDX_MEM_##SIZEOP:						\
1652		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
1653		CONT;
1654
1655	LDST(B,   u8)
1656	LDST(H,  u16)
1657	LDST(W,  u32)
1658	LDST(DW, u64)
1659#undef LDST
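	/* For illustration: LDST(W, u32) above generates the STX_MEM_W,
	 * ST_MEM_W and LDX_MEM_W handlers, i.e. a 32-bit store of SRC or IMM
	 * to DST + insn->off and a 32-bit load from SRC + insn->off into DST.
	 */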
1660#define LDX_PROBE(SIZEOP, SIZE)							\
1661	LDX_PROBE_MEM_##SIZEOP:							\
1662		bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off));	\
1663		CONT;
1664	LDX_PROBE(B,  1)
1665	LDX_PROBE(H,  2)
1666	LDX_PROBE(W,  4)
1667	LDX_PROBE(DW, 8)
1668#undef LDX_PROBE
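	/* For illustration: LDX_PROBE(W, 4) above handles BPF_LDX |
	 * BPF_PROBE_MEM | BPF_W, i.e. a verifier-approved load through a
	 * possibly-faulting kernel pointer, routed through
	 * bpf_probe_read_kernel() instead of a plain dereference.
	 */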
1669
1670#define ATOMIC_ALU_OP(BOP, KOP)						\
1671		case BOP:						\
1672			if (BPF_SIZE(insn->code) == BPF_W)		\
1673				atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
1674					     (DST + insn->off));	\
1675			else						\
1676				atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
1677					       (DST + insn->off));	\
1678			break;						\
1679		case BOP | BPF_FETCH:					\
1680			if (BPF_SIZE(insn->code) == BPF_W)		\
1681				SRC = (u32) atomic_fetch_##KOP(		\
1682					(u32) SRC,			\
1683					(atomic_t *)(unsigned long) (DST + insn->off)); \
1684			else						\
1685				SRC = (u64) atomic64_fetch_##KOP(	\
1686					(u64) SRC,			\
1687					(atomic64_t *)(unsigned long) (DST + insn->off)); \
1688			break;
1689
1690	STX_ATOMIC_DW:
1691	STX_ATOMIC_W:
1692		switch (IMM) {
1693		ATOMIC_ALU_OP(BPF_ADD, add)
1694		ATOMIC_ALU_OP(BPF_AND, and)
1695		ATOMIC_ALU_OP(BPF_OR, or)
1696		ATOMIC_ALU_OP(BPF_XOR, xor)
1697#undef ATOMIC_ALU_OP
1698
1699		case BPF_XCHG:
1700			if (BPF_SIZE(insn->code) == BPF_W)
1701				SRC = (u32) atomic_xchg(
1702					(atomic_t *)(unsigned long) (DST + insn->off),
1703					(u32) SRC);
1704			else
1705				SRC = (u64) atomic64_xchg(
1706					(atomic64_t *)(unsigned long) (DST + insn->off),
1707					(u64) SRC);
1708			break;
1709		case BPF_CMPXCHG:
1710			if (BPF_SIZE(insn->code) == BPF_W)
1711				BPF_R0 = (u32) atomic_cmpxchg(
1712					(atomic_t *)(unsigned long) (DST + insn->off),
1713					(u32) BPF_R0, (u32) SRC);
1714			else
1715				BPF_R0 = (u64) atomic64_cmpxchg(
1716					(atomic64_t *)(unsigned long) (DST + insn->off),
1717					(u64) BPF_R0, (u64) SRC);
1718			break;
1719
1720		default:
1721			goto default_label;
1722		}
1723		CONT;
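	/* Illustrative example: a BPF_STX | BPF_ATOMIC | BPF_DW insn with
	 * imm == (BPF_ADD | BPF_FETCH) atomically adds SRC to the u64 at
	 * DST + insn->off and leaves the old value in SRC; without BPF_FETCH
	 * the old value is discarded, and BPF_CMPXCHG uses BPF_R0 as the
	 * expected/returned value instead.
	 */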
1724
1725	default_label:
1726		/* If we ever reach this, we have a bug somewhere. Die hard here
1727		 * instead of just returning 0; we could be somewhere in a subprog,
1728		 * so execution could otherwise continue, which we do /not/ want.
1729		 *
1730		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
1731		 */
1732		pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
1733			insn->code, insn->imm);
1734		BUG_ON(1);
1735		return 0;
1736}
1737
1738#define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1739#define DEFINE_BPF_PROG_RUN(stack_size) \
1740static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1741{ \
1742	u64 stack[stack_size / sizeof(u64)]; \
1743	u64 regs[MAX_BPF_EXT_REG]; \
1744\
1745	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1746	ARG1 = (u64) (unsigned long) ctx; \
1747	return ___bpf_prog_run(regs, insn); \
1748}
1749
1750#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
1751#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
1752static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
1753				      const struct bpf_insn *insn) \
1754{ \
1755	u64 stack[stack_size / sizeof(u64)]; \
1756	u64 regs[MAX_BPF_EXT_REG]; \
1757\
1758	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1759	BPF_R1 = r1; \
1760	BPF_R2 = r2; \
1761	BPF_R3 = r3; \
1762	BPF_R4 = r4; \
1763	BPF_R5 = r5; \
1764	return ___bpf_prog_run(regs, insn); \
1765}
1766
1767#define EVAL1(FN, X) FN(X)
1768#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1769#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1770#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1771#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1772#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1773
1774EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1775EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1776EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
1777
1778EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
1779EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
1780EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
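/* For illustration: the EVALn() helpers above stamp out one interpreter entry
 * point per 32-byte stack step, e.g. EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, ...)
 * defines __bpf_prog_run32(), __bpf_prog_run64() and so on, each with a fixed
 * on-stack scratch area of the given size plus its own register file.
 */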
1781
1782#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1783
1784static unsigned int (*interpreters[])(const void *ctx,
1785				      const struct bpf_insn *insn) = {
1786EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1787EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1788EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1789};
1790#undef PROG_NAME_LIST
1791#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
1792static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
1793				  const struct bpf_insn *insn) = {
1794EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1795EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1796EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1797};
1798#undef PROG_NAME_LIST
1799
1800void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
1801{
1802	stack_depth = max_t(u32, stack_depth, 1);
1803	insn->off = (s16) insn->imm;
1804	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
1805		__bpf_call_base_args;
1806	insn->code = BPF_JMP | BPF_CALL_ARGS;
1807}
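/* Example with hypothetical numbers: for a callee with stack_depth == 40 the
 * depth rounds up to 64, so the call insn is rewritten to dispatch through
 * interpreters_args[1], i.e. __bpf_prog_run_args64(), with the original imm
 * (the relative offset of the called subprog) preserved in insn->off.
 */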
1808
1809#else
1810static unsigned int __bpf_prog_ret0_warn(const void *ctx,
1811					 const struct bpf_insn *insn)
1812{
1813	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
1814	 * is not working properly, so warn about it!
1815	 */
1816	WARN_ON_ONCE(1);
1817	return 0;
1818}
1819#endif
1820
1821bool bpf_prog_array_compatible(struct bpf_array *array,
1822			       const struct bpf_prog *fp)
1823{
1824	if (fp->kprobe_override)
1825		return false;
1826
1827	if (!array->aux->type) {
1828		/* There's no owner yet where we could check for
1829		 * compatibility.
1830		 */
1831		array->aux->type  = fp->type;
1832		array->aux->jited = fp->jited;
1833		return true;
1834	}
1835
1836	return array->aux->type  == fp->type &&
1837	       array->aux->jited == fp->jited;
1838}
1839
1840static int bpf_check_tail_call(const struct bpf_prog *fp)
1841{
1842	struct bpf_prog_aux *aux = fp->aux;
1843	int i, ret = 0;
1844
1845	mutex_lock(&aux->used_maps_mutex);
1846	for (i = 0; i < aux->used_map_cnt; i++) {
1847		struct bpf_map *map = aux->used_maps[i];
1848		struct bpf_array *array;
1849
1850		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1851			continue;
1852
1853		array = container_of(map, struct bpf_array, map);
1854		if (!bpf_prog_array_compatible(array, fp)) {
1855			ret = -EINVAL;
1856			goto out;
1857		}
1858	}
1859
1860out:
1861	mutex_unlock(&aux->used_maps_mutex);
1862	return ret;
1863}
1864
1865static void bpf_prog_select_func(struct bpf_prog *fp)
1866{
1867#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1868	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1869
1870	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1871#else
1872	fp->bpf_func = __bpf_prog_ret0_warn;
1873#endif
1874}
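/* For illustration: a program with a maximum stack depth of 96 bytes selects
 * interpreters[2], i.e. __bpf_prog_run96(); depths are rounded up to the next
 * multiple of 32, so e.g. 100 bytes would select __bpf_prog_run128(). With
 * CONFIG_BPF_JIT_ALWAYS_ON the warning stub is installed instead.
 */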
1875
1876/**
1877 *	bpf_prog_select_runtime - select exec runtime for BPF program
1878 *	@fp: bpf_prog populated with internal BPF program
1879 *	@err: pointer to error variable
1880 *
1881 * Try to JIT the eBPF program; if JIT is not available, use the interpreter.
1882 * The BPF program will be executed via BPF_PROG_RUN() macro.
1883 *
1884 * Return: the &fp argument along with &err set to 0 for success or
1885 * a negative errno code on failure
1886 */
1887struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1888{
1889	/* In case of BPF to BPF calls, verifier did all the prep
1890	 * work with regards to JITing, etc.
1891	 */
1892	bool jit_needed = false;
1893
1894	if (fp->bpf_func)
1895		goto finalize;
1896
1897	if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
1898	    bpf_prog_has_kfunc_call(fp))
1899		jit_needed = true;
1900
1901	bpf_prog_select_func(fp);
1902
1903	/* eBPF JITs can rewrite the program in case constant
1904	 * blinding is active. However, in case of error during
1905	 * blinding, bpf_int_jit_compile() must always return a
1906	 * valid program, which in this case would simply not be
1907	 * JITed but would fall back to the interpreter instead.
1908	 */
1909	if (!bpf_prog_is_dev_bound(fp->aux)) {
1910		*err = bpf_prog_alloc_jited_linfo(fp);
1911		if (*err)
1912			return fp;
1913
1914		fp = bpf_int_jit_compile(fp);
1915		bpf_prog_jit_attempt_done(fp);
1916		if (!fp->jited && jit_needed) {
1917			*err = -ENOTSUPP;
1918			return fp;
1919		}
1920	} else {
1921		*err = bpf_prog_offload_compile(fp);
1922		if (*err)
1923			return fp;
1924	}
1925
1926finalize:
1927	bpf_prog_lock_ro(fp);
1928
1929	/* The tail call compatibility check can only be done at
1930	 * this late stage, as we need to determine whether we deal
1931	 * with JITed or non-JITed program concatenations, and not
1932	 * all eBPF JITs might immediately support all features.
1933	 */
1934	*err = bpf_check_tail_call(fp);
1935
1936	return fp;
1937}
1938EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
1939
1940static unsigned int __bpf_prog_ret1(const void *ctx,
1941				    const struct bpf_insn *insn)
1942{
1943	return 1;
1944}
1945
1946static struct bpf_prog_dummy {
1947	struct bpf_prog prog;
1948} dummy_bpf_prog = {
1949	.prog = {
1950		.bpf_func = __bpf_prog_ret1,
1951	},
1952};
1953
1954/* To avoid allocating an empty bpf_prog_array for cgroups that
1955 * don't have a bpf program attached, use one global 'empty_prog_array'.
1956 * It will not be modified by the caller of bpf_prog_array_alloc()
1957 * (since the caller requested prog_cnt == 0); that pointer should
1958 * still be 'freed' by bpf_prog_array_free().
1959 */
1960static struct {
1961	struct bpf_prog_array hdr;
1962	struct bpf_prog *null_prog;
1963} empty_prog_array = {
1964	.null_prog = NULL,
1965};
1966
1967struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
1968{
1969	if (prog_cnt)
1970		return kzalloc(sizeof(struct bpf_prog_array) +
1971			       sizeof(struct bpf_prog_array_item) *
1972			       (prog_cnt + 1),
1973			       flags);
1974
1975	return &empty_prog_array.hdr;
1976}
1977
1978void bpf_prog_array_free(struct bpf_prog_array *progs)
1979{
1980	if (!progs || progs == &empty_prog_array.hdr)
1981		return;
1982	kfree_rcu(progs, rcu);
1983}
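/* For illustration: bpf_prog_array_alloc(0, GFP_KERNEL) returns the shared
 * &empty_prog_array.hdr singleton, which bpf_prog_array_free() recognizes and
 * never frees; any prog_cnt > 0 allocates prog_cnt + 1 items so that the
 * array stays NULL-terminated.
 */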
1984
1985int bpf_prog_array_length(struct bpf_prog_array *array)
1986{
1987	struct bpf_prog_array_item *item;
1988	u32 cnt = 0;
1989
1990	for (item = array->items; item->prog; item++)
1991		if (item->prog != &dummy_bpf_prog.prog)
1992			cnt++;
1993	return cnt;
1994}
1995
1996bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
1997{
1998	struct bpf_prog_array_item *item;
1999
2000	for (item = array->items; item->prog; item++)
2001		if (item->prog != &dummy_bpf_prog.prog)
2002			return false;
2003	return true;
2004}
2005
2006static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2007				     u32 *prog_ids,
2008				     u32 request_cnt)
2009{
2010	struct bpf_prog_array_item *item;
2011	int i = 0;
2012
2013	for (item = array->items; item->prog; item++) {
2014		if (item->prog == &dummy_bpf_prog.prog)
2015			continue;
2016		prog_ids[i] = item->prog->aux->id;
2017		if (++i == request_cnt) {
2018			item++;
2019			break;
2020		}
2021	}
2022
2023	return !!(item->prog);
2024}
2025
2026int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2027				__u32 __user *prog_ids, u32 cnt)
2028{
2029	unsigned long err = 0;
2030	bool nospc;
2031	u32 *ids;
2032
2033	/* users of this function are doing:
2034	 * cnt = bpf_prog_array_length();
2035	 * if (cnt > 0)
2036	 *     bpf_prog_array_copy_to_user(..., cnt);
2037	 * so below kcalloc doesn't need extra cnt > 0 check.
2038	 */
2039	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2040	if (!ids)
2041		return -ENOMEM;
2042	nospc = bpf_prog_array_copy_core(array, ids, cnt);
2043	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2044	kfree(ids);
2045	if (err)
2046		return -EFAULT;
2047	if (nospc)
2048		return -ENOSPC;
2049	return 0;
2050}
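/* For illustration: with cnt == 2 and three non-dummy programs in the array,
 * the first two ids are copied to user space and -ENOSPC tells the caller
 * that the snapshot was truncated.
 */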
2051
2052void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2053				struct bpf_prog *old_prog)
2054{
2055	struct bpf_prog_array_item *item;
2056
2057	for (item = array->items; item->prog; item++)
2058		if (item->prog == old_prog) {
2059			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2060			break;
2061		}
2062}
2063
2064/**
2065 * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2066 *                                   index into the program array with
2067 *                                   a dummy no-op program.
2068 * @array: a bpf_prog_array
2069 * @index: the index of the program to replace
2070 *
2071 * Skips over dummy programs, by not counting them, when calculating
2072 * the position of the program to replace.
2073 *
2074 * Return:
2075 * * 0		- Success
2076 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2077 * * -ENOENT	- Index out of range
2078 */
2079int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2080{
2081	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2082}
2083
2084/**
2085 * bpf_prog_array_update_at() - Updates the program at the given index
2086 *                              into the program array.
2087 * @array: a bpf_prog_array
2088 * @index: the index of the program to update
2089 * @prog: the program to insert into the array
2090 *
2091 * Skips over dummy programs, by not counting them, when calculating
2092 * the position of the program to update.
2093 *
2094 * Return:
2095 * * 0		- Success
2096 * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2097 * * -ENOENT	- Index out of range
2098 */
2099int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2100			     struct bpf_prog *prog)
2101{
2102	struct bpf_prog_array_item *item;
2103
2104	if (unlikely(index < 0))
2105		return -EINVAL;
2106
2107	for (item = array->items; item->prog; item++) {
2108		if (item->prog == &dummy_bpf_prog.prog)
2109			continue;
2110		if (!index) {
2111			WRITE_ONCE(item->prog, prog);
2112			return 0;
2113		}
2114		index--;
2115	}
2116	return -ENOENT;
2117}
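/* Example with a hypothetical layout: given items = { progA, dummy, progB,
 * NULL }, bpf_prog_array_update_at(array, 1, progC) skips the dummy slot and
 * replaces progB, since dummy entries are not counted towards the index.
 */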
2118
2119int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2120			struct bpf_prog *exclude_prog,
2121			struct bpf_prog *include_prog,
2122			struct bpf_prog_array **new_array)
2123{
2124	int new_prog_cnt, carry_prog_cnt = 0;
2125	struct bpf_prog_array_item *existing;
2126	struct bpf_prog_array *array;
2127	bool found_exclude = false;
2128	int new_prog_idx = 0;
2129
2130	/* Figure out how many existing progs we need to carry over to
2131	 * the new array.
2132	 */
2133	if (old_array) {
2134		existing = old_array->items;
2135		for (; existing->prog; existing++) {
2136			if (existing->prog == exclude_prog) {
2137				found_exclude = true;
2138				continue;
2139			}
2140			if (existing->prog != &dummy_bpf_prog.prog)
2141				carry_prog_cnt++;
2142			if (existing->prog == include_prog)
2143				return -EEXIST;
2144		}
2145	}
2146
2147	if (exclude_prog && !found_exclude)
2148		return -ENOENT;
2149
2150	/* How many progs (not NULL) will be in the new array? */
2151	new_prog_cnt = carry_prog_cnt;
2152	if (include_prog)
2153		new_prog_cnt += 1;
2154
2155	/* Do we have any prog (not NULL) in the new array? */
2156	if (!new_prog_cnt) {
2157		*new_array = NULL;
2158		return 0;
2159	}
2160
2161	/* +1 as the end of prog_array is marked with NULL */
2162	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2163	if (!array)
2164		return -ENOMEM;
2165
2166	/* Fill in the new prog array */
2167	if (carry_prog_cnt) {
2168		existing = old_array->items;
2169		for (; existing->prog; existing++)
2170			if (existing->prog != exclude_prog &&
2171			    existing->prog != &dummy_bpf_prog.prog) {
2172				array->items[new_prog_idx++].prog =
2173					existing->prog;
2174			}
2175	}
2176	if (include_prog)
2177		array->items[new_prog_idx++].prog = include_prog;
2178	array->items[new_prog_idx].prog = NULL;
2179	*new_array = array;
2180	return 0;
2181}
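/* Illustrative semantics: copying { A, B, C } with exclude_prog == B and
 * include_prog == D yields a new array { A, C, D, NULL }; excluding a program
 * that is not present fails with -ENOENT, and including one that is already
 * attached fails with -EEXIST.
 */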
2182
2183int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2184			     u32 *prog_ids, u32 request_cnt,
2185			     u32 *prog_cnt)
2186{
2187	u32 cnt = 0;
2188
2189	if (array)
2190		cnt = bpf_prog_array_length(array);
2191
2192	*prog_cnt = cnt;
2193
2194	/* return early if user requested only program count or nothing to copy */
2195	if (!request_cnt || !cnt)
2196		return 0;
2197
2198	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2199	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2200								     : 0;
2201}
2202
2203void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2204			  struct bpf_map **used_maps, u32 len)
2205{
2206	struct bpf_map *map;
2207	u32 i;
2208
2209	for (i = 0; i < len; i++) {
2210		map = used_maps[i];
2211		if (map->ops->map_poke_untrack)
2212			map->ops->map_poke_untrack(map, aux);
2213		bpf_map_put(map);
2214	}
2215}
2216
2217static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2218{
2219	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2220	kfree(aux->used_maps);
2221}
2222
2223void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2224			  struct btf_mod_pair *used_btfs, u32 len)
2225{
2226#ifdef CONFIG_BPF_SYSCALL
2227	struct btf_mod_pair *btf_mod;
2228	u32 i;
2229
2230	for (i = 0; i < len; i++) {
2231		btf_mod = &used_btfs[i];
2232		if (btf_mod->module)
2233			module_put(btf_mod->module);
2234		btf_put(btf_mod->btf);
2235	}
2236#endif
2237}
2238
2239static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2240{
2241	__bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2242	kfree(aux->used_btfs);
2243}
2244
2245static void bpf_prog_free_deferred(struct work_struct *work)
2246{
2247	struct bpf_prog_aux *aux;
2248	int i;
2249
2250	aux = container_of(work, struct bpf_prog_aux, work);
2251	bpf_free_used_maps(aux);
2252	bpf_free_used_btfs(aux);
2253	if (bpf_prog_is_dev_bound(aux))
2254		bpf_prog_offload_destroy(aux->prog);
2255#ifdef CONFIG_PERF_EVENTS
2256	if (aux->prog->has_callchain_buf)
2257		put_callchain_buffers();
2258#endif
2259	if (aux->dst_trampoline)
2260		bpf_trampoline_put(aux->dst_trampoline);
2261	for (i = 0; i < aux->func_cnt; i++) {
2262		/* We can just unlink the subprog poke descriptor table as
2263		 * it was originally linked to the main program and is also
2264		 * released along with it.
2265		 */
2266		aux->func[i]->aux->poke_tab = NULL;
2267		bpf_jit_free(aux->func[i]);
2268	}
2269	if (aux->func_cnt) {
2270		kfree(aux->func);
2271		bpf_prog_unlock_free(aux->prog);
2272	} else {
2273		bpf_jit_free(aux->prog);
2274	}
2275}
2276
2277/* Free internal BPF program */
2278void bpf_prog_free(struct bpf_prog *fp)
2279{
2280	struct bpf_prog_aux *aux = fp->aux;
2281
2282	if (aux->dst_prog)
2283		bpf_prog_put(aux->dst_prog);
2284	INIT_WORK(&aux->work, bpf_prog_free_deferred);
2285	schedule_work(&aux->work);
2286}
2287EXPORT_SYMBOL_GPL(bpf_prog_free);
2288
2289/* RNG for unprivileged user space with state separated from prandom_u32(). */
2290static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2291
2292void bpf_user_rnd_init_once(void)
2293{
2294	prandom_init_once(&bpf_user_rnd_state);
2295}
2296
2297BPF_CALL_0(bpf_user_rnd_u32)
2298{
2299	/* Should someone ever have the rather unwise idea to use some
2300	 * of the registers passed into this function, then note that
2301	 * this function is called from native eBPF and classic-to-eBPF
2302	 * transformations. Register assignments from both sides are
2303	 * different, e.g. classic BPF always sets fn(ctx, A, X) here.
2304	 */
2305	struct rnd_state *state;
2306	u32 res;
2307
2308	state = &get_cpu_var(bpf_user_rnd_state);
2309	res = prandom_u32_state(state);
2310	put_cpu_var(bpf_user_rnd_state);
2311
2312	return res;
2313}
2314
2315BPF_CALL_0(bpf_get_raw_cpu_id)
2316{
2317	return raw_smp_processor_id();
2318}
2319
2320/* Weak definitions of helper functions in case we don't have bpf syscall. */
2321const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2322const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2323const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2324const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2325const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2326const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2327const struct bpf_func_proto bpf_spin_lock_proto __weak;
2328const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2329const struct bpf_func_proto bpf_jiffies64_proto __weak;
2330
2331const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2332const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2333const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2334const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2335const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2336const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2337
2338const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2339const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2340const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2341const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2342const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2343const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2344const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2345const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2346const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2347
2348const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2349{
2350	return NULL;
2351}
2352
2353u64 __weak
2354bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2355		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2356{
2357	return -ENOTSUPP;
2358}
2359EXPORT_SYMBOL_GPL(bpf_event_output);
2360
2361/* Always built-in helper functions. */
2362const struct bpf_func_proto bpf_tail_call_proto = {
2363	.func		= NULL,
2364	.gpl_only	= false,
2365	.ret_type	= RET_VOID,
2366	.arg1_type	= ARG_PTR_TO_CTX,
2367	.arg2_type	= ARG_CONST_MAP_PTR,
2368	.arg3_type	= ARG_ANYTHING,
2369};
2370
2371/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2372 * It is encouraged to implement bpf_int_jit_compile() instead, so that
2373 * eBPF and implicitly also cBPF can get JITed!
2374 */
2375struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2376{
2377	return prog;
2378}
2379
2380/* Stub for JITs that support eBPF. All cBPF code gets transformed into
2381 * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2382 */
2383void __weak bpf_jit_compile(struct bpf_prog *prog)
2384{
2385}
2386
2387bool __weak bpf_helper_changes_pkt_data(void *func)
2388{
2389	return false;
2390}
2391
2392/* Return TRUE if the JIT backend wants verifier to enable sub-register usage
2393 * analysis code and wants explicit zero extension inserted by verifier.
2394 * Otherwise, return FALSE.
2395 *
2396 * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2397 * you don't override this. JITs that don't want these extra insns can detect
2398 * them using insn_is_zext.
2399 */
2400bool __weak bpf_jit_needs_zext(void)
2401{
2402	return false;
2403}
2404
2405bool __weak bpf_jit_supports_kfunc_call(void)
2406{
2407	return false;
2408}
2409
2410/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
2411 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
2412 */
2413int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2414			 int len)
2415{
2416	return -EFAULT;
2417}
2418
2419int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2420			      void *addr1, void *addr2)
2421{
2422	return -ENOTSUPP;
2423}
2424
2425DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2426EXPORT_SYMBOL(bpf_stats_enabled_key);
2427
2428/* All definitions of tracepoints related to BPF. */
2429#define CREATE_TRACE_POINTS
2430#include <linux/bpf_trace.h>
2431
2432EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2433EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);