   1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   2 * Copyright (c) 2016 Facebook
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of version 2 of the GNU General Public
   6 * License as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful, but
   9 * WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11 * General Public License for more details.
  12 */
  13#include <linux/kernel.h>
  14#include <linux/types.h>
  15#include <linux/slab.h>
  16#include <linux/bpf.h>
  17#include <linux/bpf_verifier.h>
  18#include <linux/filter.h>
  19#include <net/netlink.h>
  20#include <linux/file.h>
  21#include <linux/vmalloc.h>
  22#include <linux/stringify.h>
  23#include <linux/bsearch.h>
  24#include <linux/sort.h>
  25
  26#include "disasm.h"
  27
  28static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
  29#define BPF_PROG_TYPE(_id, _name) \
  30	[_id] = & _name ## _verifier_ops,
  31#define BPF_MAP_TYPE(_id, _ops)
  32#include <linux/bpf_types.h>
  33#undef BPF_PROG_TYPE
  34#undef BPF_MAP_TYPE
  35};
  36
  37/* bpf_check() is a static code analyzer that walks eBPF program
  38 * instruction by instruction and updates register/stack state.
  39 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
  40 *
  41 * The first pass is depth-first-search to check that the program is a DAG.
  42 * It rejects the following programs:
  43 * - larger than BPF_MAXINSNS insns
  44 * - if loop is present (detected via back-edge)
  45 * - unreachable insns exist (shouldn't be a forest. program = one function)
  46 * - out of bounds or malformed jumps
  47 * The second pass is all possible path descent from the 1st insn.
  49 * Since it's analyzing all paths through the program, the length of the
  50 * analysis is limited to 64k insn, which may be hit even if the total number of
  51 * insn is less than 4K, but there are too many branches that change stack/regs.
  51 * Number of 'branches to be analyzed' is limited to 1k
  52 *
  53 * On entry to each instruction, each register has a type, and the instruction
  54 * changes the types of the registers depending on instruction semantics.
  55 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
  56 * copied to R1.
  57 *
  58 * All registers are 64-bit.
  59 * R0 - return register
  60 * R1-R5 argument passing registers
  61 * R6-R9 callee saved registers
  62 * R10 - frame pointer read-only
  63 *
  64 * At the start of BPF program the register R1 contains a pointer to bpf_context
  65 * and has type PTR_TO_CTX.
  66 *
  67 * Verifier tracks arithmetic operations on pointers in case:
  68 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
  69 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
  70 * 1st insn copies R10 (which has FRAME_PTR) type into R1
  71 * and 2nd arithmetic instruction is pattern matched to recognize
  72 * that it wants to construct a pointer to some element within stack.
  73 * So after 2nd insn, the register R1 has type PTR_TO_STACK
  74 * (and -20 constant is saved for further stack bounds checking).
  75 * Meaning that this reg is a pointer to stack plus known immediate constant.
  76 *
  77 * Most of the time the registers have SCALAR_VALUE type, which
  78 * means the register has some value, but it's not a valid pointer.
  79 * (like pointer plus pointer becomes SCALAR_VALUE type)
  80 *
  81 * When verifier sees load or store instructions the type of base register
  82 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer
  83 * types recognized by check_mem_access() function.
  84 *
  85 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
  86 * and the range of [ptr, ptr + map's value_size) is accessible.
  87 *
  88 * registers used to pass values to function calls are checked against
  89 * function argument constraints.
  90 *
  91 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
  92 * It means that the register type passed to this function must be
  93 * PTR_TO_STACK and it will be used inside the function as
  94 * 'pointer to map element key'
  95 *
  96 * For example the argument constraints for bpf_map_lookup_elem():
  97 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
  98 *   .arg1_type = ARG_CONST_MAP_PTR,
  99 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 100 *
 101 * ret_type says that this function returns 'pointer to map elem value or null'
 102 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 103 * 2nd argument should be a pointer to stack, which will be used inside
 104 * the helper function as a pointer to map element key.
 105 *
 106 * On the kernel side the helper function looks like:
 107 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 108 * {
 109 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 110 *    void *key = (void *) (unsigned long) r2;
 111 *    void *value;
 112 *
 113 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 114 *    [key, key + map->key_size) bytes are valid and were initialized on
 115 *    the stack of eBPF program.
 116 * }
 117 *
 118 * Corresponding eBPF program may look like:
 119 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 120 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 121 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 122 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 123 * here verifier looks at prototype of map_lookup_elem() and sees:
 124 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 125 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 126 *
 127 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 128 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 129 * and were initialized prior to this call.
 130 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 131 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 132 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means the bpf_map_lookup_elem()
 133 * function returns either a pointer to the map value or NULL.
 134 *
 135 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 136 * insn, the register holding that pointer in the true branch changes state to
 137 * PTR_TO_MAP_VALUE and the same register changes state to a known-zero
 138 * SCALAR_VALUE in the false branch. See check_cond_jmp_op().
 139 *
 140 * After the call R0 is set to return type of the function and registers R1-R5
 141 * are set to NOT_INIT to indicate that they are no longer readable.
 142 */
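/* For illustration, a typical NULL check on the result of such a lookup
 * (a sketch, not taken from any particular program) looks like:
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *    BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), // R0 is PTR_TO_MAP_VALUE_OR_NULL here
 *    BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),  // fall-through: R0 is PTR_TO_MAP_VALUE
 *    BPF_MOV64_IMM(BPF_REG_0, 0),
 *    BPF_EXIT_INSN(),
 * i.e. the store through R0 is only accepted on the branch where the verifier
 * has proven R0 != NULL.
 */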
 143
 144/* verifier_state + insn_idx are pushed to stack when branch is encountered */
 145struct bpf_verifier_stack_elem {
 146	/* verifier state is 'st'
 147	 * before processing instruction 'insn_idx'
 148	 * and after processing instruction 'prev_insn_idx'
 149	 */
 150	struct bpf_verifier_state st;
 151	int insn_idx;
 152	int prev_insn_idx;
 153	struct bpf_verifier_stack_elem *next;
 154};
 155
 156#define BPF_COMPLEXITY_LIMIT_INSNS	131072
 157#define BPF_COMPLEXITY_LIMIT_STACK	1024
 158
 159#define BPF_MAP_PTR_UNPRIV	1UL
 160#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
 161					  POISON_POINTER_DELTA))
 162#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
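/* aux->map_state packs the struct bpf_map pointer seen at a call site together
 * with an "unpriv" flag in bit 0 (the pointer is at least word-aligned, so the
 * low bit is free); BPF_MAP_PTR_POISON is a sentinel meaning no single map
 * pointer could be recorded for that call site.
 */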
 163
 164static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
 165{
 166	return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
 167}
 168
 169static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
 170{
 171	return aux->map_state & BPF_MAP_PTR_UNPRIV;
 172}
 173
 174static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
 175			      const struct bpf_map *map, bool unpriv)
 176{
 177	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
 178	unpriv |= bpf_map_ptr_unpriv(aux);
 179	aux->map_state = (unsigned long)map |
 180			 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
 181}
 182
 183struct bpf_call_arg_meta {
 184	struct bpf_map *map_ptr;
 185	bool raw_mode;
 186	bool pkt_access;
 187	int regno;
 188	int access_size;
 189};
 190
 191static DEFINE_MUTEX(bpf_verifier_lock);
 192
 193void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
 194		       va_list args)
 195{
 196	unsigned int n;
 197
 198	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
 199
 200	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
 201		  "verifier log line truncated - local buffer too short\n");
 202
 203	n = min(log->len_total - log->len_used - 1, n);
 204	log->kbuf[n] = '\0';
 205
 206	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
 207		log->len_used += n;
 208	else
 209		log->ubuf = NULL;
 210}
 211
 212/* log_level controls verbosity level of eBPF verifier.
 213 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 214 * so the user can figure out what's wrong with the program
 215 */
 216__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
 217					   const char *fmt, ...)
 218{
 219	va_list args;
 220
 221	if (!bpf_verifier_log_needed(&env->log))
 222		return;
 223
 224	va_start(args, fmt);
 225	bpf_verifier_vlog(&env->log, fmt, args);
 226	va_end(args);
 227}
 228EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
 229
 230__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
 231{
 232	struct bpf_verifier_env *env = private_data;
 233	va_list args;
 234
 235	if (!bpf_verifier_log_needed(&env->log))
 236		return;
 237
 238	va_start(args, fmt);
 239	bpf_verifier_vlog(&env->log, fmt, args);
 240	va_end(args);
 241}
 242
 243static bool type_is_pkt_pointer(enum bpf_reg_type type)
 244{
 245	return type == PTR_TO_PACKET ||
 246	       type == PTR_TO_PACKET_META;
 247}
 248
 249/* string representation of 'enum bpf_reg_type' */
 250static const char * const reg_type_str[] = {
 251	[NOT_INIT]		= "?",
 252	[SCALAR_VALUE]		= "inv",
 253	[PTR_TO_CTX]		= "ctx",
 254	[CONST_PTR_TO_MAP]	= "map_ptr",
 255	[PTR_TO_MAP_VALUE]	= "map_value",
 256	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
 257	[PTR_TO_STACK]		= "fp",
 258	[PTR_TO_PACKET]		= "pkt",
 259	[PTR_TO_PACKET_META]	= "pkt_meta",
 260	[PTR_TO_PACKET_END]	= "pkt_end",
 261};
 262
 263static void print_liveness(struct bpf_verifier_env *env,
 264			   enum bpf_reg_liveness live)
 265{
 266	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
 267	    verbose(env, "_");
 268	if (live & REG_LIVE_READ)
 269		verbose(env, "r");
 270	if (live & REG_LIVE_WRITTEN)
 271		verbose(env, "w");
 272}
 273
 274static struct bpf_func_state *func(struct bpf_verifier_env *env,
 275				   const struct bpf_reg_state *reg)
 276{
 277	struct bpf_verifier_state *cur = env->cur_state;
 278
 279	return cur->frame[reg->frameno];
 280}
 281
 282static void print_verifier_state(struct bpf_verifier_env *env,
 283				 const struct bpf_func_state *state)
 284{
 285	const struct bpf_reg_state *reg;
 286	enum bpf_reg_type t;
 287	int i;
 288
 289	if (state->frameno)
 290		verbose(env, " frame%d:", state->frameno);
 291	for (i = 0; i < MAX_BPF_REG; i++) {
 292		reg = &state->regs[i];
 293		t = reg->type;
 294		if (t == NOT_INIT)
 295			continue;
 296		verbose(env, " R%d", i);
 297		print_liveness(env, reg->live);
 298		verbose(env, "=%s", reg_type_str[t]);
 299		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
 300		    tnum_is_const(reg->var_off)) {
 301			/* reg->off should be 0 for SCALAR_VALUE */
 302			verbose(env, "%lld", reg->var_off.value + reg->off);
 303			if (t == PTR_TO_STACK)
 304				verbose(env, ",call_%d", func(env, reg)->callsite);
 305		} else {
 306			verbose(env, "(id=%d", reg->id);
 307			if (t != SCALAR_VALUE)
 308				verbose(env, ",off=%d", reg->off);
 309			if (type_is_pkt_pointer(t))
 310				verbose(env, ",r=%d", reg->range);
 311			else if (t == CONST_PTR_TO_MAP ||
 312				 t == PTR_TO_MAP_VALUE ||
 313				 t == PTR_TO_MAP_VALUE_OR_NULL)
 314				verbose(env, ",ks=%d,vs=%d",
 315					reg->map_ptr->key_size,
 316					reg->map_ptr->value_size);
 317			if (tnum_is_const(reg->var_off)) {
 318				/* Typically an immediate SCALAR_VALUE, but
 319				 * could be a pointer whose offset is too big
 320				 * for reg->off
 321				 */
 322				verbose(env, ",imm=%llx", reg->var_off.value);
 323			} else {
 324				if (reg->smin_value != reg->umin_value &&
 325				    reg->smin_value != S64_MIN)
 326					verbose(env, ",smin_value=%lld",
 327						(long long)reg->smin_value);
 328				if (reg->smax_value != reg->umax_value &&
 329				    reg->smax_value != S64_MAX)
 330					verbose(env, ",smax_value=%lld",
 331						(long long)reg->smax_value);
 332				if (reg->umin_value != 0)
 333					verbose(env, ",umin_value=%llu",
 334						(unsigned long long)reg->umin_value);
 335				if (reg->umax_value != U64_MAX)
 336					verbose(env, ",umax_value=%llu",
 337						(unsigned long long)reg->umax_value);
 338				if (!tnum_is_unknown(reg->var_off)) {
 339					char tn_buf[48];
 340
 341					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
 342					verbose(env, ",var_off=%s", tn_buf);
 343				}
 344			}
 345			verbose(env, ")");
 346		}
 347	}
 348	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
 349		if (state->stack[i].slot_type[0] == STACK_SPILL) {
 350			verbose(env, " fp%d",
 351				(-i - 1) * BPF_REG_SIZE);
 352			print_liveness(env, state->stack[i].spilled_ptr.live);
 353			verbose(env, "=%s",
 354				reg_type_str[state->stack[i].spilled_ptr.type]);
 355		}
 356		if (state->stack[i].slot_type[0] == STACK_ZERO)
 357			verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE);
 358	}
 359	verbose(env, "\n");
 360}
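/* For example (illustrative only), right after entry to the main function the
 * state above is printed roughly as:
 *    R1=ctx(id=0,off=0,imm=0) R10=fp0,call_-1
 * with "_r"/"_w" liveness suffixes appearing on registers once they have been
 * read or written.
 */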
 361
 362static int copy_stack_state(struct bpf_func_state *dst,
 363			    const struct bpf_func_state *src)
 364{
 365	if (!src->stack)
 366		return 0;
 367	if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) {
 368		/* internal bug, make state invalid to reject the program */
 369		memset(dst, 0, sizeof(*dst));
 370		return -EFAULT;
 371	}
 372	memcpy(dst->stack, src->stack,
 373	       sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
 374	return 0;
 375}
 376
 377/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 378 * make it consume minimal amount of memory. check_stack_write() access from
 379 * the program calls into realloc_func_state() to grow the stack size.
 380 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 381 * which this function copies over. It points to previous bpf_verifier_state
 382 * which is never reallocated
 383 */
 384static int realloc_func_state(struct bpf_func_state *state, int size,
 385			      bool copy_old)
 386{
 387	u32 old_size = state->allocated_stack;
 388	struct bpf_stack_state *new_stack;
 389	int slot = size / BPF_REG_SIZE;
 390
 391	if (size <= old_size || !size) {
 392		if (copy_old)
 393			return 0;
 394		state->allocated_stack = slot * BPF_REG_SIZE;
 395		if (!size && old_size) {
 396			kfree(state->stack);
 397			state->stack = NULL;
 398		}
 399		return 0;
 400	}
 401	new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state),
 402				  GFP_KERNEL);
 403	if (!new_stack)
 404		return -ENOMEM;
 405	if (copy_old) {
 406		if (state->stack)
 407			memcpy(new_stack, state->stack,
 408			       sizeof(*new_stack) * (old_size / BPF_REG_SIZE));
 409		memset(new_stack + old_size / BPF_REG_SIZE, 0,
 410		       sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE);
 411	}
 412	state->allocated_stack = slot * BPF_REG_SIZE;
 413	kfree(state->stack);
 414	state->stack = new_stack;
 415	return 0;
 416}
 417
 418static void free_func_state(struct bpf_func_state *state)
 419{
 420	if (!state)
 421		return;
 422	kfree(state->stack);
 423	kfree(state);
 424}
 425
 426static void free_verifier_state(struct bpf_verifier_state *state,
 427				bool free_self)
 428{
 429	int i;
 430
 431	for (i = 0; i <= state->curframe; i++) {
 432		free_func_state(state->frame[i]);
 433		state->frame[i] = NULL;
 434	}
 435	if (free_self)
 436		kfree(state);
 437}
 438
 439/* copy verifier state from src to dst growing dst stack space
 440 * when necessary to accommodate larger src stack
 441 */
 442static int copy_func_state(struct bpf_func_state *dst,
 443			   const struct bpf_func_state *src)
 444{
 445	int err;
 446
 447	err = realloc_func_state(dst, src->allocated_stack, false);
 448	if (err)
 449		return err;
 450	memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack));
 451	return copy_stack_state(dst, src);
 452}
 453
 454static int copy_verifier_state(struct bpf_verifier_state *dst_state,
 455			       const struct bpf_verifier_state *src)
 456{
 457	struct bpf_func_state *dst;
 458	int i, err;
 459
 460	/* if dst has more stack frames then src frame, free them */
 461	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
 462		free_func_state(dst_state->frame[i]);
 463		dst_state->frame[i] = NULL;
 464	}
 465	dst_state->curframe = src->curframe;
 466	dst_state->parent = src->parent;
 467	for (i = 0; i <= src->curframe; i++) {
 468		dst = dst_state->frame[i];
 469		if (!dst) {
 470			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
 471			if (!dst)
 472				return -ENOMEM;
 473			dst_state->frame[i] = dst;
 474		}
 475		err = copy_func_state(dst, src->frame[i]);
 476		if (err)
 477			return err;
 478	}
 479	return 0;
 480}
 481
 482static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
 483		     int *insn_idx)
 484{
 485	struct bpf_verifier_state *cur = env->cur_state;
 486	struct bpf_verifier_stack_elem *elem, *head = env->head;
 487	int err;
 488
 489	if (env->head == NULL)
 490		return -ENOENT;
 491
 492	if (cur) {
 493		err = copy_verifier_state(cur, &head->st);
 494		if (err)
 495			return err;
 496	}
 497	if (insn_idx)
 498		*insn_idx = head->insn_idx;
 499	if (prev_insn_idx)
 500		*prev_insn_idx = head->prev_insn_idx;
 501	elem = head->next;
 502	free_verifier_state(&head->st, false);
 503	kfree(head);
 504	env->head = elem;
 505	env->stack_size--;
 506	return 0;
 507}
 508
 509static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
 510					     int insn_idx, int prev_insn_idx)
 511{
 512	struct bpf_verifier_state *cur = env->cur_state;
 513	struct bpf_verifier_stack_elem *elem;
 514	int err;
 515
 516	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
 517	if (!elem)
 518		goto err;
 519
 520	elem->insn_idx = insn_idx;
 521	elem->prev_insn_idx = prev_insn_idx;
 522	elem->next = env->head;
 523	env->head = elem;
 524	env->stack_size++;
 525	err = copy_verifier_state(&elem->st, cur);
 526	if (err)
 527		goto err;
 528	if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
 529		verbose(env, "BPF program is too complex\n");
 530		goto err;
 531	}
 532	return &elem->st;
 533err:
 534	free_verifier_state(env->cur_state, true);
 535	env->cur_state = NULL;
 536	/* pop all elements and return */
 537	while (!pop_stack(env, NULL, NULL));
 538	return NULL;
 539}
 540
 541#define CALLER_SAVED_REGS 6
 542static const int caller_saved[CALLER_SAVED_REGS] = {
 543	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
 544};
 545
 546static void __mark_reg_not_init(struct bpf_reg_state *reg);
 547
 548/* Mark the unknown part of a register (variable offset or scalar value) as
 549 * known to have the value @imm.
 550 */
 551static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
 552{
 553	reg->id = 0;
 554	reg->var_off = tnum_const(imm);
 555	reg->smin_value = (s64)imm;
 556	reg->smax_value = (s64)imm;
 557	reg->umin_value = imm;
 558	reg->umax_value = imm;
 559}
 560
 561/* Mark the 'variable offset' part of a register as zero.  This should be
 562 * used only on registers holding a pointer type.
 563 */
 564static void __mark_reg_known_zero(struct bpf_reg_state *reg)
 565{
 566	__mark_reg_known(reg, 0);
 567}
 568
 569static void __mark_reg_const_zero(struct bpf_reg_state *reg)
 570{
 571	__mark_reg_known(reg, 0);
 572	reg->off = 0;
 573	reg->type = SCALAR_VALUE;
 574}
 575
 576static void mark_reg_known_zero(struct bpf_verifier_env *env,
 577				struct bpf_reg_state *regs, u32 regno)
 578{
 579	if (WARN_ON(regno >= MAX_BPF_REG)) {
 580		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
 581		/* Something bad happened, let's kill all regs */
 582		for (regno = 0; regno < MAX_BPF_REG; regno++)
 583			__mark_reg_not_init(regs + regno);
 584		return;
 585	}
 586	__mark_reg_known_zero(regs + regno);
 587}
 588
 589static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
 590{
 591	return type_is_pkt_pointer(reg->type);
 592}
 593
 594static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
 595{
 596	return reg_is_pkt_pointer(reg) ||
 597	       reg->type == PTR_TO_PACKET_END;
 598}
 599
 600/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
 601static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
 602				    enum bpf_reg_type which)
 603{
 604	/* The register can already have a range from prior markings.
 605	 * This is fine as long as it hasn't been advanced from its
 606	 * origin.
 607	 */
 608	return reg->type == which &&
 609	       reg->id == 0 &&
 610	       reg->off == 0 &&
 611	       tnum_equals_const(reg->var_off, 0);
 612}
 613
 614/* Attempts to improve min/max values based on var_off information */
 615static void __update_reg_bounds(struct bpf_reg_state *reg)
 616{
 617	/* min signed is max(sign bit) | min(other bits) */
 618	reg->smin_value = max_t(s64, reg->smin_value,
 619				reg->var_off.value | (reg->var_off.mask & S64_MIN));
 620	/* max signed is min(sign bit) | max(other bits) */
 621	reg->smax_value = min_t(s64, reg->smax_value,
 622				reg->var_off.value | (reg->var_off.mask & S64_MAX));
 623	reg->umin_value = max(reg->umin_value, reg->var_off.value);
 624	reg->umax_value = min(reg->umax_value,
 625			      reg->var_off.value | reg->var_off.mask);
 626}
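/* Example of the above: if var_off = (value=0x2, mask=0x1), the register can
 * only be 2 or 3, so umin_value is raised to at least 2 (the known bits) and
 * umax_value is lowered to at most 3 (known bits with all unknown bits set).
 */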
 627
 628/* Uses signed min/max values to inform unsigned, and vice-versa */
 629static void __reg_deduce_bounds(struct bpf_reg_state *reg)
 630{
 631	/* Learn sign from signed bounds.
 632	 * If we cannot cross the sign boundary, then signed and unsigned bounds
 633	 * are the same, so combine.  This works even in the negative case, e.g.
 634	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
 635	 */
 636	if (reg->smin_value >= 0 || reg->smax_value < 0) {
 637		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
 638							  reg->umin_value);
 639		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
 640							  reg->umax_value);
 641		return;
 642	}
 643	/* Learn sign from unsigned bounds.  Signed bounds cross the sign
 644	 * boundary, so we must be careful.
 645	 */
 646	if ((s64)reg->umax_value >= 0) {
 647		/* Positive.  We can't learn anything from the smin, but smax
 648		 * is positive, hence safe.
 649		 */
 650		reg->smin_value = reg->umin_value;
 651		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
 652							  reg->umax_value);
 653	} else if ((s64)reg->umin_value < 0) {
 654		/* Negative.  We can't learn anything from the smax, but smin
 655		 * is negative, hence safe.
 656		 */
 657		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
 658							  reg->umin_value);
 659		reg->smax_value = reg->umax_value;
 660	}
 661}
 662
 663/* Attempts to improve var_off based on unsigned min/max information */
 664static void __reg_bound_offset(struct bpf_reg_state *reg)
 665{
 666	reg->var_off = tnum_intersect(reg->var_off,
 667				      tnum_range(reg->umin_value,
 668						 reg->umax_value));
 669}
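/* e.g. once umin_value == umax_value == 5, tnum_range(5, 5) is the constant 5,
 * so intersecting it with var_off makes the register a known constant.
 */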
 670
 671/* Reset the min/max bounds of a register */
 672static void __mark_reg_unbounded(struct bpf_reg_state *reg)
 673{
 674	reg->smin_value = S64_MIN;
 675	reg->smax_value = S64_MAX;
 676	reg->umin_value = 0;
 677	reg->umax_value = U64_MAX;
 678}
 679
 680/* Mark a register as having a completely unknown (scalar) value. */
 681static void __mark_reg_unknown(struct bpf_reg_state *reg)
 682{
 683	reg->type = SCALAR_VALUE;
 684	reg->id = 0;
 685	reg->off = 0;
 686	reg->var_off = tnum_unknown;
 687	reg->frameno = 0;
 688	__mark_reg_unbounded(reg);
 689}
 690
 691static void mark_reg_unknown(struct bpf_verifier_env *env,
 692			     struct bpf_reg_state *regs, u32 regno)
 693{
 694	if (WARN_ON(regno >= MAX_BPF_REG)) {
 695		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
 696		/* Something bad happened, let's kill all regs except FP */
 697		for (regno = 0; regno < BPF_REG_FP; regno++)
 698			__mark_reg_not_init(regs + regno);
 699		return;
 700	}
 701	__mark_reg_unknown(regs + regno);
 702}
 703
 704static void __mark_reg_not_init(struct bpf_reg_state *reg)
 705{
 706	__mark_reg_unknown(reg);
 707	reg->type = NOT_INIT;
 708}
 709
 710static void mark_reg_not_init(struct bpf_verifier_env *env,
 711			      struct bpf_reg_state *regs, u32 regno)
 712{
 713	if (WARN_ON(regno >= MAX_BPF_REG)) {
 714		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
 715		/* Something bad happened, let's kill all regs except FP */
 716		for (regno = 0; regno < BPF_REG_FP; regno++)
 717			__mark_reg_not_init(regs + regno);
 718		return;
 719	}
 720	__mark_reg_not_init(regs + regno);
 721}
 722
 723static void init_reg_state(struct bpf_verifier_env *env,
 724			   struct bpf_func_state *state)
 725{
 726	struct bpf_reg_state *regs = state->regs;
 727	int i;
 728
 729	for (i = 0; i < MAX_BPF_REG; i++) {
 730		mark_reg_not_init(env, regs, i);
 731		regs[i].live = REG_LIVE_NONE;
 732	}
 733
 734	/* frame pointer */
 735	regs[BPF_REG_FP].type = PTR_TO_STACK;
 736	mark_reg_known_zero(env, regs, BPF_REG_FP);
 737	regs[BPF_REG_FP].frameno = state->frameno;
 738
 739	/* 1st arg to a function */
 740	regs[BPF_REG_1].type = PTR_TO_CTX;
 741	mark_reg_known_zero(env, regs, BPF_REG_1);
 742}
 743
 744#define BPF_MAIN_FUNC (-1)
 745static void init_func_state(struct bpf_verifier_env *env,
 746			    struct bpf_func_state *state,
 747			    int callsite, int frameno, int subprogno)
 748{
 749	state->callsite = callsite;
 750	state->frameno = frameno;
 751	state->subprogno = subprogno;
 752	init_reg_state(env, state);
 753}
 754
 755enum reg_arg_type {
 756	SRC_OP,		/* register is used as source operand */
 757	DST_OP,		/* register is used as destination operand */
 758	DST_OP_NO_MARK	/* same as above, check only, don't mark */
 759};
 760
 761static int cmp_subprogs(const void *a, const void *b)
 762{
 763	return *(int *)a - *(int *)b;
 764}
 765
 766static int find_subprog(struct bpf_verifier_env *env, int off)
 767{
 768	u32 *p;
 769
 770	p = bsearch(&off, env->subprog_starts, env->subprog_cnt,
 771		    sizeof(env->subprog_starts[0]), cmp_subprogs);
 772	if (!p)
 773		return -ENOENT;
 774	return p - env->subprog_starts;
 775
 776}
 777
 778static int add_subprog(struct bpf_verifier_env *env, int off)
 779{
 780	int insn_cnt = env->prog->len;
 781	int ret;
 782
 783	if (off >= insn_cnt || off < 0) {
 784		verbose(env, "call to invalid destination\n");
 785		return -EINVAL;
 786	}
 787	ret = find_subprog(env, off);
 788	if (ret >= 0)
 789		return 0;
 790	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
 791		verbose(env, "too many subprograms\n");
 792		return -E2BIG;
 793	}
 794	env->subprog_starts[env->subprog_cnt++] = off;
 795	sort(env->subprog_starts, env->subprog_cnt,
 796	     sizeof(env->subprog_starts[0]), cmp_subprogs, NULL);
 797	return 0;
 798}
 799
 800static int check_subprogs(struct bpf_verifier_env *env)
 801{
 802	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
 803	struct bpf_insn *insn = env->prog->insnsi;
 804	int insn_cnt = env->prog->len;
 805
 806	/* determine subprog starts. The end is one before the next starts */
 807	for (i = 0; i < insn_cnt; i++) {
 808		if (insn[i].code != (BPF_JMP | BPF_CALL))
 809			continue;
 810		if (insn[i].src_reg != BPF_PSEUDO_CALL)
 811			continue;
 812		if (!env->allow_ptr_leaks) {
 813			verbose(env, "function calls to other bpf functions are allowed for root only\n");
 814			return -EPERM;
 815		}
 816		if (bpf_prog_is_dev_bound(env->prog->aux)) {
 817			verbose(env, "function calls in offloaded programs are not supported yet\n");
 818			return -EINVAL;
 819		}
 820		ret = add_subprog(env, i + insn[i].imm + 1);
 821		if (ret < 0)
 822			return ret;
 823	}
 824
 825	if (env->log.level > 1)
 826		for (i = 0; i < env->subprog_cnt; i++)
 827			verbose(env, "func#%d @%d\n", i, env->subprog_starts[i]);
 828
 829	/* now check that all jumps are within the same subprog */
 830	subprog_start = 0;
 831	if (env->subprog_cnt == cur_subprog)
 832		subprog_end = insn_cnt;
 833	else
 834		subprog_end = env->subprog_starts[cur_subprog++];
 835	for (i = 0; i < insn_cnt; i++) {
 836		u8 code = insn[i].code;
 837
 838		if (BPF_CLASS(code) != BPF_JMP)
 839			goto next;
 840		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
 841			goto next;
 842		off = i + insn[i].off + 1;
 843		if (off < subprog_start || off >= subprog_end) {
 844			verbose(env, "jump out of range from insn %d to %d\n", i, off);
 845			return -EINVAL;
 846		}
 847next:
 848		if (i == subprog_end - 1) {
 849			/* to avoid fall-through from one subprog into another
 850			 * the last insn of the subprog should be either exit
 851			 * or unconditional jump back
 852			 */
 853			if (code != (BPF_JMP | BPF_EXIT) &&
 854			    code != (BPF_JMP | BPF_JA)) {
 855				verbose(env, "last insn is not an exit or jmp\n");
 856				return -EINVAL;
 857			}
 858			subprog_start = subprog_end;
 859			if (env->subprog_cnt == cur_subprog)
 860				subprog_end = insn_cnt;
 861			else
 862				subprog_end = env->subprog_starts[cur_subprog++];
 863		}
 864	}
 865	return 0;
 866}
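/* Illustrative example for check_subprogs() above: for the instruction stream
 *    0: call pc+1   // bpf-to-bpf call, imm = 1
 *    1: exit
 *    2: r0 = 0      // subprog start recorded at insn 0 + 1 + 1 = 2
 *    3: exit
 * one subprog start is recorded at insn 2, so the main part spans insns [0, 2)
 * and the subprog spans insns [2, 4); every jump must stay within its own
 * range and each range must end in an exit or an unconditional jump.
 */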
 867
 868static
 869struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
 870				       const struct bpf_verifier_state *state,
 871				       struct bpf_verifier_state *parent,
 872				       u32 regno)
 873{
 874	struct bpf_verifier_state *tmp = NULL;
 875
 876	/* 'parent' could be a state of caller and
 877	 * 'state' could be a state of callee. In such case
 878	 * parent->curframe < state->curframe
 879	 * and it's ok for r1 - r5 registers
 880	 *
 881	 * 'parent' could be a callee's state after it bpf_exit-ed.
 882	 * In such case parent->curframe > state->curframe
 883	 * and it's ok for r0 only
 884	 */
 885	if (parent->curframe == state->curframe ||
 886	    (parent->curframe < state->curframe &&
 887	     regno >= BPF_REG_1 && regno <= BPF_REG_5) ||
 888	    (parent->curframe > state->curframe &&
 889	       regno == BPF_REG_0))
 890		return parent;
 891
 892	if (parent->curframe > state->curframe &&
 893	    regno >= BPF_REG_6) {
 894		/* for callee saved regs we have to skip the whole chain
 895		 * of states that belong to callee and mark as LIVE_READ
 896		 * the registers before the call
 897		 */
 898		tmp = parent;
 899		while (tmp && tmp->curframe != state->curframe) {
 900			tmp = tmp->parent;
 901		}
 902		if (!tmp)
 903			goto bug;
 904		parent = tmp;
 905	} else {
 906		goto bug;
 907	}
 908	return parent;
 909bug:
 910	verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
 911	verbose(env, "regno %d parent frame %d current frame %d\n",
 912		regno, parent->curframe, state->curframe);
 913	return NULL;
 914}
 915
 916static int mark_reg_read(struct bpf_verifier_env *env,
 917			 const struct bpf_verifier_state *state,
 918			 struct bpf_verifier_state *parent,
 919			 u32 regno)
 920{
 921	bool writes = parent == state->parent; /* Observe write marks */
 922
 923	if (regno == BPF_REG_FP)
 924		/* We don't need to worry about FP liveness because it's read-only */
 925		return 0;
 926
 927	while (parent) {
 928		/* if read wasn't screened by an earlier write ... */
 929		if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN)
 930			break;
 931		parent = skip_callee(env, state, parent, regno);
 932		if (!parent)
 933			return -EFAULT;
 934		/* ... then we depend on parent's value */
 935		parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ;
 936		state = parent;
 937		parent = state->parent;
 938		writes = true;
 939	}
 940	return 0;
 941}
 942
 943static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
 944			 enum reg_arg_type t)
 945{
 946	struct bpf_verifier_state *vstate = env->cur_state;
 947	struct bpf_func_state *state = vstate->frame[vstate->curframe];
 948	struct bpf_reg_state *regs = state->regs;
 949
 950	if (regno >= MAX_BPF_REG) {
 951		verbose(env, "R%d is invalid\n", regno);
 952		return -EINVAL;
 953	}
 954
 955	if (t == SRC_OP) {
 956		/* check whether register used as source operand can be read */
 957		if (regs[regno].type == NOT_INIT) {
 958			verbose(env, "R%d !read_ok\n", regno);
 959			return -EACCES;
 960		}
 961		return mark_reg_read(env, vstate, vstate->parent, regno);
 962	} else {
 963		/* check whether register used as dest operand can be written to */
 964		if (regno == BPF_REG_FP) {
 965			verbose(env, "frame pointer is read only\n");
 966			return -EACCES;
 967		}
 968		regs[regno].live |= REG_LIVE_WRITTEN;
 969		if (t == DST_OP)
 970			mark_reg_unknown(env, regs, regno);
 971	}
 972	return 0;
 973}
 974
 975static bool is_spillable_regtype(enum bpf_reg_type type)
 976{
 977	switch (type) {
 978	case PTR_TO_MAP_VALUE:
 979	case PTR_TO_MAP_VALUE_OR_NULL:
 980	case PTR_TO_STACK:
 981	case PTR_TO_CTX:
 982	case PTR_TO_PACKET:
 983	case PTR_TO_PACKET_META:
 984	case PTR_TO_PACKET_END:
 985	case CONST_PTR_TO_MAP:
 986		return true;
 987	default:
 988		return false;
 989	}
 990}
 991
 992/* Does this register contain a constant zero? */
 993static bool register_is_null(struct bpf_reg_state *reg)
 994{
 995	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
 996}
 997
 998/* check_stack_read/write functions track spill/fill of registers,
 999 * stack boundary and alignment are checked in check_mem_access()
1000 */
1001static int check_stack_write(struct bpf_verifier_env *env,
1002			     struct bpf_func_state *state, /* func where register points to */
1003			     int off, int size, int value_regno, int insn_idx)
1004{
1005	struct bpf_func_state *cur; /* state of the current function */
1006	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
1007	enum bpf_reg_type type;
1008
1009	err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
1010				 true);
1011	if (err)
1012		return err;
1013	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
1014	 * so it's aligned access and [off, off + size) are within stack limits
1015	 */
1016	if (!env->allow_ptr_leaks &&
1017	    state->stack[spi].slot_type[0] == STACK_SPILL &&
1018	    size != BPF_REG_SIZE) {
1019		verbose(env, "attempt to corrupt spilled pointer on stack\n");
1020		return -EACCES;
1021	}
1022
1023	cur = env->cur_state->frame[env->cur_state->curframe];
1024	if (value_regno >= 0 &&
1025	    is_spillable_regtype((type = cur->regs[value_regno].type))) {
1026
1027		/* register containing pointer is being spilled into stack */
1028		if (size != BPF_REG_SIZE) {
1029			verbose(env, "invalid size of register spill\n");
1030			return -EACCES;
1031		}
1032
1033		if (state != cur && type == PTR_TO_STACK) {
1034			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
1035			return -EINVAL;
1036		}
1037
1038		/* save register state */
1039		state->stack[spi].spilled_ptr = cur->regs[value_regno];
1040		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1041
1042		for (i = 0; i < BPF_REG_SIZE; i++) {
1043			if (state->stack[spi].slot_type[i] == STACK_MISC &&
1044			    !env->allow_ptr_leaks) {
1045				int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
1046				int soff = (-spi - 1) * BPF_REG_SIZE;
1047
1048				/* detected reuse of integer stack slot with a pointer
1049				 * which means either llvm is reusing stack slot or
1050				 * an attacker is trying to exploit CVE-2018-3639
1051				 * (speculative store bypass)
1052				 * Have to sanitize that slot with preemptive
1053				 * store of zero.
1054				 */
1055				if (*poff && *poff != soff) {
1056					/* disallow programs where single insn stores
1057					 * into two different stack slots, since verifier
1058					 * cannot sanitize them
1059					 */
1060					verbose(env,
1061						"insn %d cannot access two stack slots fp%d and fp%d",
1062						insn_idx, *poff, soff);
1063					return -EINVAL;
1064				}
1065				*poff = soff;
1066			}
1067			state->stack[spi].slot_type[i] = STACK_SPILL;
1068		}
1069	} else {
1070		u8 type = STACK_MISC;
1071
1072		/* regular write of data into stack */
1073		state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
1074
1075		/* only mark the slot as written if all 8 bytes were written
1076		 * otherwise read propagation may incorrectly stop too soon
1077		 * when stack slots are partially written.
1078		 * This heuristic means that read propagation will be
1079		 * conservative, since it will add reg_live_read marks
1080		 * to stack slots all the way to first state when programs
1081		 * writes+reads less than 8 bytes
1082		 */
1083		if (size == BPF_REG_SIZE)
1084			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1085
1086		/* when we zero initialize stack slots mark them as such */
1087		if (value_regno >= 0 &&
1088		    register_is_null(&cur->regs[value_regno]))
1089			type = STACK_ZERO;
1090
1091		for (i = 0; i < size; i++)
1092			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
1093				type;
1094	}
1095	return 0;
1096}
1097
1098/* registers of every function are unique and mark_reg_read() propagates
1099 * the liveness in the following cases:
1100 * - from callee into caller for R1 - R5 that were used as arguments
1101 * - from caller into callee for R0 that is used as the result of the call
1102 * - from caller to the same caller skipping states of the callee for R6 - R9,
1103 *   since R6 - R9 are callee saved by implicit function prologue and
1104 *   caller's R6 != callee's R6, so when we propagate liveness up to
1105 *   parent states we need to skip callee states for R6 - R9.
1106 *
1107 * stack slot marking is different, since stacks of caller and callee are
1108 * accessible in both (since caller can pass a pointer to caller's stack to
1109 * callee which can pass it to another function), hence mark_stack_slot_read()
1110 * has to propagate the stack liveness to all parent states at given frame number.
1111 * Consider code:
1112 * f1() {
1113 *   ptr = fp - 8;
1114 *   *ptr = ctx;
1115 *   call f2 {
1116 *      .. = *ptr;
1117 *   }
1118 *   .. = *ptr;
1119 * }
1120 * First *ptr is reading from f1's stack and mark_stack_slot_read() has
1121 * to mark liveness at the f1's frame and not f2's frame.
1122 * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has
1123 * to propagate liveness to f2 states at f1's frame level and further into
1124 * f1 states at f1's frame level until write into that stack slot
1125 */
1126static void mark_stack_slot_read(struct bpf_verifier_env *env,
1127				 const struct bpf_verifier_state *state,
1128				 struct bpf_verifier_state *parent,
1129				 int slot, int frameno)
1130{
1131	bool writes = parent == state->parent; /* Observe write marks */
1132
1133	while (parent) {
1134		if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE)
1135			/* since LIVE_WRITTEN mark is only done for full 8-byte
1136			 * write the read marks are conservative and parent
1137			 * state may not even have the stack allocated. In such case
1138			 * end the propagation, since the loop reached beginning
1139			 * of the function
1140			 */
1141			break;
1142		/* if read wasn't screened by an earlier write ... */
1143		if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
1144			break;
1145		/* ... then we depend on parent's value */
1146		parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
1147		state = parent;
1148		parent = state->parent;
1149		writes = true;
1150	}
1151}
1152
1153static int check_stack_read(struct bpf_verifier_env *env,
1154			    struct bpf_func_state *reg_state /* func where register points to */,
1155			    int off, int size, int value_regno)
1156{
1157	struct bpf_verifier_state *vstate = env->cur_state;
1158	struct bpf_func_state *state = vstate->frame[vstate->curframe];
1159	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
1160	u8 *stype;
1161
1162	if (reg_state->allocated_stack <= slot) {
1163		verbose(env, "invalid read from stack off %d+0 size %d\n",
1164			off, size);
1165		return -EACCES;
1166	}
1167	stype = reg_state->stack[spi].slot_type;
1168
1169	if (stype[0] == STACK_SPILL) {
1170		if (size != BPF_REG_SIZE) {
1171			verbose(env, "invalid size of register spill\n");
1172			return -EACCES;
1173		}
1174		for (i = 1; i < BPF_REG_SIZE; i++) {
1175			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
1176				verbose(env, "corrupted spill memory\n");
1177				return -EACCES;
1178			}
1179		}
1180
1181		if (value_regno >= 0) {
1182			/* restore register state from stack */
1183			state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
1184			/* mark reg as written since spilled pointer state likely
1185			 * has its liveness marks cleared by is_state_visited()
1186			 * which resets stack/reg liveness for state transitions
1187			 */
1188			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
1189		}
1190		mark_stack_slot_read(env, vstate, vstate->parent, spi,
1191				     reg_state->frameno);
1192		return 0;
1193	} else {
1194		int zeros = 0;
1195
1196		for (i = 0; i < size; i++) {
1197			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
1198				continue;
1199			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
1200				zeros++;
1201				continue;
1202			}
1203			verbose(env, "invalid read from stack off %d+%d size %d\n",
1204				off, i, size);
1205			return -EACCES;
1206		}
1207		mark_stack_slot_read(env, vstate, vstate->parent, spi,
1208				     reg_state->frameno);
1209		if (value_regno >= 0) {
1210			if (zeros == size) {
1211				/* any size read into register is zero extended,
1212				 * so the whole register == const_zero
1213				 */
1214				__mark_reg_const_zero(&state->regs[value_regno]);
1215			} else {
1216				/* have read misc data from the stack */
1217				mark_reg_unknown(env, state->regs, value_regno);
1218			}
1219			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
1220		}
1221		return 0;
1222	}
1223}
1224
1225/* check read/write into map element returned by bpf_map_lookup_elem() */
1226static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
1227			      int size, bool zero_size_allowed)
1228{
1229	struct bpf_reg_state *regs = cur_regs(env);
1230	struct bpf_map *map = regs[regno].map_ptr;
1231
1232	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
1233	    off + size > map->value_size) {
1234		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
1235			map->value_size, off, size);
1236		return -EACCES;
1237	}
1238	return 0;
1239}
1240
1241/* check read/write into a map element with possible variable offset */
1242static int check_map_access(struct bpf_verifier_env *env, u32 regno,
1243			    int off, int size, bool zero_size_allowed)
1244{
1245	struct bpf_verifier_state *vstate = env->cur_state;
1246	struct bpf_func_state *state = vstate->frame[vstate->curframe];
1247	struct bpf_reg_state *reg = &state->regs[regno];
1248	int err;
1249
1250	/* We may have adjusted the register to this map value, so we
1251	 * need to try adding each of min_value and max_value to off
1252	 * to make sure our theoretical access will be safe.
1253	 */
1254	if (env->log.level)
1255		print_verifier_state(env, state);
1256	/* The minimum value is only important with signed
1257	 * comparisons where we can't assume the floor of a
1258	 * value is 0.  If we are using signed variables for our
1259	 * index'es we need to make sure that whatever we use
1260	 * indexes we need to make sure that whatever we use
1261	 */
1262	if (reg->smin_value < 0) {
1263		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
1264			regno);
1265		return -EACCES;
1266	}
1267	err = __check_map_access(env, regno, reg->smin_value + off, size,
1268				 zero_size_allowed);
1269	if (err) {
1270		verbose(env, "R%d min value is outside of the array range\n",
1271			regno);
1272		return err;
1273	}
1274
1275	/* If we haven't set a max value then we need to bail since we can't be
1276	 * sure we won't do bad things.
1277	 * If reg->umax_value + off could overflow, treat that as unbounded too.
1278	 */
1279	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
1280		verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
1281			regno);
1282		return -EACCES;
1283	}
1284	err = __check_map_access(env, regno, reg->umax_value + off, size,
1285				 zero_size_allowed);
1286	if (err)
1287		verbose(env, "R%d max value is outside of the array range\n",
1288			regno);
1289	return err;
1290}
1291
1292#define MAX_PACKET_OFF 0xffff
1293
1294static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
1295				       const struct bpf_call_arg_meta *meta,
1296				       enum bpf_access_type t)
1297{
1298	switch (env->prog->type) {
1299	case BPF_PROG_TYPE_LWT_IN:
1300	case BPF_PROG_TYPE_LWT_OUT:
1301		/* dst_input() and dst_output() can't write for now */
1302		if (t == BPF_WRITE)
1303			return false;
1304		/* fallthrough */
1305	case BPF_PROG_TYPE_SCHED_CLS:
1306	case BPF_PROG_TYPE_SCHED_ACT:
1307	case BPF_PROG_TYPE_XDP:
1308	case BPF_PROG_TYPE_LWT_XMIT:
1309	case BPF_PROG_TYPE_SK_SKB:
1310	case BPF_PROG_TYPE_SK_MSG:
1311		if (meta)
1312			return meta->pkt_access;
1313
1314		env->seen_direct_write = true;
1315		return true;
1316	default:
1317		return false;
1318	}
1319}
1320
1321static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
1322				 int off, int size, bool zero_size_allowed)
1323{
1324	struct bpf_reg_state *regs = cur_regs(env);
1325	struct bpf_reg_state *reg = &regs[regno];
1326
1327	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
1328	    (u64)off + size > reg->range) {
1329		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
1330			off, size, regno, reg->id, reg->off, reg->range);
1331		return -EACCES;
1332	}
1333	return 0;
1334}
1335
1336static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
1337			       int size, bool zero_size_allowed)
1338{
1339	struct bpf_reg_state *regs = cur_regs(env);
1340	struct bpf_reg_state *reg = &regs[regno];
1341	int err;
1342
1343	/* We may have added a variable offset to the packet pointer; but any
1344	 * reg->range we have comes after that.  We are only checking the fixed
1345	 * offset.
1346	 */
1347
1348	/* We don't allow negative numbers, because we aren't tracking enough
1349	 * detail to prove they're safe.
1350	 */
1351	if (reg->smin_value < 0) {
1352		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
1353			regno);
1354		return -EACCES;
1355	}
1356	err = __check_packet_access(env, regno, off, size, zero_size_allowed);
1357	if (err) {
1358		verbose(env, "R%d offset is outside of the packet\n", regno);
1359		return err;
1360	}
1361	return err;
1362}
1363
1364/* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
1365static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
1366			    enum bpf_access_type t, enum bpf_reg_type *reg_type)
1367{
1368	struct bpf_insn_access_aux info = {
1369		.reg_type = *reg_type,
1370	};
1371
1372	if (env->ops->is_valid_access &&
1373	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
1374		/* A non zero info.ctx_field_size indicates that this field is a
1375		 * candidate for later verifier transformation to load the whole
1376		 * field and then apply a mask when accessed with a narrower
1377		 * access than actual ctx access size. A zero info.ctx_field_size
1378		 * will only allow for whole field access and rejects any other
1379		 * type of narrower access.
1380		 */
1381		*reg_type = info.reg_type;
1382
1383		env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
1384		/* remember the offset of last byte accessed in ctx */
1385		if (env->prog->aux->max_ctx_offset < off + size)
1386			env->prog->aux->max_ctx_offset = off + size;
1387		return 0;
1388	}
1389
1390	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
1391	return -EACCES;
1392}
1393
1394static bool __is_pointer_value(bool allow_ptr_leaks,
1395			       const struct bpf_reg_state *reg)
1396{
1397	if (allow_ptr_leaks)
1398		return false;
1399
1400	return reg->type != SCALAR_VALUE;
1401}
1402
1403static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
1404{
1405	return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
1406}
1407
1408static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
1409{
1410	const struct bpf_reg_state *reg = cur_regs(env) + regno;
1411
1412	return reg->type == PTR_TO_CTX;
1413}
1414
1415static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
1416{
1417	const struct bpf_reg_state *reg = cur_regs(env) + regno;
1418
1419	return type_is_pkt_pointer(reg->type);
1420}
1421
1422static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
1423				   const struct bpf_reg_state *reg,
1424				   int off, int size, bool strict)
1425{
1426	struct tnum reg_off;
1427	int ip_align;
1428
1429	/* Byte size accesses are always allowed. */
1430	if (!strict || size == 1)
1431		return 0;
1432
1433	/* For platforms that do not have a Kconfig enabling
1434	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
1435	 * NET_IP_ALIGN is universally set to '2'.  And on platforms
1436	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
1437	 * to this code only in strict mode where we want to emulate
1438	 * the NET_IP_ALIGN==2 checking.  Therefore use an
1439	 * unconditional IP align value of '2'.
1440	 */
1441	ip_align = 2;
1442
1443	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
1444	if (!tnum_is_aligned(reg_off, size)) {
1445		char tn_buf[48];
1446
1447		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1448		verbose(env,
1449			"misaligned packet access off %d+%s+%d+%d size %d\n",
1450			ip_align, tn_buf, reg->off, off, size);
1451		return -EACCES;
1452	}
1453
1454	return 0;
1455}
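/* e.g. with the above, a 4-byte load at pkt + 14 (the start of an IP header
 * behind a 14-byte Ethernet header) is considered aligned, because the
 * emulated 2-byte NET_IP_ALIGN offset makes it 2 + 14 = 16, a multiple of 4.
 */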
1456
1457static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
1458				       const struct bpf_reg_state *reg,
1459				       const char *pointer_desc,
1460				       int off, int size, bool strict)
1461{
1462	struct tnum reg_off;
1463
1464	/* Byte size accesses are always allowed. */
1465	if (!strict || size == 1)
1466		return 0;
1467
1468	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
1469	if (!tnum_is_aligned(reg_off, size)) {
1470		char tn_buf[48];
1471
1472		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1473		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
1474			pointer_desc, tn_buf, reg->off, off, size);
1475		return -EACCES;
1476	}
1477
1478	return 0;
1479}
1480
1481static int check_ptr_alignment(struct bpf_verifier_env *env,
1482			       const struct bpf_reg_state *reg, int off,
1483			       int size, bool strict_alignment_once)
1484{
1485	bool strict = env->strict_alignment || strict_alignment_once;
1486	const char *pointer_desc = "";
1487
1488	switch (reg->type) {
1489	case PTR_TO_PACKET:
1490	case PTR_TO_PACKET_META:
1491		/* Special case, because of NET_IP_ALIGN. Given metadata sits
1492		 * right in front, treat it the very same way.
1493		 */
1494		return check_pkt_ptr_alignment(env, reg, off, size, strict);
1495	case PTR_TO_MAP_VALUE:
1496		pointer_desc = "value ";
1497		break;
1498	case PTR_TO_CTX:
1499		pointer_desc = "context ";
1500		break;
1501	case PTR_TO_STACK:
1502		pointer_desc = "stack ";
1503		/* The stack spill tracking logic in check_stack_write()
1504		 * and check_stack_read() relies on stack accesses being
1505		 * aligned.
1506		 */
1507		strict = true;
1508		break;
1509	default:
1510		break;
1511	}
1512	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
1513					   strict);
1514}
1515
1516static int update_stack_depth(struct bpf_verifier_env *env,
1517			      const struct bpf_func_state *func,
1518			      int off)
1519{
1520	u16 stack = env->subprog_stack_depth[func->subprogno];
1521
1522	if (stack >= -off)
1523		return 0;
1524
1525	/* update known max for given subprogram */
1526	env->subprog_stack_depth[func->subprogno] = -off;
1527	return 0;
1528}
1529
1530/* starting from main bpf function walk all instructions of the function
1531 * and recursively walk all callees that given function can call.
1532 * Ignore jump and exit insns.
1533 * Since recursion is prevented by check_cfg() this algorithm
1534 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
1535 */
1536static int check_max_stack_depth(struct bpf_verifier_env *env)
1537{
1538	int depth = 0, frame = 0, subprog = 0, i = 0, subprog_end;
1539	struct bpf_insn *insn = env->prog->insnsi;
1540	int insn_cnt = env->prog->len;
1541	int ret_insn[MAX_CALL_FRAMES];
1542	int ret_prog[MAX_CALL_FRAMES];
1543
1544process_func:
1545	/* round up to 32-bytes, since this is granularity
1546	 * of interpreter stack size
1547	 */
1548	depth += round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
1549	if (depth > MAX_BPF_STACK) {
1550		verbose(env, "combined stack size of %d calls is %d. Too large\n",
1551			frame + 1, depth);
1552		return -EACCES;
1553	}
1554continue_func:
1555	if (env->subprog_cnt == subprog)
1556		subprog_end = insn_cnt;
1557	else
1558		subprog_end = env->subprog_starts[subprog];
1559	for (; i < subprog_end; i++) {
1560		if (insn[i].code != (BPF_JMP | BPF_CALL))
1561			continue;
1562		if (insn[i].src_reg != BPF_PSEUDO_CALL)
1563			continue;
1564		/* remember insn and function to return to */
1565		ret_insn[frame] = i + 1;
1566		ret_prog[frame] = subprog;
1567
1568		/* find the callee */
1569		i = i + insn[i].imm + 1;
1570		subprog = find_subprog(env, i);
1571		if (subprog < 0) {
1572			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
1573				  i);
1574			return -EFAULT;
1575		}
1576		subprog++;
1577		frame++;
1578		if (frame >= MAX_CALL_FRAMES) {
1579			WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
1580			return -EFAULT;
1581		}
1582		goto process_func;
1583	}
1584	/* end of for() loop means the last insn of the 'subprog'
1585	 * was reached. Doesn't matter whether it was JA or EXIT
1586	 */
1587	if (frame == 0)
1588		return 0;
1589	depth -= round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
1590	frame--;
1591	i = ret_insn[frame];
1592	subprog = ret_prog[frame];
1593	goto continue_func;
1594}
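/* Illustrative example: if main uses 64 bytes of stack and calls subprog A
 * (32 bytes), which in turn calls subprog B (100 bytes, rounded up to 128),
 * the deepest chain needs 64 + 32 + 128 = 224 bytes, which is accepted since
 * it stays below MAX_BPF_STACK.
 */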
1595
1596#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1597static int get_callee_stack_depth(struct bpf_verifier_env *env,
1598				  const struct bpf_insn *insn, int idx)
1599{
1600	int start = idx + insn->imm + 1, subprog;
1601
1602	subprog = find_subprog(env, start);
1603	if (subprog < 0) {
1604		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
1605			  start);
1606		return -EFAULT;
1607	}
1608	subprog++;
1609	return env->subprog_stack_depth[subprog];
1610}
1611#endif
1612
1613/* truncate register to smaller size (in bytes)
1614 * must be called with size < BPF_REG_SIZE
1615 */
1616static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
1617{
1618	u64 mask;
1619
1620	/* clear high bits in bit representation */
1621	reg->var_off = tnum_cast(reg->var_off, size);
1622
1623	/* fix arithmetic bounds */
1624	mask = ((u64)1 << (size * 8)) - 1;
1625	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
1626		reg->umin_value &= mask;
1627		reg->umax_value &= mask;
1628	} else {
1629		reg->umin_value = 0;
1630		reg->umax_value = mask;
1631	}
1632	reg->smin_value = reg->umin_value;
1633	reg->smax_value = reg->umax_value;
1634}
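/* e.g. truncating to size == 2 above uses mask 0xffff: bounds
 * [0x1000f, 0x10034] share the same upper bits and become [0xf, 0x34], while
 * bounds [0xfff0, 0x10010] straddle a 64K boundary and are reset to
 * [0, 0xffff].
 */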
1635
1636/* check whether memory at (regno + off) is accessible for t = (read | write)
1637 * if t==write, value_regno is a register which value is stored into memory
1638 * if t==read, value_regno is a register which will receive the value from memory
1639 * if t==write && value_regno==-1, some unknown value is stored into memory
1640 * if t==read && value_regno==-1, don't care what we read from memory
1641 */
1642static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
1643			    int off, int bpf_size, enum bpf_access_type t,
1644			    int value_regno, bool strict_alignment_once)
1645{
1646	struct bpf_reg_state *regs = cur_regs(env);
1647	struct bpf_reg_state *reg = regs + regno;
1648	struct bpf_func_state *state;
1649	int size, err = 0;
1650
1651	size = bpf_size_to_bytes(bpf_size);
1652	if (size < 0)
1653		return size;
1654
1655	/* alignment checks will add in reg->off themselves */
1656	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
1657	if (err)
1658		return err;
1659
1660	/* for access checks, reg->off is just part of off */
1661	off += reg->off;
1662
1663	if (reg->type == PTR_TO_MAP_VALUE) {
1664		if (t == BPF_WRITE && value_regno >= 0 &&
1665		    is_pointer_value(env, value_regno)) {
1666			verbose(env, "R%d leaks addr into map\n", value_regno);
1667			return -EACCES;
1668		}
1669
1670		err = check_map_access(env, regno, off, size, false);
1671		if (!err && t == BPF_READ && value_regno >= 0)
1672			mark_reg_unknown(env, regs, value_regno);
1673
1674	} else if (reg->type == PTR_TO_CTX) {
1675		enum bpf_reg_type reg_type = SCALAR_VALUE;
1676
1677		if (t == BPF_WRITE && value_regno >= 0 &&
1678		    is_pointer_value(env, value_regno)) {
1679			verbose(env, "R%d leaks addr into ctx\n", value_regno);
1680			return -EACCES;
1681		}
1682		/* ctx accesses must be at a fixed offset, so that we can
1683		 * determine what type of data is returned.
1684		 */
1685		if (reg->off) {
1686			verbose(env,
1687				"dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
1688				regno, reg->off, off - reg->off);
1689			return -EACCES;
1690		}
1691		if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
1692			char tn_buf[48];
1693
1694			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1695			verbose(env,
1696				"variable ctx access var_off=%s off=%d size=%d",
1697				tn_buf, off, size);
1698			return -EACCES;
1699		}
1700		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
1701		if (!err && t == BPF_READ && value_regno >= 0) {
1702			/* ctx access returns either a scalar, or a
1703			 * PTR_TO_PACKET[_META,_END]. In the latter
1704			 * case, we know the offset is zero.
1705			 */
1706			if (reg_type == SCALAR_VALUE)
1707				mark_reg_unknown(env, regs, value_regno);
1708			else
1709				mark_reg_known_zero(env, regs,
1710						    value_regno);
1711			regs[value_regno].id = 0;
1712			regs[value_regno].off = 0;
1713			regs[value_regno].range = 0;
1714			regs[value_regno].type = reg_type;
1715		}
1716
1717	} else if (reg->type == PTR_TO_STACK) {
1718		/* stack accesses must be at a fixed offset, so that we can
1719		 * determine what type of data is returned.
1720		 * See check_stack_read().
1721		 */
1722		if (!tnum_is_const(reg->var_off)) {
1723			char tn_buf[48];
1724
1725			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1726			verbose(env, "variable stack access var_off=%s off=%d size=%d",
1727				tn_buf, off, size);
1728			return -EACCES;
1729		}
1730		off += reg->var_off.value;
1731		if (off >= 0 || off < -MAX_BPF_STACK) {
1732			verbose(env, "invalid stack off=%d size=%d\n", off,
1733				size);
1734			return -EACCES;
1735		}
1736
1737		state = func(env, reg);
1738		err = update_stack_depth(env, state, off);
1739		if (err)
1740			return err;
1741
1742		if (t == BPF_WRITE)
1743			err = check_stack_write(env, state, off, size,
1744						value_regno, insn_idx);
1745		else
1746			err = check_stack_read(env, state, off, size,
1747					       value_regno);
1748	} else if (reg_is_pkt_pointer(reg)) {
1749		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
1750			verbose(env, "cannot write into packet\n");
1751			return -EACCES;
1752		}
1753		if (t == BPF_WRITE && value_regno >= 0 &&
1754		    is_pointer_value(env, value_regno)) {
1755			verbose(env, "R%d leaks addr into packet\n",
1756				value_regno);
1757			return -EACCES;
1758		}
1759		err = check_packet_access(env, regno, off, size, false);
1760		if (!err && t == BPF_READ && value_regno >= 0)
1761			mark_reg_unknown(env, regs, value_regno);
1762	} else {
1763		verbose(env, "R%d invalid mem access '%s'\n", regno,
1764			reg_type_str[reg->type]);
1765		return -EACCES;
1766	}
1767
1768	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
1769	    regs[value_regno].type == SCALAR_VALUE) {
1770		/* b/h/w load zero-extends, mark upper bits as known 0 */
1771		coerce_reg_to_size(&regs[value_regno], size);
1772	}
1773	return err;
1774}
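/* For instance (illustrative), a 1-byte read from a map value into a
 * register leaves that register as an unknown SCALAR_VALUE whose bounds are
 * then narrowed to [0, 0xff] by the coerce_reg_to_size() call above,
 * reflecting the zero-extension done by byte loads.
 */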
1775
1776static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
1777{
1778	int err;
1779
1780	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
1781	    insn->imm != 0) {
1782		verbose(env, "BPF_XADD uses reserved fields\n");
1783		return -EINVAL;
1784	}
1785
1786	/* check src1 operand */
1787	err = check_reg_arg(env, insn->src_reg, SRC_OP);
1788	if (err)
1789		return err;
1790
1791	/* check src2 operand */
1792	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
1793	if (err)
1794		return err;
1795
1796	if (is_pointer_value(env, insn->src_reg)) {
1797		verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
1798		return -EACCES;
1799	}
1800
1801	if (is_ctx_reg(env, insn->dst_reg) ||
1802	    is_pkt_reg(env, insn->dst_reg)) {
1803		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
1804			insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ?
1805			"context" : "packet");
1806		return -EACCES;
1807	}
1808
1809	/* check whether atomic_add can read the memory */
1810	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1811			       BPF_SIZE(insn->code), BPF_READ, -1, true);
1812	if (err)
1813		return err;
1814
1815	/* check whether atomic_add can write into the same memory */
1816	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1817				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
1818}
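/* A sketch of an insn that passes the checks above (assuming the stack slot
 * at fp-8 was initialized earlier in the program):
 *
 *   BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8)
 *
 * i.e. an atomic 8-byte add of r0 into the memory at r10-8.  The same add
 * targeting a ctx or packet pointer is rejected above.
 */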
1819
1820/* when register 'regno' is passed into a function that will read 'access_size'
1821 * bytes from that pointer, make sure that it's within the stack boundary
1822 * and that all elements of the stack are initialized.
1823 * Unlike most pointer bounds-checking functions, this one doesn't take an
1824 * 'off' argument, so it has to add in reg->off itself.
1825 */
1826static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1827				int access_size, bool zero_size_allowed,
1828				struct bpf_call_arg_meta *meta)
1829{
1830	struct bpf_reg_state *reg = cur_regs(env) + regno;
1831	struct bpf_func_state *state = func(env, reg);
1832	int off, i, slot, spi;
1833
1834	if (reg->type != PTR_TO_STACK) {
1835		/* Allow zero-byte read from NULL, regardless of pointer type */
1836		if (zero_size_allowed && access_size == 0 &&
1837		    register_is_null(reg))
1838			return 0;
1839
1840		verbose(env, "R%d type=%s expected=%s\n", regno,
1841			reg_type_str[reg->type],
1842			reg_type_str[PTR_TO_STACK]);
1843		return -EACCES;
1844	}
1845
1846	/* Only allow fixed-offset stack reads */
1847	if (!tnum_is_const(reg->var_off)) {
1848		char tn_buf[48];
1849
1850		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1851		verbose(env, "invalid variable stack read R%d var_off=%s\n",
1852			regno, tn_buf);
1853		return -EACCES;
1854	}
1855	off = reg->off + reg->var_off.value;
1856	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
1857	    access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
1858		verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
1859			regno, off, access_size);
1860		return -EACCES;
1861	}
1862
1863	if (meta && meta->raw_mode) {
1864		meta->access_size = access_size;
1865		meta->regno = regno;
1866		return 0;
1867	}
1868
1869	for (i = 0; i < access_size; i++) {
1870		u8 *stype;
1871
1872		slot = -(off + i) - 1;
1873		spi = slot / BPF_REG_SIZE;
1874		if (state->allocated_stack <= slot)
1875			goto err;
1876		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
1877		if (*stype == STACK_MISC)
1878			goto mark;
1879		if (*stype == STACK_ZERO) {
1880			/* helper can write anything into the stack */
1881			*stype = STACK_MISC;
1882			goto mark;
1883		}
1884err:
1885		verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
1886			off, i, access_size);
1887		return -EACCES;
1888mark:
1889		/* reading any byte out of 8-byte 'spill_slot' will cause
1890		 * the whole slot to be marked as 'read'
1891		 */
1892		mark_stack_slot_read(env, env->cur_state, env->cur_state->parent,
1893				     spi, state->frameno);
1894	}
1895	return update_stack_depth(env, state, off);
1896}
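/* Worked example for the slot/spi arithmetic above (illustrative): for an
 * access at off = -16 with access_size = 8, byte i = 0 maps to slot 15 and
 * spi 1, and byte i = 7 maps to slot 8, still spi 1, i.e. the whole access
 * falls into the second 8-byte stack slot below the frame pointer.
 */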
1897
1898static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
1899				   int access_size, bool zero_size_allowed,
1900				   struct bpf_call_arg_meta *meta)
1901{
1902	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1903
1904	switch (reg->type) {
1905	case PTR_TO_PACKET:
1906	case PTR_TO_PACKET_META:
1907		return check_packet_access(env, regno, reg->off, access_size,
1908					   zero_size_allowed);
1909	case PTR_TO_MAP_VALUE:
1910		return check_map_access(env, regno, reg->off, access_size,
1911					zero_size_allowed);
1912	default: /* scalar_value|ptr_to_stack or invalid ptr */
1913		return check_stack_boundary(env, regno, access_size,
1914					    zero_size_allowed, meta);
1915	}
1916}
1917
1918static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
1919{
1920	return type == ARG_PTR_TO_MEM ||
1921	       type == ARG_PTR_TO_MEM_OR_NULL ||
1922	       type == ARG_PTR_TO_UNINIT_MEM;
1923}
1924
1925static bool arg_type_is_mem_size(enum bpf_arg_type type)
1926{
1927	return type == ARG_CONST_SIZE ||
1928	       type == ARG_CONST_SIZE_OR_ZERO;
1929}
1930
1931static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
1932			  enum bpf_arg_type arg_type,
1933			  struct bpf_call_arg_meta *meta)
1934{
1935	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1936	enum bpf_reg_type expected_type, type = reg->type;
1937	int err = 0;
1938
1939	if (arg_type == ARG_DONTCARE)
1940		return 0;
1941
1942	err = check_reg_arg(env, regno, SRC_OP);
1943	if (err)
1944		return err;
1945
1946	if (arg_type == ARG_ANYTHING) {
1947		if (is_pointer_value(env, regno)) {
1948			verbose(env, "R%d leaks addr into helper function\n",
1949				regno);
1950			return -EACCES;
1951		}
1952		return 0;
1953	}
1954
1955	if (type_is_pkt_pointer(type) &&
1956	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
1957		verbose(env, "helper access to the packet is not allowed\n");
1958		return -EACCES;
1959	}
1960
1961	if (arg_type == ARG_PTR_TO_MAP_KEY ||
1962	    arg_type == ARG_PTR_TO_MAP_VALUE) {
1963		expected_type = PTR_TO_STACK;
1964		if (!type_is_pkt_pointer(type) &&
1965		    type != expected_type)
1966			goto err_type;
1967	} else if (arg_type == ARG_CONST_SIZE ||
1968		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
1969		expected_type = SCALAR_VALUE;
1970		if (type != expected_type)
1971			goto err_type;
1972	} else if (arg_type == ARG_CONST_MAP_PTR) {
1973		expected_type = CONST_PTR_TO_MAP;
1974		if (type != expected_type)
1975			goto err_type;
1976	} else if (arg_type == ARG_PTR_TO_CTX) {
1977		expected_type = PTR_TO_CTX;
1978		if (type != expected_type)
1979			goto err_type;
1980	} else if (arg_type_is_mem_ptr(arg_type)) {
1981		expected_type = PTR_TO_STACK;
1982		/* One exception here: in case the function allows NULL to be
1983		 * passed in as an argument, it's a SCALAR_VALUE type. The final
1984		 * test happens during stack boundary checking.
1985		 */
1986		if (register_is_null(reg) &&
1987		    arg_type == ARG_PTR_TO_MEM_OR_NULL)
1988			/* final test in check_stack_boundary() */;
1989		else if (!type_is_pkt_pointer(type) &&
1990			 type != PTR_TO_MAP_VALUE &&
1991			 type != expected_type)
1992			goto err_type;
1993		meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
1994	} else {
1995		verbose(env, "unsupported arg_type %d\n", arg_type);
1996		return -EFAULT;
1997	}
1998
1999	if (arg_type == ARG_CONST_MAP_PTR) {
2000		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
2001		meta->map_ptr = reg->map_ptr;
2002	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
2003		/* bpf_map_xxx(..., map_ptr, ..., key) call:
2004		 * check that [key, key + map->key_size) are within
2005		 * stack limits and initialized
2006		 */
2007		if (!meta->map_ptr) {
2008			/* in the function declaration map_ptr must come before
2009			 * map_key, so that it's verified and known before
2010			 * we have to check map_key here. Otherwise it means
2011			 * that the kernel subsystem misconfigured the verifier
2012			 */
2013			verbose(env, "invalid map_ptr to access map->key\n");
2014			return -EACCES;
2015		}
2016		if (type_is_pkt_pointer(type))
2017			err = check_packet_access(env, regno, reg->off,
2018						  meta->map_ptr->key_size,
2019						  false);
2020		else
2021			err = check_stack_boundary(env, regno,
2022						   meta->map_ptr->key_size,
2023						   false, NULL);
2024	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
2025		/* bpf_map_xxx(..., map_ptr, ..., value) call:
2026		 * check [value, value + map->value_size) validity
2027		 */
2028		if (!meta->map_ptr) {
2029			/* kernel subsystem misconfigured verifier */
2030			verbose(env, "invalid map_ptr to access map->value\n");
2031			return -EACCES;
2032		}
2033		if (type_is_pkt_pointer(type))
2034			err = check_packet_access(env, regno, reg->off,
2035						  meta->map_ptr->value_size,
2036						  false);
2037		else
2038			err = check_stack_boundary(env, regno,
2039						   meta->map_ptr->value_size,
2040						   false, NULL);
2041	} else if (arg_type_is_mem_size(arg_type)) {
2042		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
2043
2044		/* The register is SCALAR_VALUE; the access check
2045		 * happens using its boundaries.
2046		 */
2047		if (!tnum_is_const(reg->var_off))
2048			/* For unprivileged variable accesses, disable raw
2049			 * mode so that the program is required to
2050			 * initialize all the memory that the helper could
2051			 * just partially fill up.
2052			 */
2053			meta = NULL;
2054
2055		if (reg->smin_value < 0) {
2056			verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
2057				regno);
2058			return -EACCES;
2059		}
2060
2061		if (reg->umin_value == 0) {
2062			err = check_helper_mem_access(env, regno - 1, 0,
2063						      zero_size_allowed,
2064						      meta);
2065			if (err)
2066				return err;
2067		}
2068
2069		if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
2070			verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
2071				regno);
2072			return -EACCES;
2073		}
2074		err = check_helper_mem_access(env, regno - 1,
2075					      reg->umax_value,
2076					      zero_size_allowed, meta);
2077	}
2078
2079	return err;
2080err_type:
2081	verbose(env, "R%d type=%s expected=%s\n", regno,
2082		reg_type_str[type], reg_type_str[expected_type]);
2083	return -EACCES;
2084}
2085
2086static int check_map_func_compatibility(struct bpf_verifier_env *env,
2087					struct bpf_map *map, int func_id)
2088{
2089	if (!map)
2090		return 0;
2091
2092	/* We need a two way check, first is from map perspective ... */
2093	switch (map->map_type) {
2094	case BPF_MAP_TYPE_PROG_ARRAY:
2095		if (func_id != BPF_FUNC_tail_call)
2096			goto error;
2097		break;
2098	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
2099		if (func_id != BPF_FUNC_perf_event_read &&
2100		    func_id != BPF_FUNC_perf_event_output &&
2101		    func_id != BPF_FUNC_perf_event_read_value)
2102			goto error;
2103		break;
2104	case BPF_MAP_TYPE_STACK_TRACE:
2105		if (func_id != BPF_FUNC_get_stackid)
2106			goto error;
2107		break;
2108	case BPF_MAP_TYPE_CGROUP_ARRAY:
2109		if (func_id != BPF_FUNC_skb_under_cgroup &&
2110		    func_id != BPF_FUNC_current_task_under_cgroup)
2111			goto error;
2112		break;
2113	/* devmap returns a pointer to a live net_device ifindex that we cannot
2114	 * allow to be modified from the bpf side. So do not allow element
2115	 * lookups for now.
2116	 */
2117	case BPF_MAP_TYPE_DEVMAP:
2118		if (func_id != BPF_FUNC_redirect_map)
2119			goto error;
2120		break;
2121	/* Restrict bpf side of cpumap, open when use-cases appear */
2122	case BPF_MAP_TYPE_CPUMAP:
2123		if (func_id != BPF_FUNC_redirect_map)
2124			goto error;
2125		break;
2126	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
2127	case BPF_MAP_TYPE_HASH_OF_MAPS:
2128		if (func_id != BPF_FUNC_map_lookup_elem)
2129			goto error;
2130		break;
2131	case BPF_MAP_TYPE_SOCKMAP:
2132		if (func_id != BPF_FUNC_sk_redirect_map &&
2133		    func_id != BPF_FUNC_sock_map_update &&
2134		    func_id != BPF_FUNC_map_delete_elem &&
2135		    func_id != BPF_FUNC_msg_redirect_map)
2136			goto error;
2137		break;
2138	default:
2139		break;
2140	}
2141
2142	/* ... and second from the function itself. */
2143	switch (func_id) {
2144	case BPF_FUNC_tail_call:
2145		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
2146			goto error;
2147		if (env->subprog_cnt) {
2148			verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
2149			return -EINVAL;
2150		}
2151		break;
2152	case BPF_FUNC_perf_event_read:
2153	case BPF_FUNC_perf_event_output:
2154	case BPF_FUNC_perf_event_read_value:
2155		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
2156			goto error;
2157		break;
2158	case BPF_FUNC_get_stackid:
2159		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
2160			goto error;
2161		break;
2162	case BPF_FUNC_current_task_under_cgroup:
2163	case BPF_FUNC_skb_under_cgroup:
2164		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
2165			goto error;
2166		break;
2167	case BPF_FUNC_redirect_map:
2168		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
2169		    map->map_type != BPF_MAP_TYPE_CPUMAP)
2170			goto error;
2171		break;
2172	case BPF_FUNC_sk_redirect_map:
2173	case BPF_FUNC_msg_redirect_map:
2174		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
2175			goto error;
2176		break;
2177	case BPF_FUNC_sock_map_update:
2178		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
2179			goto error;
2180		break;
2181	default:
2182		break;
2183	}
2184
2185	return 0;
2186error:
2187	verbose(env, "cannot pass map_type %d into func %s#%d\n",
2188		map->map_type, func_id_name(func_id), func_id);
2189	return -EINVAL;
2190}
2191
2192static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
2193{
2194	int count = 0;
2195
2196	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
2197		count++;
2198	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
2199		count++;
2200	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
2201		count++;
2202	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
2203		count++;
2204	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
2205		count++;
2206
2207	/* We only support one arg being in raw mode at the moment,
2208	 * which is sufficient for the helper functions we have
2209	 * right now.
2210	 */
2211	return count <= 1;
2212}
2213
2214static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
2215				    enum bpf_arg_type arg_next)
2216{
2217	return (arg_type_is_mem_ptr(arg_curr) &&
2218	        !arg_type_is_mem_size(arg_next)) ||
2219	       (!arg_type_is_mem_ptr(arg_curr) &&
2220		arg_type_is_mem_size(arg_next));
2221}
2222
2223static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
2224{
2225	/* bpf_xxx(..., buf, len) call will access 'len'
2226	 * bytes from memory 'buf'. Both arg types need
2227	 * to be paired, so make sure there's no buggy
2228	 * helper function specification.
2229	 */
2230	if (arg_type_is_mem_size(fn->arg1_type) ||
2231	    arg_type_is_mem_ptr(fn->arg5_type)  ||
2232	    check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
2233	    check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
2234	    check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
2235	    check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
2236		return false;
2237
2238	return true;
2239}
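/* As an example of a valid pairing (helper proto shown for illustration;
 * see its actual definition for the authoritative types): bpf_probe_read()
 * declares arg1 as ARG_PTR_TO_UNINIT_MEM and arg2 as a size argument
 * (ARG_CONST_SIZE or ARG_CONST_SIZE_OR_ZERO, depending on kernel version),
 * so the mem pointer is immediately followed by its size and the checks
 * above pass.
 */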
2240
2241static int check_func_proto(const struct bpf_func_proto *fn)
2242{
2243	return check_raw_mode_ok(fn) &&
2244	       check_arg_pair_ok(fn) ? 0 : -EINVAL;
2245}
2246
2247/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
2248 * are now invalid, so turn them into unknown SCALAR_VALUE.
2249 */
2250static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
2251				     struct bpf_func_state *state)
2252{
2253	struct bpf_reg_state *regs = state->regs, *reg;
2254	int i;
2255
2256	for (i = 0; i < MAX_BPF_REG; i++)
2257		if (reg_is_pkt_pointer_any(&regs[i]))
2258			mark_reg_unknown(env, regs, i);
2259
2260	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
2261		if (state->stack[i].slot_type[0] != STACK_SPILL)
2262			continue;
2263		reg = &state->stack[i].spilled_ptr;
2264		if (reg_is_pkt_pointer_any(reg))
2265			__mark_reg_unknown(reg);
2266	}
2267}
2268
2269static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
2270{
2271	struct bpf_verifier_state *vstate = env->cur_state;
2272	int i;
2273
2274	for (i = 0; i <= vstate->curframe; i++)
2275		__clear_all_pkt_pointers(env, vstate->frame[i]);
2276}
2277
2278static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
2279			   int *insn_idx)
2280{
2281	struct bpf_verifier_state *state = env->cur_state;
2282	struct bpf_func_state *caller, *callee;
2283	int i, subprog, target_insn;
2284
2285	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
2286		verbose(env, "the call stack of %d frames is too deep\n",
2287			state->curframe + 2);
2288		return -E2BIG;
2289	}
2290
2291	target_insn = *insn_idx + insn->imm;
2292	subprog = find_subprog(env, target_insn + 1);
2293	if (subprog < 0) {
2294		verbose(env, "verifier bug. No program starts at insn %d\n",
2295			target_insn + 1);
2296		return -EFAULT;
2297	}
2298
2299	caller = state->frame[state->curframe];
2300	if (state->frame[state->curframe + 1]) {
2301		verbose(env, "verifier bug. Frame %d already allocated\n",
2302			state->curframe + 1);
2303		return -EFAULT;
2304	}
2305
2306	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
2307	if (!callee)
2308		return -ENOMEM;
2309	state->frame[state->curframe + 1] = callee;
2310
2311	/* callee cannot access r0, r6 - r9 for reading and has to write
2312	 * into its own stack before reading from it.
2313	 * callee can read/write into caller's stack
2314	 */
2315	init_func_state(env, callee,
2316			/* remember the callsite, it will be used by bpf_exit */
2317			*insn_idx /* callsite */,
2318			state->curframe + 1 /* frameno within this callchain */,
2319			subprog + 1 /* subprog number within this prog */);
2320
2321	/* copy r1 - r5 args that callee can access */
2322	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
2323		callee->regs[i] = caller->regs[i];
2324
2325	/* after the call registers r0 - r5 were scratched */
2326	for (i = 0; i < CALLER_SAVED_REGS; i++) {
2327		mark_reg_not_init(env, caller->regs, caller_saved[i]);
2328		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
2329	}
2330
2331	/* only increment it after check_reg_arg() finished */
2332	state->curframe++;
2333
2334	/* and go analyze first insn of the callee */
2335	*insn_idx = target_insn;
2336
2337	if (env->log.level) {
2338		verbose(env, "caller:\n");
2339		print_verifier_state(env, caller);
2340		verbose(env, "callee:\n");
2341		print_verifier_state(env, callee);
2342	}
2343	return 0;
2344}
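/* Example of how the callee is located (illustrative numbers): for a
 * BPF_PSEUDO_CALL at insn 5 with imm == 10, target_insn is 5 + 10 = 15 and
 * the callee's first instruction is insn 16, which must be the start of a
 * known subprog recorded in env->subprog_starts.
 */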
2345
2346static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
2347{
2348	struct bpf_verifier_state *state = env->cur_state;
2349	struct bpf_func_state *caller, *callee;
2350	struct bpf_reg_state *r0;
2351
2352	callee = state->frame[state->curframe];
2353	r0 = &callee->regs[BPF_REG_0];
2354	if (r0->type == PTR_TO_STACK) {
2355		/* technically it's ok to return caller's stack pointer
2356		 * (or caller's caller's pointer) back to the caller,
2357		 * since these pointers are valid. Only the current stack
2358		 * pointer will become invalid as soon as the function exits,
2359		 * but let's be conservative
2360		 */
2361		verbose(env, "cannot return stack pointer to the caller\n");
2362		return -EINVAL;
2363	}
2364
2365	state->curframe--;
2366	caller = state->frame[state->curframe];
2367	/* return to the caller whatever r0 had in the callee */
2368	caller->regs[BPF_REG_0] = *r0;
2369
2370	*insn_idx = callee->callsite + 1;
2371	if (env->log.level) {
2372		verbose(env, "returning from callee:\n");
2373		print_verifier_state(env, callee);
2374		verbose(env, "to caller at %d:\n", *insn_idx);
2375		print_verifier_state(env, caller);
2376	}
2377	/* clear everything in the callee */
2378	free_func_state(callee);
2379	state->frame[state->curframe + 1] = NULL;
2380	return 0;
2381}
2382
2383static int
2384record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
2385		int func_id, int insn_idx)
2386{
2387	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
2388
2389	if (func_id != BPF_FUNC_tail_call &&
2390	    func_id != BPF_FUNC_map_lookup_elem)
2391		return 0;
2392	if (meta->map_ptr == NULL) {
2393		verbose(env, "kernel subsystem misconfigured verifier\n");
2394		return -EINVAL;
2395	}
2396
2397	if (!BPF_MAP_PTR(aux->map_state))
2398		bpf_map_ptr_store(aux, meta->map_ptr,
2399				  meta->map_ptr->unpriv_array);
2400	else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
2401		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
2402				  meta->map_ptr->unpriv_array);
2403	return 0;
2404}
2405
2406static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
2407{
2408	const struct bpf_func_proto *fn = NULL;
2409	struct bpf_reg_state *regs;
2410	struct bpf_call_arg_meta meta;
2411	bool changes_data;
2412	int i, err;
2413
2414	/* find function prototype */
2415	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
2416		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
2417			func_id);
2418		return -EINVAL;
2419	}
2420
2421	if (env->ops->get_func_proto)
2422		fn = env->ops->get_func_proto(func_id, env->prog);
2423	if (!fn) {
2424		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
2425			func_id);
2426		return -EINVAL;
2427	}
2428
2429	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2430	if (!env->prog->gpl_compatible && fn->gpl_only) {
2431		verbose(env, "cannot call GPL only function from proprietary program\n");
2432		return -EINVAL;
2433	}
2434
2435	/* With LD_ABS/IND some JITs save/restore skb from r1. */
2436	changes_data = bpf_helper_changes_pkt_data(fn->func);
2437	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
2438		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
2439			func_id_name(func_id), func_id);
2440		return -EINVAL;
2441	}
2442
2443	memset(&meta, 0, sizeof(meta));
2444	meta.pkt_access = fn->pkt_access;
2445
2446	err = check_func_proto(fn);
2447	if (err) {
2448		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
2449			func_id_name(func_id), func_id);
2450		return err;
2451	}
2452
2453	/* check args */
2454	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta);
2455	if (err)
2456		return err;
2457	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
2458	if (err)
2459		return err;
2460	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
2461	if (err)
2462		return err;
2463	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta);
2464	if (err)
2465		return err;
2466	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta);
2467	if (err)
2468		return err;
2469
2470	err = record_func_map(env, &meta, func_id, insn_idx);
2471	if (err)
2472		return err;
2473
2474	/* Mark slots with STACK_MISC in case of raw mode, stack offset
2475	 * is inferred from register state.
2476	 */
2477	for (i = 0; i < meta.access_size; i++) {
2478		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
2479				       BPF_WRITE, -1, false);
2480		if (err)
2481			return err;
2482	}
2483
2484	regs = cur_regs(env);
2485	/* reset caller saved regs */
2486	for (i = 0; i < CALLER_SAVED_REGS; i++) {
2487		mark_reg_not_init(env, regs, caller_saved[i]);
2488		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
2489	}
2490
2491	/* update return register (already marked as written above) */
2492	if (fn->ret_type == RET_INTEGER) {
2493		/* sets type to SCALAR_VALUE */
2494		mark_reg_unknown(env, regs, BPF_REG_0);
2495	} else if (fn->ret_type == RET_VOID) {
2496		regs[BPF_REG_0].type = NOT_INIT;
2497	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
2498		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
2499		/* There is no offset yet applied, variable or fixed */
2500		mark_reg_known_zero(env, regs, BPF_REG_0);
2501		regs[BPF_REG_0].off = 0;
2502		/* remember map_ptr, so that check_map_access()
2503		 * can check 'value_size' boundary of memory access
2504		 * to map element returned from bpf_map_lookup_elem()
2505		 */
2506		if (meta.map_ptr == NULL) {
2507			verbose(env,
2508				"kernel subsystem misconfigured verifier\n");
2509			return -EINVAL;
2510		}
2511		regs[BPF_REG_0].map_ptr = meta.map_ptr;
2512		regs[BPF_REG_0].id = ++env->id_gen;
2513	} else {
2514		verbose(env, "unknown return type %d of func %s#%d\n",
2515			fn->ret_type, func_id_name(func_id), func_id);
2516		return -EINVAL;
2517	}
2518
2519	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
2520	if (err)
2521		return err;
2522
2523	if (changes_data)
2524		clear_all_pkt_pointers(env);
2525	return 0;
2526}
2527
2528static bool signed_add_overflows(s64 a, s64 b)
2529{
2530	/* Do the add in u64, where overflow is well-defined */
2531	s64 res = (s64)((u64)a + (u64)b);
2532
2533	if (b < 0)
2534		return res > a;
2535	return res < a;
2536}
2537
2538static bool signed_sub_overflows(s64 a, s64 b)
2539{
2540	/* Do the sub in u64, where overflow is well-defined */
2541	s64 res = (s64)((u64)a - (u64)b);
2542
2543	if (b < 0)
2544		return res < a;
2545	return res > a;
2546}
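/* Quick sanity examples for the two helpers above (values chosen for
 * illustration): signed_add_overflows(S64_MAX, 1) is true because the
 * wrapped result S64_MIN is smaller than the first operand, while
 * signed_sub_overflows(S64_MIN, 1) is true because the wrapped result
 * S64_MAX is larger than it.
 */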
2547
2548static bool check_reg_sane_offset(struct bpf_verifier_env *env,
2549				  const struct bpf_reg_state *reg,
2550				  enum bpf_reg_type type)
2551{
2552	bool known = tnum_is_const(reg->var_off);
2553	s64 val = reg->var_off.value;
2554	s64 smin = reg->smin_value;
2555
2556	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
2557		verbose(env, "math between %s pointer and %lld is not allowed\n",
2558			reg_type_str[type], val);
2559		return false;
2560	}
2561
2562	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
2563		verbose(env, "%s pointer offset %d is not allowed\n",
2564			reg_type_str[type], reg->off);
2565		return false;
2566	}
2567
2568	if (smin == S64_MIN) {
2569		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
2570			reg_type_str[type]);
2571		return false;
2572	}
2573
2574	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
2575		verbose(env, "value %lld makes %s pointer be out of bounds\n",
2576			smin, reg_type_str[type]);
2577		return false;
2578	}
2579
2580	return true;
2581}
2582
2583/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
2584 * Caller should also handle BPF_MOV case separately.
2585 * If we return -EACCES, caller may want to try again treating pointer as a
2586 * scalar.  So we only emit a diagnostic if !env->allow_ptr_leaks.
2587 */
2588static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
2589				   struct bpf_insn *insn,
2590				   const struct bpf_reg_state *ptr_reg,
2591				   const struct bpf_reg_state *off_reg)
2592{
2593	struct bpf_verifier_state *vstate = env->cur_state;
2594	struct bpf_func_state *state = vstate->frame[vstate->curframe];
2595	struct bpf_reg_state *regs = state->regs, *dst_reg;
2596	bool known = tnum_is_const(off_reg->var_off);
2597	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
2598	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
2599	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
2600	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
2601	u8 opcode = BPF_OP(insn->code);
2602	u32 dst = insn->dst_reg;
2603
2604	dst_reg = &regs[dst];
2605
2606	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
2607	    smin_val > smax_val || umin_val > umax_val) {
2608		/* Taint dst register if offset had invalid bounds derived from
2609		 * e.g. dead branches.
2610		 */
2611		__mark_reg_unknown(dst_reg);
2612		return 0;
2613	}
2614
2615	if (BPF_CLASS(insn->code) != BPF_ALU64) {
2616		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
2617		verbose(env,
2618			"R%d 32-bit pointer arithmetic prohibited\n",
2619			dst);
2620		return -EACCES;
2621	}
2622
2623	if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
2624		verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
2625			dst);
2626		return -EACCES;
2627	}
2628	if (ptr_reg->type == CONST_PTR_TO_MAP) {
2629		verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
2630			dst);
2631		return -EACCES;
2632	}
2633	if (ptr_reg->type == PTR_TO_PACKET_END) {
2634		verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
2635			dst);
2636		return -EACCES;
2637	}
2638
2639	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
2640	 * The id may be overwritten later if we create a new variable offset.
2641	 */
2642	dst_reg->type = ptr_reg->type;
2643	dst_reg->id = ptr_reg->id;
2644
2645	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
2646	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
2647		return -EINVAL;
2648
2649	switch (opcode) {
2650	case BPF_ADD:
2651		/* We can take a fixed offset as long as it doesn't overflow
2652		 * the s32 'off' field
2653		 */
2654		if (known && (ptr_reg->off + smin_val ==
2655			      (s64)(s32)(ptr_reg->off + smin_val))) {
2656			/* pointer += K.  Accumulate it into fixed offset */
2657			dst_reg->smin_value = smin_ptr;
2658			dst_reg->smax_value = smax_ptr;
2659			dst_reg->umin_value = umin_ptr;
2660			dst_reg->umax_value = umax_ptr;
2661			dst_reg->var_off = ptr_reg->var_off;
2662			dst_reg->off = ptr_reg->off + smin_val;
2663			dst_reg->range = ptr_reg->range;
2664			break;
2665		}
2666		/* A new variable offset is created.  Note that off_reg->off
2667		 * == 0, since it's a scalar.
2668		 * dst_reg gets the pointer type and since some positive
2669		 * integer value was added to the pointer, give it a new 'id'
2670		 * if it's a PTR_TO_PACKET.
2671		 * this creates a new 'base' pointer, off_reg (variable) gets
2672		 * added into the variable offset, and we copy the fixed offset
2673		 * from ptr_reg.
2674		 */
2675		if (signed_add_overflows(smin_ptr, smin_val) ||
2676		    signed_add_overflows(smax_ptr, smax_val)) {
2677			dst_reg->smin_value = S64_MIN;
2678			dst_reg->smax_value = S64_MAX;
2679		} else {
2680			dst_reg->smin_value = smin_ptr + smin_val;
2681			dst_reg->smax_value = smax_ptr + smax_val;
2682		}
2683		if (umin_ptr + umin_val < umin_ptr ||
2684		    umax_ptr + umax_val < umax_ptr) {
2685			dst_reg->umin_value = 0;
2686			dst_reg->umax_value = U64_MAX;
2687		} else {
2688			dst_reg->umin_value = umin_ptr + umin_val;
2689			dst_reg->umax_value = umax_ptr + umax_val;
2690		}
2691		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
2692		dst_reg->off = ptr_reg->off;
2693		if (reg_is_pkt_pointer(ptr_reg)) {
2694			dst_reg->id = ++env->id_gen;
2695			/* something was added to pkt_ptr, set range to zero */
2696			dst_reg->range = 0;
2697		}
2698		break;
2699	case BPF_SUB:
2700		if (dst_reg == off_reg) {
2701			/* scalar -= pointer.  Creates an unknown scalar */
2702			verbose(env, "R%d tried to subtract pointer from scalar\n",
2703				dst);
2704			return -EACCES;
2705		}
2706		/* We don't allow subtraction from FP, because (according to
2707		 * test_verifier.c test "invalid fp arithmetic", JITs might not
2708		 * be able to deal with it.
2709		 */
2710		if (ptr_reg->type == PTR_TO_STACK) {
2711			verbose(env, "R%d subtraction from stack pointer prohibited\n",
2712				dst);
2713			return -EACCES;
2714		}
2715		if (known && (ptr_reg->off - smin_val ==
2716			      (s64)(s32)(ptr_reg->off - smin_val))) {
2717			/* pointer -= K.  Subtract it from fixed offset */
2718			dst_reg->smin_value = smin_ptr;
2719			dst_reg->smax_value = smax_ptr;
2720			dst_reg->umin_value = umin_ptr;
2721			dst_reg->umax_value = umax_ptr;
2722			dst_reg->var_off = ptr_reg->var_off;
2723			dst_reg->id = ptr_reg->id;
2724			dst_reg->off = ptr_reg->off - smin_val;
2725			dst_reg->range = ptr_reg->range;
2726			break;
2727		}
2728		/* A new variable offset is created.  If the subtrahend is known
2729		 * nonnegative, then any reg->range we had before is still good.
2730		 */
2731		if (signed_sub_overflows(smin_ptr, smax_val) ||
2732		    signed_sub_overflows(smax_ptr, smin_val)) {
2733			/* Overflow possible, we know nothing */
2734			dst_reg->smin_value = S64_MIN;
2735			dst_reg->smax_value = S64_MAX;
2736		} else {
2737			dst_reg->smin_value = smin_ptr - smax_val;
2738			dst_reg->smax_value = smax_ptr - smin_val;
2739		}
2740		if (umin_ptr < umax_val) {
2741			/* Overflow possible, we know nothing */
2742			dst_reg->umin_value = 0;
2743			dst_reg->umax_value = U64_MAX;
2744		} else {
2745			/* Cannot overflow (as long as bounds are consistent) */
2746			dst_reg->umin_value = umin_ptr - umax_val;
2747			dst_reg->umax_value = umax_ptr - umin_val;
2748		}
2749		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
2750		dst_reg->off = ptr_reg->off;
2751		if (reg_is_pkt_pointer(ptr_reg)) {
2752			dst_reg->id = ++env->id_gen;
2753			/* if the scalar might be negative, the old pkt range is unsafe */
2754			if (smin_val < 0)
2755				dst_reg->range = 0;
2756		}
2757		break;
2758	case BPF_AND:
2759	case BPF_OR:
2760	case BPF_XOR:
2761		/* bitwise ops on pointers are troublesome, prohibit. */
2762		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
2763			dst, bpf_alu_string[opcode >> 4]);
2764		return -EACCES;
2765	default:
2766		/* other operators (e.g. MUL,LSH) produce non-pointer results */
2767		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
2768			dst, bpf_alu_string[opcode >> 4]);
2769		return -EACCES;
2770	}
2771
2772	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
2773		return -EINVAL;
2774
2775	__update_reg_bounds(dst_reg);
2776	__reg_deduce_bounds(dst_reg);
2777	__reg_bound_offset(dst_reg);
2778	return 0;
2779}
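/* Example flow through the BPF_ADD case above (illustrative program):
 *
 *   r2 = r10
 *   r2 += -8       known constant: folded into the fixed offset, off = -8
 *   r3 = r1        assuming r1 is PTR_TO_PACKET
 *   r3 += r4       r4 a bounded scalar: a new variable offset and a new id
 *                  are created, and the packet range is reset to zero
 */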
2780
2781/* WARNING: This function does calculations on 64-bit values, but the actual
2782 * execution may occur on 32-bit values. Therefore, things like bitshifts
2783 * need extra checks in the 32-bit case.
2784 */
2785static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2786				      struct bpf_insn *insn,
2787				      struct bpf_reg_state *dst_reg,
2788				      struct bpf_reg_state src_reg)
2789{
2790	struct bpf_reg_state *regs = cur_regs(env);
2791	u8 opcode = BPF_OP(insn->code);
2792	bool src_known, dst_known;
2793	s64 smin_val, smax_val;
2794	u64 umin_val, umax_val;
2795	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
2796
2797	smin_val = src_reg.smin_value;
2798	smax_val = src_reg.smax_value;
2799	umin_val = src_reg.umin_value;
2800	umax_val = src_reg.umax_value;
2801	src_known = tnum_is_const(src_reg.var_off);
2802	dst_known = tnum_is_const(dst_reg->var_off);
2803
2804	if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
2805	    smin_val > smax_val || umin_val > umax_val) {
2806		/* Taint dst register if offset had invalid bounds derived from
2807		 * e.g. dead branches.
2808		 */
2809		__mark_reg_unknown(dst_reg);
2810		return 0;
2811	}
2812
2813	if (!src_known &&
2814	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
2815		__mark_reg_unknown(dst_reg);
2816		return 0;
2817	}
2818
2819	switch (opcode) {
2820	case BPF_ADD:
2821		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
2822		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
2823			dst_reg->smin_value = S64_MIN;
2824			dst_reg->smax_value = S64_MAX;
2825		} else {
2826			dst_reg->smin_value += smin_val;
2827			dst_reg->smax_value += smax_val;
2828		}
2829		if (dst_reg->umin_value + umin_val < umin_val ||
2830		    dst_reg->umax_value + umax_val < umax_val) {
2831			dst_reg->umin_value = 0;
2832			dst_reg->umax_value = U64_MAX;
2833		} else {
2834			dst_reg->umin_value += umin_val;
2835			dst_reg->umax_value += umax_val;
2836		}
2837		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
2838		break;
2839	case BPF_SUB:
2840		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
2841		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
2842			/* Overflow possible, we know nothing */
2843			dst_reg->smin_value = S64_MIN;
2844			dst_reg->smax_value = S64_MAX;
2845		} else {
2846			dst_reg->smin_value -= smax_val;
2847			dst_reg->smax_value -= smin_val;
2848		}
2849		if (dst_reg->umin_value < umax_val) {
2850			/* Overflow possible, we know nothing */
2851			dst_reg->umin_value = 0;
2852			dst_reg->umax_value = U64_MAX;
2853		} else {
2854			/* Cannot overflow (as long as bounds are consistent) */
2855			dst_reg->umin_value -= umax_val;
2856			dst_reg->umax_value -= umin_val;
2857		}
2858		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
2859		break;
2860	case BPF_MUL:
2861		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
2862		if (smin_val < 0 || dst_reg->smin_value < 0) {
2863			/* Ain't nobody got time to multiply that sign */
2864			__mark_reg_unbounded(dst_reg);
2865			__update_reg_bounds(dst_reg);
2866			break;
2867		}
2868		/* Both values are positive, so we can work with unsigned and
2869		 * copy the result to signed (unless it exceeds S64_MAX).
2870		 */
2871		if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
2872			/* Potential overflow, we know nothing */
2873			__mark_reg_unbounded(dst_reg);
2874			/* (except what we can learn from the var_off) */
2875			__update_reg_bounds(dst_reg);
2876			break;
2877		}
2878		dst_reg->umin_value *= umin_val;
2879		dst_reg->umax_value *= umax_val;
2880		if (dst_reg->umax_value > S64_MAX) {
2881			/* Overflow possible, we know nothing */
2882			dst_reg->smin_value = S64_MIN;
2883			dst_reg->smax_value = S64_MAX;
2884		} else {
2885			dst_reg->smin_value = dst_reg->umin_value;
2886			dst_reg->smax_value = dst_reg->umax_value;
2887		}
2888		break;
2889	case BPF_AND:
2890		if (src_known && dst_known) {
2891			__mark_reg_known(dst_reg, dst_reg->var_off.value &
2892						  src_reg.var_off.value);
2893			break;
2894		}
2895		/* We get our minimum from the var_off, since that's inherently
2896		 * bitwise.  Our maximum is the minimum of the operands' maxima.
2897		 */
2898		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
2899		dst_reg->umin_value = dst_reg->var_off.value;
2900		dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
2901		if (dst_reg->smin_value < 0 || smin_val < 0) {
2902			/* Lose signed bounds when ANDing negative numbers,
2903			 * ain't nobody got time for that.
2904			 */
2905			dst_reg->smin_value = S64_MIN;
2906			dst_reg->smax_value = S64_MAX;
2907		} else {
2908			/* ANDing two positives gives a positive, so safe to
2909			 * cast result into s64.
2910			 */
2911			dst_reg->smin_value = dst_reg->umin_value;
2912			dst_reg->smax_value = dst_reg->umax_value;
2913		}
2914		/* We may learn something more from the var_off */
2915		__update_reg_bounds(dst_reg);
2916		break;
2917	case BPF_OR:
2918		if (src_known && dst_known) {
2919			__mark_reg_known(dst_reg, dst_reg->var_off.value |
2920						  src_reg.var_off.value);
2921			break;
2922		}
2923		/* We get our maximum from the var_off, and our minimum is the
2924		 * maximum of the operands' minima
2925		 */
2926		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
2927		dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
2928		dst_reg->umax_value = dst_reg->var_off.value |
2929				      dst_reg->var_off.mask;
2930		if (dst_reg->smin_value < 0 || smin_val < 0) {
2931			/* Lose signed bounds when ORing negative numbers,
2932			 * ain't nobody got time for that.
2933			 */
2934			dst_reg->smin_value = S64_MIN;
2935			dst_reg->smax_value = S64_MAX;
2936		} else {
2937			/* ORing two positives gives a positive, so safe to
2938			 * cast result into s64.
2939			 */
2940			dst_reg->smin_value = dst_reg->umin_value;
2941			dst_reg->smax_value = dst_reg->umax_value;
2942		}
2943		/* We may learn something more from the var_off */
2944		__update_reg_bounds(dst_reg);
2945		break;
2946	case BPF_LSH:
2947		if (umax_val >= insn_bitness) {
2948			/* Shifts greater than 31 or 63 are undefined.
2949			 * This includes shifts by a negative number.
2950			 */
2951			mark_reg_unknown(env, regs, insn->dst_reg);
2952			break;
2953		}
2954		/* We lose all sign bit information (except what we can pick
2955		 * up from var_off)
2956		 */
2957		dst_reg->smin_value = S64_MIN;
2958		dst_reg->smax_value = S64_MAX;
2959		/* If we might shift our top bit out, then we know nothing */
2960		if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
2961			dst_reg->umin_value = 0;
2962			dst_reg->umax_value = U64_MAX;
2963		} else {
2964			dst_reg->umin_value <<= umin_val;
2965			dst_reg->umax_value <<= umax_val;
2966		}
2967		if (src_known)
2968			dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
2969		else
2970			dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val);
2971		/* We may learn something more from the var_off */
2972		__update_reg_bounds(dst_reg);
2973		break;
2974	case BPF_RSH:
2975		if (umax_val >= insn_bitness) {
2976			/* Shifts greater than 31 or 63 are undefined.
2977			 * This includes shifts by a negative number.
2978			 */
2979			mark_reg_unknown(env, regs, insn->dst_reg);
2980			break;
2981		}
2982		/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
2983		 * be negative, then either:
2984		 * 1) src_reg might be zero, so the sign bit of the result is
2985		 *    unknown, so we lose our signed bounds
2986		 * 2) it's known negative, thus the unsigned bounds capture the
2987		 *    signed bounds
2988		 * 3) the signed bounds cross zero, so they tell us nothing
2989		 *    about the result
2990		 * If the value in dst_reg is known nonnegative, then again the
2991		 * unsigned bounds capture the signed bounds.
2992		 * Thus, in all cases it suffices to blow away our signed bounds
2993		 * and rely on inferring new ones from the unsigned bounds and
2994		 * var_off of the result.
2995		 */
2996		dst_reg->smin_value = S64_MIN;
2997		dst_reg->smax_value = S64_MAX;
2998		if (src_known)
2999			dst_reg->var_off = tnum_rshift(dst_reg->var_off,
3000						       umin_val);
3001		else
3002			dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val);
3003		dst_reg->umin_value >>= umax_val;
3004		dst_reg->umax_value >>= umin_val;
3005		/* We may learn something more from the var_off */
3006		__update_reg_bounds(dst_reg);
3007		break;
3008	default:
3009		mark_reg_unknown(env, regs, insn->dst_reg);
3010		break;
3011	}
3012
3013	if (BPF_CLASS(insn->code) != BPF_ALU64) {
3014		/* 32-bit ALU ops are (32,32)->32 */
3015		coerce_reg_to_size(dst_reg, 4);
3016		coerce_reg_to_size(&src_reg, 4);
3017	}
3018
3019	__reg_deduce_bounds(dst_reg);
3020	__reg_bound_offset(dst_reg);
3021	return 0;
3022}
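/* Example for the BPF_AND case above (illustrative bounds): if dst_reg is a
 * scalar known to be in [0, 1000] and the source is the constant 0xff, the
 * new umax_value becomes min(1000, 0xff) = 255 and umin_value comes from
 * the resulting var_off, so a following bounds check can rely on the masked
 * value staying within a single byte.
 */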
3023
3024/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
3025 * and var_off.
3026 */
3027static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
3028				   struct bpf_insn *insn)
3029{
3030	struct bpf_verifier_state *vstate = env->cur_state;
3031	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3032	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
3033	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
3034	u8 opcode = BPF_OP(insn->code);
3035
3036	dst_reg = &regs[insn->dst_reg];
3037	src_reg = NULL;
3038	if (dst_reg->type != SCALAR_VALUE)
3039		ptr_reg = dst_reg;
3040	if (BPF_SRC(insn->code) == BPF_X) {
3041		src_reg = &regs[insn->src_reg];
3042		if (src_reg->type != SCALAR_VALUE) {
3043			if (dst_reg->type != SCALAR_VALUE) {
3044				/* Combining two pointers by any ALU op yields
3045				 * an arbitrary scalar. Disallow all math except
3046				 * pointer subtraction
3047				 */
3048				if (opcode == BPF_SUB) {
3049					mark_reg_unknown(env, regs, insn->dst_reg);
3050					return 0;
3051				}
3052				verbose(env, "R%d pointer %s pointer prohibited\n",
3053					insn->dst_reg,
3054					bpf_alu_string[opcode >> 4]);
3055				return -EACCES;
3056			} else {
3057				/* scalar += pointer
3058				 * This is legal, but we have to reverse our
3059				 * src/dest handling in computing the range
3060				 */
3061				return adjust_ptr_min_max_vals(env, insn,
3062							       src_reg, dst_reg);
3063			}
3064		} else if (ptr_reg) {
3065			/* pointer += scalar */
3066			return adjust_ptr_min_max_vals(env, insn,
3067						       dst_reg, src_reg);
3068		}
3069	} else {
3070		/* Pretend the src is a reg with a known value, since we only
3071		 * need to be able to read from this state.
3072		 */
3073		off_reg.type = SCALAR_VALUE;
3074		__mark_reg_known(&off_reg, insn->imm);
3075		src_reg = &off_reg;
3076		if (ptr_reg) /* pointer += K */
3077			return adjust_ptr_min_max_vals(env, insn,
3078						       ptr_reg, src_reg);
3079	}
3080
3081	/* Got here implies adding two SCALAR_VALUEs */
3082	if (WARN_ON_ONCE(ptr_reg)) {
3083		print_verifier_state(env, state);
3084		verbose(env, "verifier internal error: unexpected ptr_reg\n");
3085		return -EINVAL;
3086	}
3087	if (WARN_ON(!src_reg)) {
3088		print_verifier_state(env, state);
3089		verbose(env, "verifier internal error: no src_reg\n");
3090		return -EINVAL;
3091	}
3092	return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
3093}
3094
3095/* check validity of 32-bit and 64-bit arithmetic operations */
3096static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
3097{
3098	struct bpf_reg_state *regs = cur_regs(env);
3099	u8 opcode = BPF_OP(insn->code);
3100	int err;
3101
3102	if (opcode == BPF_END || opcode == BPF_NEG) {
3103		if (opcode == BPF_NEG) {
3104			if (BPF_SRC(insn->code) != 0 ||
3105			    insn->src_reg != BPF_REG_0 ||
3106			    insn->off != 0 || insn->imm != 0) {
3107				verbose(env, "BPF_NEG uses reserved fields\n");
3108				return -EINVAL;
3109			}
3110		} else {
3111			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
3112			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
3113			    BPF_CLASS(insn->code) == BPF_ALU64) {
3114				verbose(env, "BPF_END uses reserved fields\n");
3115				return -EINVAL;
3116			}
3117		}
3118
3119		/* check src operand */
3120		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3121		if (err)
3122			return err;
3123
3124		if (is_pointer_value(env, insn->dst_reg)) {
3125			verbose(env, "R%d pointer arithmetic prohibited\n",
3126				insn->dst_reg);
3127			return -EACCES;
3128		}
3129
3130		/* check dest operand */
3131		err = check_reg_arg(env, insn->dst_reg, DST_OP);
3132		if (err)
3133			return err;
3134
3135	} else if (opcode == BPF_MOV) {
3136
3137		if (BPF_SRC(insn->code) == BPF_X) {
3138			if (insn->imm != 0 || insn->off != 0) {
3139				verbose(env, "BPF_MOV uses reserved fields\n");
3140				return -EINVAL;
3141			}
3142
3143			/* check src operand */
3144			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3145			if (err)
3146				return err;
3147		} else {
3148			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
3149				verbose(env, "BPF_MOV uses reserved fields\n");
3150				return -EINVAL;
3151			}
3152		}
3153
3154		/* check dest operand */
3155		err = check_reg_arg(env, insn->dst_reg, DST_OP);
3156		if (err)
3157			return err;
3158
3159		if (BPF_SRC(insn->code) == BPF_X) {
3160			if (BPF_CLASS(insn->code) == BPF_ALU64) {
3161				/* case: R1 = R2
3162				 * copy register state to dest reg
3163				 */
3164				regs[insn->dst_reg] = regs[insn->src_reg];
3165				regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
3166			} else {
3167				/* R1 = (u32) R2 */
3168				if (is_pointer_value(env, insn->src_reg)) {
3169					verbose(env,
3170						"R%d partial copy of pointer\n",
3171						insn->src_reg);
3172					return -EACCES;
3173				}
3174				mark_reg_unknown(env, regs, insn->dst_reg);
3175				coerce_reg_to_size(&regs[insn->dst_reg], 4);
3176			}
3177		} else {
3178			/* case: R = imm
3179			 * remember the value we stored into this reg
3180			 */
3181			regs[insn->dst_reg].type = SCALAR_VALUE;
3182			if (BPF_CLASS(insn->code) == BPF_ALU64) {
3183				__mark_reg_known(regs + insn->dst_reg,
3184						 insn->imm);
3185			} else {
3186				__mark_reg_known(regs + insn->dst_reg,
3187						 (u32)insn->imm);
3188			}
3189		}
3190
3191	} else if (opcode > BPF_END) {
3192		verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
3193		return -EINVAL;
3194
3195	} else {	/* all other ALU ops: and, sub, xor, add, ... */
3196
3197		if (BPF_SRC(insn->code) == BPF_X) {
3198			if (insn->imm != 0 || insn->off != 0) {
3199				verbose(env, "BPF_ALU uses reserved fields\n");
3200				return -EINVAL;
3201			}
3202			/* check src1 operand */
3203			err = check_reg_arg(env, insn->src_reg, SRC_OP);
3204			if (err)
3205				return err;
3206		} else {
3207			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
3208				verbose(env, "BPF_ALU uses reserved fields\n");
3209				return -EINVAL;
3210			}
3211		}
3212
3213		/* check src2 operand */
3214		err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3215		if (err)
3216			return err;
3217
3218		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
3219		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
3220			verbose(env, "div by zero\n");
3221			return -EINVAL;
3222		}
3223
3224		if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
3225			verbose(env, "BPF_ARSH not supported for 32 bit ALU\n");
3226			return -EINVAL;
3227		}
3228
3229		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
3230		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
3231			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
3232
3233			if (insn->imm < 0 || insn->imm >= size) {
3234				verbose(env, "invalid shift %d\n", insn->imm);
3235				return -EINVAL;
3236			}
3237		}
3238
3239		/* check dest operand */
3240		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
3241		if (err)
3242			return err;
3243
3244		return adjust_reg_min_max_vals(env, insn);
3245	}
3246
3247	return 0;
3248}
3249
3250static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
3251				   struct bpf_reg_state *dst_reg,
3252				   enum bpf_reg_type type,
3253				   bool range_right_open)
3254{
3255	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3256	struct bpf_reg_state *regs = state->regs, *reg;
3257	u16 new_range;
3258	int i, j;
3259
3260	if (dst_reg->off < 0 ||
3261	    (dst_reg->off == 0 && range_right_open))
3262		/* This doesn't give us any range */
3263		return;
3264
3265	if (dst_reg->umax_value > MAX_PACKET_OFF ||
3266	    dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
3267		/* Risk of overflow.  For instance, ptr + (1<<63) may be less
3268		 * than pkt_end, but that's because it's also less than pkt.
3269		 */
3270		return;
3271
3272	new_range = dst_reg->off;
3273	if (range_right_open)
3274		new_range--;
3275
3276	/* Examples for register markings:
3277	 *
3278	 * pkt_data in dst register:
3279	 *
3280	 *   r2 = r3;
3281	 *   r2 += 8;
3282	 *   if (r2 > pkt_end) goto <handle exception>
3283	 *   <access okay>
3284	 *
3285	 *   r2 = r3;
3286	 *   r2 += 8;
3287	 *   if (r2 < pkt_end) goto <access okay>
3288	 *   <handle exception>
3289	 *
3290	 *   Where:
3291	 *     r2 == dst_reg, pkt_end == src_reg
3292	 *     r2=pkt(id=n,off=8,r=0)
3293	 *     r3=pkt(id=n,off=0,r=0)
3294	 *
3295	 * pkt_data in src register:
3296	 *
3297	 *   r2 = r3;
3298	 *   r2 += 8;
3299	 *   if (pkt_end >= r2) goto <access okay>
3300	 *   <handle exception>
3301	 *
3302	 *   r2 = r3;
3303	 *   r2 += 8;
3304	 *   if (pkt_end <= r2) goto <handle exception>
3305	 *   <access okay>
3306	 *
3307	 *   Where:
3308	 *     pkt_end == dst_reg, r2 == src_reg
3309	 *     r2=pkt(id=n,off=8,r=0)
3310	 *     r3=pkt(id=n,off=0,r=0)
3311	 *
3312	 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
3313	 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
3314	 * and [r3, r3 + 8-1) respectively is safe to access depending on
3315	 * the check.
3316	 */
3317
3318	/* If our ids match, then we must have the same max_value.  And we
3319	 * don't care about the other reg's fixed offset, since if it's too big
3320	 * the range won't allow anything.
3321	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
3322	 */
3323	for (i = 0; i < MAX_BPF_REG; i++)
3324		if (regs[i].type == type && regs[i].id == dst_reg->id)
3325			/* keep the maximum range already checked */
3326			regs[i].range = max(regs[i].range, new_range);
3327
3328	for (j = 0; j <= vstate->curframe; j++) {
3329		state = vstate->frame[j];
3330		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
3331			if (state->stack[i].slot_type[0] != STACK_SPILL)
3332				continue;
3333			reg = &state->stack[i].spilled_ptr;
3334			if (reg->type == type && reg->id == dst_reg->id)
3335				reg->range = max(reg->range, new_range);
3336		}
3337	}
3338}
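
/* A minimal sketch (not part of the verifier) of the first pattern in the
 * comment above, written with the insn macros from linux/filter.h.  It
 * assumes r3 = pkt_data and r1 = pkt_end were loaded from the context
 * earlier; the register numbers and the offset 8 are arbitrary:
 *
 *	BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
 *	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
 *	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
 *	BPF_MOV64_IMM(BPF_REG_0, 0),
 *	BPF_EXIT_INSN(),
 *
 * On the fall-through edge of the JGT, find_good_pkt_pointers() marks every
 * register sharing r2's id (here r3 as well) with range = 8, so the 8-byte
 * load through r3 is accepted.
 */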
3339
3340/* Adjusts the register min/max values in the case that the dst_reg is the
3341 * variable register that we are working on, and src_reg is a constant or we're
3342 * simply doing a BPF_K check.
3343 * In JEQ/JNE cases we also adjust the var_off values.
3344 */
3345static void reg_set_min_max(struct bpf_reg_state *true_reg,
3346			    struct bpf_reg_state *false_reg, u64 val,
3347			    u8 opcode)
3348{
3349	/* If the dst_reg is a pointer, we can't learn anything about its
3350	 * variable offset from the compare (unless src_reg were a pointer into
3351	 * the same object, but we don't bother with that).
3352	 * Since false_reg and true_reg have the same type by construction, we
3353	 * only need to check one of them for pointerness.
3354	 */
3355	if (__is_pointer_value(false, false_reg))
3356		return;
3357
3358	switch (opcode) {
3359	case BPF_JEQ:
3360		/* If this is false then we know nothing Jon Snow, but if it is
3361		 * true then we know for sure.
3362		 */
3363		__mark_reg_known(true_reg, val);
3364		break;
3365	case BPF_JNE:
3366		/* If this is true we know nothing Jon Snow, but if it is false
3367		 * we know the value for sure;
3368		 */
3369		__mark_reg_known(false_reg, val);
3370		break;
3371	case BPF_JGT:
3372		false_reg->umax_value = min(false_reg->umax_value, val);
3373		true_reg->umin_value = max(true_reg->umin_value, val + 1);
3374		break;
3375	case BPF_JSGT:
3376		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
3377		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
3378		break;
3379	case BPF_JLT:
3380		false_reg->umin_value = max(false_reg->umin_value, val);
3381		true_reg->umax_value = min(true_reg->umax_value, val - 1);
3382		break;
3383	case BPF_JSLT:
3384		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
3385		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
3386		break;
3387	case BPF_JGE:
3388		false_reg->umax_value = min(false_reg->umax_value, val - 1);
3389		true_reg->umin_value = max(true_reg->umin_value, val);
3390		break;
3391	case BPF_JSGE:
3392		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
3393		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
3394		break;
3395	case BPF_JLE:
3396		false_reg->umin_value = max(false_reg->umin_value, val + 1);
3397		true_reg->umax_value = min(true_reg->umax_value, val);
3398		break;
3399	case BPF_JSLE:
3400		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
3401		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
3402		break;
3403	default:
3404		break;
3405	}
3406
3407	__reg_deduce_bounds(false_reg);
3408	__reg_deduce_bounds(true_reg);
3409	/* We might have learned some bits from the bounds. */
3410	__reg_bound_offset(false_reg);
3411	__reg_bound_offset(true_reg);
3412	/* Intersecting with the old var_off might have improved our bounds
3413	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
3414	 * then new var_off is (0; 0x7f...fc) which improves our umax.
3415	 */
3416	__update_reg_bounds(false_reg);
3417	__update_reg_bounds(true_reg);
3418}
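
/* A worked example (illustrative only): for a branch like
 *
 *	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 42, <off>),
 *
 * with r1 an unbounded SCALAR_VALUE, the taken branch (true_reg) gets
 * umin_value = 43 and the fall-through branch (false_reg) gets
 * umax_value = 42; the __reg_deduce_bounds()/__reg_bound_offset() calls
 * above then derive whatever signed bounds and var_off bits follow from
 * that.
 */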
3419
3420/* Same as above, but for the case that dst_reg holds a constant and src_reg is
3421 * the variable reg.
3422 */
3423static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
3424				struct bpf_reg_state *false_reg, u64 val,
3425				u8 opcode)
3426{
3427	if (__is_pointer_value(false, false_reg))
3428		return;
3429
3430	switch (opcode) {
3431	case BPF_JEQ:
3432		/* If this is false then we know nothing Jon Snow, but if it is
3433		 * true then we know for sure.
3434		 */
3435		__mark_reg_known(true_reg, val);
3436		break;
3437	case BPF_JNE:
3438		/* If this is true we know nothing Jon Snow, but if it is false
3439		 * we know the value for sure;
3440		 */
3441		__mark_reg_known(false_reg, val);
3442		break;
3443	case BPF_JGT:
3444		true_reg->umax_value = min(true_reg->umax_value, val - 1);
3445		false_reg->umin_value = max(false_reg->umin_value, val);
3446		break;
3447	case BPF_JSGT:
3448		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
3449		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
3450		break;
3451	case BPF_JLT:
3452		true_reg->umin_value = max(true_reg->umin_value, val + 1);
3453		false_reg->umax_value = min(false_reg->umax_value, val);
3454		break;
3455	case BPF_JSLT:
3456		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
3457		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
3458		break;
3459	case BPF_JGE:
3460		true_reg->umax_value = min(true_reg->umax_value, val);
3461		false_reg->umin_value = max(false_reg->umin_value, val + 1);
3462		break;
3463	case BPF_JSGE:
3464		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
3465		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
3466		break;
3467	case BPF_JLE:
3468		true_reg->umin_value = max(true_reg->umin_value, val);
3469		false_reg->umax_value = min(false_reg->umax_value, val - 1);
3470		break;
3471	case BPF_JSLE:
3472		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
3473		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
3474		break;
3475	default:
3476		break;
3477	}
3478
3479	__reg_deduce_bounds(false_reg);
3480	__reg_deduce_bounds(true_reg);
3481	/* We might have learned some bits from the bounds. */
3482	__reg_bound_offset(false_reg);
3483	__reg_bound_offset(true_reg);
3484	/* Intersecting with the old var_off might have improved our bounds
3485	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
3486	 * then new var_off is (0; 0x7f...fc) which improves our umax.
3487	 */
3488	__update_reg_bounds(false_reg);
3489	__update_reg_bounds(true_reg);
3490}
3491
3492/* Regs are known to be equal, so intersect their min/max/var_off */
3493static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
3494				  struct bpf_reg_state *dst_reg)
3495{
3496	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
3497							dst_reg->umin_value);
3498	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
3499							dst_reg->umax_value);
3500	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
3501							dst_reg->smin_value);
3502	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
3503							dst_reg->smax_value);
3504	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
3505							     dst_reg->var_off);
3506	/* We might have learned new bounds from the var_off. */
3507	__update_reg_bounds(src_reg);
3508	__update_reg_bounds(dst_reg);
3509	/* We might have learned something about the sign bit. */
3510	__reg_deduce_bounds(src_reg);
3511	__reg_deduce_bounds(dst_reg);
3512	/* We might have learned some bits from the bounds. */
3513	__reg_bound_offset(src_reg);
3514	__reg_bound_offset(dst_reg);
3515	/* Intersecting with the old var_off might have improved our bounds
3516	 * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
3517	 * then new var_off is (0; 0x7f...fc) which improves our umax.
3518	 */
3519	__update_reg_bounds(src_reg);
3520	__update_reg_bounds(dst_reg);
3521}
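
/* For example (illustrative): if two scalars compared with BPF_JEQ are known
 * to be r1 = [0, 100] and r2 = [50, 200], then on the branch where they are
 * equal both become [50, 100], since each register can only hold values that
 * are also possible for the other.
 */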
3522
3523static void reg_combine_min_max(struct bpf_reg_state *true_src,
3524				struct bpf_reg_state *true_dst,
3525				struct bpf_reg_state *false_src,
3526				struct bpf_reg_state *false_dst,
3527				u8 opcode)
3528{
3529	switch (opcode) {
3530	case BPF_JEQ:
3531		__reg_combine_min_max(true_src, true_dst);
3532		break;
3533	case BPF_JNE:
3534		__reg_combine_min_max(false_src, false_dst);
3535		break;
3536	}
3537}
3538
3539static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
3540			 bool is_null)
3541{
3542	struct bpf_reg_state *reg = &regs[regno];
3543
3544	if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
3545		/* Old offset (both fixed and variable parts) should
3546		 * have been known-zero, because we don't allow pointer
3547		 * arithmetic on pointers that might be NULL.
3548		 */
3549		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
3550				 !tnum_equals_const(reg->var_off, 0) ||
3551				 reg->off)) {
3552			__mark_reg_known_zero(reg);
3553			reg->off = 0;
3554		}
3555		if (is_null) {
3556			reg->type = SCALAR_VALUE;
3557		} else if (reg->map_ptr->inner_map_meta) {
3558			reg->type = CONST_PTR_TO_MAP;
3559			reg->map_ptr = reg->map_ptr->inner_map_meta;
3560		} else {
3561			reg->type = PTR_TO_MAP_VALUE;
3562		}
3563		/* We don't need the id from this point onwards, so it is
3564		 * better to reset it, so that state pruning has a chance
3565		 * to take effect.
3566		 */
3567		reg->id = 0;
3568	}
3569}
3570
3571/* The logic is similar to find_good_pkt_pointers(), both could eventually
3572 * be folded together at some point.
3573 */
3574static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno,
3575			  bool is_null)
3576{
3577	struct bpf_func_state *state = vstate->frame[vstate->curframe];
3578	struct bpf_reg_state *regs = state->regs;
3579	u32 id = regs[regno].id;
3580	int i, j;
3581
3582	for (i = 0; i < MAX_BPF_REG; i++)
3583		mark_map_reg(regs, i, id, is_null);
3584
3585	for (j = 0; j <= vstate->curframe; j++) {
3586		state = vstate->frame[j];
3587		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
3588			if (state->stack[i].slot_type[0] != STACK_SPILL)
3589				continue;
3590			mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
3591		}
3592	}
3593}
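
/* A rough sketch (not part of the verifier) of the pattern being handled,
 * using macros from linux/filter.h; map_fd stands for a real map file
 * descriptor and the key setup is elided:
 *
 *	BPF_LD_MAP_FD(BPF_REG_1, map_fd),
 *	...
 *	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
 *	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
 *	BPF_MOV64_IMM(BPF_REG_0, 0),
 *	BPF_EXIT_INSN(),
 *
 * After the call r0 is PTR_TO_MAP_VALUE_OR_NULL.  The JEQ against zero makes
 * check_cond_jmp_op() invoke mark_map_regs() for both branches: on the taken
 * branch r0 becomes SCALAR_VALUE (it is NULL there), on the fall-through
 * branch it becomes PTR_TO_MAP_VALUE, so the store through it is allowed.
 */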
3594
3595static bool try_match_pkt_pointers(const struct bpf_insn *insn,
3596				   struct bpf_reg_state *dst_reg,
3597				   struct bpf_reg_state *src_reg,
3598				   struct bpf_verifier_state *this_branch,
3599				   struct bpf_verifier_state *other_branch)
3600{
3601	if (BPF_SRC(insn->code) != BPF_X)
3602		return false;
3603
3604	switch (BPF_OP(insn->code)) {
3605	case BPF_JGT:
3606		if ((dst_reg->type == PTR_TO_PACKET &&
3607		     src_reg->type == PTR_TO_PACKET_END) ||
3608		    (dst_reg->type == PTR_TO_PACKET_META &&
3609		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
3610			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
3611			find_good_pkt_pointers(this_branch, dst_reg,
3612					       dst_reg->type, false);
3613		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
3614			    src_reg->type == PTR_TO_PACKET) ||
3615			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
3616			    src_reg->type == PTR_TO_PACKET_META)) {
3617			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
3618			find_good_pkt_pointers(other_branch, src_reg,
3619					       src_reg->type, true);
3620		} else {
3621			return false;
3622		}
3623		break;
3624	case BPF_JLT:
3625		if ((dst_reg->type == PTR_TO_PACKET &&
3626		     src_reg->type == PTR_TO_PACKET_END) ||
3627		    (dst_reg->type == PTR_TO_PACKET_META &&
3628		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
3629			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
3630			find_good_pkt_pointers(other_branch, dst_reg,
3631					       dst_reg->type, true);
3632		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
3633			    src_reg->type == PTR_TO_PACKET) ||
3634			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
3635			    src_reg->type == PTR_TO_PACKET_META)) {
3636			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
3637			find_good_pkt_pointers(this_branch, src_reg,
3638					       src_reg->type, false);
3639		} else {
3640			return false;
3641		}
3642		break;
3643	case BPF_JGE:
3644		if ((dst_reg->type == PTR_TO_PACKET &&
3645		     src_reg->type == PTR_TO_PACKET_END) ||
3646		    (dst_reg->type == PTR_TO_PACKET_META &&
3647		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
3648			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
3649			find_good_pkt_pointers(this_branch, dst_reg,
3650					       dst_reg->type, true);
3651		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
3652			    src_reg->type == PTR_TO_PACKET) ||
3653			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
3654			    src_reg->type == PTR_TO_PACKET_META)) {
3655			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
3656			find_good_pkt_pointers(other_branch, src_reg,
3657					       src_reg->type, false);
3658		} else {
3659			return false;
3660		}
3661		break;
3662	case BPF_JLE:
3663		if ((dst_reg->type == PTR_TO_PACKET &&
3664		     src_reg->type == PTR_TO_PACKET_END) ||
3665		    (dst_reg->type == PTR_TO_PACKET_META &&
3666		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
3667			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
3668			find_good_pkt_pointers(other_branch, dst_reg,
3669					       dst_reg->type, false);
3670		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
3671			    src_reg->type == PTR_TO_PACKET) ||
3672			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
3673			    src_reg->type == PTR_TO_PACKET_META)) {
3674			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
3675			find_good_pkt_pointers(this_branch, src_reg,
3676					       src_reg->type, true);
3677		} else {
3678			return false;
3679		}
3680		break;
3681	default:
3682		return false;
3683	}
3684
3685	return true;
3686}
3687
3688static int check_cond_jmp_op(struct bpf_verifier_env *env,
3689			     struct bpf_insn *insn, int *insn_idx)
3690{
3691	struct bpf_verifier_state *this_branch = env->cur_state;
3692	struct bpf_verifier_state *other_branch;
3693	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
3694	struct bpf_reg_state *dst_reg, *other_branch_regs;
3695	u8 opcode = BPF_OP(insn->code);
3696	int err;
3697
3698	if (opcode > BPF_JSLE) {
3699		verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
3700		return -EINVAL;
3701	}
3702
3703	if (BPF_SRC(insn->code) == BPF_X) {
3704		if (insn->imm != 0) {
3705			verbose(env, "BPF_JMP uses reserved fields\n");
3706			return -EINVAL;
3707		}
3708
3709		/* check src1 operand */
3710		err = check_reg_arg(env, insn->src_reg, SRC_OP);
3711		if (err)
3712			return err;
3713
3714		if (is_pointer_value(env, insn->src_reg)) {
3715			verbose(env, "R%d pointer comparison prohibited\n",
3716				insn->src_reg);
3717			return -EACCES;
3718		}
3719	} else {
3720		if (insn->src_reg != BPF_REG_0) {
3721			verbose(env, "BPF_JMP uses reserved fields\n");
3722			return -EINVAL;
3723		}
3724	}
3725
3726	/* check src2 operand */
3727	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
3728	if (err)
3729		return err;
3730
3731	dst_reg = &regs[insn->dst_reg];
3732
3733	/* detect if R == 0 where R was initialized to zero earlier */
3734	if (BPF_SRC(insn->code) == BPF_K &&
3735	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
3736	    dst_reg->type == SCALAR_VALUE &&
3737	    tnum_is_const(dst_reg->var_off)) {
3738		if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
3739		    (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
3740			/* if (imm == imm) goto pc+off;
3741			 * only follow the goto, ignore fall-through
3742			 */
3743			*insn_idx += insn->off;
3744			return 0;
3745		} else {
3746			/* if (imm != imm) goto pc+off;
3747			 * only follow fall-through branch, since
3748			 * that's where the program will go
3749			 */
3750			return 0;
3751		}
3752	}
3753
3754	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
3755	if (!other_branch)
3756		return -EFAULT;
3757	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
3758
3759	/* detect if we are comparing against a constant value so we can adjust
3760	 * our min/max values for our dst register.
3761	 * this is only legit if both are scalars (or pointers to the same
3762	 * object, I suppose, but we don't support that right now), because
3763	 * otherwise the different base pointers mean the offsets aren't
3764	 * comparable.
3765	 */
3766	if (BPF_SRC(insn->code) == BPF_X) {
3767		if (dst_reg->type == SCALAR_VALUE &&
3768		    regs[insn->src_reg].type == SCALAR_VALUE) {
3769			if (tnum_is_const(regs[insn->src_reg].var_off))
3770				reg_set_min_max(&other_branch_regs[insn->dst_reg],
3771						dst_reg, regs[insn->src_reg].var_off.value,
3772						opcode);
3773			else if (tnum_is_const(dst_reg->var_off))
3774				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
3775						    &regs[insn->src_reg],
3776						    dst_reg->var_off.value, opcode);
3777			else if (opcode == BPF_JEQ || opcode == BPF_JNE)
3778				/* Comparing for equality, we can combine knowledge */
3779				reg_combine_min_max(&other_branch_regs[insn->src_reg],
3780						    &other_branch_regs[insn->dst_reg],
3781						    &regs[insn->src_reg],
3782						    &regs[insn->dst_reg], opcode);
3783		}
3784	} else if (dst_reg->type == SCALAR_VALUE) {
3785		reg_set_min_max(&other_branch_regs[insn->dst_reg],
3786					dst_reg, insn->imm, opcode);
3787	}
3788
3789	/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
3790	if (BPF_SRC(insn->code) == BPF_K &&
3791	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
3792	    dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
3793		/* Mark all identical map registers in each branch as either
3794		 * safe or unknown depending on the R == 0 or R != 0 condition.
3795		 */
3796		mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE);
3797		mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ);
3798	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
3799					   this_branch, other_branch) &&
3800		   is_pointer_value(env, insn->dst_reg)) {
3801		verbose(env, "R%d pointer comparison prohibited\n",
3802			insn->dst_reg);
3803		return -EACCES;
3804	}
3805	if (env->log.level)
3806		print_verifier_state(env, this_branch->frame[this_branch->curframe]);
3807	return 0;
3808}
3809
3810/* return the map pointer stored inside BPF_LD_IMM64 instruction */
3811static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
3812{
3813	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
3814
3815	return (struct bpf_map *) (unsigned long) imm64;
3816}
3817
3818/* verify BPF_LD_IMM64 instruction */
3819static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
3820{
3821	struct bpf_reg_state *regs = cur_regs(env);
3822	int err;
3823
3824	if (BPF_SIZE(insn->code) != BPF_DW) {
3825		verbose(env, "invalid BPF_LD_IMM insn\n");
3826		return -EINVAL;
3827	}
3828	if (insn->off != 0) {
3829		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
3830		return -EINVAL;
3831	}
3832
3833	err = check_reg_arg(env, insn->dst_reg, DST_OP);
3834	if (err)
3835		return err;
3836
3837	if (insn->src_reg == 0) {
3838		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
3839
3840		regs[insn->dst_reg].type = SCALAR_VALUE;
3841		__mark_reg_known(&regs[insn->dst_reg], imm);
3842		return 0;
3843	}
3844
3845	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
3846	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
3847
3848	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
3849	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
3850	return 0;
3851}
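
/* For reference (illustrative): BPF_LD_IMM64 is a two-insn pseudo
 * instruction.  BPF_LD_IMM64(BPF_REG_2, 0x0123456789abcdefULL), built from
 * the macros in linux/filter.h, emits two struct bpf_insn entries with
 * insn[0].imm = 0x89abcdef and insn[1].imm = 0x01234567, which is exactly
 * how the 64-bit constant is reassembled above.  When src_reg is
 * BPF_PSEUDO_MAP_FD, the imm pair instead carries a map pointer that
 * replace_map_fd_with_map_ptr() installed in place of the user-supplied fd.
 */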
3852
3853static bool may_access_skb(enum bpf_prog_type type)
3854{
3855	switch (type) {
3856	case BPF_PROG_TYPE_SOCKET_FILTER:
3857	case BPF_PROG_TYPE_SCHED_CLS:
3858	case BPF_PROG_TYPE_SCHED_ACT:
3859		return true;
3860	default:
3861		return false;
3862	}
3863}
3864
3865/* verify safety of LD_ABS|LD_IND instructions:
3866 * - they can only appear in the programs where ctx == skb
3867 * - since they are wrappers of function calls, they scratch R1-R5 registers,
3868 *   preserve R6-R9, and store return value into R0
3869 *
3870 * Implicit input:
3871 *   ctx == skb == R6 == CTX
3872 *
3873 * Explicit input:
3874 *   SRC == any register
3875 *   IMM == 32-bit immediate
3876 *
3877 * Output:
3878 *   R0 - 8/16/32-bit skb data converted to cpu endianness
3879 */
3880static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
3881{
3882	struct bpf_reg_state *regs = cur_regs(env);
3883	u8 mode = BPF_MODE(insn->code);
3884	int i, err;
3885
3886	if (!may_access_skb(env->prog->type)) {
3887		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
3888		return -EINVAL;
3889	}
3890
3891	if (env->subprog_cnt) {
3892		/* when a program has an LD_ABS insn, JITs and the interpreter
3893		 * assume that r1 == ctx == skb, which is not the case for
3894		 * callees, which can take arbitrary arguments. It's problematic
3895		 * for the main prog as well, since JITs would need to analyze
3896		 * all functions in order to make proper register save/restore
3897		 * decisions in the main prog. Hence disallow LD_ABS with calls.
3898		 */
3899		verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
3900		return -EINVAL;
3901	}
3902
3903	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
3904	    BPF_SIZE(insn->code) == BPF_DW ||
3905	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
3906		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
3907		return -EINVAL;
3908	}
3909
3910	/* check whether implicit source operand (register R6) is readable */
3911	err = check_reg_arg(env, BPF_REG_6, SRC_OP);
3912	if (err)
3913		return err;
3914
3915	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
3916		verbose(env,
3917			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
3918		return -EINVAL;
3919	}
3920
3921	if (mode == BPF_IND) {
3922		/* check explicit source operand */
3923		err = check_reg_arg(env, insn->src_reg, SRC_OP);
3924		if (err)
3925			return err;
3926	}
3927
3928	/* reset caller saved regs to unreadable */
3929	for (i = 0; i < CALLER_SAVED_REGS; i++) {
3930		mark_reg_not_init(env, regs, caller_saved[i]);
3931		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
3932	}
3933
3934	/* mark destination R0 register as readable, since it contains
3935	 * the value fetched from the packet.
3936	 * Already marked as written above.
3937	 */
3938	mark_reg_unknown(env, regs, BPF_REG_0);
3939	return 0;
3940}
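
/* A small sketch (not verifier code) of a legal use in a socket filter
 * style program, where r1 is the skb context on entry; the offset 23 (the
 * IPv4 protocol byte of a plain Ethernet frame) is just an example:
 *
 *	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
 *	BPF_LD_ABS(BPF_B, 23),
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, IPPROTO_TCP, 2),
 *	BPF_MOV64_IMM(BPF_REG_0, 0),
 *	BPF_EXIT_INSN(),
 *	BPF_MOV64_IMM(BPF_REG_0, -1),
 *	BPF_EXIT_INSN(),
 *
 * BPF_LD_ABS loads one byte at skb offset 23 into r0 and scratches r1-r5 as
 * described above, which is why R6 must hold the ctx pointer beforehand.
 */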
3941
3942static int check_return_code(struct bpf_verifier_env *env)
3943{
3944	struct bpf_reg_state *reg;
3945	struct tnum range = tnum_range(0, 1);
3946
3947	switch (env->prog->type) {
3948	case BPF_PROG_TYPE_CGROUP_SKB:
3949	case BPF_PROG_TYPE_CGROUP_SOCK:
3950	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3951	case BPF_PROG_TYPE_SOCK_OPS:
3952	case BPF_PROG_TYPE_CGROUP_DEVICE:
3953		break;
3954	default:
3955		return 0;
3956	}
3957
3958	reg = cur_regs(env) + BPF_REG_0;
3959	if (reg->type != SCALAR_VALUE) {
3960		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
3961			reg_type_str[reg->type]);
3962		return -EINVAL;
3963	}
3964
3965	if (!tnum_in(range, reg->var_off)) {
3966		verbose(env, "At program exit the register R0 ");
3967		if (!tnum_is_unknown(reg->var_off)) {
3968			char tn_buf[48];
3969
3970			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3971			verbose(env, "has value %s", tn_buf);
3972		} else {
3973			verbose(env, "has unknown scalar value");
3974		}
3975		verbose(env, " should have been 0 or 1\n");
3976		return -EINVAL;
3977	}
3978	return 0;
3979}
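
/* For example (illustrative): a BPF_PROG_TYPE_CGROUP_SKB program that ends
 * with
 *
 *	BPF_MOV64_IMM(BPF_REG_0, 1),
 *	BPF_EXIT_INSN(),
 *
 * passes this check, since r0 is the constant 1 and lies inside
 * tnum_range(0, 1), whereas returning the constant 2 (or an unbounded
 * scalar) would be rejected here.
 */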
3980
3981/* non-recursive DFS pseudo code
3982 * 1  procedure DFS-iterative(G,v):
3983 * 2      label v as discovered
3984 * 3      let S be a stack
3985 * 4      S.push(v)
3986 * 5      while S is not empty
3987 * 6            t <- S.pop()
3988 * 7            if t is what we're looking for:
3989 * 8                return t
3990 * 9            for all edges e in G.adjacentEdges(t) do
3991 * 10               if edge e is already labelled
3992 * 11                   continue with the next edge
3993 * 12               w <- G.adjacentVertex(t,e)
3994 * 13               if vertex w is not discovered and not explored
3995 * 14                   label e as tree-edge
3996 * 15                   label w as discovered
3997 * 16                   S.push(w)
3998 * 17                   continue at 5
3999 * 18               else if vertex w is discovered
4000 * 19                   label e as back-edge
4001 * 20               else
4002 * 21                   // vertex w is explored
4003 * 22                   label e as forward- or cross-edge
4004 * 23           label t as explored
4005 * 24           S.pop()
4006 *
4007 * convention:
4008 * 0x10 - discovered
4009 * 0x11 - discovered and fall-through edge labelled
4010 * 0x12 - discovered and fall-through and branch edges labelled
4011 * 0x20 - explored
4012 */
4013
4014enum {
4015	DISCOVERED = 0x10,
4016	EXPLORED = 0x20,
4017	FALLTHROUGH = 1,
4018	BRANCH = 2,
4019};
4020
4021#define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
4022
4023static int *insn_stack;	/* stack of insns to process */
4024static int cur_stack;	/* current stack index */
4025static int *insn_state;
4026
4027/* t, w, e - match pseudo-code above:
4028 * t - index of current instruction
4029 * w - next instruction
4030 * e - edge
4031 */
4032static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
4033{
4034	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
4035		return 0;
4036
4037	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
4038		return 0;
4039
4040	if (w < 0 || w >= env->prog->len) {
4041		verbose(env, "jump out of range from insn %d to %d\n", t, w);
4042		return -EINVAL;
4043	}
4044
4045	if (e == BRANCH)
4046		/* mark branch target for state pruning */
4047		env->explored_states[w] = STATE_LIST_MARK;
4048
4049	if (insn_state[w] == 0) {
4050		/* tree-edge */
4051		insn_state[t] = DISCOVERED | e;
4052		insn_state[w] = DISCOVERED;
4053		if (cur_stack >= env->prog->len)
4054			return -E2BIG;
4055		insn_stack[cur_stack++] = w;
4056		return 1;
4057	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
4058		verbose(env, "back-edge from insn %d to %d\n", t, w);
4059		return -EINVAL;
4060	} else if (insn_state[w] == EXPLORED) {
4061		/* forward- or cross-edge */
4062		insn_state[t] = DISCOVERED | e;
4063	} else {
4064		verbose(env, "insn state internal bug\n");
4065		return -EFAULT;
4066	}
4067	return 0;
4068}
4069
4070/* non-recursive depth-first-search to detect loops in BPF program
4071 * loop == back-edge in directed graph
4072 */
4073static int check_cfg(struct bpf_verifier_env *env)
4074{
4075	struct bpf_insn *insns = env->prog->insnsi;
4076	int insn_cnt = env->prog->len;
4077	int ret = 0;
4078	int i, t;
4079
4080	ret = check_subprogs(env);
4081	if (ret < 0)
4082		return ret;
4083
4084	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
4085	if (!insn_state)
4086		return -ENOMEM;
4087
4088	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
4089	if (!insn_stack) {
4090		kfree(insn_state);
4091		return -ENOMEM;
4092	}
4093
4094	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
4095	insn_stack[0] = 0; /* 0 is the first instruction */
4096	cur_stack = 1;
4097
4098peek_stack:
4099	if (cur_stack == 0)
4100		goto check_state;
4101	t = insn_stack[cur_stack - 1];
4102
4103	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
4104		u8 opcode = BPF_OP(insns[t].code);
4105
4106		if (opcode == BPF_EXIT) {
4107			goto mark_explored;
4108		} else if (opcode == BPF_CALL) {
4109			ret = push_insn(t, t + 1, FALLTHROUGH, env);
4110			if (ret == 1)
4111				goto peek_stack;
4112			else if (ret < 0)
4113				goto err_free;
4114			if (t + 1 < insn_cnt)
4115				env->explored_states[t + 1] = STATE_LIST_MARK;
4116			if (insns[t].src_reg == BPF_PSEUDO_CALL) {
4117				env->explored_states[t] = STATE_LIST_MARK;
4118				ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
4119				if (ret == 1)
4120					goto peek_stack;
4121				else if (ret < 0)
4122					goto err_free;
4123			}
4124		} else if (opcode == BPF_JA) {
4125			if (BPF_SRC(insns[t].code) != BPF_K) {
4126				ret = -EINVAL;
4127				goto err_free;
4128			}
4129			/* unconditional jump with single edge */
4130			ret = push_insn(t, t + insns[t].off + 1,
4131					FALLTHROUGH, env);
4132			if (ret == 1)
4133				goto peek_stack;
4134			else if (ret < 0)
4135				goto err_free;
4136			/* tell verifier to check for equivalent states
4137			 * after every call and jump
4138			 */
4139			if (t + 1 < insn_cnt)
4140				env->explored_states[t + 1] = STATE_LIST_MARK;
4141		} else {
4142			/* conditional jump with two edges */
4143			env->explored_states[t] = STATE_LIST_MARK;
4144			ret = push_insn(t, t + 1, FALLTHROUGH, env);
4145			if (ret == 1)
4146				goto peek_stack;
4147			else if (ret < 0)
4148				goto err_free;
4149
4150			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
4151			if (ret == 1)
4152				goto peek_stack;
4153			else if (ret < 0)
4154				goto err_free;
4155		}
4156	} else {
4157		/* all other non-branch instructions with single
4158		 * fall-through edge
4159		 */
4160		ret = push_insn(t, t + 1, FALLTHROUGH, env);
4161		if (ret == 1)
4162			goto peek_stack;
4163		else if (ret < 0)
4164			goto err_free;
4165	}
4166
4167mark_explored:
4168	insn_state[t] = EXPLORED;
4169	if (cur_stack-- <= 0) {
4170		verbose(env, "pop stack internal bug\n");
4171		ret = -EFAULT;
4172		goto err_free;
4173	}
4174	goto peek_stack;
4175
4176check_state:
4177	for (i = 0; i < insn_cnt; i++) {
4178		if (insn_state[i] != EXPLORED) {
4179			verbose(env, "unreachable insn %d\n", i);
4180			ret = -EINVAL;
4181			goto err_free;
4182		}
4183	}
4184	ret = 0; /* cfg looks good */
4185
4186err_free:
4187	kfree(insn_state);
4188	kfree(insn_stack);
4189	return ret;
4190}
4191
4192/* check %cur's range satisfies %old's */
4193static bool range_within(struct bpf_reg_state *old,
4194			 struct bpf_reg_state *cur)
4195{
4196	return old->umin_value <= cur->umin_value &&
4197	       old->umax_value >= cur->umax_value &&
4198	       old->smin_value <= cur->smin_value &&
4199	       old->smax_value >= cur->smax_value;
4200}
4201
4202/* Maximum number of register states that can exist at once */
4203#define ID_MAP_SIZE	(MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
4204struct idpair {
4205	u32 old;
4206	u32 cur;
4207};
4208
4209/* If in the old state two registers had the same id, then they need to have
4210 * the same id in the new state as well.  But that id could be different from
4211 * the old state, so we need to track the mapping from old to new ids.
4212 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
4213 * regs with old id 5 must also have new id 9 for the new state to be safe.  But
4214 * regs with a different old id could still have new id 9, we don't care about
4215 * that.
4216 * So we look through our idmap to see if this old id has been seen before.  If
4217 * so, we require the new id to match; otherwise, we add the id pair to the map.
4218 */
4219static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
4220{
4221	unsigned int i;
4222
4223	for (i = 0; i < ID_MAP_SIZE; i++) {
4224		if (!idmap[i].old) {
4225			/* Reached an empty slot; haven't seen this id before */
4226			idmap[i].old = old_id;
4227			idmap[i].cur = cur_id;
4228			return true;
4229		}
4230		if (idmap[i].old == old_id)
4231			return idmap[i].cur == cur_id;
4232	}
4233	/* We ran out of idmap slots, which should be impossible */
4234	WARN_ON_ONCE(1);
4235	return false;
4236}
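
/* A short worked example (illustrative): suppose the old state had
 * r1.id == 5 and r2.id == 5, while the current state has r1.id == 9 and
 * r2.id == 7.  The first call records the pair (5, 9); the second call sees
 * old id 5 again, finds that cur id 7 != 9 and returns false, because two
 * registers that were linked in the old state are no longer linked in the
 * current one.
 */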
4237
4238/* Returns true if (rold safe implies rcur safe) */
4239static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
4240		    struct idpair *idmap)
4241{
4242	bool equal;
4243
4244	if (!(rold->live & REG_LIVE_READ))
4245		/* explored state didn't use this */
4246		return true;
4247
4248	equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0;
4249
4250	if (rold->type == PTR_TO_STACK)
4251		/* two stack pointers are equal only if they're pointing to
4252		 * the same stack frame, since fp-8 in foo != fp-8 in bar
4253		 */
4254		return equal && rold->frameno == rcur->frameno;
4255
4256	if (equal)
4257		return true;
4258
4259	if (rold->type == NOT_INIT)
4260		/* explored state can't have used this */
4261		return true;
4262	if (rcur->type == NOT_INIT)
4263		return false;
4264	switch (rold->type) {
4265	case SCALAR_VALUE:
4266		if (rcur->type == SCALAR_VALUE) {
4267			/* new val must satisfy old val knowledge */
4268			return range_within(rold, rcur) &&
4269			       tnum_in(rold->var_off, rcur->var_off);
4270		} else {
4271			/* We're trying to use a pointer in place of a scalar.
4272			 * Even if the scalar was unbounded, this could lead to
4273			 * pointer leaks because scalars are allowed to leak
4274			 * while pointers are not. We could make this safe in
4275			 * special cases if root is calling us, but it's
4276			 * probably not worth the hassle.
4277			 */
4278			return false;
4279		}
4280	case PTR_TO_MAP_VALUE:
4281		/* If the new min/max/var_off satisfy the old ones and
4282		 * everything else matches, we are OK.
4283		 * We don't care about the 'id' value, because nothing
4284		 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
4285		 */
4286		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
4287		       range_within(rold, rcur) &&
4288		       tnum_in(rold->var_off, rcur->var_off);
4289	case PTR_TO_MAP_VALUE_OR_NULL:
4290		/* a PTR_TO_MAP_VALUE could be safe to use as a
4291		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
4292		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
4293		 * checked, doing so could have affected others with the same
4294		 * id, and we can't check for that because we lost the id when
4295		 * we converted to a PTR_TO_MAP_VALUE.
4296		 */
4297		if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
4298			return false;
4299		if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
4300			return false;
4301		/* Check our ids match any regs they're supposed to */
4302		return check_ids(rold->id, rcur->id, idmap);
4303	case PTR_TO_PACKET_META:
4304	case PTR_TO_PACKET:
4305		if (rcur->type != rold->type)
4306			return false;
4307		/* We must have at least as much range as the old ptr
4308		 * did, so that any accesses which were safe before are
4309		 * still safe.  This is true even if old range < old off,
4310		 * since someone could have accessed through (ptr - k), or
4311		 * even done ptr -= k in a register, to get a safe access.
4312		 */
4313		if (rold->range > rcur->range)
4314			return false;
4315		/* If the offsets don't match, we can't trust our alignment;
4316		 * nor can we be sure that we won't fall out of range.
4317		 */
4318		if (rold->off != rcur->off)
4319			return false;
4320		/* id relations must be preserved */
4321		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
4322			return false;
4323		/* new val must satisfy old val knowledge */
4324		return range_within(rold, rcur) &&
4325		       tnum_in(rold->var_off, rcur->var_off);
4326	case PTR_TO_CTX:
4327	case CONST_PTR_TO_MAP:
4328	case PTR_TO_PACKET_END:
4329		/* Only valid matches are exact, which memcmp() above
4330		 * would have accepted
4331		 */
4332	default:
4333		/* Don't know what's going on, just say it's not safe */
4334		return false;
4335	}
4336
4337	/* Shouldn't get here; if we do, say it's not safe */
4338	WARN_ON_ONCE(1);
4339	return false;
4340}
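
/* For instance (illustrative): an old SCALAR_VALUE known to lie in [0, 255]
 * with var_off (0; 0xff) is satisfied by a current register known to lie in
 * [16, 31] with var_off (0x10; 0xf): the new range is within the old one and
 * every value the new var_off allows is also allowed by the old.  The
 * opposite direction would not be safe: the continuation was only proven
 * safe for the old range, and a wider current register could hold values
 * outside it.
 */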
4341
4342static bool stacksafe(struct bpf_func_state *old,
4343		      struct bpf_func_state *cur,
4344		      struct idpair *idmap)
4345{
4346	int i, spi;
4347
4348	/* if the explored stack has more populated slots than the current
4349	 * stack, such stacks are not equivalent
4350	 */
4351	if (old->allocated_stack > cur->allocated_stack)
4352		return false;
4353
4354	/* walk slots of the explored stack and ignore any additional
4355	 * slots in the current stack, since explored(safe) state
4356	 * didn't use them
4357	 */
4358	for (i = 0; i < old->allocated_stack; i++) {
4359		spi = i / BPF_REG_SIZE;
4360
4361		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
4362			/* explored state didn't use this */
4363			continue;
4364
4365		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
4366			continue;
4367		/* if old state was safe with misc data in the stack
4368		 * it will be safe with zero-initialized stack.
4369		 * The opposite is not true
4370		 */
4371		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
4372		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
4373			continue;
4374		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
4375		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
4376			/* Ex: old explored (safe) state has STACK_SPILL in
4377			 * this stack slot, but current has STACK_MISC ->
4378			 * these verifier states are not equivalent,
4379			 * return false to continue verification of this path
4380			 */
4381			return false;
4382		if (i % BPF_REG_SIZE)
4383			continue;
4384		if (old->stack[spi].slot_type[0] != STACK_SPILL)
4385			continue;
4386		if (!regsafe(&old->stack[spi].spilled_ptr,
4387			     &cur->stack[spi].spilled_ptr,
4388			     idmap))
4389			/* when explored and current stack slot are both storing
4390			 * spilled registers, check that stored pointers types
4391			 * are the same as well.
4392			 * Ex: explored safe path could have stored
4393			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
4394			 * but current path has stored:
4395			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
4396			 * such verifier states are not equivalent.
4397			 * return false to continue verification of this path
4398			 */
4399			return false;
4400	}
4401	return true;
4402}
4403
4404/* compare two verifier states
4405 *
4406 * all states stored in state_list are known to be valid, since
4407 * verifier reached 'bpf_exit' instruction through them
4408 *
4409 * this function is called when verifier exploring different branches of
4410 * execution popped from the state stack. If it sees an old state that has
4411 * a stricter register state and a stricter stack state, then this execution
4412 * branch doesn't need to be explored further, since the verifier already
4413 * concluded that the stricter state leads to a valid finish.
4414 *
4415 * Therefore two states are equivalent if register state is more conservative
4416 * and explored stack state is more conservative than the current one.
4417 * Example:
4418 *       explored                   current
4419 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
4420 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
4421 *
4422 * In other words, if the current stack state (the one being explored) has
4423 * more valid slots than the old one that already passed validation, it means
4424 * the verifier can stop exploring and conclude that the current state is valid too
4425 *
4426 * Similarly with registers. If the explored state has a register type marked
4427 * invalid whereas the register type in the current state is meaningful, it
4428 * means that the current state will reach the 'bpf_exit' instruction safely
4429 */
4430static bool func_states_equal(struct bpf_func_state *old,
4431			      struct bpf_func_state *cur)
4432{
4433	struct idpair *idmap;
4434	bool ret = false;
4435	int i;
4436
4437	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
4438	/* If we failed to allocate the idmap, just say it's not safe */
4439	if (!idmap)
4440		return false;
4441
4442	for (i = 0; i < MAX_BPF_REG; i++) {
4443		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
4444			goto out_free;
4445	}
4446
4447	if (!stacksafe(old, cur, idmap))
4448		goto out_free;
4449	ret = true;
4450out_free:
4451	kfree(idmap);
4452	return ret;
4453}
4454
4455static bool states_equal(struct bpf_verifier_env *env,
4456			 struct bpf_verifier_state *old,
4457			 struct bpf_verifier_state *cur)
4458{
4459	int i;
4460
4461	if (old->curframe != cur->curframe)
4462		return false;
4463
4464	/* for states to be equal callsites have to be the same
4465	 * and all frame states need to be equivalent
4466	 */
4467	for (i = 0; i <= old->curframe; i++) {
4468		if (old->frame[i]->callsite != cur->frame[i]->callsite)
4469			return false;
4470		if (!func_states_equal(old->frame[i], cur->frame[i]))
4471			return false;
4472	}
4473	return true;
4474}
4475
4476/* A write screens off any subsequent reads; but write marks come from the
4477 * straight-line code between a state and its parent.  When we arrive at an
4478 * equivalent state (jump target or such) we didn't arrive by the straight-line
4479 * code, so read marks in the state must propagate to the parent regardless
4480 * of the state's write marks. That's what 'parent == state->parent' comparison
4481 * in mark_reg_read() and mark_stack_slot_read() is for.
4482 */
4483static int propagate_liveness(struct bpf_verifier_env *env,
4484			      const struct bpf_verifier_state *vstate,
4485			      struct bpf_verifier_state *vparent)
4486{
4487	int i, frame, err = 0;
4488	struct bpf_func_state *state, *parent;
4489
4490	if (vparent->curframe != vstate->curframe) {
4491		WARN(1, "propagate_live: parent frame %d current frame %d\n",
4492		     vparent->curframe, vstate->curframe);
4493		return -EFAULT;
4494	}
4495	/* Propagate read liveness of registers... */
4496	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
4497	/* We don't need to worry about FP liveness because it's read-only */
4498	for (i = 0; i < BPF_REG_FP; i++) {
4499		if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
4500			continue;
4501		if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
4502			err = mark_reg_read(env, vstate, vparent, i);
4503			if (err)
4504				return err;
4505		}
4506	}
4507
4508	/* ... and stack slots */
4509	for (frame = 0; frame <= vstate->curframe; frame++) {
4510		state = vstate->frame[frame];
4511		parent = vparent->frame[frame];
4512		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
4513			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
4514			if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
4515				continue;
4516			if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
4517				mark_stack_slot_read(env, vstate, vparent, i, frame);
4518		}
4519	}
4520	return err;
4521}
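
/* Concretely (illustrative): if the explored state we are pruning against
 * had its continuation read r2, that REG_LIVE_READ mark is copied into the
 * current path's parent chain here, even though the current path itself
 * never read r2.  Without this, a later pruning decision against the parent
 * could treat r2 as dead and accept a state whose r2 is too imprecise for
 * the continuation being reused.
 */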
4522
4523static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
4524{
4525	struct bpf_verifier_state_list *new_sl;
4526	struct bpf_verifier_state_list *sl;
4527	struct bpf_verifier_state *cur = env->cur_state;
4528	int i, j, err;
4529
4530	sl = env->explored_states[insn_idx];
4531	if (!sl)
4532		/* this 'insn_idx' instruction wasn't marked, so we will not
4533		 * be doing state search here
4534		 */
4535		return 0;
4536
4537	while (sl != STATE_LIST_MARK) {
4538		if (states_equal(env, &sl->state, cur)) {
4539			/* reached equivalent register/stack state,
4540			 * prune the search.
4541			 * Registers read by the continuation are read by us.
4542			 * If we have any write marks in env->cur_state, they
4543			 * will prevent corresponding reads in the continuation
4544			 * from reaching our parent (an explored_state).  Our
4545			 * own state will get the read marks recorded, but
4546			 * they'll be immediately forgotten as we're pruning
4547			 * this state and will pop a new one.
4548			 */
4549			err = propagate_liveness(env, &sl->state, cur);
4550			if (err)
4551				return err;
4552			return 1;
4553		}
4554		sl = sl->next;
4555	}
4556
4557	/* there were no equivalent states, remember current one.
4558	 * technically the current state is not proven to be safe yet,
4559	 * but it will either reach the outermost bpf_exit (which means it's safe)
4560	 * or it will be rejected. Since there are no loops, we won't be
4561	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
4562	 * again on the way to bpf_exit
4563	 */
4564	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
4565	if (!new_sl)
4566		return -ENOMEM;
4567
4568	/* add new state to the head of linked list */
4569	err = copy_verifier_state(&new_sl->state, cur);
4570	if (err) {
4571		free_verifier_state(&new_sl->state, false);
4572		kfree(new_sl);
4573		return err;
4574	}
4575	new_sl->next = env->explored_states[insn_idx];
4576	env->explored_states[insn_idx] = new_sl;
4577	/* connect new state to parentage chain */
4578	cur->parent = &new_sl->state;
4579	/* clear write marks in current state: the writes we did are not writes
4580	 * our child did, so they don't screen off its reads from us.
4581	 * (There are no read marks in current state, because reads always mark
4582	 * their parent and current state never has children yet.  Only
4583	 * explored_states can get read marks.)
4584	 */
4585	for (i = 0; i < BPF_REG_FP; i++)
4586		cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
4587
4588	/* all stack frames are accessible from callee, clear them all */
4589	for (j = 0; j <= cur->curframe; j++) {
4590		struct bpf_func_state *frame = cur->frame[j];
4591
4592		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++)
4593			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
4594	}
4595	return 0;
4596}
4597
4598static int do_check(struct bpf_verifier_env *env)
4599{
4600	struct bpf_verifier_state *state;
4601	struct bpf_insn *insns = env->prog->insnsi;
4602	struct bpf_reg_state *regs;
4603	int insn_cnt = env->prog->len, i;
4604	int insn_idx, prev_insn_idx = 0;
4605	int insn_processed = 0;
4606	bool do_print_state = false;
4607
4608	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
4609	if (!state)
4610		return -ENOMEM;
4611	state->curframe = 0;
4612	state->parent = NULL;
4613	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
4614	if (!state->frame[0]) {
4615		kfree(state);
4616		return -ENOMEM;
4617	}
4618	env->cur_state = state;
4619	init_func_state(env, state->frame[0],
4620			BPF_MAIN_FUNC /* callsite */,
4621			0 /* frameno */,
4622			0 /* subprogno, zero == main subprog */);
4623	insn_idx = 0;
4624	for (;;) {
4625		struct bpf_insn *insn;
4626		u8 class;
4627		int err;
4628
4629		if (insn_idx >= insn_cnt) {
4630			verbose(env, "invalid insn idx %d insn_cnt %d\n",
4631				insn_idx, insn_cnt);
4632			return -EFAULT;
4633		}
4634
4635		insn = &insns[insn_idx];
4636		class = BPF_CLASS(insn->code);
4637
4638		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
4639			verbose(env,
4640				"BPF program is too large. Processed %d insn\n",
4641				insn_processed);
4642			return -E2BIG;
4643		}
4644
4645		err = is_state_visited(env, insn_idx);
4646		if (err < 0)
4647			return err;
4648		if (err == 1) {
4649			/* found equivalent state, can prune the search */
4650			if (env->log.level) {
4651				if (do_print_state)
4652					verbose(env, "\nfrom %d to %d: safe\n",
4653						prev_insn_idx, insn_idx);
4654				else
4655					verbose(env, "%d: safe\n", insn_idx);
4656			}
4657			goto process_bpf_exit;
4658		}
4659
4660		if (need_resched())
4661			cond_resched();
4662
4663		if (env->log.level > 1 || (env->log.level && do_print_state)) {
4664			if (env->log.level > 1)
4665				verbose(env, "%d:", insn_idx);
4666			else
4667				verbose(env, "\nfrom %d to %d:",
4668					prev_insn_idx, insn_idx);
4669			print_verifier_state(env, state->frame[state->curframe]);
4670			do_print_state = false;
4671		}
4672
4673		if (env->log.level) {
4674			const struct bpf_insn_cbs cbs = {
4675				.cb_print	= verbose,
4676				.private_data	= env,
4677			};
4678
4679			verbose(env, "%d: ", insn_idx);
4680			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
4681		}
4682
4683		if (bpf_prog_is_dev_bound(env->prog->aux)) {
4684			err = bpf_prog_offload_verify_insn(env, insn_idx,
4685							   prev_insn_idx);
4686			if (err)
4687				return err;
4688		}
4689
4690		regs = cur_regs(env);
4691		env->insn_aux_data[insn_idx].seen = true;
4692		if (class == BPF_ALU || class == BPF_ALU64) {
4693			err = check_alu_op(env, insn);
4694			if (err)
4695				return err;
4696
4697		} else if (class == BPF_LDX) {
4698			enum bpf_reg_type *prev_src_type, src_reg_type;
4699
4700			/* the check for reserved fields was already done */
4701
4702			/* check src operand */
4703			err = check_reg_arg(env, insn->src_reg, SRC_OP);
4704			if (err)
4705				return err;
4706
4707			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
4708			if (err)
4709				return err;
4710
4711			src_reg_type = regs[insn->src_reg].type;
4712
4713			/* check that memory (src_reg + off) is readable,
4714			 * the state of dst_reg will be updated by this func
4715			 */
4716			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
4717					       BPF_SIZE(insn->code), BPF_READ,
4718					       insn->dst_reg, false);
4719			if (err)
4720				return err;
4721
4722			prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
4723
4724			if (*prev_src_type == NOT_INIT) {
4725				/* saw a valid insn
4726				 * dst_reg = *(u32 *)(src_reg + off)
4727				 * save type to validate intersecting paths
4728				 */
4729				*prev_src_type = src_reg_type;
4730
4731			} else if (src_reg_type != *prev_src_type &&
4732				   (src_reg_type == PTR_TO_CTX ||
4733				    *prev_src_type == PTR_TO_CTX)) {
4734				/* A user program is trying to use the same insn
4735				 * dst_reg = *(u32*) (src_reg + off)
4736				 * with different pointer types:
4737				 * src_reg == ctx in one branch and
4738				 * src_reg == stack|map in some other branch.
4739				 * Reject it.
4740				 */
4741				verbose(env, "same insn cannot be used with different pointers\n");
4742				return -EINVAL;
4743			}
4744
4745		} else if (class == BPF_STX) {
4746			enum bpf_reg_type *prev_dst_type, dst_reg_type;
4747
4748			if (BPF_MODE(insn->code) == BPF_XADD) {
4749				err = check_xadd(env, insn_idx, insn);
4750				if (err)
4751					return err;
4752				insn_idx++;
4753				continue;
4754			}
4755
4756			/* check src1 operand */
4757			err = check_reg_arg(env, insn->src_reg, SRC_OP);
4758			if (err)
4759				return err;
4760			/* check src2 operand */
4761			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
4762			if (err)
4763				return err;
4764
4765			dst_reg_type = regs[insn->dst_reg].type;
4766
4767			/* check that memory (dst_reg + off) is writeable */
4768			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4769					       BPF_SIZE(insn->code), BPF_WRITE,
4770					       insn->src_reg, false);
4771			if (err)
4772				return err;
4773
4774			prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
4775
4776			if (*prev_dst_type == NOT_INIT) {
4777				*prev_dst_type = dst_reg_type;
4778			} else if (dst_reg_type != *prev_dst_type &&
4779				   (dst_reg_type == PTR_TO_CTX ||
4780				    *prev_dst_type == PTR_TO_CTX)) {
4781				verbose(env, "same insn cannot be used with different pointers\n");
4782				return -EINVAL;
4783			}
4784
4785		} else if (class == BPF_ST) {
4786			if (BPF_MODE(insn->code) != BPF_MEM ||
4787			    insn->src_reg != BPF_REG_0) {
4788				verbose(env, "BPF_ST uses reserved fields\n");
4789				return -EINVAL;
4790			}
4791			/* check src operand */
4792			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
4793			if (err)
4794				return err;
4795
4796			if (is_ctx_reg(env, insn->dst_reg)) {
4797				verbose(env, "BPF_ST stores into R%d context is not allowed\n",
4798					insn->dst_reg);
4799				return -EACCES;
4800			}
4801
4802			/* check that memory (dst_reg + off) is writeable */
4803			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
4804					       BPF_SIZE(insn->code), BPF_WRITE,
4805					       -1, false);
4806			if (err)
4807				return err;
4808
4809		} else if (class == BPF_JMP) {
4810			u8 opcode = BPF_OP(insn->code);
4811
4812			if (opcode == BPF_CALL) {
4813				if (BPF_SRC(insn->code) != BPF_K ||
4814				    insn->off != 0 ||
4815				    (insn->src_reg != BPF_REG_0 &&
4816				     insn->src_reg != BPF_PSEUDO_CALL) ||
4817				    insn->dst_reg != BPF_REG_0) {
4818					verbose(env, "BPF_CALL uses reserved fields\n");
4819					return -EINVAL;
4820				}
4821
4822				if (insn->src_reg == BPF_PSEUDO_CALL)
4823					err = check_func_call(env, insn, &insn_idx);
4824				else
4825					err = check_helper_call(env, insn->imm, insn_idx);
4826				if (err)
4827					return err;
4828
4829			} else if (opcode == BPF_JA) {
4830				if (BPF_SRC(insn->code) != BPF_K ||
4831				    insn->imm != 0 ||
4832				    insn->src_reg != BPF_REG_0 ||
4833				    insn->dst_reg != BPF_REG_0) {
4834					verbose(env, "BPF_JA uses reserved fields\n");
4835					return -EINVAL;
4836				}
4837
4838				insn_idx += insn->off + 1;
4839				continue;
4840
4841			} else if (opcode == BPF_EXIT) {
4842				if (BPF_SRC(insn->code) != BPF_K ||
4843				    insn->imm != 0 ||
4844				    insn->src_reg != BPF_REG_0 ||
4845				    insn->dst_reg != BPF_REG_0) {
4846					verbose(env, "BPF_EXIT uses reserved fields\n");
4847					return -EINVAL;
4848				}
4849
4850				if (state->curframe) {
4851					/* exit from nested function */
4852					prev_insn_idx = insn_idx;
4853					err = prepare_func_exit(env, &insn_idx);
4854					if (err)
4855						return err;
4856					do_print_state = true;
4857					continue;
4858				}
4859
4860				/* eBPF calling convention is such that R0 is used
4861				 * to return the value from eBPF program.
4862				 * Make sure that it's readable at this time
4863				 * of bpf_exit, which means that program wrote
4864				 * something into it earlier
4865				 */
4866				err = check_reg_arg(env, BPF_REG_0, SRC_OP);
4867				if (err)
4868					return err;
4869
4870				if (is_pointer_value(env, BPF_REG_0)) {
4871					verbose(env, "R0 leaks addr as return value\n");
4872					return -EACCES;
4873				}
4874
4875				err = check_return_code(env);
4876				if (err)
4877					return err;
4878process_bpf_exit:
4879				err = pop_stack(env, &prev_insn_idx, &insn_idx);
4880				if (err < 0) {
4881					if (err != -ENOENT)
4882						return err;
4883					break;
4884				} else {
4885					do_print_state = true;
4886					continue;
4887				}
4888			} else {
4889				err = check_cond_jmp_op(env, insn, &insn_idx);
4890				if (err)
4891					return err;
4892			}
4893		} else if (class == BPF_LD) {
4894			u8 mode = BPF_MODE(insn->code);
4895
4896			if (mode == BPF_ABS || mode == BPF_IND) {
4897				err = check_ld_abs(env, insn);
4898				if (err)
4899					return err;
4900
4901			} else if (mode == BPF_IMM) {
4902				err = check_ld_imm(env, insn);
4903				if (err)
4904					return err;
4905
4906				insn_idx++;
4907				env->insn_aux_data[insn_idx].seen = true;
4908			} else {
4909				verbose(env, "invalid BPF_LD mode\n");
4910				return -EINVAL;
4911			}
4912		} else {
4913			verbose(env, "unknown insn class %d\n", class);
4914			return -EINVAL;
4915		}
4916
4917		insn_idx++;
4918	}
4919
4920	verbose(env, "processed %d insns (limit %d), stack depth ",
4921		insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
4922	for (i = 0; i < env->subprog_cnt + 1; i++) {
4923		u32 depth = env->subprog_stack_depth[i];
4924
4925		verbose(env, "%d", depth);
4926		if (i + 1 < env->subprog_cnt + 1)
4927			verbose(env, "+");
4928	}
4929	verbose(env, "\n");
4930	env->prog->aux->stack_depth = env->subprog_stack_depth[0];
4931	return 0;
4932}
4933
4934static int check_map_prealloc(struct bpf_map *map)
4935{
4936	return (map->map_type != BPF_MAP_TYPE_HASH &&
4937		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
4938		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
4939		!(map->map_flags & BPF_F_NO_PREALLOC);
4940}
4941
4942static int check_map_prog_compatibility(struct bpf_verifier_env *env,
4943					struct bpf_map *map,
4944					struct bpf_prog *prog)
4945
4946{
4947	/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
4948	 * preallocated hash maps, since doing memory allocation
4949	 * in overflow_handler can crash depending on where nmi got
4950	 * triggered.
4951	 */
4952	if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
4953		if (!check_map_prealloc(map)) {
4954			verbose(env, "perf_event programs can only use preallocated hash map\n");
4955			return -EINVAL;
4956		}
4957		if (map->inner_map_meta &&
4958		    !check_map_prealloc(map->inner_map_meta)) {
4959			verbose(env, "perf_event programs can only use preallocated inner hash map\n");
4960			return -EINVAL;
4961		}
4962	}
4963
4964	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
4965	    !bpf_offload_dev_match(prog, map)) {
4966		verbose(env, "offload device mismatch between prog and map\n");
4967		return -EINVAL;
4968	}
4969
4970	return 0;
4971}
4972
4973/* look for pseudo eBPF instructions that access map FDs and
4974 * replace them with actual map pointers
4975 */
4976static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
4977{
4978	struct bpf_insn *insn = env->prog->insnsi;
4979	int insn_cnt = env->prog->len;
4980	int i, j, err;
4981
4982	err = bpf_prog_calc_tag(env->prog);
4983	if (err)
4984		return err;
4985
4986	for (i = 0; i < insn_cnt; i++, insn++) {
4987		if (BPF_CLASS(insn->code) == BPF_LDX &&
4988		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
4989			verbose(env, "BPF_LDX uses reserved fields\n");
4990			return -EINVAL;
4991		}
4992
4993		if (BPF_CLASS(insn->code) == BPF_STX &&
4994		    ((BPF_MODE(insn->code) != BPF_MEM &&
4995		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
4996			verbose(env, "BPF_STX uses reserved fields\n");
4997			return -EINVAL;
4998		}
4999
5000		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
5001			struct bpf_map *map;
5002			struct fd f;
5003
5004			if (i == insn_cnt - 1 || insn[1].code != 0 ||
5005			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
5006			    insn[1].off != 0) {
5007				verbose(env, "invalid bpf_ld_imm64 insn\n");
5008				return -EINVAL;
5009			}
5010
5011			if (insn->src_reg == 0)
5012				/* valid generic load 64-bit imm */
5013				goto next_insn;
5014
5015			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
5016				verbose(env,
5017					"unrecognized bpf_ld_imm64 insn\n");
5018				return -EINVAL;
5019			}
5020
5021			f = fdget(insn->imm);
5022			map = __bpf_map_get(f);
5023			if (IS_ERR(map)) {
5024				verbose(env, "fd %d is not pointing to valid bpf_map\n",
5025					insn->imm);
5026				return PTR_ERR(map);
5027			}
5028
5029			err = check_map_prog_compatibility(env, map, env->prog);
5030			if (err) {
5031				fdput(f);
5032				return err;
5033			}
5034
5035			/* store map pointer inside BPF_LD_IMM64 instruction */
5036			insn[0].imm = (u32) (unsigned long) map;
5037			insn[1].imm = ((u64) (unsigned long) map) >> 32;
5038
5039			/* check whether we recorded this map already */
5040			for (j = 0; j < env->used_map_cnt; j++)
5041				if (env->used_maps[j] == map) {
5042					fdput(f);
5043					goto next_insn;
5044				}
5045
5046			if (env->used_map_cnt >= MAX_USED_MAPS) {
5047				fdput(f);
5048				return -E2BIG;
5049			}
5050
5051			/* hold the map. If the program is rejected by the verifier,
5052			 * the map will be released by release_maps(); otherwise it
5053			 * will be used by the valid program until it's unloaded
5054			 * and all maps are released in free_bpf_prog_info()
5055			 */
5056			map = bpf_map_inc(map, false);
5057			if (IS_ERR(map)) {
5058				fdput(f);
5059				return PTR_ERR(map);
5060			}
5061			env->used_maps[env->used_map_cnt++] = map;
5062
5063			fdput(f);
5064next_insn:
5065			insn++;
5066			i++;
5067			continue;
5068		}
5069
5070		/* Basic sanity check before we invest more work here. */
5071		if (!bpf_opcode_in_insntable(insn->code)) {
5072			verbose(env, "unknown opcode %02x\n", insn->code);
5073			return -EINVAL;
5074		}
5075	}
5076
5077	/* now all pseudo BPF_LD_IMM64 instructions load valid
5078	 * 'struct bpf_map *' into a register instead of user map_fd.
5079	 * These pointers will be used later by verifier to validate map access.
5080	 */
5081	return 0;
5082}
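
/* For illustration: a loader references a map through the two-insn pseudo
 * load that the pass above resolves, e.g. (using BPF_LD_IMM64_RAW() from
 * linux/filter.h)
 *
 *	BPF_LD_IMM64_RAW(BPF_REG_1, BPF_PSEUDO_MAP_FD, map_fd),
 *
 * which expands to two struct bpf_insn entries. After
 * replace_map_fd_with_map_ptr() the imm fields of those two entries carry
 * the low and high 32 bits of the in-kernel 'struct bpf_map *' instead of
 * the user supplied fd.
 */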
5083
5084/* drop refcnt of maps used by the rejected program */
5085static void release_maps(struct bpf_verifier_env *env)
5086{
5087	int i;
5088
5089	for (i = 0; i < env->used_map_cnt; i++)
5090		bpf_map_put(env->used_maps[i]);
5091}
5092
5093/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
5094static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
5095{
5096	struct bpf_insn *insn = env->prog->insnsi;
5097	int insn_cnt = env->prog->len;
5098	int i;
5099
5100	for (i = 0; i < insn_cnt; i++, insn++)
5101		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
5102			insn->src_reg = 0;
5103}
5104
5105/* single env->prog->insnsi[off] instruction was replaced with the range
5106 * insnsi[off, off + cnt).  Adjust corresponding insn_aux_data by copying
5107 * [0, off) and [off, end) to new locations, so the patched range stays zero
5108 */
5109static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
5110				u32 off, u32 cnt)
5111{
5112	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
5113	int i;
5114
5115	if (cnt == 1)
5116		return 0;
5117	new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len);
5118	if (!new_data)
5119		return -ENOMEM;
5120	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
5121	memcpy(new_data + off + cnt - 1, old_data + off,
5122	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
5123	for (i = off; i < off + cnt - 1; i++)
5124		new_data[i].seen = true;
5125	env->insn_aux_data = new_data;
5126	vfree(old_data);
5127	return 0;
5128}
5129
5130static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
5131{
5132	int i;
5133
5134	if (len == 1)
5135		return;
5136	for (i = 0; i < env->subprog_cnt; i++) {
5137		if (env->subprog_starts[i] < off)
5138			continue;
5139		env->subprog_starts[i] += len - 1;
5140	}
5141}
5142
5143static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
5144					    const struct bpf_insn *patch, u32 len)
5145{
5146	struct bpf_prog *new_prog;
5147
5148	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
5149	if (!new_prog)
5150		return NULL;
5151	if (adjust_insn_aux_data(env, new_prog->len, off, len))
5152		return NULL;
5153	adjust_subprog_starts(env, off, len);
5154	return new_prog;
5155}
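
/* For illustration: patching the single insn at off == 10 with a 3-insn
 * sequence (len == 3) grows the program by two insns.  adjust_insn_aux_data()
 * keeps aux data [0, 10) in place, leaves the new slots 10 and 11 zeroed but
 * marked 'seen', and moves the old aux data starting at 10 up to index 12;
 * adjust_subprog_starts() likewise shifts every recorded subprog start at or
 * beyond 10 by two.
 */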
5156
5157/* The verifier does more data flow analysis than LLVM and will not
5158 * explore branches that are dead at run time. Malicious programs can
5159 * have dead code too. Therefore replace all dead at-run-time code
5160 * with 'ja -1'.
5161 *
5162 * Plain nops would not be optimal: if they sat at the end of the
5163 * program and, through another bug, we managed to jump there, we
5164 * would execute beyond program memory. Returning an exception
5165 * code wouldn't work either, since the dead code can be located
5166 * inside subprogs.
5167 */
5168static void sanitize_dead_code(struct bpf_verifier_env *env)
5169{
5170	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
5171	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
5172	struct bpf_insn *insn = env->prog->insnsi;
5173	const int insn_cnt = env->prog->len;
5174	int i;
5175
5176	for (i = 0; i < insn_cnt; i++) {
5177		if (aux_data[i].seen)
5178			continue;
5179		memcpy(insn + i, &trap, sizeof(trap));
5180	}
5181}
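
/* For illustration: in a snippet such as
 *
 *	BPF_MOV64_IMM(BPF_REG_0, 0),
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
 *	BPF_MOV64_IMM(BPF_REG_0, 1),
 *	BPF_EXIT_INSN(),
 *
 * R0 is known to be zero, so the branch is always taken and the second
 * BPF_MOV64_IMM is never marked 'seen'; sanitize_dead_code() overwrites it
 * with the 'ja -1' trap above.
 */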
5182
5183/* convert load instructions that access fields of 'struct __sk_buff'
5184 * into sequence of instructions that access fields of 'struct sk_buff'
5185 */
5186static int convert_ctx_accesses(struct bpf_verifier_env *env)
5187{
5188	const struct bpf_verifier_ops *ops = env->ops;
5189	int i, cnt, size, ctx_field_size, delta = 0;
5190	const int insn_cnt = env->prog->len;
5191	struct bpf_insn insn_buf[16], *insn;
5192	struct bpf_prog *new_prog;
5193	enum bpf_access_type type;
5194	bool is_narrower_load;
5195	u32 target_size;
5196
5197	if (ops->gen_prologue) {
5198		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
5199					env->prog);
5200		if (cnt >= ARRAY_SIZE(insn_buf)) {
5201			verbose(env, "bpf verifier is misconfigured\n");
5202			return -EINVAL;
5203		} else if (cnt) {
5204			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
5205			if (!new_prog)
5206				return -ENOMEM;
5207
5208			env->prog = new_prog;
5209			delta += cnt - 1;
5210		}
5211	}
5212
5213	if (!ops->convert_ctx_access)
5214		return 0;
5215
5216	insn = env->prog->insnsi + delta;
5217
5218	for (i = 0; i < insn_cnt; i++, insn++) {
5219		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
5220		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
5221		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
5222		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
5223			type = BPF_READ;
5224		else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
5225			 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
5226			 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
5227			 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
5228			type = BPF_WRITE;
5229		else
5230			continue;
5231
5232		if (type == BPF_WRITE &&
5233		    env->insn_aux_data[i + delta].sanitize_stack_off) {
5234			struct bpf_insn patch[] = {
5235				/* Sanitize suspicious stack slot with zero.
5236				 * There are no memory dependencies for this store,
5237				 * since it's only using frame pointer and immediate
5238				 * constant of zero
5239				 */
5240				BPF_ST_MEM(BPF_DW, BPF_REG_FP,
5241					   env->insn_aux_data[i + delta].sanitize_stack_off,
5242					   0),
5243				/* the original STX instruction will immediately
5244				 * overwrite the same stack slot with appropriate value
5245				 */
5246				*insn,
5247			};
5248
5249			cnt = ARRAY_SIZE(patch);
5250			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
5251			if (!new_prog)
5252				return -ENOMEM;
5253
5254			delta    += cnt - 1;
5255			env->prog = new_prog;
5256			insn      = new_prog->insnsi + i + delta;
5257			continue;
5258		}
5259
5260		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
5261			continue;
5262
5263		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
5264		size = BPF_LDST_BYTES(insn);
5265
5266		/* If the read access is a narrower load of the field,
5267		 * convert to a 4/8-byte load, to minimize program type specific
5268		 * convert_ctx_access changes. If conversion is successful,
5269		 * we will apply the proper mask to the result.
5270		 */
5271		is_narrower_load = size < ctx_field_size;
5272		if (is_narrower_load) {
5273			u32 off = insn->off;
5274			u8 size_code;
5275
5276			if (type == BPF_WRITE) {
5277				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
5278				return -EINVAL;
5279			}
5280
5281			size_code = BPF_H;
5282			if (ctx_field_size == 4)
5283				size_code = BPF_W;
5284			else if (ctx_field_size == 8)
5285				size_code = BPF_DW;
5286
5287			insn->off = off & ~(ctx_field_size - 1);
5288			insn->code = BPF_LDX | BPF_MEM | size_code;
5289		}
5290
5291		target_size = 0;
5292		cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog,
5293					      &target_size);
5294		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
5295		    (ctx_field_size && !target_size)) {
5296			verbose(env, "bpf verifier is misconfigured\n");
5297			return -EINVAL;
5298		}
5299
5300		if (is_narrower_load && size < target_size) {
5301			if (ctx_field_size <= 4)
5302				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
5303								(1 << size * 8) - 1);
5304			else
5305				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
5306								(1 << size * 8) - 1);
5307		}
5308
5309		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
5310		if (!new_prog)
5311			return -ENOMEM;
5312
5313		delta += cnt - 1;
5314
5315		/* keep walking new program and skip insns we just inserted */
5316		env->prog = new_prog;
5317		insn      = new_prog->insnsi + i + delta;
5318	}
5319
5320	return 0;
5321}
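
/* For illustration: with a 4-byte context field such as __sk_buff::len, a
 * narrow 1-byte read
 *
 *	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, len)),
 *
 * is first widened to a BPF_W load of the aligned field, then rewritten by
 * convert_ctx_access() into the real 'struct sk_buff' access, and finally a
 * BPF_ALU32_IMM(BPF_AND, BPF_REG_0, 0xff) is appended so only the requested
 * byte remains in R0.
 */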
5322
5323static int jit_subprogs(struct bpf_verifier_env *env)
5324{
5325	struct bpf_prog *prog = env->prog, **func, *tmp;
5326	int i, j, subprog_start, subprog_end = 0, len, subprog;
5327	struct bpf_insn *insn;
5328	void *old_bpf_func;
5329	int err = -ENOMEM;
5330
5331	if (env->subprog_cnt == 0)
5332		return 0;
5333
5334	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
5335		if (insn->code != (BPF_JMP | BPF_CALL) ||
5336		    insn->src_reg != BPF_PSEUDO_CALL)
5337			continue;
5338		subprog = find_subprog(env, i + insn->imm + 1);
5339		if (subprog < 0) {
5340			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
5341				  i + insn->imm + 1);
5342			return -EFAULT;
5343		}
5344		/* temporarily remember subprog id inside insn instead of
5345		 * aux_data, since next loop will split up all insns into funcs
5346		 */
5347		insn->off = subprog + 1;
5348		/* remember the original imm in case the JIT fails and a
5349		 * fallback to the interpreter is needed
5350		 */
5351		env->insn_aux_data[i].call_imm = insn->imm;
5352		/* point imm to __bpf_call_base+1 from the JIT's point of view */
5353		insn->imm = 1;
5354	}
5355
5356	func = kzalloc(sizeof(prog) * (env->subprog_cnt + 1), GFP_KERNEL);
5357	if (!func)
5358		return -ENOMEM;
5359
5360	for (i = 0; i <= env->subprog_cnt; i++) {
5361		subprog_start = subprog_end;
5362		if (env->subprog_cnt == i)
5363			subprog_end = prog->len;
5364		else
5365			subprog_end = env->subprog_starts[i];
5366
5367		len = subprog_end - subprog_start;
5368		func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
5369		if (!func[i])
5370			goto out_free;
5371		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
5372		       len * sizeof(struct bpf_insn));
5373		func[i]->type = prog->type;
5374		func[i]->len = len;
5375		if (bpf_prog_calc_tag(func[i]))
5376			goto out_free;
5377		func[i]->is_func = 1;
5378		/* Use bpf_prog_F_tag to indicate functions in stack traces.
5379		 * Long term, debug info would be needed to populate names
5380		 */
5381		func[i]->aux->name[0] = 'F';
5382		func[i]->aux->stack_depth = env->subprog_stack_depth[i];
5383		func[i]->jit_requested = 1;
5384		func[i] = bpf_int_jit_compile(func[i]);
5385		if (!func[i]->jited) {
5386			err = -ENOTSUPP;
5387			goto out_free;
5388		}
5389		cond_resched();
5390	}
5391	/* at this point all bpf functions were successfully JITed
5392	 * now populate all bpf_calls with correct addresses and
5393	 * run last pass of JIT
5394	 */
5395	for (i = 0; i <= env->subprog_cnt; i++) {
5396		insn = func[i]->insnsi;
5397		for (j = 0; j < func[i]->len; j++, insn++) {
5398			if (insn->code != (BPF_JMP | BPF_CALL) ||
5399			    insn->src_reg != BPF_PSEUDO_CALL)
5400				continue;
5401			subprog = insn->off;
5402			insn->off = 0;
5403			insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
5404				func[subprog]->bpf_func -
5405				__bpf_call_base;
5406		}
5407	}
5408	for (i = 0; i <= env->subprog_cnt; i++) {
5409		old_bpf_func = func[i]->bpf_func;
5410		tmp = bpf_int_jit_compile(func[i]);
5411		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
5412			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
5413			err = -EFAULT;
5414			goto out_free;
5415		}
5416		cond_resched();
5417	}
5418
5419	/* finally lock prog and jit images for all functions and
5420	 * populate kallsyms
5421	 */
5422	for (i = 0; i <= env->subprog_cnt; i++) {
5423		bpf_prog_lock_ro(func[i]);
5424		bpf_prog_kallsyms_add(func[i]);
5425	}
5426
5427	/* Last step: make the now unused interpreter insns from the main
5428	 * prog consistent for later dump requests, so they can look the
5429	 * same as if they had been interpreted only.
5430	 */
5431	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
5432		unsigned long addr;
5433
5434		if (insn->code != (BPF_JMP | BPF_CALL) ||
5435		    insn->src_reg != BPF_PSEUDO_CALL)
5436			continue;
5437		insn->off = env->insn_aux_data[i].call_imm;
5438		subprog = find_subprog(env, i + insn->off + 1);
5439		addr  = (unsigned long)func[subprog + 1]->bpf_func;
5440		addr &= PAGE_MASK;
5441		insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
5442			    addr - __bpf_call_base;
5443	}
5444
5445	prog->jited = 1;
5446	prog->bpf_func = func[0]->bpf_func;
5447	prog->aux->func = func;
5448	prog->aux->func_cnt = env->subprog_cnt + 1;
5449	return 0;
5450out_free:
5451	for (i = 0; i <= env->subprog_cnt; i++)
5452		if (func[i])
5453			bpf_jit_free(func[i]);
5454	kfree(func);
5455	/* cleanup main prog to be interpreted */
5456	prog->jit_requested = 0;
5457	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
5458		if (insn->code != (BPF_JMP | BPF_CALL) ||
5459		    insn->src_reg != BPF_PSEUDO_CALL)
5460			continue;
5461		insn->off = 0;
5462		insn->imm = env->insn_aux_data[i].call_imm;
5463	}
5464	return err;
5465}
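
/* For illustration: a bpf-to-bpf call insn arrives from the loader with
 * code == (BPF_JMP | BPF_CALL), src_reg == BPF_PSEUDO_CALL and imm set to
 * the pc-relative offset of the callee.  After jit_subprogs() the copy of
 * that insn inside each JITed function has
 * imm == func[subprog]->bpf_func - __bpf_call_base, while the main prog's
 * interpreter copy keeps the original offset in insn->off so that later
 * dumps still make sense.
 */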
5466
5467static int fixup_call_args(struct bpf_verifier_env *env)
5468{
5469#ifndef CONFIG_BPF_JIT_ALWAYS_ON
5470	struct bpf_prog *prog = env->prog;
5471	struct bpf_insn *insn = prog->insnsi;
5472	int i, depth;
5473#endif
5474	int err;
5475
5476	err = 0;
5477	if (env->prog->jit_requested) {
5478		err = jit_subprogs(env);
5479		if (err == 0)
5480			return 0;
5481	}
5482#ifndef CONFIG_BPF_JIT_ALWAYS_ON
5483	for (i = 0; i < prog->len; i++, insn++) {
5484		if (insn->code != (BPF_JMP | BPF_CALL) ||
5485		    insn->src_reg != BPF_PSEUDO_CALL)
5486			continue;
5487		depth = get_callee_stack_depth(env, insn, i);
5488		if (depth < 0)
5489			return depth;
5490		bpf_patch_call_args(insn, depth);
5491	}
5492	err = 0;
5493#endif
5494	return err;
5495}
5496
5497/* fixup insn->imm field of bpf_call instructions
5498 * and inline eligible helpers as explicit sequence of BPF instructions
5499 *
5500 * this function is called after eBPF program passed verification
5501 */
5502static int fixup_bpf_calls(struct bpf_verifier_env *env)
5503{
5504	struct bpf_prog *prog = env->prog;
5505	struct bpf_insn *insn = prog->insnsi;
5506	const struct bpf_func_proto *fn;
5507	const int insn_cnt = prog->len;
5508	struct bpf_insn_aux_data *aux;
5509	struct bpf_insn insn_buf[16];
5510	struct bpf_prog *new_prog;
5511	struct bpf_map *map_ptr;
5512	int i, cnt, delta = 0;
5513
5514	for (i = 0; i < insn_cnt; i++, insn++) {
5515		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
5516		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
5517		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
5518		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
5519			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
5520			struct bpf_insn mask_and_div[] = {
5521				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
5522				/* Rx div 0 -> 0 */
5523				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
5524				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
5525				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
5526				*insn,
5527			};
5528			struct bpf_insn mask_and_mod[] = {
5529				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
5530				/* Rx mod 0 -> Rx */
5531				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
5532				*insn,
5533			};
5534			struct bpf_insn *patchlet;
5535
5536			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
5537			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
5538				patchlet = mask_and_div + (is64 ? 1 : 0);
5539				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
5540			} else {
5541				patchlet = mask_and_mod + (is64 ? 1 : 0);
5542				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
5543			}
5544
5545			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
5546			if (!new_prog)
5547				return -ENOMEM;
5548
5549			delta    += cnt - 1;
5550			env->prog = prog = new_prog;
5551			insn      = new_prog->insnsi + i + delta;
5552			continue;
5553		}
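
		/* For illustration: after the rewrite above, a run-time division
		 * by zero such as
		 *
		 *	BPF_MOV64_IMM(BPF_REG_2, 0),
		 *	BPF_ALU64_REG(BPF_DIV, BPF_REG_1, BPF_REG_2),
		 *
		 * yields R1 == 0 instead of faulting, and the BPF_MOD variant
		 * leaves R1 unchanged, matching the patchlets' comments.
		 */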
5554
5555		if (insn->code != (BPF_JMP | BPF_CALL))
5556			continue;
5557		if (insn->src_reg == BPF_PSEUDO_CALL)
5558			continue;
5559
5560		if (insn->imm == BPF_FUNC_get_route_realm)
5561			prog->dst_needed = 1;
5562		if (insn->imm == BPF_FUNC_get_prandom_u32)
5563			bpf_user_rnd_init_once();
5564		if (insn->imm == BPF_FUNC_override_return)
5565			prog->kprobe_override = 1;
5566		if (insn->imm == BPF_FUNC_tail_call) {
5567			/* If we tail call into other programs, we
5568			 * cannot make any assumptions since they can
5569			 * be replaced dynamically during runtime in
5570			 * the program array.
5571			 */
5572			prog->cb_access = 1;
5573			env->prog->aux->stack_depth = MAX_BPF_STACK;
5574
5575			/* mark bpf_tail_call as a different opcode to avoid a
5576			 * conditional branch in the interpreter for every normal
5577			 * call and to prevent accidental JITing by a JIT compiler
5578			 * that doesn't support bpf_tail_call yet
5579			 */
5580			insn->imm = 0;
5581			insn->code = BPF_JMP | BPF_TAIL_CALL;
5582
5583			aux = &env->insn_aux_data[i + delta];
5584			if (!bpf_map_ptr_unpriv(aux))
5585				continue;
5586
5587			/* instead of changing every JIT dealing with tail_call
5588			 * emit two extra insns:
5589			 * if (index >= max_entries) goto out;
5590			 * index &= array->index_mask;
5591			 * to avoid out-of-bounds cpu speculation
5592			 */
5593			if (bpf_map_ptr_poisoned(aux)) {
5594				verbose(env, "tail_call abusing map_ptr\n");
5595				return -EINVAL;
5596			}
5597
5598			map_ptr = BPF_MAP_PTR(aux->map_state);
5599			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
5600						  map_ptr->max_entries, 2);
5601			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
5602						    container_of(map_ptr,
5603								 struct bpf_array,
5604								 map)->index_mask);
5605			insn_buf[2] = *insn;
5606			cnt = 3;
5607			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
5608			if (!new_prog)
5609				return -ENOMEM;
5610
5611			delta    += cnt - 1;
5612			env->prog = prog = new_prog;
5613			insn      = new_prog->insnsi + i + delta;
5614			continue;
5615		}
5616
5617		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
5618		 * handlers are currently limited to 64 bit only.
5619		 */
5620		if (prog->jit_requested && BITS_PER_LONG == 64 &&
5621		    insn->imm == BPF_FUNC_map_lookup_elem) {
5622			aux = &env->insn_aux_data[i + delta];
5623			if (bpf_map_ptr_poisoned(aux))
5624				goto patch_call_imm;
5625
5626			map_ptr = BPF_MAP_PTR(aux->map_state);
5627			if (!map_ptr->ops->map_gen_lookup)
5628				goto patch_call_imm;
5629
5630			cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
5631			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
5632				verbose(env, "bpf verifier is misconfigured\n");
5633				return -EINVAL;
5634			}
5635
5636			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
5637						       cnt);
5638			if (!new_prog)
5639				return -ENOMEM;
5640
5641			delta += cnt - 1;
5642
5643			/* keep walking new program and skip insns we just inserted */
5644			env->prog = prog = new_prog;
5645			insn      = new_prog->insnsi + i + delta;
5646			continue;
5647		}
5648
5649		if (insn->imm == BPF_FUNC_redirect_map) {
5650			/* Note, we cannot use prog directly as imm as subsequent
5651			 * rewrites would still change the prog pointer. The only
5652			 * stable address we can use is aux, which also works with
5653			 * prog clones during blinding.
5654			 */
5655			u64 addr = (unsigned long)prog->aux;
5656			struct bpf_insn r4_ld[] = {
5657				BPF_LD_IMM64(BPF_REG_4, addr),
5658				*insn,
5659			};
5660			cnt = ARRAY_SIZE(r4_ld);
5661
5662			new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt);
5663			if (!new_prog)
5664				return -ENOMEM;
5665
5666			delta    += cnt - 1;
5667			env->prog = prog = new_prog;
5668			insn      = new_prog->insnsi + i + delta;
5669		}
5670patch_call_imm:
5671		fn = env->ops->get_func_proto(insn->imm, env->prog);
5672		/* all functions that have a prototype and that the verifier
5673		 * allowed programs to call must be real in-kernel functions
5674		 */
5675		if (!fn->func) {
5676			verbose(env,
5677				"kernel subsystem misconfigured func %s#%d\n",
5678				func_id_name(insn->imm), insn->imm);
5679			return -EFAULT;
5680		}
5681		insn->imm = fn->func - __bpf_call_base;
5682	}
5683
5684	return 0;
5685}
5686
5687static void free_states(struct bpf_verifier_env *env)
5688{
5689	struct bpf_verifier_state_list *sl, *sln;
5690	int i;
5691
5692	if (!env->explored_states)
5693		return;
5694
5695	for (i = 0; i < env->prog->len; i++) {
5696		sl = env->explored_states[i];
5697
5698		if (sl)
5699			while (sl != STATE_LIST_MARK) {
5700				sln = sl->next;
5701				free_verifier_state(&sl->state, false);
5702				kfree(sl);
5703				sl = sln;
5704			}
5705	}
5706
5707	kfree(env->explored_states);
5708}
5709
5710int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
5711{
5712	struct bpf_verifier_env *env;
5713	struct bpf_verifier_log *log;
5714	int ret = -EINVAL;
5715
5716	/* no program is valid */
5717	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
5718		return -EINVAL;
5719
5720	/* 'struct bpf_verifier_env' can be global, but since it's not small,
5721	 * allocate/free it every time bpf_check() is called
5722	 */
5723	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
5724	if (!env)
5725		return -ENOMEM;
5726	log = &env->log;
5727
5728	env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
5729				     (*prog)->len);
5730	ret = -ENOMEM;
5731	if (!env->insn_aux_data)
5732		goto err_free_env;
5733	env->prog = *prog;
5734	env->ops = bpf_verifier_ops[env->prog->type];
5735
5736	/* grab the mutex to protect a few globals used by the verifier */
5737	mutex_lock(&bpf_verifier_lock);
5738
5739	if (attr->log_level || attr->log_buf || attr->log_size) {
5740		/* user requested verbose verifier output
5741		 * and supplied buffer to store the verification trace
5742		 */
5743		log->level = attr->log_level;
5744		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
5745		log->len_total = attr->log_size;
5746
5747		ret = -EINVAL;
5748		/* log attributes have to be sane */
5749		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
5750		    !log->level || !log->ubuf)
5751			goto err_unlock;
5752	}
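
	/* For illustration (userspace side): the log attributes checked above
	 * are filled in by the loader roughly as
	 *
	 *	char log_buf[65536];
	 *	union bpf_attr attr = {
	 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	 *		.insns     = (__u64)(unsigned long)insns,
	 *		.insn_cnt  = insn_cnt,
	 *		.license   = (__u64)(unsigned long)"GPL",
	 *		.log_level = 1,
	 *		.log_buf   = (__u64)(unsigned long)log_buf,
	 *		.log_size  = sizeof(log_buf),
	 *	};
	 *
	 * before the BPF_PROG_LOAD command is issued.
	 */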
5753
5754	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
5755	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
5756		env->strict_alignment = true;
5757
5758	if (bpf_prog_is_dev_bound(env->prog->aux)) {
5759		ret = bpf_prog_offload_verifier_prep(env);
5760		if (ret)
5761			goto err_unlock;
5762	}
5763
5764	ret = replace_map_fd_with_map_ptr(env);
5765	if (ret < 0)
5766		goto skip_full_check;
5767
5768	env->explored_states = kcalloc(env->prog->len,
5769				       sizeof(struct bpf_verifier_state_list *),
5770				       GFP_USER);
5771	ret = -ENOMEM;
5772	if (!env->explored_states)
5773		goto skip_full_check;
5774
5775	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
5776
5777	ret = check_cfg(env);
5778	if (ret < 0)
5779		goto skip_full_check;
5780
5781	ret = do_check(env);
5782	if (env->cur_state) {
5783		free_verifier_state(env->cur_state, true);
5784		env->cur_state = NULL;
5785	}
5786
5787skip_full_check:
5788	while (!pop_stack(env, NULL, NULL));
5789	free_states(env);
5790
5791	if (ret == 0)
5792		sanitize_dead_code(env);
5793
5794	if (ret == 0)
5795		ret = check_max_stack_depth(env);
5796
5797	if (ret == 0)
5798		/* program is valid, convert *(u32*)(ctx + off) accesses */
5799		ret = convert_ctx_accesses(env);
5800
5801	if (ret == 0)
5802		ret = fixup_bpf_calls(env);
5803
5804	if (ret == 0)
5805		ret = fixup_call_args(env);
5806
5807	if (log->level && bpf_verifier_log_full(log))
5808		ret = -ENOSPC;
5809	if (log->level && !log->ubuf) {
5810		ret = -EFAULT;
5811		goto err_release_maps;
5812	}
5813
5814	if (ret == 0 && env->used_map_cnt) {
5815		/* if program passed verifier, update used_maps in bpf_prog_info */
5816		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
5817							  sizeof(env->used_maps[0]),
5818							  GFP_KERNEL);
5819
5820		if (!env->prog->aux->used_maps) {
5821			ret = -ENOMEM;
5822			goto err_release_maps;
5823		}
5824
5825		memcpy(env->prog->aux->used_maps, env->used_maps,
5826		       sizeof(env->used_maps[0]) * env->used_map_cnt);
5827		env->prog->aux->used_map_cnt = env->used_map_cnt;
5828
5829		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
5830		 * bpf_ld_imm64 instructions
5831		 */
5832		convert_pseudo_ld_imm64(env);
5833	}
5834
5835err_release_maps:
5836	if (!env->prog->aux->used_maps)
5837		/* if we didn't copy map pointers into bpf_prog_info, release
5838		 * them now. Otherwise free_bpf_prog_info() will release them.
5839		 */
5840		release_maps(env);
5841	*prog = env->prog;
5842err_unlock:
5843	mutex_unlock(&bpf_verifier_lock);
5844	vfree(env->insn_aux_data);
5845err_free_env:
5846	kfree(env);
5847	return ret;
5848}