   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Just-In-Time compiler for eBPF filters on 32bit ARM
   4 *
   5 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
   6 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
   7 */
   8
   9#include <linux/bpf.h>
  10#include <linux/bitops.h>
  11#include <linux/compiler.h>
  12#include <linux/errno.h>
  13#include <linux/filter.h>
  14#include <linux/netdevice.h>
  15#include <linux/string.h>
  16#include <linux/slab.h>
  17#include <linux/if_vlan.h>
  18
  19#include <asm/cacheflush.h>
  20#include <asm/hwcap.h>
  21#include <asm/opcodes.h>
  22#include <asm/system_info.h>
  23
  24#include "bpf_jit_32.h"
  25
  26/*
  27 * eBPF prog stack layout:
  28 *
  29 *                         high
  30 * original ARM_SP =>     +-----+
  31 *                        |     | callee saved registers
  32 *                        +-----+ <= (BPF_FP + SCRATCH_SIZE)
  33 *                        | ... | eBPF JIT scratch space
  34 * eBPF fp register =>    +-----+
  35 *   (BPF_FP)             | ... | eBPF prog stack
  36 *                        +-----+
  37 *                        |RSVD | JIT scratchpad
  38 * current ARM_SP =>      +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
  39 *                        | ... | caller-saved registers
  40 *                        +-----+
  41 *                        | ... | arguments passed on stack
   42 * ARM_SP during call =>  +-----+
  43 *                        |     |
  44 *                        | ... | Function call stack
  45 *                        |     |
  46 *                        +-----+
  47 *                          low
  48 *
   49 * The callee saved registers depend on whether frame pointers are enabled.
  50 * With frame pointers (to be compliant with the ABI):
  51 *
  52 *                              high
  53 * original ARM_SP =>     +--------------+ \
  54 *                        |      pc      | |
  55 * current ARM_FP =>      +--------------+ } callee saved registers
  56 *                        |r4-r9,fp,ip,lr| |
  57 *                        +--------------+ /
  58 *                              low
  59 *
  60 * Without frame pointers:
  61 *
  62 *                              high
  63 * original ARM_SP =>     +--------------+
  64 *                        |  r4-r9,fp,lr | callee saved registers
  65 * current ARM_FP =>      +--------------+
  66 *                              low
  67 *
  68 * When popping registers off the stack at the end of a BPF function, we
  69 * reference them via the current ARM_FP register.
  70 *
  71 * Some eBPF operations are implemented via a call to a helper function.
  72 * Such calls are "invisible" in the eBPF code, so it is up to the calling
  73 * program to preserve any caller-saved ARM registers during the call. The
  74 * JIT emits code to push and pop those registers onto the stack, immediately
  75 * above the callee stack frame.
  76 */
  77#define CALLEE_MASK	(1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
  78			 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
  79			 1 << ARM_FP)
  80#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
  81#define CALLEE_POP_MASK  (CALLEE_MASK | 1 << ARM_PC)
  82
  83#define CALLER_MASK	(1 << ARM_R0 | 1 << ARM_R1 | 1 << ARM_R2 | 1 << ARM_R3)
  84
  85enum {
  86	/* Stack layout - these are offsets from (top of stack - 4) */
  87	BPF_R2_HI,
  88	BPF_R2_LO,
  89	BPF_R3_HI,
  90	BPF_R3_LO,
  91	BPF_R4_HI,
  92	BPF_R4_LO,
  93	BPF_R5_HI,
  94	BPF_R5_LO,
  95	BPF_R7_HI,
  96	BPF_R7_LO,
  97	BPF_R8_HI,
  98	BPF_R8_LO,
  99	BPF_R9_HI,
 100	BPF_R9_LO,
 101	BPF_FP_HI,
 102	BPF_FP_LO,
 103	BPF_TC_HI,
 104	BPF_TC_LO,
 105	BPF_AX_HI,
 106	BPF_AX_LO,
 107	/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
 108	 * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
  109	 * BPF_REG_FP, and the tail call count.
 110	 */
 111	BPF_JIT_SCRATCH_REGS,
 112};
 113
 114/*
 115 * Negative "register" values indicate the register is stored on the stack
  116 * and give the offset from the top of the eBPF JIT scratch space.
 117 */
 118#define STACK_OFFSET(k)	(-4 - (k) * 4)
 119#define SCRATCH_SIZE	(BPF_JIT_SCRATCH_REGS * 4)
 120
 121#ifdef CONFIG_FRAME_POINTER
 122#define EBPF_SCRATCH_TO_ARM_FP(x) ((x) - 4 * hweight16(CALLEE_PUSH_MASK) - 4)
 123#else
 124#define EBPF_SCRATCH_TO_ARM_FP(x) (x)
 125#endif
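/*
 * Worked example (editorial, not in the original source): BPF_R2_LO is
 * enum value 1, so STACK_OFFSET(BPF_R2_LO) = -4 - 1 * 4 = -8, i.e. the
 * low word of BPF R2 sits 8 bytes below the top of the JIT scratch
 * space.  With CONFIG_FRAME_POINTER, CALLEE_PUSH_MASK names 8 registers
 * (r4-r9, fp, lr), so the same slot is addressed relative to the frame
 * pointer as ARM_FP - 8 - 4 * 8 - 4 = ARM_FP - 44.
 */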
 126
 127#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)	/* TEMP Register 1 */
 128#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)	/* TEMP Register 2 */
 129#define TCALL_CNT	(MAX_BPF_JIT_REG + 2)	/* Tail Call Count */
 130
 131#define FLAG_IMM_OVERFLOW	(1 << 0)
 132
 133/*
 134 * Map eBPF registers to ARM 32bit registers or stack scratch space.
 135 *
  136 * 1. The first argument is passed in ARM 32-bit registers; the rest
  137 * of the arguments are passed via the stack scratch space.
  138 * 2. The first callee-saved eBPF register is mapped to ARM 32-bit
  139 * registers; the rest are mapped to scratch space on the stack.
  140 * 3. Two 64-bit temporary registers are needed for complex operations
  141 * on eBPF registers.
  142 *
  143 * Since the eBPF registers are all 64 bits wide and ARM has only
  144 * 32-bit registers, each eBPF register is mapped to a pair of ARM
  145 * 32-bit registers or to scratch memory, and the 64-bit value is
  146 * rebuilt from the two halves.
  147 */
 148static const s8 bpf2a32[][2] = {
 149	/* return value from in-kernel function, and exit value from eBPF */
 150	[BPF_REG_0] = {ARM_R1, ARM_R0},
 151	/* arguments from eBPF program to in-kernel function */
 152	[BPF_REG_1] = {ARM_R3, ARM_R2},
 153	/* Stored on stack scratch space */
 154	[BPF_REG_2] = {STACK_OFFSET(BPF_R2_HI), STACK_OFFSET(BPF_R2_LO)},
 155	[BPF_REG_3] = {STACK_OFFSET(BPF_R3_HI), STACK_OFFSET(BPF_R3_LO)},
 156	[BPF_REG_4] = {STACK_OFFSET(BPF_R4_HI), STACK_OFFSET(BPF_R4_LO)},
 157	[BPF_REG_5] = {STACK_OFFSET(BPF_R5_HI), STACK_OFFSET(BPF_R5_LO)},
 158	/* callee saved registers that in-kernel function will preserve */
 159	[BPF_REG_6] = {ARM_R5, ARM_R4},
 160	/* Stored on stack scratch space */
 161	[BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)},
 162	[BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
 163	[BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
  164	/* Read-only frame pointer used to access the stack */
 165	[BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)},
  166	/* Temporary registers for internal use by the BPF JIT; also
  167	 * used for constant blinding, among other things.
  168	 */
 169	[TMP_REG_1] = {ARM_R7, ARM_R6},
 170	[TMP_REG_2] = {ARM_R9, ARM_R8},
 171	/* Tail call count. Stored on stack scratch space. */
 172	[TCALL_CNT] = {STACK_OFFSET(BPF_TC_HI), STACK_OFFSET(BPF_TC_LO)},
 173	/* temporary register for blinding constants.
 174	 * Stored on stack scratch space.
 175	 */
 176	[BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
 177};
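/*
 * Worked example (editorial, not in the original source): each eBPF
 * register maps to a {hi, lo} pair.  BPF_REG_6 lives in CPU registers,
 * so bpf2a32[BPF_REG_6] is {ARM_R5, ARM_R4}; BPF_REG_7 is spilled, so
 * its entry holds two negative STACK_OFFSET() values that
 * arm_bpf_get_reg64() below turns into loads from the scratch space.
 */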
 178
 179#define	dst_lo	dst[1]
 180#define dst_hi	dst[0]
 181#define src_lo	src[1]
 182#define src_hi	src[0]
 183
 184/*
 185 * JIT Context:
 186 *
 187 * prog			:	bpf_prog
  188 * idx			:	index of the last JITed instruction.
  189 * prologue_bytes	:	number of bytes used by the prologue.
  190 * epilogue_offset	:	offset at which the epilogue starts.
  191 * offsets		:	array of eBPF instruction offsets in
  192 *				JITed code.
  193 * target		:	final JITed code.
  194 * epilogue_bytes	:	number of bytes used by the epilogue.
  195 * imm_count		:	number of immediate values in the
  196 *				literal pool.
  197 * imms			:	array of literal pool (immediate) values.
 198 */
 199
 200struct jit_ctx {
 201	const struct bpf_prog *prog;
 202	unsigned int idx;
 203	unsigned int prologue_bytes;
 204	unsigned int epilogue_offset;
 205	unsigned int cpu_architecture;
 206	u32 flags;
 207	u32 *offsets;
 208	u32 *target;
 209	u32 stack_size;
 210#if __LINUX_ARM_ARCH__ < 7
 211	u16 epilogue_bytes;
 212	u16 imm_count;
 213	u32 *imms;
 214#endif
 215};
 216
 217/*
  218 * Wrappers which handle both OABI and EABI and ensure Thumb2 interworking
 219 * (where the assembly routines like __aeabi_uidiv could cause problems).
 220 */
 221static u32 jit_udiv32(u32 dividend, u32 divisor)
 222{
 223	return dividend / divisor;
 224}
 225
 226static u32 jit_mod32(u32 dividend, u32 divisor)
 227{
 228	return dividend % divisor;
 229}
 230
 231static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
 232{
 233	inst |= (cond << 28);
 234	inst = __opcode_to_mem_arm(inst);
 235
 236	if (ctx->target != NULL)
 237		ctx->target[ctx->idx] = inst;
 238
 239	ctx->idx++;
 240}
 241
 242/*
 243 * Emit an instruction that will be executed unconditionally.
 244 */
 245static inline void emit(u32 inst, struct jit_ctx *ctx)
 246{
 247	_emit(ARM_COND_AL, inst, ctx);
 248}
 249
 250/*
 251 * This is rather horrid, but necessary to convert an integer constant
 252 * to an immediate operand for the opcodes, and be able to detect at
 253 * build time whether the constant can't be converted (iow, usable in
 254 * BUILD_BUG_ON()).
 255 */
 256#define imm12val(v, s) (rol32(v, (s)) | (s) << 7)
 257#define const_imm8m(x)					\
 258	({ int r;					\
 259	   u32 v = (x);					\
 260	   if (!(v & ~0x000000ff))			\
 261		r = imm12val(v, 0);			\
 262	   else if (!(v & ~0xc000003f))			\
 263		r = imm12val(v, 2);			\
 264	   else if (!(v & ~0xf000000f))			\
 265		r = imm12val(v, 4);			\
 266	   else if (!(v & ~0xfc000003))			\
 267		r = imm12val(v, 6);			\
 268	   else if (!(v & ~0xff000000))			\
 269		r = imm12val(v, 8);			\
 270	   else if (!(v & ~0x3fc00000))			\
 271		r = imm12val(v, 10);			\
 272	   else if (!(v & ~0x0ff00000))			\
 273		r = imm12val(v, 12);			\
 274	   else if (!(v & ~0x03fc0000))			\
 275		r = imm12val(v, 14);			\
 276	   else if (!(v & ~0x00ff0000))			\
 277		r = imm12val(v, 16);			\
 278	   else if (!(v & ~0x003fc000))			\
 279		r = imm12val(v, 18);			\
 280	   else if (!(v & ~0x000ff000))			\
 281		r = imm12val(v, 20);			\
 282	   else if (!(v & ~0x0003fc00))			\
 283		r = imm12val(v, 22);			\
 284	   else if (!(v & ~0x0000ff00))			\
 285		r = imm12val(v, 24);			\
 286	   else if (!(v & ~0x00003fc0))			\
 287		r = imm12val(v, 26);			\
 288	   else if (!(v & ~0x00000ff0))			\
 289		r = imm12val(v, 28);			\
 290	   else if (!(v & ~0x000003fc))			\
 291		r = imm12val(v, 30);			\
 292	   else						\
 293		r = -1;					\
 294	   r; })
 295
 296/*
  297 * Checks whether an immediate value can be encoded as an imm12 (12-bit) operand.
 298 */
 299static int imm8m(u32 x)
 300{
 301	u32 rot;
 302
 303	for (rot = 0; rot < 16; rot++)
 304		if ((x & ~ror32(0xff, 2 * rot)) == 0)
 305			return rol32(x, 2 * rot) | (rot << 8);
 306	return -1;
 307}
 308
 309#define imm8m(x) (__builtin_constant_p(x) ? const_imm8m(x) : imm8m(x))
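/*
 * Editorial example (not part of the original source): an ARM "modified
 * immediate" operand packs an 8-bit value plus a 4-bit rotation field,
 * applied as a rotate-right by twice the field.  The sketch below (the
 * example_ name is invented for illustration) decodes what imm8m()
 * produced, round-tripping the original constant, e.g. for
 * imm8m(0x3fc00000) == 0x5ff:
 */
static inline u32 example_decode_imm8m(u32 operand)
{
	u32 imm8 = operand & 0xff;		/* 8-bit payload */
	u32 rot = (operand >> 8) & 0xf;		/* rotation field */

	return ror32(imm8, 2 * rot);		/* 0x5ff decodes to 0x3fc00000 */
}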
 310
 311static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12)
 312{
 313	op |= rt << 12 | rn << 16;
 314	if (imm12 >= 0)
 315		op |= ARM_INST_LDST__U;
 316	else
 317		imm12 = -imm12;
 318	return op | (imm12 & ARM_INST_LDST__IMM12);
 319}
 320
 321static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8)
 322{
 323	op |= rt << 12 | rn << 16;
 324	if (imm8 >= 0)
 325		op |= ARM_INST_LDST__U;
 326	else
 327		imm8 = -imm8;
 328	return op | (imm8 & 0xf0) << 4 | (imm8 & 0x0f);
 329}
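/*
 * Worked example (editorial): LDRH/STRH encode an 8-bit offset as two
 * nibbles, immH in bits 11:8 and immL in bits 3:0.  For imm8 = 0x2c the
 * expression above yields (0x20 << 4) | 0x0c, placing 0x2 in immH and
 * 0xc in immL, with ARM_INST_LDST__U selecting add versus subtract.
 */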
 330
 331#define ARM_LDR_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off)
 332#define ARM_LDRB_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off)
 333#define ARM_LDRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off)
 334#define ARM_LDRH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off)
 335
 336#define ARM_STR_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off)
 337#define ARM_STRB_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off)
 338#define ARM_STRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off)
 339#define ARM_STRH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off)
 340
 341/*
 342 * Initializes the JIT space with undefined instructions.
 343 */
 344static void jit_fill_hole(void *area, unsigned int size)
 345{
 346	u32 *ptr;
 347	/* We are guaranteed to have aligned memory. */
 348	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
 349		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
 350}
 351
 352#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
 353/* EABI requires the stack to be aligned to 64-bit boundaries */
 354#define STACK_ALIGNMENT	8
 355#else
 356/* Stack must be aligned to 32-bit boundaries */
 357#define STACK_ALIGNMENT	4
 358#endif
 359
 360/* total stack size used in JITed code */
 361#define _STACK_SIZE	(ctx->prog->aux->stack_depth + SCRATCH_SIZE)
 362#define STACK_SIZE	ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
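/*
 * Worked example (editorial, not in the original source):
 * BPF_JIT_SCRATCH_REGS is 20 slots, so SCRATCH_SIZE is 80 bytes.  For a
 * program with stack_depth = 60, _STACK_SIZE = 60 + 80 = 140, which
 * ALIGN() rounds up to 144 bytes on an EABI (8-byte aligned) stack.
 */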
 363
 364#if __LINUX_ARM_ARCH__ < 7
 365
 366static u16 imm_offset(u32 k, struct jit_ctx *ctx)
 367{
 368	unsigned int i = 0, offset;
 369	u16 imm;
 370
 371	/* on the "fake" run we just count them (duplicates included) */
 372	if (ctx->target == NULL) {
 373		ctx->imm_count++;
 374		return 0;
 375	}
 376
 377	while ((i < ctx->imm_count) && ctx->imms[i]) {
 378		if (ctx->imms[i] == k)
 379			break;
 380		i++;
 381	}
 382
 383	if (ctx->imms[i] == 0)
 384		ctx->imms[i] = k;
 385
 386	/* constants go just after the epilogue */
 387	offset =  ctx->offsets[ctx->prog->len - 1] * 4;
 388	offset += ctx->prologue_bytes;
 389	offset += ctx->epilogue_bytes;
 390	offset += i * 4;
 391
 392	ctx->target[offset / 4] = k;
 393
 394	/* PC in ARM mode == address of the instruction + 8 */
 395	imm = offset - (8 + ctx->idx * 4);
 396
 397	if (imm & ~0xfff) {
 398		/*
  399		 * The literal pool is too far away; signal it via flags.
  400		 * Unfortunately we can only detect this on the second pass.
 401		 */
 402		ctx->flags |= FLAG_IMM_OVERFLOW;
 403		return 0;
 404	}
 405
 406	return imm;
 407}
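/*
 * Worked example (editorial): if the literal pool starts at byte offset
 * 0x120 and the current instruction index is 0x40, the LDR sits at byte
 * 0x100, ARM-mode PC reads as 0x108, and the load offset becomes
 * 0x120 - 0x108 = 0x18.  Offsets that do not fit in 12 bits set
 * FLAG_IMM_OVERFLOW and the program is handed to the interpreter.
 */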
 408
 409#endif /* __LINUX_ARM_ARCH__ */
 410
 411static inline int bpf2a32_offset(int bpf_to, int bpf_from,
 412				 const struct jit_ctx *ctx) {
 413	int to, from;
 414
 415	if (ctx->target == NULL)
 416		return 0;
 417	to = ctx->offsets[bpf_to];
 418	from = ctx->offsets[bpf_from];
 419
 420	return to - from - 1;
 421}
 422
 423/*
 424 * Move an immediate that's not an imm8m to a core register.
 425 */
 426static inline void emit_mov_i_no8m(const u8 rd, u32 val, struct jit_ctx *ctx)
 427{
 428#if __LINUX_ARM_ARCH__ < 7
 429	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
 430#else
 431	emit(ARM_MOVW(rd, val & 0xffff), ctx);
 432	if (val > 0xffff)
 433		emit(ARM_MOVT(rd, val >> 16), ctx);
 434#endif
 435}
 436
 437static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
 438{
 439	int imm12 = imm8m(val);
 440
 441	if (imm12 >= 0)
 442		emit(ARM_MOV_I(rd, imm12), ctx);
 443	else
 444		emit_mov_i_no8m(rd, val, ctx);
 445}
 446
 447static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
 448{
 449	if (elf_hwcap & HWCAP_THUMB)
 450		emit(ARM_BX(tgt_reg), ctx);
 451	else
 452		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
 453}
 454
 455static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
 456{
 457#if __LINUX_ARM_ARCH__ < 5
 458	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
 459	emit_bx_r(tgt_reg, ctx);
 460#else
 461	emit(ARM_BLX_R(tgt_reg), ctx);
 462#endif
 463}
 464
 465static inline int epilogue_offset(const struct jit_ctx *ctx)
 466{
 467	int to, from;
 468	/* No need for 1st dummy run */
 469	if (ctx->target == NULL)
 470		return 0;
 471	to = ctx->epilogue_offset;
 472	from = ctx->idx;
 473
 474	return to - from - 2;
 475}
 476
 477static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
 478{
 479	const int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1);
 480	const s8 *tmp = bpf2a32[TMP_REG_1];
 481
 482#if __LINUX_ARM_ARCH__ == 7
 483	if (elf_hwcap & HWCAP_IDIVA) {
 484		if (op == BPF_DIV)
 485			emit(ARM_UDIV(rd, rm, rn), ctx);
 486		else {
 487			emit(ARM_UDIV(ARM_IP, rm, rn), ctx);
 488			emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx);
 489		}
 490		return;
 491	}
 492#endif
 493
 494	/*
  495	 * For BPF_ALU | BPF_DIV | BPF_K instructions:
  496	 * ARM_R1 and ARM_R0 hold the first argument of the BPF
  497	 * function, so they must be saved on the caller side to
  498	 * keep the callee from clobbering them.  After the
  499	 * return from the callee, ARM_R0 and ARM_R1 are
  500	 * restored.
 501	 */
 502	if (rn != ARM_R1) {
 503		emit(ARM_MOV_R(tmp[0], ARM_R1), ctx);
 504		emit(ARM_MOV_R(ARM_R1, rn), ctx);
 505	}
 506	if (rm != ARM_R0) {
 507		emit(ARM_MOV_R(tmp[1], ARM_R0), ctx);
 508		emit(ARM_MOV_R(ARM_R0, rm), ctx);
 509	}
 510
 511	/* Push caller-saved registers on stack */
 512	emit(ARM_PUSH(CALLER_MASK & ~exclude_mask), ctx);
 513
 514	/* Call appropriate function */
 515	emit_mov_i(ARM_IP, op == BPF_DIV ?
 516		   (u32)jit_udiv32 : (u32)jit_mod32, ctx);
 517	emit_blx_r(ARM_IP, ctx);
 518
 519	/* Restore caller-saved registers from stack */
 520	emit(ARM_POP(CALLER_MASK & ~exclude_mask), ctx);
 521
 522	/* Save return value */
 523	if (rd != ARM_R0)
 524		emit(ARM_MOV_R(rd, ARM_R0), ctx);
 525
 526	/* Restore ARM_R0 and ARM_R1 */
 527	if (rn != ARM_R1)
 528		emit(ARM_MOV_R(ARM_R1, tmp[0]), ctx);
 529	if (rm != ARM_R0)
 530		emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
 531}
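/*
 * Editorial sketch (not in the original source): on ARMv7 with
 * HWCAP_IDIVA the modulo above is built from UDIV + MLS using the
 * identity a % b == a - (a / b) * b.  The same computation in C (the
 * example_ name is invented for illustration):
 */
static inline u32 example_udivmod(u32 a, u32 b, bool is_mod)
{
	u32 q = a / b;			/* what UDIV computes */

	return is_mod ? a - q * b : q;	/* MLS folds the multiply-subtract */
}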
 532
 533/* Is the translated BPF register on stack? */
 534static bool is_stacked(s8 reg)
 535{
 536	return reg < 0;
 537}
 538
  539/* If a BPF register is on the stack (its mapping is negative), load it
  540 * into the supplied temporary register and return that register for
  541 * subsequent operations; otherwise just use the CPU register.
 542 */
 543static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx)
 544{
 545	if (is_stacked(reg)) {
 546		emit(ARM_LDR_I(tmp, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
 547		reg = tmp;
 548	}
 549	return reg;
 550}
 551
 552static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp,
 553				   struct jit_ctx *ctx)
 554{
 555	if (is_stacked(reg[1])) {
 556		if (__LINUX_ARM_ARCH__ >= 6 ||
 557		    ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
 558			emit(ARM_LDRD_I(tmp[1], ARM_FP,
 559					EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
 560		} else {
 561			emit(ARM_LDR_I(tmp[1], ARM_FP,
 562				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
 563			emit(ARM_LDR_I(tmp[0], ARM_FP,
 564				       EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
 565		}
 566		reg = tmp;
 567	}
 568	return reg;
 569}
 570
  571/* If a BPF register is on the stack (its mapping is negative), store the
  572 * value back to the stack.  Otherwise, if the source register differs,
  573 * move it into the correct register.
 574 */
 575static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx)
 576{
 577	if (is_stacked(reg))
 578		emit(ARM_STR_I(src, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
 579	else if (reg != src)
 580		emit(ARM_MOV_R(reg, src), ctx);
 581}
 582
 583static void arm_bpf_put_reg64(const s8 *reg, const s8 *src,
 584			      struct jit_ctx *ctx)
 585{
 586	if (is_stacked(reg[1])) {
 587		if (__LINUX_ARM_ARCH__ >= 6 ||
 588		    ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
 589			emit(ARM_STRD_I(src[1], ARM_FP,
 590				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
 591		} else {
 592			emit(ARM_STR_I(src[1], ARM_FP,
 593				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
 594			emit(ARM_STR_I(src[0], ARM_FP,
 595				       EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
 596		}
 597	} else {
 598		if (reg[1] != src[1])
 599			emit(ARM_MOV_R(reg[1], src[1]), ctx);
 600		if (reg[0] != src[0])
 601			emit(ARM_MOV_R(reg[0], src[0]), ctx);
 602	}
 603}
 604
 605static inline void emit_a32_mov_i(const s8 dst, const u32 val,
 606				  struct jit_ctx *ctx)
 607{
 608	const s8 *tmp = bpf2a32[TMP_REG_1];
 609
 610	if (is_stacked(dst)) {
 611		emit_mov_i(tmp[1], val, ctx);
 612		arm_bpf_put_reg32(dst, tmp[1], ctx);
 613	} else {
 614		emit_mov_i(dst, val, ctx);
 615	}
 616}
 617
 618static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx)
 619{
 620	const s8 *tmp = bpf2a32[TMP_REG_1];
 621	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
 622
 623	emit_mov_i(rd[1], (u32)val, ctx);
 624	emit_mov_i(rd[0], val >> 32, ctx);
 625
 626	arm_bpf_put_reg64(dst, rd, ctx);
 627}
 628
  629/* Sign-extended move */
 630static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[],
 631				       const u32 val, struct jit_ctx *ctx) {
 632	u64 val64 = val;
 633
 634	if (is64 && (val & (1<<31)))
 635		val64 |= 0xffffffff00000000ULL;
 636	emit_a32_mov_i64(dst, val64, ctx);
 637}
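/*
 * Worked example (editorial): for a 64-bit ALU op with val = 0xfffffffe
 * (-2), bit 31 is set, so val64 becomes 0xfffffffffffffffe and the
 * destination pair ends up as hi = 0xffffffff, lo = 0xfffffffe.
 */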
 638
 639static inline void emit_a32_add_r(const u8 dst, const u8 src,
 640			      const bool is64, const bool hi,
 641			      struct jit_ctx *ctx) {
 642	/* 64 bit :
 643	 *	adds dst_lo, dst_lo, src_lo
 644	 *	adc dst_hi, dst_hi, src_hi
 645	 * 32 bit :
 646	 *	add dst_lo, dst_lo, src_lo
 647	 */
 648	if (!hi && is64)
 649		emit(ARM_ADDS_R(dst, dst, src), ctx);
 650	else if (hi && is64)
 651		emit(ARM_ADC_R(dst, dst, src), ctx);
 652	else
 653		emit(ARM_ADD_R(dst, dst, src), ctx);
 654}
 655
 656static inline void emit_a32_sub_r(const u8 dst, const u8 src,
 657				  const bool is64, const bool hi,
 658				  struct jit_ctx *ctx) {
 659	/* 64 bit :
 660	 *	subs dst_lo, dst_lo, src_lo
 661	 *	sbc dst_hi, dst_hi, src_hi
 662	 * 32 bit :
 663	 *	sub dst_lo, dst_lo, src_lo
 664	 */
 665	if (!hi && is64)
 666		emit(ARM_SUBS_R(dst, dst, src), ctx);
 667	else if (hi && is64)
 668		emit(ARM_SBC_R(dst, dst, src), ctx);
 669	else
 670		emit(ARM_SUB_R(dst, dst, src), ctx);
 671}
 672
 673static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
 674			      const bool hi, const u8 op, struct jit_ctx *ctx){
 675	switch (BPF_OP(op)) {
 676	/* dst = dst + src */
 677	case BPF_ADD:
 678		emit_a32_add_r(dst, src, is64, hi, ctx);
 679		break;
 680	/* dst = dst - src */
 681	case BPF_SUB:
 682		emit_a32_sub_r(dst, src, is64, hi, ctx);
 683		break;
 684	/* dst = dst | src */
 685	case BPF_OR:
 686		emit(ARM_ORR_R(dst, dst, src), ctx);
 687		break;
 688	/* dst = dst & src */
 689	case BPF_AND:
 690		emit(ARM_AND_R(dst, dst, src), ctx);
 691		break;
 692	/* dst = dst ^ src */
 693	case BPF_XOR:
 694		emit(ARM_EOR_R(dst, dst, src), ctx);
 695		break;
 696	/* dst = dst * src */
 697	case BPF_MUL:
 698		emit(ARM_MUL(dst, dst, src), ctx);
 699		break;
 700	/* dst = dst << src */
 701	case BPF_LSH:
 702		emit(ARM_LSL_R(dst, dst, src), ctx);
 703		break;
 704	/* dst = dst >> src */
 705	case BPF_RSH:
 706		emit(ARM_LSR_R(dst, dst, src), ctx);
 707		break;
  708	/* dst = dst >> src (signed) */
 709	case BPF_ARSH:
 710		emit(ARM_MOV_SR(dst, dst, SRTYPE_ASR, src), ctx);
 711		break;
 712	}
 713}
 714
 715/* ALU operation (32 bit)
 716 * dst = dst (op) src
 717 */
 718static inline void emit_a32_alu_r(const s8 dst, const s8 src,
 719				  struct jit_ctx *ctx, const bool is64,
 720				  const bool hi, const u8 op) {
 721	const s8 *tmp = bpf2a32[TMP_REG_1];
 722	s8 rn, rd;
 723
 724	rn = arm_bpf_get_reg32(src, tmp[1], ctx);
 725	rd = arm_bpf_get_reg32(dst, tmp[0], ctx);
 726	/* ALU operation */
 727	emit_alu_r(rd, rn, is64, hi, op, ctx);
 728	arm_bpf_put_reg32(dst, rd, ctx);
 729}
 730
 731/* ALU operation (64 bit) */
 732static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
 733				  const s8 src[], struct jit_ctx *ctx,
 734				  const u8 op) {
 735	const s8 *tmp = bpf2a32[TMP_REG_1];
 736	const s8 *tmp2 = bpf2a32[TMP_REG_2];
 737	const s8 *rd;
 738
 739	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 740	if (is64) {
 741		const s8 *rs;
 742
 743		rs = arm_bpf_get_reg64(src, tmp2, ctx);
 744
 745		/* ALU operation */
 746		emit_alu_r(rd[1], rs[1], true, false, op, ctx);
 747		emit_alu_r(rd[0], rs[0], true, true, op, ctx);
 748	} else {
 749		s8 rs;
 750
 751		rs = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
 752
 753		/* ALU operation */
 754		emit_alu_r(rd[1], rs, true, false, op, ctx);
 755		if (!ctx->prog->aux->verifier_zext)
 756			emit_a32_mov_i(rd[0], 0, ctx);
 757	}
 758
 759	arm_bpf_put_reg64(dst, rd, ctx);
 760}
 761
  762/* dst = src (4 bytes) */
 763static inline void emit_a32_mov_r(const s8 dst, const s8 src,
 764				  struct jit_ctx *ctx) {
 765	const s8 *tmp = bpf2a32[TMP_REG_1];
 766	s8 rt;
 767
 768	rt = arm_bpf_get_reg32(src, tmp[0], ctx);
 769	arm_bpf_put_reg32(dst, rt, ctx);
 770}
 771
 772/* dst = src */
 773static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
 774				  const s8 src[],
 775				  struct jit_ctx *ctx) {
 776	if (!is64) {
 777		emit_a32_mov_r(dst_lo, src_lo, ctx);
 778		if (!ctx->prog->aux->verifier_zext)
 779			/* Zero out high 4 bytes */
 780			emit_a32_mov_i(dst_hi, 0, ctx);
 781	} else if (__LINUX_ARM_ARCH__ < 6 &&
 782		   ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
 783		/* complete 8 byte move */
 784		emit_a32_mov_r(dst_lo, src_lo, ctx);
 785		emit_a32_mov_r(dst_hi, src_hi, ctx);
 786	} else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
 787		const u8 *tmp = bpf2a32[TMP_REG_1];
 788
 789		emit(ARM_LDRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
 790		emit(ARM_STRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
 791	} else if (is_stacked(src_lo)) {
 792		emit(ARM_LDRD_I(dst[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
 793	} else if (is_stacked(dst_lo)) {
 794		emit(ARM_STRD_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
 795	} else {
 796		emit(ARM_MOV_R(dst[0], src[0]), ctx);
 797		emit(ARM_MOV_R(dst[1], src[1]), ctx);
 798	}
 799}
 800
  801/* Shift and negate operations with an immediate operand */
 802static inline void emit_a32_alu_i(const s8 dst, const u32 val,
 803				struct jit_ctx *ctx, const u8 op) {
 804	const s8 *tmp = bpf2a32[TMP_REG_1];
 805	s8 rd;
 806
 807	rd = arm_bpf_get_reg32(dst, tmp[0], ctx);
 808
 809	/* Do shift operation */
 810	switch (op) {
 811	case BPF_LSH:
 812		emit(ARM_LSL_I(rd, rd, val), ctx);
 813		break;
 814	case BPF_RSH:
 815		emit(ARM_LSR_I(rd, rd, val), ctx);
 816		break;
 817	case BPF_ARSH:
 818		emit(ARM_ASR_I(rd, rd, val), ctx);
 819		break;
 820	case BPF_NEG:
 821		emit(ARM_RSB_I(rd, rd, val), ctx);
 822		break;
 823	}
 824
 825	arm_bpf_put_reg32(dst, rd, ctx);
 826}
 827
  828/* dst = -dst (64 bit) */
 829static inline void emit_a32_neg64(const s8 dst[],
 830				struct jit_ctx *ctx){
 831	const s8 *tmp = bpf2a32[TMP_REG_1];
 832	const s8 *rd;
 833
 834	/* Setup Operand */
 835	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 836
 837	/* Do Negate Operation */
 838	emit(ARM_RSBS_I(rd[1], rd[1], 0), ctx);
 839	emit(ARM_RSC_I(rd[0], rd[0], 0), ctx);
 840
 841	arm_bpf_put_reg64(dst, rd, ctx);
 842}
 843
 844/* dst = dst << src */
 845static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[],
 846				    struct jit_ctx *ctx) {
 847	const s8 *tmp = bpf2a32[TMP_REG_1];
 848	const s8 *tmp2 = bpf2a32[TMP_REG_2];
 849	const s8 *rd;
 850	s8 rt;
 851
 852	/* Setup Operands */
 853	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
 854	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 855
 856	/* Do LSH operation */
 857	emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
 858	emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
 859	emit(ARM_MOV_SR(ARM_LR, rd[0], SRTYPE_ASL, rt), ctx);
 860	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[1], SRTYPE_ASL, ARM_IP), ctx);
 861	emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd[1], SRTYPE_LSR, tmp2[0]), ctx);
 862	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_ASL, rt), ctx);
 863
 864	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
 865	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
 866}
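/*
 * Editorial sketch (not in the original source): the six instructions
 * above compose a 64-bit left shift from 32-bit register shifts.  ARM
 * shifts by register use the bottom byte of the amount register and
 * yield 0 for amounts >= 32, which the SUB/RSB trick exploits.
 * Modelled in C (the example_ names are invented for illustration):
 */
static inline u32 example_arm_lsl(u32 v, u32 amt)
{
	amt &= 0xff;				/* register shift semantics */
	return amt < 32 ? v << amt : 0;
}

static inline u32 example_arm_lsr(u32 v, u32 amt)
{
	amt &= 0xff;
	return amt < 32 ? v >> amt : 0;
}

static inline u64 example_lsh64(u32 hi, u32 lo, u32 n)
{
	u32 new_hi = example_arm_lsl(hi, n) |		/* n < 32 part */
		     example_arm_lsl(lo, n - 32) |	/* n >= 32 part */
		     example_arm_lsr(lo, 32 - n);	/* carry-in bits */

	return (u64)new_hi << 32 | example_arm_lsl(lo, n);
}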
 867
  868/* dst = dst >> src (signed) */
 869static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[],
 870				     struct jit_ctx *ctx) {
 871	const s8 *tmp = bpf2a32[TMP_REG_1];
 872	const s8 *tmp2 = bpf2a32[TMP_REG_2];
 873	const s8 *rd;
 874	s8 rt;
 875
 876	/* Setup Operands */
 877	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
 878	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 879
 880	/* Do the ARSH operation */
 881	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
 882	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
 883	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
 884	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
 885	_emit(ARM_COND_PL,
 886	      ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx);
 887	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx);
 888
 889	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
 890	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
 891}
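/*
 * Editorial note (not in the original source): SUBS computes rt - 32
 * and sets the flags, so the following ORR is conditional on PL and
 * only executes when the shift amount is >= 32.  Unlike LSR, an ASR by
 * a register amount >= 32 fills the result with copies of the sign bit
 * rather than zero, so it must be skipped for small shifts; the
 * unsigned variant below gets away without the conditional instruction.
 */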
 892
 893/* dst = dst >> src */
 894static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[],
 895				    struct jit_ctx *ctx) {
 896	const s8 *tmp = bpf2a32[TMP_REG_1];
 897	const s8 *tmp2 = bpf2a32[TMP_REG_2];
 898	const s8 *rd;
 899	s8 rt;
 900
 901	/* Setup Operands */
 902	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
 903	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 904
 905	/* Do RSH operation */
 906	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
 907	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
 908	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
 909	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
 910	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_LSR, tmp2[0]), ctx);
 911	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_LSR, rt), ctx);
 912
 913	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
 914	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
 915}
 916
 917/* dst = dst << val */
 918static inline void emit_a32_lsh_i64(const s8 dst[],
 919				    const u32 val, struct jit_ctx *ctx){
 920	const s8 *tmp = bpf2a32[TMP_REG_1];
 921	const s8 *tmp2 = bpf2a32[TMP_REG_2];
 922	const s8 *rd;
 923
 924	/* Setup operands */
 925	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 926
 927	/* Do LSH operation */
 928	if (val < 32) {
 929		emit(ARM_MOV_SI(tmp2[0], rd[0], SRTYPE_ASL, val), ctx);
 930		emit(ARM_ORR_SI(rd[0], tmp2[0], rd[1], SRTYPE_LSR, 32 - val), ctx);
 931		emit(ARM_MOV_SI(rd[1], rd[1], SRTYPE_ASL, val), ctx);
 932	} else {
 933		if (val == 32)
 934			emit(ARM_MOV_R(rd[0], rd[1]), ctx);
 935		else
 936			emit(ARM_MOV_SI(rd[0], rd[1], SRTYPE_ASL, val - 32), ctx);
 937		emit(ARM_EOR_R(rd[1], rd[1], rd[1]), ctx);
 938	}
 939
 940	arm_bpf_put_reg64(dst, rd, ctx);
 941}
 942
 943/* dst = dst >> val */
 944static inline void emit_a32_rsh_i64(const s8 dst[],
 945				    const u32 val, struct jit_ctx *ctx) {
 946	const s8 *tmp = bpf2a32[TMP_REG_1];
 947	const s8 *tmp2 = bpf2a32[TMP_REG_2];
 948	const s8 *rd;
 949
 950	/* Setup operands */
 951	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 952
 953	/* Do LSR operation */
 954	if (val == 0) {
 955		/* An immediate value of 0 encodes a shift amount of 32
 956		 * for LSR. To shift by 0, don't do anything.
 957		 */
 958	} else if (val < 32) {
 959		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
 960		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
 961		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
 962	} else if (val == 32) {
 963		emit(ARM_MOV_R(rd[1], rd[0]), ctx);
 964		emit(ARM_MOV_I(rd[0], 0), ctx);
 965	} else {
 966		emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_LSR, val - 32), ctx);
 967		emit(ARM_MOV_I(rd[0], 0), ctx);
 968	}
 969
 970	arm_bpf_put_reg64(dst, rd, ctx);
 971}
 972
 973/* dst = dst >> val (signed) */
 974static inline void emit_a32_arsh_i64(const s8 dst[],
 975				     const u32 val, struct jit_ctx *ctx){
 976	const s8 *tmp = bpf2a32[TMP_REG_1];
 977	const s8 *tmp2 = bpf2a32[TMP_REG_2];
 978	const s8 *rd;
 979
 980	/* Setup operands */
 981	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 982
 983	/* Do ARSH operation */
 984	if (val == 0) {
 985		/* An immediate value of 0 encodes a shift amount of 32
 986		 * for ASR. To shift by 0, don't do anything.
 987		 */
 988	} else if (val < 32) {
 989		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
 990		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
 991		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
 992	} else if (val == 32) {
 993		emit(ARM_MOV_R(rd[1], rd[0]), ctx);
 994		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
 995	} else {
 996		emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_ASR, val - 32), ctx);
 997		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
 998	}
 999
1000	arm_bpf_put_reg64(dst, rd, ctx);
1001}
1002
1003static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
1004				    struct jit_ctx *ctx) {
1005	const s8 *tmp = bpf2a32[TMP_REG_1];
1006	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1007	const s8 *rd, *rt;
1008
1009	/* Setup operands for multiplication */
1010	rd = arm_bpf_get_reg64(dst, tmp, ctx);
1011	rt = arm_bpf_get_reg64(src, tmp2, ctx);
1012
1013	/* Do Multiplication */
1014	emit(ARM_MUL(ARM_IP, rd[1], rt[0]), ctx);
1015	emit(ARM_MUL(ARM_LR, rd[0], rt[1]), ctx);
1016	emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);
1017
1018	emit(ARM_UMULL(ARM_IP, rd[0], rd[1], rt[1]), ctx);
1019	emit(ARM_ADD_R(rd[0], ARM_LR, rd[0]), ctx);
1020
1021	arm_bpf_put_reg32(dst_lo, ARM_IP, ctx);
1022	arm_bpf_put_reg32(dst_hi, rd[0], ctx);
1023}
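/*
 * Editorial sketch (not in the original source): the sequence above is
 * a 32x32 schoolbook multiply keeping only the low 64 bits of the
 * product.  The same computation in C (the example_ name is invented
 * for illustration):
 */
static inline u64 example_mul64(u64 a, u64 b)
{
	u32 cross = (u32)a * (u32)(b >> 32) +	/* two 32x32 MULs, */
		    (u32)(a >> 32) * (u32)b;	/* truncated to 32 bits */
	u64 prod = (u64)(u32)a * (u32)b;	/* UMULL: full 64-bit product */

	return prod + ((u64)cross << 32);	/* ADD into the high word */
}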
1024
1025static bool is_ldst_imm(s16 off, const u8 size)
1026{
1027	s16 off_max = 0;
1028
1029	switch (size) {
1030	case BPF_B:
1031	case BPF_W:
1032		off_max = 0xfff;
1033		break;
1034	case BPF_H:
1035		off_max = 0xff;
1036		break;
1037	case BPF_DW:
1038		/* Need to make sure off+4 does not overflow. */
1039		off_max = 0xfff - 4;
1040		break;
1041	}
1042	return -off_max <= off && off <= off_max;
1043}
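/*
 * Editorial note: the ranges follow from the ARM encodings used here:
 * LDR/STR/LDRB/STRB take a 12-bit immediate (+-4095), LDRH/STRH an
 * 8-bit one (+-255), and a 64-bit access is split into two word
 * accesses at off and off + 4, hence the reduced bound.
 */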
1044
1045/* *(size *)(dst + off) = src */
1046static inline void emit_str_r(const s8 dst, const s8 src[],
1047			      s16 off, struct jit_ctx *ctx, const u8 sz){
1048	const s8 *tmp = bpf2a32[TMP_REG_1];
1049	s8 rd;
1050
1051	rd = arm_bpf_get_reg32(dst, tmp[1], ctx);
1052
1053	if (!is_ldst_imm(off, sz)) {
1054		emit_a32_mov_i(tmp[0], off, ctx);
1055		emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
1056		rd = tmp[0];
1057		off = 0;
1058	}
1059	switch (sz) {
1060	case BPF_B:
1061		/* Store a Byte */
1062		emit(ARM_STRB_I(src_lo, rd, off), ctx);
1063		break;
1064	case BPF_H:
1065		/* Store a HalfWord */
1066		emit(ARM_STRH_I(src_lo, rd, off), ctx);
1067		break;
1068	case BPF_W:
1069		/* Store a Word */
1070		emit(ARM_STR_I(src_lo, rd, off), ctx);
1071		break;
1072	case BPF_DW:
1073		/* Store a Double Word */
1074		emit(ARM_STR_I(src_lo, rd, off), ctx);
1075		emit(ARM_STR_I(src_hi, rd, off + 4), ctx);
1076		break;
1077	}
1078}
1079
1080/* dst = *(size*)(src + off) */
1081static inline void emit_ldx_r(const s8 dst[], const s8 src,
1082			      s16 off, struct jit_ctx *ctx, const u8 sz){
1083	const s8 *tmp = bpf2a32[TMP_REG_1];
1084	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
1085	s8 rm = src;
1086
1087	if (!is_ldst_imm(off, sz)) {
1088		emit_a32_mov_i(tmp[0], off, ctx);
1089		emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
1090		rm = tmp[0];
1091		off = 0;
1092	} else if (rd[1] == rm) {
1093		emit(ARM_MOV_R(tmp[0], rm), ctx);
1094		rm = tmp[0];
1095	}
1096	switch (sz) {
1097	case BPF_B:
1098		/* Load a Byte */
1099		emit(ARM_LDRB_I(rd[1], rm, off), ctx);
1100		if (!ctx->prog->aux->verifier_zext)
1101			emit_a32_mov_i(rd[0], 0, ctx);
1102		break;
1103	case BPF_H:
1104		/* Load a HalfWord */
1105		emit(ARM_LDRH_I(rd[1], rm, off), ctx);
1106		if (!ctx->prog->aux->verifier_zext)
1107			emit_a32_mov_i(rd[0], 0, ctx);
1108		break;
1109	case BPF_W:
1110		/* Load a Word */
1111		emit(ARM_LDR_I(rd[1], rm, off), ctx);
1112		if (!ctx->prog->aux->verifier_zext)
1113			emit_a32_mov_i(rd[0], 0, ctx);
1114		break;
1115	case BPF_DW:
1116		/* Load a Double Word */
1117		emit(ARM_LDR_I(rd[1], rm, off), ctx);
1118		emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
1119		break;
1120	}
1121	arm_bpf_put_reg64(dst, rd, ctx);
1122}
1123
 1124/* Arithmetic operation: set the condition flags for a conditional jump */
1125static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
1126			     const u8 rn, struct jit_ctx *ctx, u8 op,
1127			     bool is_jmp64) {
1128	switch (op) {
1129	case BPF_JSET:
1130		if (is_jmp64) {
1131			emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
1132			emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
1133			emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
1134		} else {
1135			emit(ARM_ANDS_R(ARM_IP, rt, rn), ctx);
1136		}
1137		break;
1138	case BPF_JEQ:
1139	case BPF_JNE:
1140	case BPF_JGT:
1141	case BPF_JGE:
1142	case BPF_JLE:
1143	case BPF_JLT:
1144		if (is_jmp64) {
1145			emit(ARM_CMP_R(rd, rm), ctx);
 1146			/* Only compare the low halves if the high halves are equal. */
1147			_emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
1148		} else {
1149			emit(ARM_CMP_R(rt, rn), ctx);
1150		}
1151		break;
1152	case BPF_JSLE:
1153	case BPF_JSGT:
1154		emit(ARM_CMP_R(rn, rt), ctx);
1155		if (is_jmp64)
1156			emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
1157		break;
1158	case BPF_JSLT:
1159	case BPF_JSGE:
1160		emit(ARM_CMP_R(rt, rn), ctx);
1161		if (is_jmp64)
1162			emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
1163		break;
1164	}
1165}
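/*
 * Editorial note (not in the original source): for the signed 64-bit
 * compares, CMP on the low halves followed by SBCS on the high halves
 * performs a full 64-bit subtraction whose only effect is the flags,
 * so one conditional branch afterwards decides the jump.  JSGT and
 * JSLE compare src - dst while JSLT and JSGE compare dst - src; with
 * the operands swapped this way, LT selects both JSGT and JSLT, and GE
 * selects both JSLE and JSGE in the jump table of build_insn() below.
 */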
1166
1167static int out_offset = -1; /* initialized on the first pass of build_body() */
1168static int emit_bpf_tail_call(struct jit_ctx *ctx)
1169{
1170
1171	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
1172	const s8 *r2 = bpf2a32[BPF_REG_2];
1173	const s8 *r3 = bpf2a32[BPF_REG_3];
1174	const s8 *tmp = bpf2a32[TMP_REG_1];
1175	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1176	const s8 *tcc = bpf2a32[TCALL_CNT];
1177	const s8 *tc;
1178	const int idx0 = ctx->idx;
1179#define cur_offset (ctx->idx - idx0)
1180#define jmp_offset (out_offset - (cur_offset) - 2)
1181	u32 lo, hi;
1182	s8 r_array, r_index;
1183	int off;
1184
1185	/* if (index >= array->map.max_entries)
1186	 *	goto out;
1187	 */
1188	BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) >
1189		     ARM_INST_LDST__IMM12);
1190	off = offsetof(struct bpf_array, map.max_entries);
1191	r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx);
1192	/* index is 32-bit for arrays */
1193	r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx);
1194	/* array->map.max_entries */
1195	emit(ARM_LDR_I(tmp[1], r_array, off), ctx);
1196	/* index >= array->map.max_entries */
1197	emit(ARM_CMP_R(r_index, tmp[1]), ctx);
1198	_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
1199
1200	/* tmp2[0] = array, tmp2[1] = index */
1201
1202	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
1203	 *	goto out;
1204	 * tail_call_cnt++;
1205	 */
1206	lo = (u32)MAX_TAIL_CALL_CNT;
1207	hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
1208	tc = arm_bpf_get_reg64(tcc, tmp, ctx);
1209	emit(ARM_CMP_I(tc[0], hi), ctx);
1210	_emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx);
1211	_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
1212	emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx);
1213	emit(ARM_ADC_I(tc[0], tc[0], 0), ctx);
1214	arm_bpf_put_reg64(tcc, tmp, ctx);
1215
1216	/* prog = array->ptrs[index]
1217	 * if (prog == NULL)
1218	 *	goto out;
1219	 */
1220	BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0);
1221	off = imm8m(offsetof(struct bpf_array, ptrs));
1222	emit(ARM_ADD_I(tmp[1], r_array, off), ctx);
1223	emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx);
1224	emit(ARM_CMP_I(tmp[1], 0), ctx);
1225	_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
1226
1227	/* goto *(prog->bpf_func + prologue_size); */
1228	BUILD_BUG_ON(offsetof(struct bpf_prog, bpf_func) >
1229		     ARM_INST_LDST__IMM12);
1230	off = offsetof(struct bpf_prog, bpf_func);
1231	emit(ARM_LDR_I(tmp[1], tmp[1], off), ctx);
1232	emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
1233	emit_bx_r(tmp[1], ctx);
1234
1235	/* out: */
1236	if (out_offset == -1)
1237		out_offset = cur_offset;
1238	if (cur_offset != out_offset) {
1239		pr_err_once("tail_call out_offset = %d, expected %d!\n",
1240			    cur_offset, out_offset);
1241		return -1;
1242	}
1243	return 0;
1244#undef cur_offset
1245#undef jmp_offset
1246}
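/*
 * Editorial sketch (not in the original source): the run-time checks
 * emitted above, written out in C.  The name and signature are invented
 * for illustration:
 */
static inline bool example_tail_call_ok(u32 index, u32 max_entries,
					u64 *tail_call_cnt, const void *prog)
{
	if (index >= max_entries)
		return false;			/* out: index out of range */
	if (*tail_call_cnt > MAX_TAIL_CALL_CNT)
		return false;			/* out: chain limit reached */
	(*tail_call_cnt)++;
	return prog != NULL;			/* out if the slot is empty */
}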
1247
1248/* 0xabcd => 0xcdab */
1249static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
1250{
1251#if __LINUX_ARM_ARCH__ < 6
1252	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1253
1254	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
1255	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx);
1256	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
1257	emit(ARM_ORR_SI(rd, tmp2[0], tmp2[1], SRTYPE_LSL, 8), ctx);
1258#else /* ARMv6+ */
1259	emit(ARM_REV16(rd, rn), ctx);
1260#endif
1261}
1262
1263/* 0xabcdefgh => 0xghefcdab */
1264static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
1265{
1266#if __LINUX_ARM_ARCH__ < 6
1267	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1268
1269	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
1270	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx);
1271	emit(ARM_ORR_SI(ARM_IP, tmp2[0], tmp2[1], SRTYPE_LSL, 24), ctx);
1272
1273	emit(ARM_MOV_SI(tmp2[1], rn, SRTYPE_LSR, 8), ctx);
1274	emit(ARM_AND_I(tmp2[1], tmp2[1], 0xff), ctx);
1275	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 16), ctx);
1276	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
1277	emit(ARM_MOV_SI(tmp2[0], tmp2[0], SRTYPE_LSL, 8), ctx);
1278	emit(ARM_ORR_SI(tmp2[0], tmp2[0], tmp2[1], SRTYPE_LSL, 16), ctx);
1279	emit(ARM_ORR_R(rd, ARM_IP, tmp2[0]), ctx);
1280
1281#else /* ARMv6+ */
1282	emit(ARM_REV(rd, rn), ctx);
1283#endif
1284}
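/*
 * Editorial sketch (not in the original source): the pre-ARMv6 sequence
 * above implements the same byte swap that a single REV performs, i.e.
 * (the example_ name is invented for illustration):
 */
static inline u32 example_rev32(u32 x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	       ((x << 8) & 0x00ff0000) | (x << 24);
}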
1285
 1286// Push a 64-bit BPF register pair onto the ARM stack
1287static inline void emit_push_r64(const s8 src[], struct jit_ctx *ctx)
1288{
1289	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1290	const s8 *rt;
1291	u16 reg_set = 0;
1292
1293	rt = arm_bpf_get_reg64(src, tmp2, ctx);
1294
1295	reg_set = (1 << rt[1]) | (1 << rt[0]);
1296	emit(ARM_PUSH(reg_set), ctx);
1297}
1298
1299static void build_prologue(struct jit_ctx *ctx)
1300{
1301	const s8 arm_r0 = bpf2a32[BPF_REG_0][1];
1302	const s8 *bpf_r1 = bpf2a32[BPF_REG_1];
1303	const s8 *bpf_fp = bpf2a32[BPF_REG_FP];
1304	const s8 *tcc = bpf2a32[TCALL_CNT];
1305
1306	/* Save callee saved registers. */
1307#ifdef CONFIG_FRAME_POINTER
1308	u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
1309	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
1310	emit(ARM_PUSH(reg_set), ctx);
1311	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
1312#else
1313	emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
1314	emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
1315#endif
1316	/* mov r3, #0 */
1317	/* sub r2, sp, #SCRATCH_SIZE */
1318	emit(ARM_MOV_I(bpf_r1[0], 0), ctx);
1319	emit(ARM_SUB_I(bpf_r1[1], ARM_SP, SCRATCH_SIZE), ctx);
1320
1321	ctx->stack_size = imm8m(STACK_SIZE);
1322
1323	/* Set up function call stack */
1324	emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
1325
1326	/* Set up BPF prog stack base register */
1327	emit_a32_mov_r64(true, bpf_fp, bpf_r1, ctx);
1328
 1329	/* Initialize tail call count */
1330	emit(ARM_MOV_I(bpf_r1[1], 0), ctx);
1331	emit_a32_mov_r64(true, tcc, bpf_r1, ctx);
1332
1333	/* Move BPF_CTX to BPF_R1 */
1334	emit(ARM_MOV_R(bpf_r1[1], arm_r0), ctx);
1335
1336	/* end of prologue */
1337}
1338
1339/* restore callee saved registers. */
1340static void build_epilogue(struct jit_ctx *ctx)
1341{
1342#ifdef CONFIG_FRAME_POINTER
 1343	/* When using frame pointers, some additional registers need to
 1344	 * be restored. */
1345	u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
1346	emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
1347	emit(ARM_LDM(ARM_SP, reg_set), ctx);
1348#else
1349	/* Restore callee saved registers. */
1350	emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
1351	emit(ARM_POP(CALLEE_POP_MASK), ctx);
1352#endif
1353}
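/*
 * Editorial note (not in the original source): CALLEE_POP_MASK | ARM_SP
 * names nine registers, so the frame-pointer path rewinds SP to
 * ARM_FP - 36 and one LDM reloads r4-r9 and fp, loads SP from the slot
 * the prologue filled with the original stack pointer (via ip), and
 * loads PC from the saved lr, performing the actual return.
 */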
1354
1355/*
 1356 * Convert an eBPF instruction to a native instruction, i.e.
 1357 * JIT a single eBPF instruction.
 1358 * Returns:
1359 *	0  - Successfully JITed an 8-byte eBPF instruction
1360 *	>0 - Successfully JITed a 16-byte eBPF instruction
1361 *	<0 - Failed to JIT.
1362 */
1363static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1364{
1365	const u8 code = insn->code;
1366	const s8 *dst = bpf2a32[insn->dst_reg];
1367	const s8 *src = bpf2a32[insn->src_reg];
1368	const s8 *tmp = bpf2a32[TMP_REG_1];
1369	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1370	const s16 off = insn->off;
1371	const s32 imm = insn->imm;
1372	const int i = insn - ctx->prog->insnsi;
1373	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
1374	const s8 *rd, *rs;
1375	s8 rd_lo, rt, rm, rn;
1376	s32 jmp_offset;
1377
1378#define check_imm(bits, imm) do {				\
1379	if ((imm) >= (1 << ((bits) - 1)) ||			\
1380	    (imm) < -(1 << ((bits) - 1))) {			\
1381		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
1382			i, imm, imm);				\
1383		return -EINVAL;					\
1384	}							\
1385} while (0)
1386#define check_imm24(imm) check_imm(24, imm)
1387
1388	switch (code) {
1389	/* ALU operations */
1390
1391	/* dst = src */
1392	case BPF_ALU | BPF_MOV | BPF_K:
1393	case BPF_ALU | BPF_MOV | BPF_X:
1394	case BPF_ALU64 | BPF_MOV | BPF_K:
1395	case BPF_ALU64 | BPF_MOV | BPF_X:
1396		switch (BPF_SRC(code)) {
1397		case BPF_X:
1398			if (imm == 1) {
1399				/* Special mov32 for zext */
1400				emit_a32_mov_i(dst_hi, 0, ctx);
1401				break;
1402			}
1403			emit_a32_mov_r64(is64, dst, src, ctx);
1404			break;
1405		case BPF_K:
1406			/* Sign-extend immediate value to destination reg */
1407			emit_a32_mov_se_i64(is64, dst, imm, ctx);
1408			break;
1409		}
1410		break;
1411	/* dst = dst + src/imm */
1412	/* dst = dst - src/imm */
1413	/* dst = dst | src/imm */
1414	/* dst = dst & src/imm */
1415	/* dst = dst ^ src/imm */
1416	/* dst = dst * src/imm */
1417	/* dst = dst << src */
1418	/* dst = dst >> src */
1419	case BPF_ALU | BPF_ADD | BPF_K:
1420	case BPF_ALU | BPF_ADD | BPF_X:
1421	case BPF_ALU | BPF_SUB | BPF_K:
1422	case BPF_ALU | BPF_SUB | BPF_X:
1423	case BPF_ALU | BPF_OR | BPF_K:
1424	case BPF_ALU | BPF_OR | BPF_X:
1425	case BPF_ALU | BPF_AND | BPF_K:
1426	case BPF_ALU | BPF_AND | BPF_X:
1427	case BPF_ALU | BPF_XOR | BPF_K:
1428	case BPF_ALU | BPF_XOR | BPF_X:
1429	case BPF_ALU | BPF_MUL | BPF_K:
1430	case BPF_ALU | BPF_MUL | BPF_X:
1431	case BPF_ALU | BPF_LSH | BPF_X:
1432	case BPF_ALU | BPF_RSH | BPF_X:
1433	case BPF_ALU | BPF_ARSH | BPF_X:
1434	case BPF_ALU64 | BPF_ADD | BPF_K:
1435	case BPF_ALU64 | BPF_ADD | BPF_X:
1436	case BPF_ALU64 | BPF_SUB | BPF_K:
1437	case BPF_ALU64 | BPF_SUB | BPF_X:
1438	case BPF_ALU64 | BPF_OR | BPF_K:
1439	case BPF_ALU64 | BPF_OR | BPF_X:
1440	case BPF_ALU64 | BPF_AND | BPF_K:
1441	case BPF_ALU64 | BPF_AND | BPF_X:
1442	case BPF_ALU64 | BPF_XOR | BPF_K:
1443	case BPF_ALU64 | BPF_XOR | BPF_X:
1444		switch (BPF_SRC(code)) {
1445		case BPF_X:
1446			emit_a32_alu_r64(is64, dst, src, ctx, BPF_OP(code));
1447			break;
1448		case BPF_K:
 1449			/* Move the immediate value into the temporary
 1450			 * register first; this sign-extends it into the
 1451			 * temporary register pair, after which it is
 1452			 * safe to perform the ALU operation on the
 1453			 * temporary register.
 1454			 */
1455			emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
1456			emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code));
1457			break;
1458		}
1459		break;
1460	/* dst = dst / src(imm) */
1461	/* dst = dst % src(imm) */
1462	case BPF_ALU | BPF_DIV | BPF_K:
1463	case BPF_ALU | BPF_DIV | BPF_X:
1464	case BPF_ALU | BPF_MOD | BPF_K:
1465	case BPF_ALU | BPF_MOD | BPF_X:
1466		rd_lo = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx);
1467		switch (BPF_SRC(code)) {
1468		case BPF_X:
1469			rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx);
1470			break;
1471		case BPF_K:
1472			rt = tmp2[0];
1473			emit_a32_mov_i(rt, imm, ctx);
1474			break;
1475		default:
1476			rt = src_lo;
1477			break;
1478		}
1479		emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code));
1480		arm_bpf_put_reg32(dst_lo, rd_lo, ctx);
1481		if (!ctx->prog->aux->verifier_zext)
1482			emit_a32_mov_i(dst_hi, 0, ctx);
1483		break;
1484	case BPF_ALU64 | BPF_DIV | BPF_K:
1485	case BPF_ALU64 | BPF_DIV | BPF_X:
1486	case BPF_ALU64 | BPF_MOD | BPF_K:
1487	case BPF_ALU64 | BPF_MOD | BPF_X:
1488		goto notyet;
1489	/* dst = dst << imm */
1490	/* dst = dst >> imm */
1491	/* dst = dst >> imm (signed) */
1492	case BPF_ALU | BPF_LSH | BPF_K:
1493	case BPF_ALU | BPF_RSH | BPF_K:
1494	case BPF_ALU | BPF_ARSH | BPF_K:
1495		if (unlikely(imm > 31))
1496			return -EINVAL;
1497		if (imm)
1498			emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code));
1499		if (!ctx->prog->aux->verifier_zext)
1500			emit_a32_mov_i(dst_hi, 0, ctx);
1501		break;
1502	/* dst = dst << imm */
1503	case BPF_ALU64 | BPF_LSH | BPF_K:
1504		if (unlikely(imm > 63))
1505			return -EINVAL;
1506		emit_a32_lsh_i64(dst, imm, ctx);
1507		break;
1508	/* dst = dst >> imm */
1509	case BPF_ALU64 | BPF_RSH | BPF_K:
1510		if (unlikely(imm > 63))
1511			return -EINVAL;
1512		emit_a32_rsh_i64(dst, imm, ctx);
1513		break;
1514	/* dst = dst << src */
1515	case BPF_ALU64 | BPF_LSH | BPF_X:
1516		emit_a32_lsh_r64(dst, src, ctx);
1517		break;
1518	/* dst = dst >> src */
1519	case BPF_ALU64 | BPF_RSH | BPF_X:
1520		emit_a32_rsh_r64(dst, src, ctx);
1521		break;
1522	/* dst = dst >> src (signed) */
1523	case BPF_ALU64 | BPF_ARSH | BPF_X:
1524		emit_a32_arsh_r64(dst, src, ctx);
1525		break;
1526	/* dst = dst >> imm (signed) */
1527	case BPF_ALU64 | BPF_ARSH | BPF_K:
1528		if (unlikely(imm > 63))
1529			return -EINVAL;
1530		emit_a32_arsh_i64(dst, imm, ctx);
1531		break;
 1532	/* dst = -dst */
1533	case BPF_ALU | BPF_NEG:
1534		emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code));
1535		if (!ctx->prog->aux->verifier_zext)
1536			emit_a32_mov_i(dst_hi, 0, ctx);
1537		break;
 1538	/* dst = -dst (64 bit) */
1539	case BPF_ALU64 | BPF_NEG:
1540		emit_a32_neg64(dst, ctx);
1541		break;
1542	/* dst = dst * src/imm */
1543	case BPF_ALU64 | BPF_MUL | BPF_X:
1544	case BPF_ALU64 | BPF_MUL | BPF_K:
1545		switch (BPF_SRC(code)) {
1546		case BPF_X:
1547			emit_a32_mul_r64(dst, src, ctx);
1548			break;
1549		case BPF_K:
 1550			/* Move the immediate value into the temporary
 1551			 * register first; this sign-extends it into the
 1552			 * temporary register pair, after which it is
 1553			 * safe to perform the multiplication on the
 1554			 * temporary register.
 1555			 */
1556			emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
1557			emit_a32_mul_r64(dst, tmp2, ctx);
1558			break;
1559		}
1560		break;
1561	/* dst = htole(dst) */
1562	/* dst = htobe(dst) */
1563	case BPF_ALU | BPF_END | BPF_FROM_LE:
1564	case BPF_ALU | BPF_END | BPF_FROM_BE:
1565		rd = arm_bpf_get_reg64(dst, tmp, ctx);
1566		if (BPF_SRC(code) == BPF_FROM_LE)
1567			goto emit_bswap_uxt;
1568		switch (imm) {
1569		case 16:
1570			emit_rev16(rd[1], rd[1], ctx);
1571			goto emit_bswap_uxt;
1572		case 32:
1573			emit_rev32(rd[1], rd[1], ctx);
1574			goto emit_bswap_uxt;
1575		case 64:
1576			emit_rev32(ARM_LR, rd[1], ctx);
1577			emit_rev32(rd[1], rd[0], ctx);
1578			emit(ARM_MOV_R(rd[0], ARM_LR), ctx);
1579			break;
1580		}
1581		goto exit;
1582emit_bswap_uxt:
1583		switch (imm) {
1584		case 16:
1585			/* zero-extend 16 bits into 64 bits */
1586#if __LINUX_ARM_ARCH__ < 6
1587			emit_a32_mov_i(tmp2[1], 0xffff, ctx);
1588			emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx);
1589#else /* ARMv6+ */
1590			emit(ARM_UXTH(rd[1], rd[1]), ctx);
1591#endif
1592			if (!ctx->prog->aux->verifier_zext)
1593				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
1594			break;
1595		case 32:
1596			/* zero-extend 32 bits into 64 bits */
1597			if (!ctx->prog->aux->verifier_zext)
1598				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
1599			break;
1600		case 64:
1601			/* nop */
1602			break;
1603		}
1604exit:
1605		arm_bpf_put_reg64(dst, rd, ctx);
1606		break;
1607	/* dst = imm64 */
1608	case BPF_LD | BPF_IMM | BPF_DW:
1609	{
1610		u64 val = (u32)imm | (u64)insn[1].imm << 32;
1611
1612		emit_a32_mov_i64(dst, val, ctx);
1613
1614		return 1;
1615	}
1616	/* LDX: dst = *(size *)(src + off) */
1617	case BPF_LDX | BPF_MEM | BPF_W:
1618	case BPF_LDX | BPF_MEM | BPF_H:
1619	case BPF_LDX | BPF_MEM | BPF_B:
1620	case BPF_LDX | BPF_MEM | BPF_DW:
1621		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
1622		emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
1623		break;
1624	/* speculation barrier */
1625	case BPF_ST | BPF_NOSPEC:
1626		break;
1627	/* ST: *(size *)(dst + off) = imm */
1628	case BPF_ST | BPF_MEM | BPF_W:
1629	case BPF_ST | BPF_MEM | BPF_H:
1630	case BPF_ST | BPF_MEM | BPF_B:
1631	case BPF_ST | BPF_MEM | BPF_DW:
1632		switch (BPF_SIZE(code)) {
1633		case BPF_DW:
1634			/* Sign-extend immediate value into temp reg */
1635			emit_a32_mov_se_i64(true, tmp2, imm, ctx);
1636			break;
1637		case BPF_W:
1638		case BPF_H:
1639		case BPF_B:
1640			emit_a32_mov_i(tmp2[1], imm, ctx);
1641			break;
1642		}
1643		emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
1644		break;
1645	/* Atomic ops */
1646	case BPF_STX | BPF_ATOMIC | BPF_W:
1647	case BPF_STX | BPF_ATOMIC | BPF_DW:
1648		goto notyet;
1649	/* STX: *(size *)(dst + off) = src */
1650	case BPF_STX | BPF_MEM | BPF_W:
1651	case BPF_STX | BPF_MEM | BPF_H:
1652	case BPF_STX | BPF_MEM | BPF_B:
1653	case BPF_STX | BPF_MEM | BPF_DW:
1654		rs = arm_bpf_get_reg64(src, tmp2, ctx);
1655		emit_str_r(dst_lo, rs, off, ctx, BPF_SIZE(code));
1656		break;
1657	/* PC += off if dst == src */
1658	/* PC += off if dst > src */
1659	/* PC += off if dst >= src */
1660	/* PC += off if dst < src */
1661	/* PC += off if dst <= src */
1662	/* PC += off if dst != src */
1663	/* PC += off if dst > src (signed) */
1664	/* PC += off if dst >= src (signed) */
1665	/* PC += off if dst < src (signed) */
1666	/* PC += off if dst <= src (signed) */
1667	/* PC += off if dst & src */
1668	case BPF_JMP | BPF_JEQ | BPF_X:
1669	case BPF_JMP | BPF_JGT | BPF_X:
1670	case BPF_JMP | BPF_JGE | BPF_X:
1671	case BPF_JMP | BPF_JNE | BPF_X:
1672	case BPF_JMP | BPF_JSGT | BPF_X:
1673	case BPF_JMP | BPF_JSGE | BPF_X:
1674	case BPF_JMP | BPF_JSET | BPF_X:
1675	case BPF_JMP | BPF_JLE | BPF_X:
1676	case BPF_JMP | BPF_JLT | BPF_X:
1677	case BPF_JMP | BPF_JSLT | BPF_X:
1678	case BPF_JMP | BPF_JSLE | BPF_X:
1679	case BPF_JMP32 | BPF_JEQ | BPF_X:
1680	case BPF_JMP32 | BPF_JGT | BPF_X:
1681	case BPF_JMP32 | BPF_JGE | BPF_X:
1682	case BPF_JMP32 | BPF_JNE | BPF_X:
1683	case BPF_JMP32 | BPF_JSGT | BPF_X:
1684	case BPF_JMP32 | BPF_JSGE | BPF_X:
1685	case BPF_JMP32 | BPF_JSET | BPF_X:
1686	case BPF_JMP32 | BPF_JLE | BPF_X:
1687	case BPF_JMP32 | BPF_JLT | BPF_X:
1688	case BPF_JMP32 | BPF_JSLT | BPF_X:
1689	case BPF_JMP32 | BPF_JSLE | BPF_X:
1690		/* Setup source registers */
1691		rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx);
1692		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
1693		goto go_jmp;
1694	/* PC += off if dst == imm */
1695	/* PC += off if dst > imm */
1696	/* PC += off if dst >= imm */
1697	/* PC += off if dst < imm */
1698	/* PC += off if dst <= imm */
1699	/* PC += off if dst != imm */
1700	/* PC += off if dst > imm (signed) */
1701	/* PC += off if dst >= imm (signed) */
1702	/* PC += off if dst < imm (signed) */
1703	/* PC += off if dst <= imm (signed) */
1704	/* PC += off if dst & imm */
1705	case BPF_JMP | BPF_JEQ | BPF_K:
1706	case BPF_JMP | BPF_JGT | BPF_K:
1707	case BPF_JMP | BPF_JGE | BPF_K:
1708	case BPF_JMP | BPF_JNE | BPF_K:
1709	case BPF_JMP | BPF_JSGT | BPF_K:
1710	case BPF_JMP | BPF_JSGE | BPF_K:
1711	case BPF_JMP | BPF_JSET | BPF_K:
1712	case BPF_JMP | BPF_JLT | BPF_K:
1713	case BPF_JMP | BPF_JLE | BPF_K:
1714	case BPF_JMP | BPF_JSLT | BPF_K:
1715	case BPF_JMP | BPF_JSLE | BPF_K:
1716	case BPF_JMP32 | BPF_JEQ | BPF_K:
1717	case BPF_JMP32 | BPF_JGT | BPF_K:
1718	case BPF_JMP32 | BPF_JGE | BPF_K:
1719	case BPF_JMP32 | BPF_JNE | BPF_K:
1720	case BPF_JMP32 | BPF_JSGT | BPF_K:
1721	case BPF_JMP32 | BPF_JSGE | BPF_K:
1722	case BPF_JMP32 | BPF_JSET | BPF_K:
1723	case BPF_JMP32 | BPF_JLT | BPF_K:
1724	case BPF_JMP32 | BPF_JLE | BPF_K:
1725	case BPF_JMP32 | BPF_JSLT | BPF_K:
1726	case BPF_JMP32 | BPF_JSLE | BPF_K:
1727		if (off == 0)
1728			break;
1729		rm = tmp2[0];
1730		rn = tmp2[1];
1731		/* Sign-extend immediate value */
1732		emit_a32_mov_se_i64(true, tmp2, imm, ctx);
1733go_jmp:
1734		/* Setup destination register */
1735		rd = arm_bpf_get_reg64(dst, tmp, ctx);
1736
1737		/* Check for the condition */
1738		emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code),
1739			  BPF_CLASS(code) == BPF_JMP);
1740
1741		/* Setup JUMP instruction */
1742		jmp_offset = bpf2a32_offset(i+off, i, ctx);
1743		switch (BPF_OP(code)) {
1744		case BPF_JNE:
1745		case BPF_JSET:
1746			_emit(ARM_COND_NE, ARM_B(jmp_offset), ctx);
1747			break;
1748		case BPF_JEQ:
1749			_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
1750			break;
1751		case BPF_JGT:
1752			_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
1753			break;
1754		case BPF_JGE:
1755			_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
1756			break;
1757		case BPF_JSGT:
1758			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
1759			break;
1760		case BPF_JSGE:
1761			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
1762			break;
1763		case BPF_JLE:
1764			_emit(ARM_COND_LS, ARM_B(jmp_offset), ctx);
1765			break;
1766		case BPF_JLT:
1767			_emit(ARM_COND_CC, ARM_B(jmp_offset), ctx);
1768			break;
1769		case BPF_JSLT:
1770			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
1771			break;
1772		case BPF_JSLE:
1773			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
1774			break;
1775		}
1776		break;
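	/*
	 * Illustrative sketch (not part of the original source): for
	 * BPF_JMP | BPF_JGT | BPF_X, emit_ar_r() above compares the high
	 * words first and the low words only on equality, so the single
	 * conditional branch emitted here behaves like:
	 *
	 *	cmp	rd_hi, rm	@ compare high words
	 *	cmpeq	rd_lo, rn	@ low words, only if high words equal
	 *	bhi	<target>	@ taken iff dst > src (unsigned 64-bit)
	 */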
1777	/* JMP OFF */
1778	case BPF_JMP | BPF_JA:
1779	{
1780		if (off == 0)
1781			break;
1782		jmp_offset = bpf2a32_offset(i+off, i, ctx);
1783		check_imm24(jmp_offset);
1784		emit(ARM_B(jmp_offset), ctx);
1785		break;
1786	}
1787	/* tail call */
1788	case BPF_JMP | BPF_TAIL_CALL:
1789		if (emit_bpf_tail_call(ctx))
1790			return -EFAULT;
1791		break;
1792	/* function call */
1793	case BPF_JMP | BPF_CALL:
1794	{
1795		const s8 *r0 = bpf2a32[BPF_REG_0];
1796		const s8 *r1 = bpf2a32[BPF_REG_1];
1797		const s8 *r2 = bpf2a32[BPF_REG_2];
1798		const s8 *r3 = bpf2a32[BPF_REG_3];
1799		const s8 *r4 = bpf2a32[BPF_REG_4];
1800		const s8 *r5 = bpf2a32[BPF_REG_5];
1801		const u32 func = (u32)__bpf_call_base + (u32)imm;
1802
1803		emit_a32_mov_r64(true, r0, r1, ctx);
1804		emit_a32_mov_r64(true, r1, r2, ctx);
1805		emit_push_r64(r5, ctx);
1806		emit_push_r64(r4, ctx);
1807		emit_push_r64(r3, ctx);
1808
1809		emit_a32_mov_i(tmp[1], func, ctx);
1810		emit_blx_r(tmp[1], ctx);
1811
1812		emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // callee clean
1813		break;
1814	}
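	/*
	 * Worked example (illustrative): BPF passes up to five 64-bit
	 * arguments. r1 and r2 travel in the ARM register pairs r0:r1
	 * and r2:r3; r3..r5 are pushed (8 bytes each) by the three
	 * emit_push_r64() calls above, and the ADD of imm8m(24) drops
	 * those 3 * 8 = 24 bytes again once the helper returns.
	 */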
1815	/* function return */
1816	case BPF_JMP | BPF_EXIT:
1817		/* Optimization: when the last instruction is EXIT,
1818		 * simply fall through to the epilogue.
1819		 */
1820		if (i == ctx->prog->len - 1)
1821			break;
1822		jmp_offset = epilogue_offset(ctx);
1823		check_imm24(jmp_offset);
1824		emit(ARM_B(jmp_offset), ctx);
1825		break;
1826notyet:
1827		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
1828		return -EFAULT;
1829	default:
1830		pr_err_once("unknown opcode %02x\n", code);
1831		return -EINVAL;
1832	}
1833
1834	if (ctx->flags & FLAG_IMM_OVERFLOW)
1835		/*
1836		 * this instruction generated an overflow when
1837		 * trying to access the literal pool, so
1838		 * delegate this filter to the kernel interpreter.
1839		 */
1840		return -1;
1841	return 0;
1842}
1843
1844static int build_body(struct jit_ctx *ctx)
1845{
1846	const struct bpf_prog *prog = ctx->prog;
1847	unsigned int i;
1848
1849	for (i = 0; i < prog->len; i++) {
1850		const struct bpf_insn *insn = &(prog->insnsi[i]);
1851		int ret;
1852
1853		ret = build_insn(insn, ctx);
1854
1855		/* ret > 0: a 64-bit immediate load consumed two instruction slots. */
1856		if (ret > 0) {
1857			i++;
1858			if (ctx->target == NULL)
1859				ctx->offsets[i] = ctx->idx;
1860			continue;
1861		}
1862
1863		if (ctx->target == NULL)
1864			ctx->offsets[i] = ctx->idx;
1865
1866		/* If unsuccessful, return with the error code */
1867		if (ret)
1868			return ret;
1869	}
1870	return 0;
1871}
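/*
 * Example (illustrative): BPF_LD | BPF_DW | BPF_IMM occupies two
 * struct bpf_insn slots, so build_insn() returns a positive value for
 * it and the loop above records an offset for the second slot before
 * skipping it.
 */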
1872
1873static int validate_code(struct jit_ctx *ctx)
1874{
1875	int i;
1876
1877	for (i = 0; i < ctx->idx; i++) {
1878		if (ctx->target[i] == __opcode_to_mem_arm(ARM_INST_UDF))
1879			return -1;
1880	}
1881
1882	return 0;
1883}
1884
1885void bpf_jit_compile(struct bpf_prog *prog)
1886{
1887	/* Nothing to do here. We support Internal BPF. */
1888}
1889
1890bool bpf_jit_needs_zext(void)
1891{
1892	return true;
1893}
1894
1895struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1896{
1897	struct bpf_prog *tmp, *orig_prog = prog;
1898	struct bpf_binary_header *header;
1899	bool tmp_blinded = false;
1900	struct jit_ctx ctx;
1901	unsigned int tmp_idx;
1902	unsigned int image_size;
1903	u8 *image_ptr;
1904
1905	/* If BPF JIT was not enabled then we must fall back to
1906	 * the interpreter.
1907	 */
1908	if (!prog->jit_requested)
1909		return orig_prog;
1910
1911	/* If constant blinding was enabled and we failed during blinding
1912	 * then we must fall back to the interpreter. Otherwise, we save
1913	 * the new JITed code.
1914	 */
1915	tmp = bpf_jit_blind_constants(prog);
1916
1917	if (IS_ERR(tmp))
1918		return orig_prog;
1919	if (tmp != prog) {
1920		tmp_blinded = true;
1921		prog = tmp;
1922	}
1923
1924	memset(&ctx, 0, sizeof(ctx));
1925	ctx.prog = prog;
1926	ctx.cpu_architecture = cpu_architecture();
1927
1928	/* If we cannot allocate memory for offsets[], we must
1929	 * fall back to the interpreter.
1930	 */
1931	ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
1932	if (ctx.offsets == NULL) {
1933		prog = orig_prog;
1934		goto out;
1935	}
1936
1937	/* 1) fake pass to find the length of the JITed code,
1938	 * to compute ctx->offsets and the other context variables
1939	 * needed to generate the final JITed code.
1940	 * Also, calculate the random start of the JITed code,
1941	 * which is prefixed by a random number of fault instructions.
1942	 *
1943	 * If the first pass fails then there is no chance of it
1944	 * being successful in the second pass, so just fall back
1945	 * to the interpreter.
1946	 */
1947	if (build_body(&ctx)) {
1948		prog = orig_prog;
1949		goto out_off;
1950	}
1951
1952	tmp_idx = ctx.idx;
1953	build_prologue(&ctx);
1954	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
1955
1956	ctx.epilogue_offset = ctx.idx;
1957
1958#if __LINUX_ARM_ARCH__ < 7
1959	tmp_idx = ctx.idx;
1960	build_epilogue(&ctx);
1961	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
1962
1963	ctx.idx += ctx.imm_count;
1964	if (ctx.imm_count) {
1965		ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL);
1966		if (ctx.imms == NULL) {
1967			prog = orig_prog;
1968			goto out_off;
1969		}
1970	}
1971#else
1972	/* there's nothing after the epilogue on ARMv7 */
1973	build_epilogue(&ctx);
1974#endif
1975	/* Now we can get the actual image size of the JITed ARM code.
1976	 * Currently, we are not considering THUMB-2 instructions for the
1977	 * JIT, although they could decrease the size of the image.
1978	 *
1979	 * As each ARM instruction is 32 bits long, we translate the
1980	 * number of JITed instructions into the size required to store
1981	 * the JITed code.
1982	 */
1983	image_size = sizeof(u32) * ctx.idx;
1984
1985	/* Now we know the size of the structure to make */
1986	header = bpf_jit_binary_alloc(image_size, &image_ptr,
1987				      sizeof(u32), jit_fill_hole);
1988	/* If we cannot allocate memory for the image, we must
1989	 * fall back to the interpreter.
1990	 */
1991	if (header == NULL) {
1992		prog = orig_prog;
1993		goto out_imms;
1994	}
1995
1996	/* 2.) Actual pass to generate final JIT code */
1997	ctx.target = (u32 *) image_ptr;
1998	ctx.idx = 0;
1999
2000	build_prologue(&ctx);
2001
2002	/* If building the body of the JITed code fails somehow,
2003	 * we fall back to the interpreter.
2004	 */
2005	if (build_body(&ctx) < 0) {
2006		image_ptr = NULL;
2007		bpf_jit_binary_free(header);
2008		prog = orig_prog;
2009		goto out_imms;
2010	}
2011	build_epilogue(&ctx);
2012
2013	/* 3.) Extra pass to validate JITed Code */
2014	if (validate_code(&ctx)) {
2015		image_ptr = NULL;
2016		bpf_jit_binary_free(header);
2017		prog = orig_prog;
2018		goto out_imms;
2019	}
2020	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
2021
2022	if (bpf_jit_enable > 1)
2023		/* there are 2 passes here */
2024		bpf_jit_dump(prog->len, image_size, 2, ctx.target);
2025
2026	bpf_jit_binary_lock_ro(header);
2027	prog->bpf_func = (void *)ctx.target;
2028	prog->jited = 1;
2029	prog->jited_len = image_size;
2030
2031out_imms:
2032#if __LINUX_ARM_ARCH__ < 7
2033	if (ctx.imm_count)
2034		kfree(ctx.imms);
2035#endif
2036out_off:
2037	kfree(ctx.offsets);
2038out:
2039	if (tmp_blinded)
2040		bpf_jit_prog_release_other(prog, prog == orig_prog ?
2041					   tmp : orig_prog);
2042	return prog;
2043}
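/*
 * Note on the pass structure (informational): with ctx.target == NULL
 * the emitters only advance ctx.idx, which yields the exact image size
 * for bpf_jit_binary_alloc(); the second pass re-runs the same
 * emitters into the image, and validate_code() then verifies that no
 * UDF filler words remain, i.e. that both passes emitted the same
 * number of instructions.
 */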
2044
v4.10.11
   1/*
   2 * Just-In-Time compiler for BPF filters on 32bit ARM
   3 *
   4 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License as published by the
   8 * Free Software Foundation; version 2 of the License.
   9 */
  10
  11#include <linux/bitops.h>
  12#include <linux/compiler.h>
  13#include <linux/errno.h>
  14#include <linux/filter.h>
  15#include <linux/netdevice.h>
  16#include <linux/string.h>
  17#include <linux/slab.h>
  18#include <linux/if_vlan.h>
  19
  20#include <asm/cacheflush.h>
  21#include <asm/hwcap.h>
  22#include <asm/opcodes.h>
  23
  24#include "bpf_jit_32.h"
  25
  26/*
  27 * ABI:
  28 *
  29 * r0	scratch register
  30 * r4	BPF register A
  31 * r5	BPF register X
  32 * r6	pointer to the skb
  33 * r7	skb->data
  34 * r8	skb_headlen(skb)
  35 */
  36
  37#define r_scratch	ARM_R0
  38/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
  39#define r_off		ARM_R1
  40#define r_A		ARM_R4
  41#define r_X		ARM_R5
  42#define r_skb		ARM_R6
  43#define r_skb_data	ARM_R7
  44#define r_skb_hl	ARM_R8
  45
  46#define SCRATCH_SP_OFFSET	0
  47#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))
  48
  49#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
  50#define SEEN_MEM_WORD(k)	(1 << (k))
  51#define SEEN_X			(1 << BPF_MEMWORDS)
  52#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
  53#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
  54#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))
  55
  56#define FLAG_NEED_X_RESET	(1 << 0)
  57#define FLAG_IMM_OVERFLOW	(1 << 1)
  58
  59struct jit_ctx {
  60	const struct bpf_prog *skf;
  61	unsigned idx;
  62	unsigned prologue_bytes;
  63	int ret0_fp_idx;
  64	u32 seen;
  65	u32 flags;
  66	u32 *offsets;
  67	u32 *target;
  68#if __LINUX_ARM_ARCH__ < 7
  69	u16 epilogue_bytes;
  70	u16 imm_count;
  71	u32 *imms;
  72#endif
  73};
  74
  75int bpf_jit_enable __read_mostly;
  76
  77static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
  78		      unsigned int size)
  79{
  80	void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
  81
  82	if (!ptr)
  83		return -EFAULT;
  84	memcpy(ret, ptr, size);
  85	return 0;
  86}
  87
  88static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
  89{
  90	u8 ret;
  91	int err;
  92
  93	if (offset < 0)
  94		err = call_neg_helper(skb, offset, &ret, 1);
  95	else
  96		err = skb_copy_bits(skb, offset, &ret, 1);
  97
  98	return (u64)err << 32 | ret;
  99}
 100
 101static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
 102{
 103	u16 ret;
 104	int err;
 105
 106	if (offset < 0)
 107		err = call_neg_helper(skb, offset, &ret, 2);
 108	else
 109		err = skb_copy_bits(skb, offset, &ret, 2);
 110
 111	return (u64)err << 32 | ntohs(ret);
 112}
 113
 114static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
 115{
 116	u32 ret;
 117	int err;
 118
 119	if (offset < 0)
 120		err = call_neg_helper(skb, offset, &ret, 4);
 121	else
 122		err = skb_copy_bits(skb, offset, &ret, 4);
 123
 124	return (u64)err << 32 | ntohl(ret);
 125}
 126
 127/*
 128 * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
 129 * (where the assembly routines like __aeabi_uidiv could cause problems).
 130 */
 131static u32 jit_udiv(u32 dividend, u32 divisor)
 132{
 133	return dividend / divisor;
 134}
 135
 136static u32 jit_mod(u32 dividend, u32 divisor)
 137{
 138	return dividend % divisor;
 139}
 140
 141static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
 142{
 143	inst |= (cond << 28);
 144	inst = __opcode_to_mem_arm(inst);
 145
 146	if (ctx->target != NULL)
 147		ctx->target[ctx->idx] = inst;
 148
 149	ctx->idx++;
 150}
 151
 152/*
 153 * Emit an instruction that will be executed unconditionally.
 154 */
 155static inline void emit(u32 inst, struct jit_ctx *ctx)
 156{
 157	_emit(ARM_COND_AL, inst, ctx);
 158}
 159
 160static u16 saved_regs(struct jit_ctx *ctx)
 161{
 162	u16 ret = 0;
 163
 164	if ((ctx->skf->len > 1) ||
 165	    (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
 166		ret |= 1 << r_A;
 167
 168#ifdef CONFIG_FRAME_POINTER
 169	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
 170#else
 171	if (ctx->seen & SEEN_CALL)
 172		ret |= 1 << ARM_LR;
 173#endif
 174	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
 175		ret |= 1 << r_skb;
 176	if (ctx->seen & SEEN_DATA)
 177		ret |= (1 << r_skb_data) | (1 << r_skb_hl);
 178	if (ctx->seen & SEEN_X)
 179		ret |= 1 << r_X;
 180
 181	return ret;
 182}
 183
 184static inline int mem_words_used(struct jit_ctx *ctx)
 185{
 186	/* yes, we do waste some stack space IF there are "holes" in the set */
 187	return fls(ctx->seen & SEEN_MEM);
 188}
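/*
 * Example (illustrative): if only scratch words 0 and 3 are used,
 * ctx->seen & SEEN_MEM is 0b1001 and fls() returns 4, so four words
 * (16 bytes) get reserved -- the two unused "holes" are the wasted
 * stack space the comment above refers to.
 */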
 189
 190static void jit_fill_hole(void *area, unsigned int size)
 191{
 192	u32 *ptr;
 193	/* We are guaranteed to have aligned memory. */
 194	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
 195		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
 196}
 197
 198static void build_prologue(struct jit_ctx *ctx)
 199{
 200	u16 reg_set = saved_regs(ctx);
 201	u16 off;
 202
 203#ifdef CONFIG_FRAME_POINTER
 204	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
 205	emit(ARM_PUSH(reg_set), ctx);
 206	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
 207#else
 208	if (reg_set)
 209		emit(ARM_PUSH(reg_set), ctx);
 210#endif
 211
 212	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
 213		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);
 214
 215	if (ctx->seen & SEEN_DATA) {
 216		off = offsetof(struct sk_buff, data);
 217		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
 218		/* headlen = len - data_len */
 219		off = offsetof(struct sk_buff, len);
 220		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
 221		off = offsetof(struct sk_buff, data_len);
 222		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
 223		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
 224	}
 225
 226	if (ctx->flags & FLAG_NEED_X_RESET)
 227		emit(ARM_MOV_I(r_X, 0), ctx);
 228
 229	/* do not leak kernel data to userspace */
 230	if (bpf_needs_clear_a(&ctx->skf->insns[0]))
 231		emit(ARM_MOV_I(r_A, 0), ctx);
 232
 233	/* stack space for the BPF_MEM words */
 234	if (ctx->seen & SEEN_MEM)
 235		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
 236}
 237
 238static void build_epilogue(struct jit_ctx *ctx)
 239{
 240	u16 reg_set = saved_regs(ctx);
 241
 242	if (ctx->seen & SEEN_MEM)
 243		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
 244
 245	reg_set &= ~(1 << ARM_LR);
 246
 247#ifdef CONFIG_FRAME_POINTER
 248	/* the first instruction of the prologue was: mov ip, sp */
 249	reg_set &= ~(1 << ARM_IP);
 250	reg_set |= (1 << ARM_SP);
 251	emit(ARM_LDM(ARM_SP, reg_set), ctx);
 252#else
 253	if (reg_set) {
 254		if (ctx->seen & SEEN_CALL)
 255			reg_set |= 1 << ARM_PC;
 256		emit(ARM_POP(reg_set), ctx);
 257	}
 258
 259	if (!(ctx->seen & SEEN_CALL))
 260		emit(ARM_BX(ARM_LR), ctx);
 261#endif
 262}
 263
 264static int16_t imm8m(u32 x)
 265{
 266	u32 rot;
 267
 268	for (rot = 0; rot < 16; rot++)
 269		if ((x & ~ror32(0xff, 2 * rot)) == 0)
 270			return rol32(x, 2 * rot) | (rot << 8);
 271
 272	return -1;
 273}
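/*
 * Worked example (illustrative): for x = 0x00ff0000 the loop finds
 * rot = 8, since ror32(0xff, 16) == 0x00ff0000, and returns
 * rol32(x, 16) | (8 << 8) == 0xff | 0x800 == 0x8ff -- the same
 * encoded immediate mentioned in the emit_swap16() comment below.
 */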
 274
 275#if __LINUX_ARM_ARCH__ < 7
 276
 277static u16 imm_offset(u32 k, struct jit_ctx *ctx)
 278{
 279	unsigned i = 0, offset;
 280	u16 imm;
 281
 282	/* on the "fake" run we just count them (duplicates included) */
 283	if (ctx->target == NULL) {
 284		ctx->imm_count++;
 285		return 0;
 286	}
 287
 288	while ((i < ctx->imm_count) && ctx->imms[i]) {
 289		if (ctx->imms[i] == k)
 290			break;
 291		i++;
 292	}
 293
 294	if (ctx->imms[i] == 0)
 295		ctx->imms[i] = k;
 296
 297	/* constants go just after the epilogue */
 298	offset =  ctx->offsets[ctx->skf->len];
 299	offset += ctx->prologue_bytes;
 300	offset += ctx->epilogue_bytes;
 301	offset += i * 4;
 302
 303	ctx->target[offset / 4] = k;
 304
 305	/* PC in ARM mode == address of the instruction + 8 */
 306	imm = offset - (8 + ctx->idx * 4);
 307
 308	if (imm & ~0xfff) {
 309		/*
 310		 * literal pool is too far, signal it into flags. we
 311		 * can only detect it on the second pass unfortunately.
 312		 */
 313		ctx->flags |= FLAG_IMM_OVERFLOW;
 314		return 0;
 315	}
 316
 317	return imm;
 318}
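/*
 * Note (informational): ldr rd, [pc, #imm] only has a 12-bit offset
 * field, so a pooled constant more than 4095 bytes away from the load
 * cannot be addressed; the FLAG_IMM_OVERFLOW path above detects this
 * on the second pass and the filter is handed to the interpreter.
 */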
 319
 320#endif /* __LINUX_ARM_ARCH__ */
 321
 322/*
 323 * Move an immediate that's not an imm8m to a core register.
 324 */
 325static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
 326{
 327#if __LINUX_ARM_ARCH__ < 7
 328	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
 329#else
 330	emit(ARM_MOVW(rd, val & 0xffff), ctx);
 331	if (val > 0xffff)
 332		emit(ARM_MOVT(rd, val >> 16), ctx);
 333#endif
 334}
 335
 336static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
 337{
 338	int imm12 = imm8m(val);
 339
 340	if (imm12 >= 0)
 341		emit(ARM_MOV_I(rd, imm12), ctx);
 342	else
 343		emit_mov_i_no8m(rd, val, ctx);
 344}
 345
 346#if __LINUX_ARM_ARCH__ < 6
 347
 348static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
 349{
 350	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
 351	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
 352	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
 353	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
 354	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
 355	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
 356	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
 357	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
 358}
 359
 360static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
 361{
 362	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
 363	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
 364	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
 365}
 366
 367static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
 368{
 369	/* r_dst = (r_src << 8) | (r_src >> 8) */
 370	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
 371	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);
 372
 373	/*
 374	 * we need to mask out the bits set in r_dst[23:16] due to
 375	 * the first shift instruction.
 376	 *
 377	 * note that 0x8ff is the encoded immediate 0x00ff0000.
 378	 */
 379	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
 380}
 381
 382#else  /* ARMv6+ */
 383
 384static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
 385{
 386	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
 387#ifdef __LITTLE_ENDIAN
 388	_emit(cond, ARM_REV(r_res, r_res), ctx);
 389#endif
 390}
 391
 392static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
 393{
 394	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
 395#ifdef __LITTLE_ENDIAN
 396	_emit(cond, ARM_REV16(r_res, r_res), ctx);
 397#endif
 398}
 399
 400static inline void emit_swap16(u8 r_dst __maybe_unused,
 401			       u8 r_src __maybe_unused,
 402			       struct jit_ctx *ctx __maybe_unused)
 403{
 404#ifdef __LITTLE_ENDIAN
 405	emit(ARM_REV16(r_dst, r_src), ctx);
 406#endif
 407}
 408
 409#endif /* __LINUX_ARM_ARCH__ < 6 */
 410
 411
 412/* Compute the immediate value for a PC-relative branch. */
 413static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
 414{
 415	u32 imm;
 416
 417	if (ctx->target == NULL)
 418		return 0;
 419	/*
 420	 * BPF allows only forward jumps and the offset of the target is
 421	 * still the one computed during the first pass.
 422	 */
 423	imm  = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);
 424
 425	return imm >> 2;
 426}
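/*
 * Worked example (illustrative): if the target instruction starts at
 * byte 64 of the body and the branch is instruction 10 (byte 40), the
 * encoded immediate is (64 + prologue_bytes - (40 + 8)) >> 2; the
 * "+ 8" accounts for the ARM pipeline, where PC reads as the address
 * of the current instruction plus 8.
 */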
 427
 428#define OP_IMM3(op, r1, r2, imm_val, ctx)				\
 429	do {								\
 430		imm12 = imm8m(imm_val);					\
 431		if (imm12 < 0) {					\
 432			emit_mov_i_no8m(r_scratch, imm_val, ctx);	\
 433			emit(op ## _R((r1), (r2), r_scratch), ctx);	\
 434		} else {						\
 435			emit(op ## _I((r1), (r2), imm12), ctx);		\
 436		}							\
 437	} while (0)
 438
 439static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
 440{
 441	if (ctx->ret0_fp_idx >= 0) {
 442		_emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
 443		/* NOP to keep the size constant between passes */
 444		emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
 445	} else {
 446		_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
 447		_emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
 448	}
 449}
 450
 451static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
 452{
 453#if __LINUX_ARM_ARCH__ < 5
 454	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
 455
 456	if (elf_hwcap & HWCAP_THUMB)
 457		emit(ARM_BX(tgt_reg), ctx);
 458	else
 459		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
 460#else
 461	emit(ARM_BLX_R(tgt_reg), ctx);
 462#endif
 463}
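/*
 * Note (informational): on ARMv4, the "mov lr, pc" above stores the
 * address of the instruction *after* the following branch (PC reads
 * as current + 8), which is exactly the return address the callee
 * needs.
 */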
 464
 465static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx,
 466				int bpf_op)
 467{
 468#if __LINUX_ARM_ARCH__ == 7
 469	if (elf_hwcap & HWCAP_IDIVA) {
 470		if (bpf_op == BPF_DIV)
 471			emit(ARM_UDIV(rd, rm, rn), ctx);
 472		else {
 473			emit(ARM_UDIV(ARM_R3, rm, rn), ctx);
 474			emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx);
 475		}
 476		return;
 477	}
 478#endif
 479
 480	/*
 481	 * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
 482	 * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
 483	 * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
 484	 * before using it as a source for ARM_R1.
 485	 *
 486	 * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
 487	 * ARM_R5 (r_X) so there is no particular register overlap
 488	 * issues.
 489	 */
 490	if (rn != ARM_R1)
 491		emit(ARM_MOV_R(ARM_R1, rn), ctx);
 492	if (rm != ARM_R0)
 493		emit(ARM_MOV_R(ARM_R0, rm), ctx);
 494
 495	ctx->seen |= SEEN_CALL;
 496	emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? (u32)jit_udiv : (u32)jit_mod,
 497		   ctx);
 498	emit_blx_r(ARM_R3, ctx);
 499
 500	if (rd != ARM_R0)
 501		emit(ARM_MOV_R(rd, ARM_R0), ctx);
 502}
 503
 504static inline void update_on_xread(struct jit_ctx *ctx)
 505{
 506	if (!(ctx->seen & SEEN_X))
 507		ctx->flags |= FLAG_NEED_X_RESET;
 508
 509	ctx->seen |= SEEN_X;
 510}
 511
 512static int build_body(struct jit_ctx *ctx)
 513{
 514	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
 515	const struct bpf_prog *prog = ctx->skf;
 516	const struct sock_filter *inst;
 517	unsigned i, load_order, off, condt;
 518	int imm12;
 519	u32 k;
 520
 521	for (i = 0; i < prog->len; i++) {
 522		u16 code;
 523
 524		inst = &(prog->insns[i]);
 525		/* K as an immediate value operand */
 526		k = inst->k;
 527		code = bpf_anc_helper(inst);
 528
 529		/* compute offsets only in the fake pass */
 530		if (ctx->target == NULL)
 531			ctx->offsets[i] = ctx->idx * 4;
 532
 533		switch (code) {
 534		case BPF_LD | BPF_IMM:
 535			emit_mov_i(r_A, k, ctx);
 536			break;
 537		case BPF_LD | BPF_W | BPF_LEN:
 538			ctx->seen |= SEEN_SKB;
 539			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
 540			emit(ARM_LDR_I(r_A, r_skb,
 541				       offsetof(struct sk_buff, len)), ctx);
 542			break;
 543		case BPF_LD | BPF_MEM:
 544			/* A = scratch[k] */
 545			ctx->seen |= SEEN_MEM_WORD(k);
 546			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
 547			break;
 548		case BPF_LD | BPF_W | BPF_ABS:
 549			load_order = 2;
 550			goto load;
 551		case BPF_LD | BPF_H | BPF_ABS:
 552			load_order = 1;
 553			goto load;
 554		case BPF_LD | BPF_B | BPF_ABS:
 555			load_order = 0;
 556load:
 557			emit_mov_i(r_off, k, ctx);
 558load_common:
 559			ctx->seen |= SEEN_DATA | SEEN_CALL;
 560
 561			if (load_order > 0) {
 562				emit(ARM_SUB_I(r_scratch, r_skb_hl,
 563					       1 << load_order), ctx);
 564				emit(ARM_CMP_R(r_scratch, r_off), ctx);
 565				condt = ARM_COND_GE;
 566			} else {
 567				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
 568				condt = ARM_COND_HI;
 569			}
 570
 571			/*
 572			 * Test for a negative offset, but only if we are
 573			 * currently scheduled to take the fast path. This
 574			 * updates the flags so that the slowpath
 575			 * instructions are ignored if the offset is
 576			 * negative.
 577			 *
 578			 * For load_order == 0 the HI condition will
 579			 * make loads at offset 0 take the slow path too.
 580			 */
 581			_emit(condt, ARM_CMP_I(r_off, 0), ctx);
 582
 583			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
 584			      ctx);
 585
 586			if (load_order == 0)
 587				_emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
 588				      ctx);
 589			else if (load_order == 1)
 590				emit_load_be16(condt, r_A, r_scratch, ctx);
 591			else if (load_order == 2)
 592				emit_load_be32(condt, r_A, r_scratch, ctx);
 593
 594			_emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);
 595
 596			/* the slowpath */
 597			emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
 598			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
 599			/* the offset is already in R1 */
 600			emit_blx_r(ARM_R3, ctx);
 601			/* check the result of skb_copy_bits */
 602			emit(ARM_CMP_I(ARM_R1, 0), ctx);
 603			emit_err_ret(ARM_COND_NE, ctx);
 604			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
 605			break;
 606		case BPF_LD | BPF_W | BPF_IND:
 607			load_order = 2;
 608			goto load_ind;
 609		case BPF_LD | BPF_H | BPF_IND:
 610			load_order = 1;
 611			goto load_ind;
 612		case BPF_LD | BPF_B | BPF_IND:
 613			load_order = 0;
 614load_ind:
 615			update_on_xread(ctx);
 616			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
 617			goto load_common;
 618		case BPF_LDX | BPF_IMM:
 619			ctx->seen |= SEEN_X;
 620			emit_mov_i(r_X, k, ctx);
 621			break;
 622		case BPF_LDX | BPF_W | BPF_LEN:
 623			ctx->seen |= SEEN_X | SEEN_SKB;
 624			emit(ARM_LDR_I(r_X, r_skb,
 625				       offsetof(struct sk_buff, len)), ctx);
 626			break;
 627		case BPF_LDX | BPF_MEM:
 628			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
 629			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
 630			break;
 631		case BPF_LDX | BPF_B | BPF_MSH:
 632			/* x = ((*(frame + k)) & 0xf) << 2; */
 633			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
 634			/* the interpreter should deal with the negative K */
 635			if ((int)k < 0)
 636				return -1;
 637			/* offset in r1: we might have to take the slow path */
 638			emit_mov_i(r_off, k, ctx);
 639			emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
 640
 641			/* load in r0: common with the slowpath */
 642			_emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
 643						      ARM_R1), ctx);
 644			/*
 645			 * emit_mov_i() might generate one or two instructions,
 646			 * the same holds for emit_blx_r()
 647			 */
 648			_emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);
 649
 650			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
 651			/* r_off is r1 */
 652			emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
 653			emit_blx_r(ARM_R3, ctx);
 654			/* check the return value of skb_copy_bits */
 655			emit(ARM_CMP_I(ARM_R1, 0), ctx);
 656			emit_err_ret(ARM_COND_NE, ctx);
 657
 658			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
 659			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
 660			break;
 661		case BPF_ST:
 662			ctx->seen |= SEEN_MEM_WORD(k);
 663			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
 664			break;
 665		case BPF_STX:
 666			update_on_xread(ctx);
 667			ctx->seen |= SEEN_MEM_WORD(k);
 668			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
 669			break;
 670		case BPF_ALU | BPF_ADD | BPF_K:
 671			/* A += K */
 672			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
 673			break;
 674		case BPF_ALU | BPF_ADD | BPF_X:
 675			update_on_xread(ctx);
 676			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
 677			break;
 678		case BPF_ALU | BPF_SUB | BPF_K:
 679			/* A -= K */
 680			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
 681			break;
 682		case BPF_ALU | BPF_SUB | BPF_X:
 683			update_on_xread(ctx);
 684			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
 685			break;
 686		case BPF_ALU | BPF_MUL | BPF_K:
 687			/* A *= K */
 688			emit_mov_i(r_scratch, k, ctx);
 689			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
 690			break;
 691		case BPF_ALU | BPF_MUL | BPF_X:
 692			update_on_xread(ctx);
 693			emit(ARM_MUL(r_A, r_A, r_X), ctx);
 694			break;
 695		case BPF_ALU | BPF_DIV | BPF_K:
 696			if (k == 1)
 697				break;
 698			emit_mov_i(r_scratch, k, ctx);
 699			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV);
 700			break;
 701		case BPF_ALU | BPF_DIV | BPF_X:
 702			update_on_xread(ctx);
 703			emit(ARM_CMP_I(r_X, 0), ctx);
 704			emit_err_ret(ARM_COND_EQ, ctx);
 705			emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV);
 706			break;
 707		case BPF_ALU | BPF_MOD | BPF_K:
 708			if (k == 1) {
 709				emit_mov_i(r_A, 0, ctx);
 710				break;
 711			}
 712			emit_mov_i(r_scratch, k, ctx);
 713			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD);
 714			break;
 715		case BPF_ALU | BPF_MOD | BPF_X:
 716			update_on_xread(ctx);
 717			emit(ARM_CMP_I(r_X, 0), ctx);
 718			emit_err_ret(ARM_COND_EQ, ctx);
 719			emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD);
 720			break;
 721		case BPF_ALU | BPF_OR | BPF_K:
 722			/* A |= K */
 723			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
 724			break;
 725		case BPF_ALU | BPF_OR | BPF_X:
 726			update_on_xread(ctx);
 727			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
 728			break;
 729		case BPF_ALU | BPF_XOR | BPF_K:
 730			/* A ^= K; */
 731			OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
 732			break;
 733		case BPF_ANC | SKF_AD_ALU_XOR_X:
 734		case BPF_ALU | BPF_XOR | BPF_X:
 735			/* A ^= X */
 736			update_on_xread(ctx);
 737			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
 738			break;
 739		case BPF_ALU | BPF_AND | BPF_K:
 740			/* A &= K */
 741			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
 742			break;
 743		case BPF_ALU | BPF_AND | BPF_X:
 744			update_on_xread(ctx);
 745			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
 746			break;
 747		case BPF_ALU | BPF_LSH | BPF_K:
 748			if (unlikely(k > 31))
 749				return -1;
 750			emit(ARM_LSL_I(r_A, r_A, k), ctx);
 751			break;
 752		case BPF_ALU | BPF_LSH | BPF_X:
 753			update_on_xread(ctx);
 754			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
 755			break;
 756		case BPF_ALU | BPF_RSH | BPF_K:
 757			if (unlikely(k > 31))
 758				return -1;
 759			if (k)
 760				emit(ARM_LSR_I(r_A, r_A, k), ctx);
 761			break;
 762		case BPF_ALU | BPF_RSH | BPF_X:
 763			update_on_xread(ctx);
 764			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
 765			break;
 766		case BPF_ALU | BPF_NEG:
 767			/* A = -A */
 768			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
 769			break;
 770		case BPF_JMP | BPF_JA:
 771			/* pc += K */
 772			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
 773			break;
 774		case BPF_JMP | BPF_JEQ | BPF_K:
 775			/* pc += (A == K) ? pc->jt : pc->jf */
 776			condt  = ARM_COND_EQ;
 777			goto cmp_imm;
 778		case BPF_JMP | BPF_JGT | BPF_K:
 779			/* pc += (A > K) ? pc->jt : pc->jf */
 780			condt  = ARM_COND_HI;
 781			goto cmp_imm;
 782		case BPF_JMP | BPF_JGE | BPF_K:
 783			/* pc += (A >= K) ? pc->jt : pc->jf */
 784			condt  = ARM_COND_HS;
 785cmp_imm:
 786			imm12 = imm8m(k);
 787			if (imm12 < 0) {
 788				emit_mov_i_no8m(r_scratch, k, ctx);
 789				emit(ARM_CMP_R(r_A, r_scratch), ctx);
 790			} else {
 791				emit(ARM_CMP_I(r_A, imm12), ctx);
 792			}
 793cond_jump:
 794			if (inst->jt)
 795				_emit(condt, ARM_B(b_imm(i + inst->jt + 1,
 796						   ctx)), ctx);
 797			if (inst->jf)
 798				_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
 799							     ctx)), ctx);
 800			break;
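		/*
		 * Example (illustrative): for "jeq #5, jt=2, jf=0" the
		 * code above emits "cmp r4, #5" followed by a single
		 * "beq" to instruction i + 3; no branch is emitted for
		 * the fall-through jf == 0 case.
		 */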
 801		case BPF_JMP | BPF_JEQ | BPF_X:
 802			/* pc += (A == X) ? pc->jt : pc->jf */
 803			condt   = ARM_COND_EQ;
 804			goto cmp_x;
 805		case BPF_JMP | BPF_JGT | BPF_X:
 806			/* pc += (A > X) ? pc->jt : pc->jf */
 807			condt   = ARM_COND_HI;
 808			goto cmp_x;
 809		case BPF_JMP | BPF_JGE | BPF_X:
 810			/* pc += (A >= X) ? pc->jt : pc->jf */
 811			condt   = ARM_COND_CS;
 812cmp_x:
 813			update_on_xread(ctx);
 814			emit(ARM_CMP_R(r_A, r_X), ctx);
 815			goto cond_jump;
 816		case BPF_JMP | BPF_JSET | BPF_K:
 817			/* pc += (A & K) ? pc->jt : pc->jf */
 818			condt  = ARM_COND_NE;
 819			/* not set iff all zeroes iff Z==1 iff EQ */
 820
 821			imm12 = imm8m(k);
 822			if (imm12 < 0) {
 823				emit_mov_i_no8m(r_scratch, k, ctx);
 824				emit(ARM_TST_R(r_A, r_scratch), ctx);
 825			} else {
 826				emit(ARM_TST_I(r_A, imm12), ctx);
 827			}
 828			goto cond_jump;
 829		case BPF_JMP | BPF_JSET | BPF_X:
 830			/* pc += (A & X) ? pc->jt : pc->jf */
 831			update_on_xread(ctx);
 832			condt  = ARM_COND_NE;
 833			emit(ARM_TST_R(r_A, r_X), ctx);
 834			goto cond_jump;
 835		case BPF_RET | BPF_A:
 836			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
 837			goto b_epilogue;
 838		case BPF_RET | BPF_K:
 839			if ((k == 0) && (ctx->ret0_fp_idx < 0))
 840				ctx->ret0_fp_idx = i;
 841			emit_mov_i(ARM_R0, k, ctx);
 842b_epilogue:
 843			if (i != ctx->skf->len - 1)
 844				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
 845			break;
 846		case BPF_MISC | BPF_TAX:
 847			/* X = A */
 848			ctx->seen |= SEEN_X;
 849			emit(ARM_MOV_R(r_X, r_A), ctx);
 850			break;
 851		case BPF_MISC | BPF_TXA:
 852			/* A = X */
 853			update_on_xread(ctx);
 854			emit(ARM_MOV_R(r_A, r_X), ctx);
 855			break;
 856		case BPF_ANC | SKF_AD_PROTOCOL:
 857			/* A = ntohs(skb->protocol) */
 858			ctx->seen |= SEEN_SKB;
 859			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 860						  protocol) != 2);
 861			off = offsetof(struct sk_buff, protocol);
 862			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
 863			emit_swap16(r_A, r_scratch, ctx);
 864			break;
 865		case BPF_ANC | SKF_AD_CPU:
 866			/* r_scratch = current_thread_info() */
 867			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
 868			/* A = current_thread_info()->cpu */
 869			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
 870			off = offsetof(struct thread_info, cpu);
 871			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
 872			break;
 873		case BPF_ANC | SKF_AD_IFINDEX:
 874		case BPF_ANC | SKF_AD_HATYPE:
 875			/* A = skb->dev->ifindex */
 876			/* A = skb->dev->type */
 877			ctx->seen |= SEEN_SKB;
 878			off = offsetof(struct sk_buff, dev);
 879			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
 880
 881			emit(ARM_CMP_I(r_scratch, 0), ctx);
 882			emit_err_ret(ARM_COND_EQ, ctx);
 883
 884			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
 885						  ifindex) != 4);
 886			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
 887						  type) != 2);
 888
 889			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
 890				off = offsetof(struct net_device, ifindex);
 891				emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
 892			} else {
 893				/*
 894				 * offset of field "type" in "struct
 895				 * net_device" is above what can be
 896				 * used in the ldrh rd, [rn, #imm]
 897				 * instruction, so load the offset in
 898				 * a register and use ldrh rd, [rn, rm]
 899				 */
 900				off = offsetof(struct net_device, type);
 901				emit_mov_i(ARM_R3, off, ctx);
 902				emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
 903			}
 904			break;
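		/*
		 * Note (informational): ldrh rd, [rn, #imm] encodes only
		 * an 8-bit immediate (0..255), which is why the HATYPE
		 * path above switches to the register-offset form once
		 * offsetof(struct net_device, type) exceeds that range.
		 */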
 905		case BPF_ANC | SKF_AD_MARK:
 906			ctx->seen |= SEEN_SKB;
 907			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 908			off = offsetof(struct sk_buff, mark);
 909			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
 910			break;
 911		case BPF_ANC | SKF_AD_RXHASH:
 912			ctx->seen |= SEEN_SKB;
 913			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
 914			off = offsetof(struct sk_buff, hash);
 915			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
 916			break;
 917		case BPF_ANC | SKF_AD_VLAN_TAG:
 918		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
 919			ctx->seen |= SEEN_SKB;
 920			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
 921			off = offsetof(struct sk_buff, vlan_tci);
 922			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
 923			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
 924				OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
 925			else {
 926				OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
 927				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
 928			}
 929			break;
 930		case BPF_ANC | SKF_AD_PKTTYPE:
 931			ctx->seen |= SEEN_SKB;
 932			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 933						  __pkt_type_offset[0]) != 1);
 934			off = PKT_TYPE_OFFSET();
 935			emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
 936			emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
 937#ifdef __BIG_ENDIAN_BITFIELD
 938			emit(ARM_LSR_I(r_A, r_A, 5), ctx);
 939#endif
 940			break;
 941		case BPF_ANC | SKF_AD_QUEUE:
 942			ctx->seen |= SEEN_SKB;
 943			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 944						  queue_mapping) != 2);
 945			BUILD_BUG_ON(offsetof(struct sk_buff,
 946					      queue_mapping) > 0xff);
 947			off = offsetof(struct sk_buff, queue_mapping);
 948			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
 949			break;
 950		case BPF_ANC | SKF_AD_PAY_OFFSET:
 951			ctx->seen |= SEEN_SKB | SEEN_CALL;
 952
 953			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
 954			emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
 955			emit_blx_r(ARM_R3, ctx);
 956			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
 957			break;
 958		case BPF_LDX | BPF_W | BPF_ABS:
 959			/*
 960			 * load a 32bit word from struct seccomp_data.
 961			 * seccomp_check_filter() will already have checked
 962			 * that k is 32bit aligned and lies within the
 963			 * struct seccomp_data.
 964			 */
 965			ctx->seen |= SEEN_SKB;
 966			emit(ARM_LDR_I(r_A, r_skb, k), ctx);
 967			break;
 968		default:
 969			return -1;
 970		}
 971
 972		if (ctx->flags & FLAG_IMM_OVERFLOW)
 973			/*
 974			 * this instruction generated an overflow when
 975			 * trying to access the literal pool, so
 976			 * delegate this filter to the kernel interpreter.
 977			 */
 978			return -1;
 979	}
 980
 981	/* compute offsets only during the first pass */
 982	if (ctx->target == NULL)
 983		ctx->offsets[i] = ctx->idx * 4;
 984
 985	return 0;
 986}
 987
 988
 989void bpf_jit_compile(struct bpf_prog *fp)
 990{
 991	struct bpf_binary_header *header;
 992	struct jit_ctx ctx;
 993	unsigned tmp_idx;
 994	unsigned alloc_size;
 995	u8 *target_ptr;
 996
 997	if (!bpf_jit_enable)
 998		return;
 999
1000	memset(&ctx, 0, sizeof(ctx));
1001	ctx.skf		= fp;
1002	ctx.ret0_fp_idx = -1;
1003
1004	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
1005	if (ctx.offsets == NULL)
1006		return;
1007
1008	/* fake pass to fill in the ctx->seen */
1009	if (unlikely(build_body(&ctx)))
1010		goto out;
1011
1012	tmp_idx = ctx.idx;
1013	build_prologue(&ctx);
1014	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
1015
1016#if __LINUX_ARM_ARCH__ < 7
1017	tmp_idx = ctx.idx;
1018	build_epilogue(&ctx);
1019	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
1020
1021	ctx.idx += ctx.imm_count;
1022	if (ctx.imm_count) {
1023		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
1024		if (ctx.imms == NULL)
1025			goto out;
1026	}
1027#else
1028	/* there's nothing after the epilogue on ARMv7 */
1029	build_epilogue(&ctx);
1030#endif
1031	alloc_size = 4 * ctx.idx;
1032	header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
1033				      4, jit_fill_hole);
1034	if (header == NULL)
1035		goto out;
1036
1037	ctx.target = (u32 *) target_ptr;
1038	ctx.idx = 0;
1039
1040	build_prologue(&ctx);
1041	if (build_body(&ctx) < 0) {
1042#if __LINUX_ARM_ARCH__ < 7
1043		if (ctx.imm_count)
1044			kfree(ctx.imms);
1045#endif
1046		bpf_jit_binary_free(header);
1047		goto out;
1048	}
1049	build_epilogue(&ctx);
1050
1051	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
1052
1053#if __LINUX_ARM_ARCH__ < 7
1054	if (ctx.imm_count)
1055		kfree(ctx.imms);
1056#endif
1057
1058	if (bpf_jit_enable > 1)
1059		/* there are 2 passes here */
1060		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
1061
1062	set_memory_ro((unsigned long)header, header->pages);
1063	fp->bpf_func = (void *)ctx.target;
1064	fp->jited = 1;
1065out:
1066	kfree(ctx.offsets);
1067	return;
1068}
1069
1070void bpf_jit_free(struct bpf_prog *fp)
1071{
1072	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
1073	struct bpf_binary_header *header = (void *)addr;
1074
1075	if (!fp->jited)
1076		goto free_filter;
1077
1078	set_memory_rw(addr, header->pages);
1079	bpf_jit_binary_free(header);
1080
1081free_filter:
1082	bpf_prog_unlock_free(fp);
1083}