   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Just-In-Time compiler for eBPF filters on 32bit ARM
   4 *
   5 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
   6 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
   7 */
   8
   9#include <linux/bpf.h>
  10#include <linux/bitops.h>
  11#include <linux/compiler.h>
  12#include <linux/errno.h>
  13#include <linux/filter.h>
  14#include <linux/netdevice.h>
  15#include <linux/string.h>
  16#include <linux/slab.h>
  17#include <linux/if_vlan.h>
  18
  19#include <asm/cacheflush.h>
  20#include <asm/hwcap.h>
  21#include <asm/opcodes.h>
  22#include <asm/system_info.h>
  23
  24#include "bpf_jit_32.h"
  25
  26/*
  27 * eBPF prog stack layout:
  28 *
  29 *                         high
  30 * original ARM_SP =>     +-----+
  31 *                        |     | callee saved registers
  32 *                        +-----+ <= (BPF_FP + SCRATCH_SIZE)
  33 *                        | ... | eBPF JIT scratch space
  34 * eBPF fp register =>    +-----+
  35 *   (BPF_FP)             | ... | eBPF prog stack
  36 *                        +-----+
  37 *                        |RSVD | JIT scratchpad
  38 * current ARM_SP =>      +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
  39 *                        |     |
  40 *                        | ... | Function call stack
  41 *                        |     |
  42 *                        +-----+
  43 *                          low
  44 *
  45 * The set of callee-saved registers depends on whether frame pointers are enabled.
  46 * With frame pointers (to be compliant with the ABI):
  47 *
  48 *                              high
  49 * original ARM_SP =>     +--------------+ \
  50 *                        |      pc      | |
  51 * current ARM_FP =>      +--------------+ } callee saved registers
  52 *                        |r4-r9,fp,ip,lr| |
  53 *                        +--------------+ /
  54 *                              low
  55 *
  56 * Without frame pointers:
  57 *
  58 *                              high
  59 * original ARM_SP =>     +--------------+
  60 *                        |  r4-r9,fp,lr | callee saved registers
  61 * current ARM_FP =>      +--------------+
  62 *                              low
  63 *
  64 * When popping registers off the stack at the end of a BPF function, we
  65 * reference them via the current ARM_FP register.
  66 */
  67#define CALLEE_MASK	(1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
  68			 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
  69			 1 << ARM_FP)
  70#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
  71#define CALLEE_POP_MASK  (CALLEE_MASK | 1 << ARM_PC)
  72
  73enum {
  74	/* Stack layout - these are offsets from (top of stack - 4) */
  75	BPF_R2_HI,
  76	BPF_R2_LO,
  77	BPF_R3_HI,
  78	BPF_R3_LO,
  79	BPF_R4_HI,
  80	BPF_R4_LO,
  81	BPF_R5_HI,
  82	BPF_R5_LO,
  83	BPF_R7_HI,
  84	BPF_R7_LO,
  85	BPF_R8_HI,
  86	BPF_R8_LO,
  87	BPF_R9_HI,
  88	BPF_R9_LO,
  89	BPF_FP_HI,
  90	BPF_FP_LO,
  91	BPF_TC_HI,
  92	BPF_TC_LO,
  93	BPF_AX_HI,
  94	BPF_AX_LO,
  95	/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
  96	 * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
  97	 * BPF_REG_FP and the tail call count.
  98	 */
  99	BPF_JIT_SCRATCH_REGS,
 100};
 101
 102/*
 103 * Negative "register" values indicate the register is stored on the stack
 104 * and give the offset from the top of the eBPF JIT scratch space.
 105 */
 106#define STACK_OFFSET(k)	(-4 - (k) * 4)
 107#define SCRATCH_SIZE	(BPF_JIT_SCRATCH_REGS * 4)
 108
 109#ifdef CONFIG_FRAME_POINTER
 110#define EBPF_SCRATCH_TO_ARM_FP(x) ((x) - 4 * hweight16(CALLEE_PUSH_MASK) - 4)
 111#else
 112#define EBPF_SCRATCH_TO_ARM_FP(x) (x)
 113#endif
 114
 115#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)	/* TEMP Register 1 */
 116#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)	/* TEMP Register 2 */
 117#define TCALL_CNT	(MAX_BPF_JIT_REG + 2)	/* Tail Call Count */
 118
 119#define FLAG_IMM_OVERFLOW	(1 << 0)
 120
 121/*
 122 * Map eBPF registers to ARM 32bit registers or stack scratch space.
 123 *
 124 * 1. The first argument is passed in ARM 32-bit registers; the remaining
 125 * arguments are passed via the stack scratch space.
 126 * 2. The first callee-saved register pair is kept in ARM 32-bit registers;
 127 * the remaining ones are mapped to scratch space on the stack.
 128 * 3. We need two 64 bit temp registers to do complex operations on eBPF
 129 * registers.
 130 *
 131 * As the eBPF registers are all 64 bits wide and ARM has only 32-bit
 132 * registers, we map each eBPF register to a pair of ARM 32-bit registers
 133 * or to scratch memory, and build each 64-bit eBPF register from those.
 134 *
 135 */
 136static const s8 bpf2a32[][2] = {
 137	/* return value from in-kernel function, and exit value from eBPF */
 138	[BPF_REG_0] = {ARM_R1, ARM_R0},
 139	/* arguments from eBPF program to in-kernel function */
 140	[BPF_REG_1] = {ARM_R3, ARM_R2},
 141	/* Stored on stack scratch space */
 142	[BPF_REG_2] = {STACK_OFFSET(BPF_R2_HI), STACK_OFFSET(BPF_R2_LO)},
 143	[BPF_REG_3] = {STACK_OFFSET(BPF_R3_HI), STACK_OFFSET(BPF_R3_LO)},
 144	[BPF_REG_4] = {STACK_OFFSET(BPF_R4_HI), STACK_OFFSET(BPF_R4_LO)},
 145	[BPF_REG_5] = {STACK_OFFSET(BPF_R5_HI), STACK_OFFSET(BPF_R5_LO)},
 146	/* callee saved registers that in-kernel function will preserve */
 147	[BPF_REG_6] = {ARM_R5, ARM_R4},
 148	/* Stored on stack scratch space */
 149	[BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)},
 150	[BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
 151	[BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
 152	/* Read-only frame pointer used to access the stack */
 153	[BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)},
 154	/* Temporary registers for the internal BPF JIT; can be used
 155	 * for constant blinding and other purposes.
 156	 */
 157	[TMP_REG_1] = {ARM_R7, ARM_R6},
 158	[TMP_REG_2] = {ARM_R9, ARM_R8},
 159	/* Tail call count. Stored on stack scratch space. */
 160	[TCALL_CNT] = {STACK_OFFSET(BPF_TC_HI), STACK_OFFSET(BPF_TC_LO)},
 161	/* temporary register for blinding constants.
 162	 * Stored on stack scratch space.
 163	 */
 164	[BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
 165};
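
/*
 * Illustrative sketch (not part of the original file): how to read the
 * mapping above.  A negative entry means that half of the eBPF register
 * lives in the JIT scratch space on the stack rather than in a core
 * register; is_stacked() further down performs the same test.
 */
static inline bool example_reg_half_is_stacked(int bpf_reg)
{
	return bpf2a32[bpf_reg][1] < 0;		/* e.g. true for BPF_REG_7 */
}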
 166
 167#define	dst_lo	dst[1]
 168#define dst_hi	dst[0]
 169#define src_lo	src[1]
 170#define src_hi	src[0]
 171
 172/*
 173 * JIT Context:
 174 *
 175 * prog			:	bpf_prog
 176 * idx			:	index of the current last JITed instruction.
 177 * prologue_bytes	:	number of bytes used in the prologue.
 178 * epilogue_offset	:	offset at which the epilogue starts.
 179 * offsets		:	array of eBPF instruction offsets in
 180 *				JITed code.
 181 * target		:	final JITed code.
 182 * epilogue_bytes	:	number of bytes used in the epilogue.
 183 * imm_count		:	number of constants placed in the
 184 *				literal pool.
 185 * imms			:	array of literal pool constant values.
 186 */
 187
 188struct jit_ctx {
 189	const struct bpf_prog *prog;
 190	unsigned int idx;
 191	unsigned int prologue_bytes;
 192	unsigned int epilogue_offset;
 193	unsigned int cpu_architecture;
 194	u32 flags;
 195	u32 *offsets;
 196	u32 *target;
 197	u32 stack_size;
 198#if __LINUX_ARM_ARCH__ < 7
 199	u16 epilogue_bytes;
 200	u16 imm_count;
 201	u32 *imms;
 202#endif
 203};
 204
 205/*
 206 * Wrappers which handle both OABI and EABI and ensure Thumb-2 interworking
 207 * (where assembly routines like __aeabi_uidiv could otherwise cause problems).
 208 */
 209static u32 jit_udiv32(u32 dividend, u32 divisor)
 210{
 211	return dividend / divisor;
 212}
 213
 214static u32 jit_mod32(u32 dividend, u32 divisor)
 215{
 216	return dividend % divisor;
 217}
 218
 219static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
 220{
 221	inst |= (cond << 28);
 222	inst = __opcode_to_mem_arm(inst);
 223
 224	if (ctx->target != NULL)
 225		ctx->target[ctx->idx] = inst;
 226
 227	ctx->idx++;
 228}
 229
 230/*
 231 * Emit an instruction that will be executed unconditionally.
 232 */
 233static inline void emit(u32 inst, struct jit_ctx *ctx)
 234{
 235	_emit(ARM_COND_AL, inst, ctx);
 236}
 237
 238/*
 239 * This is rather horrid, but necessary to convert an integer constant
 240 * to an immediate operand for the opcodes, and be able to detect at
 241 * build time whether the constant can't be converted (iow, usable in
 242 * BUILD_BUG_ON()).
 243 */
 244#define imm12val(v, s) (rol32(v, (s)) | (s) << 7)
 245#define const_imm8m(x)					\
 246	({ int r;					\
 247	   u32 v = (x);					\
 248	   if (!(v & ~0x000000ff))			\
 249		r = imm12val(v, 0);			\
 250	   else if (!(v & ~0xc000003f))			\
 251		r = imm12val(v, 2);			\
 252	   else if (!(v & ~0xf000000f))			\
 253		r = imm12val(v, 4);			\
 254	   else if (!(v & ~0xfc000003))			\
 255		r = imm12val(v, 6);			\
 256	   else if (!(v & ~0xff000000))			\
 257		r = imm12val(v, 8);			\
 258	   else if (!(v & ~0x3fc00000))			\
 259		r = imm12val(v, 10);			\
 260	   else if (!(v & ~0x0ff00000))			\
 261		r = imm12val(v, 12);			\
 262	   else if (!(v & ~0x03fc0000))			\
 263		r = imm12val(v, 14);			\
 264	   else if (!(v & ~0x00ff0000))			\
 265		r = imm12val(v, 16);			\
 266	   else if (!(v & ~0x003fc000))			\
 267		r = imm12val(v, 18);			\
 268	   else if (!(v & ~0x000ff000))			\
 269		r = imm12val(v, 20);			\
 270	   else if (!(v & ~0x0003fc00))			\
 271		r = imm12val(v, 22);			\
 272	   else if (!(v & ~0x0000ff00))			\
 273		r = imm12val(v, 24);			\
 274	   else if (!(v & ~0x00003fc0))			\
 275		r = imm12val(v, 26);			\
 276	   else if (!(v & ~0x00000ff0))			\
 277		r = imm12val(v, 28);			\
 278	   else if (!(v & ~0x000003fc))			\
 279		r = imm12val(v, 30);			\
 280	   else						\
 281		r = -1;					\
 282	   r; })
 283
 284/*
 286 * Checks whether an immediate value can be encoded as an imm12 (12-bit) rotated value.
 286 */
 287static int imm8m(u32 x)
 288{
 289	u32 rot;
 290
 291	for (rot = 0; rot < 16; rot++)
 292		if ((x & ~ror32(0xff, 2 * rot)) == 0)
 293			return rol32(x, 2 * rot) | (rot << 8);
 294	return -1;
 295}
 296
 297#define imm8m(x) (__builtin_constant_p(x) ? const_imm8m(x) : imm8m(x))
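
/*
 * Illustrative sketch (assumption, not part of the original file): an ARM
 * rotated immediate packs an 8-bit value with a 4-bit rotation field, and
 * the hardware rotates the value right by twice the rotation field, so
 * decoding reverses what imm8m() produces:
 */
static inline u32 example_decode_imm8m(u32 imm12)
{
	return ror32(imm12 & 0xff, 2 * ((imm12 >> 8) & 0xf));
}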
 298
 299static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12)
 300{
 301	op |= rt << 12 | rn << 16;
 302	if (imm12 >= 0)
 303		op |= ARM_INST_LDST__U;
 304	else
 305		imm12 = -imm12;
 306	return op | (imm12 & ARM_INST_LDST__IMM12);
 307}
 308
 309static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8)
 310{
 311	op |= rt << 12 | rn << 16;
 312	if (imm8 >= 0)
 313		op |= ARM_INST_LDST__U;
 314	else
 315		imm8 = -imm8;
 316	return op | (imm8 & 0xf0) << 4 | (imm8 & 0x0f);
 317}
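
/*
 * Illustrative sketch (assumption): LDRD/STRD/LDRH/STRH split their 8-bit
 * offset across two nibbles of the instruction word; this reverses the
 * merge performed by arm_bpf_ldst_imm8() above.
 */
static inline s16 example_decode_imm8(u32 op)
{
	s16 off = ((op >> 4) & 0xf0) | (op & 0x0f);

	return (op & ARM_INST_LDST__U) ? off : -off;
}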
 318
 319#define ARM_LDR_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off)
 320#define ARM_LDRB_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off)
 321#define ARM_LDRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off)
 322#define ARM_LDRH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off)
 323
 324#define ARM_STR_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off)
 325#define ARM_STRB_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off)
 326#define ARM_STRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off)
 327#define ARM_STRH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off)
 328
 329/*
 330 * Initializes the JIT space with undefined instructions.
 331 */
 332static void jit_fill_hole(void *area, unsigned int size)
 333{
 334	u32 *ptr;
 335	/* We are guaranteed to have aligned memory. */
 336	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
 337		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
 338}
 339
 340#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
 341/* EABI requires the stack to be aligned to 64-bit boundaries */
 342#define STACK_ALIGNMENT	8
 343#else
 344/* Stack must be aligned to 32-bit boundaries */
 345#define STACK_ALIGNMENT	4
 346#endif
 347
 348/* total stack size used in JITed code */
 349#define _STACK_SIZE	(ctx->prog->aux->stack_depth + SCRATCH_SIZE)
 350#define STACK_SIZE	ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
 351
 352#if __LINUX_ARM_ARCH__ < 7
 353
 354static u16 imm_offset(u32 k, struct jit_ctx *ctx)
 355{
 356	unsigned int i = 0, offset;
 357	u16 imm;
 358
 359	/* on the "fake" run we just count them (duplicates included) */
 360	if (ctx->target == NULL) {
 361		ctx->imm_count++;
 362		return 0;
 363	}
 364
 365	while ((i < ctx->imm_count) && ctx->imms[i]) {
 366		if (ctx->imms[i] == k)
 367			break;
 368		i++;
 369	}
 370
 371	if (ctx->imms[i] == 0)
 372		ctx->imms[i] = k;
 373
 374	/* constants go just after the epilogue */
 375	offset =  ctx->offsets[ctx->prog->len - 1] * 4;
 376	offset += ctx->prologue_bytes;
 377	offset += ctx->epilogue_bytes;
 378	offset += i * 4;
 379
 380	ctx->target[offset / 4] = k;
 381
 382	/* PC in ARM mode == address of the instruction + 8 */
 383	imm = offset - (8 + ctx->idx * 4);
 384
 385	if (imm & ~0xfff) {
 386		/*
 387		 * The literal pool is too far away; signal it via the flags.
 388		 * Unfortunately we can only detect this on the second pass.
 389		 */
 390		ctx->flags |= FLAG_IMM_OVERFLOW;
 391		return 0;
 392	}
 393
 394	return imm;
 395}
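
/*
 * Illustrative sketch (assumption): in ARM mode the PC reads as the address
 * of the current instruction plus 8, so a pool constant at byte `offset` is
 * reachable from instruction index `idx` only when the displacement fits
 * the 12-bit LDR immediate, which is what the overflow check above tests:
 */
static inline bool example_pool_reachable(unsigned int offset,
					  unsigned int idx)
{
	return !((offset - (8 + idx * 4)) & ~0xfffU);
}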
 396
 397#endif /* __LINUX_ARM_ARCH__ */
 398
 399static inline int bpf2a32_offset(int bpf_to, int bpf_from,
 400				 const struct jit_ctx *ctx) {
 401	int to, from;
 402
 403	if (ctx->target == NULL)
 404		return 0;
 405	to = ctx->offsets[bpf_to];
 406	from = ctx->offsets[bpf_from];
 407
 408	return to - from - 1;
 409}
 410
 411/*
 412 * Move an immediate that's not an imm8m to a core register.
 413 */
 414static inline void emit_mov_i_no8m(const u8 rd, u32 val, struct jit_ctx *ctx)
 415{
 416#if __LINUX_ARM_ARCH__ < 7
 417	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
 418#else
 419	emit(ARM_MOVW(rd, val & 0xffff), ctx);
 420	if (val > 0xffff)
 421		emit(ARM_MOVT(rd, val >> 16), ctx);
 422#endif
 423}
 424
 425static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
 426{
 427	int imm12 = imm8m(val);
 428
 429	if (imm12 >= 0)
 430		emit(ARM_MOV_I(rd, imm12), ctx);
 431	else
 432		emit_mov_i_no8m(rd, val, ctx);
 433}
 434
 435static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
 436{
 437	if (elf_hwcap & HWCAP_THUMB)
 438		emit(ARM_BX(tgt_reg), ctx);
 439	else
 440		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
 441}
 442
 443static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
 444{
 445#if __LINUX_ARM_ARCH__ < 5
 446	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
 447	emit_bx_r(tgt_reg, ctx);
 448#else
 449	emit(ARM_BLX_R(tgt_reg), ctx);
 450#endif
 451}
 452
 453static inline int epilogue_offset(const struct jit_ctx *ctx)
 454{
 455	int to, from;
 456	/* No need for 1st dummy run */
 457	if (ctx->target == NULL)
 458		return 0;
 459	to = ctx->epilogue_offset;
 460	from = ctx->idx;
 461
 462	return to - from - 2;
 463}
 464
 465static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
 466{
 467	const s8 *tmp = bpf2a32[TMP_REG_1];
 468
 469#if __LINUX_ARM_ARCH__ == 7
 470	if (elf_hwcap & HWCAP_IDIVA) {
 471		if (op == BPF_DIV)
 472			emit(ARM_UDIV(rd, rm, rn), ctx);
 473		else {
 474			emit(ARM_UDIV(ARM_IP, rm, rn), ctx);
 475			emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx);
 476		}
 477		return;
 478	}
 479#endif
 480
 481	/*
 482	 * For BPF_ALU | BPF_DIV | BPF_K instructions:
 483	 * ARM_R0 and ARM_R1 hold the first argument of the BPF
 484	 * function, so we save them on the caller side to keep
 485	 * them from being clobbered within the callee.
 486	 * After the callee returns, we restore ARM_R0 and
 487	 * ARM_R1.
 488	 */
 489	if (rn != ARM_R1) {
 490		emit(ARM_MOV_R(tmp[0], ARM_R1), ctx);
 491		emit(ARM_MOV_R(ARM_R1, rn), ctx);
 492	}
 493	if (rm != ARM_R0) {
 494		emit(ARM_MOV_R(tmp[1], ARM_R0), ctx);
 495		emit(ARM_MOV_R(ARM_R0, rm), ctx);
 496	}
 497
 498	/* Call appropriate function */
 499	emit_mov_i(ARM_IP, op == BPF_DIV ?
 500		   (u32)jit_udiv32 : (u32)jit_mod32, ctx);
 501	emit_blx_r(ARM_IP, ctx);
 502
 503	/* Save return value */
 504	if (rd != ARM_R0)
 505		emit(ARM_MOV_R(rd, ARM_R0), ctx);
 506
 507	/* Restore ARM_R0 and ARM_R1 */
 508	if (rn != ARM_R1)
 509		emit(ARM_MOV_R(ARM_R1, tmp[0]), ctx);
 510	if (rm != ARM_R0)
 511		emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
 512}
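
/*
 * Illustrative sketch (not part of the original file): the UDIV/MLS pair
 * above derives the modulus from the quotient using the identity
 * rem = dividend - (dividend / divisor) * divisor:
 */
static inline u32 example_mod_via_mls(u32 dividend, u32 divisor)
{
	u32 quot = dividend / divisor;		/* udiv ip, rm, rn */

	return dividend - quot * divisor;	/* mls  rd, rn, ip, rm */
}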
 513
 514/* Is the translated BPF register on stack? */
 515static bool is_stacked(s8 reg)
 516{
 517	return reg < 0;
 518}
 519
 520/* If a BPF register is on the stack (is_stacked() is true), load it
 521 * into the supplied temporary register and return that register for
 522 * subsequent operations; otherwise just use the CPU register.
 523 */
 524static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx)
 525{
 526	if (is_stacked(reg)) {
 527		emit(ARM_LDR_I(tmp, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
 528		reg = tmp;
 529	}
 530	return reg;
 531}
 532
 533static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp,
 534				   struct jit_ctx *ctx)
 535{
 536	if (is_stacked(reg[1])) {
 537		if (__LINUX_ARM_ARCH__ >= 6 ||
 538		    ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
 539			emit(ARM_LDRD_I(tmp[1], ARM_FP,
 540					EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
 541		} else {
 542			emit(ARM_LDR_I(tmp[1], ARM_FP,
 543				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
 544			emit(ARM_LDR_I(tmp[0], ARM_FP,
 545				       EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
 546		}
 547		reg = tmp;
 548	}
 549	return reg;
 550}
 551
 552/* If a BPF register is on the stack (is_stacked() is true), save the
 553 * register back to the stack.  Otherwise, if the source register is
 554 * not the same, move it into the correct register.
 555 */
 556static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx)
 557{
 558	if (is_stacked(reg))
 559		emit(ARM_STR_I(src, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
 560	else if (reg != src)
 561		emit(ARM_MOV_R(reg, src), ctx);
 562}
 563
 564static void arm_bpf_put_reg64(const s8 *reg, const s8 *src,
 565			      struct jit_ctx *ctx)
 566{
 567	if (is_stacked(reg[1])) {
 568		if (__LINUX_ARM_ARCH__ >= 6 ||
 569		    ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
 570			emit(ARM_STRD_I(src[1], ARM_FP,
 571				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
 572		} else {
 573			emit(ARM_STR_I(src[1], ARM_FP,
 574				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
 575			emit(ARM_STR_I(src[0], ARM_FP,
 576				       EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
 577		}
 578	} else {
 579		if (reg[1] != src[1])
 580			emit(ARM_MOV_R(reg[1], src[1]), ctx);
 581		if (reg[0] != src[0])
 582			emit(ARM_MOV_R(reg[0], src[0]), ctx);
 583	}
 584}
 585
 586static inline void emit_a32_mov_i(const s8 dst, const u32 val,
 587				  struct jit_ctx *ctx)
 588{
 589	const s8 *tmp = bpf2a32[TMP_REG_1];
 590
 591	if (is_stacked(dst)) {
 592		emit_mov_i(tmp[1], val, ctx);
 593		arm_bpf_put_reg32(dst, tmp[1], ctx);
 594	} else {
 595		emit_mov_i(dst, val, ctx);
 596	}
 597}
 598
 599static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx)
 600{
 601	const s8 *tmp = bpf2a32[TMP_REG_1];
 602	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
 603
 604	emit_mov_i(rd[1], (u32)val, ctx);
 605	emit_mov_i(rd[0], val >> 32, ctx);
 606
 607	arm_bpf_put_reg64(dst, rd, ctx);
 608}
 609
 610/* Sign-extended move */
 611static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[],
 612				       const u32 val, struct jit_ctx *ctx) {
 613	u64 val64 = val;
 614
 615	if (is64 && (val & (1<<31)))
 616		val64 |= 0xffffffff00000000ULL;
 617	emit_a32_mov_i64(dst, val64, ctx);
 618}
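
/*
 * Illustrative sketch: the sign extension above is the usual C widening
 * cast, e.g. 0xffffffff becomes 0xffffffffffffffffULL:
 */
static inline u64 example_sext32(u32 val)
{
	return (u64)(s64)(s32)val;
}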
 619
 620static inline void emit_a32_add_r(const u8 dst, const u8 src,
 621			      const bool is64, const bool hi,
 622			      struct jit_ctx *ctx) {
 623	/* 64 bit :
 624	 *	adds dst_lo, dst_lo, src_lo
 625	 *	adc dst_hi, dst_hi, src_hi
 626	 * 32 bit :
 627	 *	add dst_lo, dst_lo, src_lo
 628	 */
 629	if (!hi && is64)
 630		emit(ARM_ADDS_R(dst, dst, src), ctx);
 631	else if (hi && is64)
 632		emit(ARM_ADC_R(dst, dst, src), ctx);
 633	else
 634		emit(ARM_ADD_R(dst, dst, src), ctx);
 635}
 636
 637static inline void emit_a32_sub_r(const u8 dst, const u8 src,
 638				  const bool is64, const bool hi,
 639				  struct jit_ctx *ctx) {
 640	/* 64 bit :
 641	 *	subs dst_lo, dst_lo, src_lo
 642	 *	sbc dst_hi, dst_hi, src_hi
 643	 * 32 bit :
 644	 *	sub dst_lo, dst_lo, src_lo
 645	 */
 646	if (!hi && is64)
 647		emit(ARM_SUBS_R(dst, dst, src), ctx);
 648	else if (hi && is64)
 649		emit(ARM_SBC_R(dst, dst, src), ctx);
 650	else
 651		emit(ARM_SUB_R(dst, dst, src), ctx);
 652}
 653
 654static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
 655			      const bool hi, const u8 op, struct jit_ctx *ctx){
 656	switch (BPF_OP(op)) {
 657	/* dst = dst + src */
 658	case BPF_ADD:
 659		emit_a32_add_r(dst, src, is64, hi, ctx);
 660		break;
 661	/* dst = dst - src */
 662	case BPF_SUB:
 663		emit_a32_sub_r(dst, src, is64, hi, ctx);
 664		break;
 665	/* dst = dst | src */
 666	case BPF_OR:
 667		emit(ARM_ORR_R(dst, dst, src), ctx);
 668		break;
 669	/* dst = dst & src */
 670	case BPF_AND:
 671		emit(ARM_AND_R(dst, dst, src), ctx);
 672		break;
 673	/* dst = dst ^ src */
 674	case BPF_XOR:
 675		emit(ARM_EOR_R(dst, dst, src), ctx);
 676		break;
 677	/* dst = dst * src */
 678	case BPF_MUL:
 679		emit(ARM_MUL(dst, dst, src), ctx);
 680		break;
 681	/* dst = dst << src */
 682	case BPF_LSH:
 683		emit(ARM_LSL_R(dst, dst, src), ctx);
 684		break;
 685	/* dst = dst >> src */
 686	case BPF_RSH:
 687		emit(ARM_LSR_R(dst, dst, src), ctx);
 688		break;
 689	/* dst = dst >> src (signed) */
 690	case BPF_ARSH:
 691		emit(ARM_MOV_SR(dst, dst, SRTYPE_ASR, src), ctx);
 692		break;
 693	}
 694}
 695
 696/* ALU operation (32 bit)
 697 * dst = dst (op) src
 698 */
 699static inline void emit_a32_alu_r(const s8 dst, const s8 src,
 700				  struct jit_ctx *ctx, const bool is64,
 701				  const bool hi, const u8 op) {
 702	const s8 *tmp = bpf2a32[TMP_REG_1];
 703	s8 rn, rd;
 704
 705	rn = arm_bpf_get_reg32(src, tmp[1], ctx);
 706	rd = arm_bpf_get_reg32(dst, tmp[0], ctx);
 707	/* ALU operation */
 708	emit_alu_r(rd, rn, is64, hi, op, ctx);
 709	arm_bpf_put_reg32(dst, rd, ctx);
 710}
 711
 712/* ALU operation (64 bit) */
 713static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
 714				  const s8 src[], struct jit_ctx *ctx,
 715				  const u8 op) {
 716	const s8 *tmp = bpf2a32[TMP_REG_1];
 717	const s8 *tmp2 = bpf2a32[TMP_REG_2];
 718	const s8 *rd;
 719
 720	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 721	if (is64) {
 722		const s8 *rs;
 723
 724		rs = arm_bpf_get_reg64(src, tmp2, ctx);
 725
 726		/* ALU operation */
 727		emit_alu_r(rd[1], rs[1], true, false, op, ctx);
 728		emit_alu_r(rd[0], rs[0], true, true, op, ctx);
 729	} else {
 730		s8 rs;
 731
 732		rs = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
 733
 734		/* ALU operation */
 735		emit_alu_r(rd[1], rs, true, false, op, ctx);
 736		if (!ctx->prog->aux->verifier_zext)
 737			emit_a32_mov_i(rd[0], 0, ctx);
 738	}
 739
 740	arm_bpf_put_reg64(dst, rd, ctx);
 741}
 742
 743/* dst = src (4 bytes) */
 744static inline void emit_a32_mov_r(const s8 dst, const s8 src,
 745				  struct jit_ctx *ctx) {
 746	const s8 *tmp = bpf2a32[TMP_REG_1];
 747	s8 rt;
 748
 749	rt = arm_bpf_get_reg32(src, tmp[0], ctx);
 750	arm_bpf_put_reg32(dst, rt, ctx);
 751}
 752
 753/* dst = src */
 754static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
 755				  const s8 src[],
 756				  struct jit_ctx *ctx) {
 757	if (!is64) {
 758		emit_a32_mov_r(dst_lo, src_lo, ctx);
 759		if (!ctx->prog->aux->verifier_zext)
 760			/* Zero out high 4 bytes */
 761			emit_a32_mov_i(dst_hi, 0, ctx);
 762	} else if (__LINUX_ARM_ARCH__ < 6 &&
 763		   ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
 764		/* complete 8 byte move */
 765		emit_a32_mov_r(dst_lo, src_lo, ctx);
 766		emit_a32_mov_r(dst_hi, src_hi, ctx);
 767	} else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
 768		const u8 *tmp = bpf2a32[TMP_REG_1];
 769
 770		emit(ARM_LDRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
 771		emit(ARM_STRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
 772	} else if (is_stacked(src_lo)) {
 773		emit(ARM_LDRD_I(dst[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
 774	} else if (is_stacked(dst_lo)) {
 775		emit(ARM_STRD_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
 776	} else {
 777		emit(ARM_MOV_R(dst[0], src[0]), ctx);
 778		emit(ARM_MOV_R(dst[1], src[1]), ctx);
 779	}
 780}
 781
 782/* Shift operations (32 bit); also used for BPF_NEG */
 783static inline void emit_a32_alu_i(const s8 dst, const u32 val,
 784				struct jit_ctx *ctx, const u8 op) {
 785	const s8 *tmp = bpf2a32[TMP_REG_1];
 786	s8 rd;
 787
 788	rd = arm_bpf_get_reg32(dst, tmp[0], ctx);
 789
 790	/* Do shift operation */
 791	switch (op) {
 792	case BPF_LSH:
 793		emit(ARM_LSL_I(rd, rd, val), ctx);
 794		break;
 795	case BPF_RSH:
 796		emit(ARM_LSR_I(rd, rd, val), ctx);
 797		break;
 798	case BPF_NEG:
 799		emit(ARM_RSB_I(rd, rd, val), ctx);
 800		break;
 801	}
 802
 803	arm_bpf_put_reg32(dst, rd, ctx);
 804}
 805
 806/* dst = -dst (64 bit) */
 807static inline void emit_a32_neg64(const s8 dst[],
 808				struct jit_ctx *ctx){
 809	const s8 *tmp = bpf2a32[TMP_REG_1];
 810	const s8 *rd;
 811
 812	/* Setup Operand */
 813	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 814
 815	/* Do Negate Operation */
 816	emit(ARM_RSBS_I(rd[1], rd[1], 0), ctx);
 817	emit(ARM_RSC_I(rd[0], rd[0], 0), ctx);
 818
 819	arm_bpf_put_reg64(dst, rd, ctx);
 820}
 821
 822/* dst = dst << src */
 823static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[],
 824				    struct jit_ctx *ctx) {
 825	const s8 *tmp = bpf2a32[TMP_REG_1];
 826	const s8 *tmp2 = bpf2a32[TMP_REG_2];
 827	const s8 *rd;
 828	s8 rt;
 829
 830	/* Setup Operands */
 831	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
 832	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 833
 834	/* Do LSH operation */
 835	emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
 836	emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
 837	emit(ARM_MOV_SR(ARM_LR, rd[0], SRTYPE_ASL, rt), ctx);
 838	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[1], SRTYPE_ASL, ARM_IP), ctx);
 839	emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd[1], SRTYPE_LSR, tmp2[0]), ctx);
 840	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_ASL, rt), ctx);
 841
 842	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
 843	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
 844}
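
/*
 * Illustrative sketch (assumption): the variable 64-bit left shift emitted
 * above decomposes into 32-bit operations as follows, for n = 0..63:
 */
static inline u64 example_lsh64(u32 hi, u32 lo, u32 n)
{
	u32 new_hi, new_lo;

	if (n < 32) {
		new_hi = (hi << n) | (n ? lo >> (32 - n) : 0);
		new_lo = lo << n;
	} else {
		new_hi = lo << (n - 32);	/* n == 32 moves lo into hi */
		new_lo = 0;
	}
	return (u64)new_hi << 32 | new_lo;
}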
 845
 846/* dst = dst >> src (signed) */
 847static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[],
 848				     struct jit_ctx *ctx) {
 849	const s8 *tmp = bpf2a32[TMP_REG_1];
 850	const s8 *tmp2 = bpf2a32[TMP_REG_2];
 851	const s8 *rd;
 852	s8 rt;
 853
 854	/* Setup Operands */
 855	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
 856	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 857
 858	/* Do the ARSH operation */
 859	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
 860	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
 861	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
 862	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
 863	_emit(ARM_COND_MI, ARM_B(0), ctx);
 864	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx);
 865	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx);
 866
 867	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
 868	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
 869}
 870
 871/* dst = dst >> src */
 872static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[],
 873				    struct jit_ctx *ctx) {
 874	const s8 *tmp = bpf2a32[TMP_REG_1];
 875	const s8 *tmp2 = bpf2a32[TMP_REG_2];
 876	const s8 *rd;
 877	s8 rt;
 878
 879	/* Setup Operands */
 880	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
 881	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 882
 883	/* Do RSH operation */
 884	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
 885	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
 886	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
 887	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
 888	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_LSR, tmp2[0]), ctx);
 889	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_LSR, rt), ctx);
 890
 891	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
 892	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
 893}
 894
 895/* dst = dst << val */
 896static inline void emit_a32_lsh_i64(const s8 dst[],
 897				    const u32 val, struct jit_ctx *ctx){
 898	const s8 *tmp = bpf2a32[TMP_REG_1];
 899	const s8 *tmp2 = bpf2a32[TMP_REG_2];
 900	const s8 *rd;
 901
 902	/* Setup operands */
 903	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 904
 905	/* Do LSH operation */
 906	if (val < 32) {
 907		emit(ARM_MOV_SI(tmp2[0], rd[0], SRTYPE_ASL, val), ctx);
 908		emit(ARM_ORR_SI(rd[0], tmp2[0], rd[1], SRTYPE_LSR, 32 - val), ctx);
 909		emit(ARM_MOV_SI(rd[1], rd[1], SRTYPE_ASL, val), ctx);
 910	} else {
 911		if (val == 32)
 912			emit(ARM_MOV_R(rd[0], rd[1]), ctx);
 913		else
 914			emit(ARM_MOV_SI(rd[0], rd[1], SRTYPE_ASL, val - 32), ctx);
 915		emit(ARM_EOR_R(rd[1], rd[1], rd[1]), ctx);
 916	}
 917
 918	arm_bpf_put_reg64(dst, rd, ctx);
 919}
 920
 921/* dst = dst >> val */
 922static inline void emit_a32_rsh_i64(const s8 dst[],
 923				    const u32 val, struct jit_ctx *ctx) {
 924	const s8 *tmp = bpf2a32[TMP_REG_1];
 925	const s8 *tmp2 = bpf2a32[TMP_REG_2];
 926	const s8 *rd;
 927
 928	/* Setup operands */
 929	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 930
 931	/* Do LSR operation */
 932	if (val < 32) {
 933		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
 934		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
 935		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
 936	} else if (val == 32) {
 937		emit(ARM_MOV_R(rd[1], rd[0]), ctx);
 938		emit(ARM_MOV_I(rd[0], 0), ctx);
 939	} else {
 940		emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_LSR, val - 32), ctx);
 941		emit(ARM_MOV_I(rd[0], 0), ctx);
 942	}
 943
 944	arm_bpf_put_reg64(dst, rd, ctx);
 945}
 946
 947/* dst = dst >> val (signed) */
 948static inline void emit_a32_arsh_i64(const s8 dst[],
 949				     const u32 val, struct jit_ctx *ctx){
 950	const s8 *tmp = bpf2a32[TMP_REG_1];
 951	const s8 *tmp2 = bpf2a32[TMP_REG_2];
 952	const s8 *rd;
 953
 954	/* Setup operands */
 955	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 956
 957	/* Do ARSH operation */
 958	if (val < 32) {
 959		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
 960		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
 961		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
 962	} else if (val == 32) {
 963		emit(ARM_MOV_R(rd[1], rd[0]), ctx);
 964		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
 965	} else {
 966		emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_ASR, val - 32), ctx);
 967		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
 968	}
 969
 970	arm_bpf_put_reg64(dst, rd, ctx);
 971}
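
/*
 * Illustrative sketch: the three cases above expressed on 32-bit halves
 * (val = 1..63); the high word is shifted arithmetically so the sign bit
 * is replicated:
 */
static inline u64 example_arsh64_imm(u32 hi, u32 lo, u32 val)
{
	u32 new_hi, new_lo;

	if (val < 32) {
		new_lo = (lo >> val) | (hi << (32 - val));
		new_hi = (u32)((s32)hi >> val);
	} else {
		new_lo = (u32)((s32)hi >> (val - 32));	/* val == 32: lo = hi */
		new_hi = (u32)((s32)hi >> 31);		/* sign fill */
	}
	return (u64)new_hi << 32 | new_lo;
}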
 972
 973static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
 974				    struct jit_ctx *ctx) {
 975	const s8 *tmp = bpf2a32[TMP_REG_1];
 976	const s8 *tmp2 = bpf2a32[TMP_REG_2];
 977	const s8 *rd, *rt;
 978
 979	/* Setup operands for multiplication */
 980	rd = arm_bpf_get_reg64(dst, tmp, ctx);
 981	rt = arm_bpf_get_reg64(src, tmp2, ctx);
 982
 983	/* Do Multiplication */
 984	emit(ARM_MUL(ARM_IP, rd[1], rt[0]), ctx);
 985	emit(ARM_MUL(ARM_LR, rd[0], rt[1]), ctx);
 986	emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);
 987
 988	emit(ARM_UMULL(ARM_IP, rd[0], rd[1], rt[1]), ctx);
 989	emit(ARM_ADD_R(rd[0], ARM_LR, rd[0]), ctx);
 990
 991	arm_bpf_put_reg32(dst_lo, ARM_IP, ctx);
 992	arm_bpf_put_reg32(dst_hi, rd[0], ctx);
 993}
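
/*
 * Illustrative sketch: 64x64->64 multiplication built from 32-bit halves,
 * matching the MUL/MUL/ADD/UMULL sequence above; the hi*hi product only
 * affects bits 64 and up, so it is dropped:
 */
static inline u64 example_mul64(u64 a, u64 b)
{
	u32 a_lo = a, a_hi = a >> 32;
	u32 b_lo = b, b_hi = b >> 32;
	u64 prod = (u64)a_lo * b_lo;		/* umull */
	u32 cross = a_hi * b_lo + a_lo * b_hi;	/* mul + mul + add */

	return prod + ((u64)cross << 32);
}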
 994
 995/* *(size *)(dst + off) = src */
 996static inline void emit_str_r(const s8 dst, const s8 src[],
 997			      s32 off, struct jit_ctx *ctx, const u8 sz){
 998	const s8 *tmp = bpf2a32[TMP_REG_1];
 999	s32 off_max;
1000	s8 rd;
1001
1002	rd = arm_bpf_get_reg32(dst, tmp[1], ctx);
1003
1004	if (sz == BPF_H)
1005		off_max = 0xff;
1006	else
1007		off_max = 0xfff;
1008
1009	if (off < 0 || off > off_max) {
1010		emit_a32_mov_i(tmp[0], off, ctx);
1011		emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
1012		rd = tmp[0];
1013		off = 0;
1014	}
1015	switch (sz) {
1016	case BPF_B:
1017		/* Store a Byte */
1018		emit(ARM_STRB_I(src_lo, rd, off), ctx);
1019		break;
1020	case BPF_H:
1021		/* Store a HalfWord */
1022		emit(ARM_STRH_I(src_lo, rd, off), ctx);
1023		break;
1024	case BPF_W:
1025		/* Store a Word */
1026		emit(ARM_STR_I(src_lo, rd, off), ctx);
1027		break;
1028	case BPF_DW:
1029		/* Store a Double Word */
1030		emit(ARM_STR_I(src_lo, rd, off), ctx);
1031		emit(ARM_STR_I(src_hi, rd, off + 4), ctx);
1032		break;
1033	}
1034}
1035
1036/* dst = *(size*)(src + off) */
1037static inline void emit_ldx_r(const s8 dst[], const s8 src,
1038			      s32 off, struct jit_ctx *ctx, const u8 sz){
1039	const s8 *tmp = bpf2a32[TMP_REG_1];
1040	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
1041	s8 rm = src;
1042	s32 off_max;
1043
1044	if (sz == BPF_H)
1045		off_max = 0xff;
1046	else
1047		off_max = 0xfff;
1048
1049	if (off < 0 || off > off_max) {
1050		emit_a32_mov_i(tmp[0], off, ctx);
1051		emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
1052		rm = tmp[0];
1053		off = 0;
1054	} else if (rd[1] == rm) {
1055		emit(ARM_MOV_R(tmp[0], rm), ctx);
1056		rm = tmp[0];
1057	}
1058	switch (sz) {
1059	case BPF_B:
1060		/* Load a Byte */
1061		emit(ARM_LDRB_I(rd[1], rm, off), ctx);
1062		if (!ctx->prog->aux->verifier_zext)
1063			emit_a32_mov_i(rd[0], 0, ctx);
1064		break;
1065	case BPF_H:
1066		/* Load a HalfWord */
1067		emit(ARM_LDRH_I(rd[1], rm, off), ctx);
1068		if (!ctx->prog->aux->verifier_zext)
1069			emit_a32_mov_i(rd[0], 0, ctx);
1070		break;
1071	case BPF_W:
1072		/* Load a Word */
1073		emit(ARM_LDR_I(rd[1], rm, off), ctx);
1074		if (!ctx->prog->aux->verifier_zext)
1075			emit_a32_mov_i(rd[0], 0, ctx);
1076		break;
1077	case BPF_DW:
1078		/* Load a Double Word */
1079		emit(ARM_LDR_I(rd[1], rm, off), ctx);
1080		emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
1081		break;
1082	}
1083	arm_bpf_put_reg64(dst, rd, ctx);
1084}
1085
1086/* Arithmetic operation */
1087static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
1088			     const u8 rn, struct jit_ctx *ctx, u8 op,
1089			     bool is_jmp64) {
1090	switch (op) {
1091	case BPF_JSET:
1092		if (is_jmp64) {
1093			emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
1094			emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
1095			emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
1096		} else {
1097			emit(ARM_ANDS_R(ARM_IP, rt, rn), ctx);
1098		}
1099		break;
1100	case BPF_JEQ:
1101	case BPF_JNE:
1102	case BPF_JGT:
1103	case BPF_JGE:
1104	case BPF_JLE:
1105	case BPF_JLT:
1106		if (is_jmp64) {
1107			emit(ARM_CMP_R(rd, rm), ctx);
1108			/* Only compare the low halves if the high halves are equal. */
1109			_emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
1110		} else {
1111			emit(ARM_CMP_R(rt, rn), ctx);
1112		}
1113		break;
1114	case BPF_JSLE:
1115	case BPF_JSGT:
1116		emit(ARM_CMP_R(rn, rt), ctx);
1117		if (is_jmp64)
1118			emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
1119		break;
1120	case BPF_JSLT:
1121	case BPF_JSGE:
1122		emit(ARM_CMP_R(rt, rn), ctx);
1123		if (is_jmp64)
1124			emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
1125		break;
1126	}
1127}
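
/*
 * Illustrative sketch (assumption): the CMP / conditional-CMP pair used for
 * the 64-bit unsigned compares above is equivalent to:
 */
static inline bool example_jgt64(u32 dst_hi, u32 dst_lo,
				 u32 src_hi, u32 src_lo)
{
	return dst_hi != src_hi ? dst_hi > src_hi : dst_lo > src_lo;
}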
1128
1129static int out_offset = -1; /* initialized on the first pass of build_body() */
1130static int emit_bpf_tail_call(struct jit_ctx *ctx)
1131{
1132
1133	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
1134	const s8 *r2 = bpf2a32[BPF_REG_2];
1135	const s8 *r3 = bpf2a32[BPF_REG_3];
1136	const s8 *tmp = bpf2a32[TMP_REG_1];
1137	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1138	const s8 *tcc = bpf2a32[TCALL_CNT];
1139	const s8 *tc;
1140	const int idx0 = ctx->idx;
1141#define cur_offset (ctx->idx - idx0)
1142#define jmp_offset (out_offset - (cur_offset) - 2)
1143	u32 lo, hi;
1144	s8 r_array, r_index;
1145	int off;
1146
1147	/* if (index >= array->map.max_entries)
1148	 *	goto out;
1149	 */
1150	BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) >
1151		     ARM_INST_LDST__IMM12);
1152	off = offsetof(struct bpf_array, map.max_entries);
1153	r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx);
1154	/* index is 32-bit for arrays */
1155	r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx);
1156	/* array->map.max_entries */
1157	emit(ARM_LDR_I(tmp[1], r_array, off), ctx);
1158	/* index >= array->map.max_entries */
1159	emit(ARM_CMP_R(r_index, tmp[1]), ctx);
1160	_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
1161
1162	/* tmp2[0] = array, tmp2[1] = index */
1163
1164	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
1165	 *	goto out;
1166	 * tail_call_cnt++;
1167	 */
1168	lo = (u32)MAX_TAIL_CALL_CNT;
1169	hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
1170	tc = arm_bpf_get_reg64(tcc, tmp, ctx);
1171	emit(ARM_CMP_I(tc[0], hi), ctx);
1172	_emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx);
1173	_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
1174	emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx);
1175	emit(ARM_ADC_I(tc[0], tc[0], 0), ctx);
1176	arm_bpf_put_reg64(tcc, tmp, ctx);
1177
1178	/* prog = array->ptrs[index]
1179	 * if (prog == NULL)
1180	 *	goto out;
1181	 */
1182	BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0);
1183	off = imm8m(offsetof(struct bpf_array, ptrs));
1184	emit(ARM_ADD_I(tmp[1], r_array, off), ctx);
1185	emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx);
1186	emit(ARM_CMP_I(tmp[1], 0), ctx);
1187	_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
1188
1189	/* goto *(prog->bpf_func + prologue_size); */
1190	BUILD_BUG_ON(offsetof(struct bpf_prog, bpf_func) >
1191		     ARM_INST_LDST__IMM12);
1192	off = offsetof(struct bpf_prog, bpf_func);
1193	emit(ARM_LDR_I(tmp[1], tmp[1], off), ctx);
1194	emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
1195	emit_bx_r(tmp[1], ctx);
1196
1197	/* out: */
1198	if (out_offset == -1)
1199		out_offset = cur_offset;
1200	if (cur_offset != out_offset) {
1201		pr_err_once("tail_call out_offset = %d, expected %d!\n",
1202			    cur_offset, out_offset);
1203		return -1;
1204	}
1205	return 0;
1206#undef cur_offset
1207#undef jmp_offset
1208}
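
/*
 * Illustrative sketch (not part of the original file): the C-level
 * behaviour of the sequence emitted above.  The real code jumps to
 * prog->bpf_func + prologue_bytes instead of returning a pointer.
 */
static inline void *example_tail_call(struct bpf_array *array, u32 index,
				      u64 *tail_call_cnt)
{
	if (index >= array->map.max_entries)
		return NULL;
	if (*tail_call_cnt > MAX_TAIL_CALL_CNT)
		return NULL;
	(*tail_call_cnt)++;
	return array->ptrs[index];	/* NULL if the slot is empty */
}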
1209
1210/* 0xabcd => 0xcdab */
1211static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
1212{
1213#if __LINUX_ARM_ARCH__ < 6
1214	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1215
1216	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
1217	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx);
1218	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
1219	emit(ARM_ORR_SI(rd, tmp2[0], tmp2[1], SRTYPE_LSL, 8), ctx);
1220#else /* ARMv6+ */
1221	emit(ARM_REV16(rd, rn), ctx);
1222#endif
1223}
1224
1225/* 0xabcdefgh => 0xghefcdab */
1226static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
1227{
1228#if __LINUX_ARM_ARCH__ < 6
1229	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1230
1231	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
1232	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx);
1233	emit(ARM_ORR_SI(ARM_IP, tmp2[0], tmp2[1], SRTYPE_LSL, 24), ctx);
1234
1235	emit(ARM_MOV_SI(tmp2[1], rn, SRTYPE_LSR, 8), ctx);
1236	emit(ARM_AND_I(tmp2[1], tmp2[1], 0xff), ctx);
1237	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 16), ctx);
1238	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
1239	emit(ARM_MOV_SI(tmp2[0], tmp2[0], SRTYPE_LSL, 8), ctx);
1240	emit(ARM_ORR_SI(tmp2[0], tmp2[0], tmp2[1], SRTYPE_LSL, 16), ctx);
1241	emit(ARM_ORR_R(rd, ARM_IP, tmp2[0]), ctx);
1242
1243#else /* ARMv6+ */
1244	emit(ARM_REV(rd, rn), ctx);
1245#endif
1246}
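
/*
 * Illustrative sketch: the pre-ARMv6 shift-and-mask sequence above
 * implements a plain 32-bit byte swap:
 */
static inline u32 example_swab32(u32 x)
{
	return ((x & 0xff) << 24) | (((x >> 8) & 0xff) << 16) |
	       (((x >> 16) & 0xff) << 8) | (x >> 24);
}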
1247
1248// push the given 64-bit register pair onto the stack
1249static inline void emit_push_r64(const s8 src[], struct jit_ctx *ctx)
1250{
1251	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1252	const s8 *rt;
1253	u16 reg_set = 0;
1254
1255	rt = arm_bpf_get_reg64(src, tmp2, ctx);
1256
1257	reg_set = (1 << rt[1]) | (1 << rt[0]);
1258	emit(ARM_PUSH(reg_set), ctx);
1259}
1260
1261static void build_prologue(struct jit_ctx *ctx)
1262{
1263	const s8 r0 = bpf2a32[BPF_REG_0][1];
1264	const s8 r2 = bpf2a32[BPF_REG_1][1];
1265	const s8 r3 = bpf2a32[BPF_REG_1][0];
1266	const s8 r4 = bpf2a32[BPF_REG_6][1];
1267	const s8 fplo = bpf2a32[BPF_REG_FP][1];
1268	const s8 fphi = bpf2a32[BPF_REG_FP][0];
1269	const s8 *tcc = bpf2a32[TCALL_CNT];
1270
1271	/* Save callee saved registers. */
1272#ifdef CONFIG_FRAME_POINTER
1273	u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
1274	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
1275	emit(ARM_PUSH(reg_set), ctx);
1276	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
1277#else
1278	emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
1279	emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
1280#endif
1281	/* Save frame pointer for later */
1282	emit(ARM_SUB_I(ARM_IP, ARM_SP, SCRATCH_SIZE), ctx);
1283
1284	ctx->stack_size = imm8m(STACK_SIZE);
1285
1286	/* Set up function call stack */
1287	emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
1288
1289	/* Set up BPF prog stack base register */
1290	emit_a32_mov_r(fplo, ARM_IP, ctx);
1291	emit_a32_mov_i(fphi, 0, ctx);
1292
1293	/* mov r4, 0 */
1294	emit(ARM_MOV_I(r4, 0), ctx);
1295
1296	/* Move BPF_CTX to BPF_R1 */
1297	emit(ARM_MOV_R(r3, r4), ctx);
1298	emit(ARM_MOV_R(r2, r0), ctx);
1299	/* Initialize Tail Count */
1300	emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[0])), ctx);
1301	emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[1])), ctx);
1302	/* end of prologue */
1303}
1304
1305/* Restore callee-saved registers. */
1306static void build_epilogue(struct jit_ctx *ctx)
1307{
1308#ifdef CONFIG_FRAME_POINTER
1309	/* When using frame pointers, some additional registers need to
1310	 * be loaded. */
1311	u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
1312	emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
1313	emit(ARM_LDM(ARM_SP, reg_set), ctx);
1314#else
1315	/* Restore callee saved registers. */
1316	emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
1317	emit(ARM_POP(CALLEE_POP_MASK), ctx);
1318#endif
1319}
1320
1321/*
1322 * Convert an eBPF instruction to a native instruction, i.e.
1323 * JIT an eBPF instruction.
1324 * Returns :
1325 *	0  - Successfully JITed an 8-byte eBPF instruction
1326 *	>0 - Successfully JITed a 16-byte eBPF instruction
1327 *	<0 - Failed to JIT.
1328 */
1329static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1330{
1331	const u8 code = insn->code;
1332	const s8 *dst = bpf2a32[insn->dst_reg];
1333	const s8 *src = bpf2a32[insn->src_reg];
1334	const s8 *tmp = bpf2a32[TMP_REG_1];
1335	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1336	const s16 off = insn->off;
1337	const s32 imm = insn->imm;
1338	const int i = insn - ctx->prog->insnsi;
1339	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
1340	const s8 *rd, *rs;
1341	s8 rd_lo, rt, rm, rn;
1342	s32 jmp_offset;
1343
1344#define check_imm(bits, imm) do {				\
1345	if ((imm) >= (1 << ((bits) - 1)) ||			\
1346	    (imm) < -(1 << ((bits) - 1))) {			\
1347		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
1348			i, imm, imm);				\
1349		return -EINVAL;					\
1350	}							\
1351} while (0)
1352#define check_imm24(imm) check_imm(24, imm)
1353
1354	switch (code) {
1355	/* ALU operations */
1356
1357	/* dst = src */
1358	case BPF_ALU | BPF_MOV | BPF_K:
1359	case BPF_ALU | BPF_MOV | BPF_X:
1360	case BPF_ALU64 | BPF_MOV | BPF_K:
1361	case BPF_ALU64 | BPF_MOV | BPF_X:
1362		switch (BPF_SRC(code)) {
1363		case BPF_X:
1364			if (imm == 1) {
1365				/* Special mov32 for zext */
1366				emit_a32_mov_i(dst_hi, 0, ctx);
1367				break;
1368			}
1369			emit_a32_mov_r64(is64, dst, src, ctx);
1370			break;
1371		case BPF_K:
1372			/* Sign-extend immediate value to destination reg */
1373			emit_a32_mov_se_i64(is64, dst, imm, ctx);
1374			break;
1375		}
1376		break;
1377	/* dst = dst + src/imm */
1378	/* dst = dst - src/imm */
1379	/* dst = dst | src/imm */
1380	/* dst = dst & src/imm */
1381	/* dst = dst ^ src/imm */
1382	/* dst = dst * src/imm */
1383	/* dst = dst << src */
1384	/* dst = dst >> src */
1385	case BPF_ALU | BPF_ADD | BPF_K:
1386	case BPF_ALU | BPF_ADD | BPF_X:
1387	case BPF_ALU | BPF_SUB | BPF_K:
1388	case BPF_ALU | BPF_SUB | BPF_X:
1389	case BPF_ALU | BPF_OR | BPF_K:
1390	case BPF_ALU | BPF_OR | BPF_X:
1391	case BPF_ALU | BPF_AND | BPF_K:
1392	case BPF_ALU | BPF_AND | BPF_X:
1393	case BPF_ALU | BPF_XOR | BPF_K:
1394	case BPF_ALU | BPF_XOR | BPF_X:
1395	case BPF_ALU | BPF_MUL | BPF_K:
1396	case BPF_ALU | BPF_MUL | BPF_X:
1397	case BPF_ALU | BPF_LSH | BPF_X:
1398	case BPF_ALU | BPF_RSH | BPF_X:
1399	case BPF_ALU | BPF_ARSH | BPF_K:
1400	case BPF_ALU | BPF_ARSH | BPF_X:
1401	case BPF_ALU64 | BPF_ADD | BPF_K:
1402	case BPF_ALU64 | BPF_ADD | BPF_X:
1403	case BPF_ALU64 | BPF_SUB | BPF_K:
1404	case BPF_ALU64 | BPF_SUB | BPF_X:
1405	case BPF_ALU64 | BPF_OR | BPF_K:
1406	case BPF_ALU64 | BPF_OR | BPF_X:
1407	case BPF_ALU64 | BPF_AND | BPF_K:
1408	case BPF_ALU64 | BPF_AND | BPF_X:
1409	case BPF_ALU64 | BPF_XOR | BPF_K:
1410	case BPF_ALU64 | BPF_XOR | BPF_X:
1411		switch (BPF_SRC(code)) {
1412		case BPF_X:
1413			emit_a32_alu_r64(is64, dst, src, ctx, BPF_OP(code));
1414			break;
1415		case BPF_K:
1416			/* Move the immediate value into the temporary
1417			 * register first and then do the ALU operation
1418			 * there; the move sign-extends the immediate
1419			 * into the temporary register pair, after which
1420			 * it is safe to operate on it.
1421			 */
1422			emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
1423			emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code));
1424			break;
1425		}
1426		break;
1427	/* dst = dst / src(imm) */
1428	/* dst = dst % src(imm) */
1429	case BPF_ALU | BPF_DIV | BPF_K:
1430	case BPF_ALU | BPF_DIV | BPF_X:
1431	case BPF_ALU | BPF_MOD | BPF_K:
1432	case BPF_ALU | BPF_MOD | BPF_X:
1433		rd_lo = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx);
1434		switch (BPF_SRC(code)) {
1435		case BPF_X:
1436			rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx);
1437			break;
1438		case BPF_K:
1439			rt = tmp2[0];
1440			emit_a32_mov_i(rt, imm, ctx);
1441			break;
1442		default:
1443			rt = src_lo;
1444			break;
1445		}
1446		emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code));
1447		arm_bpf_put_reg32(dst_lo, rd_lo, ctx);
1448		if (!ctx->prog->aux->verifier_zext)
1449			emit_a32_mov_i(dst_hi, 0, ctx);
1450		break;
1451	case BPF_ALU64 | BPF_DIV | BPF_K:
1452	case BPF_ALU64 | BPF_DIV | BPF_X:
1453	case BPF_ALU64 | BPF_MOD | BPF_K:
1454	case BPF_ALU64 | BPF_MOD | BPF_X:
1455		goto notyet;
1456	/* dst = dst >> imm */
1457	/* dst = dst << imm */
1458	case BPF_ALU | BPF_RSH | BPF_K:
1459	case BPF_ALU | BPF_LSH | BPF_K:
1460		if (unlikely(imm > 31))
1461			return -EINVAL;
1462		if (imm)
1463			emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code));
1464		if (!ctx->prog->aux->verifier_zext)
1465			emit_a32_mov_i(dst_hi, 0, ctx);
1466		break;
1467	/* dst = dst << imm */
1468	case BPF_ALU64 | BPF_LSH | BPF_K:
1469		if (unlikely(imm > 63))
1470			return -EINVAL;
1471		emit_a32_lsh_i64(dst, imm, ctx);
1472		break;
1473	/* dst = dst >> imm */
1474	case BPF_ALU64 | BPF_RSH | BPF_K:
1475		if (unlikely(imm > 63))
1476			return -EINVAL;
1477		emit_a32_rsh_i64(dst, imm, ctx);
1478		break;
1479	/* dst = dst << src */
1480	case BPF_ALU64 | BPF_LSH | BPF_X:
1481		emit_a32_lsh_r64(dst, src, ctx);
1482		break;
1483	/* dst = dst >> src */
1484	case BPF_ALU64 | BPF_RSH | BPF_X:
1485		emit_a32_rsh_r64(dst, src, ctx);
1486		break;
1487	/* dst = dst >> src (signed) */
1488	case BPF_ALU64 | BPF_ARSH | BPF_X:
1489		emit_a32_arsh_r64(dst, src, ctx);
1490		break;
1491	/* dst = dst >> imm (signed) */
1492	case BPF_ALU64 | BPF_ARSH | BPF_K:
1493		if (unlikely(imm > 63))
1494			return -EINVAL;
1495		emit_a32_arsh_i64(dst, imm, ctx);
1496		break;
1497	/* dst = -dst */
1498	case BPF_ALU | BPF_NEG:
1499		emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code));
1500		if (!ctx->prog->aux->verifier_zext)
1501			emit_a32_mov_i(dst_hi, 0, ctx);
1502		break;
1503	/* dst = ~dst (64 bit) */
1504	case BPF_ALU64 | BPF_NEG:
1505		emit_a32_neg64(dst, ctx);
1506		break;
1507	/* dst = dst * src/imm */
1508	case BPF_ALU64 | BPF_MUL | BPF_X:
1509	case BPF_ALU64 | BPF_MUL | BPF_K:
1510		switch (BPF_SRC(code)) {
1511		case BPF_X:
1512			emit_a32_mul_r64(dst, src, ctx);
1513			break;
1514		case BPF_K:
1515			/* Move the immediate value into the temporary
1516			 * register first and then do the multiplication
1517			 * there; the move sign-extends the immediate
1518			 * into the temporary register pair, making the
1519			 * operation on it safe.
1520			 */
1521			emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
1522			emit_a32_mul_r64(dst, tmp2, ctx);
1523			break;
1524		}
1525		break;
1526	/* dst = htole(dst) */
1527	/* dst = htobe(dst) */
1528	case BPF_ALU | BPF_END | BPF_FROM_LE:
1529	case BPF_ALU | BPF_END | BPF_FROM_BE:
1530		rd = arm_bpf_get_reg64(dst, tmp, ctx);
1531		if (BPF_SRC(code) == BPF_FROM_LE)
1532			goto emit_bswap_uxt;
1533		switch (imm) {
1534		case 16:
1535			emit_rev16(rd[1], rd[1], ctx);
1536			goto emit_bswap_uxt;
1537		case 32:
1538			emit_rev32(rd[1], rd[1], ctx);
1539			goto emit_bswap_uxt;
1540		case 64:
1541			emit_rev32(ARM_LR, rd[1], ctx);
1542			emit_rev32(rd[1], rd[0], ctx);
1543			emit(ARM_MOV_R(rd[0], ARM_LR), ctx);
1544			break;
1545		}
1546		goto exit;
1547emit_bswap_uxt:
1548		switch (imm) {
1549		case 16:
1550			/* zero-extend 16 bits into 64 bits */
1551#if __LINUX_ARM_ARCH__ < 6
1552			emit_a32_mov_i(tmp2[1], 0xffff, ctx);
1553			emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx);
1554#else /* ARMv6+ */
1555			emit(ARM_UXTH(rd[1], rd[1]), ctx);
1556#endif
1557			if (!ctx->prog->aux->verifier_zext)
1558				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
1559			break;
1560		case 32:
1561			/* zero-extend 32 bits into 64 bits */
1562			if (!ctx->prog->aux->verifier_zext)
1563				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
1564			break;
1565		case 64:
1566			/* nop */
1567			break;
1568		}
1569exit:
1570		arm_bpf_put_reg64(dst, rd, ctx);
1571		break;
1572	/* dst = imm64 */
1573	case BPF_LD | BPF_IMM | BPF_DW:
1574	{
1575		u64 val = (u32)imm | (u64)insn[1].imm << 32;
1576
1577		emit_a32_mov_i64(dst, val, ctx);
1578
1579		return 1;
1580	}
1581	/* LDX: dst = *(size *)(src + off) */
1582	case BPF_LDX | BPF_MEM | BPF_W:
1583	case BPF_LDX | BPF_MEM | BPF_H:
1584	case BPF_LDX | BPF_MEM | BPF_B:
1585	case BPF_LDX | BPF_MEM | BPF_DW:
1586		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
1587		emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
1588		break;
1589	/* ST: *(size *)(dst + off) = imm */
1590	case BPF_ST | BPF_MEM | BPF_W:
1591	case BPF_ST | BPF_MEM | BPF_H:
1592	case BPF_ST | BPF_MEM | BPF_B:
1593	case BPF_ST | BPF_MEM | BPF_DW:
1594		switch (BPF_SIZE(code)) {
1595		case BPF_DW:
1596			/* Sign-extend immediate value into temp reg */
1597			emit_a32_mov_se_i64(true, tmp2, imm, ctx);
1598			break;
1599		case BPF_W:
1600		case BPF_H:
1601		case BPF_B:
1602			emit_a32_mov_i(tmp2[1], imm, ctx);
1603			break;
1604		}
1605		emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
1606		break;
1607	/* STX XADD: lock *(u32 *)(dst + off) += src */
1608	case BPF_STX | BPF_XADD | BPF_W:
1609	/* STX XADD: lock *(u64 *)(dst + off) += src */
1610	case BPF_STX | BPF_XADD | BPF_DW:
1611		goto notyet;
1612	/* STX: *(size *)(dst + off) = src */
1613	case BPF_STX | BPF_MEM | BPF_W:
1614	case BPF_STX | BPF_MEM | BPF_H:
1615	case BPF_STX | BPF_MEM | BPF_B:
1616	case BPF_STX | BPF_MEM | BPF_DW:
1617		rs = arm_bpf_get_reg64(src, tmp2, ctx);
1618		emit_str_r(dst_lo, rs, off, ctx, BPF_SIZE(code));
1619		break;
1620	/* PC += off if dst == src */
1621	/* PC += off if dst > src */
1622	/* PC += off if dst >= src */
1623	/* PC += off if dst < src */
1624	/* PC += off if dst <= src */
1625	/* PC += off if dst != src */
1626	/* PC += off if dst > src (signed) */
1627	/* PC += off if dst >= src (signed) */
1628	/* PC += off if dst < src (signed) */
1629	/* PC += off if dst <= src (signed) */
1630	/* PC += off if dst & src */
1631	case BPF_JMP | BPF_JEQ | BPF_X:
1632	case BPF_JMP | BPF_JGT | BPF_X:
1633	case BPF_JMP | BPF_JGE | BPF_X:
1634	case BPF_JMP | BPF_JNE | BPF_X:
1635	case BPF_JMP | BPF_JSGT | BPF_X:
1636	case BPF_JMP | BPF_JSGE | BPF_X:
1637	case BPF_JMP | BPF_JSET | BPF_X:
1638	case BPF_JMP | BPF_JLE | BPF_X:
1639	case BPF_JMP | BPF_JLT | BPF_X:
1640	case BPF_JMP | BPF_JSLT | BPF_X:
1641	case BPF_JMP | BPF_JSLE | BPF_X:
1642	case BPF_JMP32 | BPF_JEQ | BPF_X:
1643	case BPF_JMP32 | BPF_JGT | BPF_X:
1644	case BPF_JMP32 | BPF_JGE | BPF_X:
1645	case BPF_JMP32 | BPF_JNE | BPF_X:
1646	case BPF_JMP32 | BPF_JSGT | BPF_X:
1647	case BPF_JMP32 | BPF_JSGE | BPF_X:
1648	case BPF_JMP32 | BPF_JSET | BPF_X:
1649	case BPF_JMP32 | BPF_JLE | BPF_X:
1650	case BPF_JMP32 | BPF_JLT | BPF_X:
1651	case BPF_JMP32 | BPF_JSLT | BPF_X:
1652	case BPF_JMP32 | BPF_JSLE | BPF_X:
1653		/* Setup source registers */
1654		rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx);
1655		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
1656		goto go_jmp;
1657	/* PC += off if dst == imm */
1658	/* PC += off if dst > imm */
1659	/* PC += off if dst >= imm */
1660	/* PC += off if dst < imm */
1661	/* PC += off if dst <= imm */
1662	/* PC += off if dst != imm */
1663	/* PC += off if dst > imm (signed) */
1664	/* PC += off if dst >= imm (signed) */
1665	/* PC += off if dst < imm (signed) */
1666	/* PC += off if dst <= imm (signed) */
1667	/* PC += off if dst & imm */
1668	case BPF_JMP | BPF_JEQ | BPF_K:
1669	case BPF_JMP | BPF_JGT | BPF_K:
1670	case BPF_JMP | BPF_JGE | BPF_K:
1671	case BPF_JMP | BPF_JNE | BPF_K:
1672	case BPF_JMP | BPF_JSGT | BPF_K:
1673	case BPF_JMP | BPF_JSGE | BPF_K:
1674	case BPF_JMP | BPF_JSET | BPF_K:
1675	case BPF_JMP | BPF_JLT | BPF_K:
1676	case BPF_JMP | BPF_JLE | BPF_K:
1677	case BPF_JMP | BPF_JSLT | BPF_K:
1678	case BPF_JMP | BPF_JSLE | BPF_K:
1679	case BPF_JMP32 | BPF_JEQ | BPF_K:
1680	case BPF_JMP32 | BPF_JGT | BPF_K:
1681	case BPF_JMP32 | BPF_JGE | BPF_K:
1682	case BPF_JMP32 | BPF_JNE | BPF_K:
1683	case BPF_JMP32 | BPF_JSGT | BPF_K:
1684	case BPF_JMP32 | BPF_JSGE | BPF_K:
1685	case BPF_JMP32 | BPF_JSET | BPF_K:
1686	case BPF_JMP32 | BPF_JLT | BPF_K:
1687	case BPF_JMP32 | BPF_JLE | BPF_K:
1688	case BPF_JMP32 | BPF_JSLT | BPF_K:
1689	case BPF_JMP32 | BPF_JSLE | BPF_K:
1690		if (off == 0)
1691			break;
1692		rm = tmp2[0];
1693		rn = tmp2[1];
1694		/* Sign-extend immediate value */
1695		emit_a32_mov_se_i64(true, tmp2, imm, ctx);
1696go_jmp:
1697		/* Setup destination register */
1698		rd = arm_bpf_get_reg64(dst, tmp, ctx);
1699
1700		/* Check for the condition */
1701		emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code),
1702			  BPF_CLASS(code) == BPF_JMP);
1703
1704		/* Setup JUMP instruction */
1705		jmp_offset = bpf2a32_offset(i+off, i, ctx);
1706		switch (BPF_OP(code)) {
1707		case BPF_JNE:
1708		case BPF_JSET:
1709			_emit(ARM_COND_NE, ARM_B(jmp_offset), ctx);
1710			break;
1711		case BPF_JEQ:
1712			_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
1713			break;
1714		case BPF_JGT:
1715			_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
1716			break;
1717		case BPF_JGE:
1718			_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
1719			break;
1720		case BPF_JSGT:
1721			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
1722			break;
1723		case BPF_JSGE:
1724			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
1725			break;
1726		case BPF_JLE:
1727			_emit(ARM_COND_LS, ARM_B(jmp_offset), ctx);
1728			break;
1729		case BPF_JLT:
1730			_emit(ARM_COND_CC, ARM_B(jmp_offset), ctx);
1731			break;
1732		case BPF_JSLT:
1733			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
1734			break;
1735		case BPF_JSLE:
1736			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
1737			break;
1738		}
1739		break;
1740	/* JMP OFF */
1741	case BPF_JMP | BPF_JA:
1742	{
1743		if (off == 0)
1744			break;
1745		jmp_offset = bpf2a32_offset(i+off, i, ctx);
1746		check_imm24(jmp_offset);
1747		emit(ARM_B(jmp_offset), ctx);
1748		break;
1749	}
1750	/* tail call */
1751	case BPF_JMP | BPF_TAIL_CALL:
1752		if (emit_bpf_tail_call(ctx))
1753			return -EFAULT;
1754		break;
1755	/* function call */
1756	case BPF_JMP | BPF_CALL:
1757	{
1758		const s8 *r0 = bpf2a32[BPF_REG_0];
1759		const s8 *r1 = bpf2a32[BPF_REG_1];
1760		const s8 *r2 = bpf2a32[BPF_REG_2];
1761		const s8 *r3 = bpf2a32[BPF_REG_3];
1762		const s8 *r4 = bpf2a32[BPF_REG_4];
1763		const s8 *r5 = bpf2a32[BPF_REG_5];
1764		const u32 func = (u32)__bpf_call_base + (u32)imm;
1765
1766		emit_a32_mov_r64(true, r0, r1, ctx);
1767		emit_a32_mov_r64(true, r1, r2, ctx);
1768		emit_push_r64(r5, ctx);
1769		emit_push_r64(r4, ctx);
1770		emit_push_r64(r3, ctx);
1771
1772		emit_a32_mov_i(tmp[1], func, ctx);
1773		emit_blx_r(tmp[1], ctx);
1774
1775		emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // caller cleans up pushed args
1776		break;
1777	}
1778	/* function return */
1779	case BPF_JMP | BPF_EXIT:
1780		/* Optimization: when the last instruction is EXIT,
1781		 * simply fall through to the epilogue.
1782		 */
1783		if (i == ctx->prog->len - 1)
1784			break;
1785		jmp_offset = epilogue_offset(ctx);
1786		check_imm24(jmp_offset);
1787		emit(ARM_B(jmp_offset), ctx);
1788		break;
1789notyet:
1790		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
1791		return -EFAULT;
1792	default:
1793		pr_err_once("unknown opcode %02x\n", code);
1794		return -EINVAL;
1795	}
1796
1797	if (ctx->flags & FLAG_IMM_OVERFLOW)
1798		/*
1799		 * this instruction generated an overflow when
1800		 * trying to access the literal pool, so
1801		 * delegate this filter to the kernel interpreter.
1802		 */
1803		return -1;
1804	return 0;
1805}
1806
1807static int build_body(struct jit_ctx *ctx)
1808{
1809	const struct bpf_prog *prog = ctx->prog;
1810	unsigned int i;
1811
1812	for (i = 0; i < prog->len; i++) {
1813		const struct bpf_insn *insn = &(prog->insnsi[i]);
1814		int ret;
1815
1816		ret = build_insn(insn, ctx);
1817
1818		/* A positive return value means the insn consumed a second slot, as with loading a 64-bit immediate value. */
1819		if (ret > 0) {
1820			i++;
1821			if (ctx->target == NULL)
1822				ctx->offsets[i] = ctx->idx;
1823			continue;
1824		}
1825
1826		if (ctx->target == NULL)
1827			ctx->offsets[i] = ctx->idx;
1828
1829		/* If unsuccessful, return with the error code */
1830		if (ret)
1831			return ret;
1832	}
1833	return 0;
1834}
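
The ret > 0 path above exists because one eBPF instruction can occupy two struct bpf_insn slots. A sketch (BPF_LD_IMM64() and BPF_EXIT_INSN() are the standard macros from include/linux/filter.h):

	/* BPF_LD_IMM64() expands to two slots, so build_insn() returns 1
	 * for it and the loop advances i past the second (pseudo) slot,
	 * still recording an offsets[] entry for it:
	 *
	 *	struct bpf_insn insns[] = {
	 *		BPF_LD_IMM64(BPF_REG_0, 0x1122334455667788ULL), // 2 slots
	 *		BPF_EXIT_INSN(),                                // 1 slot
	 *	};
	 */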
1835
1836static int validate_code(struct jit_ctx *ctx)
1837{
1838	int i;
1839
1840	for (i = 0; i < ctx->idx; i++) {
1841		if (ctx->target[i] == __opcode_to_mem_arm(ARM_INST_UDF))
1842			return -1;
1843	}
1844
1845	return 0;
1846}
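
validate_code() works because the image was pre-filled with ARM_INST_UDF by jit_fill_hole() (the fill callback passed to bpf_jit_binary_alloc() below): any word the emission passes failed to overwrite still reads back as UDF. The check is effectively:

	/* for every emitted word w:
	 *	w != __opcode_to_mem_arm(ARM_INST_UDF)
	 * i.e. no hole survived the two emission passes.
	 */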
1847
1848void bpf_jit_compile(struct bpf_prog *prog)
1849{
1850	/* Nothing to do here. We support Internal BPF. */
1851}
1852
1853bool bpf_jit_needs_zext(void)
1854{
1855	return true;
1856}
1857
1858struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1859{
1860	struct bpf_prog *tmp, *orig_prog = prog;
1861	struct bpf_binary_header *header;
1862	bool tmp_blinded = false;
1863	struct jit_ctx ctx;
1864	unsigned int tmp_idx;
1865	unsigned int image_size;
1866	u8 *image_ptr;
1867
1868	/* If BPF JIT was not enabled then we must fall back to
1869	 * the interpreter.
1870	 */
1871	if (!prog->jit_requested)
1872		return orig_prog;
1873
1874	/* If constant blinding was enabled and we failed during blinding
1875	 * then we must fall back to the interpreter. Otherwise, we save
1876	 * the new JITed code.
1877	 */
1878	tmp = bpf_jit_blind_constants(prog);
1879
1880	if (IS_ERR(tmp))
1881		return orig_prog;
1882	if (tmp != prog) {
1883		tmp_blinded = true;
1884		prog = tmp;
1885	}
1886
1887	memset(&ctx, 0, sizeof(ctx));
1888	ctx.prog = prog;
1889	ctx.cpu_architecture = cpu_architecture();
1890
1891	/* If we are not able to allocate memory for offsets[],
1892	 * we must fall back to the interpreter.
1893	 */
1894	ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
1895	if (ctx.offsets == NULL) {
1896		prog = orig_prog;
1897		goto out;
1898	}
1899
1900	/* 1) fake pass to find the length of the JITed code,
1901	 * to compute ctx->offsets and the other context variables
1902	 * needed to emit the final JITed code.
1903	 * Also, calculate a random starting pointer for the JITed code,
1904	 * which is prefixed by a random number of fault instructions.
1905	 *
1906	 * If the first pass fails then there is no chance of it
1907	 * being successful in the second pass, so just fall back
1908	 * to the interpreter.
1909	 */
1910	if (build_body(&ctx)) {
1911		prog = orig_prog;
1912		goto out_off;
1913	}
1914
1915	tmp_idx = ctx.idx;
1916	build_prologue(&ctx);
1917	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
1918
1919	ctx.epilogue_offset = ctx.idx;
1920
1921#if __LINUX_ARM_ARCH__ < 7
1922	tmp_idx = ctx.idx;
1923	build_epilogue(&ctx);
1924	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
1925
1926	ctx.idx += ctx.imm_count;
1927	if (ctx.imm_count) {
1928		ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL);
1929		if (ctx.imms == NULL) {
1930			prog = orig_prog;
1931			goto out_off;
1932		}
1933	}
1934#else
1935	/* there's nothing after the epilogue on ARMv7 */
1936	build_epilogue(&ctx);
1937#endif
1938	/* Now we can get the actual image size of the JITed ARM code.
1939	 * Currently we do not emit Thumb-2 instructions for the JIT,
1940	 * although doing so could decrease the size of the image.
1941	 *
1942	 * As each ARM instruction is 32 bits long, we translate the
1943	 * number of JITed instructions into the size required to store
1944	 * the JITed code.
1945	 */
1946	image_size = sizeof(u32) * ctx.idx;
1947
1948	/* Now we know the size of the image to allocate */
1949	header = bpf_jit_binary_alloc(image_size, &image_ptr,
1950				      sizeof(u32), jit_fill_hole);
1951	/* If we are not able to allocate memory for the image,
1952	 * we must fall back to the interpreter.
1953	 */
1954	if (header == NULL) {
1955		prog = orig_prog;
1956		goto out_imms;
1957	}
1958
1959	/* 2) Actual pass to generate the final JIT code */
1960	ctx.target = (u32 *) image_ptr;
1961	ctx.idx = 0;
1962
1963	build_prologue(&ctx);
1964
1965	/* If building the body of the JITed code fails somehow,
1966	 * we fall back to the interpreter.
1967	 */
1968	if (build_body(&ctx) < 0) {
1969		image_ptr = NULL;
1970		bpf_jit_binary_free(header);
1971		prog = orig_prog;
1972		goto out_imms;
1973	}
1974	build_epilogue(&ctx);
1975
1976	/* 3) Extra pass to validate the JITed code */
1977	if (validate_code(&ctx)) {
1978		image_ptr = NULL;
1979		bpf_jit_binary_free(header);
1980		prog = orig_prog;
1981		goto out_imms;
1982	}
1983	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
1984
1985	if (bpf_jit_enable > 1)
1986		/* there are 2 passes here */
1987		bpf_jit_dump(prog->len, image_size, 2, ctx.target);
1988
1989	bpf_jit_binary_lock_ro(header);
1990	prog->bpf_func = (void *)ctx.target;
1991	prog->jited = 1;
1992	prog->jited_len = image_size;
1993
1994out_imms:
1995#if __LINUX_ARM_ARCH__ < 7
1996	if (ctx.imm_count)
1997		kfree(ctx.imms);
1998#endif
1999out_off:
2000	kfree(ctx.offsets);
2001out:
2002	if (tmp_blinded)
2003		bpf_jit_prog_release_other(prog, prog == orig_prog ?
2004					   tmp : orig_prog);
2005	return prog;
2006}
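
A compressed view of the flow above, as a sketch rather than kernel API:

	/*
	 * pass 1: build_body() with ctx.target == NULL
	 *         -> fills ctx.offsets[] and counts words in ctx.idx
	 * alloc : bpf_jit_binary_alloc(sizeof(u32) * ctx.idx, ...)
	 * pass 2: build_prologue() + build_body() + build_epilogue()
	 *         -> emits into ctx.target
	 * pass 3: validate_code() -> bail out if any UDF hole remains
	 * finish: flush_icache_range(), bpf_jit_binary_lock_ro()
	 */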
2007
v4.6
 
   1/*
   2 * Just-In-Time compiler for BPF filters on 32bit ARM
   3 *
   4 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License as published by the
   8 * Free Software Foundation; version 2 of the License.
   9 */
  10
  11#include <linux/bitops.h>
  12#include <linux/compiler.h>
  13#include <linux/errno.h>
  14#include <linux/filter.h>
  15#include <linux/netdevice.h>
  16#include <linux/string.h>
  17#include <linux/slab.h>
  18#include <linux/if_vlan.h>
  19
  20#include <asm/cacheflush.h>
  21#include <asm/hwcap.h>
  22#include <asm/opcodes.h>
  23
  24#include "bpf_jit_32.h"
  25
  26/*
  27 * ABI:
  28 *
  29 * r0	scratch register
  30 * r4	BPF register A
  31 * r5	BPF register X
  32 * r6	pointer to the skb
  33 * r7	skb->data
  34 * r8	skb_headlen(skb)
  35 */
  36
  37#define r_scratch	ARM_R0
  38/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
  39#define r_off		ARM_R1
  40#define r_A		ARM_R4
  41#define r_X		ARM_R5
  42#define r_skb		ARM_R6
  43#define r_skb_data	ARM_R7
  44#define r_skb_hl	ARM_R8
  45
  46#define SCRATCH_SP_OFFSET	0
  47#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))
  48
  49#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
  50#define SEEN_MEM_WORD(k)	(1 << (k))
  51#define SEEN_X			(1 << BPF_MEMWORDS)
  52#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
  53#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
  54#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))
  55
  56#define FLAG_NEED_X_RESET	(1 << 0)
  57#define FLAG_IMM_OVERFLOW	(1 << 1)
  58
  59struct jit_ctx {
  60	const struct bpf_prog *skf;
  61	unsigned idx;
  62	unsigned prologue_bytes;
  63	int ret0_fp_idx;
  64	u32 seen;
  65	u32 flags;
  66	u32 *offsets;
  67	u32 *target;
  68#if __LINUX_ARM_ARCH__ < 7
  69	u16 epilogue_bytes;
  70	u16 imm_count;
  71	u32 *imms;
  72#endif
  73};
  74
  75int bpf_jit_enable __read_mostly;
  76
  77static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
  78		      unsigned int size)
  79{
  80	void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
  81
  82	if (!ptr)
  83		return -EFAULT;
  84	memcpy(ret, ptr, size);
  85	return 0;
  86}
  87
  88static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
  89{
  90	u8 ret;
  91	int err;
  92
  93	if (offset < 0)
  94		err = call_neg_helper(skb, offset, &ret, 1);
  95	else
  96		err = skb_copy_bits(skb, offset, &ret, 1);
  97
  98	return (u64)err << 32 | ret;
  99}
 100
 101static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
 102{
 103	u16 ret;
 104	int err;
 105
 106	if (offset < 0)
 107		err = call_neg_helper(skb, offset, &ret, 2);
 108	else
 109		err = skb_copy_bits(skb, offset, &ret, 2);
 110
 111	return (u64)err << 32 | ntohs(ret);
 112}
 113
 114static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
 115{
 116	u32 ret;
 117	int err;
 118
 119	if (offset < 0)
 120		err = call_neg_helper(skb, offset, &ret, 4);
 121	else
 122		err = skb_copy_bits(skb, offset, &ret, 4);
 123
 124	return (u64)err << 32 | ntohl(ret);
 125}
 126
 127/*
  128 * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
 129 * (where the assembly routines like __aeabi_uidiv could cause problems).
 130 */
 131static u32 jit_udiv(u32 dividend, u32 divisor)
 132{
 133	return dividend / divisor;
 134}
 135
 136static u32 jit_mod(u32 dividend, u32 divisor)
 137{
 138	return dividend % divisor;
 139}
 140
 141static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
 142{
 143	inst |= (cond << 28);
 144	inst = __opcode_to_mem_arm(inst);
 145
 146	if (ctx->target != NULL)
 147		ctx->target[ctx->idx] = inst;
 148
 149	ctx->idx++;
 150}
 151
 152/*
 153 * Emit an instruction that will be executed unconditionally.
 154 */
 155static inline void emit(u32 inst, struct jit_ctx *ctx)
 156{
 157	_emit(ARM_COND_AL, inst, ctx);
 158}
 159
 160static u16 saved_regs(struct jit_ctx *ctx)
 161{
 162	u16 ret = 0;
 163
 164	if ((ctx->skf->len > 1) ||
 165	    (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
 166		ret |= 1 << r_A;
 167
 168#ifdef CONFIG_FRAME_POINTER
 169	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
 170#else
 171	if (ctx->seen & SEEN_CALL)
 172		ret |= 1 << ARM_LR;
 173#endif
 174	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
 175		ret |= 1 << r_skb;
 176	if (ctx->seen & SEEN_DATA)
 177		ret |= (1 << r_skb_data) | (1 << r_skb_hl);
 178	if (ctx->seen & SEEN_X)
 179		ret |= 1 << r_X;
 180
 181	return ret;
 182}
 183
 184static inline int mem_words_used(struct jit_ctx *ctx)
 185{
  186	/* yes, we do waste some stack space IF there are "holes" in the set */
 187	return fls(ctx->seen & SEEN_MEM);
 188}
 189
 190static void jit_fill_hole(void *area, unsigned int size)
 191{
 192	u32 *ptr;
 193	/* We are guaranteed to have aligned memory. */
 194	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
 195		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
 196}
 197
 198static void build_prologue(struct jit_ctx *ctx)
 199{
 200	u16 reg_set = saved_regs(ctx);
 201	u16 off;
 202
 203#ifdef CONFIG_FRAME_POINTER
 204	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
 205	emit(ARM_PUSH(reg_set), ctx);
 206	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
 207#else
 208	if (reg_set)
 209		emit(ARM_PUSH(reg_set), ctx);
 210#endif
 211
 212	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
 213		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);
 214
 215	if (ctx->seen & SEEN_DATA) {
 216		off = offsetof(struct sk_buff, data);
 217		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
 218		/* headlen = len - data_len */
 219		off = offsetof(struct sk_buff, len);
 220		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
 221		off = offsetof(struct sk_buff, data_len);
 222		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
 223		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
 224	}
 225
 226	if (ctx->flags & FLAG_NEED_X_RESET)
 227		emit(ARM_MOV_I(r_X, 0), ctx);
 228
 229	/* do not leak kernel data to userspace */
 230	if (bpf_needs_clear_a(&ctx->skf->insns[0]))
 231		emit(ARM_MOV_I(r_A, 0), ctx);
 232
 233	/* stack space for the BPF_MEM words */
 234	if (ctx->seen & SEEN_MEM)
 235		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
 236}
 237
 238static void build_epilogue(struct jit_ctx *ctx)
 239{
 240	u16 reg_set = saved_regs(ctx);
 241
 242	if (ctx->seen & SEEN_MEM)
 243		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
 244
 245	reg_set &= ~(1 << ARM_LR);
 246
 247#ifdef CONFIG_FRAME_POINTER
 248	/* the first instruction of the prologue was: mov ip, sp */
 249	reg_set &= ~(1 << ARM_IP);
 250	reg_set |= (1 << ARM_SP);
 251	emit(ARM_LDM(ARM_SP, reg_set), ctx);
 252#else
 253	if (reg_set) {
 254		if (ctx->seen & SEEN_CALL)
 255			reg_set |= 1 << ARM_PC;
 256		emit(ARM_POP(reg_set), ctx);
 257	}
 258
 259	if (!(ctx->seen & SEEN_CALL))
 260		emit(ARM_BX(ARM_LR), ctx);
 261#endif
 262}
 263
 264static int16_t imm8m(u32 x)
 265{
 266	u32 rot;
 267
 268	for (rot = 0; rot < 16; rot++)
 269		if ((x & ~ror32(0xff, 2 * rot)) == 0)
 270			return rol32(x, 2 * rot) | (rot << 8);
 271
 272	return -1;
 273}
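
imm8m() searches for ARM's "modified immediate" encoding: an 8-bit value rotated right by an even amount, with the 4-bit rotation count returned in bits 11:8. A minimal user-space sketch of the same search (ror32/rol32 reimplemented here, since the kernel helpers are not available):

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t ror32(uint32_t x, unsigned int n)
	{
		n &= 31;
		return n ? (x >> n) | (x << (32 - n)) : x;
	}

	static uint32_t rol32(uint32_t x, unsigned int n)
	{
		n &= 31;
		return n ? (x << n) | (x >> (32 - n)) : x;
	}

	static int imm8m(uint32_t x)
	{
		uint32_t rot;

		for (rot = 0; rot < 16; rot++)
			if ((x & ~ror32(0xff, 2 * rot)) == 0)
				return rol32(x, 2 * rot) | (rot << 8);
		return -1;
	}

	int main(void)
	{
		printf("%#x\n", imm8m(0x00ff0000)); /* 0x8ff: 0xff ror 16 (rot = 8) */
		printf("%#x\n", imm8m(24));         /* 0x18: fits directly (rot = 0) */
		printf("%d\n",  imm8m(0x101));      /* -1: no 8-bit window covers it */
		return 0;
	}

The 0x8ff result is the same encoding the emit_swap16() comment further down refers to.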
 274
 275#if __LINUX_ARM_ARCH__ < 7
 276
 277static u16 imm_offset(u32 k, struct jit_ctx *ctx)
 278{
 279	unsigned i = 0, offset;
 280	u16 imm;
 281
 282	/* on the "fake" run we just count them (duplicates included) */
 283	if (ctx->target == NULL) {
 284		ctx->imm_count++;
 285		return 0;
 286	}
 287
 288	while ((i < ctx->imm_count) && ctx->imms[i]) {
 289		if (ctx->imms[i] == k)
 290			break;
 291		i++;
 292	}
 293
 294	if (ctx->imms[i] == 0)
 295		ctx->imms[i] = k;
 296
 297	/* constants go just after the epilogue */
 298	offset =  ctx->offsets[ctx->skf->len];
 299	offset += ctx->prologue_bytes;
 300	offset += ctx->epilogue_bytes;
 301	offset += i * 4;
 302
 303	ctx->target[offset / 4] = k;
 304
 305	/* PC in ARM mode == address of the instruction + 8 */
 306	imm = offset - (8 + ctx->idx * 4);
 307
 308	if (imm & ~0xfff) {
 309		/*
  310		 * the literal pool is too far away, signal it via the flags.
  311		 * unfortunately we can only detect this on the second pass.
 312		 */
 313		ctx->flags |= FLAG_IMM_OVERFLOW;
 314		return 0;
 315	}
 316
 317	return imm;
 318}
 319
 320#endif /* __LINUX_ARM_ARCH__ */
 321
 322/*
 323 * Move an immediate that's not an imm8m to a core register.
 324 */
 325static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
 326{
 327#if __LINUX_ARM_ARCH__ < 7
 328	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
 329#else
 330	emit(ARM_MOVW(rd, val & 0xffff), ctx);
 331	if (val > 0xffff)
 332		emit(ARM_MOVT(rd, val >> 16), ctx);
 333#endif
 334}
 335
 336static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
 337{
 338	int imm12 = imm8m(val);
 339
 340	if (imm12 >= 0)
 341		emit(ARM_MOV_I(rd, imm12), ctx);
 342	else
 343		emit_mov_i_no8m(rd, val, ctx);
 344}
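
The effect of the two strategies, in illustrative assembly (register and values arbitrary):

	/*
	 *   emit_mov_i(r0, 24):          mov  r0, #24          ; imm8m() hit
	 *   emit_mov_i(r0, 0x00ff0000):  mov  r0, #0x00ff0000  ; imm8m() = 0x8ff
	 *   emit_mov_i(r0, 0x12345678):  not encodable, so:
	 *     ARMv7+:    movw r0, #0x5678 ; movt r0, #0x1234
	 *     pre-ARMv7: ldr  r0, [pc, #off]  ; value parked in the literal pool
	 */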
 345
 346#if __LINUX_ARM_ARCH__ < 6
 347
 348static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
 349{
 350	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
 351	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
 352	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
 353	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
 354	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
 355	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
 356	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
 357	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
 358}
 359
 360static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
 361{
 362	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
 363	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
 364	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
 365}
 366
 367static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
 368{
 369	/* r_dst = (r_src << 8) | (r_src >> 8) */
 370	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
 371	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);
 372
 373	/*
 374	 * we need to mask out the bits set in r_dst[23:16] due to
 375	 * the first shift instruction.
 376	 *
 377	 * note that 0x8ff is the encoded immediate 0x00ff0000.
 378	 */
 379	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
 380}
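
Pre-ARMv6 cores have neither safe unaligned word/halfword loads nor the REV/REV16 byte-reversal instructions used in the #else branch below, hence the byte-at-a-time sequences above. What emit_load_be32() computes, rendered as C (a sketch; the helper name is made up):

	#include <stdint.h>

	static inline uint32_t load_be32_bytewise(const uint8_t *p)
	{
		/* network byte order: p[0] is the most significant byte */
		return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
		       ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
	}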
 381
 382#else  /* ARMv6+ */
 383
 384static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
 385{
 386	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
 387#ifdef __LITTLE_ENDIAN
 388	_emit(cond, ARM_REV(r_res, r_res), ctx);
 389#endif
 390}
 391
 392static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
 393{
 394	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
 395#ifdef __LITTLE_ENDIAN
 396	_emit(cond, ARM_REV16(r_res, r_res), ctx);
 397#endif
 398}
 399
 400static inline void emit_swap16(u8 r_dst __maybe_unused,
 401			       u8 r_src __maybe_unused,
 402			       struct jit_ctx *ctx __maybe_unused)
 403{
 404#ifdef __LITTLE_ENDIAN
 405	emit(ARM_REV16(r_dst, r_src), ctx);
 406#endif
 407}
 408
 409#endif /* __LINUX_ARM_ARCH__ < 6 */
 410
 411
 412/* Compute the immediate value for a PC-relative branch. */
 413static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
 414{
 415	u32 imm;
 416
 417	if (ctx->target == NULL)
 418		return 0;
 419	/*
 420	 * BPF allows only forward jumps and the offset of the target is
 421	 * still the one computed during the first pass.
 422	 */
 423	imm  = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);
 424
 425	return imm >> 2;
 426}
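
A worked example with made-up numbers: suppose the target instruction was placed at ctx->offsets[tgt] = 0x40 into the body and the prologue is 0x10 bytes, so the target sits 0x50 bytes into the image. If the branch is emitted at ctx->idx = 8 (byte 0x20, since idx counts the prologue words already emitted in this pass), ARM reads PC there as 0x20 + 8 = 0x28:

	/* imm = 0x40 + 0x10 - (8 * 4 + 8)
	 *     = 0x50 - 0x28 = 0x28 bytes, returned as 0x28 >> 2 = 10 words
	 */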
 427
 428#define OP_IMM3(op, r1, r2, imm_val, ctx)				\
 429	do {								\
 430		imm12 = imm8m(imm_val);					\
 431		if (imm12 < 0) {					\
 432			emit_mov_i_no8m(r_scratch, imm_val, ctx);	\
 433			emit(op ## _R((r1), (r2), r_scratch), ctx);	\
 434		} else {						\
 435			emit(op ## _I((r1), (r2), imm12), ctx);		\
 436		}							\
 437	} while (0)
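
For example (registers per the mapping above, values illustrative), OP_IMM3(ARM_ADD, r_A, r_A, k, ctx) emits:

	/*   k = 0x00ff0000:  add r4, r4, #0x00ff0000      ; imm8m() = 0x8ff
	 *   k = 0x12345:     <materialize 0x12345 in r0>  ; imm8m() = -1
	 *                    add r4, r4, r0
	 */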
 438
 439static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
 440{
 441	if (ctx->ret0_fp_idx >= 0) {
 442		_emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
 443		/* NOP to keep the size constant between passes */
 444		emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
 445	} else {
 446		_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
 447		_emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
 448	}
 449}
 450
 451static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
 452{
 453#if __LINUX_ARM_ARCH__ < 5
 454	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
 455
 456	if (elf_hwcap & HWCAP_THUMB)
 457		emit(ARM_BX(tgt_reg), ctx);
 458	else
 459		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
 460#else
 461	emit(ARM_BLX_R(tgt_reg), ctx);
 462#endif
 463}
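
On pre-ARMv5 cores there is no BLX, so the return address is built by hand. The trick is that reading PC yields the current instruction's address + 8, which is exactly the instruction after the following branch (addresses illustrative):

	/*   0x00: mov lr, pc     ; lr = 0x00 + 8 = 0x08
	 *   0x04: bx  tgt_reg    ; or mov pc, tgt_reg without Thumb support
	 *   0x08: ...            ; callee returns here through lr
	 */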
 464
 465static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx,
 466				int bpf_op)
 467{
 468#if __LINUX_ARM_ARCH__ == 7
 469	if (elf_hwcap & HWCAP_IDIVA) {
 470		if (bpf_op == BPF_DIV)
 471			emit(ARM_UDIV(rd, rm, rn), ctx);
 472		else {
 473			emit(ARM_UDIV(ARM_R3, rm, rn), ctx);
 474			emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx);
 475		}
 476		return;
 477	}
 478#endif
 479
 480	/*
 481	 * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
 482	 * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
 483	 * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
 484	 * before using it as a source for ARM_R1.
 485	 *
 486	 * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
 487	 * ARM_R5 (r_X) so there is no particular register overlap
 488	 * issues.
 489	 */
 490	if (rn != ARM_R1)
 491		emit(ARM_MOV_R(ARM_R1, rn), ctx);
 492	if (rm != ARM_R0)
 493		emit(ARM_MOV_R(ARM_R0, rm), ctx);
 494
 495	ctx->seen |= SEEN_CALL;
 496	emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? (u32)jit_udiv : (u32)jit_mod,
 497		   ctx);
 498	emit_blx_r(ARM_R3, ctx);
 499
 500	if (rd != ARM_R0)
 501		emit(ARM_MOV_R(rd, ARM_R0), ctx);
 502}
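
To make the hazard described in the comment above concrete, here is what would go wrong if rm were staged first in the BPF_K case (rm == ARM_R4, rn == ARM_R0):

	/*   mov r0, r4    ; clobbers r0, which still held the divisor (rn)
	 *   mov r1, r0    ; r1 now gets the dividend again -- divisor lost
	 *
	 * staging rn into r1 before touching r0 avoids the clobber
	 */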
 503
 504static inline void update_on_xread(struct jit_ctx *ctx)
 505{
 506	if (!(ctx->seen & SEEN_X))
 507		ctx->flags |= FLAG_NEED_X_RESET;
 508
 509	ctx->seen |= SEEN_X;
 510}
 511
 512static int build_body(struct jit_ctx *ctx)
 513{
 514	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
 515	const struct bpf_prog *prog = ctx->skf;
 516	const struct sock_filter *inst;
 517	unsigned i, load_order, off, condt;
 518	int imm12;
 519	u32 k;
 520
 521	for (i = 0; i < prog->len; i++) {
 522		u16 code;
 523
 524		inst = &(prog->insns[i]);
 525		/* K as an immediate value operand */
 526		k = inst->k;
 527		code = bpf_anc_helper(inst);
 528
 529		/* compute offsets only in the fake pass */
 530		if (ctx->target == NULL)
 531			ctx->offsets[i] = ctx->idx * 4;
 532
 533		switch (code) {
 534		case BPF_LD | BPF_IMM:
 535			emit_mov_i(r_A, k, ctx);
 536			break;
 537		case BPF_LD | BPF_W | BPF_LEN:
 538			ctx->seen |= SEEN_SKB;
 539			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
 540			emit(ARM_LDR_I(r_A, r_skb,
 541				       offsetof(struct sk_buff, len)), ctx);
 542			break;
 543		case BPF_LD | BPF_MEM:
 544			/* A = scratch[k] */
 545			ctx->seen |= SEEN_MEM_WORD(k);
 546			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
 547			break;
 548		case BPF_LD | BPF_W | BPF_ABS:
 549			load_order = 2;
 550			goto load;
 551		case BPF_LD | BPF_H | BPF_ABS:
 552			load_order = 1;
 553			goto load;
 554		case BPF_LD | BPF_B | BPF_ABS:
 555			load_order = 0;
 556load:
 557			emit_mov_i(r_off, k, ctx);
 558load_common:
 559			ctx->seen |= SEEN_DATA | SEEN_CALL;
 560
 561			if (load_order > 0) {
 562				emit(ARM_SUB_I(r_scratch, r_skb_hl,
 563					       1 << load_order), ctx);
 564				emit(ARM_CMP_R(r_scratch, r_off), ctx);
 565				condt = ARM_COND_GE;
 566			} else {
 567				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
 568				condt = ARM_COND_HI;
 569			}
 570
  571			/*
  572			 * test for a negative offset, but only if we are
  573			 * currently scheduled to take the fast path. this
  574			 * updates the flags so that the conditional fast
  575			 * path instructions are skipped (and the slow path
  576			 * taken) if the offset is negative.
  577			 *
  578			 * for load_order == 0 the HI condition will
  579			 * make loads at offset 0 take the slow path too.
  580			 */
 581			_emit(condt, ARM_CMP_I(r_off, 0), ctx);
 582
 583			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
 584			      ctx);
 585
 586			if (load_order == 0)
 587				_emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
 588				      ctx);
 589			else if (load_order == 1)
 590				emit_load_be16(condt, r_A, r_scratch, ctx);
 591			else if (load_order == 2)
 592				emit_load_be32(condt, r_A, r_scratch, ctx);
 593
 594			_emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);
 595
 596			/* the slowpath */
 597			emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
 598			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
 599			/* the offset is already in R1 */
 600			emit_blx_r(ARM_R3, ctx);
 601			/* check the result of skb_copy_bits */
 602			emit(ARM_CMP_I(ARM_R1, 0), ctx);
 603			emit_err_ret(ARM_COND_NE, ctx);
 604			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
 605			break;
 606		case BPF_LD | BPF_W | BPF_IND:
 607			load_order = 2;
 608			goto load_ind;
 609		case BPF_LD | BPF_H | BPF_IND:
 610			load_order = 1;
 611			goto load_ind;
 612		case BPF_LD | BPF_B | BPF_IND:
 613			load_order = 0;
 614load_ind:
 615			update_on_xread(ctx);
 616			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
 617			goto load_common;
 618		case BPF_LDX | BPF_IMM:
 619			ctx->seen |= SEEN_X;
 620			emit_mov_i(r_X, k, ctx);
 621			break;
 622		case BPF_LDX | BPF_W | BPF_LEN:
 623			ctx->seen |= SEEN_X | SEEN_SKB;
 624			emit(ARM_LDR_I(r_X, r_skb,
 625				       offsetof(struct sk_buff, len)), ctx);
 626			break;
 627		case BPF_LDX | BPF_MEM:
 628			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
 629			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
 630			break;
 631		case BPF_LDX | BPF_B | BPF_MSH:
 632			/* x = ((*(frame + k)) & 0xf) << 2; */
 633			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
 634			/* the interpreter should deal with the negative K */
 635			if ((int)k < 0)
 636				return -1;
 637			/* offset in r1: we might have to take the slow path */
 638			emit_mov_i(r_off, k, ctx);
 639			emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
 640
 641			/* load in r0: common with the slowpath */
 642			_emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
 643						      ARM_R1), ctx);
 644			/*
 645			 * emit_mov_i() might generate one or two instructions,
 646			 * the same holds for emit_blx_r()
 647			 */
 648			_emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);
 649
 650			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
 651			/* r_off is r1 */
 652			emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
 653			emit_blx_r(ARM_R3, ctx);
 654			/* check the return value of skb_copy_bits */
 655			emit(ARM_CMP_I(ARM_R1, 0), ctx);
 656			emit_err_ret(ARM_COND_NE, ctx);
 657
 658			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
 659			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
 660			break;
 661		case BPF_ST:
 662			ctx->seen |= SEEN_MEM_WORD(k);
 663			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
 664			break;
 665		case BPF_STX:
 666			update_on_xread(ctx);
 667			ctx->seen |= SEEN_MEM_WORD(k);
 668			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
 669			break;
 670		case BPF_ALU | BPF_ADD | BPF_K:
 671			/* A += K */
 672			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
 673			break;
 674		case BPF_ALU | BPF_ADD | BPF_X:
 675			update_on_xread(ctx);
 676			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
 677			break;
 678		case BPF_ALU | BPF_SUB | BPF_K:
 679			/* A -= K */
 680			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
 681			break;
 682		case BPF_ALU | BPF_SUB | BPF_X:
 683			update_on_xread(ctx);
 684			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
 685			break;
 686		case BPF_ALU | BPF_MUL | BPF_K:
 687			/* A *= K */
 688			emit_mov_i(r_scratch, k, ctx);
 689			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
 690			break;
 691		case BPF_ALU | BPF_MUL | BPF_X:
 692			update_on_xread(ctx);
 693			emit(ARM_MUL(r_A, r_A, r_X), ctx);
 694			break;
 695		case BPF_ALU | BPF_DIV | BPF_K:
 696			if (k == 1)
 697				break;
 698			emit_mov_i(r_scratch, k, ctx);
 699			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV);
 700			break;
 701		case BPF_ALU | BPF_DIV | BPF_X:
 702			update_on_xread(ctx);
 703			emit(ARM_CMP_I(r_X, 0), ctx);
 704			emit_err_ret(ARM_COND_EQ, ctx);
 705			emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV);
 706			break;
 707		case BPF_ALU | BPF_MOD | BPF_K:
 708			if (k == 1) {
 709				emit_mov_i(r_A, 0, ctx);
 710				break;
 711			}
 712			emit_mov_i(r_scratch, k, ctx);
 713			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD);
 714			break;
 715		case BPF_ALU | BPF_MOD | BPF_X:
 716			update_on_xread(ctx);
 717			emit(ARM_CMP_I(r_X, 0), ctx);
 718			emit_err_ret(ARM_COND_EQ, ctx);
 719			emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD);
 720			break;
 721		case BPF_ALU | BPF_OR | BPF_K:
 722			/* A |= K */
 723			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
 724			break;
 725		case BPF_ALU | BPF_OR | BPF_X:
 726			update_on_xread(ctx);
 727			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
 728			break;
 729		case BPF_ALU | BPF_XOR | BPF_K:
 730			/* A ^= K; */
 731			OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
 732			break;
 733		case BPF_ANC | SKF_AD_ALU_XOR_X:
 734		case BPF_ALU | BPF_XOR | BPF_X:
 735			/* A ^= X */
 736			update_on_xread(ctx);
 737			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
 738			break;
 739		case BPF_ALU | BPF_AND | BPF_K:
 740			/* A &= K */
 741			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
 742			break;
 743		case BPF_ALU | BPF_AND | BPF_X:
 744			update_on_xread(ctx);
 745			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
 746			break;
 747		case BPF_ALU | BPF_LSH | BPF_K:
 748			if (unlikely(k > 31))
 749				return -1;
 750			emit(ARM_LSL_I(r_A, r_A, k), ctx);
 751			break;
 752		case BPF_ALU | BPF_LSH | BPF_X:
 753			update_on_xread(ctx);
 754			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
 755			break;
 756		case BPF_ALU | BPF_RSH | BPF_K:
 757			if (unlikely(k > 31))
 758				return -1;
 759			if (k)
 760				emit(ARM_LSR_I(r_A, r_A, k), ctx);
 761			break;
 762		case BPF_ALU | BPF_RSH | BPF_X:
 763			update_on_xread(ctx);
 764			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
 765			break;
 766		case BPF_ALU | BPF_NEG:
 767			/* A = -A */
 768			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
 769			break;
 770		case BPF_JMP | BPF_JA:
 771			/* pc += K */
 772			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
 773			break;
 774		case BPF_JMP | BPF_JEQ | BPF_K:
 775			/* pc += (A == K) ? pc->jt : pc->jf */
 776			condt  = ARM_COND_EQ;
 777			goto cmp_imm;
 778		case BPF_JMP | BPF_JGT | BPF_K:
 779			/* pc += (A > K) ? pc->jt : pc->jf */
 780			condt  = ARM_COND_HI;
 781			goto cmp_imm;
 782		case BPF_JMP | BPF_JGE | BPF_K:
 783			/* pc += (A >= K) ? pc->jt : pc->jf */
 784			condt  = ARM_COND_HS;
 785cmp_imm:
 786			imm12 = imm8m(k);
 787			if (imm12 < 0) {
 788				emit_mov_i_no8m(r_scratch, k, ctx);
 789				emit(ARM_CMP_R(r_A, r_scratch), ctx);
 790			} else {
 791				emit(ARM_CMP_I(r_A, imm12), ctx);
 792			}
 793cond_jump:
 794			if (inst->jt)
 795				_emit(condt, ARM_B(b_imm(i + inst->jt + 1,
 796						   ctx)), ctx);
 797			if (inst->jf)
 798				_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
 799							     ctx)), ctx);
 800			break;
 801		case BPF_JMP | BPF_JEQ | BPF_X:
 802			/* pc += (A == X) ? pc->jt : pc->jf */
 803			condt   = ARM_COND_EQ;
 804			goto cmp_x;
 805		case BPF_JMP | BPF_JGT | BPF_X:
 806			/* pc += (A > X) ? pc->jt : pc->jf */
 807			condt   = ARM_COND_HI;
 808			goto cmp_x;
 809		case BPF_JMP | BPF_JGE | BPF_X:
 810			/* pc += (A >= X) ? pc->jt : pc->jf */
 811			condt   = ARM_COND_CS;
 812cmp_x:
 813			update_on_xread(ctx);
 814			emit(ARM_CMP_R(r_A, r_X), ctx);
 815			goto cond_jump;
 816		case BPF_JMP | BPF_JSET | BPF_K:
 817			/* pc += (A & K) ? pc->jt : pc->jf */
 818			condt  = ARM_COND_NE;
 819			/* not set iff all zeroes iff Z==1 iff EQ */
 820
 821			imm12 = imm8m(k);
 822			if (imm12 < 0) {
 823				emit_mov_i_no8m(r_scratch, k, ctx);
 824				emit(ARM_TST_R(r_A, r_scratch), ctx);
 825			} else {
 826				emit(ARM_TST_I(r_A, imm12), ctx);
 827			}
 828			goto cond_jump;
 829		case BPF_JMP | BPF_JSET | BPF_X:
 830			/* pc += (A & X) ? pc->jt : pc->jf */
 831			update_on_xread(ctx);
 832			condt  = ARM_COND_NE;
 833			emit(ARM_TST_R(r_A, r_X), ctx);
 834			goto cond_jump;
 835		case BPF_RET | BPF_A:
 836			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
 837			goto b_epilogue;
 838		case BPF_RET | BPF_K:
 839			if ((k == 0) && (ctx->ret0_fp_idx < 0))
 840				ctx->ret0_fp_idx = i;
 841			emit_mov_i(ARM_R0, k, ctx);
 842b_epilogue:
 843			if (i != ctx->skf->len - 1)
 844				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
 845			break;
 846		case BPF_MISC | BPF_TAX:
 847			/* X = A */
 848			ctx->seen |= SEEN_X;
 849			emit(ARM_MOV_R(r_X, r_A), ctx);
 850			break;
 851		case BPF_MISC | BPF_TXA:
 852			/* A = X */
 853			update_on_xread(ctx);
 854			emit(ARM_MOV_R(r_A, r_X), ctx);
 855			break;
 856		case BPF_ANC | SKF_AD_PROTOCOL:
 857			/* A = ntohs(skb->protocol) */
 858			ctx->seen |= SEEN_SKB;
 859			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 860						  protocol) != 2);
 861			off = offsetof(struct sk_buff, protocol);
 862			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
 863			emit_swap16(r_A, r_scratch, ctx);
 864			break;
 865		case BPF_ANC | SKF_AD_CPU:
 866			/* r_scratch = current_thread_info() */
 867			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
 868			/* A = current_thread_info()->cpu */
 869			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
 870			off = offsetof(struct thread_info, cpu);
 871			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
 872			break;
 873		case BPF_ANC | SKF_AD_IFINDEX:
 874		case BPF_ANC | SKF_AD_HATYPE:
 875			/* A = skb->dev->ifindex */
 876			/* A = skb->dev->type */
 877			ctx->seen |= SEEN_SKB;
 878			off = offsetof(struct sk_buff, dev);
 879			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
 880
 881			emit(ARM_CMP_I(r_scratch, 0), ctx);
 882			emit_err_ret(ARM_COND_EQ, ctx);
 883
 884			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
 885						  ifindex) != 4);
 886			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
 887						  type) != 2);
 888
 889			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
 890				off = offsetof(struct net_device, ifindex);
 891				emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
 892			} else {
 893				/*
 894				 * offset of field "type" in "struct
 895				 * net_device" is above what can be
 896				 * used in the ldrh rd, [rn, #imm]
 897				 * instruction, so load the offset in
 898				 * a register and use ldrh rd, [rn, rm]
 899				 */
 900				off = offsetof(struct net_device, type);
 901				emit_mov_i(ARM_R3, off, ctx);
 902				emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
 903			}
 904			break;
 905		case BPF_ANC | SKF_AD_MARK:
 906			ctx->seen |= SEEN_SKB;
 907			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 908			off = offsetof(struct sk_buff, mark);
 909			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
 910			break;
 911		case BPF_ANC | SKF_AD_RXHASH:
 912			ctx->seen |= SEEN_SKB;
 913			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
 914			off = offsetof(struct sk_buff, hash);
 915			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
 916			break;
 917		case BPF_ANC | SKF_AD_VLAN_TAG:
 918		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
 919			ctx->seen |= SEEN_SKB;
 920			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
 921			off = offsetof(struct sk_buff, vlan_tci);
 922			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
 923			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
 924				OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
 925			else {
 926				OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
 927				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
 928			}
 929			break;
 930		case BPF_ANC | SKF_AD_PKTTYPE:
 931			ctx->seen |= SEEN_SKB;
 932			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 933						  __pkt_type_offset[0]) != 1);
 934			off = PKT_TYPE_OFFSET();
 935			emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
 936			emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
 937#ifdef __BIG_ENDIAN_BITFIELD
 938			emit(ARM_LSR_I(r_A, r_A, 5), ctx);
 939#endif
 940			break;
 941		case BPF_ANC | SKF_AD_QUEUE:
 942			ctx->seen |= SEEN_SKB;
 943			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 944						  queue_mapping) != 2);
 945			BUILD_BUG_ON(offsetof(struct sk_buff,
 946					      queue_mapping) > 0xff);
 947			off = offsetof(struct sk_buff, queue_mapping);
 948			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
 949			break;
 950		case BPF_ANC | SKF_AD_PAY_OFFSET:
 951			ctx->seen |= SEEN_SKB | SEEN_CALL;
 952
 953			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
 954			emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
 955			emit_blx_r(ARM_R3, ctx);
 956			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
 957			break;
 958		case BPF_LDX | BPF_W | BPF_ABS:
 959			/*
 960			 * load a 32bit word from struct seccomp_data.
 961			 * seccomp_check_filter() will already have checked
 962			 * that k is 32bit aligned and lies within the
 963			 * struct seccomp_data.
 964			 */
 965			ctx->seen |= SEEN_SKB;
 966			emit(ARM_LDR_I(r_A, r_skb, k), ctx);
 967			break;
 968		default:
 969			return -1;
 970		}
 971
 972		if (ctx->flags & FLAG_IMM_OVERFLOW)
 973			/*
 974			 * this instruction generated an overflow when
 975			 * trying to access the literal pool, so
 976			 * delegate this filter to the kernel interpreter.
 977			 */
 978			return -1;
 979	}
 980
 981	/* compute offsets only during the first pass */
 982	if (ctx->target == NULL)
 983		ctx->offsets[i] = ctx->idx * 4;
 984
 985	return 0;
 986}
 987
 988
 989void bpf_jit_compile(struct bpf_prog *fp)
 990{
 991	struct bpf_binary_header *header;
 992	struct jit_ctx ctx;
 993	unsigned tmp_idx;
 994	unsigned alloc_size;
 995	u8 *target_ptr;
 996
 997	if (!bpf_jit_enable)
 998		return;
 999
1000	memset(&ctx, 0, sizeof(ctx));
1001	ctx.skf		= fp;
1002	ctx.ret0_fp_idx = -1;
1003
1004	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
1005	if (ctx.offsets == NULL)
1006		return;
1007
1008	/* fake pass to fill in the ctx->seen */
1009	if (unlikely(build_body(&ctx)))
1010		goto out;
1011
1012	tmp_idx = ctx.idx;
1013	build_prologue(&ctx);
1014	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
1015
1016#if __LINUX_ARM_ARCH__ < 7
1017	tmp_idx = ctx.idx;
1018	build_epilogue(&ctx);
1019	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
1020
1021	ctx.idx += ctx.imm_count;
1022	if (ctx.imm_count) {
1023		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
1024		if (ctx.imms == NULL)
1025			goto out;
1026	}
1027#else
1028	/* there's nothing after the epilogue on ARMv7 */
1029	build_epilogue(&ctx);
1030#endif
1031	alloc_size = 4 * ctx.idx;
1032	header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
1033				      4, jit_fill_hole);
1034	if (header == NULL)
1035		goto out;
1036
1037	ctx.target = (u32 *) target_ptr;
1038	ctx.idx = 0;
1039
1040	build_prologue(&ctx);
1041	if (build_body(&ctx) < 0) {
1042#if __LINUX_ARM_ARCH__ < 7
1043		if (ctx.imm_count)
1044			kfree(ctx.imms);
1045#endif
1046		bpf_jit_binary_free(header);
1047		goto out;
1048	}
1049	build_epilogue(&ctx);
1050
1051	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
1052
1053#if __LINUX_ARM_ARCH__ < 7
1054	if (ctx.imm_count)
1055		kfree(ctx.imms);
1056#endif
1057
1058	if (bpf_jit_enable > 1)
1059		/* there are 2 passes here */
1060		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
1061
1062	set_memory_ro((unsigned long)header, header->pages);
1063	fp->bpf_func = (void *)ctx.target;
1064	fp->jited = 1;
1065out:
1066	kfree(ctx.offsets);
1067	return;
1068}
1069
1070void bpf_jit_free(struct bpf_prog *fp)
1071{
1072	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
1073	struct bpf_binary_header *header = (void *)addr;
1074
1075	if (!fp->jited)
1076		goto free_filter;
1077
1078	set_memory_rw(addr, header->pages);
1079	bpf_jit_binary_free(header);
1080
1081free_filter:
1082	bpf_prog_unlock_free(fp);
1083}
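
The PAGE_MASK arithmetic above works because bpf_jit_binary_alloc() puts the header at the start of a page-aligned allocation and keeps the image start within that first page. With illustrative numbers on 4 KiB pages:

	/*   fp->bpf_func = 0xbf012a40
	 *   addr         = 0xbf012a40 & PAGE_MASK = 0xbf012000
	 *                = the page that holds struct bpf_binary_header
	 */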