v4.10.11
   1/* bpf_jit_comp.c : BPF JIT compiler
   2 *
   3 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
   4 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * as published by the Free Software Foundation; version 2
   9 * of the License.
  10 */
  11#include <linux/netdevice.h>
  12#include <linux/filter.h>
  13#include <linux/if_vlan.h>
  14#include <asm/cacheflush.h>
  15#include <linux/bpf.h>
  16
  17int bpf_jit_enable __read_mostly;
  18
  19/*
  20 * assembly code in arch/x86/net/bpf_jit.S
  21 */
  22extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
  23extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
  24extern u8 sk_load_byte_positive_offset[];
  25extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
  26extern u8 sk_load_byte_negative_offset[];
  27
  28static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
  29{
  30	if (len == 1)
  31		*ptr = bytes;
  32	else if (len == 2)
  33		*(u16 *)ptr = bytes;
  34	else {
  35		*(u32 *)ptr = bytes;
  36		barrier();
  37	}
  38	return ptr + len;
  39}
  40
  41#define EMIT(bytes, len) \
  42	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
  43
  44#define EMIT1(b1)		EMIT(b1, 1)
  45#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
  46#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
  47#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
  48#define EMIT1_off32(b1, off) \
  49	do {EMIT1(b1); EMIT(off, 4); } while (0)
  50#define EMIT2_off32(b1, b2, off) \
  51	do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
  52#define EMIT3_off32(b1, b2, b3, off) \
  53	do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
  54#define EMIT4_off32(b1, b2, b3, b4, off) \
  55	do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
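
/* A minimal sketch (illustrative, not part of the original file) of how
 * the EMIT*() macros lay down machine code: x86 is little-endian, so
 * packing b1 + (b2 << 8) + ... into one u32 and storing it writes b1
 * first. EMIT2(0x31, 0xC0) therefore produces the byte sequence 31 C0,
 * which disassembles to "xor eax, eax".
 */
static inline void emit_demo_xor_eax(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	EMIT2(0x31, 0xC0);	/* writes 0x31 then 0xC0: xor eax, eax */
	*pprog = prog;
	(void)cnt;
}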
  56
  57static bool is_imm8(int value)
  58{
  59	return value <= 127 && value >= -128;
  60}
  61
  62static bool is_simm32(s64 value)
  63{
  64	return value == (s64) (s32) value;
  65}
  66
  67/* mov dst, src */
  68#define EMIT_mov(DST, SRC) \
  69	do {if (DST != SRC) \
  70		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
  71	} while (0)
  72
  73static int bpf_size_to_x86_bytes(int bpf_size)
  74{
  75	if (bpf_size == BPF_W)
  76		return 4;
  77	else if (bpf_size == BPF_H)
  78		return 2;
  79	else if (bpf_size == BPF_B)
  80		return 1;
  81	else if (bpf_size == BPF_DW)
  82		return 4; /* imm32 */
  83	else
  84		return 0;
  85}
  86
  87/* list of x86 cond jump opcodes (. + s8)
  88 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
  89 */
  90#define X86_JB  0x72
  91#define X86_JAE 0x73
  92#define X86_JE  0x74
  93#define X86_JNE 0x75
  94#define X86_JBE 0x76
  95#define X86_JA  0x77
  96#define X86_JGE 0x7D
  97#define X86_JG  0x7F
  98
  99static void bpf_flush_icache(void *start, void *end)
 100{
 101	mm_segment_t old_fs = get_fs();
 102
 103	set_fs(KERNEL_DS);
 104	smp_wmb();
 105	flush_icache_range((unsigned long)start, (unsigned long)end);
 106	set_fs(old_fs);
 107}
 108
 109#define CHOOSE_LOAD_FUNC(K, func) \
 110	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
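
/* Example (illustrative): for a classic BPF load with K = SKF_NET_OFF + 12,
 * (int)K is negative but >= SKF_LL_OFF, so CHOOSE_LOAD_FUNC(K, sk_load_word)
 * picks sk_load_word_negative_offset, which resolves the offset relative to
 * the network header at run time; an ordinary positive K selects
 * sk_load_word_positive_offset instead.
 */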
 111
 112/* pick a register outside of BPF range for JIT internal work */
 113#define AUX_REG (MAX_BPF_JIT_REG + 1)
 114
 115/* The following table maps BPF registers to x64 registers.
 116 *
 117 * x64 register r12 is unused, since if used as base address
 118 * register in load/store instructions, it always needs an
 119 * extra byte of encoding and is callee saved.
 120 *
 121 *  r9 caches skb->len - skb->data_len
 122 * r10 caches skb->data, and used for blinding (if enabled)
 123 */
 124static const int reg2hex[] = {
 125	[BPF_REG_0] = 0,  /* rax */
 126	[BPF_REG_1] = 7,  /* rdi */
 127	[BPF_REG_2] = 6,  /* rsi */
 128	[BPF_REG_3] = 2,  /* rdx */
 129	[BPF_REG_4] = 1,  /* rcx */
 130	[BPF_REG_5] = 0,  /* r8 */
 131	[BPF_REG_6] = 3,  /* rbx callee saved */
 132	[BPF_REG_7] = 5,  /* r13 callee saved */
 133	[BPF_REG_8] = 6,  /* r14 callee saved */
 134	[BPF_REG_9] = 7,  /* r15 callee saved */
 135	[BPF_REG_FP] = 5, /* rbp readonly */
 136	[BPF_REG_AX] = 2, /* r10 temp register */
 137	[AUX_REG] = 3,    /* r11 temp register */
 138};
 139
 140/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
 141 * which need extra byte of encoding.
 142 * rax,rcx,...,rbp have simpler encoding
 143 */
 144static bool is_ereg(u32 reg)
 145{
 146	return (1 << reg) & (BIT(BPF_REG_5) |
 147			     BIT(AUX_REG) |
 148			     BIT(BPF_REG_7) |
 149			     BIT(BPF_REG_8) |
 150			     BIT(BPF_REG_9) |
 151			     BIT(BPF_REG_AX));
 152}
 153
 154/* add modifiers if 'reg' maps to x64 registers r8..r15 */
 155static u8 add_1mod(u8 byte, u32 reg)
 156{
 157	if (is_ereg(reg))
 158		byte |= 1;
 159	return byte;
 160}
 161
 162static u8 add_2mod(u8 byte, u32 r1, u32 r2)
 163{
 164	if (is_ereg(r1))
 165		byte |= 1;
 166	if (is_ereg(r2))
 167		byte |= 4;
 168	return byte;
 169}
 170
 171/* encode 'dst_reg' register into x64 opcode 'byte' */
 172static u8 add_1reg(u8 byte, u32 dst_reg)
 173{
 174	return byte + reg2hex[dst_reg];
 175}
 176
 177/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
 178static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
 179{
 180	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
 181}
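
/* Illustrative sketch (not part of the original JIT) combining the REX and
 * ModRM helpers above to hand-encode "add rbx, r13", i.e.
 * BPF_REG_6 += BPF_REG_7: r13 needs REX.R, so add_2mod(0x48, ...) yields
 * 0x4C, and add_2reg(0xC0, ...) yields 0xC0 + 3 + (5 << 3) = 0xEB,
 * producing the bytes 4C 01 EB.
 */
static inline void emit_demo_add_rbx_r13(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	EMIT3(add_2mod(0x48, BPF_REG_6, BPF_REG_7), 0x01,
	      add_2reg(0xC0, BPF_REG_6, BPF_REG_7));
	*pprog = prog;
	(void)cnt;
}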
 182
 183static void jit_fill_hole(void *area, unsigned int size)
 184{
 185	/* fill whole space with int3 instructions */
 186	memset(area, 0xcc, size);
 187}
 188
 189struct jit_context {
 190	int cleanup_addr; /* epilogue code offset */
 191	bool seen_ld_abs;
 192	bool seen_ax_reg;
 193};
 194
 195/* maximum number of bytes emitted while JITing one eBPF insn */
 196#define BPF_MAX_INSN_SIZE	128
 197#define BPF_INSN_SAFETY		64
 198
 199#define STACKSIZE \
 200	(MAX_BPF_STACK + \
 201	 32 /* space for rbx, r13, r14, r15 */ + \
 202	 8 /* space for skb_copy_bits() buffer */)
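
/* Frame layout sketch (illustrative), relative to rbp after the prologue
 * below has run:
 *
 *	rbp -  1 .. rbp - MAX_BPF_STACK : BPF program stack, addressed
 *	                                  through BPF_REG_FP (rbp)
 *	rbp - STACKSIZE + 32            : 8-byte slot zeroed by the prologue;
 *	                                  tail_call_cnt is read as a dword
 *	                                  at offset +36
 *	rbp - STACKSIZE + 24            : saved r15
 *	rbp - STACKSIZE + 16            : saved r14
 *	rbp - STACKSIZE +  8            : saved r13
 *	rbp - STACKSIZE                 : saved rbx
 */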
 203
 204#define PROLOGUE_SIZE 48
 205
 206/* emit x64 prologue code for BPF program and check its size.
 207 * bpf_tail_call helper will skip it while jumping into another program
 208 */
 209static void emit_prologue(u8 **pprog)
 210{
 211	u8 *prog = *pprog;
 212	int cnt = 0;
 213
 214	EMIT1(0x55); /* push rbp */
 215	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
 216
 217	/* sub rsp, STACKSIZE */
 218	EMIT3_off32(0x48, 0x81, 0xEC, STACKSIZE);
 219
 220	/* all classic BPF filters use R6(rbx), so save it */
 221
 222	/* mov qword ptr [rbp-X],rbx */
 223	EMIT3_off32(0x48, 0x89, 0x9D, -STACKSIZE);
 224
 225	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
 226	 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
 227	 * R8(r14). R9(r15) spill could be made conditional, but there is only
 228	 * one 'bpf_error' return path out of helper functions inside bpf_jit.S
 229	 * The overhead of extra spill is negligible for any filter other
 230	 * than synthetic ones. Therefore not worth adding complexity.
 231	 */
 232
 233	/* mov qword ptr [rbp-X],r13 */
 234	EMIT3_off32(0x4C, 0x89, 0xAD, -STACKSIZE + 8);
 235	/* mov qword ptr [rbp-X],r14 */
 236	EMIT3_off32(0x4C, 0x89, 0xB5, -STACKSIZE + 16);
 237	/* mov qword ptr [rbp-X],r15 */
 238	EMIT3_off32(0x4C, 0x89, 0xBD, -STACKSIZE + 24);
 239
 240	/* Clear the tail call counter (tail_call_cnt): for eBPF tail calls
 241	 * we need to reset the counter to 0. It's done in two instructions,
 242	 * resetting rax register to 0 (xor on eax gets 0 extended), and
 243	 * moving it to the counter location.
 244	 */
 245
 246	/* xor eax, eax */
 247	EMIT2(0x31, 0xc0);
 248	/* mov qword ptr [rbp-X], rax */
 249	EMIT3_off32(0x48, 0x89, 0x85, -STACKSIZE + 32);
 250
 251	BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
 252	*pprog = prog;
 253}
 254
 255/* generate the following code:
 256 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 257 *   if (index >= array->map.max_entries)
 258 *     goto out;
 259 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 260 *     goto out;
 261 *   prog = array->ptrs[index];
 262 *   if (prog == NULL)
 263 *     goto out;
 264 *   goto *(prog->bpf_func + prologue_size);
 265 * out:
 266 */
 267static void emit_bpf_tail_call(u8 **pprog)
 268{
 269	u8 *prog = *pprog;
 270	int label1, label2, label3;
 271	int cnt = 0;
 272
 273	/* rdi - pointer to ctx
 274	 * rsi - pointer to bpf_array
 275	 * rdx - index in bpf_array
 276	 */
 277
 278	/* if (index >= array->map.max_entries)
 279	 *   goto out;
 280	 */
 281	EMIT4(0x48, 0x8B, 0x46,                   /* mov rax, qword ptr [rsi + 16] */
 282	      offsetof(struct bpf_array, map.max_entries));
 283	EMIT3(0x48, 0x39, 0xD0);                  /* cmp rax, rdx */
 284#define OFFSET1 47 /* number of bytes to jump */
 285	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
 286	label1 = cnt;
 287
 288	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 289	 *   goto out;
 290	 */
 291	EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
 292	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
 293#define OFFSET2 36
 294	EMIT2(X86_JA, OFFSET2);                   /* ja out */
 295	label2 = cnt;
 296	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
 297	EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
 298
 299	/* prog = array->ptrs[index]; */
 300	EMIT4_off32(0x48, 0x8D, 0x84, 0xD6,       /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
 301		    offsetof(struct bpf_array, ptrs));
 302	EMIT3(0x48, 0x8B, 0x00);                  /* mov rax, qword ptr [rax] */
 303
 304	/* if (prog == NULL)
 305	 *   goto out;
 306	 */
 307	EMIT4(0x48, 0x83, 0xF8, 0x00);            /* cmp rax, 0 */
 308#define OFFSET3 10
 309	EMIT2(X86_JE, OFFSET3);                   /* je out */
 310	label3 = cnt;
 311
 312	/* goto *(prog->bpf_func + prologue_size); */
 313	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
 314	      offsetof(struct bpf_prog, bpf_func));
 315	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */
 316
 317	/* now we're ready to jump into next BPF program
 318	 * rdi == ctx (1st arg)
 319	 * rax == prog->bpf_func + prologue_size
 320	 */
 321	EMIT2(0xFF, 0xE0);                        /* jmp rax */
 322
 323	/* out: */
 324	BUILD_BUG_ON(cnt - label1 != OFFSET1);
 325	BUILD_BUG_ON(cnt - label2 != OFFSET2);
 326	BUILD_BUG_ON(cnt - label3 != OFFSET3);
 327	*pprog = prog;
 328}
 329
 330
 331static void emit_load_skb_data_hlen(u8 **pprog)
 332{
 333	u8 *prog = *pprog;
 334	int cnt = 0;
 335
 336	/* r9d = skb->len - skb->data_len (headlen)
 337	 * r10 = skb->data
 338	 */
 339	/* mov %r9d, off32(%rdi) */
 340	EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));
 341
 342	/* sub %r9d, off32(%rdi) */
 343	EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));
 344
 345	/* mov %r10, off32(%rdi) */
 346	EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
 347	*pprog = prog;
 348}
 349
 350static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 351		  int oldproglen, struct jit_context *ctx)
 352{
 353	struct bpf_insn *insn = bpf_prog->insnsi;
 354	int insn_cnt = bpf_prog->len;
 355	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
 356	bool seen_ax_reg = ctx->seen_ax_reg | (oldproglen == 0);
 357	bool seen_exit = false;
 358	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
 359	int i, cnt = 0;
 360	int proglen = 0;
 361	u8 *prog = temp;
 362
 363	emit_prologue(&prog);
 364
 365	if (seen_ld_abs)
 366		emit_load_skb_data_hlen(&prog);
 367
 368	for (i = 0; i < insn_cnt; i++, insn++) {
 369		const s32 imm32 = insn->imm;
 370		u32 dst_reg = insn->dst_reg;
 371		u32 src_reg = insn->src_reg;
 372		u8 b1 = 0, b2 = 0, b3 = 0;
 373		s64 jmp_offset;
 374		u8 jmp_cond;
 375		bool reload_skb_data;
 376		int ilen;
 377		u8 *func;
 378
 379		if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
 380			ctx->seen_ax_reg = seen_ax_reg = true;
 381
 382		switch (insn->code) {
 383			/* ALU */
 384		case BPF_ALU | BPF_ADD | BPF_X:
 385		case BPF_ALU | BPF_SUB | BPF_X:
 386		case BPF_ALU | BPF_AND | BPF_X:
 387		case BPF_ALU | BPF_OR | BPF_X:
 388		case BPF_ALU | BPF_XOR | BPF_X:
 389		case BPF_ALU64 | BPF_ADD | BPF_X:
 390		case BPF_ALU64 | BPF_SUB | BPF_X:
 391		case BPF_ALU64 | BPF_AND | BPF_X:
 392		case BPF_ALU64 | BPF_OR | BPF_X:
 393		case BPF_ALU64 | BPF_XOR | BPF_X:
 394			switch (BPF_OP(insn->code)) {
 395			case BPF_ADD: b2 = 0x01; break;
 396			case BPF_SUB: b2 = 0x29; break;
 397			case BPF_AND: b2 = 0x21; break;
 398			case BPF_OR: b2 = 0x09; break;
 399			case BPF_XOR: b2 = 0x31; break;
 400			}
 401			if (BPF_CLASS(insn->code) == BPF_ALU64)
 402				EMIT1(add_2mod(0x48, dst_reg, src_reg));
 403			else if (is_ereg(dst_reg) || is_ereg(src_reg))
 404				EMIT1(add_2mod(0x40, dst_reg, src_reg));
 405			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
 406			break;
 407
 408			/* mov dst, src */
 409		case BPF_ALU64 | BPF_MOV | BPF_X:
 410			EMIT_mov(dst_reg, src_reg);
 411			break;
 412
 413			/* mov32 dst, src */
 414		case BPF_ALU | BPF_MOV | BPF_X:
 415			if (is_ereg(dst_reg) || is_ereg(src_reg))
 416				EMIT1(add_2mod(0x40, dst_reg, src_reg));
 417			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
 418			break;
 419
 420			/* neg dst */
 421		case BPF_ALU | BPF_NEG:
 422		case BPF_ALU64 | BPF_NEG:
 423			if (BPF_CLASS(insn->code) == BPF_ALU64)
 424				EMIT1(add_1mod(0x48, dst_reg));
 425			else if (is_ereg(dst_reg))
 426				EMIT1(add_1mod(0x40, dst_reg));
 427			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
 428			break;
 429
 430		case BPF_ALU | BPF_ADD | BPF_K:
 431		case BPF_ALU | BPF_SUB | BPF_K:
 432		case BPF_ALU | BPF_AND | BPF_K:
 433		case BPF_ALU | BPF_OR | BPF_K:
 434		case BPF_ALU | BPF_XOR | BPF_K:
 435		case BPF_ALU64 | BPF_ADD | BPF_K:
 436		case BPF_ALU64 | BPF_SUB | BPF_K:
 437		case BPF_ALU64 | BPF_AND | BPF_K:
 438		case BPF_ALU64 | BPF_OR | BPF_K:
 439		case BPF_ALU64 | BPF_XOR | BPF_K:
 440			if (BPF_CLASS(insn->code) == BPF_ALU64)
 441				EMIT1(add_1mod(0x48, dst_reg));
 442			else if (is_ereg(dst_reg))
 443				EMIT1(add_1mod(0x40, dst_reg));
 444
 445			switch (BPF_OP(insn->code)) {
 446			case BPF_ADD: b3 = 0xC0; break;
 447			case BPF_SUB: b3 = 0xE8; break;
 448			case BPF_AND: b3 = 0xE0; break;
 449			case BPF_OR: b3 = 0xC8; break;
 450			case BPF_XOR: b3 = 0xF0; break;
 451			}
 452
 453			if (is_imm8(imm32))
 454				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
 455			else
 456				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
 457			break;
 458
 459		case BPF_ALU64 | BPF_MOV | BPF_K:
 460			/* optimization: if imm32 is positive,
 461			 * use 'mov eax, imm32' (which zero-extends imm32)
 462			 * to save 2 bytes
 463			 */
 464			if (imm32 < 0) {
 465				/* 'mov rax, imm32' sign extends imm32 */
 466				b1 = add_1mod(0x48, dst_reg);
 467				b2 = 0xC7;
 468				b3 = 0xC0;
 469				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
 470				break;
 471			}
 472
 473		case BPF_ALU | BPF_MOV | BPF_K:
 474			/* optimization: if imm32 is zero, use 'xor <dst>,<dst>'
 475			 * to save 3 bytes.
 476			 */
 477			if (imm32 == 0) {
 478				if (is_ereg(dst_reg))
 479					EMIT1(add_2mod(0x40, dst_reg, dst_reg));
 480				b2 = 0x31; /* xor */
 481				b3 = 0xC0;
 482				EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
 483				break;
 484			}
 485
 486			/* mov %eax, imm32 */
 487			if (is_ereg(dst_reg))
 488				EMIT1(add_1mod(0x40, dst_reg));
 489			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
 490			break;
 491
 492		case BPF_LD | BPF_IMM | BPF_DW:
 493			if (insn[1].code != 0 || insn[1].src_reg != 0 ||
 494			    insn[1].dst_reg != 0 || insn[1].off != 0) {
 495				/* verifier must catch invalid insns */
 496				pr_err("invalid BPF_LD_IMM64 insn\n");
 497				return -EINVAL;
 498			}
 499
 500			/* optimization: if imm64 is zero, use 'xor <dst>,<dst>'
 501			 * to save 7 bytes.
 502			 */
 503			if (insn[0].imm == 0 && insn[1].imm == 0) {
 504				b1 = add_2mod(0x48, dst_reg, dst_reg);
 505				b2 = 0x31; /* xor */
 506				b3 = 0xC0;
 507				EMIT3(b1, b2, add_2reg(b3, dst_reg, dst_reg));
 508
 509				insn++;
 510				i++;
 511				break;
 512			}
 513
 514			/* movabsq %rax, imm64 */
 515			EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
 516			EMIT(insn[0].imm, 4);
 517			EMIT(insn[1].imm, 4);
 518
 519			insn++;
 520			i++;
 521			break;
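
			/* Worked example (illustrative): for dst_reg == BPF_REG_0
			 * (rax), add_1mod(0x48, dst_reg) = 0x48 and
			 * add_1reg(0xB8, dst_reg) = 0xB8, so the movabs path above
			 * emits 48 B8 followed by the raw 8-byte immediate.
			 */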
 522
 523			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
 524		case BPF_ALU | BPF_MOD | BPF_X:
 525		case BPF_ALU | BPF_DIV | BPF_X:
 526		case BPF_ALU | BPF_MOD | BPF_K:
 527		case BPF_ALU | BPF_DIV | BPF_K:
 528		case BPF_ALU64 | BPF_MOD | BPF_X:
 529		case BPF_ALU64 | BPF_DIV | BPF_X:
 530		case BPF_ALU64 | BPF_MOD | BPF_K:
 531		case BPF_ALU64 | BPF_DIV | BPF_K:
 532			EMIT1(0x50); /* push rax */
 533			EMIT1(0x52); /* push rdx */
 534
 535			if (BPF_SRC(insn->code) == BPF_X)
 536				/* mov r11, src_reg */
 537				EMIT_mov(AUX_REG, src_reg);
 538			else
 539				/* mov r11, imm32 */
 540				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
 541
 542			/* mov rax, dst_reg */
 543			EMIT_mov(BPF_REG_0, dst_reg);
 544
 545			/* xor edx, edx
 546			 * equivalent to 'xor rdx, rdx', but one byte less
 547			 */
 548			EMIT2(0x31, 0xd2);
 549
 550			if (BPF_SRC(insn->code) == BPF_X) {
 551				/* if (src_reg == 0) return 0 */
 552
 553				/* cmp r11, 0 */
 554				EMIT4(0x49, 0x83, 0xFB, 0x00);
 555
 556				/* jne .+9 (skip over pop, pop, xor and jmp) */
 557				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
 558				EMIT1(0x5A); /* pop rdx */
 559				EMIT1(0x58); /* pop rax */
 560				EMIT2(0x31, 0xc0); /* xor eax, eax */
 561
 562				/* jmp cleanup_addr
 563				 * addrs[i] - 11, because there are 11 bytes
 564				 * after this insn: div, mov, pop, pop, mov
 565				 */
 566				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
 567				EMIT1_off32(0xE9, jmp_offset);
 568			}
 569
 570			if (BPF_CLASS(insn->code) == BPF_ALU64)
 571				/* div r11 */
 572				EMIT3(0x49, 0xF7, 0xF3);
 573			else
 574				/* div r11d */
 575				EMIT3(0x41, 0xF7, 0xF3);
 576
 577			if (BPF_OP(insn->code) == BPF_MOD)
 578				/* mov r11, rdx */
 579				EMIT3(0x49, 0x89, 0xD3);
 580			else
 581				/* mov r11, rax */
 582				EMIT3(0x49, 0x89, 0xC3);
 583
 584			EMIT1(0x5A); /* pop rdx */
 585			EMIT1(0x58); /* pop rax */
 586
 587			/* mov dst_reg, r11 */
 588			EMIT_mov(dst_reg, AUX_REG);
 589			break;
 590
 591		case BPF_ALU | BPF_MUL | BPF_K:
 592		case BPF_ALU | BPF_MUL | BPF_X:
 593		case BPF_ALU64 | BPF_MUL | BPF_K:
 594		case BPF_ALU64 | BPF_MUL | BPF_X:
 595			EMIT1(0x50); /* push rax */
 596			EMIT1(0x52); /* push rdx */
 597
 598			/* mov r11, dst_reg */
 599			EMIT_mov(AUX_REG, dst_reg);
 600
 601			if (BPF_SRC(insn->code) == BPF_X)
 602				/* mov rax, src_reg */
 603				EMIT_mov(BPF_REG_0, src_reg);
 604			else
 605				/* mov rax, imm32 */
 606				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);
 607
 608			if (BPF_CLASS(insn->code) == BPF_ALU64)
 609				EMIT1(add_1mod(0x48, AUX_REG));
 610			else if (is_ereg(AUX_REG))
 611				EMIT1(add_1mod(0x40, AUX_REG));
 612			/* mul(q) r11 */
 613			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
 614
 615			/* mov r11, rax */
 616			EMIT_mov(AUX_REG, BPF_REG_0);
 617
 618			EMIT1(0x5A); /* pop rdx */
 619			EMIT1(0x58); /* pop rax */
 620
 621			/* mov dst_reg, r11 */
 622			EMIT_mov(dst_reg, AUX_REG);
 623			break;
 624
 625			/* shifts */
 626		case BPF_ALU | BPF_LSH | BPF_K:
 627		case BPF_ALU | BPF_RSH | BPF_K:
 628		case BPF_ALU | BPF_ARSH | BPF_K:
 629		case BPF_ALU64 | BPF_LSH | BPF_K:
 630		case BPF_ALU64 | BPF_RSH | BPF_K:
 631		case BPF_ALU64 | BPF_ARSH | BPF_K:
 632			if (BPF_CLASS(insn->code) == BPF_ALU64)
 633				EMIT1(add_1mod(0x48, dst_reg));
 634			else if (is_ereg(dst_reg))
 635				EMIT1(add_1mod(0x40, dst_reg));
 636
 637			switch (BPF_OP(insn->code)) {
 638			case BPF_LSH: b3 = 0xE0; break;
 639			case BPF_RSH: b3 = 0xE8; break;
 640			case BPF_ARSH: b3 = 0xF8; break;
 641			}
 642			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
 643			break;
 644
 645		case BPF_ALU | BPF_LSH | BPF_X:
 646		case BPF_ALU | BPF_RSH | BPF_X:
 647		case BPF_ALU | BPF_ARSH | BPF_X:
 648		case BPF_ALU64 | BPF_LSH | BPF_X:
 649		case BPF_ALU64 | BPF_RSH | BPF_X:
 650		case BPF_ALU64 | BPF_ARSH | BPF_X:
 651
 652			/* check for bad case when dst_reg == rcx */
 653			if (dst_reg == BPF_REG_4) {
 654				/* mov r11, dst_reg */
 655				EMIT_mov(AUX_REG, dst_reg);
 656				dst_reg = AUX_REG;
 657			}
 658
 659			if (src_reg != BPF_REG_4) { /* common case */
 660				EMIT1(0x51); /* push rcx */
 661
 662				/* mov rcx, src_reg */
 663				EMIT_mov(BPF_REG_4, src_reg);
 664			}
 665
 666			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
 667			if (BPF_CLASS(insn->code) == BPF_ALU64)
 668				EMIT1(add_1mod(0x48, dst_reg));
 669			else if (is_ereg(dst_reg))
 670				EMIT1(add_1mod(0x40, dst_reg));
 671
 672			switch (BPF_OP(insn->code)) {
 673			case BPF_LSH: b3 = 0xE0; break;
 674			case BPF_RSH: b3 = 0xE8; break;
 675			case BPF_ARSH: b3 = 0xF8; break;
 676			}
 677			EMIT2(0xD3, add_1reg(b3, dst_reg));
 678
 679			if (src_reg != BPF_REG_4)
 680				EMIT1(0x59); /* pop rcx */
 681
 682			if (insn->dst_reg == BPF_REG_4)
 683				/* mov dst_reg, r11 */
 684				EMIT_mov(insn->dst_reg, AUX_REG);
 685			break;
 686
 687		case BPF_ALU | BPF_END | BPF_FROM_BE:
 688			switch (imm32) {
 689			case 16:
 690				/* emit 'ror %ax, 8' to swap lower 2 bytes */
 691				EMIT1(0x66);
 692				if (is_ereg(dst_reg))
 693					EMIT1(0x41);
 694				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
 695
 696				/* emit 'movzwl eax, ax' */
 697				if (is_ereg(dst_reg))
 698					EMIT3(0x45, 0x0F, 0xB7);
 699				else
 700					EMIT2(0x0F, 0xB7);
 701				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
 702				break;
 703			case 32:
 704				/* emit 'bswap eax' to swap lower 4 bytes */
 705				if (is_ereg(dst_reg))
 706					EMIT2(0x41, 0x0F);
 707				else
 708					EMIT1(0x0F);
 709				EMIT1(add_1reg(0xC8, dst_reg));
 710				break;
 711			case 64:
 712				/* emit 'bswap rax' to swap 8 bytes */
 713				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
 714				      add_1reg(0xC8, dst_reg));
 715				break;
 716			}
 717			break;
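
			/* Worked example (illustrative): for imm32 == 32 and
			 * dst_reg == BPF_REG_0, the case above emits 0F C8
			 * ("bswap eax"), turning 0x11223344 into 0x44332211.
			 */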
 718
 719		case BPF_ALU | BPF_END | BPF_FROM_LE:
 720			switch (imm32) {
 721			case 16:
 722				/* emit 'movzwl eax, ax' to zero extend 16-bit
 723				 * into 64 bit
 724				 */
 725				if (is_ereg(dst_reg))
 726					EMIT3(0x45, 0x0F, 0xB7);
 727				else
 728					EMIT2(0x0F, 0xB7);
 729				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
 730				break;
 731			case 32:
 732				/* emit 'mov eax, eax' to clear upper 32-bits */
 733				if (is_ereg(dst_reg))
 734					EMIT1(0x45);
 735				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
 736				break;
 737			case 64:
 738				/* nop */
 739				break;
 740			}
 741			break;
 742
 743			/* ST: *(u8*)(dst_reg + off) = imm */
 744		case BPF_ST | BPF_MEM | BPF_B:
 745			if (is_ereg(dst_reg))
 746				EMIT2(0x41, 0xC6);
 747			else
 748				EMIT1(0xC6);
 749			goto st;
 750		case BPF_ST | BPF_MEM | BPF_H:
 751			if (is_ereg(dst_reg))
 752				EMIT3(0x66, 0x41, 0xC7);
 753			else
 754				EMIT2(0x66, 0xC7);
 755			goto st;
 756		case BPF_ST | BPF_MEM | BPF_W:
 757			if (is_ereg(dst_reg))
 758				EMIT2(0x41, 0xC7);
 759			else
 760				EMIT1(0xC7);
 761			goto st;
 762		case BPF_ST | BPF_MEM | BPF_DW:
 763			EMIT2(add_1mod(0x48, dst_reg), 0xC7);
 764
 765st:			if (is_imm8(insn->off))
 766				EMIT2(add_1reg(0x40, dst_reg), insn->off);
 767			else
 768				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
 769
 770			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
 771			break;
 772
 773			/* STX: *(u8*)(dst_reg + off) = src_reg */
 774		case BPF_STX | BPF_MEM | BPF_B:
 775			/* emit 'mov byte ptr [rax + off], al' */
 776			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
 777			    /* have to add extra byte for x86 SIL, DIL regs */
 778			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
 779				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
 780			else
 781				EMIT1(0x88);
 782			goto stx;
 783		case BPF_STX | BPF_MEM | BPF_H:
 784			if (is_ereg(dst_reg) || is_ereg(src_reg))
 785				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
 786			else
 787				EMIT2(0x66, 0x89);
 788			goto stx;
 789		case BPF_STX | BPF_MEM | BPF_W:
 790			if (is_ereg(dst_reg) || is_ereg(src_reg))
 791				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
 792			else
 793				EMIT1(0x89);
 794			goto stx;
 795		case BPF_STX | BPF_MEM | BPF_DW:
 796			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
 797stx:			if (is_imm8(insn->off))
 798				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
 799			else
 800				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
 801					    insn->off);
 802			break;
 803
 804			/* LDX: dst_reg = *(u8*)(src_reg + off) */
 805		case BPF_LDX | BPF_MEM | BPF_B:
 806			/* emit 'movzx rax, byte ptr [rax + off]' */
 807			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
 808			goto ldx;
 809		case BPF_LDX | BPF_MEM | BPF_H:
 810			/* emit 'movzx rax, word ptr [rax + off]' */
 811			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
 812			goto ldx;
 813		case BPF_LDX | BPF_MEM | BPF_W:
 814			/* emit 'mov eax, dword ptr [rax+0x14]' */
 815			if (is_ereg(dst_reg) || is_ereg(src_reg))
 816				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
 817			else
 818				EMIT1(0x8B);
 819			goto ldx;
 820		case BPF_LDX | BPF_MEM | BPF_DW:
 821			/* emit 'mov rax, qword ptr [rax+0x14]' */
 822			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
 823ldx:			/* if insn->off == 0 we can save one extra byte, but
 824			 * special case of x86 r13 which always needs an offset
 825			 * is not worth the hassle
 826			 */
 827			if (is_imm8(insn->off))
 828				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
 829			else
 830				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
 831					    insn->off);
 832			break;
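
			/* Worked example (illustrative): "r0 = *(u32 *)(r1 + 0x14)"
			 * (dst = rax, src = rdi) takes the disp8 path above and
			 * emits 8B 47 14, i.e. mov eax, dword ptr [rdi + 0x14].
			 */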
 833
 834			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
 835		case BPF_STX | BPF_XADD | BPF_W:
 836			/* emit 'lock add dword ptr [rax + off], eax' */
 837			if (is_ereg(dst_reg) || is_ereg(src_reg))
 838				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
 839			else
 840				EMIT2(0xF0, 0x01);
 841			goto xadd;
 842		case BPF_STX | BPF_XADD | BPF_DW:
 843			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
 844xadd:			if (is_imm8(insn->off))
 845				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
 846			else
 847				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
 848					    insn->off);
 849			break;
 850
 851			/* call */
 852		case BPF_JMP | BPF_CALL:
 853			func = (u8 *) __bpf_call_base + imm32;
 854			jmp_offset = func - (image + addrs[i]);
 855			if (seen_ld_abs) {
 856				reload_skb_data = bpf_helper_changes_pkt_data(func);
 857				if (reload_skb_data) {
 858					EMIT1(0x57); /* push %rdi */
 859					jmp_offset += 22; /* pop, mov, sub, mov */
 860				} else {
 861					EMIT2(0x41, 0x52); /* push %r10 */
 862					EMIT2(0x41, 0x51); /* push %r9 */
 863					/* need to adjust jmp offset, since
 864					 * pop %r9, pop %r10 take 4 bytes after call insn
 865					 */
 866					jmp_offset += 4;
 867				}
 868			}
 869			if (!imm32 || !is_simm32(jmp_offset)) {
 870				pr_err("unsupported bpf func %d addr %p image %p\n",
 871				       imm32, func, image);
 872				return -EINVAL;
 873			}
 874			EMIT1_off32(0xE8, jmp_offset);
 875			if (seen_ld_abs) {
 876				if (reload_skb_data) {
 877					EMIT1(0x5F); /* pop %rdi */
 878					emit_load_skb_data_hlen(&prog);
 879				} else {
 880					EMIT2(0x41, 0x59); /* pop %r9 */
 881					EMIT2(0x41, 0x5A); /* pop %r10 */
 882				}
 883			}
 884			break;
 885
 886		case BPF_JMP | BPF_CALL | BPF_X:
 887			emit_bpf_tail_call(&prog);
 888			break;
 889
 890			/* cond jump */
 891		case BPF_JMP | BPF_JEQ | BPF_X:
 892		case BPF_JMP | BPF_JNE | BPF_X:
 893		case BPF_JMP | BPF_JGT | BPF_X:
 894		case BPF_JMP | BPF_JGE | BPF_X:
 895		case BPF_JMP | BPF_JSGT | BPF_X:
 896		case BPF_JMP | BPF_JSGE | BPF_X:
 897			/* cmp dst_reg, src_reg */
 898			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
 899			      add_2reg(0xC0, dst_reg, src_reg));
 900			goto emit_cond_jmp;
 901
 902		case BPF_JMP | BPF_JSET | BPF_X:
 903			/* test dst_reg, src_reg */
 904			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
 905			      add_2reg(0xC0, dst_reg, src_reg));
 906			goto emit_cond_jmp;
 907
 908		case BPF_JMP | BPF_JSET | BPF_K:
 909			/* test dst_reg, imm32 */
 910			EMIT1(add_1mod(0x48, dst_reg));
 911			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
 912			goto emit_cond_jmp;
 913
 914		case BPF_JMP | BPF_JEQ | BPF_K:
 915		case BPF_JMP | BPF_JNE | BPF_K:
 916		case BPF_JMP | BPF_JGT | BPF_K:
 917		case BPF_JMP | BPF_JGE | BPF_K:
 918		case BPF_JMP | BPF_JSGT | BPF_K:
 919		case BPF_JMP | BPF_JSGE | BPF_K:
 920			/* cmp dst_reg, imm8/32 */
 921			EMIT1(add_1mod(0x48, dst_reg));
 922
 923			if (is_imm8(imm32))
 924				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
 925			else
 926				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
 927
 928emit_cond_jmp:		/* convert BPF opcode to x86 */
 929			switch (BPF_OP(insn->code)) {
 930			case BPF_JEQ:
 931				jmp_cond = X86_JE;
 932				break;
 933			case BPF_JSET:
 934			case BPF_JNE:
 935				jmp_cond = X86_JNE;
 936				break;
 937			case BPF_JGT:
 938				/* GT is unsigned '>', JA in x86 */
 939				jmp_cond = X86_JA;
 940				break;
 941			case BPF_JGE:
 942				/* GE is unsigned '>=', JAE in x86 */
 943				jmp_cond = X86_JAE;
 944				break;
 945			case BPF_JSGT:
 946				/* signed '>', GT in x86 */
 947				jmp_cond = X86_JG;
 948				break;
 949			case BPF_JSGE:
 950				/* signed '>=', GE in x86 */
 951				jmp_cond = X86_JGE;
 952				break;
 953			default: /* to silence gcc warning */
 954				return -EFAULT;
 955			}
 956			jmp_offset = addrs[i + insn->off] - addrs[i];
 957			if (is_imm8(jmp_offset)) {
 958				EMIT2(jmp_cond, jmp_offset);
 959			} else if (is_simm32(jmp_offset)) {
 960				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
 961			} else {
 962				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
 963				return -EFAULT;
 964			}
 965
 966			break;
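
			/* Note (illustrative): addrs[i] holds the offset of the end
			 * of insn i within the image, so addrs[i + insn->off] -
			 * addrs[i] is exactly the rel8/rel32 displacement the CPU
			 * adds to the address of the next instruction.
			 */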
 967
 968		case BPF_JMP | BPF_JA:
 969			jmp_offset = addrs[i + insn->off] - addrs[i];
 970			if (!jmp_offset)
 971				/* optimize out nop jumps */
 972				break;
 973emit_jmp:
 974			if (is_imm8(jmp_offset)) {
 975				EMIT2(0xEB, jmp_offset);
 976			} else if (is_simm32(jmp_offset)) {
 977				EMIT1_off32(0xE9, jmp_offset);
 978			} else {
 979				pr_err("jmp gen bug %llx\n", jmp_offset);
 980				return -EFAULT;
 981			}
 982			break;
 983
 984		case BPF_LD | BPF_IND | BPF_W:
 985			func = sk_load_word;
 986			goto common_load;
 987		case BPF_LD | BPF_ABS | BPF_W:
 988			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
 989common_load:
 990			ctx->seen_ld_abs = seen_ld_abs = true;
 991			jmp_offset = func - (image + addrs[i]);
 992			if (!func || !is_simm32(jmp_offset)) {
 993				pr_err("unsupported bpf func %d addr %p image %p\n",
 994				       imm32, func, image);
 995				return -EINVAL;
 996			}
 997			if (BPF_MODE(insn->code) == BPF_ABS) {
 998				/* mov %esi, imm32 */
 999				EMIT1_off32(0xBE, imm32);
1000			} else {
1001				/* mov %rsi, src_reg */
1002				EMIT_mov(BPF_REG_2, src_reg);
1003				if (imm32) {
1004					if (is_imm8(imm32))
1005						/* add %esi, imm8 */
1006						EMIT3(0x83, 0xC6, imm32);
1007					else
1008						/* add %esi, imm32 */
1009						EMIT2_off32(0x81, 0xC6, imm32);
1010				}
1011			}
1012		/* skb pointer is in R6 (%rbx); it will be copied into
1013			 * %rdi if skb_copy_bits() call is necessary.
1014			 * sk_load_* helpers also use %r10 and %r9d.
1015			 * See bpf_jit.S
1016			 */
1017			if (seen_ax_reg)
1018				/* r10 = skb->data, mov %r10, off32(%rbx) */
1019				EMIT3_off32(0x4c, 0x8b, 0x93,
1020					    offsetof(struct sk_buff, data));
1021			EMIT1_off32(0xE8, jmp_offset); /* call */
1022			break;
1023
1024		case BPF_LD | BPF_IND | BPF_H:
1025			func = sk_load_half;
1026			goto common_load;
1027		case BPF_LD | BPF_ABS | BPF_H:
1028			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
1029			goto common_load;
1030		case BPF_LD | BPF_IND | BPF_B:
1031			func = sk_load_byte;
1032			goto common_load;
1033		case BPF_LD | BPF_ABS | BPF_B:
1034			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
1035			goto common_load;
1036
1037		case BPF_JMP | BPF_EXIT:
1038			if (seen_exit) {
1039				jmp_offset = ctx->cleanup_addr - addrs[i];
1040				goto emit_jmp;
1041			}
1042			seen_exit = true;
1043			/* update cleanup_addr */
1044			ctx->cleanup_addr = proglen;
1045			/* mov rbx, qword ptr [rbp-X] */
1046			EMIT3_off32(0x48, 0x8B, 0x9D, -STACKSIZE);
1047			/* mov r13, qword ptr [rbp-X] */
1048			EMIT3_off32(0x4C, 0x8B, 0xAD, -STACKSIZE + 8);
1049			/* mov r14, qword ptr [rbp-X] */
1050			EMIT3_off32(0x4C, 0x8B, 0xB5, -STACKSIZE + 16);
1051			/* mov r15, qword ptr [rbp-X] */
1052			EMIT3_off32(0x4C, 0x8B, 0xBD, -STACKSIZE + 24);
1053
1054			EMIT1(0xC9); /* leave */
1055			EMIT1(0xC3); /* ret */
1056			break;
1057
1058		default:
1059		/* By design the x64 JIT should support all BPF instructions.
1060		 * This error will be seen if a new instruction was added
1061		 * to the interpreter but not to the JIT,
1062		 * or if there is junk in bpf_prog.
1063			 */
1064			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1065			return -EINVAL;
1066		}
1067
1068		ilen = prog - temp;
1069		if (ilen > BPF_MAX_INSN_SIZE) {
1070			pr_err("bpf_jit_compile fatal insn size error\n");
1071			return -EFAULT;
1072		}
1073
1074		if (image) {
1075			if (unlikely(proglen + ilen > oldproglen)) {
1076				pr_err("bpf_jit_compile fatal error\n");
1077				return -EFAULT;
1078			}
1079			memcpy(image + proglen, temp, ilen);
1080		}
1081		proglen += ilen;
1082		addrs[i] = proglen;
1083		prog = temp;
1084	}
1085	return proglen;
1086}
1087
1088void bpf_jit_compile(struct bpf_prog *prog)
1089{
1090}
1091
1092struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1093{
1094	struct bpf_binary_header *header = NULL;
1095	struct bpf_prog *tmp, *orig_prog = prog;
1096	int proglen, oldproglen = 0;
1097	struct jit_context ctx = {};
1098	bool tmp_blinded = false;
1099	u8 *image = NULL;
1100	int *addrs;
1101	int pass;
1102	int i;
1103
1104	if (!bpf_jit_enable)
1105		return orig_prog;
1106
1107	tmp = bpf_jit_blind_constants(prog);
1108	/* If blinding was requested and we failed during blinding,
1109	 * we must fall back to the interpreter.
1110	 */
1111	if (IS_ERR(tmp))
1112		return orig_prog;
1113	if (tmp != prog) {
1114		tmp_blinded = true;
1115		prog = tmp;
1116	}
1117
1118	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
1119	if (!addrs) {
1120		prog = orig_prog;
1121		goto out;
1122	}
1123
1124	/* Before first pass, make a rough estimation of addrs[]:
1125	 * each bpf instruction is translated to less than 64 bytes
1126	 */
1127	for (proglen = 0, i = 0; i < prog->len; i++) {
1128		proglen += 64;
1129		addrs[i] = proglen;
1130	}
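	/* Note (illustrative): starting from an over-estimate is what lets
	 * the passes below converge: addrs[] only tightens, so a jump may
	 * shrink from rel32 to rel8 between passes but never needs to grow.
	 */
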
1131	ctx.cleanup_addr = proglen;
1132
1133	/* JITed image shrinks with every pass and the loop iterates
1134	 * until the image stops shrinking. Very large bpf programs
1135	 * may converge only on the last pass. In such a case, do one more
1136	 * pass to emit the final image.
1137	 */
1138	for (pass = 0; pass < 10 || image; pass++) {
1139		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
1140		if (proglen <= 0) {
1141			image = NULL;
1142			if (header)
1143				bpf_jit_binary_free(header);
1144			prog = orig_prog;
1145			goto out_addrs;
1146		}
1147		if (image) {
1148			if (proglen != oldproglen) {
1149				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
1150				       proglen, oldproglen);
1151				prog = orig_prog;
1152				goto out_addrs;
1153			}
1154			break;
1155		}
1156		if (proglen == oldproglen) {
1157			header = bpf_jit_binary_alloc(proglen, &image,
1158						      1, jit_fill_hole);
1159			if (!header) {
1160				prog = orig_prog;
1161				goto out_addrs;
1162			}
1163		}
1164		oldproglen = proglen;
1165	}
1166
1167	if (bpf_jit_enable > 1)
1168		bpf_jit_dump(prog->len, proglen, pass + 1, image);
1169
1170	if (image) {
1171		bpf_flush_icache(header, image + proglen);
1172		set_memory_ro((unsigned long)header, header->pages);
1173		prog->bpf_func = (void *)image;
1174		prog->jited = 1;
1175	} else {
1176		prog = orig_prog;
1177	}
1178
1179out_addrs:
1180	kfree(addrs);
1181out:
1182	if (tmp_blinded)
1183		bpf_jit_prog_release_other(prog, prog == orig_prog ?
1184					   tmp : orig_prog);
1185	return prog;
1186}
1187
1188void bpf_jit_free(struct bpf_prog *fp)
1189{
1190	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
1191	struct bpf_binary_header *header = (void *)addr;
1192
1193	if (!fp->jited)
1194		goto free_filter;
1195
1196	set_memory_rw(addr, header->pages);
1197	bpf_jit_binary_free(header);
1198
1199free_filter:
1200	bpf_prog_unlock_free(fp);
1201}
v3.15
  1/* bpf_jit_comp.c : BPF JIT compiler
  2 *
  3 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
  4 *
  5 * This program is free software; you can redistribute it and/or
  6 * modify it under the terms of the GNU General Public License
  7 * as published by the Free Software Foundation; version 2
  8 * of the License.
  9 */
 10#include <linux/moduleloader.h>
 11#include <asm/cacheflush.h>
 12#include <linux/netdevice.h>
 13#include <linux/filter.h>
 14#include <linux/if_vlan.h>
 15#include <linux/random.h>
 16
 17/*
 18 * Conventions :
 19 *  EAX : BPF A accumulator
 20 *  EBX : BPF X accumulator
 21 *  RDI : pointer to skb   (first argument given to JIT function)
 22 *  RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
 23 *  ECX,EDX,ESI : scratch registers
 24 *  r9d : skb->len - skb->data_len (headlen)
 25 *  r8  : skb->data
 26 * -8(RBP) : saved RBX value
 27 * -16(RBP)..-80(RBP) : BPF_MEMWORDS values
 28 */
 29int bpf_jit_enable __read_mostly;
 30
 31/*
 32 * assembly code in arch/x86/net/bpf_jit.S
 33 */
 34extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
 35extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
 36extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
 37extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
 38extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
 39
 40static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 41{
 42	if (len == 1)
 43		*ptr = bytes;
 44	else if (len == 2)
 45		*(u16 *)ptr = bytes;
 46	else {
 47		*(u32 *)ptr = bytes;
 48		barrier();
 49	}
 50	return ptr + len;
 51}
 52
 53#define EMIT(bytes, len)	do { prog = emit_code(prog, bytes, len); } while (0)
 54
 55#define EMIT1(b1)		EMIT(b1, 1)
 56#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
 57#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
 58#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
 59#define EMIT1_off32(b1, off)	do { EMIT1(b1); EMIT(off, 4);} while (0)
 60
 61#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
 62#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
 63
 64static inline bool is_imm8(int value)
 65{
 66	return value <= 127 && value >= -128;
 67}
 68
 69static inline bool is_near(int offset)
 70{
 71	return offset <= 127 && offset >= -128;
 72}
 73
 74#define EMIT_JMP(offset)						\
 75do {									\
 76	if (offset) {							\
 77		if (is_near(offset))					\
 78			EMIT2(0xeb, offset); /* jmp .+off8 */		\
 79		else							\
 80			EMIT1_off32(0xe9, offset); /* jmp .+off32 */	\
 81	}								\
 82} while (0)
 83
 84/* list of x86 cond jump opcodes (. + s8)
 85 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 86 */
 87#define X86_JB  0x72
 88#define X86_JAE 0x73
 89#define X86_JE  0x74
 90#define X86_JNE 0x75
 91#define X86_JBE 0x76
 92#define X86_JA  0x77
 93
 94#define EMIT_COND_JMP(op, offset)				\
 95do {								\
 96	if (is_near(offset))					\
 97		EMIT2(op, offset); /* jxx .+off8 */		\
 98	else {							\
 99		EMIT2(0x0f, op + 0x10);				\
100		EMIT(offset, 4); /* jxx .+off32 */		\
101	}							\
102} while (0)
103
104#define COND_SEL(CODE, TOP, FOP)	\
105	case CODE:			\
106		t_op = TOP;		\
107		f_op = FOP;		\
108		goto cond_branch
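
/* Example (illustrative): COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE)
 * expands to "case BPF_S_JMP_JGT_K: t_op = X86_JA; f_op = X86_JBE;
 * goto cond_branch;", so cond_branch can emit the true-branch jump,
 * the inverted false-branch jump, or both, depending on the targets.
 */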
109
110
111#define SEEN_DATAREF 1 /* might call external helpers */
112#define SEEN_XREG    2 /* ebx is used */
113#define SEEN_MEM     4 /* use mem[] for temporary storage */
114
115static inline void bpf_flush_icache(void *start, void *end)
116{
117	mm_segment_t old_fs = get_fs();
118
119	set_fs(KERNEL_DS);
120	smp_wmb();
121	flush_icache_range((unsigned long)start, (unsigned long)end);
122	set_fs(old_fs);
123}
124
125#define CHOOSE_LOAD_FUNC(K, func) \
126	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
127
128/* Helper to find the offset of pkt_type in sk_buff
129 * We want to make sure it's still a 3-bit field starting at a byte boundary.
130 */
131#define PKT_TYPE_MAX 7
132static int pkt_type_offset(void)
133{
134	struct sk_buff skb_probe = {
135		.pkt_type = ~0,
136	};
137	char *ct = (char *)&skb_probe;
138	unsigned int off;
139
140	for (off = 0; off < sizeof(struct sk_buff); off++) {
141		if (ct[off] == PKT_TYPE_MAX)
142			return off;
143	}
144	pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
145	return -1;
146}
147
148struct bpf_binary_header {
149	unsigned int	pages;
150	/* Note : for security reasons, bpf code will follow a randomly
151	 * sized amount of int3 instructions
152	 */
153	u8		image[];
154};
155
156static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
157						  u8 **image_ptr)
158{
159	unsigned int sz, hole;
160	struct bpf_binary_header *header;
161
162	/* Most BPF filters are really small,
163	 * but if some of them fill a page, allow at least
164	 * 128 extra bytes to insert a random section of int3
165	 */
166	sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
167	header = module_alloc(sz);
168	if (!header)
169		return NULL;
170
171	memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
172
173	header->pages = sz / PAGE_SIZE;
174	hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
175
176	/* insert a random number of int3 instructions before BPF code */
177	*image_ptr = &header->image[prandom_u32() % hole];
178	return header;
179}
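
/* Worked example (illustrative, 4 KiB pages): for proglen = 100 and the
 * 4-byte header above, sz = round_up(232, PAGE_SIZE) = 4096 and
 * hole = min(4096 - 104, 4096 - 4) = 3992, so the JITed code starts at a
 * random offset within [0, 3992) and is surrounded by int3 padding.
 */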
180
181void bpf_jit_compile(struct sk_filter *fp)
182{
183	u8 temp[64];
184	u8 *prog;
185	unsigned int proglen, oldproglen = 0;
186	int ilen, i;
187	int t_offset, f_offset;
188	u8 t_op, f_op, seen = 0, pass;
189	u8 *image = NULL;
190	struct bpf_binary_header *header = NULL;
191	u8 *func;
192	int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
193	unsigned int cleanup_addr; /* epilogue code offset */
194	unsigned int *addrs;
195	const struct sock_filter *filter = fp->insns;
196	int flen = fp->len;
197
198	if (!bpf_jit_enable)
199		return;
200
201	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
202	if (addrs == NULL)
203		return;
204
205	/* Before first pass, make a rough estimation of addrs[]
206	 * each bpf instruction is translated to less than 64 bytes
207	 */
208	for (proglen = 0, i = 0; i < flen; i++) {
209		proglen += 64;
210		addrs[i] = proglen;
211	}
212	cleanup_addr = proglen; /* epilogue address */
213
214	for (pass = 0; pass < 10; pass++) {
215		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
216		/* no prologue/epilogue for trivial filters (RET something) */
217		proglen = 0;
218		prog = temp;
219
220		if (seen_or_pass0) {
221			EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
222			EMIT4(0x48, 0x83, 0xec, 96);	/* subq  $96,%rsp	*/
223			/* note : must save %rbx in case bpf_error is hit */
224			if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
225				EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
226			if (seen_or_pass0 & SEEN_XREG)
227				CLEAR_X(); /* make sure we don't leak kernel memory */
228
229			/*
230			 * If this filter needs to access skb data,
231			 * loads r9 and r8 with :
232			 *  r9 = skb->len - skb->data_len
233			 *  r8 = skb->data
234			 */
235			if (seen_or_pass0 & SEEN_DATAREF) {
236				if (offsetof(struct sk_buff, len) <= 127)
237					/* mov    off8(%rdi),%r9d */
238					EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
239				else {
240					/* mov    off32(%rdi),%r9d */
241					EMIT3(0x44, 0x8b, 0x8f);
242					EMIT(offsetof(struct sk_buff, len), 4);
243				}
244				if (is_imm8(offsetof(struct sk_buff, data_len)))
245					/* sub    off8(%rdi),%r9d */
246					EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
247				else {
248					EMIT3(0x44, 0x2b, 0x8f);
249					EMIT(offsetof(struct sk_buff, data_len), 4);
250				}
251
252				if (is_imm8(offsetof(struct sk_buff, data)))
253					/* mov off8(%rdi),%r8 */
254					EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
255				else {
256					/* mov off32(%rdi),%r8 */
257					EMIT3(0x4c, 0x8b, 0x87);
258					EMIT(offsetof(struct sk_buff, data), 4);
259				}
260			}
261		}
262
263		switch (filter[0].code) {
264		case BPF_S_RET_K:
265		case BPF_S_LD_W_LEN:
266		case BPF_S_ANC_PROTOCOL:
267		case BPF_S_ANC_IFINDEX:
268		case BPF_S_ANC_MARK:
269		case BPF_S_ANC_RXHASH:
270		case BPF_S_ANC_CPU:
271		case BPF_S_ANC_VLAN_TAG:
272		case BPF_S_ANC_VLAN_TAG_PRESENT:
273		case BPF_S_ANC_QUEUE:
274		case BPF_S_ANC_PKTTYPE:
275		case BPF_S_LD_W_ABS:
276		case BPF_S_LD_H_ABS:
277		case BPF_S_LD_B_ABS:
278			/* first instruction sets A register (or is RET 'constant') */
279			break;
280		default:
281			/* make sure we don't leak kernel information to user */
282			CLEAR_A(); /* A = 0 */
283		}
284
285		for (i = 0; i < flen; i++) {
286			unsigned int K = filter[i].k;
287
288			switch (filter[i].code) {
289			case BPF_S_ALU_ADD_X: /* A += X; */
290				seen |= SEEN_XREG;
291				EMIT2(0x01, 0xd8);		/* add %ebx,%eax */
292				break;
293			case BPF_S_ALU_ADD_K: /* A += K; */
294				if (!K)
295					break;
296				if (is_imm8(K))
297					EMIT3(0x83, 0xc0, K);	/* add imm8,%eax */
298				else
299					EMIT1_off32(0x05, K);	/* add imm32,%eax */
300				break;
301			case BPF_S_ALU_SUB_X: /* A -= X; */
302				seen |= SEEN_XREG;
303				EMIT2(0x29, 0xd8);		/* sub    %ebx,%eax */
304				break;
305			case BPF_S_ALU_SUB_K: /* A -= K */
306				if (!K)
307					break;
308				if (is_imm8(K))
309					EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */
310				else
311					EMIT1_off32(0x2d, K); /* sub imm32,%eax */
312				break;
313			case BPF_S_ALU_MUL_X: /* A *= X; */
314				seen |= SEEN_XREG;
315				EMIT3(0x0f, 0xaf, 0xc3);	/* imul %ebx,%eax */
316				break;
317			case BPF_S_ALU_MUL_K: /* A *= K */
318				if (is_imm8(K))
319					EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
320				else {
321					EMIT2(0x69, 0xc0);		/* imul imm32,%eax */
322					EMIT(K, 4);
323				}
324				break;
325			case BPF_S_ALU_DIV_X: /* A /= X; */
326				seen |= SEEN_XREG;
327				EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
328				if (pc_ret0 > 0) {
329					/* addrs[pc_ret0 - 1] is start address of target
330					 * (addrs[i] - 4) is the address following this jmp
331					 * ("xor %edx,%edx; div %ebx" being 4 bytes long)
332					 */
333					EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
334								(addrs[i] - 4));
335				} else {
336					EMIT_COND_JMP(X86_JNE, 2 + 5);
337					CLEAR_A();
338					EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
339				}
340				EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
341				break;
342			case BPF_S_ALU_MOD_X: /* A %= X; */
343				seen |= SEEN_XREG;
344				EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
345				if (pc_ret0 > 0) {
346					/* addrs[pc_ret0 - 1] is start address of target
347					 * (addrs[i] - 6) is the address following this jmp
348					 * ("xor %edx,%edx; div %ebx; mov %edx,%eax" being 6 bytes long)
349					 */
350					EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
351								(addrs[i] - 6));
352				} else {
353					EMIT_COND_JMP(X86_JNE, 2 + 5);
354					CLEAR_A();
355					EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
356				}
357				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
358				EMIT2(0xf7, 0xf3);	/* div %ebx */
359				EMIT2(0x89, 0xd0);	/* mov %edx,%eax */
360				break;
361			case BPF_S_ALU_MOD_K: /* A %= K; */
362				if (K == 1) {
363					CLEAR_A();
364					break;
365				}
366				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
367				EMIT1(0xb9);EMIT(K, 4);	/* mov imm32,%ecx */
368				EMIT2(0xf7, 0xf1);	/* div %ecx */
369				EMIT2(0x89, 0xd0);	/* mov %edx,%eax */
370				break;
371			case BPF_S_ALU_DIV_K: /* A /= K */
372				if (K == 1)
373					break;
374				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
375				EMIT1(0xb9);EMIT(K, 4);	/* mov imm32,%ecx */
376				EMIT2(0xf7, 0xf1);	/* div %ecx */
377				break;
378			case BPF_S_ALU_AND_X:
379				seen |= SEEN_XREG;
380				EMIT2(0x21, 0xd8);		/* and %ebx,%eax */
381				break;
382			case BPF_S_ALU_AND_K:
383				if (K >= 0xFFFFFF00) {
384					EMIT2(0x24, K & 0xFF); /* and imm8,%al */
385				} else if (K >= 0xFFFF0000) {
386					EMIT2(0x66, 0x25);	/* and imm16,%ax */
387					EMIT(K, 2);
388				} else {
389					EMIT1_off32(0x25, K);	/* and imm32,%eax */
390				}
391				break;
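
				/* Note (illustrative): when the top 24 bits of K are
				 * all ones, only %al actually needs masking, so the
				 * 2-byte "and imm8,%al" form above is equivalent to
				 * the 5-byte "and imm32,%eax"; the same holds for the
				 * 16-bit form when the top 16 bits are all ones.
				 */
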
392			case BPF_S_ALU_OR_X:
393				seen |= SEEN_XREG;
394				EMIT2(0x09, 0xd8);		/* or %ebx,%eax */
395				break;
396			case BPF_S_ALU_OR_K:
397				if (is_imm8(K))
398					EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
399				else
400					EMIT1_off32(0x0d, K);	/* or imm32,%eax */
401				break;
402			case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
403			case BPF_S_ALU_XOR_X:
404				seen |= SEEN_XREG;
405				EMIT2(0x31, 0xd8);		/* xor %ebx,%eax */
406				break;
407			case BPF_S_ALU_XOR_K: /* A ^= K; */
408				if (K == 0)
409					break;
410				if (is_imm8(K))
411					EMIT3(0x83, 0xf0, K);	/* xor imm8,%eax */
412				else
413					EMIT1_off32(0x35, K);	/* xor imm32,%eax */
414				break;
415			case BPF_S_ALU_LSH_X: /* A <<= X; */
416				seen |= SEEN_XREG;
417				EMIT4(0x89, 0xd9, 0xd3, 0xe0);	/* mov %ebx,%ecx; shl %cl,%eax */
418				break;
419			case BPF_S_ALU_LSH_K:
420				if (K == 0)
421					break;
422				else if (K == 1)
423					EMIT2(0xd1, 0xe0); /* shl %eax */
424				else
425					EMIT3(0xc1, 0xe0, K);
426				break;
427			case BPF_S_ALU_RSH_X: /* A >>= X; */
428				seen |= SEEN_XREG;
429				EMIT4(0x89, 0xd9, 0xd3, 0xe8);	/* mov %ebx,%ecx; shr %cl,%eax */
430				break;
431			case BPF_S_ALU_RSH_K: /* A >>= K; */
432				if (K == 0)
433					break;
434				else if (K == 1)
435					EMIT2(0xd1, 0xe8); /* shr %eax */
436				else
437					EMIT3(0xc1, 0xe8, K);
438				break;
439			case BPF_S_ALU_NEG:
440				EMIT2(0xf7, 0xd8);		/* neg %eax */
441				break;
442			case BPF_S_RET_K:
443				if (!K) {
444					if (pc_ret0 == -1)
445						pc_ret0 = i;
446					CLEAR_A();
447				} else {
448					EMIT1_off32(0xb8, K);	/* mov $imm32,%eax */
449				}
450				/* fall through */
451			case BPF_S_RET_A:
452				if (seen_or_pass0) {
453					if (i != flen - 1) {
454						EMIT_JMP(cleanup_addr - addrs[i]);
455						break;
456					}
457					if (seen_or_pass0 & SEEN_XREG)
458						EMIT4(0x48, 0x8b, 0x5d, 0xf8);  /* mov  -8(%rbp),%rbx */
459					EMIT1(0xc9);		/* leaveq */
460				}
461				EMIT1(0xc3);		/* ret */
462				break;
463			case BPF_S_MISC_TAX: /* X = A */
464				seen |= SEEN_XREG;
465				EMIT2(0x89, 0xc3);	/* mov    %eax,%ebx */
466				break;
467			case BPF_S_MISC_TXA: /* A = X */
468				seen |= SEEN_XREG;
469				EMIT2(0x89, 0xd8);	/* mov    %ebx,%eax */
470				break;
471			case BPF_S_LD_IMM: /* A = K */
472				if (!K)
473					CLEAR_A();
474				else
475					EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
476				break;
477			case BPF_S_LDX_IMM: /* X = K */
478				seen |= SEEN_XREG;
479				if (!K)
480					CLEAR_X();
481				else
482					EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */
483				break;
484			case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
485				seen |= SEEN_MEM;
486				EMIT3(0x8b, 0x45, 0xf0 - K*4);
487				break;
488			case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
489				seen |= SEEN_XREG | SEEN_MEM;
490				EMIT3(0x8b, 0x5d, 0xf0 - K*4);
491				break;
492			case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
493				seen |= SEEN_MEM;
494				EMIT3(0x89, 0x45, 0xf0 - K*4);
495				break;
496			case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
497				seen |= SEEN_XREG | SEEN_MEM;
498				EMIT3(0x89, 0x5d, 0xf0 - K*4);
499				break;
500			case BPF_S_LD_W_LEN: /*	A = skb->len; */
501				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
502				if (is_imm8(offsetof(struct sk_buff, len)))
503					/* mov    off8(%rdi),%eax */
504					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
505				else {
506					EMIT2(0x8b, 0x87);
507					EMIT(offsetof(struct sk_buff, len), 4);
508				}
509				break;
510			case BPF_S_LDX_W_LEN: /* X = skb->len; */
511				seen |= SEEN_XREG;
512				if (is_imm8(offsetof(struct sk_buff, len)))
513					/* mov off8(%rdi),%ebx */
514					EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
515				else {
516					EMIT2(0x8b, 0x9f);
517					EMIT(offsetof(struct sk_buff, len), 4);
518				}
519				break;
520			case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
521				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
522				if (is_imm8(offsetof(struct sk_buff, protocol))) {
523					/* movzwl off8(%rdi),%eax */
524					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
525				} else {
526					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
527					EMIT(offsetof(struct sk_buff, protocol), 4);
528				}
529				EMIT2(0x86, 0xc4); /* ntohs() : xchg   %al,%ah */
530				break;
531			case BPF_S_ANC_IFINDEX:
532				if (is_imm8(offsetof(struct sk_buff, dev))) {
533					/* movq off8(%rdi),%rax */
534					EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
535				} else {
536					EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
537					EMIT(offsetof(struct sk_buff, dev), 4);
538				}
539				EMIT3(0x48, 0x85, 0xc0);	/* test %rax,%rax */
540				EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
541				BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
542				EMIT2(0x8b, 0x80);	/* mov off32(%rax),%eax */
543				EMIT(offsetof(struct net_device, ifindex), 4);
544				break;
545			case BPF_S_ANC_MARK:
546				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
547				if (is_imm8(offsetof(struct sk_buff, mark))) {
548					/* mov off8(%rdi),%eax */
549					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
550				} else {
551					EMIT2(0x8b, 0x87);
552					EMIT(offsetof(struct sk_buff, mark), 4);
553				}
554				break;
555			case BPF_S_ANC_RXHASH:
556				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
557				if (is_imm8(offsetof(struct sk_buff, hash))) {
558					/* mov off8(%rdi),%eax */
559					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash));
560				} else {
561					EMIT2(0x8b, 0x87);
562					EMIT(offsetof(struct sk_buff, hash), 4);
563				}
564				break;
565			case BPF_S_ANC_QUEUE:
566				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
567				if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
568					/* movzwl off8(%rdi),%eax */
569					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
570				} else {
571					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
572					EMIT(offsetof(struct sk_buff, queue_mapping), 4);
573				}
574				break;
575			case BPF_S_ANC_CPU:
576#ifdef CONFIG_SMP
577				EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
578				EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
579#else
580				CLEAR_A();
581#endif
582				break;
583			case BPF_S_ANC_VLAN_TAG:
584			case BPF_S_ANC_VLAN_TAG_PRESENT:
585				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
586				if (is_imm8(offsetof(struct sk_buff, vlan_tci))) {
587					/* movzwl off8(%rdi),%eax */
588					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci));
589				} else {
590					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
591					EMIT(offsetof(struct sk_buff, vlan_tci), 4);
592				}
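				/* vlan_tci layout: bit 12 is VLAN_TAG_PRESENT
				 * (0x1000).  ANC_VLAN_TAG clears it ("and
				 * $0xef,%ah" clears bit 4 of %ah), while
				 * ANC_VLAN_TAG_PRESENT shifts it down to bit 0
				 * and isolates it.
				 */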
593				BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
594				if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
595					EMIT3(0x80, 0xe4, 0xef); /* and    $0xef,%ah */
596				} else {
597					EMIT3(0xc1, 0xe8, 0x0c); /* shr    $0xc,%eax */
598					EMIT3(0x83, 0xe0, 0x01); /* and    $0x1,%eax */
599				}
600				break;
601			case BPF_S_ANC_PKTTYPE:
602			{
603				int off = pkt_type_offset();
604
605				if (off < 0)
606					goto out;
607				if (is_imm8(off)) {
608					/* movzbl off8(%rdi),%eax */
609					EMIT4(0x0f, 0xb6, 0x47, off);
610				} else {
611					/* movzbl off32(%rdi),%eax */
612					EMIT3(0x0f, 0xb6, 0x87);
613					EMIT(off, 4);
614				}
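				/* pkt_type is a 3-bit bitfield: mask the
				 * loaded byte down to it (PKT_TYPE_MAX == 7).
				 */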
615				EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and    $0x7,%eax */
616				break;
617			}
618			case BPF_S_LD_W_ABS:
619				func = CHOOSE_LOAD_FUNC(K, sk_load_word);
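				/* Common path for absolute loads: the
				 * sk_load_* helpers in bpf_jit.S take skb in
				 * %rdi and the offset in %esi, return the
				 * value in %eax, and on a bad offset unwind
				 * and make the filter return 0.
				 * CHOOSE_LOAD_FUNC picks the positive- or
				 * negative-offset variant from the sign of K.
				 */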
620common_load:			seen |= SEEN_DATAREF;
621				t_offset = func - (image + addrs[i]);
622				EMIT1_off32(0xbe, K); /* mov imm32,%esi */
623				EMIT1_off32(0xe8, t_offset); /* call */
624				break;
625			case BPF_S_LD_H_ABS:
626				func = CHOOSE_LOAD_FUNC(K, sk_load_half);
627				goto common_load;
628			case BPF_S_LD_B_ABS:
629				func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
630				goto common_load;
631			case BPF_S_LDX_B_MSH:
632				func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
633				seen |= SEEN_DATAREF | SEEN_XREG;
634				t_offset = func - (image + addrs[i]);
635				EMIT1_off32(0xbe, K);	/* mov imm32,%esi */
636				EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
637				break;
638			case BPF_S_LD_W_IND:
639				func = sk_load_word;
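				/* Indirect loads fetch at X + K.  X is kept
				 * in %ebx, so %esi is built with a single lea
				 * (or a plain mov when K == 0) before calling
				 * the same sk_load_* helpers as above.
				 */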
640common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;
641				t_offset = func - (image + addrs[i]);
642				if (K) {
643					if (is_imm8(K)) {
644						EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
645					} else {
646						EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
647						EMIT(K, 4);
648					}
649				} else {
650					EMIT2(0x89, 0xde); /* mov %ebx,%esi */
651				}
652				EMIT1_off32(0xe8, t_offset);	/* call sk_load_xxx_ind */
653				break;
654			case BPF_S_LD_H_IND:
655				func = sk_load_half;
656				goto common_load_ind;
657			case BPF_S_LD_B_IND:
658				func = sk_load_byte;
659				goto common_load_ind;
660			case BPF_S_JMP_JA:
661				t_offset = addrs[i + K] - addrs[i];
662				EMIT_JMP(t_offset);
663				break;
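			/* COND_SEL(code, t, f) expands to "case code:" that
			 * sets t_op/f_op to the taken/not-taken jump opcodes
			 * and jumps to cond_branch below.
			 */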
664			COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
665			COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
666			COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
667			COND_SEL(BPF_S_JMP_JSET_K, X86_JNE, X86_JE);
668			COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
669			COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
670			COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
671			COND_SEL(BPF_S_JMP_JSET_X, X86_JNE, X86_JE);
672
673cond_branch:			f_offset = addrs[i + filter[i].jf] - addrs[i];
674				t_offset = addrs[i + filter[i].jt] - addrs[i];
675
676				/* same targets, can avoid doing the test :) */
677				if (filter[i].jt == filter[i].jf) {
678					EMIT_JMP(t_offset);
679					break;
680				}
681
682				switch (filter[i].code) {
683				case BPF_S_JMP_JGT_X:
684				case BPF_S_JMP_JGE_X:
685				case BPF_S_JMP_JEQ_X:
686					seen |= SEEN_XREG;
687					EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
688					break;
689				case BPF_S_JMP_JSET_X:
690					seen |= SEEN_XREG;
691					EMIT2(0x85, 0xd8); /* test %ebx,%eax */
692					break;
693				case BPF_S_JMP_JEQ_K:
694					if (K == 0) {
695						EMIT2(0x85, 0xc0); /* test   %eax,%eax */
696						break;
697					}
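					/* K != 0: fall through to the cmp
					 * emitted for JGT/JGE below.
					 */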
698				case BPF_S_JMP_JGT_K:
699				case BPF_S_JMP_JGE_K:
700					if (K <= 127)
701						EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
702					else
703						EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
704					break;
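				/* Pick the shortest "test" encoding for K:
				 *   K <= 0xff            test imm8,%al   (2 bytes)
				 *   only bits 8-15 set   test imm8,%ah   (3 bytes)
				 *   K <= 0xffff          test imm16,%ax  (4 bytes)
				 *   otherwise            test imm32,%eax (5 bytes)
				 */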
705				case BPF_S_JMP_JSET_K:
706					if (K <= 0xFF)
707						EMIT2(0xa8, K); /* test imm8,%al */
708					else if (!(K & 0xFFFF00FF))
709						EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
710					else if (K <= 0xFFFF) {
711						EMIT2(0x66, 0xa9); /* test imm16,%ax */
712						EMIT(K, 2);
713					} else {
714						EMIT1_off32(0xa9, K); /* test imm32,%eax */
715					}
716					break;
717				}
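				/* Branch layout:
				 *   jt && jf : jcc t_target; jmp f_target
				 *   jt only  : jcc t_target
				 *   jf only  : inverted jcc f_target
				 * When the extra jmp is emitted, t_offset grows
				 * by its size (2 bytes short, 5 bytes near).
				 */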
718				if (filter[i].jt != 0) {
719					if (filter[i].jf && f_offset)
720						t_offset += is_near(f_offset) ? 2 : 5;
721					EMIT_COND_JMP(t_op, t_offset);
722					if (filter[i].jf)
723						EMIT_JMP(f_offset);
724					break;
725				}
726				EMIT_COND_JMP(f_op, f_offset);
727				break;
728			default:
729				/* unknown or unsupported instruction: give up on the JIT */
730				goto out;
731			}
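			/* Code was emitted into the temp[] scratch buffer;
			 * copy it into the image only when one exists, so the
			 * sizing passes and the final emission pass share the
			 * same loop.
			 */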
732			ilen = prog - temp;
733			if (image) {
734				if (unlikely(proglen + ilen > oldproglen)) {
735					pr_err("bpf_jit_compile fatal error\n");
736					kfree(addrs);
737					module_free(NULL, header);
738					return;
739				}
740				memcpy(image + proglen, temp, ilen);
741			}
742			proglen += ilen;
743			addrs[i] = proglen;
744			prog = temp;
745		}
746		/* the last BPF instruction is always a RET:
747		 * use its address to locate the cleanup (epilogue) instructions
748		 */
749		cleanup_addr = proglen - 1; /* ret */
750		if (seen_or_pass0)
751			cleanup_addr -= 1; /* leaveq */
752		if (seen_or_pass0 & SEEN_XREG)
753			cleanup_addr -= 4; /* mov  -8(%rbp),%rbx */
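		/* Epilogue layout: an optional "mov -8(%rbp),%rbx" (4 bytes)
		 * when X was used, "leaveq" (1 byte) when any prologue was
		 * generated, then "ret" (1 byte); the subtractions above walk
		 * backwards over those bytes.
		 */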
754
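		/* Instruction encodings can only shrink from one pass to the
		 * next (jump offsets collapse into imm8 forms), so proglen
		 * converges; once two passes agree, the image is allocated
		 * and one more pass writes the final bytes.
		 */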
755		if (image) {
756			if (proglen != oldproglen)
757				pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
758			break;
759		}
760		if (proglen == oldproglen) {
761			header = bpf_alloc_binary(proglen, &image);
762			if (!header)
763				goto out;
764		}
765		oldproglen = proglen;
766	}
767
768	if (bpf_jit_enable > 1)
769		bpf_jit_dump(flen, proglen, pass, image);
770
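	/* Sync the icache with the freshly written code and write-protect
	 * the image pages before the filter goes live.
	 */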
771	if (image) {
772		bpf_flush_icache(header, image + proglen);
773		set_memory_ro((unsigned long)header, header->pages);
774		fp->bpf_func = (void *)image;
775		fp->jited = 1;
776	}
777out:
778	kfree(addrs);
779	return;
780}
781
782static void bpf_jit_free_deferred(struct work_struct *work)
783{
784	struct sk_filter *fp = container_of(work, struct sk_filter, work);
785	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
786	struct bpf_binary_header *header = (void *)addr;
787
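	/* The image was write-protected when the filter was installed;
	 * restore write access before handing the pages back.
	 */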
788	set_memory_rw(addr, header->pages);
789	module_free(NULL, header);
790	kfree(fp);
791}
792
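/* set_memory_rw() and module_free() need process context, while filters
 * may be released from atomic context (e.g. an RCU callback), so the
 * actual teardown in bpf_jit_free_deferred() runs from a workqueue.
 */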
793void bpf_jit_free(struct sk_filter *fp)
794{
795	if (fp->jited) {
796		INIT_WORK(&fp->work, bpf_jit_free_deferred);
797		schedule_work(&fp->work);
798	} else {
799		kfree(fp);
800	}
801}