Linux Audio

Check our new training course

Loading...
v4.6
   1/*
   2 * BPF Jit compiler for s390.
   3 *
   4 * Minimum build requirements:
   5 *
   6 *  - HAVE_MARCH_Z196_FEATURES: laal, laalg
   7 *  - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
   8 *  - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
   9 *  - PACK_STACK
  10 *  - 64BIT
  11 *
  12 * Copyright IBM Corp. 2012,2015
  13 *
  14 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  15 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
  16 */
  17
  18#define KMSG_COMPONENT "bpf_jit"
  19#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  20
  21#include <linux/netdevice.h>
 
  22#include <linux/filter.h>
 
  23#include <linux/init.h>
  24#include <linux/bpf.h>
  25#include <asm/cacheflush.h>
 
  26#include <asm/dis.h>
  27#include "bpf_jit.h"
  28
int bpf_jit_enable __read_mostly;	/* non-zero enables the JIT; set externally (not visible in this file) */
  30
/*
 * Per-compilation JIT context.
 *
 * Sizing passes run with prg_buf == NULL (the EMIT macros then only
 * advance the position counters); the final pass writes the machine
 * code and literal pool into prg_buf.
 */
struct bpf_jit {
	u32 seen;		/* Flags to remember seen eBPF instructions */
	u32 seen_reg[16];	/* Array to remember which registers are used */
	u32 *addrs;		/* Array with relative instruction addresses */
	u8 *prg_buf;		/* Start of program */
	int size;		/* Size of program and literal pool */
	int size_prg;		/* Size of program */
	int prg;		/* Current position in program */
	int lit_start;		/* Start of literal pool */
	int lit;		/* Current position in literal pool */
	int base_ip;		/* Base address for literal pool */
	int ret0_ip;		/* Address of return 0 */
	int exit_ip;		/* Address of exit */
	int tail_call_start;	/* Tail call start offset */
	int labels[1];		/* Labels for local jumps */
};
  47
#define BPF_SIZE_MAX	0x7ffff	/* Max size for program (20 bit signed displ) */

/*
 * Bits for jit->seen: recorded while translating instructions and used
 * to decide which parts of the prologue/epilogue must be generated.
 */
#define SEEN_SKB	1	/* skb access */
#define SEEN_MEM	2	/* use mem[] for temporary storage */
#define SEEN_RET0	4	/* ret0_ip points to a valid return 0 */
#define SEEN_LITERAL	8	/* code uses literals */
#define SEEN_FUNC	16	/* calls C functions */
#define SEEN_TAIL_CALL	32	/* code uses tail calls */
#define SEEN_SKB_CHANGE	64	/* code changes skb data */
#define SEEN_STACK	(SEEN_FUNC | SEEN_MEM | SEEN_SKB)
  58
  59/*
  60 * s390 registers
 
 
 
 
 
 
 
 
 
 
 
  61 */
/*
 * JIT-internal register numbers: they follow the eBPF register set and
 * index reg2hex[] below, just like the BPF_REG_* values.
 */
#define REG_W0		(__MAX_BPF_REG+0)	/* Work register 1 (even) */
#define REG_W1		(__MAX_BPF_REG+1)	/* Work register 2 (odd) */
#define REG_SKB_DATA	(__MAX_BPF_REG+2)	/* SKB data register */
#define REG_L		(__MAX_BPF_REG+3)	/* Literal pool register */
#define REG_15		(__MAX_BPF_REG+4)	/* Register 15 */
#define REG_0		REG_W0			/* Register 0 */
#define REG_1		REG_W1			/* Register 1 */
#define REG_2		BPF_REG_1		/* Register 2 */
#define REG_14		BPF_REG_0		/* Register 14 */
  71
/*
 * Mapping of BPF registers to s390 registers
 *
 * Note the cross-over: the eBPF return value register (BPF_REG_0) lives
 * in %r14 while the first argument (BPF_REG_1) lives in %r2; see the
 * REG_2/REG_14 aliases above.
 */
static const int reg2hex[] = {
	/* Return code */
	[BPF_REG_0]	= 14,
	/* Function parameters */
	[BPF_REG_1]	= 2,
	[BPF_REG_2]	= 3,
	[BPF_REG_3]	= 4,
	[BPF_REG_4]	= 5,
	[BPF_REG_5]	= 6,
	/* Call saved registers */
	[BPF_REG_6]	= 7,
	[BPF_REG_7]	= 8,
	[BPF_REG_8]	= 9,
	[BPF_REG_9]	= 10,
	/* BPF stack pointer */
	[BPF_REG_FP]	= 13,
	/* SKB data pointer */
	[REG_SKB_DATA]	= 12,
	/* Work registers for s390x backend */
	[REG_W0]	= 0,
	[REG_W1]	= 1,
	[REG_L]		= 11,
	[REG_15]	= 15,
};
  99
 100static inline u32 reg(u32 dst_reg, u32 src_reg)
 101{
 102	return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
 103}
 104
 105static inline u32 reg_high(u32 reg)
 106{
 107	return reg2hex[reg] << 4;
 108}
 109
 110static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 111{
 112	u32 r1 = reg2hex[b1];
 113
 114	if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
 115		jit->seen_reg[r1] = 1;
 116}
 117
/* Record that the s390 register backing b1 is used (see reg_set_seen()) */
#define REG_SET_SEEN(b1)					\
({								\
	reg_set_seen(jit, b1);					\
})

/* Non-zero when the s390 register backing b1 has been marked as used */
#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
 124
 125/*
 126 * EMIT macros for code generation
 127 */
 
 
 128
/*
 * The _EMIT* macros store instruction words only when jit->prg_buf is
 * set (final pass); jit->prg always advances so that the sizing passes
 * compute the same offsets.
 */

/* Emit a 2 byte instruction */
#define _EMIT2(op)						\
({								\
	if (jit->prg_buf)					\
		*(u16 *) (jit->prg_buf + jit->prg) = op;	\
	jit->prg += 2;						\
})

/* 2 byte instruction with two mapped registers (RR format) */
#define EMIT2(op, b1, b2)					\
({								\
	_EMIT2(op | reg(b1, b2));				\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* Emit a 4 byte instruction */
#define _EMIT4(op)						\
({								\
	if (jit->prg_buf)					\
		*(u32 *) (jit->prg_buf + jit->prg) = op;	\
	jit->prg += 4;						\
})

/* 4 byte instruction with two mapped registers */
#define EMIT4(op, b1, b2)					\
({								\
	_EMIT4(op | reg(b1, b2));				\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* 4 byte instruction with a third register (RRF format) */
#define EMIT4_RRF(op, b1, b2, b3)				\
({								\
	_EMIT4(op | reg_high(b3) << 8 | reg(b1, b2));		\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
	REG_SET_SEEN(b3);					\
})

/* 4 byte instruction with a 12 bit displacement */
#define _EMIT4_DISP(op, disp)					\
({								\
	unsigned int __disp = (disp) & 0xfff;			\
	_EMIT4(op | __disp);					\
})

/* 4 byte instruction with two registers and a displacement */
#define EMIT4_DISP(op, b1, b2, disp)				\
({								\
	_EMIT4_DISP(op | reg_high(b1) << 16 |			\
		    reg_high(b2) << 8, disp);			\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* 4 byte instruction with a 16 bit immediate */
#define EMIT4_IMM(op, b1, imm)					\
({								\
	unsigned int __imm = (imm) & 0xffff;			\
	_EMIT4(op | reg_high(b1) << 16 | __imm);		\
	REG_SET_SEEN(b1);					\
})

/* 4 byte branch with a pc-relative offset (in halfwords) */
#define EMIT4_PCREL(op, pcrel)					\
({								\
	long __pcrel = ((pcrel) >> 1) & 0xffff;			\
	_EMIT4(op | __pcrel);					\
})

/* Emit a 6 byte instruction (4 byte + 2 byte part) */
#define _EMIT6(op1, op2)					\
({								\
	if (jit->prg_buf) {					\
		*(u32 *) (jit->prg_buf + jit->prg) = op1;	\
		*(u16 *) (jit->prg_buf + jit->prg + 4) = op2;	\
	}							\
	jit->prg += 6;						\
})

/* 6 byte instruction with a 12 bit displacement */
#define _EMIT6_DISP(op1, op2, disp)				\
({								\
	unsigned int __disp = (disp) & 0xfff;			\
	_EMIT6(op1 | __disp, op2);				\
})

/* 6 byte instruction with a 20 bit displacement split into low/high part */
#define _EMIT6_DISP_LH(op1, op2, disp)				\
({								\
	u32 _disp = (u32) disp;					\
	unsigned int __disp_h = _disp & 0xff000;		\
	unsigned int __disp_l = _disp & 0x00fff;		\
	_EMIT6(op1 | __disp_l, op2 | __disp_h >> 4);		\
})

/* 6 byte long-displacement instruction with three registers */
#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp)		\
({								\
	_EMIT6_DISP_LH(op1 | reg(b1, b2) << 16 |		\
		       reg_high(b3) << 8, op2, disp);		\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
	REG_SET_SEEN(b3);					\
})

/* 6 byte compare-and-branch to a previously recorded local label */
#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask)	\
({								\
	int rel = (jit->labels[label] - jit->prg) >> 1;		\
	_EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff),	\
	       op2 | mask << 12);				\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* 6 byte compare-immediate-and-branch to a local label (imm <= 0xff) */
#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask)	\
({								\
	int rel = (jit->labels[label] - jit->prg) >> 1;		\
	_EMIT6(op1 | (reg_high(b1) | mask) << 16 |		\
		(rel & 0xffff), op2 | (imm & 0xff) << 8);	\
	REG_SET_SEEN(b1);					\
	BUILD_BUG_ON(((unsigned long) imm) > 0xff);		\
})

/* 6 byte relative branch over "off" BPF instructions (uses addrs[]) */
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)		\
({								\
	/* Branch instruction needs 6 bytes */			\
	int rel = (addrs[i + off + 1] - (addrs[i + 1] - 6)) / 2;\
	_EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), op2 | mask);	\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* 6 byte instruction with a 32 bit immediate split into two halfwords */
#define _EMIT6_IMM(op, imm)					\
({								\
	unsigned int __imm = (imm);				\
	_EMIT6(op | (__imm >> 16), __imm & 0xffff);		\
})

/* 6 byte instruction with one register and a 32 bit immediate */
#define EMIT6_IMM(op, b1, imm)					\
({								\
	_EMIT6_IMM(op | reg_high(b1) << 16, imm);		\
	REG_SET_SEEN(b1);					\
})

/* Place a 32 bit constant in the literal pool; yields its displacement */
#define EMIT_CONST_U32(val)					\
({								\
	unsigned int ret;					\
	ret = jit->lit - jit->base_ip;				\
	jit->seen |= SEEN_LITERAL;				\
	if (jit->prg_buf)					\
		*(u32 *) (jit->prg_buf + jit->lit) = (u32) val;	\
	jit->lit += 4;						\
})

/* Place a 64 bit constant in the literal pool; yields its displacement */
#define EMIT_CONST_U64(val)					\
({								\
	unsigned int ret;					\
	ret = jit->lit - jit->base_ip;				\
	jit->seen |= SEEN_LITERAL;				\
	if (jit->prg_buf)					\
		*(u64 *) (jit->prg_buf + jit->lit) = (u64) val;	\
	jit->lit += 8;						\
	ret;							\
})

/* Zero the upper 32 bits of a mapped register */
#define EMIT_ZERO(b1)						\
({								\
	/* llgfr %dst,%dst (zero extend to 64 bit) */		\
	EMIT4(0xb9160000, b1, b1);				\
	REG_SET_SEEN(b1);					\
})
 291
 292/*
 293 * Fill whole space with illegal instructions
 294 */
static void jit_fill_hole(void *area, unsigned int size)
{
	/*
	 * Zero bytes serve as the "illegal instruction" fill here (see
	 * the header comment above): stray control flow into the
	 * padding faults instead of executing leftover code.
	 */
	memset(area, 0, size);
}
 299
 300/*
 301 * Save registers from "rs" (register start) to "re" (register end) on stack
 302 */
static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
	/* Stack slot of the first saved register, relative to %r15 */
	u32 off = STK_OFF_R6 + (rs - 6) * 8;

	/*
	 * Unlike restore_regs() no STK_OFF is added here: the prologue
	 * saves registers before it lowers %r15 by STK_OFF.
	 */
	if (rs == re)
		/* stg %rs,off(%r15) */
		_EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
	else
		/* stmg %rs,%re,off(%r15) */
		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
}
 314
 315/*
 316 * Restore registers from "rs" (register start) to "re" (register end) on stack
 317 */
static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
	u32 off = STK_OFF_R6 + (rs - 6) * 8;

	/*
	 * The epilogue restores while %r15 is still lowered by STK_OFF,
	 * so the displacement must be adjusted. When the stack frame is
	 * used, %r15 itself is marked seen (prologue's aghi) and the
	 * lmg below also reloads the original stack pointer.
	 */
	if (jit->seen & SEEN_STACK)
		off += STK_OFF;

	if (rs == re)
		/* lg %rs,off(%r15) */
		_EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
	else
		/* lmg %rs,%re,off(%r15) */
		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
}
 332
 333/*
 334 * Return first seen register (from start)
 335 */
 336static int get_start(struct bpf_jit *jit, int start)
 337{
 338	int i;
 339
 340	for (i = start; i <= 15; i++) {
 341		if (jit->seen_reg[i])
 342			return i;
 343	}
 344	return 0;
 345}
 346
 347/*
 348 * Return last seen register (from start) (gap >= 2)
 349 */
 350static int get_end(struct bpf_jit *jit, int start)
 351{
 352	int i;
 353
 354	for (i = start; i < 15; i++) {
 355		if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
 356			return i - 1;
 357	}
 358	return jit->seen_reg[15] ? 15 : 14;
 359}
 360
 361#define REGS_SAVE	1
 362#define REGS_RESTORE	0
 363/*
 364 * Save and restore clobbered registers (6-15) on stack.
 365 * We save/restore registers in chunks with gap >= 2 registers.
 366 */
 367static void save_restore_regs(struct bpf_jit *jit, int op)
 368{
 369
 370	int re = 6, rs;
 371
 372	do {
 373		rs = get_start(jit, re);
 374		if (!rs)
 375			break;
 376		re = get_end(jit, rs + 1);
 377		if (op == REGS_SAVE)
 378			save_regs(jit, rs, re);
 379		else
 380			restore_regs(jit, rs, re);
 381		re++;
 382	} while (re <= 15);
 383}
 384
 385/*
 386 * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
 387 * we store the SKB header length on the stack and the SKB data
 388 * pointer in REG_SKB_DATA.
 389 */
static void emit_load_skb_data_hlen(struct bpf_jit *jit)
{
	/* Header length: llgf %w1,<len>(%b1) */
	EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
		      offsetof(struct sk_buff, len));
	/* Subtract paged size: s %w1,<data_len>(%b1) -> linear hlen */
	EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
		   offsetof(struct sk_buff, data_len));
	/* Spill the header length: stg %w1,ST_OFF_HLEN(%r0,%r15) */
	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN);
	/* Cache the data pointer: lg %skb_data,data_off(%b1) */
	EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
		      BPF_REG_1, offsetof(struct sk_buff, data));
}
 404
 405/*
 406 * Emit function prologue
 407 *
 408 * Save registers and create stack frame if necessary.
   409 * See stack frame layout description in "bpf_jit.h"!
 410 */
 411static void bpf_jit_prologue(struct bpf_jit *jit)
 412{
 413	if (jit->seen & SEEN_TAIL_CALL) {
 414		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
 415		_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
 416	} else {
 417		/* j tail_call_start: NOP if no tail calls are used */
 418		EMIT4_PCREL(0xa7f40000, 6);
 419		_EMIT2(0);
 420	}
 421	/* Tail calls have to skip above initialization */
 422	jit->tail_call_start = jit->prg;
 423	/* Save registers */
 424	save_restore_regs(jit, REGS_SAVE);
 
 
 
 
 
 
 
 
 425	/* Setup literal pool */
 426	if (jit->seen & SEEN_LITERAL) {
 427		/* basr %r13,0 */
 428		EMIT2(0x0d00, REG_L, REG_0);
 429		jit->base_ip = jit->prg;
 430	}
 431	/* Setup stack and backchain */
 432	if (jit->seen & SEEN_STACK) {
 433		if (jit->seen & SEEN_FUNC)
 434			/* lgr %w1,%r15 (backchain) */
 435			EMIT4(0xb9040000, REG_W1, REG_15);
 436		/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
 437		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
 438		/* aghi %r15,-STK_OFF */
 439		EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF);
 440		if (jit->seen & SEEN_FUNC)
 441			/* stg %w1,152(%r15) (backchain) */
 442			EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
 443				      REG_15, 152);
 
 
 
 
 444	}
 445	if (jit->seen & SEEN_SKB)
 446		emit_load_skb_data_hlen(jit);
 447	if (jit->seen & SEEN_SKB_CHANGE)
 448		/* stg %b1,ST_OFF_SKBP(%r0,%r15) */
 449		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
 450			      STK_OFF_SKBP);
 451}
 452
 453/*
 454 * Function epilogue
 455 */
static void bpf_jit_epilogue(struct bpf_jit *jit)
{
	/* Return 0: landing pad for the "div/mod by zero" branches */
	if (jit->seen & SEEN_RET0) {
		jit->ret0_ip = jit->prg;
		/* lghi %b0,0 */
		EMIT4_IMM(0xa7090000, BPF_REG_0, 0);
	}
	/* Common exit: BPF_JMP | BPF_EXIT branches here */
	jit->exit_ip = jit->prg;
	/* Load exit code: lgr %r2,%b0 */
	EMIT4(0xb9040000, REG_2, BPF_REG_0);
	/* Restore registers (also reloads %r15 when a stack frame was set up) */
	save_restore_regs(jit, REGS_RESTORE);
	/* br %r14 */
	_EMIT2(0x07fe);
}
 
 472
 473/*
 474 * Compile one eBPF instruction into s390x code
 475 *
 476 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
 477 * stack space for the large switch statement.
 478 */
 479static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
 480{
 481	struct bpf_insn *insn = &fp->insnsi[i];
 482	int jmp_off, last, insn_count = 1;
 483	unsigned int func_addr, mask;
 484	u32 dst_reg = insn->dst_reg;
 485	u32 src_reg = insn->src_reg;
 486	u32 *addrs = jit->addrs;
 487	s32 imm = insn->imm;
 488	s16 off = insn->off;
 489
 490	switch (insn->code) {
 491	/*
 492	 * BPF_MOV
 493	 */
 494	case BPF_ALU | BPF_MOV | BPF_X: /* dst = (u32) src */
 495		/* llgfr %dst,%src */
 496		EMIT4(0xb9160000, dst_reg, src_reg);
 497		break;
 498	case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
 499		/* lgr %dst,%src */
 500		EMIT4(0xb9040000, dst_reg, src_reg);
 501		break;
 502	case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
 503		/* llilf %dst,imm */
 504		EMIT6_IMM(0xc00f0000, dst_reg, imm);
 505		break;
 506	case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
 507		/* lgfi %dst,imm */
 508		EMIT6_IMM(0xc0010000, dst_reg, imm);
 509		break;
 510	/*
 511	 * BPF_LD 64
 512	 */
 513	case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
 514	{
 515		/* 16 byte instruction that uses two 'struct bpf_insn' */
 516		u64 imm64;
 517
 518		imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
 519		/* lg %dst,<d(imm)>(%l) */
 520		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, REG_0, REG_L,
 521			      EMIT_CONST_U64(imm64));
 522		insn_count = 2;
 523		break;
 524	}
 525	/*
 526	 * BPF_ADD
 527	 */
 528	case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
 529		/* ar %dst,%src */
 530		EMIT2(0x1a00, dst_reg, src_reg);
 531		EMIT_ZERO(dst_reg);
 532		break;
 533	case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
 534		/* agr %dst,%src */
 535		EMIT4(0xb9080000, dst_reg, src_reg);
 536		break;
 537	case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
 538		if (!imm)
 539			break;
 540		/* alfi %dst,imm */
 541		EMIT6_IMM(0xc20b0000, dst_reg, imm);
 542		EMIT_ZERO(dst_reg);
 543		break;
 544	case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
 545		if (!imm)
 546			break;
 547		/* agfi %dst,imm */
 548		EMIT6_IMM(0xc2080000, dst_reg, imm);
 549		break;
 550	/*
 551	 * BPF_SUB
 552	 */
 553	case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
 554		/* sr %dst,%src */
 555		EMIT2(0x1b00, dst_reg, src_reg);
 556		EMIT_ZERO(dst_reg);
 557		break;
 558	case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
 559		/* sgr %dst,%src */
 560		EMIT4(0xb9090000, dst_reg, src_reg);
 561		break;
 562	case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
 563		if (!imm)
 564			break;
 565		/* alfi %dst,-imm */
 566		EMIT6_IMM(0xc20b0000, dst_reg, -imm);
 567		EMIT_ZERO(dst_reg);
 568		break;
 569	case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
 570		if (!imm)
 571			break;
 572		/* agfi %dst,-imm */
 573		EMIT6_IMM(0xc2080000, dst_reg, -imm);
 574		break;
 575	/*
 576	 * BPF_MUL
 577	 */
 578	case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
 579		/* msr %dst,%src */
 580		EMIT4(0xb2520000, dst_reg, src_reg);
 581		EMIT_ZERO(dst_reg);
 582		break;
 583	case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
 584		/* msgr %dst,%src */
 585		EMIT4(0xb90c0000, dst_reg, src_reg);
 586		break;
 587	case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
 588		if (imm == 1)
 589			break;
 590		/* msfi %r5,imm */
 591		EMIT6_IMM(0xc2010000, dst_reg, imm);
 592		EMIT_ZERO(dst_reg);
 
 
 
 
 
 
 
 
 
 
 
 593		break;
 594	case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
 595		if (imm == 1)
 596			break;
 597		/* msgfi %dst,imm */
 598		EMIT6_IMM(0xc2000000, dst_reg, imm);
 
 
 
 
 
 
 
 
 
 
 599		break;
 600	/*
 601	 * BPF_DIV / BPF_MOD
 602	 */
 603	case BPF_ALU | BPF_DIV | BPF_X: /* dst = (u32) dst / (u32) src */
 604	case BPF_ALU | BPF_MOD | BPF_X: /* dst = (u32) dst % (u32) src */
 605	{
 606		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 607
 608		jit->seen |= SEEN_RET0;
 609		/* ltr %src,%src (if src == 0 goto fail) */
 610		EMIT2(0x1200, src_reg, src_reg);
 611		/* jz <ret0> */
 612		EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
 613		/* lhi %w0,0 */
 614		EMIT4_IMM(0xa7080000, REG_W0, 0);
 615		/* lr %w1,%dst */
 616		EMIT2(0x1800, REG_W1, dst_reg);
 617		/* dlr %w0,%src */
 618		EMIT4(0xb9970000, REG_W0, src_reg);
 619		/* llgfr %dst,%rc */
 620		EMIT4(0xb9160000, dst_reg, rc_reg);
 621		break;
 622	}
 623	case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
 624	case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
 625	{
 626		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 627
 628		jit->seen |= SEEN_RET0;
 629		/* ltgr %src,%src (if src == 0 goto fail) */
 630		EMIT4(0xb9020000, src_reg, src_reg);
 631		/* jz <ret0> */
 632		EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
 633		/* lghi %w0,0 */
 634		EMIT4_IMM(0xa7090000, REG_W0, 0);
 635		/* lgr %w1,%dst */
 636		EMIT4(0xb9040000, REG_W1, dst_reg);
   637		/* dlgr %w0,%src */
 638		EMIT4(0xb9870000, REG_W0, src_reg);
 639		/* lgr %dst,%rc */
 640		EMIT4(0xb9040000, dst_reg, rc_reg);
 641		break;
 642	}
 643	case BPF_ALU | BPF_DIV | BPF_K: /* dst = (u32) dst / (u32) imm */
 644	case BPF_ALU | BPF_MOD | BPF_K: /* dst = (u32) dst % (u32) imm */
 645	{
 646		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 647
 648		if (imm == 1) {
 649			if (BPF_OP(insn->code) == BPF_MOD)
 650				/* lhgi %dst,0 */
 651				EMIT4_IMM(0xa7090000, dst_reg, 0);
 652			break;
 653		}
 654		/* lhi %w0,0 */
 655		EMIT4_IMM(0xa7080000, REG_W0, 0);
 656		/* lr %w1,%dst */
 657		EMIT2(0x1800, REG_W1, dst_reg);
 658		/* dl %w0,<d(imm)>(%l) */
 659		EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
 660			      EMIT_CONST_U32(imm));
 661		/* llgfr %dst,%rc */
 662		EMIT4(0xb9160000, dst_reg, rc_reg);
 663		break;
 664	}
 665	case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
 666	case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
 667	{
 668		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 669
 670		if (imm == 1) {
 671			if (BPF_OP(insn->code) == BPF_MOD)
 672				/* lhgi %dst,0 */
 673				EMIT4_IMM(0xa7090000, dst_reg, 0);
 674			break;
 675		}
 676		/* lghi %w0,0 */
 677		EMIT4_IMM(0xa7090000, REG_W0, 0);
 678		/* lgr %w1,%dst */
 679		EMIT4(0xb9040000, REG_W1, dst_reg);
 680		/* dlg %w0,<d(imm)>(%l) */
 681		EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
 682			      EMIT_CONST_U64(imm));
 683		/* lgr %dst,%rc */
 684		EMIT4(0xb9040000, dst_reg, rc_reg);
 685		break;
 686	}
 687	/*
 688	 * BPF_AND
 689	 */
 690	case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
 691		/* nr %dst,%src */
 692		EMIT2(0x1400, dst_reg, src_reg);
 693		EMIT_ZERO(dst_reg);
 694		break;
 695	case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
 696		/* ngr %dst,%src */
 697		EMIT4(0xb9800000, dst_reg, src_reg);
 698		break;
 699	case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
 700		/* nilf %dst,imm */
 701		EMIT6_IMM(0xc00b0000, dst_reg, imm);
 702		EMIT_ZERO(dst_reg);
 703		break;
 704	case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
 705		/* ng %dst,<d(imm)>(%l) */
 706		EMIT6_DISP_LH(0xe3000000, 0x0080, dst_reg, REG_0, REG_L,
 707			      EMIT_CONST_U64(imm));
 708		break;
 709	/*
 710	 * BPF_OR
 711	 */
 712	case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
 713		/* or %dst,%src */
 714		EMIT2(0x1600, dst_reg, src_reg);
 715		EMIT_ZERO(dst_reg);
 716		break;
 717	case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
 718		/* ogr %dst,%src */
 719		EMIT4(0xb9810000, dst_reg, src_reg);
 720		break;
 721	case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
 722		/* oilf %dst,imm */
 723		EMIT6_IMM(0xc00d0000, dst_reg, imm);
 724		EMIT_ZERO(dst_reg);
 725		break;
 726	case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
 727		/* og %dst,<d(imm)>(%l) */
 728		EMIT6_DISP_LH(0xe3000000, 0x0081, dst_reg, REG_0, REG_L,
 729			      EMIT_CONST_U64(imm));
 730		break;
 731	/*
 732	 * BPF_XOR
 733	 */
 734	case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
 735		/* xr %dst,%src */
 736		EMIT2(0x1700, dst_reg, src_reg);
 737		EMIT_ZERO(dst_reg);
 738		break;
 739	case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
 740		/* xgr %dst,%src */
 741		EMIT4(0xb9820000, dst_reg, src_reg);
 742		break;
 743	case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
 744		if (!imm)
 745			break;
 746		/* xilf %dst,imm */
 747		EMIT6_IMM(0xc0070000, dst_reg, imm);
 748		EMIT_ZERO(dst_reg);
 749		break;
 750	case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
 751		/* xg %dst,<d(imm)>(%l) */
 752		EMIT6_DISP_LH(0xe3000000, 0x0082, dst_reg, REG_0, REG_L,
 753			      EMIT_CONST_U64(imm));
 754		break;
 755	/*
 756	 * BPF_LSH
 757	 */
 758	case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
 759		/* sll %dst,0(%src) */
 760		EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
 761		EMIT_ZERO(dst_reg);
 762		break;
 763	case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
 764		/* sllg %dst,%dst,0(%src) */
 765		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
 766		break;
 767	case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
 768		if (imm == 0)
 769			break;
 770		/* sll %dst,imm(%r0) */
 771		EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
 772		EMIT_ZERO(dst_reg);
 773		break;
 774	case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
 775		if (imm == 0)
 776			break;
 777		/* sllg %dst,%dst,imm(%r0) */
 778		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
 779		break;
 780	/*
 781	 * BPF_RSH
 782	 */
 783	case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
 784		/* srl %dst,0(%src) */
 785		EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
 786		EMIT_ZERO(dst_reg);
 787		break;
 788	case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
 789		/* srlg %dst,%dst,0(%src) */
 790		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
 791		break;
 792	case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
 793		if (imm == 0)
 794			break;
 795		/* srl %dst,imm(%r0) */
 796		EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
 797		EMIT_ZERO(dst_reg);
 798		break;
 799	case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
 800		if (imm == 0)
 801			break;
 802		/* srlg %dst,%dst,imm(%r0) */
 803		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
 804		break;
 805	/*
 806	 * BPF_ARSH
 807	 */
 808	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
 809		/* srag %dst,%dst,0(%src) */
 810		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
 
 
 
 
 
 
 811		break;
 812	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
 813		if (imm == 0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 814			break;
 815		/* srag %dst,%dst,imm(%r0) */
 816		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
 817		break;
 818	/*
 819	 * BPF_NEG
 820	 */
 821	case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
 822		/* lcr %dst,%dst */
 823		EMIT2(0x1300, dst_reg, dst_reg);
 824		EMIT_ZERO(dst_reg);
 825		break;
 826	case BPF_ALU64 | BPF_NEG: /* dst = -dst */
 827		/* lcgr %dst,%dst */
 828		EMIT4(0xb9130000, dst_reg, dst_reg);
 829		break;
 830	/*
 831	 * BPF_FROM_BE/LE
 832	 */
 833	case BPF_ALU | BPF_END | BPF_FROM_BE:
 834		/* s390 is big endian, therefore only clear high order bytes */
 835		switch (imm) {
 836		case 16: /* dst = (u16) cpu_to_be16(dst) */
 837			/* llghr %dst,%dst */
 838			EMIT4(0xb9850000, dst_reg, dst_reg);
 839			break;
 840		case 32: /* dst = (u32) cpu_to_be32(dst) */
 841			/* llgfr %dst,%dst */
 842			EMIT4(0xb9160000, dst_reg, dst_reg);
 843			break;
 844		case 64: /* dst = (u64) cpu_to_be64(dst) */
 845			break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 846		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 847		break;
 848	case BPF_ALU | BPF_END | BPF_FROM_LE:
 849		switch (imm) {
 850		case 16: /* dst = (u16) cpu_to_le16(dst) */
 851			/* lrvr %dst,%dst */
 852			EMIT4(0xb91f0000, dst_reg, dst_reg);
 853			/* srl %dst,16(%r0) */
 854			EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
 855			/* llghr %dst,%dst */
 856			EMIT4(0xb9850000, dst_reg, dst_reg);
 857			break;
 858		case 32: /* dst = (u32) cpu_to_le32(dst) */
 859			/* lrvr %dst,%dst */
 860			EMIT4(0xb91f0000, dst_reg, dst_reg);
 861			/* llgfr %dst,%dst */
 862			EMIT4(0xb9160000, dst_reg, dst_reg);
 863			break;
 864		case 64: /* dst = (u64) cpu_to_le64(dst) */
 865			/* lrvgr %dst,%dst */
 866			EMIT4(0xb90f0000, dst_reg, dst_reg);
 867			break;
 868		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 869		break;
 870	/*
 871	 * BPF_ST(X)
 872	 */
 873	case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
 874		/* stcy %src,off(%dst) */
 875		EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
 876		jit->seen |= SEEN_MEM;
 877		break;
 878	case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
 879		/* sthy %src,off(%dst) */
 880		EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
 881		jit->seen |= SEEN_MEM;
 882		break;
 883	case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
 884		/* sty %src,off(%dst) */
 885		EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
 886		jit->seen |= SEEN_MEM;
 887		break;
 888	case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
 889		/* stg %src,off(%dst) */
 890		EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
 891		jit->seen |= SEEN_MEM;
 892		break;
 893	case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
 894		/* lhi %w0,imm */
 895		EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
 896		/* stcy %w0,off(dst) */
 897		EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
 898		jit->seen |= SEEN_MEM;
 899		break;
 900	case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
 901		/* lhi %w0,imm */
 902		EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
 903		/* sthy %w0,off(dst) */
 904		EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
 905		jit->seen |= SEEN_MEM;
 906		break;
 907	case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
 908		/* llilf %w0,imm  */
 909		EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
 910		/* sty %w0,off(%dst) */
 911		EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
 912		jit->seen |= SEEN_MEM;
 913		break;
 914	case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
 915		/* lgfi %w0,imm */
 916		EMIT6_IMM(0xc0010000, REG_W0, imm);
 917		/* stg %w0,off(%dst) */
 918		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
 919		jit->seen |= SEEN_MEM;
 920		break;
 921	/*
 922	 * BPF_STX XADD (atomic_add)
 923	 */
 924	case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
 925		/* laal %w0,%src,off(%dst) */
 926		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W0, src_reg,
 927			      dst_reg, off);
 928		jit->seen |= SEEN_MEM;
 929		break;
 930	case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
 931		/* laalg %w0,%src,off(%dst) */
 932		EMIT6_DISP_LH(0xeb000000, 0x00ea, REG_W0, src_reg,
 933			      dst_reg, off);
 934		jit->seen |= SEEN_MEM;
 935		break;
 936	/*
 937	 * BPF_LDX
 938	 */
 939	case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
 940		/* llgc %dst,0(off,%src) */
 941		EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
 942		jit->seen |= SEEN_MEM;
 943		break;
 944	case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
 945		/* llgh %dst,0(off,%src) */
 946		EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
 947		jit->seen |= SEEN_MEM;
 948		break;
 949	case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
 950		/* llgf %dst,off(%src) */
 951		jit->seen |= SEEN_MEM;
 952		EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
 953		break;
 954	case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
 955		/* lg %dst,0(off,%src) */
 956		jit->seen |= SEEN_MEM;
 957		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
 958		break;
 959	/*
 960	 * BPF_JMP / CALL
 961	 */
 962	case BPF_JMP | BPF_CALL:
 963	{
 964		/*
 965		 * b0 = (__bpf_call_base + imm)(b1, b2, b3, b4, b5)
 966		 */
 967		const u64 func = (u64)__bpf_call_base + imm;
 968
 969		REG_SET_SEEN(BPF_REG_5);
 970		jit->seen |= SEEN_FUNC;
 971		/* lg %w1,<d(imm)>(%l) */
 972		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
 973			      EMIT_CONST_U64(func));
 974		/* basr %r14,%w1 */
 975		EMIT2(0x0d00, REG_14, REG_W1);
 976		/* lgr %b0,%r2: load return value into %b0 */
 977		EMIT4(0xb9040000, BPF_REG_0, REG_2);
 978		if (bpf_helper_changes_skb_data((void *)func)) {
 979			jit->seen |= SEEN_SKB_CHANGE;
 980			/* lg %b1,ST_OFF_SKBP(%r15) */
 981			EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
 982				      REG_15, STK_OFF_SKBP);
 983			emit_load_skb_data_hlen(jit);
 
 
 
 
 
 
 
 
 
 
 
 984		}
 985		break;
 986	}
 987	case BPF_JMP | BPF_CALL | BPF_X:
 988		/*
 989		 * Implicit input:
 990		 *  B1: pointer to ctx
 991		 *  B2: pointer to bpf_array
 992		 *  B3: index in bpf_array
 993		 */
 994		jit->seen |= SEEN_TAIL_CALL;
 995
 996		/*
 997		 * if (index >= array->map.max_entries)
 998		 *         goto out;
 999		 */
1000
1001		/* llgf %w1,map.max_entries(%b2) */
1002		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
1003			      offsetof(struct bpf_array, map.max_entries));
1004		/* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
1005		EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
1006				  REG_W1, 0, 0xa);
1007
1008		/*
1009		 * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
1010		 *         goto out;
1011		 */
1012
1013		if (jit->seen & SEEN_STACK)
1014			off = STK_OFF_TCCNT + STK_OFF;
1015		else
1016			off = STK_OFF_TCCNT;
1017		/* lhi %w0,1 */
1018		EMIT4_IMM(0xa7080000, REG_W0, 1);
1019		/* laal %w1,%w0,off(%r15) */
1020		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
1021		/* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
1022		EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
1023				      MAX_TAIL_CALL_CNT, 0, 0x2);
1024
1025		/*
1026		 * prog = array->ptrs[index];
1027		 * if (prog == NULL)
1028		 *         goto out;
1029		 */
1030
1031		/* sllg %r1,%b3,3: %r1 = index * 8 */
1032		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
1033		/* lg %r1,prog(%b2,%r1) */
1034		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
1035			      REG_1, offsetof(struct bpf_array, ptrs));
1036		/* clgij %r1,0,0x8,label0 */
1037		EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8);
1038
1039		/*
1040		 * Restore registers before calling function
1041		 */
1042		save_restore_regs(jit, REGS_RESTORE);
1043
1044		/*
1045		 * goto *(prog->bpf_func + tail_call_start);
1046		 */
1047
1048		/* lg %r1,bpf_func(%r1) */
1049		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
1050			      offsetof(struct bpf_prog, bpf_func));
1051		/* bc 0xf,tail_call_start(%r1) */
1052		_EMIT4(0x47f01000 + jit->tail_call_start);
1053		/* out: */
1054		jit->labels[0] = jit->prg;
1055		break;
1056	case BPF_JMP | BPF_EXIT: /* return b0 */
1057		last = (i == fp->len - 1) ? 1 : 0;
1058		if (last && !(jit->seen & SEEN_RET0))
1059			break;
1060		/* j <exit> */
1061		EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
1062		break;
1063	/*
1064	 * Branch relative (number of skipped instructions) to offset on
1065	 * condition.
1066	 *
1067	 * Condition code to mask mapping:
1068	 *
1069	 * CC | Description	   | Mask
1070	 * ------------------------------
1071	 * 0  | Operands equal	   |	8
1072	 * 1  | First operand low  |	4
1073	 * 2  | First operand high |	2
1074	 * 3  | Unused		   |	1
1075	 *
1076	 * For s390x relative branches: ip = ip + off_bytes
1077	 * For BPF relative branches:	insn = insn + off_insns + 1
1078	 *
1079	 * For example for s390x with offset 0 we jump to the branch
1080	 * instruction itself (loop) and for BPF with offset 0 we
1081	 * branch to the instruction behind the branch.
1082	 */
1083	case BPF_JMP | BPF_JA: /* if (true) */
1084		mask = 0xf000; /* j */
1085		goto branch_oc;
1086	case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
1087		mask = 0x2000; /* jh */
1088		goto branch_ks;
1089	case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
1090		mask = 0xa000; /* jhe */
1091		goto branch_ks;
1092	case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
1093		mask = 0x2000; /* jh */
1094		goto branch_ku;
1095	case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
1096		mask = 0xa000; /* jhe */
1097		goto branch_ku;
1098	case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
1099		mask = 0x7000; /* jne */
1100		goto branch_ku;
1101	case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
1102		mask = 0x8000; /* je */
1103		goto branch_ku;
1104	case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
1105		mask = 0x7000; /* jnz */
1106		/* lgfi %w1,imm (load sign extend imm) */
1107		EMIT6_IMM(0xc0010000, REG_W1, imm);
1108		/* ngr %w1,%dst */
1109		EMIT4(0xb9800000, REG_W1, dst_reg);
1110		goto branch_oc;
1111
1112	case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
1113		mask = 0x2000; /* jh */
1114		goto branch_xs;
1115	case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
1116		mask = 0xa000; /* jhe */
1117		goto branch_xs;
1118	case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
1119		mask = 0x2000; /* jh */
1120		goto branch_xu;
1121	case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
1122		mask = 0xa000; /* jhe */
1123		goto branch_xu;
1124	case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
1125		mask = 0x7000; /* jne */
1126		goto branch_xu;
1127	case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
1128		mask = 0x8000; /* je */
1129		goto branch_xu;
1130	case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
1131		mask = 0x7000; /* jnz */
1132		/* ngrk %w1,%dst,%src */
1133		EMIT4_RRF(0xb9e40000, REG_W1, dst_reg, src_reg);
1134		goto branch_oc;
1135branch_ks:
1136		/* lgfi %w1,imm (load sign extend imm) */
1137		EMIT6_IMM(0xc0010000, REG_W1, imm);
1138		/* cgrj %dst,%w1,mask,off */
1139		EMIT6_PCREL(0xec000000, 0x0064, dst_reg, REG_W1, i, off, mask);
1140		break;
1141branch_ku:
1142		/* lgfi %w1,imm (load sign extend imm) */
1143		EMIT6_IMM(0xc0010000, REG_W1, imm);
1144		/* clgrj %dst,%w1,mask,off */
1145		EMIT6_PCREL(0xec000000, 0x0065, dst_reg, REG_W1, i, off, mask);
1146		break;
1147branch_xs:
1148		/* cgrj %dst,%src,mask,off */
1149		EMIT6_PCREL(0xec000000, 0x0064, dst_reg, src_reg, i, off, mask);
1150		break;
1151branch_xu:
1152		/* clgrj %dst,%src,mask,off */
1153		EMIT6_PCREL(0xec000000, 0x0065, dst_reg, src_reg, i, off, mask);
1154		break;
1155branch_oc:
1156		/* brc mask,jmp_off (branch instruction needs 4 bytes) */
1157		jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4);
1158		EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off);
1159		break;
1160	/*
1161	 * BPF_LD
1162	 */
1163	case BPF_LD | BPF_ABS | BPF_B: /* b0 = *(u8 *) (skb->data+imm) */
1164	case BPF_LD | BPF_IND | BPF_B: /* b0 = *(u8 *) (skb->data+imm+src) */
1165		if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
1166			func_addr = __pa(sk_load_byte_pos);
1167		else
1168			func_addr = __pa(sk_load_byte);
1169		goto call_fn;
1170	case BPF_LD | BPF_ABS | BPF_H: /* b0 = *(u16 *) (skb->data+imm) */
1171	case BPF_LD | BPF_IND | BPF_H: /* b0 = *(u16 *) (skb->data+imm+src) */
1172		if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
1173			func_addr = __pa(sk_load_half_pos);
1174		else
1175			func_addr = __pa(sk_load_half);
1176		goto call_fn;
1177	case BPF_LD | BPF_ABS | BPF_W: /* b0 = *(u32 *) (skb->data+imm) */
1178	case BPF_LD | BPF_IND | BPF_W: /* b0 = *(u32 *) (skb->data+imm+src) */
1179		if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
1180			func_addr = __pa(sk_load_word_pos);
1181		else
1182			func_addr = __pa(sk_load_word);
1183		goto call_fn;
1184call_fn:
1185		jit->seen |= SEEN_SKB | SEEN_RET0 | SEEN_FUNC;
1186		REG_SET_SEEN(REG_14); /* Return address of possible func call */
1187
1188		/*
1189		 * Implicit input:
1190		 *  BPF_REG_6	 (R7) : skb pointer
1191		 *  REG_SKB_DATA (R12): skb data pointer
1192		 *
1193		 * Calculated input:
1194		 *  BPF_REG_2	 (R3) : offset of byte(s) to fetch in skb
1195		 *  BPF_REG_5	 (R6) : return address
1196		 *
1197		 * Output:
1198		 *  BPF_REG_0	 (R14): data read from skb
1199		 *
1200		 * Scratch registers (BPF_REG_1-5)
1201		 */
1202
1203		/* Call function: llilf %w1,func_addr  */
1204		EMIT6_IMM(0xc00f0000, REG_W1, func_addr);
1205
1206		/* Offset: lgfi %b2,imm */
1207		EMIT6_IMM(0xc0010000, BPF_REG_2, imm);
1208		if (BPF_MODE(insn->code) == BPF_IND)
1209			/* agfr %b2,%src (%src is s32 here) */
1210			EMIT4(0xb9180000, BPF_REG_2, src_reg);
1211
1212		/* basr %b5,%w1 (%b5 is call saved) */
1213		EMIT2(0x0d00, BPF_REG_5, REG_W1);
1214
1215		/*
1216		 * Note: For fast access we jump directly after the
1217		 * jnz instruction from bpf_jit.S
1218		 */
1219		/* jnz <ret0> */
1220		EMIT4_PCREL(0xa7740000, jit->ret0_ip - jit->prg);
 
 
 
 
 
 
 
1221		break;
1222	default: /* too complex, give up */
1223		pr_err("Unknown opcode %02x\n", insn->code);
1224		return -1;
1225	}
1226	return insn_count;
1227}
1228
1229/*
1230 * Compile eBPF program into s390x code
1231 */
1232static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
1233{
1234	int i, insn_count;
1235
1236	jit->lit = jit->lit_start;
1237	jit->prg = 0;
1238
1239	bpf_jit_prologue(jit);
1240	for (i = 0; i < fp->len; i += insn_count) {
1241		insn_count = bpf_jit_insn(jit, fp, i);
1242		if (insn_count < 0)
1243			return -1;
1244		jit->addrs[i + 1] = jit->prg; /* Next instruction address */
1245	}
1246	bpf_jit_epilogue(jit);
1247
1248	jit->lit_start = jit->prg;
1249	jit->size = jit->lit;
1250	jit->size_prg = jit->prg;
1251	return 0;
 
 
1252}
1253
/*
 * Classic BPF function stub. BPF programs will be converted into
 * eBPF and then bpf_int_jit_compile() will be called.
 *
 * Intentionally empty: this arch only JITs the eBPF form.
 */
void bpf_jit_compile(struct bpf_prog *fp)
{
}
 
1261
1262/*
1263 * Compile eBPF program "fp"
1264 */
1265void bpf_int_jit_compile(struct bpf_prog *fp)
1266{
1267	struct bpf_binary_header *header;
1268	struct bpf_jit jit;
1269	int pass;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1270
1271	if (!bpf_jit_enable)
1272		return;
1273	memset(&jit, 0, sizeof(jit));
1274	jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
1275	if (jit.addrs == NULL)
1276		return;
1277	/*
1278	 * Three initial passes:
1279	 *   - 1/2: Determine clobbered registers
1280	 *   - 3:   Calculate program size and addrs arrray
1281	 */
1282	for (pass = 1; pass <= 3; pass++) {
1283		if (bpf_jit_prog(&jit, fp))
1284			goto free_addrs;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1285	}
1286	/*
1287	 * Final pass: Allocate and generate program
1288	 */
1289	if (jit.size >= BPF_SIZE_MAX)
1290		goto free_addrs;
1291	header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole);
1292	if (!header)
1293		goto free_addrs;
1294	if (bpf_jit_prog(&jit, fp))
1295		goto free_addrs;
1296	if (bpf_jit_enable > 1) {
1297		bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
1298		if (jit.prg_buf)
1299			print_fn_code(jit.prg_buf, jit.size_prg);
1300	}
1301	if (jit.prg_buf) {
1302		set_memory_ro((unsigned long)header, header->pages);
1303		fp->bpf_func = (void *) jit.prg_buf;
1304		fp->jited = 1;
1305	}
1306free_addrs:
1307	kfree(jit.addrs);
1308}
1309
1310/*
1311 * Free eBPF program
1312 */
1313void bpf_jit_free(struct bpf_prog *fp)
1314{
1315	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
1316	struct bpf_binary_header *header = (void *)addr;
1317
1318	if (!fp->jited)
1319		goto free_filter;
1320
1321	set_memory_rw(addr, header->pages);
1322	bpf_jit_binary_free(header);
1323
1324free_filter:
1325	bpf_prog_unlock_free(fp);
1326}
v3.15
  1/*
  2 * BPF Jit compiler for s390.
  3 *
  4 * Copyright IBM Corp. 2012
 
 
 
 
 
 
 
 
  5 *
  6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 
  7 */
  8#include <linux/moduleloader.h>
 
 
 
  9#include <linux/netdevice.h>
 10#include <linux/if_vlan.h>
 11#include <linux/filter.h>
 12#include <linux/random.h>
 13#include <linux/init.h>
 
 14#include <asm/cacheflush.h>
 15#include <asm/facility.h>
 16#include <asm/dis.h>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 17
 18/*
 19 * Conventions:
 20 *   %r2 = skb pointer
 21 *   %r3 = offset parameter
 22 *   %r4 = scratch register / length parameter
 23 *   %r5 = BPF A accumulator
 24 *   %r8 = return address
 25 *   %r9 = save register for skb pointer
 26 *   %r10 = skb->data
 27 *   %r11 = skb->len - skb->data_len (headlen)
 28 *   %r12 = BPF X accumulator
 29 *   %r13 = literal pool pointer
 30 *   0(%r15) - 63(%r15) scratch memory array with BPF_MEMWORDS
 31 */
 32int bpf_jit_enable __read_mostly;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 33
/*
 * Packet-access helper routines implemented in assembly.
 * NOTE(review): the path below looks like an x86 copy/paste; for this
 * s390 JIT the helpers presumably live in arch/s390/net/bpf_jit.S —
 * confirm against the build.
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];

struct bpf_jit {
	unsigned int seen;	/* SEEN_* flags for instructions encountered */
	u8 *start;		/* Start of the generated program */
	u8 *prg;		/* Current emit position for instructions */
	u8 *mid;		/* Emission limit checked by the EMIT* macros */
	u8 *lit;		/* Current emit position in the literal pool */
	u8 *end;		/* Emission limit for the literal pool */
	u8 *base_ip;		/* Base (%r13) used to address the pool */
	u8 *ret0_ip;		/* Address of the "return 0" stub */
	u8 *exit_ip;		/* Address of the common exit code */
	/* Literal pool offsets of the sk_load_* helper addresses */
	unsigned int off_load_word;
	unsigned int off_load_half;
	unsigned int off_load_byte;
	unsigned int off_load_bmsh;
	unsigned int off_load_iword;
	unsigned int off_load_ihalf;
	unsigned int off_load_ibyte;
};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 58
 59#define BPF_SIZE_MAX	4096	/* Max size for program */
 
 
 
 
 
 
 
 
 
 60
 61#define SEEN_DATAREF	1	/* might call external helpers */
 62#define SEEN_XREG	2	/* ebx is used */
 63#define SEEN_MEM	4	/* use mem[] for temporary storage */
 64#define SEEN_RET0	8	/* pc_ret0 points to a valid return 0 */
 65#define SEEN_LITERAL	16	/* code uses literals */
 66#define SEEN_LOAD_WORD	32	/* code uses sk_load_word */
 67#define SEEN_LOAD_HALF	64	/* code uses sk_load_half */
 68#define SEEN_LOAD_BYTE	128	/* code uses sk_load_byte */
 69#define SEEN_LOAD_BMSH	256	/* code uses sk_load_byte_msh */
 70#define SEEN_LOAD_IWORD	512	/* code uses sk_load_word_ind */
 71#define SEEN_LOAD_IHALF	1024	/* code uses sk_load_half_ind */
 72#define SEEN_LOAD_IBYTE	2048	/* code uses sk_load_byte_ind */
 73
/*
 * Emit a 2-byte instruction.  All EMIT* macros advance jit->prg
 * unconditionally but only write when the position is still below
 * jit->mid, so a first "sizing" run with a too-small buffer just
 * measures the program.
 */
#define EMIT2(op)					\
({							\
	if (jit->prg + 2 <= jit->mid)			\
		*(u16 *) jit->prg = op;			\
	jit->prg += 2;					\
})

/* Emit a 4-byte instruction */
#define EMIT4(op)					\
({							\
	if (jit->prg + 4 <= jit->mid)			\
		*(u32 *) jit->prg = op;			\
	jit->prg += 4;					\
})

/* Emit a 4-byte instruction with a 12-bit displacement merged in */
#define EMIT4_DISP(op, disp)				\
({							\
	unsigned int __disp = (disp) & 0xfff;		\
	EMIT4(op | __disp);				\
})

/* Emit a 4-byte instruction with a 16-bit immediate merged in */
#define EMIT4_IMM(op, imm)				\
({							\
	unsigned int __imm = (imm) & 0xffff;		\
	EMIT4(op | __imm);				\
})

/* Emit a 4-byte instruction with a 16-bit halfword-relative offset */
#define EMIT4_PCREL(op, pcrel)				\
({							\
	long __pcrel = ((pcrel) >> 1) & 0xffff;		\
	EMIT4(op | __pcrel);				\
})

/* Emit a 6-byte instruction (4-byte op1 followed by 2-byte op2) */
#define EMIT6(op1, op2)					\
({							\
	if (jit->prg + 6 <= jit->mid) {			\
		*(u32 *) jit->prg = op1;		\
		*(u16 *) (jit->prg + 4) = op2;		\
	}						\
	jit->prg += 6;					\
})

/* Emit a 6-byte instruction with a 12-bit displacement merged in */
#define EMIT6_DISP(op1, op2, disp)			\
({							\
	unsigned int __disp = (disp) & 0xfff;		\
	EMIT6(op1 | __disp, op2);			\
})

/* Emit a 6-byte instruction with a 32-bit immediate split high/low */
#define EMIT6_IMM(op, imm)				\
({							\
	unsigned int __imm = (imm);			\
	EMIT6(op | (__imm >> 16), __imm & 0xffff);	\
})

/*
 * Append a u32 constant to the literal pool; evaluates to the
 * constant's offset from jit->base_ip (for %r13-relative access).
 */
#define EMIT_CONST(val)					\
({							\
	unsigned int ret;				\
	ret = (unsigned int) (jit->lit - jit->base_ip);	\
	jit->seen |= SEEN_LITERAL;			\
	if (jit->lit + 4 <= jit->end)			\
		*(u32 *) jit->lit = val;		\
	jit->lit += 4;					\
})

/*
 * Append a helper-function address (8 bytes) to the literal pool, but
 * only when the corresponding SEEN_* bit is set; evaluates to the
 * offset from jit->base_ip either way.
 */
#define EMIT_FN_CONST(bit, fn)				\
({							\
	unsigned int ret;				\
	ret = (unsigned int) (jit->lit - jit->base_ip);	\
	if (jit->seen & bit) {				\
		jit->seen |= SEEN_LITERAL;		\
		if (jit->lit + 8 <= jit->end)		\
			*(void **) jit->lit = fn;	\
		jit->lit += 8;				\
	}						\
	ret;						\
})
150
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Emit the function prologue: save the registers the generated code
 * will clobber (how many depends on the SEEN_* flags), establish the
 * literal pool base in %r13, reserve pool slots for the sk_load_*
 * helper addresses, and preload skb headlen (%r11) and skb->data
 * (%r10) when packet data is dereferenced.
 */
static void bpf_jit_prologue(struct bpf_jit *jit)
{
	/* Save registers and create stack frame if necessary */
	if (jit->seen & SEEN_DATAREF) {
		/* stmg %r8,%r15,88(%r15) */
		EMIT6(0xeb8ff058, 0x0024);
		/* lgr %r14,%r15 */
		EMIT4(0xb90400ef);
		/* aghi %r15,<offset> */
		/* extra 32 bytes when scratch mem[] words are used */
		EMIT4_IMM(0xa7fb0000, (jit->seen & SEEN_MEM) ? -112 : -80);
		/* stg %r14,152(%r15) */
		EMIT6(0xe3e0f098, 0x0024);
	} else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
		/* stmg %r12,%r13,120(%r15) */
		EMIT6(0xebcdf078, 0x0024);
	else if (jit->seen & SEEN_XREG)
		/* stg %r12,120(%r15) */
		EMIT6(0xe3c0f078, 0x0024);
	else if (jit->seen & SEEN_LITERAL)
		/* stg %r13,128(%r15) */
		EMIT6(0xe3d0f080, 0x0024);

	/* Setup literal pool */
	if (jit->seen & SEEN_LITERAL) {
		/* basr %r13,0 */
		EMIT2(0x0dd0);
		/* the pool is addressed relative to this point */
		jit->base_ip = jit->prg;
	}
	/* Pool slots for the helper addresses (emitted only when used) */
	jit->off_load_word = EMIT_FN_CONST(SEEN_LOAD_WORD, sk_load_word);
	jit->off_load_half = EMIT_FN_CONST(SEEN_LOAD_HALF, sk_load_half);
	jit->off_load_byte = EMIT_FN_CONST(SEEN_LOAD_BYTE, sk_load_byte);
	jit->off_load_bmsh = EMIT_FN_CONST(SEEN_LOAD_BMSH, sk_load_byte_msh);
	jit->off_load_iword = EMIT_FN_CONST(SEEN_LOAD_IWORD, sk_load_word_ind);
	jit->off_load_ihalf = EMIT_FN_CONST(SEEN_LOAD_IHALF, sk_load_half_ind);
	jit->off_load_ibyte = EMIT_FN_CONST(SEEN_LOAD_IBYTE, sk_load_byte_ind);

	/* Filter needs to access skb data */
	if (jit->seen & SEEN_DATAREF) {
		/* %r11 = skb->len - skb->data_len (linear headlen) */
		/* l %r11,<len>(%r2) */
		EMIT4_DISP(0x58b02000, offsetof(struct sk_buff, len));
		/* s %r11,<data_len>(%r2) */
		EMIT4_DISP(0x5bb02000, offsetof(struct sk_buff, data_len));
		/* lg %r10,<data>(%r2) */
		EMIT6_DISP(0xe3a02000, 0x0004,
			   offsetof(struct sk_buff, data));
	}
}
198
 
 
 
/*
 * Emit the common epilogue: the "return 0" stub (only when some
 * instruction branches to ret0_ip), then the register restore that
 * mirrors the prologue's save sequence, and the final return.
 */
static void bpf_jit_epilogue(struct bpf_jit *jit)
{
	/* Return 0 */
	if (jit->seen & SEEN_RET0) {
		jit->ret0_ip = jit->prg;
		/* lghi %r2,0 */
		EMIT4(0xa7290000);
	}
	/* normal exit falls through to the restore code below */
	jit->exit_ip = jit->prg;
	/* Restore registers */
	if (jit->seen & SEEN_DATAREF)
		/* lmg %r8,%r15,<offset>(%r15) */
		EMIT6_DISP(0xeb8ff000, 0x0004,
			   (jit->seen & SEEN_MEM) ? 200 : 168);
	else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
		/* lmg %r12,%r13,120(%r15) */
		EMIT6(0xebcdf078, 0x0004);
	else if (jit->seen & SEEN_XREG)
		/* lg %r12,120(%r15) */
		EMIT6(0xe3c0f078, 0x0004);
	else if (jit->seen & SEEN_LITERAL)
		/* lg %r13,128(%r15) */
		EMIT6(0xe3d0f080, 0x0004);
	/* br %r14 */
	EMIT2(0x07fe);
}
225
226/* Helper to find the offset of pkt_type in sk_buff
227 * Make sure its still a 3bit field starting at the MSBs within a byte.
228 */
229#define PKT_TYPE_MAX 0xe0
230static int pkt_type_offset;
231
232static int __init bpf_pkt_type_offset_init(void)
233{
234	struct sk_buff skb_probe = {
235		.pkt_type = ~0,
236	};
237	char *ct = (char *)&skb_probe;
238	int off;
239
240	pkt_type_offset = -1;
241	for (off = 0; off < sizeof(struct sk_buff); off++) {
242		if (!ct[off])
243			continue;
244		if (ct[off] == PKT_TYPE_MAX)
245			pkt_type_offset = off;
246		else {
247			/* Found non matching bit pattern, fix needed. */
248			WARN_ON_ONCE(1);
249			pkt_type_offset = -1;
250			return -1;
251		}
252	}
253	return 0;
254}
255device_initcall(bpf_pkt_type_offset_init);
256
/*
 * make sure we don't leak kernel information to user
 *
 * Zero the scratch memory words and the X register when they are
 * used, and clear A unless the filter's first instruction writes it,
 * so the generated code never reads stale register/stack contents.
 */
static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
{
	/* Clear temporary memory if (seen & SEEN_MEM) */
	if (jit->seen & SEEN_MEM)
		/* xc 0(64,%r15),0(%r15) */
		EMIT6(0xd73ff000, 0xf000);
	/* Clear X if (seen & SEEN_XREG) */
	if (jit->seen & SEEN_XREG)
		/* lhi %r12,0 */
		EMIT4(0xa7c80000);
	/* Clear A if the first register does not set it. */
	switch (filter[0].code) {
	case BPF_S_LD_W_ABS:
	case BPF_S_LD_H_ABS:
	case BPF_S_LD_B_ABS:
	case BPF_S_LD_W_LEN:
	case BPF_S_LD_W_IND:
	case BPF_S_LD_H_IND:
	case BPF_S_LD_B_IND:
	case BPF_S_LD_IMM:
	case BPF_S_LD_MEM:
	case BPF_S_MISC_TXA:
	case BPF_S_ANC_PROTOCOL:
	case BPF_S_ANC_PKTTYPE:
	case BPF_S_ANC_IFINDEX:
	case BPF_S_ANC_MARK:
	case BPF_S_ANC_QUEUE:
	case BPF_S_ANC_HATYPE:
	case BPF_S_ANC_RXHASH:
	case BPF_S_ANC_CPU:
	case BPF_S_ANC_VLAN_TAG:
	case BPF_S_ANC_VLAN_TAG_PRESENT:
	case BPF_S_RET_K:
		/* first instruction sets A register */
		break;
	default: /* A = 0 */
		/* lhi %r5,0 */
		EMIT4(0xa7580000);
	}
}
300
301static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
302			unsigned int *addrs, int i, int last)
303{
304	unsigned int K;
305	int offset;
306	unsigned int mask;
307
308	K = filter->k;
309	switch (filter->code) {
310	case BPF_S_ALU_ADD_X: /* A += X */
311		jit->seen |= SEEN_XREG;
312		/* ar %r5,%r12 */
313		EMIT2(0x1a5c);
314		break;
315	case BPF_S_ALU_ADD_K: /* A += K */
316		if (!K)
317			break;
318		if (K <= 16383)
319			/* ahi %r5,<K> */
320			EMIT4_IMM(0xa75a0000, K);
321		else if (test_facility(21))
322			/* alfi %r5,<K> */
323			EMIT6_IMM(0xc25b0000, K);
324		else
325			/* a %r5,<d(K)>(%r13) */
326			EMIT4_DISP(0x5a50d000, EMIT_CONST(K));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
327		break;
328	case BPF_S_ALU_SUB_X: /* A -= X */
329		jit->seen |= SEEN_XREG;
330		/* sr %r5,%r12 */
331		EMIT2(0x1b5c);
332		break;
333	case BPF_S_ALU_SUB_K: /* A -= K */
334		if (!K)
335			break;
336		if (K <= 16384)
337			/* ahi %r5,-K */
338			EMIT4_IMM(0xa75a0000, -K);
339		else if (test_facility(21))
340			/* alfi %r5,-K */
341			EMIT6_IMM(0xc25b0000, -K);
342		else
343			/* s %r5,<d(K)>(%r13) */
344			EMIT4_DISP(0x5b50d000, EMIT_CONST(K));
345		break;
346	case BPF_S_ALU_MUL_X: /* A *= X */
347		jit->seen |= SEEN_XREG;
348		/* msr %r5,%r12 */
349		EMIT4(0xb252005c);
350		break;
351	case BPF_S_ALU_MUL_K: /* A *= K */
352		if (K <= 16383)
353			/* mhi %r5,K */
354			EMIT4_IMM(0xa75c0000, K);
355		else if (test_facility(34))
356			/* msfi %r5,<K> */
357			EMIT6_IMM(0xc2510000, K);
358		else
359			/* ms %r5,<d(K)>(%r13) */
360			EMIT4_DISP(0x7150d000, EMIT_CONST(K));
361		break;
362	case BPF_S_ALU_DIV_X: /* A /= X */
363		jit->seen |= SEEN_XREG | SEEN_RET0;
364		/* ltr %r12,%r12 */
365		EMIT2(0x12cc);
 
 
 
 
 
 
 
366		/* jz <ret0> */
367		EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
368		/* lhi %r4,0 */
369		EMIT4(0xa7480000);
370		/* dlr %r4,%r12 */
371		EMIT4(0xb997004c);
372		break;
373	case BPF_S_ALU_DIV_K: /* A /= K */
374		if (K == 1)
375			break;
376		/* lhi %r4,0 */
377		EMIT4(0xa7480000);
378		/* dl %r4,<d(K)>(%r13) */
379		EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
380		break;
381	case BPF_S_ALU_MOD_X: /* A %= X */
382		jit->seen |= SEEN_XREG | SEEN_RET0;
383		/* ltr %r12,%r12 */
384		EMIT2(0x12cc);
 
385		/* jz <ret0> */
386		EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
387		/* lhi %r4,0 */
388		EMIT4(0xa7480000);
389		/* dlr %r4,%r12 */
390		EMIT4(0xb997004c);
391		/* lr %r5,%r4 */
392		EMIT2(0x1854);
393		break;
394	case BPF_S_ALU_MOD_K: /* A %= K */
395		if (K == 1) {
396			/* lhi %r5,0 */
397			EMIT4(0xa7580000);
 
 
 
 
 
 
 
 
398			break;
399		}
400		/* lhi %r4,0 */
401		EMIT4(0xa7480000);
402		/* dl %r4,<d(K)>(%r13) */
403		EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
404		/* lr %r5,%r4 */
405		EMIT2(0x1854);
406		break;
407	case BPF_S_ALU_AND_X: /* A &= X */
408		jit->seen |= SEEN_XREG;
409		/* nr %r5,%r12 */
410		EMIT2(0x145c);
411		break;
412	case BPF_S_ALU_AND_K: /* A &= K */
413		if (test_facility(21))
414			/* nilf %r5,<K> */
415			EMIT6_IMM(0xc05b0000, K);
416		else
417			/* n %r5,<d(K)>(%r13) */
418			EMIT4_DISP(0x5450d000, EMIT_CONST(K));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
419		break;
420	case BPF_S_ALU_OR_X: /* A |= X */
421		jit->seen |= SEEN_XREG;
422		/* or %r5,%r12 */
423		EMIT2(0x165c);
424		break;
425	case BPF_S_ALU_OR_K: /* A |= K */
426		if (test_facility(21))
427			/* oilf %r5,<K> */
428			EMIT6_IMM(0xc05d0000, K);
429		else
430			/* o %r5,<d(K)>(%r13) */
431			EMIT4_DISP(0x5650d000, EMIT_CONST(K));
432		break;
433	case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
434	case BPF_S_ALU_XOR_X:
435		jit->seen |= SEEN_XREG;
436		/* xr %r5,%r12 */
437		EMIT2(0x175c);
438		break;
439	case BPF_S_ALU_XOR_K: /* A ^= K */
440		if (!K)
441			break;
442		/* x %r5,<d(K)>(%r13) */
443		EMIT4_DISP(0x5750d000, EMIT_CONST(K));
444		break;
445	case BPF_S_ALU_LSH_X: /* A <<= X; */
446		jit->seen |= SEEN_XREG;
447		/* sll %r5,0(%r12) */
448		EMIT4(0x8950c000);
449		break;
450	case BPF_S_ALU_LSH_K: /* A <<= K */
451		if (K == 0)
452			break;
453		/* sll %r5,K */
454		EMIT4_DISP(0x89500000, K);
455		break;
456	case BPF_S_ALU_RSH_X: /* A >>= X; */
457		jit->seen |= SEEN_XREG;
458		/* srl %r5,0(%r12) */
459		EMIT4(0x8850c000);
460		break;
461	case BPF_S_ALU_RSH_K: /* A >>= K; */
462		if (K == 0)
463			break;
464		/* srl %r5,K */
465		EMIT4_DISP(0x88500000, K);
466		break;
467	case BPF_S_ALU_NEG: /* A = -A */
468		/* lnr %r5,%r5 */
469		EMIT2(0x1155);
470		break;
471	case BPF_S_JMP_JA: /* ip += K */
472		offset = addrs[i + K] + jit->start - jit->prg;
473		EMIT4_PCREL(0xa7f40000, offset);
474		break;
475	case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */
476		mask = 0x200000; /* jh */
477		goto kbranch;
478	case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */
479		mask = 0xa00000; /* jhe */
480		goto kbranch;
481	case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */
482		mask = 0x800000; /* je */
483kbranch:	/* Emit compare if the branch targets are different */
484		if (filter->jt != filter->jf) {
485			if (K <= 16383)
486				/* chi %r5,<K> */
487				EMIT4_IMM(0xa75e0000, K);
488			else if (test_facility(21))
489				/* clfi %r5,<K> */
490				EMIT6_IMM(0xc25f0000, K);
491			else
492				/* c %r5,<d(K)>(%r13) */
493				EMIT4_DISP(0x5950d000, EMIT_CONST(K));
494		}
495branch:		if (filter->jt == filter->jf) {
496			if (filter->jt == 0)
497				break;
498			/* j <jt> */
499			offset = addrs[i + filter->jt] + jit->start - jit->prg;
500			EMIT4_PCREL(0xa7f40000, offset);
501			break;
502		}
503		if (filter->jt != 0) {
504			/* brc	<mask>,<jt> */
505			offset = addrs[i + filter->jt] + jit->start - jit->prg;
506			EMIT4_PCREL(0xa7040000 | mask, offset);
507		}
508		if (filter->jf != 0) {
509			/* brc	<mask^15>,<jf> */
510			offset = addrs[i + filter->jf] + jit->start - jit->prg;
511			EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset);
512		}
 
 
 
513		break;
514	case BPF_S_JMP_JSET_K: /* ip += (A & K) ? jt : jf */
515		mask = 0x700000; /* jnz */
516		/* Emit test if the branch targets are different */
517		if (filter->jt != filter->jf) {
518			if (K > 65535) {
519				/* lr %r4,%r5 */
520				EMIT2(0x1845);
521				/* n %r4,<d(K)>(%r13) */
522				EMIT4_DISP(0x5440d000, EMIT_CONST(K));
523			} else
524				/* tmll %r5,K */
525				EMIT4_IMM(0xa7510000, K);
526		}
527		goto branch;
528	case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */
529		mask = 0x200000; /* jh */
530		goto xbranch;
531	case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */
532		mask = 0xa00000; /* jhe */
533		goto xbranch;
534	case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */
535		mask = 0x800000; /* je */
536xbranch:	/* Emit compare if the branch targets are different */
537		if (filter->jt != filter->jf) {
538			jit->seen |= SEEN_XREG;
539			/* cr %r5,%r12 */
540			EMIT2(0x195c);
541		}
542		goto branch;
543	case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */
544		mask = 0x700000; /* jnz */
545		/* Emit test if the branch targets are different */
546		if (filter->jt != filter->jf) {
547			jit->seen |= SEEN_XREG;
548			/* lr %r4,%r5 */
549			EMIT2(0x1845);
550			/* nr %r4,%r12 */
551			EMIT2(0x144c);
552		}
553		goto branch;
554	case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */
555		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD;
556		offset = jit->off_load_word;
557		goto load_abs;
558	case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */
559		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF;
560		offset = jit->off_load_half;
561		goto load_abs;
562	case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */
563		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE;
564		offset = jit->off_load_byte;
565load_abs:	if ((int) K < 0)
566			goto out;
567call_fn:	/* lg %r1,<d(function)>(%r13) */
568		EMIT6_DISP(0xe310d000, 0x0004, offset);
569		/* l %r3,<d(K)>(%r13) */
570		EMIT4_DISP(0x5830d000, EMIT_CONST(K));
571		/* basr %r8,%r1 */
572		EMIT2(0x0d81);
573		/* jnz <ret0> */
574		EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg));
575		break;
576	case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */
577		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD;
578		offset = jit->off_load_iword;
579		goto call_fn;
580	case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */
581		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF;
582		offset = jit->off_load_ihalf;
583		goto call_fn;
584	case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */
585		jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE;
586		offset = jit->off_load_ibyte;
587		goto call_fn;
588	case BPF_S_LDX_B_MSH:
589		/* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
590		jit->seen |= SEEN_RET0;
591		if ((int) K < 0) {
592			/* j <ret0> */
593			EMIT4_PCREL(0xa7f40000, (jit->ret0_ip - jit->prg));
 
594			break;
595		}
596		jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH;
597		offset = jit->off_load_bmsh;
598		goto call_fn;
599	case BPF_S_LD_W_LEN: /*	A = skb->len; */
600		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
601		/* l %r5,<d(len)>(%r2) */
602		EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len));
603		break;
604	case BPF_S_LDX_W_LEN: /* X = skb->len; */
605		jit->seen |= SEEN_XREG;
606		/* l %r12,<d(len)>(%r2) */
607		EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len));
608		break;
609	case BPF_S_LD_IMM: /* A = K */
610		if (K <= 16383)
611			/* lhi %r5,K */
612			EMIT4_IMM(0xa7580000, K);
613		else if (test_facility(21))
614			/* llilf %r5,<K> */
615			EMIT6_IMM(0xc05f0000, K);
616		else
617			/* l %r5,<d(K)>(%r13) */
618			EMIT4_DISP(0x5850d000, EMIT_CONST(K));
619		break;
620	case BPF_S_LDX_IMM: /* X = K */
621		jit->seen |= SEEN_XREG;
622		if (K <= 16383)
623			/* lhi %r12,<K> */
624			EMIT4_IMM(0xa7c80000, K);
625		else if (test_facility(21))
626			/* llilf %r12,<K> */
627			EMIT6_IMM(0xc0cf0000, K);
628		else
629			/* l %r12,<d(K)>(%r13) */
630			EMIT4_DISP(0x58c0d000, EMIT_CONST(K));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
631		break;
632	case BPF_S_LD_MEM: /* A = mem[K] */
 
633		jit->seen |= SEEN_MEM;
634		/* l %r5,<K>(%r15) */
635		EMIT4_DISP(0x5850f000,
636			   (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
637		break;
638	case BPF_S_LDX_MEM: /* X = mem[K] */
639		jit->seen |= SEEN_XREG | SEEN_MEM;
640		/* l %r12,<K>(%r15) */
641		EMIT4_DISP(0x58c0f000,
642			   (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
643		break;
644	case BPF_S_MISC_TAX: /* X = A */
645		jit->seen |= SEEN_XREG;
646		/* lr %r12,%r5 */
647		EMIT2(0x18c5);
648		break;
649	case BPF_S_MISC_TXA: /* A = X */
650		jit->seen |= SEEN_XREG;
651		/* lr %r5,%r12 */
652		EMIT2(0x185c);
653		break;
654	case BPF_S_RET_K:
655		if (K == 0) {
656			jit->seen |= SEEN_RET0;
657			if (last)
658				break;
659			/* j <ret0> */
660			EMIT4_PCREL(0xa7f40000, jit->ret0_ip - jit->prg);
661		} else {
662			if (K <= 16383)
663				/* lghi %r2,K */
664				EMIT4_IMM(0xa7290000, K);
665			else
666				/* llgf %r2,<K>(%r13) */
667				EMIT6_DISP(0xe320d000, 0x0016, EMIT_CONST(K));
668			/* j <exit> */
669			if (last && !(jit->seen & SEEN_RET0))
670				break;
671			EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
672		}
673		break;
674	case BPF_S_RET_A:
675		/* llgfr %r2,%r5 */
676		EMIT4(0xb9160025);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
677		/* j <exit> */
678		EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
679		break;
680	case BPF_S_ST: /* mem[K] = A */
681		jit->seen |= SEEN_MEM;
682		/* st %r5,<K>(%r15) */
683		EMIT4_DISP(0x5050f000,
684			   (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
685		break;
686	case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
687		jit->seen |= SEEN_XREG | SEEN_MEM;
688		/* st %r12,<K>(%r15) */
689		EMIT4_DISP(0x50c0f000,
690			   (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
691		break;
692	case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
693		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
694		/* lhi %r5,0 */
695		EMIT4(0xa7580000);
696		/* icm	%r5,3,<d(protocol)>(%r2) */
697		EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol));
698		break;
699	case BPF_S_ANC_IFINDEX:	/* if (!skb->dev) return 0;
700				 * A = skb->dev->ifindex */
701		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
702		jit->seen |= SEEN_RET0;
703		/* lg %r1,<d(dev)>(%r2) */
704		EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
705		/* ltgr %r1,%r1 */
706		EMIT4(0xb9020011);
707		/* jz <ret0> */
708		EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
709		/* l %r5,<d(ifindex)>(%r1) */
710		EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
711		break;
712	case BPF_S_ANC_MARK: /* A = skb->mark */
713		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
714		/* l %r5,<d(mark)>(%r2) */
715		EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark));
716		break;
717	case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */
718		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
719		/* lhi %r5,0 */
720		EMIT4(0xa7580000);
721		/* icm	%r5,3,<d(queue_mapping)>(%r2) */
722		EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping));
723		break;
724	case BPF_S_ANC_HATYPE:	/* if (!skb->dev) return 0;
725				 * A = skb->dev->type */
726		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
727		jit->seen |= SEEN_RET0;
728		/* lg %r1,<d(dev)>(%r2) */
729		EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
730		/* ltgr %r1,%r1 */
731		EMIT4(0xb9020011);
732		/* jz <ret0> */
733		EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
734		/* lhi %r5,0 */
735		EMIT4(0xa7580000);
736		/* icm	%r5,3,<d(type)>(%r1) */
737		EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
738		break;
739	case BPF_S_ANC_RXHASH: /* A = skb->hash */
740		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
741		/* l %r5,<d(hash)>(%r2) */
742		EMIT4_DISP(0x58502000, offsetof(struct sk_buff, hash));
743		break;
744	case BPF_S_ANC_VLAN_TAG:
745	case BPF_S_ANC_VLAN_TAG_PRESENT:
746		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
747		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
748		/* lhi %r5,0 */
749		EMIT4(0xa7580000);
750		/* icm	%r5,3,<d(vlan_tci)>(%r2) */
751		EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci));
752		if (filter->code == BPF_S_ANC_VLAN_TAG) {
753			/* nill %r5,0xefff */
754			EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT);
755		} else {
756			/* nill %r5,0x1000 */
757			EMIT4_IMM(0xa5570000, VLAN_TAG_PRESENT);
758			/* srl %r5,12 */
759			EMIT4_DISP(0x88500000, 12);
760		}
761		break;
762	case BPF_S_ANC_PKTTYPE:
763		if (pkt_type_offset < 0)
764			goto out;
765		/* lhi %r5,0 */
766		EMIT4(0xa7580000);
767		/* ic %r5,<d(pkt_type_offset)>(%r2) */
768		EMIT4_DISP(0x43502000, pkt_type_offset);
769		/* srl %r5,5 */
770		EMIT4_DISP(0x88500000, 5);
771		break;
772	case BPF_S_ANC_CPU: /* A = smp_processor_id() */
773#ifdef CONFIG_SMP
774		/* l %r5,<d(cpu_nr)> */
775		EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr));
776#else
777		/* lhi %r5,0 */
778		EMIT4(0xa7580000);
779#endif
780		break;
781	default: /* too complex, give up */
782		goto out;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
783	}
784	addrs[i] = jit->prg - jit->start;
 
 
 
 
785	return 0;
786out:
787	return -1;
788}
789
790/*
791 * Note: for security reasons, bpf code will follow a randomly
792 *	 sized amount of illegal instructions.
793 */
794struct bpf_binary_header {
795	unsigned int pages;
796	u8 image[];
797};
798
799static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
800						  u8 **image_ptr)
 
 
801{
802	struct bpf_binary_header *header;
803	unsigned int sz, hole;
804
805	/* Most BPF filters are really small, but if some of them fill a page,
806	 * allow at least 128 extra bytes for illegal instructions.
807	 */
808	sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE);
809	header = module_alloc(sz);
810	if (!header)
811		return NULL;
812	memset(header, 0, sz);
813	header->pages = sz / PAGE_SIZE;
814	hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
815	/* Insert random number of illegal instructions before BPF code
816	 * and make sure the first instruction starts at an even address.
817	 */
818	*image_ptr = &header->image[(prandom_u32() % hole) & -2];
819	return header;
820}
821
822void bpf_jit_compile(struct sk_filter *fp)
823{
824	struct bpf_binary_header *header = NULL;
825	unsigned long size, prg_len, lit_len;
826	struct bpf_jit jit, cjit;
827	unsigned int *addrs;
828	int pass, i;
829
830	if (!bpf_jit_enable)
831		return;
832	addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL);
833	if (addrs == NULL)
 
834		return;
835	memset(&jit, 0, sizeof(cjit));
836	memset(&cjit, 0, sizeof(cjit));
837
838	for (pass = 0; pass < 10; pass++) {
839		jit.prg = jit.start;
840		jit.lit = jit.mid;
841
842		bpf_jit_prologue(&jit);
843		bpf_jit_noleaks(&jit, fp->insns);
844		for (i = 0; i < fp->len; i++) {
845			if (bpf_jit_insn(&jit, fp->insns + i, addrs, i,
846					 i == fp->len - 1))
847				goto out;
848		}
849		bpf_jit_epilogue(&jit);
850		if (jit.start) {
851			WARN_ON(jit.prg > cjit.prg || jit.lit > cjit.lit);
852			if (memcmp(&jit, &cjit, sizeof(jit)) == 0)
853				break;
854		} else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
855			prg_len = jit.prg - jit.start;
856			lit_len = jit.lit - jit.mid;
857			size = prg_len + lit_len;
858			if (size >= BPF_SIZE_MAX)
859				goto out;
860			header = bpf_alloc_binary(size, &jit.start);
861			if (!header)
862				goto out;
863			jit.prg = jit.mid = jit.start + prg_len;
864			jit.lit = jit.end = jit.start + prg_len + lit_len;
865			jit.base_ip += (unsigned long) jit.start;
866			jit.exit_ip += (unsigned long) jit.start;
867			jit.ret0_ip += (unsigned long) jit.start;
868		}
869		cjit = jit;
870	}
 
 
 
 
 
 
 
 
 
 
871	if (bpf_jit_enable > 1) {
872		bpf_jit_dump(fp->len, jit.end - jit.start, pass, jit.start);
873		if (jit.start)
874			print_fn_code(jit.start, jit.mid - jit.start);
875	}
876	if (jit.start) {
877		set_memory_ro((unsigned long)header, header->pages);
878		fp->bpf_func = (void *) jit.start;
879		fp->jited = 1;
880	}
881out:
882	kfree(addrs);
883}
884
885void bpf_jit_free(struct sk_filter *fp)
 
 
 
886{
887	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
888	struct bpf_binary_header *header = (void *)addr;
889
890	if (!fp->jited)
891		goto free_filter;
892
893	set_memory_rw(addr, header->pages);
894	module_free(NULL, header);
895
896free_filter:
897	kfree(fp);
898}