Linux Audio

Check our new training course

Loading...
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * BPF Jit compiler for s390.
   4 *
   5 * Minimum build requirements:
   6 *
   7 *  - HAVE_MARCH_Z196_FEATURES: laal, laalg
   8 *  - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
   9 *  - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
  10 *  - PACK_STACK
  11 *  - 64BIT
  12 *
  13 * Copyright IBM Corp. 2012,2015
  14 *
  15 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  16 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
  17 */
  18
  19#define KMSG_COMPONENT "bpf_jit"
  20#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  21
  22#include <linux/netdevice.h>
  23#include <linux/filter.h>
  24#include <linux/init.h>
  25#include <linux/bpf.h>
  26#include <linux/mm.h>
  27#include <linux/kernel.h>
  28#include <asm/cacheflush.h>
  29#include <asm/dis.h>
  30#include <asm/facility.h>
  31#include <asm/nospec-branch.h>
  32#include <asm/set_memory.h>
  33#include "bpf_jit.h"
  34
/*
 * Per-program JIT translation state.  One instance lives across all JIT
 * passes: sizing passes run with prg_buf == NULL and only advance the
 * position counters; the code generation pass writes into prg_buf.
 */
struct bpf_jit {
	u32 seen;		/* Flags to remember seen eBPF instructions */
	u32 seen_reg[16];	/* Array to remember which registers are used */
	u32 *addrs;		/* Array with relative instruction addresses */
	u8 *prg_buf;		/* Start of program */
	int size;		/* Size of program and literal pool */
	int size_prg;		/* Size of program */
	int prg;		/* Current position in program */
	int lit32_start;	/* Start of 32-bit literal pool */
	int lit32;		/* Current position in 32-bit literal pool */
	int lit64_start;	/* Start of 64-bit literal pool */
	int lit64;		/* Current position in 64-bit literal pool */
	int base_ip;		/* Base address for literal pool */
	int exit_ip;		/* Address of exit */
	int r1_thunk_ip;	/* Address of expoline thunk for 'br %r1' */
	int r14_thunk_ip;	/* Address of expoline thunk for 'br %r14' */
	int tail_call_start;	/* Tail call start offset */
	int excnt;		/* Number of exception table entries */
	int labels[1];		/* Labels for local jumps (variable-length; sized at allocation — verify against allocator) */
};
  55
/*
 * Bits for bpf_jit::seen, collected during the sizing passes and used to
 * decide which prologue/epilogue parts are needed.
 */
#define SEEN_MEM	BIT(0)		/* use mem[] for temporary storage */
#define SEEN_LITERAL	BIT(1)		/* code uses literals */
#define SEEN_FUNC	BIT(2)		/* calls C functions */
#define SEEN_TAIL_CALL	BIT(3)		/* code uses tail calls */
#define SEEN_STACK	(SEEN_FUNC | SEEN_MEM)	/* needs own stack frame */
  61
/*
 * s390 registers
 *
 * Extra pseudo-register numbers beyond the eBPF register set; they index
 * reg2hex[] just like the BPF_REG_* values.
 */
#define REG_W0		(MAX_BPF_JIT_REG + 0)	/* Work register 1 (even) */
#define REG_W1		(MAX_BPF_JIT_REG + 1)	/* Work register 2 (odd) */
#define REG_L		(MAX_BPF_JIT_REG + 2)	/* Literal pool register */
#define REG_15		(MAX_BPF_JIT_REG + 3)	/* Register 15 */
#define REG_0		REG_W0			/* Register 0 */
#define REG_1		REG_W1			/* Register 1 */
#define REG_2		BPF_REG_1		/* Register 2 */
#define REG_14		BPF_REG_0		/* Register 14 */
  73
/*
 * Mapping of BPF registers to s390 registers
 *
 * Each entry is the s390 GPR number (0-15) backing the given BPF
 * (or pseudo) register.
 */
static const int reg2hex[] = {
	/* Return code */
	[BPF_REG_0]	= 14,
	/* Function parameters */
	[BPF_REG_1]	= 2,
	[BPF_REG_2]	= 3,
	[BPF_REG_3]	= 4,
	[BPF_REG_4]	= 5,
	[BPF_REG_5]	= 6,
	/* Call saved registers */
	[BPF_REG_6]	= 7,
	[BPF_REG_7]	= 8,
	[BPF_REG_8]	= 9,
	[BPF_REG_9]	= 10,
	/* BPF stack pointer */
	[BPF_REG_FP]	= 13,
	/* Register for blinding */
	[BPF_REG_AX]	= 12,
	/* Work registers for s390x backend */
	[REG_W0]	= 0,
	[REG_W1]	= 1,
	[REG_L]		= 11,
	[REG_15]	= 15,
};
 101
 102static inline u32 reg(u32 dst_reg, u32 src_reg)
 103{
 104	return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
 105}
 106
 107static inline u32 reg_high(u32 reg)
 108{
 109	return reg2hex[reg] << 4;
 110}
 111
 112static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 113{
 114	u32 r1 = reg2hex[b1];
 115
 116	if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
 117		jit->seen_reg[r1] = 1;
 118}
 119
/* Record use of the s390 register behind BPF register b1 */
#define REG_SET_SEEN(b1)					\
({								\
	reg_set_seen(jit, b1);					\
})

/* Whether the s390 register behind BPF register b1 was used so far */
#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
 126
/*
 * EMIT macros for code generation
 *
 * _EMITn() stores an n-byte instruction at the current position
 * (jit->prg) and advances it.  In sizing passes jit->prg_buf is NULL,
 * so only the position advances.  The EMITn() wrappers additionally
 * fold register operands into the opcode and record their use.
 */

/* Emit a raw 2-byte instruction */
#define _EMIT2(op)						\
({								\
	if (jit->prg_buf)					\
		*(u16 *) (jit->prg_buf + jit->prg) = (op);	\
	jit->prg += 2;						\
})

/* Emit a 2-byte instruction with two register operands */
#define EMIT2(op, b1, b2)					\
({								\
	_EMIT2((op) | reg(b1, b2));				\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* Emit a raw 4-byte instruction */
#define _EMIT4(op)						\
({								\
	if (jit->prg_buf)					\
		*(u32 *) (jit->prg_buf + jit->prg) = (op);	\
	jit->prg += 4;						\
})

/* Emit a 4-byte instruction with two register operands */
#define EMIT4(op, b1, b2)					\
({								\
	_EMIT4((op) | reg(b1, b2));				\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* Emit a 4-byte instruction with a third register one nibble higher */
#define EMIT4_RRF(op, b1, b2, b3)				\
({								\
	_EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2));		\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
	REG_SET_SEEN(b3);					\
})

/* Emit a 4-byte instruction with a 12-bit displacement */
#define _EMIT4_DISP(op, disp)					\
({								\
	unsigned int __disp = (disp) & 0xfff;			\
	_EMIT4((op) | __disp);					\
})

/* Emit a 4-byte base+displacement instruction */
#define EMIT4_DISP(op, b1, b2, disp)				\
({								\
	_EMIT4_DISP((op) | reg_high(b1) << 16 |			\
		    reg_high(b2) << 8, (disp));			\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* Emit a 4-byte instruction with a 16-bit immediate */
#define EMIT4_IMM(op, b1, imm)					\
({								\
	unsigned int __imm = (imm) & 0xffff;			\
	_EMIT4((op) | reg_high(b1) << 16 | __imm);		\
	REG_SET_SEEN(b1);					\
})

/* Emit a 4-byte instruction carrying a PC-relative halfword offset */
#define EMIT4_PCREL(op, pcrel)					\
({								\
	long __pcrel = ((pcrel) >> 1) & 0xffff;			\
	_EMIT4((op) | __pcrel);					\
})

/*
 * Emit a 4-byte conditional branch with mask "mask" to absolute
 * program offset "target" (converted to a halfword-relative offset)
 */
#define EMIT4_PCREL_RIC(op, mask, target)			\
({								\
	int __rel = ((target) - jit->prg) / 2;			\
	_EMIT4((op) | (mask) << 20 | (__rel & 0xffff));		\
})
 199
/* Emit a raw 6-byte instruction: first 4 bytes, then the last 2 */
#define _EMIT6(op1, op2)					\
({								\
	if (jit->prg_buf) {					\
		*(u32 *) (jit->prg_buf + jit->prg) = (op1);	\
		*(u16 *) (jit->prg_buf + jit->prg + 4) = (op2);	\
	}							\
	jit->prg += 6;						\
})

/* Emit a 6-byte instruction with a 12-bit displacement */
#define _EMIT6_DISP(op1, op2, disp)				\
({								\
	unsigned int __disp = (disp) & 0xfff;			\
	_EMIT6((op1) | __disp, op2);				\
})

/*
 * Emit a 6-byte instruction with a 20-bit displacement split into a
 * low 12-bit part (first halfwords) and a high 8-bit part (last halfword)
 */
#define _EMIT6_DISP_LH(op1, op2, disp)				\
({								\
	u32 _disp = (u32) (disp);				\
	unsigned int __disp_h = _disp & 0xff000;		\
	unsigned int __disp_l = _disp & 0x00fff;		\
	_EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4);	\
})

/* Emit a 6-byte long-displacement instruction with registers b1, b2, b3 */
#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp)		\
({								\
	_EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 |		\
		       reg_high(b3) << 8, op2, disp);		\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
	REG_SET_SEEN(b3);					\
})

/* Emit a 6-byte compare-and-branch to a local label */
#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask)	\
({								\
	int rel = (jit->labels[label] - jit->prg) >> 1;		\
	_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff),	\
	       (op2) | (mask) << 12);				\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* Emit a 6-byte compare-immediate-and-branch to a local label */
#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask)	\
({								\
	int rel = (jit->labels[label] - jit->prg) >> 1;		\
	_EMIT6((op1) | (reg_high(b1) | (mask)) << 16 |		\
		(rel & 0xffff), (op2) | ((imm) & 0xff) << 8);	\
	REG_SET_SEEN(b1);					\
	BUILD_BUG_ON(((unsigned long) (imm)) > 0xff);		\
})

/* Emit a 6-byte branch between BPF instructions i and i+off */
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)		\
({								\
	/* Branch instruction needs 6 bytes */			\
	int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
	_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/*
 * Emit a 6-byte instruction with register b and a 32-bit halfword
 * offset to absolute program offset "target"
 */
#define EMIT6_PCREL_RILB(op, b, target)				\
({								\
	unsigned int rel = (int)((target) - jit->prg) / 2;	\
	_EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\
	REG_SET_SEEN(b);					\
})

/* Emit a 6-byte instruction with a 32-bit halfword offset to "target" */
#define EMIT6_PCREL_RIL(op, target)				\
({								\
	unsigned int rel = (int)((target) - jit->prg) / 2;	\
	_EMIT6((op) | rel >> 16, rel & 0xffff);			\
})

/* Like EMIT6_PCREL_RIL(), with condition mask "mask" */
#define EMIT6_PCREL_RILC(op, mask, target)			\
({								\
	EMIT6_PCREL_RIL((op) | (mask) << 20, (target));		\
})

/* Emit a 6-byte instruction with a 32-bit immediate */
#define _EMIT6_IMM(op, imm)					\
({								\
	unsigned int __imm = (imm);				\
	_EMIT6((op) | (__imm >> 16), __imm & 0xffff);		\
})

/* Emit a 6-byte instruction with register b1 and a 32-bit immediate */
#define EMIT6_IMM(op, b1, imm)					\
({								\
	_EMIT6_IMM((op) | reg_high(b1) << 16, imm);		\
	REG_SET_SEEN(b1);					\
})
 288
/*
 * Append a 32-bit constant to the literal pool; evaluates to its
 * absolute offset within the program buffer
 */
#define _EMIT_CONST_U32(val)					\
({								\
	unsigned int ret;					\
	ret = jit->lit32;					\
	if (jit->prg_buf)					\
		*(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\
	jit->lit32 += 4;					\
	ret;							\
})

/*
 * Append a 32-bit constant; evaluates to its displacement from the
 * literal pool base register (REG_L)
 */
#define EMIT_CONST_U32(val)					\
({								\
	jit->seen |= SEEN_LITERAL;				\
	_EMIT_CONST_U32(val) - jit->base_ip;			\
})

/*
 * Append a 64-bit constant to the literal pool; evaluates to its
 * absolute offset within the program buffer
 */
#define _EMIT_CONST_U64(val)					\
({								\
	unsigned int ret;					\
	ret = jit->lit64;					\
	if (jit->prg_buf)					\
		*(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\
	jit->lit64 += 8;					\
	ret;							\
})

/*
 * Append a 64-bit constant; evaluates to its displacement from the
 * literal pool base register (REG_L)
 */
#define EMIT_CONST_U64(val)					\
({								\
	jit->seen |= SEEN_LITERAL;				\
	_EMIT_CONST_U64(val) - jit->base_ip;			\
})

/*
 * Zero-extend 32-bit register b1 to 64 bits, unless the verifier
 * already inserted explicit zero-extension instructions
 */
#define EMIT_ZERO(b1)						\
({								\
	if (!fp->aux->verifier_zext) {				\
		/* llgfr %dst,%dst (zero extend to 64 bit) */	\
		EMIT4(0xb9160000, b1, b1);			\
		REG_SET_SEEN(b1);				\
	}							\
})
 329
 330/*
 331 * Return whether this is the first pass. The first pass is special, since we
 332 * don't know any sizes yet, and thus must be conservative.
 333 */
 334static bool is_first_pass(struct bpf_jit *jit)
 335{
 336	return jit->size == 0;
 337}
 338
 339/*
 340 * Return whether this is the code generation pass. The code generation pass is
 341 * special, since we should change as little as possible.
 342 */
 343static bool is_codegen_pass(struct bpf_jit *jit)
 344{
 345	return jit->prg_buf;
 346}
 347
 348/*
 349 * Return whether "rel" can be encoded as a short PC-relative offset
 350 */
 351static bool is_valid_rel(int rel)
 352{
 353	return rel >= -65536 && rel <= 65534;
 354}
 355
 356/*
 357 * Return whether "off" can be reached using a short PC-relative offset
 358 */
 359static bool can_use_rel(struct bpf_jit *jit, int off)
 360{
 361	return is_valid_rel(off - jit->prg);
 362}
 363
 364/*
 365 * Return whether given displacement can be encoded using
 366 * Long-Displacement Facility
 367 */
 368static bool is_valid_ldisp(int disp)
 369{
 370	return disp >= -524288 && disp <= 524287;
 371}
 372
 373/*
 374 * Return whether the next 32-bit literal pool entry can be referenced using
 375 * Long-Displacement Facility
 376 */
 377static bool can_use_ldisp_for_lit32(struct bpf_jit *jit)
 378{
 379	return is_valid_ldisp(jit->lit32 - jit->base_ip);
 380}
 381
 382/*
 383 * Return whether the next 64-bit literal pool entry can be referenced using
 384 * Long-Displacement Facility
 385 */
 386static bool can_use_ldisp_for_lit64(struct bpf_jit *jit)
 387{
 388	return is_valid_ldisp(jit->lit64 - jit->base_ip);
 389}
 390
/*
 * Fill whole space with illegal instructions
 *
 * NOTE(review): zero bytes are used as the "illegal instruction"
 * pattern here — presumably opcode 0x0000 is invalid on s390, so any
 * stray execution of the hole traps; confirm against the ISA.
 */
static void jit_fill_hole(void *area, unsigned int size)
{
	memset(area, 0, size);
}
 398
/*
 * Save registers from "rs" (register start) to "re" (register end) on stack
 *
 * The slot for %r6 starts at STK_OFF_R6; each register takes 8 bytes.
 */
static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
	u32 off = STK_OFF_R6 + (rs - 6) * 8;

	if (rs == re)
		/* stg %rs,off(%r15) */
		_EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
	else
		/* stmg %rs,%re,off(%r15) */
		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
}
 413
/*
 * Restore registers from "rs" (register start) to "re" (register end) on stack
 *
 * If this program allocated its own stack frame, %r15 was lowered by
 * STK_OFF + stack_depth in the prologue, so the save area sits that much
 * higher relative to the current %r15.
 */
static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
{
	u32 off = STK_OFF_R6 + (rs - 6) * 8;

	if (jit->seen & SEEN_STACK)
		off += STK_OFF + stack_depth;

	if (rs == re)
		/* lg %rs,off(%r15) */
		_EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
	else
		/* lmg %rs,%re,off(%r15) */
		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
}
 431
 432/*
 433 * Return first seen register (from start)
 434 */
 435static int get_start(struct bpf_jit *jit, int start)
 436{
 437	int i;
 438
 439	for (i = start; i <= 15; i++) {
 440		if (jit->seen_reg[i])
 441			return i;
 442	}
 443	return 0;
 444}
 445
 446/*
 447 * Return last seen register (from start) (gap >= 2)
 448 */
 449static int get_end(struct bpf_jit *jit, int start)
 450{
 451	int i;
 452
 453	for (i = start; i < 15; i++) {
 454		if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
 455			return i - 1;
 456	}
 457	return jit->seen_reg[15] ? 15 : 14;
 458}
 459
#define REGS_SAVE	1
#define REGS_RESTORE	0
/*
 * Save and restore clobbered registers (6-15) on stack.
 * We save/restore registers in chunks with gap >= 2 registers.
 */
static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
{
	/* save_restore_size: worst case is one 6-byte stmg/lmg per register */
	const int last = 15, save_restore_size = 6;
	int re = 6, rs;

	if (is_first_pass(jit)) {
		/*
		 * We don't know yet which registers are used. Reserve space
		 * conservatively.
		 */
		jit->prg += (last - re + 1) * save_restore_size;
		return;
	}

	/* Walk seen_reg[] in chunks delimited by gaps of >= 2 registers */
	do {
		rs = get_start(jit, re);
		if (!rs)
			break;
		re = get_end(jit, rs + 1);
		if (op == REGS_SAVE)
			save_regs(jit, rs, re);
		else
			restore_regs(jit, rs, re, stack_depth);
		re++;
	} while (re <= last);
}
 492
 493static void bpf_skip(struct bpf_jit *jit, int size)
 494{
 495	if (size >= 6 && !is_valid_rel(size)) {
 496		/* brcl 0xf,size */
 497		EMIT6_PCREL_RIL(0xc0f4000000, size);
 498		size -= 6;
 499	} else if (size >= 4 && is_valid_rel(size)) {
 500		/* brc 0xf,size */
 501		EMIT4_PCREL(0xa7f40000, size);
 502		size -= 4;
 503	}
 504	while (size >= 2) {
 505		/* bcr 0,%0 */
 506		_EMIT2(0x0700);
 507		size -= 2;
 508	}
 509}
 510
/*
 * Emit function prologue
 *
 * Save registers and create stack frame if necessary.
 * See stack frame layout description in "bpf_jit.h"!
 */
static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
{
	if (jit->seen & SEEN_TAIL_CALL) {
		/* Clear the tail call counter in the caller's frame */
		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
		_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
	} else {
		/*
		 * There are no tail calls. Insert nops in order to have
		 * tail_call_start at a predictable offset.
		 */
		bpf_skip(jit, 6);
	}
	/* Tail calls have to skip above initialization */
	jit->tail_call_start = jit->prg;
	/* Save registers */
	save_restore_regs(jit, REGS_SAVE, stack_depth);
	/* Setup literal pool */
	if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
		if (!is_first_pass(jit) &&
		    is_valid_ldisp(jit->size - (jit->prg + 2))) {
			/*
			 * The entire program (including the literal pool at
			 * its end) is within long-displacement reach of this
			 * point: a 2-byte basr suffices as pool base.
			 */
			/* basr %l,0 */
			EMIT2(0x0d00, REG_L, REG_0);
			jit->base_ip = jit->prg;
		} else {
			/* Otherwise address the pool start directly */
			/* larl %l,lit32_start */
			EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start);
			jit->base_ip = jit->lit32_start;
		}
	}
	/* Setup stack and backchain */
	if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
		if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
			/* lgr %w1,%r15 (backchain) */
			EMIT4(0xb9040000, REG_W1, REG_15);
		/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
		/* aghi %r15,-STK_OFF */
		EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
		if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
			/* stg %w1,152(%r15) (backchain) */
			EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
				      REG_15, 152);
	}
}
 561
/*
 * Function epilogue
 *
 * Moves the BPF return value into %r2, restores registers and returns.
 * With expolines enabled (Spectre v2 mitigation), indirect branches go
 * through exrl/ex-based thunks instead of plain br.
 */
static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
{
	jit->exit_ip = jit->prg;
	/* Load exit code: lgr %r2,%b0 */
	EMIT4(0xb9040000, REG_2, BPF_REG_0);
	/* Restore registers */
	save_restore_regs(jit, REGS_RESTORE, stack_depth);
	if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
		jit->r14_thunk_ip = jit->prg;
		/* Generate __s390_indirect_jump_r14 thunk */
		if (test_facility(35)) {	/* exrl available */
			/* exrl %r0,.+10 */
			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
		} else {
			/* larl %r1,.+14 */
			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
			/* ex 0,0(%r1) */
			EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
		}
		/* j . */
		EMIT4_PCREL(0xa7f40000, 0);
	}
	/* br %r14 */
	_EMIT2(0x07fe);

	if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable &&
	    (is_first_pass(jit) || (jit->seen & SEEN_FUNC))) {
		jit->r1_thunk_ip = jit->prg;
		/* Generate __s390_indirect_jump_r1 thunk */
		if (test_facility(35)) {
			/* exrl %r0,.+10 */
			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
			/* j . */
			EMIT4_PCREL(0xa7f40000, 0);
			/* br %r1 */
			_EMIT2(0x07f1);
		} else {
			/* ex 0,S390_lowcore.br_r1_trampoline */
			EMIT4_DISP(0x44000000, REG_0, REG_0,
				   offsetof(struct lowcore, br_r1_trampoline));
			/* j . */
			EMIT4_PCREL(0xa7f40000, 0);
		}
	}
}
 610
 611static int get_probe_mem_regno(const u8 *insn)
 612{
 613	/*
 614	 * insn must point to llgc, llgh, llgf or lg, which have destination
 615	 * register at the same position.
 616	 */
 617	if (insn[0] != 0xe3) /* common llgc, llgh, llgf and lg prefix */
 618		return -1;
 619	if (insn[5] != 0x90 && /* llgc */
 620	    insn[5] != 0x91 && /* llgh */
 621	    insn[5] != 0x16 && /* llgf */
 622	    insn[5] != 0x04) /* lg */
 623		return -1;
 624	return insn[1] >> 4;
 625}
 626
/*
 * Exception table handler for BPF_PROBE_MEM loads: on a fault, zero the
 * destination register and resume at the fixup target.
 */
static bool ex_handler_bpf(const struct exception_table_entry *x,
			   struct pt_regs *regs)
{
	int regno;
	u8 *insn;

	/* Continue after the nop that follows the probe instruction */
	regs->psw.addr = extable_fixup(x);
	/*
	 * Step back by the faulting instruction's length (from the
	 * interruption code) to locate the probe instruction itself.
	 */
	insn = (u8 *)__rewind_psw(regs->psw, regs->int_code >> 16);
	regno = get_probe_mem_regno(insn);
	if (WARN_ON_ONCE(regno < 0))
		/* JIT bug - unexpected instruction. */
		return false;
	/* Faulting probe loads read as zero */
	regs->gprs[regno] = 0;
	return true;
}
 642
/*
 * Register exception table entries for a BPF_PROBE_MEM load.
 *
 * @probe_prg: program offset of the emitted probe (load) instruction
 * @nop_prg:   program offset of the nop directly following it, used as
 *             the landing pad after a fault
 *
 * Two entries are added, keyed on the probe and on the nop instruction
 * respectively.  Returns 0 on success, -1 on any internal inconsistency
 * (each one a JIT or verifier bug, hence the WARN_ON_ONCE).
 */
static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
			     int probe_prg, int nop_prg)
{
	struct exception_table_entry *ex;
	s64 delta;
	u8 *insn;
	int prg;
	int i;

	if (!fp->aux->extable)
		/* Do nothing during early JIT passes. */
		return 0;
	insn = jit->prg_buf + probe_prg;
	if (WARN_ON_ONCE(get_probe_mem_regno(insn) < 0))
		/* JIT bug - unexpected probe instruction. */
		return -1;
	if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg))
		/* JIT bug - gap between probe and nop instructions. */
		return -1;
	for (i = 0; i < 2; i++) {
		if (WARN_ON_ONCE(jit->excnt >= fp->aux->num_exentries))
			/* Verifier bug - not enough entries. */
			return -1;
		ex = &fp->aux->extable[jit->excnt];
		/* Add extable entries for probe and nop instructions. */
		prg = i == 0 ? probe_prg : nop_prg;
		/* Entries hold 32-bit offsets relative to their own address */
		delta = jit->prg_buf + prg - (u8 *)&ex->insn;
		if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
			/* JIT bug - code and extable must be close. */
			return -1;
		ex->insn = delta;
		/*
		 * Always land on the nop. Note that extable infrastructure
		 * ignores fixup field, it is handled by ex_handler_bpf().
		 */
		delta = jit->prg_buf + nop_prg - (u8 *)&ex->fixup;
		if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
			/* JIT bug - landing pad and extable must be close. */
			return -1;
		ex->fixup = delta;
		ex->handler = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
		jit->excnt++;
	}
	return 0;
}
 688
 689/*
 690 * Compile one eBPF instruction into s390x code
 691 *
 692 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
 693 * stack space for the large switch statement.
 694 */
 695static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 696				 int i, bool extra_pass, u32 stack_depth)
 697{
 698	struct bpf_insn *insn = &fp->insnsi[i];
 
 699	u32 dst_reg = insn->dst_reg;
 700	u32 src_reg = insn->src_reg;
 701	int last, insn_count = 1;
 702	u32 *addrs = jit->addrs;
 703	s32 imm = insn->imm;
 704	s16 off = insn->off;
 705	int probe_prg = -1;
 706	unsigned int mask;
 707	int nop_prg;
 708	int err;
 709
 710	if (BPF_CLASS(insn->code) == BPF_LDX &&
 711	    BPF_MODE(insn->code) == BPF_PROBE_MEM)
 712		probe_prg = jit->prg;
 713
 
 
 714	switch (insn->code) {
 715	/*
 716	 * BPF_MOV
 717	 */
 718	case BPF_ALU | BPF_MOV | BPF_X: /* dst = (u32) src */
 719		/* llgfr %dst,%src */
 720		EMIT4(0xb9160000, dst_reg, src_reg);
 721		if (insn_is_zext(&insn[1]))
 722			insn_count = 2;
 723		break;
 724	case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
 725		/* lgr %dst,%src */
 726		EMIT4(0xb9040000, dst_reg, src_reg);
 727		break;
 728	case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
 729		/* llilf %dst,imm */
 730		EMIT6_IMM(0xc00f0000, dst_reg, imm);
 731		if (insn_is_zext(&insn[1]))
 732			insn_count = 2;
 733		break;
 734	case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
 735		/* lgfi %dst,imm */
 736		EMIT6_IMM(0xc0010000, dst_reg, imm);
 737		break;
 738	/*
 739	 * BPF_LD 64
 740	 */
 741	case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
 742	{
 743		/* 16 byte instruction that uses two 'struct bpf_insn' */
 744		u64 imm64;
 745
 746		imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
 747		/* lgrl %dst,imm */
 748		EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64));
 
 749		insn_count = 2;
 750		break;
 751	}
 752	/*
 753	 * BPF_ADD
 754	 */
 755	case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
 756		/* ar %dst,%src */
 757		EMIT2(0x1a00, dst_reg, src_reg);
 758		EMIT_ZERO(dst_reg);
 759		break;
 760	case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
 761		/* agr %dst,%src */
 762		EMIT4(0xb9080000, dst_reg, src_reg);
 763		break;
 764	case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
 765		if (!imm)
 766			break;
 767		/* alfi %dst,imm */
 768		EMIT6_IMM(0xc20b0000, dst_reg, imm);
 769		EMIT_ZERO(dst_reg);
 770		break;
 771	case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
 772		if (!imm)
 773			break;
 774		/* agfi %dst,imm */
 775		EMIT6_IMM(0xc2080000, dst_reg, imm);
 776		break;
 777	/*
 778	 * BPF_SUB
 779	 */
 780	case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
 781		/* sr %dst,%src */
 782		EMIT2(0x1b00, dst_reg, src_reg);
 783		EMIT_ZERO(dst_reg);
 784		break;
 785	case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
 786		/* sgr %dst,%src */
 787		EMIT4(0xb9090000, dst_reg, src_reg);
 788		break;
 789	case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
 790		if (!imm)
 791			break;
 792		/* alfi %dst,-imm */
 793		EMIT6_IMM(0xc20b0000, dst_reg, -imm);
 794		EMIT_ZERO(dst_reg);
 795		break;
 796	case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
 797		if (!imm)
 798			break;
 799		/* agfi %dst,-imm */
 800		EMIT6_IMM(0xc2080000, dst_reg, -imm);
 801		break;
 802	/*
 803	 * BPF_MUL
 804	 */
 805	case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
 806		/* msr %dst,%src */
 807		EMIT4(0xb2520000, dst_reg, src_reg);
 808		EMIT_ZERO(dst_reg);
 809		break;
 810	case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
 811		/* msgr %dst,%src */
 812		EMIT4(0xb90c0000, dst_reg, src_reg);
 813		break;
 814	case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
 815		if (imm == 1)
 816			break;
 817		/* msfi %r5,imm */
 818		EMIT6_IMM(0xc2010000, dst_reg, imm);
 819		EMIT_ZERO(dst_reg);
 820		break;
 821	case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
 822		if (imm == 1)
 823			break;
 824		/* msgfi %dst,imm */
 825		EMIT6_IMM(0xc2000000, dst_reg, imm);
 826		break;
 827	/*
 828	 * BPF_DIV / BPF_MOD
 829	 */
 830	case BPF_ALU | BPF_DIV | BPF_X: /* dst = (u32) dst / (u32) src */
 831	case BPF_ALU | BPF_MOD | BPF_X: /* dst = (u32) dst % (u32) src */
 832	{
 833		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 834
 835		/* lhi %w0,0 */
 836		EMIT4_IMM(0xa7080000, REG_W0, 0);
 837		/* lr %w1,%dst */
 838		EMIT2(0x1800, REG_W1, dst_reg);
 839		/* dlr %w0,%src */
 840		EMIT4(0xb9970000, REG_W0, src_reg);
 841		/* llgfr %dst,%rc */
 842		EMIT4(0xb9160000, dst_reg, rc_reg);
 843		if (insn_is_zext(&insn[1]))
 844			insn_count = 2;
 845		break;
 846	}
 847	case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
 848	case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
 849	{
 850		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 851
 852		/* lghi %w0,0 */
 853		EMIT4_IMM(0xa7090000, REG_W0, 0);
 854		/* lgr %w1,%dst */
 855		EMIT4(0xb9040000, REG_W1, dst_reg);
 856		/* dlgr %w0,%dst */
 857		EMIT4(0xb9870000, REG_W0, src_reg);
 858		/* lgr %dst,%rc */
 859		EMIT4(0xb9040000, dst_reg, rc_reg);
 860		break;
 861	}
 862	case BPF_ALU | BPF_DIV | BPF_K: /* dst = (u32) dst / (u32) imm */
 863	case BPF_ALU | BPF_MOD | BPF_K: /* dst = (u32) dst % (u32) imm */
 864	{
 865		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 866
 867		if (imm == 1) {
 868			if (BPF_OP(insn->code) == BPF_MOD)
 869				/* lhgi %dst,0 */
 870				EMIT4_IMM(0xa7090000, dst_reg, 0);
 871			break;
 872		}
 873		/* lhi %w0,0 */
 874		EMIT4_IMM(0xa7080000, REG_W0, 0);
 875		/* lr %w1,%dst */
 876		EMIT2(0x1800, REG_W1, dst_reg);
 877		if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) {
 878			/* dl %w0,<d(imm)>(%l) */
 879			EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
 880				      EMIT_CONST_U32(imm));
 881		} else {
 882			/* lgfrl %dst,imm */
 883			EMIT6_PCREL_RILB(0xc40c0000, dst_reg,
 884					 _EMIT_CONST_U32(imm));
 885			jit->seen |= SEEN_LITERAL;
 886			/* dlr %w0,%dst */
 887			EMIT4(0xb9970000, REG_W0, dst_reg);
 888		}
 889		/* llgfr %dst,%rc */
 890		EMIT4(0xb9160000, dst_reg, rc_reg);
 891		if (insn_is_zext(&insn[1]))
 892			insn_count = 2;
 893		break;
 894	}
 895	case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
 896	case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
 897	{
 898		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 899
 900		if (imm == 1) {
 901			if (BPF_OP(insn->code) == BPF_MOD)
 902				/* lhgi %dst,0 */
 903				EMIT4_IMM(0xa7090000, dst_reg, 0);
 904			break;
 905		}
 906		/* lghi %w0,0 */
 907		EMIT4_IMM(0xa7090000, REG_W0, 0);
 908		/* lgr %w1,%dst */
 909		EMIT4(0xb9040000, REG_W1, dst_reg);
 910		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
 911			/* dlg %w0,<d(imm)>(%l) */
 912			EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
 913				      EMIT_CONST_U64(imm));
 914		} else {
 915			/* lgrl %dst,imm */
 916			EMIT6_PCREL_RILB(0xc4080000, dst_reg,
 917					 _EMIT_CONST_U64(imm));
 918			jit->seen |= SEEN_LITERAL;
 919			/* dlgr %w0,%dst */
 920			EMIT4(0xb9870000, REG_W0, dst_reg);
 921		}
 922		/* lgr %dst,%rc */
 923		EMIT4(0xb9040000, dst_reg, rc_reg);
 924		break;
 925	}
 926	/*
 927	 * BPF_AND
 928	 */
 929	case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
 930		/* nr %dst,%src */
 931		EMIT2(0x1400, dst_reg, src_reg);
 932		EMIT_ZERO(dst_reg);
 933		break;
 934	case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
 935		/* ngr %dst,%src */
 936		EMIT4(0xb9800000, dst_reg, src_reg);
 937		break;
 938	case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
 939		/* nilf %dst,imm */
 940		EMIT6_IMM(0xc00b0000, dst_reg, imm);
 941		EMIT_ZERO(dst_reg);
 942		break;
 943	case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
 944		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
 945			/* ng %dst,<d(imm)>(%l) */
 946			EMIT6_DISP_LH(0xe3000000, 0x0080,
 947				      dst_reg, REG_0, REG_L,
 948				      EMIT_CONST_U64(imm));
 949		} else {
 950			/* lgrl %w0,imm */
 951			EMIT6_PCREL_RILB(0xc4080000, REG_W0,
 952					 _EMIT_CONST_U64(imm));
 953			jit->seen |= SEEN_LITERAL;
 954			/* ngr %dst,%w0 */
 955			EMIT4(0xb9800000, dst_reg, REG_W0);
 956		}
 957		break;
 958	/*
 959	 * BPF_OR
 960	 */
 961	case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
 962		/* or %dst,%src */
 963		EMIT2(0x1600, dst_reg, src_reg);
 964		EMIT_ZERO(dst_reg);
 965		break;
 966	case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
 967		/* ogr %dst,%src */
 968		EMIT4(0xb9810000, dst_reg, src_reg);
 969		break;
 970	case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
 971		/* oilf %dst,imm */
 972		EMIT6_IMM(0xc00d0000, dst_reg, imm);
 973		EMIT_ZERO(dst_reg);
 974		break;
 975	case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
 976		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
 977			/* og %dst,<d(imm)>(%l) */
 978			EMIT6_DISP_LH(0xe3000000, 0x0081,
 979				      dst_reg, REG_0, REG_L,
 980				      EMIT_CONST_U64(imm));
 981		} else {
 982			/* lgrl %w0,imm */
 983			EMIT6_PCREL_RILB(0xc4080000, REG_W0,
 984					 _EMIT_CONST_U64(imm));
 985			jit->seen |= SEEN_LITERAL;
 986			/* ogr %dst,%w0 */
 987			EMIT4(0xb9810000, dst_reg, REG_W0);
 988		}
 989		break;
 990	/*
 991	 * BPF_XOR
 992	 */
 993	case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
 994		/* xr %dst,%src */
 995		EMIT2(0x1700, dst_reg, src_reg);
 996		EMIT_ZERO(dst_reg);
 997		break;
 998	case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
 999		/* xgr %dst,%src */
1000		EMIT4(0xb9820000, dst_reg, src_reg);
1001		break;
1002	case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
1003		if (!imm)
1004			break;
1005		/* xilf %dst,imm */
1006		EMIT6_IMM(0xc0070000, dst_reg, imm);
1007		EMIT_ZERO(dst_reg);
1008		break;
1009	case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
1010		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
1011			/* xg %dst,<d(imm)>(%l) */
1012			EMIT6_DISP_LH(0xe3000000, 0x0082,
1013				      dst_reg, REG_0, REG_L,
1014				      EMIT_CONST_U64(imm));
1015		} else {
1016			/* lgrl %w0,imm */
1017			EMIT6_PCREL_RILB(0xc4080000, REG_W0,
1018					 _EMIT_CONST_U64(imm));
1019			jit->seen |= SEEN_LITERAL;
1020			/* xgr %dst,%w0 */
1021			EMIT4(0xb9820000, dst_reg, REG_W0);
1022		}
1023		break;
1024	/*
1025	 * BPF_LSH
1026	 */
1027	case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
1028		/* sll %dst,0(%src) */
1029		EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
1030		EMIT_ZERO(dst_reg);
1031		break;
1032	case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
1033		/* sllg %dst,%dst,0(%src) */
1034		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
1035		break;
1036	case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
1037		if (imm == 0)
1038			break;
1039		/* sll %dst,imm(%r0) */
1040		EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
1041		EMIT_ZERO(dst_reg);
1042		break;
1043	case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
1044		if (imm == 0)
1045			break;
1046		/* sllg %dst,%dst,imm(%r0) */
1047		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
1048		break;
1049	/*
1050	 * BPF_RSH
1051	 */
1052	case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
1053		/* srl %dst,0(%src) */
1054		EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
1055		EMIT_ZERO(dst_reg);
1056		break;
1057	case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
1058		/* srlg %dst,%dst,0(%src) */
1059		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
1060		break;
1061	case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
1062		if (imm == 0)
1063			break;
1064		/* srl %dst,imm(%r0) */
1065		EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
1066		EMIT_ZERO(dst_reg);
1067		break;
1068	case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
1069		if (imm == 0)
1070			break;
1071		/* srlg %dst,%dst,imm(%r0) */
1072		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
1073		break;
1074	/*
1075	 * BPF_ARSH
1076	 */
1077	case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */
1078		/* sra %dst,%dst,0(%src) */
1079		EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0);
1080		EMIT_ZERO(dst_reg);
1081		break;
1082	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
1083		/* srag %dst,%dst,0(%src) */
1084		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
1085		break;
1086	case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
1087		if (imm == 0)
1088			break;
1089		/* sra %dst,imm(%r0) */
1090		EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
1091		EMIT_ZERO(dst_reg);
1092		break;
1093	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
1094		if (imm == 0)
1095			break;
1096		/* srag %dst,%dst,imm(%r0) */
1097		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
1098		break;
1099	/*
1100	 * BPF_NEG
1101	 */
1102	case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
1103		/* lcr %dst,%dst */
1104		EMIT2(0x1300, dst_reg, dst_reg);
1105		EMIT_ZERO(dst_reg);
1106		break;
1107	case BPF_ALU64 | BPF_NEG: /* dst = -dst */
1108		/* lcgr %dst,%dst */
1109		EMIT4(0xb9030000, dst_reg, dst_reg);
1110		break;
1111	/*
1112	 * BPF_FROM_BE/LE
1113	 */
1114	case BPF_ALU | BPF_END | BPF_FROM_BE:
1115		/* s390 is big endian, therefore only clear high order bytes */
1116		switch (imm) {
1117		case 16: /* dst = (u16) cpu_to_be16(dst) */
1118			/* llghr %dst,%dst */
1119			EMIT4(0xb9850000, dst_reg, dst_reg);
1120			if (insn_is_zext(&insn[1]))
1121				insn_count = 2;
1122			break;
1123		case 32: /* dst = (u32) cpu_to_be32(dst) */
1124			if (!fp->aux->verifier_zext)
1125				/* llgfr %dst,%dst */
1126				EMIT4(0xb9160000, dst_reg, dst_reg);
1127			break;
1128		case 64: /* dst = (u64) cpu_to_be64(dst) */
1129			break;
1130		}
1131		break;
1132	case BPF_ALU | BPF_END | BPF_FROM_LE:
1133		switch (imm) {
1134		case 16: /* dst = (u16) cpu_to_le16(dst) */
1135			/* lrvr %dst,%dst */
1136			EMIT4(0xb91f0000, dst_reg, dst_reg);
1137			/* srl %dst,16(%r0) */
1138			EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
1139			/* llghr %dst,%dst */
1140			EMIT4(0xb9850000, dst_reg, dst_reg);
1141			if (insn_is_zext(&insn[1]))
1142				insn_count = 2;
1143			break;
1144		case 32: /* dst = (u32) cpu_to_le32(dst) */
1145			/* lrvr %dst,%dst */
1146			EMIT4(0xb91f0000, dst_reg, dst_reg);
1147			if (!fp->aux->verifier_zext)
1148				/* llgfr %dst,%dst */
1149				EMIT4(0xb9160000, dst_reg, dst_reg);
1150			break;
1151		case 64: /* dst = (u64) cpu_to_le64(dst) */
1152			/* lrvgr %dst,%dst */
1153			EMIT4(0xb90f0000, dst_reg, dst_reg);
1154			break;
1155		}
1156		break;
1157	/*
1158	 * BPF_ST(X)
1159	 */
1160	case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
1161		/* stcy %src,off(%dst) */
1162		EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
1163		jit->seen |= SEEN_MEM;
1164		break;
1165	case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
1166		/* sthy %src,off(%dst) */
1167		EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
1168		jit->seen |= SEEN_MEM;
1169		break;
1170	case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
1171		/* sty %src,off(%dst) */
1172		EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
1173		jit->seen |= SEEN_MEM;
1174		break;
1175	case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
1176		/* stg %src,off(%dst) */
1177		EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
1178		jit->seen |= SEEN_MEM;
1179		break;
1180	case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
1181		/* lhi %w0,imm */
1182		EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
1183		/* stcy %w0,off(dst) */
1184		EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
1185		jit->seen |= SEEN_MEM;
1186		break;
1187	case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
1188		/* lhi %w0,imm */
1189		EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
1190		/* sthy %w0,off(dst) */
1191		EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
1192		jit->seen |= SEEN_MEM;
1193		break;
1194	case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
1195		/* llilf %w0,imm  */
1196		EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
1197		/* sty %w0,off(%dst) */
1198		EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
1199		jit->seen |= SEEN_MEM;
1200		break;
1201	case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
1202		/* lgfi %w0,imm */
1203		EMIT6_IMM(0xc0010000, REG_W0, imm);
1204		/* stg %w0,off(%dst) */
1205		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
1206		jit->seen |= SEEN_MEM;
1207		break;
1208	/*
1209	 * BPF_STX XADD (atomic_add)
1210	 */
1211	case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
1212		/* laal %w0,%src,off(%dst) */
1213		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W0, src_reg,
1214			      dst_reg, off);
1215		jit->seen |= SEEN_MEM;
1216		break;
1217	case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
1218		/* laalg %w0,%src,off(%dst) */
1219		EMIT6_DISP_LH(0xeb000000, 0x00ea, REG_W0, src_reg,
1220			      dst_reg, off);
1221		jit->seen |= SEEN_MEM;
1222		break;
1223	/*
1224	 * BPF_LDX
1225	 */
1226	case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
1227	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1228		/* llgc %dst,0(off,%src) */
1229		EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
1230		jit->seen |= SEEN_MEM;
1231		if (insn_is_zext(&insn[1]))
1232			insn_count = 2;
1233		break;
1234	case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
1235	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1236		/* llgh %dst,0(off,%src) */
1237		EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
1238		jit->seen |= SEEN_MEM;
1239		if (insn_is_zext(&insn[1]))
1240			insn_count = 2;
1241		break;
1242	case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
1243	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1244		/* llgf %dst,off(%src) */
1245		jit->seen |= SEEN_MEM;
1246		EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
1247		if (insn_is_zext(&insn[1]))
1248			insn_count = 2;
1249		break;
1250	case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
1251	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1252		/* lg %dst,0(off,%src) */
1253		jit->seen |= SEEN_MEM;
1254		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
1255		break;
1256	/*
1257	 * BPF_JMP / CALL
1258	 */
1259	case BPF_JMP | BPF_CALL:
1260	{
1261		u64 func;
1262		bool func_addr_fixed;
1263		int ret;
1264
1265		ret = bpf_jit_get_func_addr(fp, insn, extra_pass,
1266					    &func, &func_addr_fixed);
1267		if (ret < 0)
1268			return -1;
1269
1270		REG_SET_SEEN(BPF_REG_5);
1271		jit->seen |= SEEN_FUNC;
1272		/* lgrl %w1,func */
1273		EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
 
1274		if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
1275			/* brasl %r14,__s390_indirect_jump_r1 */
1276			EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
1277		} else {
1278			/* basr %r14,%w1 */
1279			EMIT2(0x0d00, REG_14, REG_W1);
1280		}
1281		/* lgr %b0,%r2: load return value into %b0 */
1282		EMIT4(0xb9040000, BPF_REG_0, REG_2);
1283		break;
1284	}
1285	case BPF_JMP | BPF_TAIL_CALL:
1286		/*
1287		 * Implicit input:
1288		 *  B1: pointer to ctx
1289		 *  B2: pointer to bpf_array
1290		 *  B3: index in bpf_array
1291		 */
1292		jit->seen |= SEEN_TAIL_CALL;
1293
1294		/*
1295		 * if (index >= array->map.max_entries)
1296		 *         goto out;
1297		 */
1298
1299		/* llgf %w1,map.max_entries(%b2) */
1300		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
1301			      offsetof(struct bpf_array, map.max_entries));
1302		/* if ((u32)%b3 >= (u32)%w1) goto out; */
1303		if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
1304			/* clrj %b3,%w1,0xa,label0 */
1305			EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
1306					  REG_W1, 0, 0xa);
1307		} else {
1308			/* clr %b3,%w1 */
1309			EMIT2(0x1500, BPF_REG_3, REG_W1);
1310			/* brcl 0xa,label0 */
1311			EMIT6_PCREL_RILC(0xc0040000, 0xa, jit->labels[0]);
1312		}
1313
1314		/*
1315		 * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
1316		 *         goto out;
1317		 */
1318
1319		if (jit->seen & SEEN_STACK)
1320			off = STK_OFF_TCCNT + STK_OFF + stack_depth;
1321		else
1322			off = STK_OFF_TCCNT;
1323		/* lhi %w0,1 */
1324		EMIT4_IMM(0xa7080000, REG_W0, 1);
1325		/* laal %w1,%w0,off(%r15) */
1326		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
1327		if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
1328			/* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
1329			EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
1330					      MAX_TAIL_CALL_CNT, 0, 0x2);
1331		} else {
1332			/* clfi %w1,MAX_TAIL_CALL_CNT */
1333			EMIT6_IMM(0xc20f0000, REG_W1, MAX_TAIL_CALL_CNT);
1334			/* brcl 0x2,label0 */
1335			EMIT6_PCREL_RILC(0xc0040000, 0x2, jit->labels[0]);
1336		}
1337
1338		/*
1339		 * prog = array->ptrs[index];
1340		 * if (prog == NULL)
1341		 *         goto out;
1342		 */
1343
1344		/* llgfr %r1,%b3: %r1 = (u32) index */
1345		EMIT4(0xb9160000, REG_1, BPF_REG_3);
1346		/* sllg %r1,%r1,3: %r1 *= 8 */
1347		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
1348		/* ltg %r1,prog(%b2,%r1) */
1349		EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
1350			      REG_1, offsetof(struct bpf_array, ptrs));
1351		if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
1352			/* brc 0x8,label0 */
1353			EMIT4_PCREL_RIC(0xa7040000, 0x8, jit->labels[0]);
1354		} else {
1355			/* brcl 0x8,label0 */
1356			EMIT6_PCREL_RILC(0xc0040000, 0x8, jit->labels[0]);
1357		}
1358
1359		/*
1360		 * Restore registers before calling function
1361		 */
1362		save_restore_regs(jit, REGS_RESTORE, stack_depth);
1363
1364		/*
1365		 * goto *(prog->bpf_func + tail_call_start);
1366		 */
1367
1368		/* lg %r1,bpf_func(%r1) */
1369		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
1370			      offsetof(struct bpf_prog, bpf_func));
1371		/* bc 0xf,tail_call_start(%r1) */
1372		_EMIT4(0x47f01000 + jit->tail_call_start);
1373		/* out: */
1374		jit->labels[0] = jit->prg;
1375		break;
1376	case BPF_JMP | BPF_EXIT: /* return b0 */
1377		last = (i == fp->len - 1) ? 1 : 0;
1378		if (last)
1379			break;
1380		if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip))
1381			/* brc 0xf, <exit> */
1382			EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip);
1383		else
1384			/* brcl 0xf, <exit> */
1385			EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip);
1386		break;
1387	/*
1388	 * Branch relative (number of skipped instructions) to offset on
1389	 * condition.
1390	 *
1391	 * Condition code to mask mapping:
1392	 *
1393	 * CC | Description	   | Mask
1394	 * ------------------------------
1395	 * 0  | Operands equal	   |	8
1396	 * 1  | First operand low  |	4
1397	 * 2  | First operand high |	2
1398	 * 3  | Unused		   |	1
1399	 *
1400	 * For s390x relative branches: ip = ip + off_bytes
1401	 * For BPF relative branches:	insn = insn + off_insns + 1
1402	 *
1403	 * For example for s390x with offset 0 we jump to the branch
1404	 * instruction itself (loop) and for BPF with offset 0 we
1405	 * branch to the instruction behind the branch.
1406	 */
1407	case BPF_JMP | BPF_JA: /* if (true) */
1408		mask = 0xf000; /* j */
1409		goto branch_oc;
1410	case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
1411	case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */
1412		mask = 0x2000; /* jh */
1413		goto branch_ks;
1414	case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
1415	case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */
1416		mask = 0x4000; /* jl */
1417		goto branch_ks;
1418	case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
1419	case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */
1420		mask = 0xa000; /* jhe */
1421		goto branch_ks;
1422	case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
1423	case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */
1424		mask = 0xc000; /* jle */
1425		goto branch_ks;
1426	case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
1427	case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */
1428		mask = 0x2000; /* jh */
1429		goto branch_ku;
1430	case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
1431	case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */
1432		mask = 0x4000; /* jl */
1433		goto branch_ku;
1434	case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
1435	case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */
1436		mask = 0xa000; /* jhe */
1437		goto branch_ku;
1438	case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
1439	case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */
1440		mask = 0xc000; /* jle */
1441		goto branch_ku;
1442	case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
1443	case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */
1444		mask = 0x7000; /* jne */
1445		goto branch_ku;
1446	case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
1447	case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */
1448		mask = 0x8000; /* je */
1449		goto branch_ku;
1450	case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
1451	case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */
1452		mask = 0x7000; /* jnz */
1453		if (BPF_CLASS(insn->code) == BPF_JMP32) {
1454			/* llilf %w1,imm (load zero extend imm) */
1455			EMIT6_IMM(0xc00f0000, REG_W1, imm);
1456			/* nr %w1,%dst */
1457			EMIT2(0x1400, REG_W1, dst_reg);
1458		} else {
1459			/* lgfi %w1,imm (load sign extend imm) */
1460			EMIT6_IMM(0xc0010000, REG_W1, imm);
1461			/* ngr %w1,%dst */
1462			EMIT4(0xb9800000, REG_W1, dst_reg);
1463		}
1464		goto branch_oc;
1465
1466	case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
1467	case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */
1468		mask = 0x2000; /* jh */
1469		goto branch_xs;
1470	case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
1471	case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */
1472		mask = 0x4000; /* jl */
1473		goto branch_xs;
1474	case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
1475	case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */
1476		mask = 0xa000; /* jhe */
1477		goto branch_xs;
1478	case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
1479	case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */
1480		mask = 0xc000; /* jle */
1481		goto branch_xs;
1482	case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
1483	case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */
1484		mask = 0x2000; /* jh */
1485		goto branch_xu;
1486	case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
1487	case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */
1488		mask = 0x4000; /* jl */
1489		goto branch_xu;
1490	case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
1491	case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */
1492		mask = 0xa000; /* jhe */
1493		goto branch_xu;
1494	case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
1495	case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */
1496		mask = 0xc000; /* jle */
1497		goto branch_xu;
1498	case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
1499	case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */
1500		mask = 0x7000; /* jne */
1501		goto branch_xu;
1502	case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
1503	case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */
1504		mask = 0x8000; /* je */
1505		goto branch_xu;
1506	case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
1507	case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */
1508	{
1509		bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1510
1511		mask = 0x7000; /* jnz */
1512		/* nrk or ngrk %w1,%dst,%src */
1513		EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000),
1514			  REG_W1, dst_reg, src_reg);
1515		goto branch_oc;
1516branch_ks:
1517		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1518		/* cfi or cgfi %dst,imm */
1519		EMIT6_IMM(is_jmp32 ? 0xc20d0000 : 0xc20c0000,
1520			  dst_reg, imm);
1521		if (!is_first_pass(jit) &&
1522		    can_use_rel(jit, addrs[i + off + 1])) {
1523			/* brc mask,off */
1524			EMIT4_PCREL_RIC(0xa7040000,
1525					mask >> 12, addrs[i + off + 1]);
1526		} else {
1527			/* brcl mask,off */
1528			EMIT6_PCREL_RILC(0xc0040000,
1529					 mask >> 12, addrs[i + off + 1]);
1530		}
1531		break;
1532branch_ku:
 
1533		/* lgfi %w1,imm (load sign extend imm) */
1534		src_reg = REG_1;
1535		EMIT6_IMM(0xc0010000, src_reg, imm);
1536		goto branch_xu;
 
 
1537branch_xs:
1538		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1539		if (!is_first_pass(jit) &&
1540		    can_use_rel(jit, addrs[i + off + 1])) {
1541			/* crj or cgrj %dst,%src,mask,off */
1542			EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
1543				    dst_reg, src_reg, i, off, mask);
1544		} else {
1545			/* cr or cgr %dst,%src */
1546			if (is_jmp32)
1547				EMIT2(0x1900, dst_reg, src_reg);
1548			else
1549				EMIT4(0xb9200000, dst_reg, src_reg);
1550			/* brcl mask,off */
1551			EMIT6_PCREL_RILC(0xc0040000,
1552					 mask >> 12, addrs[i + off + 1]);
1553		}
1554		break;
1555branch_xu:
1556		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1557		if (!is_first_pass(jit) &&
1558		    can_use_rel(jit, addrs[i + off + 1])) {
1559			/* clrj or clgrj %dst,%src,mask,off */
1560			EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
1561				    dst_reg, src_reg, i, off, mask);
1562		} else {
1563			/* clr or clgr %dst,%src */
1564			if (is_jmp32)
1565				EMIT2(0x1500, dst_reg, src_reg);
1566			else
1567				EMIT4(0xb9210000, dst_reg, src_reg);
1568			/* brcl mask,off */
1569			EMIT6_PCREL_RILC(0xc0040000,
1570					 mask >> 12, addrs[i + off + 1]);
1571		}
1572		break;
1573branch_oc:
1574		if (!is_first_pass(jit) &&
1575		    can_use_rel(jit, addrs[i + off + 1])) {
1576			/* brc mask,off */
1577			EMIT4_PCREL_RIC(0xa7040000,
1578					mask >> 12, addrs[i + off + 1]);
1579		} else {
1580			/* brcl mask,off */
1581			EMIT6_PCREL_RILC(0xc0040000,
1582					 mask >> 12, addrs[i + off + 1]);
1583		}
1584		break;
1585	}
1586	default: /* too complex, give up */
1587		pr_err("Unknown opcode %02x\n", insn->code);
1588		return -1;
1589	}
1590
1591	if (probe_prg != -1) {
1592		/*
1593		 * Handlers of certain exceptions leave psw.addr pointing to
1594		 * the instruction directly after the failing one. Therefore,
1595		 * create two exception table entries and also add a nop in
1596		 * case two probing instructions come directly after each
1597		 * other.
1598		 */
1599		nop_prg = jit->prg;
1600		/* bcr 0,%0 */
1601		_EMIT2(0x0700);
1602		err = bpf_jit_probe_mem(jit, fp, probe_prg, nop_prg);
1603		if (err < 0)
1604			return err;
1605	}
1606
1607	return insn_count;
1608}
1609
1610/*
1611 * Return whether new i-th instruction address does not violate any invariant
1612 */
1613static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i)
1614{
1615	/* On the first pass anything goes */
1616	if (is_first_pass(jit))
1617		return true;
1618
1619	/* The codegen pass must not change anything */
1620	if (is_codegen_pass(jit))
1621		return jit->addrs[i] == jit->prg;
1622
1623	/* Passes in between must not increase code size */
1624	return jit->addrs[i] >= jit->prg;
1625}
1626
1627/*
1628 * Update the address of i-th instruction
1629 */
1630static int bpf_set_addr(struct bpf_jit *jit, int i)
1631{
1632	int delta;
1633
1634	if (is_codegen_pass(jit)) {
1635		delta = jit->prg - jit->addrs[i];
1636		if (delta < 0)
1637			bpf_skip(jit, -delta);
1638	}
1639	if (WARN_ON_ONCE(!bpf_is_new_addr_sane(jit, i)))
1640		return -1;
1641	jit->addrs[i] = jit->prg;
1642	return 0;
1643}
1644
/*
 * Compile eBPF program into s390x code
 *
 * Emits the prologue, every instruction and the epilogue, recording the
 * address of each instruction in jit->addrs.  Afterwards the literal
 * pool layout for the next pass is computed: the 32-bit pool is placed
 * right behind the program and the 64-bit pool behind that, each
 * aligned to its element size when non-empty.
 *
 * Returns 0 on success, -1 on failure.
 */
static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
			bool extra_pass, u32 stack_depth)
{
	int i, insn_count, lit32_size, lit64_size;

	/* Restart code and literal emission at their recorded origins */
	jit->lit32 = jit->lit32_start;
	jit->lit64 = jit->lit64_start;
	jit->prg = 0;
	jit->excnt = 0;

	bpf_jit_prologue(jit, stack_depth);
	if (bpf_set_addr(jit, 0) < 0)
		return -1;
	for (i = 0; i < fp->len; i += insn_count) {
		insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth);
		if (insn_count < 0)
			return -1;
		/* Next instruction address */
		if (bpf_set_addr(jit, i + insn_count) < 0)
			return -1;
	}
	bpf_jit_epilogue(jit, stack_depth);

	/* Recompute literal pool positions for the next pass */
	lit32_size = jit->lit32 - jit->lit32_start;
	lit64_size = jit->lit64 - jit->lit64_start;
	jit->lit32_start = jit->prg;
	if (lit32_size)
		jit->lit32_start = ALIGN(jit->lit32_start, 4);
	jit->lit64_start = jit->lit32_start + lit32_size;
	if (lit64_size)
		jit->lit64_start = ALIGN(jit->lit64_start, 8);
	jit->size = jit->lit64_start + lit64_size;
	jit->size_prg = jit->prg;

	if (WARN_ON_ONCE(fp->aux->extable &&
			 jit->excnt != fp->aux->num_exentries))
		/* Verifier bug - too many entries. */
		return -1;

	return 0;
}
1689
/*
 * Tell the BPF core that this JIT needs the verifier to insert explicit
 * zero-extension instructions after 32-bit operations.
 */
bool bpf_jit_needs_zext(void)
{
	return true;
}
1694
/* JIT state preserved across calls to bpf_int_jit_compile() for the extra pass */
struct s390_jit_data {
	struct bpf_binary_header *header;	/* Allocated program image */
	struct bpf_jit ctx;			/* Saved JIT context */
	int pass;				/* Number of the last completed pass */
};
1700
/*
 * Allocate the binary image for the program
 *
 * Layout: program + literal pools, alignment padding, exception table.
 * Returns the binary header, or NULL on allocation failure.
 */
static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
					       struct bpf_prog *fp)
{
	struct bpf_binary_header *header;
	u32 extable_size;
	u32 code_size;

	/* We need two entries per insn. */
	fp->aux->num_exentries *= 2;

	/* Pad the code so that the exception table is properly aligned */
	code_size = roundup(jit->size,
			    __alignof__(struct exception_table_entry));
	extable_size = fp->aux->num_exentries *
		sizeof(struct exception_table_entry);
	header = bpf_jit_binary_alloc(code_size + extable_size, &jit->prg_buf,
				      8, jit_fill_hole);
	if (!header)
		return NULL;
	/* The exception table lives directly behind the padded code */
	fp->aux->extable = (struct exception_table_entry *)
		(jit->prg_buf + code_size);
	return header;
}
1723
/*
 * Compile eBPF program "fp"
 *
 * Returns the JITed program on success; on any failure the original
 * (interpreted) program is returned instead.  For programs with
 * subprograms (fp->is_func), the JIT context is kept in
 * fp->aux->jit_data so that a later extra pass can patch in the final
 * call addresses.
 */
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 stack_depth = round_up(fp->aux->stack_depth, 8);
	struct bpf_prog *tmp, *orig_fp = fp;
	struct bpf_binary_header *header;
	struct s390_jit_data *jit_data;
	bool tmp_blinded = false;
	bool extra_pass = false;
	struct bpf_jit jit;
	int pass;

	if (!fp->jit_requested)
		return orig_fp;

	tmp = bpf_jit_blind_constants(fp);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_fp;
	if (tmp != fp) {
		tmp_blinded = true;
		fp = tmp;
	}

	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = orig_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.addrs) {
		/* Extra pass: reuse the saved context and program image */
		jit = jit_data->ctx;
		header = jit_data->header;
		extra_pass = true;
		pass = jit_data->pass + 1;
		goto skip_init_ctx;
	}

	memset(&jit, 0, sizeof(jit));
	jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
	if (jit.addrs == NULL) {
		fp = orig_fp;
		goto out;
	}
	/*
	 * Three initial passes:
	 *   - 1/2: Determine clobbered registers
	 *   - 3:   Calculate program size and addrs array
	 */
	for (pass = 1; pass <= 3; pass++) {
		if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
			fp = orig_fp;
			goto free_addrs;
		}
	}
	/*
	 * Final pass: Allocate and generate program
	 */
	header = bpf_jit_alloc(&jit, fp);
	if (!header) {
		fp = orig_fp;
		goto free_addrs;
	}
skip_init_ctx:
	if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
		bpf_jit_binary_free(header);
		fp = orig_fp;
		goto free_addrs;
	}
	if (bpf_jit_enable > 1) {
		bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
		print_fn_code(jit.prg_buf, jit.size_prg);
	}
	if (!fp->is_func || extra_pass) {
		bpf_jit_binary_lock_ro(header);
	} else {
		/* Keep state alive for the upcoming extra pass */
		jit_data->header = header;
		jit_data->ctx = jit;
		jit_data->pass = pass;
	}
	fp->bpf_func = (void *) jit.prg_buf;
	fp->jited = 1;
	fp->jited_len = jit.size;

	if (!fp->is_func || extra_pass) {
		/* addrs[0] is the prologue; line info starts at insn 0 */
		bpf_prog_fill_jited_linfo(fp, jit.addrs + 1);
free_addrs:
		kvfree(jit.addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(fp, fp == orig_fp ?
					   tmp : orig_fp);
	return fp;
}
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * BPF Jit compiler for s390.
   4 *
   5 * Minimum build requirements:
   6 *
   7 *  - HAVE_MARCH_Z196_FEATURES: laal, laalg
   8 *  - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
   9 *  - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
  10 *  - PACK_STACK
  11 *  - 64BIT
  12 *
  13 * Copyright IBM Corp. 2012,2015
  14 *
  15 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  16 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
  17 */
  18
  19#define KMSG_COMPONENT "bpf_jit"
  20#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  21
  22#include <linux/netdevice.h>
  23#include <linux/filter.h>
  24#include <linux/init.h>
  25#include <linux/bpf.h>
 
 
  26#include <asm/cacheflush.h>
  27#include <asm/dis.h>
  28#include <asm/facility.h>
  29#include <asm/nospec-branch.h>
  30#include <asm/set_memory.h>
  31#include "bpf_jit.h"
  32
/* Per-compilation JIT state */
struct bpf_jit {
	u32 seen;		/* Flags to remember seen eBPF instructions */
	u32 seen_reg[16];	/* Array to remember which registers are used */
	u32 *addrs;		/* Array with relative instruction addresses */
	u8 *prg_buf;		/* Start of program */
	int size;		/* Size of program and literal pool */
	int size_prg;		/* Size of program */
	int prg;		/* Current position in program */
	int lit_start;		/* Start of literal pool */
	int lit;		/* Current position in literal pool */
	int base_ip;		/* Base address for literal pool */
	int ret0_ip;		/* Address of return 0 */
	int exit_ip;		/* Address of exit */
	int r1_thunk_ip;	/* Address of expoline thunk for 'br %r1' */
	int r14_thunk_ip;	/* Address of expoline thunk for 'br %r14' */
	int tail_call_start;	/* Tail call start offset */
	int labels[1];		/* Labels for local jumps */
};
  51
#define BPF_SIZE_MAX	0xffff	/* Max size for program (16 bit branches) */

/* Flags for bpf_jit::seen */
#define SEEN_MEM	(1 << 0)	/* use mem[] for temporary storage */
#define SEEN_RET0	(1 << 1)	/* ret0_ip points to a valid return 0 */
#define SEEN_LITERAL	(1 << 2)	/* code uses literals */
#define SEEN_FUNC	(1 << 3)	/* calls C functions */
#define SEEN_TAIL_CALL	(1 << 4)	/* code uses tail calls */
#define SEEN_REG_AX	(1 << 5)	/* code uses constant blinding */
#define SEEN_STACK	(SEEN_FUNC | SEEN_MEM)

/*
 * s390 registers
 */
#define REG_W0		(MAX_BPF_JIT_REG + 0)	/* Work register 1 (even) */
#define REG_W1		(MAX_BPF_JIT_REG + 1)	/* Work register 2 (odd) */
#define REG_L		(MAX_BPF_JIT_REG + 2)	/* Literal pool register */
#define REG_15		(MAX_BPF_JIT_REG + 3)	/* Register 15 */
#define REG_0		REG_W0			/* Register 0 */
#define REG_1		REG_W1			/* Register 1 */
#define REG_2		BPF_REG_1		/* Register 2 */
#define REG_14		BPF_REG_0		/* Register 14 */
  73
/*
 * Mapping of BPF registers to s390 registers
 *
 * NOTE(review): must stay in sync with the REG_* aliases above
 * (e.g. REG_14 == BPF_REG_0 relies on [BPF_REG_0] = 14 here).
 */
static const int reg2hex[] = {
	/* Return code */
	[BPF_REG_0]	= 14,
	/* Function parameters */
	[BPF_REG_1]	= 2,
	[BPF_REG_2]	= 3,
	[BPF_REG_3]	= 4,
	[BPF_REG_4]	= 5,
	[BPF_REG_5]	= 6,
	/* Call saved registers */
	[BPF_REG_6]	= 7,
	[BPF_REG_7]	= 8,
	[BPF_REG_8]	= 9,
	[BPF_REG_9]	= 10,
	/* BPF stack pointer */
	[BPF_REG_FP]	= 13,
	/* Register for blinding */
	[BPF_REG_AX]	= 12,
	/* Work registers for s390x backend */
	[REG_W0]	= 0,
	[REG_W1]	= 1,
	[REG_L]		= 11,
	[REG_15]	= 15,
};
 101
 102static inline u32 reg(u32 dst_reg, u32 src_reg)
 103{
 104	return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
 105}
 106
 107static inline u32 reg_high(u32 reg)
 108{
 109	return reg2hex[reg] << 4;
 110}
 111
 112static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 113{
 114	u32 r1 = reg2hex[b1];
 115
 116	if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
 117		jit->seen_reg[r1] = 1;
 118}
 119
/* Record that BPF register b1 maps to a used s390 register */
#define REG_SET_SEEN(b1)					\
({								\
	reg_set_seen(jit, b1);					\
})

/* Whether the s390 register backing b1 has been marked as used */
#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
 126
 127/*
 128 * EMIT macros for code generation
 129 */
 130
 131#define _EMIT2(op)						\
 132({								\
 133	if (jit->prg_buf)					\
 134		*(u16 *) (jit->prg_buf + jit->prg) = op;	\
 135	jit->prg += 2;						\
 136})
 137
 138#define EMIT2(op, b1, b2)					\
 139({								\
 140	_EMIT2(op | reg(b1, b2));				\
 141	REG_SET_SEEN(b1);					\
 142	REG_SET_SEEN(b2);					\
 143})
 144
 145#define _EMIT4(op)						\
 146({								\
 147	if (jit->prg_buf)					\
 148		*(u32 *) (jit->prg_buf + jit->prg) = op;	\
 149	jit->prg += 4;						\
 150})
 151
 152#define EMIT4(op, b1, b2)					\
 153({								\
 154	_EMIT4(op | reg(b1, b2));				\
 155	REG_SET_SEEN(b1);					\
 156	REG_SET_SEEN(b2);					\
 157})
 158
 159#define EMIT4_RRF(op, b1, b2, b3)				\
 160({								\
 161	_EMIT4(op | reg_high(b3) << 8 | reg(b1, b2));		\
 162	REG_SET_SEEN(b1);					\
 163	REG_SET_SEEN(b2);					\
 164	REG_SET_SEEN(b3);					\
 165})
 166
 167#define _EMIT4_DISP(op, disp)					\
 168({								\
 169	unsigned int __disp = (disp) & 0xfff;			\
 170	_EMIT4(op | __disp);					\
 171})
 172
 173#define EMIT4_DISP(op, b1, b2, disp)				\
 174({								\
 175	_EMIT4_DISP(op | reg_high(b1) << 16 |			\
 176		    reg_high(b2) << 8, disp);			\
 177	REG_SET_SEEN(b1);					\
 178	REG_SET_SEEN(b2);					\
 179})
 180
 181#define EMIT4_IMM(op, b1, imm)					\
 182({								\
 183	unsigned int __imm = (imm) & 0xffff;			\
 184	_EMIT4(op | reg_high(b1) << 16 | __imm);		\
 185	REG_SET_SEEN(b1);					\
 186})
 187
 188#define EMIT4_PCREL(op, pcrel)					\
 189({								\
 190	long __pcrel = ((pcrel) >> 1) & 0xffff;			\
 191	_EMIT4(op | __pcrel);					\
 
 
 
 
 
 
 192})
 193
 194#define _EMIT6(op1, op2)					\
 195({								\
 196	if (jit->prg_buf) {					\
 197		*(u32 *) (jit->prg_buf + jit->prg) = op1;	\
 198		*(u16 *) (jit->prg_buf + jit->prg + 4) = op2;	\
 199	}							\
 200	jit->prg += 6;						\
 201})
 202
 203#define _EMIT6_DISP(op1, op2, disp)				\
 204({								\
 205	unsigned int __disp = (disp) & 0xfff;			\
 206	_EMIT6(op1 | __disp, op2);				\
 207})
 208
 209#define _EMIT6_DISP_LH(op1, op2, disp)				\
 210({								\
 211	u32 _disp = (u32) disp;					\
 212	unsigned int __disp_h = _disp & 0xff000;		\
 213	unsigned int __disp_l = _disp & 0x00fff;		\
 214	_EMIT6(op1 | __disp_l, op2 | __disp_h >> 4);		\
 215})
 216
 217#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp)		\
 218({								\
 219	_EMIT6_DISP_LH(op1 | reg(b1, b2) << 16 |		\
 220		       reg_high(b3) << 8, op2, disp);		\
 221	REG_SET_SEEN(b1);					\
 222	REG_SET_SEEN(b2);					\
 223	REG_SET_SEEN(b3);					\
 224})
 225
/*
 * Emit a 6-byte compare-and-branch to a position recorded in
 * jit->labels[]; the relative offset is in halfwords and "mask" is
 * the branch condition.
 */
#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask)	\
({								\
	int rel = (jit->labels[label] - jit->prg) >> 1;		\
	_EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff),	\
	       op2 | mask << 12);				\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})
 234
/*
 * Like EMIT6_PCREL_LABEL, but compares register b1 against an 8-bit
 * immediate; BUILD_BUG_ON rejects immediates that do not fit the field.
 */
#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask)	\
({								\
	int rel = (jit->labels[label] - jit->prg) >> 1;		\
	_EMIT6(op1 | (reg_high(b1) | mask) << 16 |		\
		(rel & 0xffff), op2 | (imm & 0xff) << 8);	\
	REG_SET_SEEN(b1);					\
	BUILD_BUG_ON(((unsigned long) imm) > 0xff);		\
})
 243
/*
 * Emit a 6-byte relative branch from eBPF instruction i to instruction
 * i + off + 1, using the resolved addrs[] table.  addrs[i + 1] - 6 is
 * the address of this branch itself, since branches are 6 bytes long.
 */
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)		\
({								\
	/* Branch instruction needs 6 bytes */			\
	int rel = (addrs[i + off + 1] - (addrs[i + 1] - 6)) / 2;\
	_EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), op2 | mask);	\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})
 252
/*
 * Emit a 6-byte RIL-format instruction with register b and a 32-bit
 * PC-relative target measured in halfwords; marks b as seen.
 */
#define EMIT6_PCREL_RILB(op, b, target)				\
({								\
	int rel = (target - jit->prg) / 2;			\
	_EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff);	\
})
 259
 260#define EMIT6_PCREL_RIL(op, target)				\
 261({								\
 262	int rel = (target - jit->prg) / 2;			\
 263	_EMIT6(op | rel >> 16, rel & 0xffff);			\
 
 
 
 
 
 264})
 265
/* Emit a 6-byte instruction with a full 32-bit immediate, split 16/16. */
#define _EMIT6_IMM(op, imm)					\
({								\
	unsigned int __imm = (imm);				\
	_EMIT6(op | (__imm >> 16), __imm & 0xffff);		\
})
 271
/* 32-bit-immediate instruction with register b1; marks b1 as seen. */
#define EMIT6_IMM(op, b1, imm)					\
({								\
	_EMIT6_IMM(op | reg_high(b1) << 16, imm);		\
	REG_SET_SEEN(b1);					\
})
 277
 
 
 
 
 
 
 
 
 
 
 278#define EMIT_CONST_U32(val)					\
 279({								\
 
 
 
 
 
 
 280	unsigned int ret;					\
 281	ret = jit->lit - jit->base_ip;				\
 282	jit->seen |= SEEN_LITERAL;				\
 283	if (jit->prg_buf)					\
 284		*(u32 *) (jit->prg_buf + jit->lit) = (u32) val;	\
 285	jit->lit += 4;						\
 286	ret;							\
 287})
 288
 289#define EMIT_CONST_U64(val)					\
 290({								\
 291	unsigned int ret;					\
 292	ret = jit->lit - jit->base_ip;				\
 293	jit->seen |= SEEN_LITERAL;				\
 294	if (jit->prg_buf)					\
 295		*(u64 *) (jit->prg_buf + jit->lit) = (u64) val;	\
 296	jit->lit += 8;						\
 297	ret;							\
 298})
 299
/*
 * Zero-extend the 32-bit value in b1 to 64 bits (llgfr), unless the
 * verifier already inserted an explicit zero-extension instruction.
 */
#define EMIT_ZERO(b1)						\
({								\
	if (!fp->aux->verifier_zext) {				\
		/* llgfr %dst,%dst (zero extend to 64 bit) */	\
		EMIT4(0xb9160000, b1, b1);			\
		REG_SET_SEEN(b1);				\
	}							\
})
 308
 309/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 310 * Fill whole space with illegal instructions
 311 */
 312static void jit_fill_hole(void *area, unsigned int size)
 313{
 314	memset(area, 0, size);
 315}
 316
 317/*
 318 * Save registers from "rs" (register start) to "re" (register end) on stack
 319 */
 320static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
 321{
 322	u32 off = STK_OFF_R6 + (rs - 6) * 8;
 323
 324	if (rs == re)
 325		/* stg %rs,off(%r15) */
 326		_EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
 327	else
 328		/* stmg %rs,%re,off(%r15) */
 329		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
 330}
 331
/*
 * Restore registers from "rs" (register start) to "re" (register end) from stack
 */
 335static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
 336{
 337	u32 off = STK_OFF_R6 + (rs - 6) * 8;
 338
 339	if (jit->seen & SEEN_STACK)
 340		off += STK_OFF + stack_depth;
 341
 342	if (rs == re)
 343		/* lg %rs,off(%r15) */
 344		_EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
 345	else
 346		/* lmg %rs,%re,off(%r15) */
 347		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
 348}
 349
 350/*
 351 * Return first seen register (from start)
 352 */
 353static int get_start(struct bpf_jit *jit, int start)
 354{
 355	int i;
 356
 357	for (i = start; i <= 15; i++) {
 358		if (jit->seen_reg[i])
 359			return i;
 360	}
 361	return 0;
 362}
 363
/*
 * Return the last register of the chunk starting at "start": scanning
 * stops at the first gap of two consecutive unseen registers (gap >= 2)
 */
 367static int get_end(struct bpf_jit *jit, int start)
 368{
 369	int i;
 370
 371	for (i = start; i < 15; i++) {
 372		if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
 373			return i - 1;
 374	}
 375	return jit->seen_reg[15] ? 15 : 14;
 376}
 377
 378#define REGS_SAVE	1
 379#define REGS_RESTORE	0
 380/*
 381 * Save and restore clobbered registers (6-15) on stack.
 382 * We save/restore registers in chunks with gap >= 2 registers.
 383 */
 384static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
 385{
 
 
 386
 387	int re = 6, rs;
 
 
 
 
 
 
 
 388
 389	do {
 390		rs = get_start(jit, re);
 391		if (!rs)
 392			break;
 393		re = get_end(jit, rs + 1);
 394		if (op == REGS_SAVE)
 395			save_regs(jit, rs, re);
 396		else
 397			restore_regs(jit, rs, re, stack_depth);
 398		re++;
 399	} while (re <= 15);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 400}
 401
 402/*
 403 * Emit function prologue
 404 *
 405 * Save registers and create stack frame if necessary.
 406 * See stack frame layout desription in "bpf_jit.h"!
 407 */
static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
{
	if (jit->seen & SEEN_TAIL_CALL) {
		/*
		 * Clear the tail call counter in the frame:
		 * xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15)
		 */
		_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
	} else {
		/*
		 * j tail_call_start: NOP if no tail calls are used.
		 * The 2-byte pad keeps both prologue variants 6 bytes
		 * long (4-byte jump + 2-byte pad == 6-byte xc).
		 */
		EMIT4_PCREL(0xa7f40000, 6);
		_EMIT2(0);
	}
	/* Tail calls have to skip above initialization */
	jit->tail_call_start = jit->prg;
	/* Save registers */
	save_restore_regs(jit, REGS_SAVE, stack_depth);
	/* Setup literal pool */
	if (jit->seen & SEEN_LITERAL) {
		/* basr %r13,0 : %r13 (REG_L) becomes the literal pool base */
		EMIT2(0x0d00, REG_L, REG_0);
		jit->base_ip = jit->prg;
	}
	/* Setup stack and backchain */
	if (jit->seen & SEEN_STACK) {
		if (jit->seen & SEEN_FUNC)
			/* lgr %w1,%r15 (backchain) */
			EMIT4(0xb9040000, REG_W1, REG_15);
		/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
		/* aghi %r15,-(STK_OFF + stack_depth) : allocate the frame */
		EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
		if (jit->seen & SEEN_FUNC)
			/* stg %w1,152(%r15) (backchain) */
			EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
				      REG_15, 152);
	}
}
 443
 444/*
 445 * Function epilogue
 446 */
static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
{
	/*
	 * Shared "return 0" target for branches that bail out.
	 * NOTE(review): relies on jit->ret0_ip / SEEN_RET0 — confirm the
	 * ret0_ip member still exists in struct bpf_jit (it is not
	 * visible in the struct declaration at the top of the file).
	 */
	if (jit->seen & SEEN_RET0) {
		jit->ret0_ip = jit->prg;
		/* lghi %b0,0 */
		EMIT4_IMM(0xa7090000, BPF_REG_0, 0);
	}
	jit->exit_ip = jit->prg;
	/* Load exit code: lgr %r2,%b0 */
	EMIT4(0xb9040000, REG_2, BPF_REG_0);
	/* Restore registers */
	save_restore_regs(jit, REGS_RESTORE, stack_depth);
	/*
	 * With expolines enabled, the indirect return goes through a
	 * thunk (Spectre v2 / branch target injection mitigation).
	 */
	if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
		jit->r14_thunk_ip = jit->prg;
		/* Generate __s390_indirect_jump_r14 thunk */
		if (test_facility(35)) {
			/* exrl %r0,.+10 : execute the "br %r14" below */
			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
		} else {
			/* larl %r1,.+14 */
			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
			/* ex 0,0(%r1) */
			EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
		}
		/* j . : stop speculative execution past the thunk */
		EMIT4_PCREL(0xa7f40000, 0);
	}
	/* br %r14 */
	_EMIT2(0x07fe);

	/* Helper calls use an indirect "br %r1"; emit its thunk too */
	if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable &&
	    (jit->seen & SEEN_FUNC)) {
		jit->r1_thunk_ip = jit->prg;
		/* Generate __s390_indirect_jump_r1 thunk */
		if (test_facility(35)) {
			/* exrl %r0,.+10 */
			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
			/* j . */
			EMIT4_PCREL(0xa7f40000, 0);
			/* br %r1 */
			_EMIT2(0x07f1);
		} else {
			/* ex 0,S390_lowcore.br_r1_trampoline */
			EMIT4_DISP(0x44000000, REG_0, REG_0,
				   offsetof(struct lowcore, br_r1_trampoline));
			/* j . */
			EMIT4_PCREL(0xa7f40000, 0);
		}
	}
}
 498
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 499/*
 500 * Compile one eBPF instruction into s390x code
 501 *
 502 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
 503 * stack space for the large switch statement.
 504 */
 505static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 506				 int i, bool extra_pass)
 507{
 508	struct bpf_insn *insn = &fp->insnsi[i];
 509	int jmp_off, last, insn_count = 1;
 510	u32 dst_reg = insn->dst_reg;
 511	u32 src_reg = insn->src_reg;
 
 512	u32 *addrs = jit->addrs;
 513	s32 imm = insn->imm;
 514	s16 off = insn->off;
 
 515	unsigned int mask;
 
 
 
 
 
 
 516
 517	if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
 518		jit->seen |= SEEN_REG_AX;
 519	switch (insn->code) {
 520	/*
 521	 * BPF_MOV
 522	 */
 523	case BPF_ALU | BPF_MOV | BPF_X: /* dst = (u32) src */
 524		/* llgfr %dst,%src */
 525		EMIT4(0xb9160000, dst_reg, src_reg);
 526		if (insn_is_zext(&insn[1]))
 527			insn_count = 2;
 528		break;
 529	case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
 530		/* lgr %dst,%src */
 531		EMIT4(0xb9040000, dst_reg, src_reg);
 532		break;
 533	case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
 534		/* llilf %dst,imm */
 535		EMIT6_IMM(0xc00f0000, dst_reg, imm);
 536		if (insn_is_zext(&insn[1]))
 537			insn_count = 2;
 538		break;
 539	case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
 540		/* lgfi %dst,imm */
 541		EMIT6_IMM(0xc0010000, dst_reg, imm);
 542		break;
 543	/*
 544	 * BPF_LD 64
 545	 */
 546	case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
 547	{
 548		/* 16 byte instruction that uses two 'struct bpf_insn' */
 549		u64 imm64;
 550
 551		imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
 552		/* lg %dst,<d(imm)>(%l) */
 553		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, REG_0, REG_L,
 554			      EMIT_CONST_U64(imm64));
 555		insn_count = 2;
 556		break;
 557	}
 558	/*
 559	 * BPF_ADD
 560	 */
 561	case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
 562		/* ar %dst,%src */
 563		EMIT2(0x1a00, dst_reg, src_reg);
 564		EMIT_ZERO(dst_reg);
 565		break;
 566	case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
 567		/* agr %dst,%src */
 568		EMIT4(0xb9080000, dst_reg, src_reg);
 569		break;
 570	case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
 571		if (!imm)
 572			break;
 573		/* alfi %dst,imm */
 574		EMIT6_IMM(0xc20b0000, dst_reg, imm);
 575		EMIT_ZERO(dst_reg);
 576		break;
 577	case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
 578		if (!imm)
 579			break;
 580		/* agfi %dst,imm */
 581		EMIT6_IMM(0xc2080000, dst_reg, imm);
 582		break;
 583	/*
 584	 * BPF_SUB
 585	 */
 586	case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
 587		/* sr %dst,%src */
 588		EMIT2(0x1b00, dst_reg, src_reg);
 589		EMIT_ZERO(dst_reg);
 590		break;
 591	case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
 592		/* sgr %dst,%src */
 593		EMIT4(0xb9090000, dst_reg, src_reg);
 594		break;
 595	case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
 596		if (!imm)
 597			break;
 598		/* alfi %dst,-imm */
 599		EMIT6_IMM(0xc20b0000, dst_reg, -imm);
 600		EMIT_ZERO(dst_reg);
 601		break;
 602	case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
 603		if (!imm)
 604			break;
 605		/* agfi %dst,-imm */
 606		EMIT6_IMM(0xc2080000, dst_reg, -imm);
 607		break;
 608	/*
 609	 * BPF_MUL
 610	 */
 611	case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
 612		/* msr %dst,%src */
 613		EMIT4(0xb2520000, dst_reg, src_reg);
 614		EMIT_ZERO(dst_reg);
 615		break;
 616	case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
 617		/* msgr %dst,%src */
 618		EMIT4(0xb90c0000, dst_reg, src_reg);
 619		break;
 620	case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
 621		if (imm == 1)
 622			break;
 623		/* msfi %r5,imm */
 624		EMIT6_IMM(0xc2010000, dst_reg, imm);
 625		EMIT_ZERO(dst_reg);
 626		break;
 627	case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
 628		if (imm == 1)
 629			break;
 630		/* msgfi %dst,imm */
 631		EMIT6_IMM(0xc2000000, dst_reg, imm);
 632		break;
 633	/*
 634	 * BPF_DIV / BPF_MOD
 635	 */
 636	case BPF_ALU | BPF_DIV | BPF_X: /* dst = (u32) dst / (u32) src */
 637	case BPF_ALU | BPF_MOD | BPF_X: /* dst = (u32) dst % (u32) src */
 638	{
 639		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 640
 641		/* lhi %w0,0 */
 642		EMIT4_IMM(0xa7080000, REG_W0, 0);
 643		/* lr %w1,%dst */
 644		EMIT2(0x1800, REG_W1, dst_reg);
 645		/* dlr %w0,%src */
 646		EMIT4(0xb9970000, REG_W0, src_reg);
 647		/* llgfr %dst,%rc */
 648		EMIT4(0xb9160000, dst_reg, rc_reg);
 649		if (insn_is_zext(&insn[1]))
 650			insn_count = 2;
 651		break;
 652	}
 653	case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
 654	case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
 655	{
 656		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 657
 658		/* lghi %w0,0 */
 659		EMIT4_IMM(0xa7090000, REG_W0, 0);
 660		/* lgr %w1,%dst */
 661		EMIT4(0xb9040000, REG_W1, dst_reg);
 662		/* dlgr %w0,%dst */
 663		EMIT4(0xb9870000, REG_W0, src_reg);
 664		/* lgr %dst,%rc */
 665		EMIT4(0xb9040000, dst_reg, rc_reg);
 666		break;
 667	}
 668	case BPF_ALU | BPF_DIV | BPF_K: /* dst = (u32) dst / (u32) imm */
 669	case BPF_ALU | BPF_MOD | BPF_K: /* dst = (u32) dst % (u32) imm */
 670	{
 671		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 672
 673		if (imm == 1) {
 674			if (BPF_OP(insn->code) == BPF_MOD)
 675				/* lhgi %dst,0 */
 676				EMIT4_IMM(0xa7090000, dst_reg, 0);
 677			break;
 678		}
 679		/* lhi %w0,0 */
 680		EMIT4_IMM(0xa7080000, REG_W0, 0);
 681		/* lr %w1,%dst */
 682		EMIT2(0x1800, REG_W1, dst_reg);
 683		/* dl %w0,<d(imm)>(%l) */
 684		EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
 685			      EMIT_CONST_U32(imm));
 
 
 
 
 
 
 
 
 
 686		/* llgfr %dst,%rc */
 687		EMIT4(0xb9160000, dst_reg, rc_reg);
 688		if (insn_is_zext(&insn[1]))
 689			insn_count = 2;
 690		break;
 691	}
 692	case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
 693	case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
 694	{
 695		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 696
 697		if (imm == 1) {
 698			if (BPF_OP(insn->code) == BPF_MOD)
 699				/* lhgi %dst,0 */
 700				EMIT4_IMM(0xa7090000, dst_reg, 0);
 701			break;
 702		}
 703		/* lghi %w0,0 */
 704		EMIT4_IMM(0xa7090000, REG_W0, 0);
 705		/* lgr %w1,%dst */
 706		EMIT4(0xb9040000, REG_W1, dst_reg);
 707		/* dlg %w0,<d(imm)>(%l) */
 708		EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
 709			      EMIT_CONST_U64(imm));
 
 
 
 
 
 
 
 
 
 710		/* lgr %dst,%rc */
 711		EMIT4(0xb9040000, dst_reg, rc_reg);
 712		break;
 713	}
 714	/*
 715	 * BPF_AND
 716	 */
 717	case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
 718		/* nr %dst,%src */
 719		EMIT2(0x1400, dst_reg, src_reg);
 720		EMIT_ZERO(dst_reg);
 721		break;
 722	case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
 723		/* ngr %dst,%src */
 724		EMIT4(0xb9800000, dst_reg, src_reg);
 725		break;
 726	case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
 727		/* nilf %dst,imm */
 728		EMIT6_IMM(0xc00b0000, dst_reg, imm);
 729		EMIT_ZERO(dst_reg);
 730		break;
 731	case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
 732		/* ng %dst,<d(imm)>(%l) */
 733		EMIT6_DISP_LH(0xe3000000, 0x0080, dst_reg, REG_0, REG_L,
 734			      EMIT_CONST_U64(imm));
 
 
 
 
 
 
 
 
 
 
 735		break;
 736	/*
 737	 * BPF_OR
 738	 */
 739	case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
 740		/* or %dst,%src */
 741		EMIT2(0x1600, dst_reg, src_reg);
 742		EMIT_ZERO(dst_reg);
 743		break;
 744	case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
 745		/* ogr %dst,%src */
 746		EMIT4(0xb9810000, dst_reg, src_reg);
 747		break;
 748	case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
 749		/* oilf %dst,imm */
 750		EMIT6_IMM(0xc00d0000, dst_reg, imm);
 751		EMIT_ZERO(dst_reg);
 752		break;
 753	case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
 754		/* og %dst,<d(imm)>(%l) */
 755		EMIT6_DISP_LH(0xe3000000, 0x0081, dst_reg, REG_0, REG_L,
 756			      EMIT_CONST_U64(imm));
 
 
 
 
 
 
 
 
 
 
 757		break;
 758	/*
 759	 * BPF_XOR
 760	 */
 761	case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
 762		/* xr %dst,%src */
 763		EMIT2(0x1700, dst_reg, src_reg);
 764		EMIT_ZERO(dst_reg);
 765		break;
 766	case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
 767		/* xgr %dst,%src */
 768		EMIT4(0xb9820000, dst_reg, src_reg);
 769		break;
 770	case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
 771		if (!imm)
 772			break;
 773		/* xilf %dst,imm */
 774		EMIT6_IMM(0xc0070000, dst_reg, imm);
 775		EMIT_ZERO(dst_reg);
 776		break;
 777	case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
 778		/* xg %dst,<d(imm)>(%l) */
 779		EMIT6_DISP_LH(0xe3000000, 0x0082, dst_reg, REG_0, REG_L,
 780			      EMIT_CONST_U64(imm));
 
 
 
 
 
 
 
 
 
 
 781		break;
 782	/*
 783	 * BPF_LSH
 784	 */
 785	case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
 786		/* sll %dst,0(%src) */
 787		EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
 788		EMIT_ZERO(dst_reg);
 789		break;
 790	case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
 791		/* sllg %dst,%dst,0(%src) */
 792		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
 793		break;
 794	case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
 795		if (imm == 0)
 796			break;
 797		/* sll %dst,imm(%r0) */
 798		EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
 799		EMIT_ZERO(dst_reg);
 800		break;
 801	case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
 802		if (imm == 0)
 803			break;
 804		/* sllg %dst,%dst,imm(%r0) */
 805		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
 806		break;
 807	/*
 808	 * BPF_RSH
 809	 */
 810	case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
 811		/* srl %dst,0(%src) */
 812		EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
 813		EMIT_ZERO(dst_reg);
 814		break;
 815	case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
 816		/* srlg %dst,%dst,0(%src) */
 817		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
 818		break;
 819	case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
 820		if (imm == 0)
 821			break;
 822		/* srl %dst,imm(%r0) */
 823		EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
 824		EMIT_ZERO(dst_reg);
 825		break;
 826	case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
 827		if (imm == 0)
 828			break;
 829		/* srlg %dst,%dst,imm(%r0) */
 830		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
 831		break;
 832	/*
 833	 * BPF_ARSH
 834	 */
 835	case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */
 836		/* sra %dst,%dst,0(%src) */
 837		EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0);
 838		EMIT_ZERO(dst_reg);
 839		break;
 840	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
 841		/* srag %dst,%dst,0(%src) */
 842		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
 843		break;
 844	case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
 845		if (imm == 0)
 846			break;
 847		/* sra %dst,imm(%r0) */
 848		EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
 849		EMIT_ZERO(dst_reg);
 850		break;
 851	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
 852		if (imm == 0)
 853			break;
 854		/* srag %dst,%dst,imm(%r0) */
 855		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
 856		break;
 857	/*
 858	 * BPF_NEG
 859	 */
 860	case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
 861		/* lcr %dst,%dst */
 862		EMIT2(0x1300, dst_reg, dst_reg);
 863		EMIT_ZERO(dst_reg);
 864		break;
 865	case BPF_ALU64 | BPF_NEG: /* dst = -dst */
 866		/* lcgr %dst,%dst */
 867		EMIT4(0xb9030000, dst_reg, dst_reg);
 868		break;
 869	/*
 870	 * BPF_FROM_BE/LE
 871	 */
 872	case BPF_ALU | BPF_END | BPF_FROM_BE:
 873		/* s390 is big endian, therefore only clear high order bytes */
 874		switch (imm) {
 875		case 16: /* dst = (u16) cpu_to_be16(dst) */
 876			/* llghr %dst,%dst */
 877			EMIT4(0xb9850000, dst_reg, dst_reg);
 878			if (insn_is_zext(&insn[1]))
 879				insn_count = 2;
 880			break;
 881		case 32: /* dst = (u32) cpu_to_be32(dst) */
 882			if (!fp->aux->verifier_zext)
 883				/* llgfr %dst,%dst */
 884				EMIT4(0xb9160000, dst_reg, dst_reg);
 885			break;
 886		case 64: /* dst = (u64) cpu_to_be64(dst) */
 887			break;
 888		}
 889		break;
 890	case BPF_ALU | BPF_END | BPF_FROM_LE:
 891		switch (imm) {
 892		case 16: /* dst = (u16) cpu_to_le16(dst) */
 893			/* lrvr %dst,%dst */
 894			EMIT4(0xb91f0000, dst_reg, dst_reg);
 895			/* srl %dst,16(%r0) */
 896			EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
 897			/* llghr %dst,%dst */
 898			EMIT4(0xb9850000, dst_reg, dst_reg);
 899			if (insn_is_zext(&insn[1]))
 900				insn_count = 2;
 901			break;
 902		case 32: /* dst = (u32) cpu_to_le32(dst) */
 903			/* lrvr %dst,%dst */
 904			EMIT4(0xb91f0000, dst_reg, dst_reg);
 905			if (!fp->aux->verifier_zext)
 906				/* llgfr %dst,%dst */
 907				EMIT4(0xb9160000, dst_reg, dst_reg);
 908			break;
 909		case 64: /* dst = (u64) cpu_to_le64(dst) */
 910			/* lrvgr %dst,%dst */
 911			EMIT4(0xb90f0000, dst_reg, dst_reg);
 912			break;
 913		}
 914		break;
 915	/*
 916	 * BPF_ST(X)
 917	 */
 918	case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
 919		/* stcy %src,off(%dst) */
 920		EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
 921		jit->seen |= SEEN_MEM;
 922		break;
 923	case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
 924		/* sthy %src,off(%dst) */
 925		EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
 926		jit->seen |= SEEN_MEM;
 927		break;
 928	case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
 929		/* sty %src,off(%dst) */
 930		EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
 931		jit->seen |= SEEN_MEM;
 932		break;
 933	case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
 934		/* stg %src,off(%dst) */
 935		EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
 936		jit->seen |= SEEN_MEM;
 937		break;
 938	case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
 939		/* lhi %w0,imm */
 940		EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
 941		/* stcy %w0,off(dst) */
 942		EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
 943		jit->seen |= SEEN_MEM;
 944		break;
 945	case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
 946		/* lhi %w0,imm */
 947		EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
 948		/* sthy %w0,off(dst) */
 949		EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
 950		jit->seen |= SEEN_MEM;
 951		break;
 952	case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
 953		/* llilf %w0,imm  */
 954		EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
 955		/* sty %w0,off(%dst) */
 956		EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
 957		jit->seen |= SEEN_MEM;
 958		break;
 959	case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
 960		/* lgfi %w0,imm */
 961		EMIT6_IMM(0xc0010000, REG_W0, imm);
 962		/* stg %w0,off(%dst) */
 963		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
 964		jit->seen |= SEEN_MEM;
 965		break;
 966	/*
 967	 * BPF_STX XADD (atomic_add)
 968	 */
 969	case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
 970		/* laal %w0,%src,off(%dst) */
 971		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W0, src_reg,
 972			      dst_reg, off);
 973		jit->seen |= SEEN_MEM;
 974		break;
 975	case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
 976		/* laalg %w0,%src,off(%dst) */
 977		EMIT6_DISP_LH(0xeb000000, 0x00ea, REG_W0, src_reg,
 978			      dst_reg, off);
 979		jit->seen |= SEEN_MEM;
 980		break;
 981	/*
 982	 * BPF_LDX
 983	 */
 984	case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
 
 985		/* llgc %dst,0(off,%src) */
 986		EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
 987		jit->seen |= SEEN_MEM;
 988		if (insn_is_zext(&insn[1]))
 989			insn_count = 2;
 990		break;
 991	case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
 
 992		/* llgh %dst,0(off,%src) */
 993		EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
 994		jit->seen |= SEEN_MEM;
 995		if (insn_is_zext(&insn[1]))
 996			insn_count = 2;
 997		break;
 998	case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
 
 999		/* llgf %dst,off(%src) */
1000		jit->seen |= SEEN_MEM;
1001		EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
1002		if (insn_is_zext(&insn[1]))
1003			insn_count = 2;
1004		break;
1005	case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
 
1006		/* lg %dst,0(off,%src) */
1007		jit->seen |= SEEN_MEM;
1008		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
1009		break;
1010	/*
1011	 * BPF_JMP / CALL
1012	 */
1013	case BPF_JMP | BPF_CALL:
1014	{
1015		u64 func;
1016		bool func_addr_fixed;
1017		int ret;
1018
1019		ret = bpf_jit_get_func_addr(fp, insn, extra_pass,
1020					    &func, &func_addr_fixed);
1021		if (ret < 0)
1022			return -1;
1023
1024		REG_SET_SEEN(BPF_REG_5);
1025		jit->seen |= SEEN_FUNC;
1026		/* lg %w1,<d(imm)>(%l) */
1027		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
1028			      EMIT_CONST_U64(func));
1029		if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
1030			/* brasl %r14,__s390_indirect_jump_r1 */
1031			EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
1032		} else {
1033			/* basr %r14,%w1 */
1034			EMIT2(0x0d00, REG_14, REG_W1);
1035		}
1036		/* lgr %b0,%r2: load return value into %b0 */
1037		EMIT4(0xb9040000, BPF_REG_0, REG_2);
1038		break;
1039	}
1040	case BPF_JMP | BPF_TAIL_CALL:
1041		/*
1042		 * Implicit input:
1043		 *  B1: pointer to ctx
1044		 *  B2: pointer to bpf_array
1045		 *  B3: index in bpf_array
1046		 */
1047		jit->seen |= SEEN_TAIL_CALL;
1048
1049		/*
1050		 * if (index >= array->map.max_entries)
1051		 *         goto out;
1052		 */
1053
1054		/* llgf %w1,map.max_entries(%b2) */
1055		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
1056			      offsetof(struct bpf_array, map.max_entries));
1057		/* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
1058		EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
1059				  REG_W1, 0, 0xa);
 
 
 
 
 
 
 
 
1060
1061		/*
1062		 * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
1063		 *         goto out;
1064		 */
1065
1066		if (jit->seen & SEEN_STACK)
1067			off = STK_OFF_TCCNT + STK_OFF + fp->aux->stack_depth;
1068		else
1069			off = STK_OFF_TCCNT;
1070		/* lhi %w0,1 */
1071		EMIT4_IMM(0xa7080000, REG_W0, 1);
1072		/* laal %w1,%w0,off(%r15) */
1073		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
1074		/* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
1075		EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
1076				      MAX_TAIL_CALL_CNT, 0, 0x2);
 
 
 
 
 
 
 
1077
1078		/*
1079		 * prog = array->ptrs[index];
1080		 * if (prog == NULL)
1081		 *         goto out;
1082		 */
1083
1084		/* llgfr %r1,%b3: %r1 = (u32) index */
1085		EMIT4(0xb9160000, REG_1, BPF_REG_3);
1086		/* sllg %r1,%r1,3: %r1 *= 8 */
1087		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
1088		/* lg %r1,prog(%b2,%r1) */
1089		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
1090			      REG_1, offsetof(struct bpf_array, ptrs));
1091		/* clgij %r1,0,0x8,label0 */
1092		EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8);
 
 
 
 
 
1093
1094		/*
1095		 * Restore registers before calling function
1096		 */
1097		save_restore_regs(jit, REGS_RESTORE, fp->aux->stack_depth);
1098
1099		/*
1100		 * goto *(prog->bpf_func + tail_call_start);
1101		 */
1102
1103		/* lg %r1,bpf_func(%r1) */
1104		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
1105			      offsetof(struct bpf_prog, bpf_func));
1106		/* bc 0xf,tail_call_start(%r1) */
1107		_EMIT4(0x47f01000 + jit->tail_call_start);
1108		/* out: */
1109		jit->labels[0] = jit->prg;
1110		break;
1111	case BPF_JMP | BPF_EXIT: /* return b0 */
1112		last = (i == fp->len - 1) ? 1 : 0;
1113		if (last && !(jit->seen & SEEN_RET0))
1114			break;
1115		/* j <exit> */
1116		EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
 
 
 
 
1117		break;
1118	/*
1119	 * Branch relative (number of skipped instructions) to offset on
1120	 * condition.
1121	 *
1122	 * Condition code to mask mapping:
1123	 *
1124	 * CC | Description	   | Mask
1125	 * ------------------------------
1126	 * 0  | Operands equal	   |	8
1127	 * 1  | First operand low  |	4
1128	 * 2  | First operand high |	2
1129	 * 3  | Unused		   |	1
1130	 *
1131	 * For s390x relative branches: ip = ip + off_bytes
1132	 * For BPF relative branches:	insn = insn + off_insns + 1
1133	 *
1134	 * For example for s390x with offset 0 we jump to the branch
1135	 * instruction itself (loop) and for BPF with offset 0 we
1136	 * branch to the instruction behind the branch.
1137	 */
1138	case BPF_JMP | BPF_JA: /* if (true) */
1139		mask = 0xf000; /* j */
1140		goto branch_oc;
1141	case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
1142	case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */
1143		mask = 0x2000; /* jh */
1144		goto branch_ks;
1145	case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
1146	case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */
1147		mask = 0x4000; /* jl */
1148		goto branch_ks;
1149	case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
1150	case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */
1151		mask = 0xa000; /* jhe */
1152		goto branch_ks;
1153	case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
1154	case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */
1155		mask = 0xc000; /* jle */
1156		goto branch_ks;
1157	case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
1158	case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */
1159		mask = 0x2000; /* jh */
1160		goto branch_ku;
1161	case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
1162	case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */
1163		mask = 0x4000; /* jl */
1164		goto branch_ku;
1165	case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
1166	case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */
1167		mask = 0xa000; /* jhe */
1168		goto branch_ku;
1169	case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
1170	case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */
1171		mask = 0xc000; /* jle */
1172		goto branch_ku;
1173	case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
1174	case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */
1175		mask = 0x7000; /* jne */
1176		goto branch_ku;
1177	case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
1178	case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */
1179		mask = 0x8000; /* je */
1180		goto branch_ku;
1181	case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
1182	case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */
1183		mask = 0x7000; /* jnz */
1184		if (BPF_CLASS(insn->code) == BPF_JMP32) {
1185			/* llilf %w1,imm (load zero extend imm) */
1186			EMIT6_IMM(0xc00f0000, REG_W1, imm);
1187			/* nr %w1,%dst */
1188			EMIT2(0x1400, REG_W1, dst_reg);
1189		} else {
1190			/* lgfi %w1,imm (load sign extend imm) */
1191			EMIT6_IMM(0xc0010000, REG_W1, imm);
1192			/* ngr %w1,%dst */
1193			EMIT4(0xb9800000, REG_W1, dst_reg);
1194		}
1195		goto branch_oc;
1196
1197	case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
1198	case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */
1199		mask = 0x2000; /* jh */
1200		goto branch_xs;
1201	case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
1202	case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */
1203		mask = 0x4000; /* jl */
1204		goto branch_xs;
1205	case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
1206	case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */
1207		mask = 0xa000; /* jhe */
1208		goto branch_xs;
1209	case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
1210	case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */
1211		mask = 0xc000; /* jle */
1212		goto branch_xs;
1213	case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
1214	case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */
1215		mask = 0x2000; /* jh */
1216		goto branch_xu;
1217	case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
1218	case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */
1219		mask = 0x4000; /* jl */
1220		goto branch_xu;
1221	case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
1222	case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */
1223		mask = 0xa000; /* jhe */
1224		goto branch_xu;
1225	case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
1226	case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */
1227		mask = 0xc000; /* jle */
1228		goto branch_xu;
1229	case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
1230	case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */
1231		mask = 0x7000; /* jne */
1232		goto branch_xu;
1233	case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
1234	case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */
1235		mask = 0x8000; /* je */
1236		goto branch_xu;
1237	case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
1238	case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */
1239	{
1240		bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1241
1242		mask = 0x7000; /* jnz */
1243		/* nrk or ngrk %w1,%dst,%src */
1244		EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000),
1245			  REG_W1, dst_reg, src_reg);
1246		goto branch_oc;
1247branch_ks:
1248		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1249		/* lgfi %w1,imm (load sign extend imm) */
1250		EMIT6_IMM(0xc0010000, REG_W1, imm);
1251		/* crj or cgrj %dst,%w1,mask,off */
1252		EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
1253			    dst_reg, REG_W1, i, off, mask);
 
 
 
 
 
 
 
 
1254		break;
1255branch_ku:
1256		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1257		/* lgfi %w1,imm (load sign extend imm) */
1258		EMIT6_IMM(0xc0010000, REG_W1, imm);
1259		/* clrj or clgrj %dst,%w1,mask,off */
1260		EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
1261			    dst_reg, REG_W1, i, off, mask);
1262		break;
1263branch_xs:
1264		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1265		/* crj or cgrj %dst,%src,mask,off */
1266		EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
1267			    dst_reg, src_reg, i, off, mask);
 
 
 
 
 
 
 
 
 
 
 
 
1268		break;
1269branch_xu:
1270		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1271		/* clrj or clgrj %dst,%src,mask,off */
1272		EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
1273			    dst_reg, src_reg, i, off, mask);
 
 
 
 
 
 
 
 
 
 
 
 
1274		break;
1275branch_oc:
1276		/* brc mask,jmp_off (branch instruction needs 4 bytes) */
1277		jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4);
1278		EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off);
 
 
 
 
 
 
 
1279		break;
1280	}
1281	default: /* too complex, give up */
1282		pr_err("Unknown opcode %02x\n", insn->code);
1283		return -1;
1284	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1285	return insn_count;
1286}
1287
1288/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1289 * Compile eBPF program into s390x code
1290 */
1291static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
1292			bool extra_pass)
1293{
1294	int i, insn_count;
1295
1296	jit->lit = jit->lit_start;
 
1297	jit->prg = 0;
 
1298
1299	bpf_jit_prologue(jit, fp->aux->stack_depth);
 
 
1300	for (i = 0; i < fp->len; i += insn_count) {
1301		insn_count = bpf_jit_insn(jit, fp, i, extra_pass);
1302		if (insn_count < 0)
1303			return -1;
1304		/* Next instruction address */
1305		jit->addrs[i + insn_count] = jit->prg;
 
1306	}
1307	bpf_jit_epilogue(jit, fp->aux->stack_depth);
1308
1309	jit->lit_start = jit->prg;
1310	jit->size = jit->lit;
 
 
 
 
 
 
 
1311	jit->size_prg = jit->prg;
 
 
 
 
 
 
1312	return 0;
1313}
1314
/*
 * Returning true requests that the BPF core/verifier insert explicit
 * zero-extension instructions for 32-bit sub-register writes, rather
 * than relying on this JIT to zero the upper 32 bits implicitly.
 */
bool bpf_jit_needs_zext(void)
{
	return true;
}
1319
/*
 * State kept in fp->aux->jit_data between JIT invocations so that
 * bpf_int_jit_compile() can resume with an extra pass (see the
 * jit_data->ctx.addrs check there).
 */
struct s390_jit_data {
	struct bpf_binary_header *header;	/* Allocated binary image */
	struct bpf_jit ctx;			/* JIT context of the last pass */
	int pass;				/* Number of the last completed pass */
};
1325
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Compile eBPF program "fp"
 *
 * On success the (possibly constant-blinded) program is returned with
 * ->bpf_func, ->jited and ->jited_len set; on any failure the original
 * program is returned unchanged so the core falls back to the
 * interpreter.
 */
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	struct bpf_prog *tmp, *orig_fp = fp;
	struct bpf_binary_header *header;
	struct s390_jit_data *jit_data;
	bool tmp_blinded = false;
	bool extra_pass = false;
	struct bpf_jit jit;
	int pass;

	if (!fp->jit_requested)
		return orig_fp;

	tmp = bpf_jit_blind_constants(fp);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_fp;
	if (tmp != fp) {
		tmp_blinded = true;
		fp = tmp;
	}

	/*
	 * jit_data persists in fp->aux across invocations: the first call
	 * runs the sizing/generation passes below and saves its context,
	 * a later call resumes via skip_init_ctx for the extra pass.
	 */
	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = orig_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.addrs) {
		/* Saved context from a previous invocation: resume there */
		jit = jit_data->ctx;
		header = jit_data->header;
		extra_pass = true;
		pass = jit_data->pass + 1;
		goto skip_init_ctx;
	}

	memset(&jit, 0, sizeof(jit));
	/* One extra slot so addrs[i + insn_count] is valid for the last insn */
	jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
	if (jit.addrs == NULL) {
		fp = orig_fp;
		goto out;
	}
	/*
	 * Three initial passes:
	 *   - 1/2: Determine clobbered registers
	 *   - 3:   Calculate program size and addrs array
	 */
	for (pass = 1; pass <= 3; pass++) {
		if (bpf_jit_prog(&jit, fp, extra_pass)) {
			fp = orig_fp;
			goto free_addrs;
		}
	}
	/*
	 * Final pass: Allocate and generate program
	 */
	if (jit.size >= BPF_SIZE_MAX) {
		fp = orig_fp;
		goto free_addrs;
	}

	header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole);
	if (!header) {
		fp = orig_fp;
		goto free_addrs;
	}
skip_init_ctx:
	if (bpf_jit_prog(&jit, fp, extra_pass)) {
		bpf_jit_binary_free(header);
		fp = orig_fp;
		goto free_addrs;
	}
	if (bpf_jit_enable > 1) {
		/* Debugging aid: dump the generated image */
		bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
		print_fn_code(jit.prg_buf, jit.size_prg);
	}
	if (!fp->is_func || extra_pass) {
		/* Image is final: write-protect it */
		bpf_jit_binary_lock_ro(header);
	} else {
		/* First round for a subprogram: save state for the extra pass */
		jit_data->header = header;
		jit_data->ctx = jit;
		jit_data->pass = pass;
	}
	fp->bpf_func = (void *) jit.prg_buf;
	fp->jited = 1;
	fp->jited_len = jit.size;

	if (!fp->is_func || extra_pass) {
		bpf_prog_fill_jited_linfo(fp, jit.addrs + 1);
		/*
		 * NOTE: the error gotos above jump to free_addrs even when
		 * the surrounding condition is false - this cleanup is
		 * shared between the success and failure paths.
		 */
free_addrs:
		kfree(jit.addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(fp, fp == orig_fp ?
					   tmp : orig_fp);
	return fp;
}