Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * BPF Jit compiler for s390.
   4 *
   5 * Minimum build requirements:
   6 *
   7 *  - HAVE_MARCH_Z196_FEATURES: laal, laalg
   8 *  - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
   9 *  - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
 
  10 *  - 64BIT
  11 *
  12 * Copyright IBM Corp. 2012,2015
  13 *
  14 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  15 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
  16 */
  17
  18#define KMSG_COMPONENT "bpf_jit"
  19#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  20
  21#include <linux/netdevice.h>
  22#include <linux/filter.h>
  23#include <linux/init.h>
  24#include <linux/bpf.h>
  25#include <linux/mm.h>
  26#include <linux/kernel.h>
  27#include <asm/cacheflush.h>
  28#include <asm/extable.h>
  29#include <asm/dis.h>
  30#include <asm/facility.h>
  31#include <asm/nospec-branch.h>
  32#include <asm/set_memory.h>
  33#include <asm/text-patching.h>
  34#include "bpf_jit.h"
  35
/*
 * Per-program JIT state, carried across the sizing and code generation
 * passes. All position fields (prg, lit32, lit64, ...) are byte offsets
 * relative to the start of prg_buf; during sizing passes prg_buf is NULL
 * and only the offsets advance.
 */
struct bpf_jit {
	u32 seen;		/* Flags to remember seen eBPF instructions */
	u32 seen_reg[16];	/* Array to remember which registers are used */
	u32 *addrs;		/* Array with relative instruction addresses */
	u8 *prg_buf;		/* Start of program */
	int size;		/* Size of program and literal pool */
	int size_prg;		/* Size of program */
	int prg;		/* Current position in program */
	int lit32_start;	/* Start of 32-bit literal pool */
	int lit32;		/* Current position in 32-bit literal pool */
	int lit64_start;	/* Start of 64-bit literal pool */
	int lit64;		/* Current position in 64-bit literal pool */
	int base_ip;		/* Base address for literal pool */
	int exit_ip;		/* Address of exit */
	int r1_thunk_ip;	/* Address of expoline thunk for 'br %r1' */
	int r14_thunk_ip;	/* Address of expoline thunk for 'br %r14' */
	int tail_call_start;	/* Tail call start offset */
	int excnt;		/* Number of exception table entries */
	int prologue_plt_ret;	/* Return address for prologue hotpatch PLT */
	int prologue_plt;	/* Start of prologue hotpatch PLT */
};
  57
/*
 * Bits for bpf_jit.seen: program properties observed while emitting code,
 * used to decide what the prologue/epilogue must set up.
 */
#define SEEN_MEM	BIT(0)		/* use mem[] for temporary storage */
#define SEEN_LITERAL	BIT(1)		/* code uses literals */
#define SEEN_FUNC	BIT(2)		/* calls C functions */
#define SEEN_STACK	(SEEN_FUNC | SEEN_MEM)
 
 
 
 
 
 
  62
/*
 * s390 registers
 *
 * REG_W0..REG_15 extend the BPF register numbering with JIT-internal
 * registers; the aliases below name the real s390 GPR that backs a
 * given BPF/JIT register (see reg2hex[]).
 */
#define REG_W0		(MAX_BPF_JIT_REG + 0)	/* Work register 1 (even) */
#define REG_W1		(MAX_BPF_JIT_REG + 1)	/* Work register 2 (odd) */
#define REG_L		(MAX_BPF_JIT_REG + 2)	/* Literal pool register */
#define REG_15		(MAX_BPF_JIT_REG + 3)	/* Register 15 */
#define REG_0		REG_W0			/* Register 0 */
#define REG_1		REG_W1			/* Register 1 */
#define REG_2		BPF_REG_1		/* Register 2 */
#define REG_3		BPF_REG_2		/* Register 3 */
#define REG_4		BPF_REG_3		/* Register 4 */
#define REG_7		BPF_REG_6		/* Register 7 */
#define REG_8		BPF_REG_7		/* Register 8 */
#define REG_14		BPF_REG_0		/* Register 14 */
  78
/*
 * Mapping of BPF registers to s390 registers
 *
 * Index: BPF/JIT register number; value: s390 GPR number (0-15).
 */
static const int reg2hex[] = {
	/* Return code */
	[BPF_REG_0]	= 14,
	/* Function parameters */
	[BPF_REG_1]	= 2,
	[BPF_REG_2]	= 3,
	[BPF_REG_3]	= 4,
	[BPF_REG_4]	= 5,
	[BPF_REG_5]	= 6,
	/* Call saved registers */
	[BPF_REG_6]	= 7,
	[BPF_REG_7]	= 8,
	[BPF_REG_8]	= 9,
	[BPF_REG_9]	= 10,
	/* BPF stack pointer */
	[BPF_REG_FP]	= 13,
	/* Register for blinding */
	[BPF_REG_AX]	= 12,
	/* Work registers for s390x backend */
	[REG_W0]	= 0,
	[REG_W1]	= 1,
	[REG_L]		= 11,
	[REG_15]	= 15,
};
 106
 107static inline u32 reg(u32 dst_reg, u32 src_reg)
 108{
 109	return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
 110}
 111
 112static inline u32 reg_high(u32 reg)
 113{
 114	return reg2hex[reg] << 4;
 115}
 116
 117static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 118{
 119	u32 r1 = reg2hex[b1];
 120
 121	if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
 122		jit->seen_reg[r1] = 1;
 123}
 124
/* Statement-expression wrapper: mark register b1 as used (needs "jit" in scope) */
#define REG_SET_SEEN(b1)					\
({								\
	reg_set_seen(jit, b1);					\
})

/* Whether the s390 GPR backing b1 has been used so far */
#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
 131
/*
 * EMIT macros for code generation
 *
 * The _EMIT* forms store raw instruction halfwords at jit->prg and
 * advance it; when prg_buf is NULL (sizing passes) only the position
 * advances. The EMIT* forms additionally merge register fields into
 * the opcode and record register usage via REG_SET_SEEN(). All macros
 * expect a "struct bpf_jit *jit" in scope.
 */

/* Emit a 2-byte instruction */
#define _EMIT2(op)						\
({								\
	if (jit->prg_buf)					\
		*(u16 *) (jit->prg_buf + jit->prg) = (op);	\
	jit->prg += 2;						\
})

/* Emit a 2-byte RR instruction with two register operands */
#define EMIT2(op, b1, b2)					\
({								\
	_EMIT2((op) | reg(b1, b2));				\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* Emit a 4-byte instruction */
#define _EMIT4(op)						\
({								\
	if (jit->prg_buf)					\
		*(u32 *) (jit->prg_buf + jit->prg) = (op);	\
	jit->prg += 4;						\
})

/* Emit a 4-byte RRE-style instruction with two register operands */
#define EMIT4(op, b1, b2)					\
({								\
	_EMIT4((op) | reg(b1, b2));				\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* Emit a 4-byte RRF instruction: third register in bits 16-19 */
#define EMIT4_RRF(op, b1, b2, b3)				\
({								\
	_EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2));		\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
	REG_SET_SEEN(b3);					\
})

/* Emit a 4-byte instruction with a 12-bit displacement */
#define _EMIT4_DISP(op, disp)					\
({								\
	unsigned int __disp = (disp) & 0xfff;			\
	_EMIT4((op) | __disp);					\
})

/* Emit a 4-byte RX-style instruction: two registers plus displacement */
#define EMIT4_DISP(op, b1, b2, disp)				\
({								\
	_EMIT4_DISP((op) | reg_high(b1) << 16 |			\
		    reg_high(b2) << 8, (disp));			\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* Emit a 4-byte RI instruction with a 16-bit immediate */
#define EMIT4_IMM(op, b1, imm)					\
({								\
	unsigned int __imm = (imm) & 0xffff;			\
	_EMIT4((op) | reg_high(b1) << 16 | __imm);		\
	REG_SET_SEEN(b1);					\
})

/* Emit a 4-byte PC-relative branch; pcrel is a byte offset, encoded in halfwords */
#define EMIT4_PCREL(op, pcrel)					\
({								\
	long __pcrel = ((pcrel) >> 1) & 0xffff;			\
	_EMIT4((op) | __pcrel);					\
})

/* Emit a 4-byte RI-c branch with condition mask, target is a jit->prg offset */
#define EMIT4_PCREL_RIC(op, mask, target)			\
({								\
	int __rel = ((target) - jit->prg) / 2;			\
	_EMIT4((op) | (mask) << 20 | (__rel & 0xffff));		\
})

/* Emit a 6-byte instruction as a 4-byte head plus 2-byte tail */
#define _EMIT6(op1, op2)					\
({								\
	if (jit->prg_buf) {					\
		*(u32 *) (jit->prg_buf + jit->prg) = (op1);	\
		*(u16 *) (jit->prg_buf + jit->prg + 4) = (op2);	\
	}							\
	jit->prg += 6;						\
})
 213
/* Emit a 6-byte instruction with a 12-bit displacement in the head */
#define _EMIT6_DISP(op1, op2, disp)				\
({								\
	unsigned int __disp = (disp) & 0xfff;			\
	_EMIT6((op1) | __disp, op2);				\
})

/*
 * Emit a 6-byte instruction with a 20-bit signed displacement split into
 * low 12 bits (head) and high 8 bits (tail), as in the RXY/RSY formats.
 */
#define _EMIT6_DISP_LH(op1, op2, disp)				\
({								\
	u32 _disp = (u32) (disp);				\
	unsigned int __disp_h = _disp & 0xff000;		\
	unsigned int __disp_l = _disp & 0x00fff;		\
	_EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4);	\
})

/* RXY/RSY-style: registers b1,b2, index/base b3, long displacement */
#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp)		\
({								\
	_EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 |		\
		       reg_high(b3) << 8, op2, disp);		\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
	REG_SET_SEEN(b3);					\
})

/* RIE-b compare-and-branch: two registers, condition mask, 16-bit halfword offset */
#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target)	\
({								\
	unsigned int rel = (int)((target) - jit->prg) / 2;	\
	_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff),	\
	       (op2) | (mask) << 12);				\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* RIE-c compare-immediate-and-branch: register vs 8-bit immediate */
#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target)	\
({								\
	unsigned int rel = (int)((target) - jit->prg) / 2;	\
	_EMIT6((op1) | (reg_high(b1) | (mask)) << 16 |		\
		(rel & 0xffff), (op2) | ((imm) & 0xff) << 8);	\
	REG_SET_SEEN(b1);					\
	BUILD_BUG_ON(((unsigned long) (imm)) > 0xff);		\
})

/* Branch to BPF instruction (i + off + 1) via the addrs[] table */
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)		\
({								\
	int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2;	\
	_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
	REG_SET_SEEN(b1);					\
	REG_SET_SEEN(b2);					\
})

/* RIL-b: register plus 32-bit halfword-scaled PC-relative offset */
#define EMIT6_PCREL_RILB(op, b, target)				\
({								\
	unsigned int rel = (int)((target) - jit->prg) / 2;	\
	_EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\
	REG_SET_SEEN(b);					\
})

/* RIL: 32-bit halfword-scaled PC-relative offset, no register field */
#define EMIT6_PCREL_RIL(op, target)				\
({								\
	unsigned int rel = (int)((target) - jit->prg) / 2;	\
	_EMIT6((op) | rel >> 16, rel & 0xffff);			\
})

/* RIL-c: like EMIT6_PCREL_RIL with a condition mask (e.g. brcl) */
#define EMIT6_PCREL_RILC(op, mask, target)			\
({								\
	EMIT6_PCREL_RIL((op) | (mask) << 20, (target));		\
})

/* Emit a 6-byte instruction with a 32-bit immediate split across head/tail */
#define _EMIT6_IMM(op, imm)					\
({								\
	unsigned int __imm = (imm);				\
	_EMIT6((op) | (__imm >> 16), __imm & 0xffff);		\
})

/* RIL-a: register plus 32-bit immediate */
#define EMIT6_IMM(op, b1, imm)					\
({								\
	_EMIT6_IMM((op) | reg_high(b1) << 16, imm);		\
	REG_SET_SEEN(b1);					\
})
 292
/*
 * Append a 32-bit constant to the literal pool; evaluates to the
 * constant's offset from the start of prg_buf.
 */
#define _EMIT_CONST_U32(val)					\
({								\
	unsigned int ret;					\
	ret = jit->lit32;					\
	if (jit->prg_buf)					\
		*(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\
	jit->lit32 += 4;					\
})

/*
 * Like _EMIT_CONST_U32, but evaluates to the offset relative to the
 * literal pool base register (REG_L) and records SEEN_LITERAL.
 */
#define EMIT_CONST_U32(val)					\
({								\
	jit->seen |= SEEN_LITERAL;				\
	_EMIT_CONST_U32(val) - jit->base_ip;			\
})

/* 64-bit counterpart of _EMIT_CONST_U32 */
#define _EMIT_CONST_U64(val)					\
({								\
	unsigned int ret;					\
	ret = jit->lit64;					\
	if (jit->prg_buf)					\
		*(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\
	jit->lit64 += 8;					\
	ret;							\
})

/* 64-bit counterpart of EMIT_CONST_U32 */
#define EMIT_CONST_U64(val)					\
({								\
	jit->seen |= SEEN_LITERAL;				\
	_EMIT_CONST_U64(val) - jit->base_ip;			\
})

/* Zero-extend b1 to 64 bit, unless the verifier already guarantees it */
#define EMIT_ZERO(b1)						\
({								\
	if (!fp->aux->verifier_zext) {				\
		/* llgfr %dst,%dst (zero extend to 64 bit) */	\
		EMIT4(0xb9160000, b1, b1);			\
		REG_SET_SEEN(b1);				\
	}							\
})
 333
 334/*
 335 * Return whether this is the first pass. The first pass is special, since we
 336 * don't know any sizes yet, and thus must be conservative.
 337 */
 338static bool is_first_pass(struct bpf_jit *jit)
 339{
 340	return jit->size == 0;
 341}
 342
 343/*
 344 * Return whether this is the code generation pass. The code generation pass is
 345 * special, since we should change as little as possible.
 346 */
 347static bool is_codegen_pass(struct bpf_jit *jit)
 348{
 349	return jit->prg_buf;
 350}
 351
 352/*
 353 * Return whether "rel" can be encoded as a short PC-relative offset
 354 */
 355static bool is_valid_rel(int rel)
 356{
 357	return rel >= -65536 && rel <= 65534;
 358}
 359
 360/*
 361 * Return whether "off" can be reached using a short PC-relative offset
 362 */
 363static bool can_use_rel(struct bpf_jit *jit, int off)
 364{
 365	return is_valid_rel(off - jit->prg);
 366}
 367
 368/*
 369 * Return whether given displacement can be encoded using
 370 * Long-Displacement Facility
 371 */
 372static bool is_valid_ldisp(int disp)
 373{
 374	return disp >= -524288 && disp <= 524287;
 375}
 376
 377/*
 378 * Return whether the next 32-bit literal pool entry can be referenced using
 379 * Long-Displacement Facility
 380 */
 381static bool can_use_ldisp_for_lit32(struct bpf_jit *jit)
 382{
 383	return is_valid_ldisp(jit->lit32 - jit->base_ip);
 384}
 385
 386/*
 387 * Return whether the next 64-bit literal pool entry can be referenced using
 388 * Long-Displacement Facility
 389 */
 390static bool can_use_ldisp_for_lit64(struct bpf_jit *jit)
 391{
 392	return is_valid_ldisp(jit->lit64 - jit->base_ip);
 393}
 394
 395/*
 396 * Fill whole space with illegal instructions
 397 */
 398static void jit_fill_hole(void *area, unsigned int size)
 399{
 400	memset(area, 0, size);
 401}
 402
/*
 * Save registers from "rs" (register start) to "re" (register end) on stack
 *
 * Callers guarantee 6 <= rs <= re <= 15; "off" is the stack slot of %r6
 * plus 8 bytes per register above it.
 */
static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
	u32 off = STK_OFF_R6 + (rs - 6) * 8;

	if (rs == re)
		/* stg %rs,off(%r15) */
		_EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
	else
		/* stmg %rs,%re,off(%r15) */
		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
}
 417
/*
 * Restore registers from "rs" (register start) to "re" (register end) on stack
 *
 * Mirrors save_regs(), but if a stack frame was allocated (SEEN_STACK)
 * the save area now sits STK_OFF + stack_depth bytes further away from
 * the (lowered) %r15.
 */
static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
{
	u32 off = STK_OFF_R6 + (rs - 6) * 8;

	if (jit->seen & SEEN_STACK)
		off += STK_OFF + stack_depth;

	if (rs == re)
		/* lg %rs,off(%r15) */
		_EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
	else
		/* lmg %rs,%re,off(%r15) */
		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
}
 435
 436/*
 437 * Return first seen register (from start)
 438 */
 439static int get_start(struct bpf_jit *jit, int start)
 440{
 441	int i;
 442
 443	for (i = start; i <= 15; i++) {
 444		if (jit->seen_reg[i])
 445			return i;
 446	}
 447	return 0;
 448}
 449
 450/*
 451 * Return last seen register (from start) (gap >= 2)
 452 */
 453static int get_end(struct bpf_jit *jit, int start)
 454{
 455	int i;
 456
 457	for (i = start; i < 15; i++) {
 458		if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
 459			return i - 1;
 460	}
 461	return jit->seen_reg[15] ? 15 : 14;
 462}
 463
#define REGS_SAVE	1
#define REGS_RESTORE	0
/*
 * Save and restore clobbered registers (6-15) on stack.
 * We save/restore registers in chunks with gap >= 2 registers.
 *
 * "op" is REGS_SAVE or REGS_RESTORE; "stack_depth" is the extra stack
 * space used by the program, forwarded to restore_regs().
 */
static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
{
	/* One stg/stmg (or lg/lmg) is 6 bytes */
	const int last = 15, save_restore_size = 6;
	int re = 6, rs;

	if (is_first_pass(jit)) {
		/*
		 * We don't know yet which registers are used. Reserve space
		 * conservatively.
		 */
		jit->prg += (last - re + 1) * save_restore_size;
		return;
	}

	/* Emit one instruction per contiguous chunk of seen registers */
	do {
		rs = get_start(jit, re);
		if (!rs)
			break;
		re = get_end(jit, rs + 1);
		if (op == REGS_SAVE)
			save_regs(jit, rs, re);
		else
			restore_regs(jit, rs, re, stack_depth);
		re++;
	} while (re <= last);
}
 496
/*
 * Emit "size" bytes of no-ops: one long branch (6 bytes) or short
 * branch (4 bytes) where possible, then 2-byte bcr no-ops for the rest.
 * "size" must be even, since all emitted instructions are 2-byte aligned.
 */
static void bpf_skip(struct bpf_jit *jit, int size)
{
	if (size >= 6 && !is_valid_rel(size)) {
		/* brcl 0xf,size */
		EMIT6_PCREL_RIL(0xc0f4000000, size);
		size -= 6;
	} else if (size >= 4 && is_valid_rel(size)) {
		/* brc 0xf,size */
		EMIT4_PCREL(0xa7f40000, size);
		size -= 4;
	}
	while (size >= 2) {
		/* bcr 0,%0 */
		_EMIT2(0x0700);
		size -= 2;
	}
}
 514
/*
 * PLT for hotpatchable calls. The calling convention is the same as for the
 * ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
 *
 * The assembly below is a template; bpf_jit_plt() copies it into the
 * generated program and patches the two .quad literal slots.
 */
extern const char bpf_plt[];
extern const char bpf_plt_ret[];
extern const char bpf_plt_target[];
extern const char bpf_plt_end[];
#define BPF_PLT_SIZE 32
asm(
	".pushsection .rodata\n"
	"	.balign 8\n"
	"bpf_plt:\n"
	"	lgrl %r0,bpf_plt_ret\n"
	"	lgrl %r1,bpf_plt_target\n"
	"	br %r1\n"
	"	.balign 8\n"
	"bpf_plt_ret: .quad 0\n"
	"bpf_plt_target: .quad 0\n"
	"bpf_plt_end:\n"
	"	.popsection\n"
);
 537
/*
 * Instantiate a PLT at "plt": copy the bpf_plt template and fill in the
 * return-address and target literal slots at the template's offsets.
 * A NULL "target" makes the PLT jump straight back to "ret".
 */
static void bpf_jit_plt(void *plt, void *ret, void *target)
{
	memcpy(plt, bpf_plt, BPF_PLT_SIZE);
	*(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
	*(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target ?: ret;
}
 544
/*
 * Emit function prologue
 *
 * Save registers and create stack frame if necessary.
 * See stack frame layout description in "bpf_jit.h"!
 */
static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
			     u32 stack_depth)
{
	/* No-op for hotpatching */
	/* brcl 0,prologue_plt */
	EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt);
	jit->prologue_plt_ret = jit->prg;

	if (!bpf_is_subprog(fp)) {
		/* Initialize the tail call counter in the main program. */
		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
		_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
	} else {
		/*
		 * Skip the tail call counter initialization in subprograms.
		 * Insert nops in order to have tail_call_start at a
		 * predictable offset.
		 */
		bpf_skip(jit, 6);
	}
	/* Tail calls have to skip above initialization */
	jit->tail_call_start = jit->prg;
	/* Save registers */
	save_restore_regs(jit, REGS_SAVE, stack_depth);
	/* Setup literal pool */
	if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
		/*
		 * Prefer the 2-byte basr when the whole program (and thus
		 * the literal pool) is within long-displacement reach of
		 * the current address; otherwise point REG_L directly at
		 * the 32-bit literal pool with a 6-byte larl.
		 */
		if (!is_first_pass(jit) &&
		    is_valid_ldisp(jit->size - (jit->prg + 2))) {
			/* basr %l,0 */
			EMIT2(0x0d00, REG_L, REG_0);
			jit->base_ip = jit->prg;
		} else {
			/* larl %l,lit32_start */
			EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start);
			jit->base_ip = jit->lit32_start;
		}
	}
	/* Setup stack and backchain */
	if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
		if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
			/* lgr %w1,%r15 (backchain) */
			EMIT4(0xb9040000, REG_W1, REG_15);
		/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
		/* aghi %r15,-STK_OFF */
		EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
		if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
			/* stg %w1,152(%r15) (backchain) */
			EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
				      REG_15, 152);
	}
}
 603
/*
 * Emit an expoline for a jump that follows
 *
 * exrl executes the branch 10 bytes ahead (past the "j ." landing pad),
 * which keeps the indirect branch out of the speculative path.
 */
static void emit_expoline(struct bpf_jit *jit)
{
	/* exrl %r0,.+10 */
	EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
	/* j . */
	EMIT4_PCREL(0xa7f40000, 0);
}
 614
/*
 * Emit __s390_indirect_jump_r1 thunk if necessary
 *
 * Only emitted when the kernel is configured to use expoline
 * trampolines against branch-target speculation.
 */
static void emit_r1_thunk(struct bpf_jit *jit)
{
	if (nospec_uses_trampoline()) {
		jit->r1_thunk_ip = jit->prg;
		emit_expoline(jit);
		/* br %r1 */
		_EMIT2(0x07f1);
	}
}
 627
/*
 * Call r1 either directly or via __s390_indirect_jump_r1 thunk
 *
 * Either way the return address ends up in %r14, matching the s390
 * calling convention.
 */
static void call_r1(struct bpf_jit *jit)
{
	if (nospec_uses_trampoline())
		/* brasl %r14,__s390_indirect_jump_r1 */
		EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
	else
		/* basr %r14,%r1 */
		EMIT2(0x0d00, REG_14, REG_1);
}
 640
/*
 * Function epilogue
 *
 * Emits the common exit path, the expoline thunks (when needed), and
 * finally the hotpatch PLT that the prologue's brcl no-op targets.
 */
static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
{
	jit->exit_ip = jit->prg;
	/* Load exit code: lgr %r2,%b0 */
	EMIT4(0xb9040000, REG_2, BPF_REG_0);
	/* Restore registers */
	save_restore_regs(jit, REGS_RESTORE, stack_depth);
	if (nospec_uses_trampoline()) {
		jit->r14_thunk_ip = jit->prg;
		/* Generate __s390_indirect_jump_r14 thunk */
		emit_expoline(jit);
	}
	/* br %r14 */
	_EMIT2(0x07fe);

	/* The r1 thunk is only needed when the program calls C functions */
	if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
		emit_r1_thunk(jit);

	/* Place the 8-byte-aligned hotpatch PLT after the program code */
	jit->prg = ALIGN(jit->prg, 8);
	jit->prologue_plt = jit->prg;
	if (jit->prg_buf)
		bpf_jit_plt(jit->prg_buf + jit->prg,
			    jit->prg_buf + jit->prologue_plt_ret, NULL);
	jit->prg += BPF_PLT_SIZE;
}
 669
 670static int get_probe_mem_regno(const u8 *insn)
 671{
 672	/*
 673	 * insn must point to llgc, llgh, llgf, lg, lgb, lgh or lgf, which have
 674	 * destination register at the same position.
 675	 */
 676	if (insn[0] != 0xe3) /* common prefix */
 677		return -1;
 678	if (insn[5] != 0x90 && /* llgc */
 679	    insn[5] != 0x91 && /* llgh */
 680	    insn[5] != 0x16 && /* llgf */
 681	    insn[5] != 0x04 && /* lg */
 682	    insn[5] != 0x77 && /* lgb */
 683	    insn[5] != 0x15 && /* lgh */
 684	    insn[5] != 0x14) /* lgf */
 685		return -1;
 686	return insn[1] >> 4;
 687}
 688
/*
 * Exception-table fixup handler for BPF probe-memory loads: resume at
 * the fixup address and zero the faulting load's destination register
 * (its number was stored in x->data by bpf_jit_probe_mem()).
 */
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
	regs->psw.addr = extable_fixup(x);
	regs->gprs[x->data] = 0;
	return true;
}
 695
/*
 * Add exception-table entries for a probe-memory load.
 *
 * "probe_prg" is the offset of the emitted load, "nop_prg" the offset of
 * the nop directly after it, which serves as the landing pad. Two
 * entries are added (one per instruction); ex_handler_bpf() uses
 * ex->data to know which register to zero. Returns 0 on success, -1 on
 * a JIT/verifier inconsistency.
 */
static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
			     int probe_prg, int nop_prg)
{
	struct exception_table_entry *ex;
	int reg, prg;
	s64 delta;
	u8 *insn;
	int i;

	if (!fp->aux->extable)
		/* Do nothing during early JIT passes. */
		return 0;
	insn = jit->prg_buf + probe_prg;
	reg = get_probe_mem_regno(insn);
	if (WARN_ON_ONCE(reg < 0))
		/* JIT bug - unexpected probe instruction. */
		return -1;
	if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg))
		/* JIT bug - gap between probe and nop instructions. */
		return -1;
	for (i = 0; i < 2; i++) {
		if (WARN_ON_ONCE(jit->excnt >= fp->aux->num_exentries))
			/* Verifier bug - not enough entries. */
			return -1;
		ex = &fp->aux->extable[jit->excnt];
		/* Add extable entries for probe and nop instructions. */
		prg = i == 0 ? probe_prg : nop_prg;
		delta = jit->prg_buf + prg - (u8 *)&ex->insn;
		if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
			/* JIT bug - code and extable must be close. */
			return -1;
		ex->insn = delta;
		/*
		 * Always land on the nop. Note that extable infrastructure
		 * ignores fixup field, it is handled by ex_handler_bpf().
		 */
		delta = jit->prg_buf + nop_prg - (u8 *)&ex->fixup;
		if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
			/* JIT bug - landing pad and extable must be close. */
			return -1;
		ex->fixup = delta;
		ex->type = EX_TYPE_BPF;
		ex->data = reg;
		jit->excnt++;
	}
	return 0;
}
 743
/*
 * Sign-extend the register if necessary
 *
 * "size" is the argument size in bytes (1, 2, 4 or 8); nothing is
 * emitted unless "flags" has BTF_FMODEL_SIGNED_ARG set. Returns 0 on
 * success, -1 for an unsupported size.
 */
static int sign_extend(struct bpf_jit *jit, int r, u8 size, u8 flags)
{
	if (!(flags & BTF_FMODEL_SIGNED_ARG))
		return 0;

	switch (size) {
	case 1:
		/* lgbr %r,%r */
		EMIT4(0xb9060000, r, r);
		return 0;
	case 2:
		/* lghr %r,%r */
		EMIT4(0xb9070000, r, r);
		return 0;
	case 4:
		/* lgfr %r,%r */
		EMIT4(0xb9140000, r, r);
		return 0;
	case 8:
		/* already 64 bit wide, nothing to do */
		return 0;
	default:
		return -1;
	}
}
 771
 772/*
 773 * Compile one eBPF instruction into s390x code
 774 *
 775 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
 776 * stack space for the large switch statement.
 777 */
 778static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 779				 int i, bool extra_pass, u32 stack_depth)
 780{
 781	struct bpf_insn *insn = &fp->insnsi[i];
 782	s32 branch_oc_off = insn->off;
 
 783	u32 dst_reg = insn->dst_reg;
 784	u32 src_reg = insn->src_reg;
 785	int last, insn_count = 1;
 786	u32 *addrs = jit->addrs;
 787	s32 imm = insn->imm;
 788	s16 off = insn->off;
 789	int probe_prg = -1;
 790	unsigned int mask;
 791	int nop_prg;
 792	int err;
 793
 794	if (BPF_CLASS(insn->code) == BPF_LDX &&
 795	    (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
 796	     BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
 797		probe_prg = jit->prg;
 798
 
 
 799	switch (insn->code) {
 800	/*
 801	 * BPF_MOV
 802	 */
 803	case BPF_ALU | BPF_MOV | BPF_X:
 804		switch (insn->off) {
 805		case 0: /* DST = (u32) SRC */
 806			/* llgfr %dst,%src */
 807			EMIT4(0xb9160000, dst_reg, src_reg);
 808			if (insn_is_zext(&insn[1]))
 809				insn_count = 2;
 810			break;
 811		case 8: /* DST = (u32)(s8) SRC */
 812			/* lbr %dst,%src */
 813			EMIT4(0xb9260000, dst_reg, src_reg);
 814			/* llgfr %dst,%dst */
 815			EMIT4(0xb9160000, dst_reg, dst_reg);
 816			break;
 817		case 16: /* DST = (u32)(s16) SRC */
 818			/* lhr %dst,%src */
 819			EMIT4(0xb9270000, dst_reg, src_reg);
 820			/* llgfr %dst,%dst */
 821			EMIT4(0xb9160000, dst_reg, dst_reg);
 822			break;
 823		}
 824		break;
 825	case BPF_ALU64 | BPF_MOV | BPF_X:
 826		switch (insn->off) {
 827		case 0: /* DST = SRC */
 828			/* lgr %dst,%src */
 829			EMIT4(0xb9040000, dst_reg, src_reg);
 830			break;
 831		case 8: /* DST = (s8) SRC */
 832			/* lgbr %dst,%src */
 833			EMIT4(0xb9060000, dst_reg, src_reg);
 834			break;
 835		case 16: /* DST = (s16) SRC */
 836			/* lghr %dst,%src */
 837			EMIT4(0xb9070000, dst_reg, src_reg);
 838			break;
 839		case 32: /* DST = (s32) SRC */
 840			/* lgfr %dst,%src */
 841			EMIT4(0xb9140000, dst_reg, src_reg);
 842			break;
 843		}
 844		break;
 845	case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
 846		/* llilf %dst,imm */
 847		EMIT6_IMM(0xc00f0000, dst_reg, imm);
 848		if (insn_is_zext(&insn[1]))
 849			insn_count = 2;
 850		break;
 851	case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
 852		/* lgfi %dst,imm */
 853		EMIT6_IMM(0xc0010000, dst_reg, imm);
 854		break;
 855	/*
 856	 * BPF_LD 64
 857	 */
 858	case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
 859	{
 860		/* 16 byte instruction that uses two 'struct bpf_insn' */
 861		u64 imm64;
 862
 863		imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
 864		/* lgrl %dst,imm */
 865		EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64));
 
 866		insn_count = 2;
 867		break;
 868	}
 869	/*
 870	 * BPF_ADD
 871	 */
 872	case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
 873		/* ar %dst,%src */
 874		EMIT2(0x1a00, dst_reg, src_reg);
 875		EMIT_ZERO(dst_reg);
 876		break;
 877	case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
 878		/* agr %dst,%src */
 879		EMIT4(0xb9080000, dst_reg, src_reg);
 880		break;
 881	case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
 882		if (imm != 0) {
 883			/* alfi %dst,imm */
 884			EMIT6_IMM(0xc20b0000, dst_reg, imm);
 885		}
 886		EMIT_ZERO(dst_reg);
 887		break;
 888	case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
 889		if (!imm)
 890			break;
 891		/* agfi %dst,imm */
 892		EMIT6_IMM(0xc2080000, dst_reg, imm);
 893		break;
 894	/*
 895	 * BPF_SUB
 896	 */
 897	case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
 898		/* sr %dst,%src */
 899		EMIT2(0x1b00, dst_reg, src_reg);
 900		EMIT_ZERO(dst_reg);
 901		break;
 902	case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
 903		/* sgr %dst,%src */
 904		EMIT4(0xb9090000, dst_reg, src_reg);
 905		break;
 906	case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
 907		if (imm != 0) {
 908			/* alfi %dst,-imm */
 909			EMIT6_IMM(0xc20b0000, dst_reg, -imm);
 910		}
 911		EMIT_ZERO(dst_reg);
 912		break;
 913	case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
 914		if (!imm)
 915			break;
 916		if (imm == -0x80000000) {
 917			/* algfi %dst,0x80000000 */
 918			EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
 919		} else {
 920			/* agfi %dst,-imm */
 921			EMIT6_IMM(0xc2080000, dst_reg, -imm);
 922		}
 923		break;
 924	/*
 925	 * BPF_MUL
 926	 */
 927	case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
 928		/* msr %dst,%src */
 929		EMIT4(0xb2520000, dst_reg, src_reg);
 930		EMIT_ZERO(dst_reg);
 931		break;
 932	case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
 933		/* msgr %dst,%src */
 934		EMIT4(0xb90c0000, dst_reg, src_reg);
 935		break;
 936	case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
 937		if (imm != 1) {
 938			/* msfi %r5,imm */
 939			EMIT6_IMM(0xc2010000, dst_reg, imm);
 940		}
 941		EMIT_ZERO(dst_reg);
 942		break;
 943	case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
 944		if (imm == 1)
 945			break;
 946		/* msgfi %dst,imm */
 947		EMIT6_IMM(0xc2000000, dst_reg, imm);
 948		break;
 949	/*
 950	 * BPF_DIV / BPF_MOD
 951	 */
 952	case BPF_ALU | BPF_DIV | BPF_X:
 953	case BPF_ALU | BPF_MOD | BPF_X:
 954	{
 955		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 956
 957		switch (off) {
 958		case 0: /* dst = (u32) dst {/,%} (u32) src */
 959			/* xr %w0,%w0 */
 960			EMIT2(0x1700, REG_W0, REG_W0);
 961			/* lr %w1,%dst */
 962			EMIT2(0x1800, REG_W1, dst_reg);
 963			/* dlr %w0,%src */
 964			EMIT4(0xb9970000, REG_W0, src_reg);
 965			break;
 966		case 1: /* dst = (u32) ((s32) dst {/,%} (s32) src) */
 967			/* lgfr %r1,%dst */
 968			EMIT4(0xb9140000, REG_W1, dst_reg);
 969			/* dsgfr %r0,%src */
 970			EMIT4(0xb91d0000, REG_W0, src_reg);
 971			break;
 972		}
 973		/* llgfr %dst,%rc */
 974		EMIT4(0xb9160000, dst_reg, rc_reg);
 975		if (insn_is_zext(&insn[1]))
 976			insn_count = 2;
 977		break;
 978	}
 979	case BPF_ALU64 | BPF_DIV | BPF_X:
 980	case BPF_ALU64 | BPF_MOD | BPF_X:
 981	{
 982		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 983
 984		switch (off) {
 985		case 0: /* dst = dst {/,%} src */
 986			/* lghi %w0,0 */
 987			EMIT4_IMM(0xa7090000, REG_W0, 0);
 988			/* lgr %w1,%dst */
 989			EMIT4(0xb9040000, REG_W1, dst_reg);
 990			/* dlgr %w0,%src */
 991			EMIT4(0xb9870000, REG_W0, src_reg);
 992			break;
 993		case 1: /* dst = (s64) dst {/,%} (s64) src */
 994			/* lgr %w1,%dst */
 995			EMIT4(0xb9040000, REG_W1, dst_reg);
 996			/* dsgr %w0,%src */
 997			EMIT4(0xb90d0000, REG_W0, src_reg);
 998			break;
 999		}
1000		/* lgr %dst,%rc */
1001		EMIT4(0xb9040000, dst_reg, rc_reg);
1002		break;
1003	}
1004	case BPF_ALU | BPF_DIV | BPF_K:
1005	case BPF_ALU | BPF_MOD | BPF_K:
1006	{
1007		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
1008
1009		if (imm == 1) {
1010			if (BPF_OP(insn->code) == BPF_MOD)
1011				/* lghi %dst,0 */
1012				EMIT4_IMM(0xa7090000, dst_reg, 0);
1013			else
1014				EMIT_ZERO(dst_reg);
1015			break;
1016		}
1017		if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) {
1018			switch (off) {
1019			case 0: /* dst = (u32) dst {/,%} (u32) imm */
1020				/* xr %w0,%w0 */
1021				EMIT2(0x1700, REG_W0, REG_W0);
1022				/* lr %w1,%dst */
1023				EMIT2(0x1800, REG_W1, dst_reg);
1024				/* dl %w0,<d(imm)>(%l) */
1025				EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0,
1026					      REG_L, EMIT_CONST_U32(imm));
1027				break;
1028			case 1: /* dst = (s32) dst {/,%} (s32) imm */
1029				/* lgfr %r1,%dst */
1030				EMIT4(0xb9140000, REG_W1, dst_reg);
1031				/* dsgf %r0,<d(imm)>(%l) */
1032				EMIT6_DISP_LH(0xe3000000, 0x001d, REG_W0, REG_0,
1033					      REG_L, EMIT_CONST_U32(imm));
1034				break;
1035			}
1036		} else {
1037			switch (off) {
1038			case 0: /* dst = (u32) dst {/,%} (u32) imm */
1039				/* xr %w0,%w0 */
1040				EMIT2(0x1700, REG_W0, REG_W0);
1041				/* lr %w1,%dst */
1042				EMIT2(0x1800, REG_W1, dst_reg);
1043				/* lrl %dst,imm */
1044				EMIT6_PCREL_RILB(0xc40d0000, dst_reg,
1045						 _EMIT_CONST_U32(imm));
1046				jit->seen |= SEEN_LITERAL;
1047				/* dlr %w0,%dst */
1048				EMIT4(0xb9970000, REG_W0, dst_reg);
1049				break;
1050			case 1: /* dst = (s32) dst {/,%} (s32) imm */
1051				/* lgfr %w1,%dst */
1052				EMIT4(0xb9140000, REG_W1, dst_reg);
1053				/* lgfrl %dst,imm */
1054				EMIT6_PCREL_RILB(0xc40c0000, dst_reg,
1055						 _EMIT_CONST_U32(imm));
1056				jit->seen |= SEEN_LITERAL;
1057				/* dsgr %w0,%dst */
1058				EMIT4(0xb90d0000, REG_W0, dst_reg);
1059				break;
1060			}
1061		}
1062		/* llgfr %dst,%rc */
1063		EMIT4(0xb9160000, dst_reg, rc_reg);
1064		if (insn_is_zext(&insn[1]))
1065			insn_count = 2;
1066		break;
1067	}
1068	case BPF_ALU64 | BPF_DIV | BPF_K:
1069	case BPF_ALU64 | BPF_MOD | BPF_K:
1070	{
1071		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
1072
1073		if (imm == 1) {
1074			if (BPF_OP(insn->code) == BPF_MOD)
1075				/* lhgi %dst,0 */
1076				EMIT4_IMM(0xa7090000, dst_reg, 0);
1077			break;
1078		}
1079		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
1080			switch (off) {
1081			case 0: /* dst = dst {/,%} imm */
1082				/* lghi %w0,0 */
1083				EMIT4_IMM(0xa7090000, REG_W0, 0);
1084				/* lgr %w1,%dst */
1085				EMIT4(0xb9040000, REG_W1, dst_reg);
1086				/* dlg %w0,<d(imm)>(%l) */
1087				EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0,
1088					      REG_L, EMIT_CONST_U64(imm));
1089				break;
1090			case 1: /* dst = (s64) dst {/,%} (s64) imm */
1091				/* lgr %w1,%dst */
1092				EMIT4(0xb9040000, REG_W1, dst_reg);
1093				/* dsg %w0,<d(imm)>(%l) */
1094				EMIT6_DISP_LH(0xe3000000, 0x000d, REG_W0, REG_0,
1095					      REG_L, EMIT_CONST_U64(imm));
1096				break;
1097			}
1098		} else {
1099			switch (off) {
1100			case 0: /* dst = dst {/,%} imm */
1101				/* lghi %w0,0 */
1102				EMIT4_IMM(0xa7090000, REG_W0, 0);
1103				/* lgr %w1,%dst */
1104				EMIT4(0xb9040000, REG_W1, dst_reg);
1105				/* lgrl %dst,imm */
1106				EMIT6_PCREL_RILB(0xc4080000, dst_reg,
1107						 _EMIT_CONST_U64(imm));
1108				jit->seen |= SEEN_LITERAL;
1109				/* dlgr %w0,%dst */
1110				EMIT4(0xb9870000, REG_W0, dst_reg);
1111				break;
1112			case 1: /* dst = (s64) dst {/,%} (s64) imm */
1113				/* lgr %w1,%dst */
1114				EMIT4(0xb9040000, REG_W1, dst_reg);
1115				/* lgrl %dst,imm */
1116				EMIT6_PCREL_RILB(0xc4080000, dst_reg,
1117						 _EMIT_CONST_U64(imm));
1118				jit->seen |= SEEN_LITERAL;
1119				/* dsgr %w0,%dst */
1120				EMIT4(0xb90d0000, REG_W0, dst_reg);
1121				break;
1122			}
1123		}
1124		/* lgr %dst,%rc */
1125		EMIT4(0xb9040000, dst_reg, rc_reg);
1126		break;
1127	}
1128	/*
1129	 * BPF_AND
1130	 */
1131	case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
1132		/* nr %dst,%src */
1133		EMIT2(0x1400, dst_reg, src_reg);
1134		EMIT_ZERO(dst_reg);
1135		break;
1136	case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
1137		/* ngr %dst,%src */
1138		EMIT4(0xb9800000, dst_reg, src_reg);
1139		break;
1140	case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
1141		/* nilf %dst,imm */
1142		EMIT6_IMM(0xc00b0000, dst_reg, imm);
1143		EMIT_ZERO(dst_reg);
1144		break;
1145	case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
1146		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
1147			/* ng %dst,<d(imm)>(%l) */
1148			EMIT6_DISP_LH(0xe3000000, 0x0080,
1149				      dst_reg, REG_0, REG_L,
1150				      EMIT_CONST_U64(imm));
1151		} else {
1152			/* lgrl %w0,imm */
1153			EMIT6_PCREL_RILB(0xc4080000, REG_W0,
1154					 _EMIT_CONST_U64(imm));
1155			jit->seen |= SEEN_LITERAL;
1156			/* ngr %dst,%w0 */
1157			EMIT4(0xb9800000, dst_reg, REG_W0);
1158		}
1159		break;
1160	/*
1161	 * BPF_OR
1162	 */
1163	case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
1164		/* or %dst,%src */
1165		EMIT2(0x1600, dst_reg, src_reg);
1166		EMIT_ZERO(dst_reg);
1167		break;
1168	case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
1169		/* ogr %dst,%src */
1170		EMIT4(0xb9810000, dst_reg, src_reg);
1171		break;
1172	case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
1173		/* oilf %dst,imm */
1174		EMIT6_IMM(0xc00d0000, dst_reg, imm);
1175		EMIT_ZERO(dst_reg);
1176		break;
1177	case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
1178		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
1179			/* og %dst,<d(imm)>(%l) */
1180			EMIT6_DISP_LH(0xe3000000, 0x0081,
1181				      dst_reg, REG_0, REG_L,
1182				      EMIT_CONST_U64(imm));
1183		} else {
1184			/* lgrl %w0,imm */
1185			EMIT6_PCREL_RILB(0xc4080000, REG_W0,
1186					 _EMIT_CONST_U64(imm));
1187			jit->seen |= SEEN_LITERAL;
1188			/* ogr %dst,%w0 */
1189			EMIT4(0xb9810000, dst_reg, REG_W0);
1190		}
1191		break;
1192	/*
1193	 * BPF_XOR
1194	 */
1195	case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
1196		/* xr %dst,%src */
1197		EMIT2(0x1700, dst_reg, src_reg);
1198		EMIT_ZERO(dst_reg);
1199		break;
1200	case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
1201		/* xgr %dst,%src */
1202		EMIT4(0xb9820000, dst_reg, src_reg);
1203		break;
1204	case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
1205		if (imm != 0) {
1206			/* xilf %dst,imm */
1207			EMIT6_IMM(0xc0070000, dst_reg, imm);
1208		}
1209		EMIT_ZERO(dst_reg);
1210		break;
1211	case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
1212		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
1213			/* xg %dst,<d(imm)>(%l) */
1214			EMIT6_DISP_LH(0xe3000000, 0x0082,
1215				      dst_reg, REG_0, REG_L,
1216				      EMIT_CONST_U64(imm));
1217		} else {
1218			/* lgrl %w0,imm */
1219			EMIT6_PCREL_RILB(0xc4080000, REG_W0,
1220					 _EMIT_CONST_U64(imm));
1221			jit->seen |= SEEN_LITERAL;
1222			/* xgr %dst,%w0 */
1223			EMIT4(0xb9820000, dst_reg, REG_W0);
1224		}
1225		break;
1226	/*
1227	 * BPF_LSH
1228	 */
1229	case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
1230		/* sll %dst,0(%src) */
1231		EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
1232		EMIT_ZERO(dst_reg);
1233		break;
1234	case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
1235		/* sllg %dst,%dst,0(%src) */
1236		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
1237		break;
1238	case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
1239		if (imm != 0) {
1240			/* sll %dst,imm(%r0) */
1241			EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
1242		}
1243		EMIT_ZERO(dst_reg);
1244		break;
1245	case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
1246		if (imm == 0)
1247			break;
1248		/* sllg %dst,%dst,imm(%r0) */
1249		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
1250		break;
1251	/*
1252	 * BPF_RSH
1253	 */
1254	case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
1255		/* srl %dst,0(%src) */
1256		EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
1257		EMIT_ZERO(dst_reg);
1258		break;
1259	case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
1260		/* srlg %dst,%dst,0(%src) */
1261		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
1262		break;
1263	case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
1264		if (imm != 0) {
1265			/* srl %dst,imm(%r0) */
1266			EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
1267		}
1268		EMIT_ZERO(dst_reg);
1269		break;
1270	case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
1271		if (imm == 0)
1272			break;
1273		/* srlg %dst,%dst,imm(%r0) */
1274		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
1275		break;
1276	/*
1277	 * BPF_ARSH
1278	 */
1279	case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */
1280		/* sra %dst,%dst,0(%src) */
1281		EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0);
1282		EMIT_ZERO(dst_reg);
1283		break;
1284	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
1285		/* srag %dst,%dst,0(%src) */
1286		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
1287		break;
1288	case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
1289		if (imm != 0) {
1290			/* sra %dst,imm(%r0) */
1291			EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
1292		}
1293		EMIT_ZERO(dst_reg);
1294		break;
1295	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
1296		if (imm == 0)
1297			break;
1298		/* srag %dst,%dst,imm(%r0) */
1299		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
1300		break;
1301	/*
1302	 * BPF_NEG
1303	 */
1304	case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
1305		/* lcr %dst,%dst */
1306		EMIT2(0x1300, dst_reg, dst_reg);
1307		EMIT_ZERO(dst_reg);
1308		break;
1309	case BPF_ALU64 | BPF_NEG: /* dst = -dst */
1310		/* lcgr %dst,%dst */
1311		EMIT4(0xb9030000, dst_reg, dst_reg);
1312		break;
1313	/*
1314	 * BPF_FROM_BE/LE
1315	 */
1316	case BPF_ALU | BPF_END | BPF_FROM_BE:
1317		/* s390 is big endian, therefore only clear high order bytes */
1318		switch (imm) {
1319		case 16: /* dst = (u16) cpu_to_be16(dst) */
1320			/* llghr %dst,%dst */
1321			EMIT4(0xb9850000, dst_reg, dst_reg);
1322			if (insn_is_zext(&insn[1]))
1323				insn_count = 2;
1324			break;
1325		case 32: /* dst = (u32) cpu_to_be32(dst) */
1326			if (!fp->aux->verifier_zext)
1327				/* llgfr %dst,%dst */
1328				EMIT4(0xb9160000, dst_reg, dst_reg);
1329			break;
1330		case 64: /* dst = (u64) cpu_to_be64(dst) */
1331			break;
1332		}
1333		break;
1334	case BPF_ALU | BPF_END | BPF_FROM_LE:
1335	case BPF_ALU64 | BPF_END | BPF_FROM_LE:
1336		switch (imm) {
1337		case 16: /* dst = (u16) cpu_to_le16(dst) */
1338			/* lrvr %dst,%dst */
1339			EMIT4(0xb91f0000, dst_reg, dst_reg);
1340			/* srl %dst,16(%r0) */
1341			EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
1342			/* llghr %dst,%dst */
1343			EMIT4(0xb9850000, dst_reg, dst_reg);
1344			if (insn_is_zext(&insn[1]))
1345				insn_count = 2;
1346			break;
1347		case 32: /* dst = (u32) cpu_to_le32(dst) */
1348			/* lrvr %dst,%dst */
1349			EMIT4(0xb91f0000, dst_reg, dst_reg);
1350			if (!fp->aux->verifier_zext)
1351				/* llgfr %dst,%dst */
1352				EMIT4(0xb9160000, dst_reg, dst_reg);
1353			break;
1354		case 64: /* dst = (u64) cpu_to_le64(dst) */
1355			/* lrvgr %dst,%dst */
1356			EMIT4(0xb90f0000, dst_reg, dst_reg);
1357			break;
1358		}
1359		break;
1360	/*
1361	 * BPF_NOSPEC (speculation barrier)
1362	 */
1363	case BPF_ST | BPF_NOSPEC:
1364		break;
1365	/*
1366	 * BPF_ST(X)
1367	 */
1368	case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
1369		/* stcy %src,off(%dst) */
1370		EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
1371		jit->seen |= SEEN_MEM;
1372		break;
1373	case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
1374		/* sthy %src,off(%dst) */
1375		EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
1376		jit->seen |= SEEN_MEM;
1377		break;
1378	case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
1379		/* sty %src,off(%dst) */
1380		EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
1381		jit->seen |= SEEN_MEM;
1382		break;
1383	case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
1384		/* stg %src,off(%dst) */
1385		EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
1386		jit->seen |= SEEN_MEM;
1387		break;
1388	case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
1389		/* lhi %w0,imm */
1390		EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
1391		/* stcy %w0,off(dst) */
1392		EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
1393		jit->seen |= SEEN_MEM;
1394		break;
1395	case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
1396		/* lhi %w0,imm */
1397		EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
1398		/* sthy %w0,off(dst) */
1399		EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
1400		jit->seen |= SEEN_MEM;
1401		break;
1402	case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
1403		/* llilf %w0,imm  */
1404		EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
1405		/* sty %w0,off(%dst) */
1406		EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
1407		jit->seen |= SEEN_MEM;
1408		break;
1409	case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
1410		/* lgfi %w0,imm */
1411		EMIT6_IMM(0xc0010000, REG_W0, imm);
1412		/* stg %w0,off(%dst) */
1413		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
1414		jit->seen |= SEEN_MEM;
1415		break;
1416	/*
1417	 * BPF_ATOMIC
1418	 */
1419	case BPF_STX | BPF_ATOMIC | BPF_DW:
1420	case BPF_STX | BPF_ATOMIC | BPF_W:
1421	{
1422		bool is32 = BPF_SIZE(insn->code) == BPF_W;
1423
1424		switch (insn->imm) {
1425/* {op32|op64} {%w0|%src},%src,off(%dst) */
1426#define EMIT_ATOMIC(op32, op64) do {					\
1427	EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64),		\
1428		      (insn->imm & BPF_FETCH) ? src_reg : REG_W0,	\
1429		      src_reg, dst_reg, off);				\
1430	if (is32 && (insn->imm & BPF_FETCH))				\
1431		EMIT_ZERO(src_reg);					\
1432} while (0)
1433		case BPF_ADD:
1434		case BPF_ADD | BPF_FETCH:
1435			/* {laal|laalg} */
1436			EMIT_ATOMIC(0x00fa, 0x00ea);
1437			break;
1438		case BPF_AND:
1439		case BPF_AND | BPF_FETCH:
1440			/* {lan|lang} */
1441			EMIT_ATOMIC(0x00f4, 0x00e4);
1442			break;
1443		case BPF_OR:
1444		case BPF_OR | BPF_FETCH:
1445			/* {lao|laog} */
1446			EMIT_ATOMIC(0x00f6, 0x00e6);
1447			break;
1448		case BPF_XOR:
1449		case BPF_XOR | BPF_FETCH:
1450			/* {lax|laxg} */
1451			EMIT_ATOMIC(0x00f7, 0x00e7);
1452			break;
1453#undef EMIT_ATOMIC
1454		case BPF_XCHG:
1455			/* {ly|lg} %w0,off(%dst) */
1456			EMIT6_DISP_LH(0xe3000000,
1457				      is32 ? 0x0058 : 0x0004, REG_W0, REG_0,
1458				      dst_reg, off);
1459			/* 0: {csy|csg} %w0,%src,off(%dst) */
1460			EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
1461				      REG_W0, src_reg, dst_reg, off);
1462			/* brc 4,0b */
1463			EMIT4_PCREL_RIC(0xa7040000, 4, jit->prg - 6);
1464			/* {llgfr|lgr} %src,%w0 */
1465			EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0);
1466			if (is32 && insn_is_zext(&insn[1]))
1467				insn_count = 2;
1468			break;
1469		case BPF_CMPXCHG:
1470			/* 0: {csy|csg} %b0,%src,off(%dst) */
1471			EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
1472				      BPF_REG_0, src_reg, dst_reg, off);
1473			break;
1474		default:
1475			pr_err("Unknown atomic operation %02x\n", insn->imm);
1476			return -1;
1477		}
1478
1479		jit->seen |= SEEN_MEM;
1480		break;
1481	}
1482	/*
1483	 * BPF_LDX
1484	 */
1485	case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
1486	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1487		/* llgc %dst,0(off,%src) */
1488		EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
1489		jit->seen |= SEEN_MEM;
1490		if (insn_is_zext(&insn[1]))
1491			insn_count = 2;
1492		break;
1493	case BPF_LDX | BPF_MEMSX | BPF_B: /* dst = *(s8 *)(ul) (src + off) */
1494	case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
1495		/* lgb %dst,0(off,%src) */
1496		EMIT6_DISP_LH(0xe3000000, 0x0077, dst_reg, src_reg, REG_0, off);
1497		jit->seen |= SEEN_MEM;
1498		break;
1499	case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
1500	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1501		/* llgh %dst,0(off,%src) */
1502		EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
1503		jit->seen |= SEEN_MEM;
1504		if (insn_is_zext(&insn[1]))
1505			insn_count = 2;
1506		break;
1507	case BPF_LDX | BPF_MEMSX | BPF_H: /* dst = *(s16 *)(ul) (src + off) */
1508	case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
1509		/* lgh %dst,0(off,%src) */
1510		EMIT6_DISP_LH(0xe3000000, 0x0015, dst_reg, src_reg, REG_0, off);
1511		jit->seen |= SEEN_MEM;
1512		break;
1513	case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
1514	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1515		/* llgf %dst,off(%src) */
1516		jit->seen |= SEEN_MEM;
1517		EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
1518		if (insn_is_zext(&insn[1]))
1519			insn_count = 2;
1520		break;
1521	case BPF_LDX | BPF_MEMSX | BPF_W: /* dst = *(s32 *)(ul) (src + off) */
1522	case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
1523		/* lgf %dst,off(%src) */
1524		jit->seen |= SEEN_MEM;
1525		EMIT6_DISP_LH(0xe3000000, 0x0014, dst_reg, src_reg, REG_0, off);
1526		break;
1527	case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
1528	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1529		/* lg %dst,0(off,%src) */
1530		jit->seen |= SEEN_MEM;
1531		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
1532		break;
1533	/*
1534	 * BPF_JMP / CALL
1535	 */
1536	case BPF_JMP | BPF_CALL:
1537	{
1538		const struct btf_func_model *m;
1539		bool func_addr_fixed;
1540		int j, ret;
1541		u64 func;
1542
1543		ret = bpf_jit_get_func_addr(fp, insn, extra_pass,
1544					    &func, &func_addr_fixed);
1545		if (ret < 0)
1546			return -1;
1547
1548		REG_SET_SEEN(BPF_REG_5);
1549		jit->seen |= SEEN_FUNC;
1550		/*
1551		 * Copy the tail call counter to where the callee expects it.
1552		 *
1553		 * Note 1: The callee can increment the tail call counter, but
1554		 * we do not load it back, since the x86 JIT does not do this
1555		 * either.
1556		 *
1557		 * Note 2: We assume that the verifier does not let us call the
1558		 * main program, which clears the tail call counter on entry.
1559		 */
1560		/* mvc STK_OFF_TCCNT(4,%r15),N(%r15) */
1561		_EMIT6(0xd203f000 | STK_OFF_TCCNT,
1562		       0xf000 | (STK_OFF_TCCNT + STK_OFF + stack_depth));
1563
1564		/* Sign-extend the kfunc arguments. */
1565		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
1566			m = bpf_jit_find_kfunc_model(fp, insn);
1567			if (!m)
1568				return -1;
1569
1570			for (j = 0; j < m->nr_args; j++) {
1571				if (sign_extend(jit, BPF_REG_1 + j,
1572						m->arg_size[j],
1573						m->arg_flags[j]))
1574					return -1;
1575			}
1576		}
1577
1578		/* lgrl %w1,func */
1579		EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
1580		/* %r1() */
1581		call_r1(jit);
1582		/* lgr %b0,%r2: load return value into %b0 */
1583		EMIT4(0xb9040000, BPF_REG_0, REG_2);
 
 
 
 
 
 
 
1584		break;
1585	}
1586	case BPF_JMP | BPF_TAIL_CALL: {
1587		int patch_1_clrj, patch_2_clij, patch_3_brc;
1588
1589		/*
1590		 * Implicit input:
1591		 *  B1: pointer to ctx
1592		 *  B2: pointer to bpf_array
1593		 *  B3: index in bpf_array
1594		 *
 
 
 
1595		 * if (index >= array->map.max_entries)
1596		 *         goto out;
1597		 */
1598
1599		/* llgf %w1,map.max_entries(%b2) */
1600		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
1601			      offsetof(struct bpf_array, map.max_entries));
1602		/* if ((u32)%b3 >= (u32)%w1) goto out; */
1603		/* clrj %b3,%w1,0xa,out */
1604		patch_1_clrj = jit->prg;
1605		EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa,
1606				 jit->prg);
1607
1608		/*
1609		 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
1610		 *         goto out;
1611		 */
1612
1613		if (jit->seen & SEEN_STACK)
1614			off = STK_OFF_TCCNT + STK_OFF + stack_depth;
1615		else
1616			off = STK_OFF_TCCNT;
1617		/* lhi %w0,1 */
1618		EMIT4_IMM(0xa7080000, REG_W0, 1);
1619		/* laal %w1,%w0,off(%r15) */
1620		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
1621		/* clij %w1,MAX_TAIL_CALL_CNT-1,0x2,out */
1622		patch_2_clij = jit->prg;
1623		EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT - 1,
1624				 2, jit->prg);
1625
1626		/*
1627		 * prog = array->ptrs[index];
1628		 * if (prog == NULL)
1629		 *         goto out;
1630		 */
1631
1632		/* llgfr %r1,%b3: %r1 = (u32) index */
1633		EMIT4(0xb9160000, REG_1, BPF_REG_3);
1634		/* sllg %r1,%r1,3: %r1 *= 8 */
1635		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
1636		/* ltg %r1,prog(%b2,%r1) */
1637		EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
1638			      REG_1, offsetof(struct bpf_array, ptrs));
1639		/* brc 0x8,out */
1640		patch_3_brc = jit->prg;
1641		EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg);
1642
1643		/*
1644		 * Restore registers before calling function
1645		 */
1646		save_restore_regs(jit, REGS_RESTORE, stack_depth);
1647
1648		/*
1649		 * goto *(prog->bpf_func + tail_call_start);
1650		 */
1651
1652		/* lg %r1,bpf_func(%r1) */
1653		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
1654			      offsetof(struct bpf_prog, bpf_func));
1655		if (nospec_uses_trampoline()) {
1656			jit->seen |= SEEN_FUNC;
1657			/* aghi %r1,tail_call_start */
1658			EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start);
1659			/* brcl 0xf,__s390_indirect_jump_r1 */
1660			EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->r1_thunk_ip);
1661		} else {
1662			/* bc 0xf,tail_call_start(%r1) */
1663			_EMIT4(0x47f01000 + jit->tail_call_start);
1664		}
1665		/* out: */
1666		if (jit->prg_buf) {
1667			*(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
1668				(jit->prg - patch_1_clrj) >> 1;
1669			*(u16 *)(jit->prg_buf + patch_2_clij + 2) =
1670				(jit->prg - patch_2_clij) >> 1;
1671			*(u16 *)(jit->prg_buf + patch_3_brc + 2) =
1672				(jit->prg - patch_3_brc) >> 1;
1673		}
1674		break;
1675	}
1676	case BPF_JMP | BPF_EXIT: /* return b0 */
1677		last = (i == fp->len - 1) ? 1 : 0;
1678		if (last)
1679			break;
1680		if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip))
1681			/* brc 0xf, <exit> */
1682			EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip);
1683		else
1684			/* brcl 0xf, <exit> */
1685			EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip);
1686		break;
1687	/*
1688	 * Branch relative (number of skipped instructions) to offset on
1689	 * condition.
1690	 *
1691	 * Condition code to mask mapping:
1692	 *
1693	 * CC | Description	   | Mask
1694	 * ------------------------------
1695	 * 0  | Operands equal	   |	8
1696	 * 1  | First operand low  |	4
1697	 * 2  | First operand high |	2
1698	 * 3  | Unused		   |	1
1699	 *
1700	 * For s390x relative branches: ip = ip + off_bytes
1701	 * For BPF relative branches:	insn = insn + off_insns + 1
1702	 *
1703	 * For example for s390x with offset 0 we jump to the branch
1704	 * instruction itself (loop) and for BPF with offset 0 we
1705	 * branch to the instruction behind the branch.
1706	 */
1707	case BPF_JMP32 | BPF_JA: /* if (true) */
1708		branch_oc_off = imm;
1709		fallthrough;
1710	case BPF_JMP | BPF_JA: /* if (true) */
1711		mask = 0xf000; /* j */
1712		goto branch_oc;
1713	case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
1714	case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */
1715		mask = 0x2000; /* jh */
1716		goto branch_ks;
1717	case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
1718	case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */
1719		mask = 0x4000; /* jl */
1720		goto branch_ks;
1721	case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
1722	case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */
1723		mask = 0xa000; /* jhe */
1724		goto branch_ks;
1725	case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
1726	case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */
1727		mask = 0xc000; /* jle */
1728		goto branch_ks;
1729	case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
1730	case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */
1731		mask = 0x2000; /* jh */
1732		goto branch_ku;
1733	case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
1734	case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */
1735		mask = 0x4000; /* jl */
1736		goto branch_ku;
1737	case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
1738	case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */
1739		mask = 0xa000; /* jhe */
1740		goto branch_ku;
1741	case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
1742	case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */
1743		mask = 0xc000; /* jle */
1744		goto branch_ku;
1745	case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
1746	case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */
1747		mask = 0x7000; /* jne */
1748		goto branch_ku;
1749	case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
1750	case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */
1751		mask = 0x8000; /* je */
1752		goto branch_ku;
1753	case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
1754	case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */
1755		mask = 0x7000; /* jnz */
1756		if (BPF_CLASS(insn->code) == BPF_JMP32) {
1757			/* llilf %w1,imm (load zero extend imm) */
1758			EMIT6_IMM(0xc00f0000, REG_W1, imm);
1759			/* nr %w1,%dst */
1760			EMIT2(0x1400, REG_W1, dst_reg);
1761		} else {
1762			/* lgfi %w1,imm (load sign extend imm) */
1763			EMIT6_IMM(0xc0010000, REG_W1, imm);
1764			/* ngr %w1,%dst */
1765			EMIT4(0xb9800000, REG_W1, dst_reg);
1766		}
1767		goto branch_oc;
1768
1769	case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
1770	case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */
1771		mask = 0x2000; /* jh */
1772		goto branch_xs;
1773	case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
1774	case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */
1775		mask = 0x4000; /* jl */
1776		goto branch_xs;
1777	case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
1778	case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */
1779		mask = 0xa000; /* jhe */
1780		goto branch_xs;
1781	case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
1782	case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */
1783		mask = 0xc000; /* jle */
1784		goto branch_xs;
1785	case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
1786	case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */
1787		mask = 0x2000; /* jh */
1788		goto branch_xu;
1789	case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
1790	case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */
1791		mask = 0x4000; /* jl */
1792		goto branch_xu;
1793	case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
1794	case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */
1795		mask = 0xa000; /* jhe */
1796		goto branch_xu;
1797	case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
1798	case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */
1799		mask = 0xc000; /* jle */
1800		goto branch_xu;
1801	case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
1802	case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */
1803		mask = 0x7000; /* jne */
1804		goto branch_xu;
1805	case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
1806	case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */
1807		mask = 0x8000; /* je */
1808		goto branch_xu;
1809	case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
1810	case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */
1811	{
1812		bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1813
1814		mask = 0x7000; /* jnz */
1815		/* nrk or ngrk %w1,%dst,%src */
1816		EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000),
1817			  REG_W1, dst_reg, src_reg);
1818		goto branch_oc;
1819branch_ks:
1820		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1821		/* cfi or cgfi %dst,imm */
1822		EMIT6_IMM(is_jmp32 ? 0xc20d0000 : 0xc20c0000,
1823			  dst_reg, imm);
1824		if (!is_first_pass(jit) &&
1825		    can_use_rel(jit, addrs[i + off + 1])) {
1826			/* brc mask,off */
1827			EMIT4_PCREL_RIC(0xa7040000,
1828					mask >> 12, addrs[i + off + 1]);
1829		} else {
1830			/* brcl mask,off */
1831			EMIT6_PCREL_RILC(0xc0040000,
1832					 mask >> 12, addrs[i + off + 1]);
1833		}
1834		break;
1835branch_ku:
1836		/* lgfi %w1,imm (load sign extend imm) */
1837		src_reg = REG_1;
1838		EMIT6_IMM(0xc0010000, src_reg, imm);
1839		goto branch_xu;
 
1840branch_xs:
1841		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1842		if (!is_first_pass(jit) &&
1843		    can_use_rel(jit, addrs[i + off + 1])) {
1844			/* crj or cgrj %dst,%src,mask,off */
1845			EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
1846				    dst_reg, src_reg, i, off, mask);
1847		} else {
1848			/* cr or cgr %dst,%src */
1849			if (is_jmp32)
1850				EMIT2(0x1900, dst_reg, src_reg);
1851			else
1852				EMIT4(0xb9200000, dst_reg, src_reg);
1853			/* brcl mask,off */
1854			EMIT6_PCREL_RILC(0xc0040000,
1855					 mask >> 12, addrs[i + off + 1]);
1856		}
1857		break;
1858branch_xu:
1859		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1860		if (!is_first_pass(jit) &&
1861		    can_use_rel(jit, addrs[i + off + 1])) {
1862			/* clrj or clgrj %dst,%src,mask,off */
1863			EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
1864				    dst_reg, src_reg, i, off, mask);
1865		} else {
1866			/* clr or clgr %dst,%src */
1867			if (is_jmp32)
1868				EMIT2(0x1500, dst_reg, src_reg);
1869			else
1870				EMIT4(0xb9210000, dst_reg, src_reg);
1871			/* brcl mask,off */
1872			EMIT6_PCREL_RILC(0xc0040000,
1873					 mask >> 12, addrs[i + off + 1]);
1874		}
1875		break;
1876branch_oc:
1877		if (!is_first_pass(jit) &&
1878		    can_use_rel(jit, addrs[i + branch_oc_off + 1])) {
1879			/* brc mask,off */
1880			EMIT4_PCREL_RIC(0xa7040000,
1881					mask >> 12,
1882					addrs[i + branch_oc_off + 1]);
1883		} else {
1884			/* brcl mask,off */
1885			EMIT6_PCREL_RILC(0xc0040000,
1886					 mask >> 12,
1887					 addrs[i + branch_oc_off + 1]);
1888		}
1889		break;
1890	}
1891	default: /* too complex, give up */
1892		pr_err("Unknown opcode %02x\n", insn->code);
1893		return -1;
1894	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1895
1896	if (probe_prg != -1) {
1897		/*
1898		 * Handlers of certain exceptions leave psw.addr pointing to
1899		 * the instruction directly after the failing one. Therefore,
1900		 * create two exception table entries and also add a nop in
1901		 * case two probing instructions come directly after each
1902		 * other.
 
 
 
 
 
 
 
1903		 */
1904		nop_prg = jit->prg;
1905		/* bcr 0,%0 */
1906		_EMIT2(0x0700);
1907		err = bpf_jit_probe_mem(jit, fp, probe_prg, nop_prg);
1908		if (err < 0)
1909			return err;
1910	}
1911
1912	return insn_count;
1913}
1914
1915/*
1916 * Return whether new i-th instruction address does not violate any invariant
1917 */
1918static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i)
1919{
1920	/* On the first pass anything goes */
1921	if (is_first_pass(jit))
1922		return true;
1923
1924	/* The codegen pass must not change anything */
1925	if (is_codegen_pass(jit))
1926		return jit->addrs[i] == jit->prg;
1927
1928	/* Passes in between must not increase code size */
1929	return jit->addrs[i] >= jit->prg;
1930}
1931
1932/*
1933 * Update the address of i-th instruction
1934 */
1935static int bpf_set_addr(struct bpf_jit *jit, int i)
1936{
1937	int delta;
1938
1939	if (is_codegen_pass(jit)) {
1940		delta = jit->prg - jit->addrs[i];
1941		if (delta < 0)
1942			bpf_skip(jit, -delta);
1943	}
1944	if (WARN_ON_ONCE(!bpf_is_new_addr_sane(jit, i)))
1945		return -1;
1946	jit->addrs[i] = jit->prg;
1947	return 0;
1948}
1949
1950/*
1951 * Compile eBPF program into s390x code
1952 */
1953static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
1954			bool extra_pass, u32 stack_depth)
1955{
1956	int i, insn_count, lit32_size, lit64_size;
1957
1958	jit->lit32 = jit->lit32_start;
1959	jit->lit64 = jit->lit64_start;
1960	jit->prg = 0;
1961	jit->excnt = 0;
1962
1963	bpf_jit_prologue(jit, fp, stack_depth);
1964	if (bpf_set_addr(jit, 0) < 0)
1965		return -1;
1966	for (i = 0; i < fp->len; i += insn_count) {
1967		insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth);
1968		if (insn_count < 0)
1969			return -1;
1970		/* Next instruction address */
1971		if (bpf_set_addr(jit, i + insn_count) < 0)
1972			return -1;
1973	}
1974	bpf_jit_epilogue(jit, stack_depth);
1975
1976	lit32_size = jit->lit32 - jit->lit32_start;
1977	lit64_size = jit->lit64 - jit->lit64_start;
1978	jit->lit32_start = jit->prg;
1979	if (lit32_size)
1980		jit->lit32_start = ALIGN(jit->lit32_start, 4);
1981	jit->lit64_start = jit->lit32_start + lit32_size;
1982	if (lit64_size)
1983		jit->lit64_start = ALIGN(jit->lit64_start, 8);
1984	jit->size = jit->lit64_start + lit64_size;
1985	jit->size_prg = jit->prg;
1986
1987	if (WARN_ON_ONCE(fp->aux->extable &&
1988			 jit->excnt != fp->aux->num_exentries))
1989		/* Verifier bug - too many entries. */
1990		return -1;
1991
1992	return 0;
1993}
1994
/*
 * Ask the verifier to insert explicit zero-extension instructions after
 * 32-bit operations; the emitters in bpf_jit_insn() then skip their own
 * zero-extension where insn_is_zext() detects such an instruction.
 */
bool bpf_jit_needs_zext(void)
{
	return true;
}
1999
/*
 * JIT state stashed in fp->aux->jit_data so that a later extra pass can
 * resume compilation with the previously computed context.
 */
struct s390_jit_data {
	struct bpf_binary_header *header;	/* Allocated JIT image */
	struct bpf_jit ctx;			/* JIT context of the last pass */
	int pass;				/* Number of the last completed pass */
};
2005
2006static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
2007					       struct bpf_prog *fp)
2008{
2009	struct bpf_binary_header *header;
2010	u32 extable_size;
2011	u32 code_size;
2012
2013	/* We need two entries per insn. */
2014	fp->aux->num_exentries *= 2;
2015
2016	code_size = roundup(jit->size,
2017			    __alignof__(struct exception_table_entry));
2018	extable_size = fp->aux->num_exentries *
2019		sizeof(struct exception_table_entry);
2020	header = bpf_jit_binary_alloc(code_size + extable_size, &jit->prg_buf,
2021				      8, jit_fill_hole);
2022	if (!header)
2023		return NULL;
2024	fp->aux->extable = (struct exception_table_entry *)
2025		(jit->prg_buf + code_size);
2026	return header;
2027}
2028
/*
 * Compile eBPF program "fp".
 *
 * Runs up to three analysis passes (clobbered registers, program size /
 * addrs array), then allocates the image and runs a final generation
 * pass. For subprograms (fp->is_func) the JIT context is cached in
 * fp->aux->jit_data so a later "extra pass" can patch call addresses.
 *
 * Returns the JITed program, or the original "fp" on failure (callers
 * then fall back to the interpreter).
 */
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 stack_depth = round_up(fp->aux->stack_depth, 8);
	struct bpf_prog *tmp, *orig_fp = fp;
	struct bpf_binary_header *header;
	struct s390_jit_data *jit_data;
	bool tmp_blinded = false;
	bool extra_pass = false;
	struct bpf_jit jit;
	int pass;

	/* Sanity check: the PLT template must have the expected size. */
	if (WARN_ON_ONCE(bpf_plt_end - bpf_plt != BPF_PLT_SIZE))
		return orig_fp;

	if (!fp->jit_requested)
		return orig_fp;

	tmp = bpf_jit_blind_constants(fp);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_fp;
	if (tmp != fp) {
		tmp_blinded = true;
		fp = tmp;
	}

	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = orig_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.addrs) {
		/* Extra pass: resume from the cached context. */
		jit = jit_data->ctx;
		header = jit_data->header;
		extra_pass = true;
		pass = jit_data->pass + 1;
		goto skip_init_ctx;
	}

	memset(&jit, 0, sizeof(jit));
	/* One extra slot so addrs[fp->len] holds the end-of-program offset. */
	jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
	if (jit.addrs == NULL) {
		fp = orig_fp;
		goto free_addrs;
	}
	/*
	 * Three initial passes:
	 *   - 1/2: Determine clobbered registers
	 *   - 3:   Calculate program size and addrs array
	 */
	for (pass = 1; pass <= 3; pass++) {
		if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
			fp = orig_fp;
			goto free_addrs;
		}
	}
	/*
	 * Final pass: Allocate and generate program
	 */
	header = bpf_jit_alloc(&jit, fp);
	if (!header) {
		fp = orig_fp;
		goto free_addrs;
	}
skip_init_ctx:
	if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
		bpf_jit_binary_free(header);
		fp = orig_fp;
		goto free_addrs;
	}
	if (bpf_jit_enable > 1) {
		bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
		print_fn_code(jit.prg_buf, jit.size_prg);
	}
	if (!fp->is_func || extra_pass) {
		/* Image is final: make it read-only and executable. */
		bpf_jit_binary_lock_ro(header);
	} else {
		/* Subprogram first pass: stash state for the extra pass. */
		jit_data->header = header;
		jit_data->ctx = jit;
		jit_data->pass = pass;
	}
	fp->bpf_func = (void *) jit.prg_buf;
	fp->jited = 1;
	fp->jited_len = jit.size;

	if (!fp->is_func || extra_pass) {
		bpf_prog_fill_jited_linfo(fp, jit.addrs + 1);
free_addrs:
		kvfree(jit.addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(fp, fp == orig_fp ?
					   tmp : orig_fp);
	return fp;
}
2137
/* The s390 JIT can emit calls to kernel functions (kfuncs). */
bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}
2142
/* kfunc targets may live anywhere in the kernel address space. */
bool bpf_jit_supports_far_kfunc_call(void)
{
	return true;
}
2147
/*
 * Patch the target of a JITed branch or call at "ip".
 *
 * The instruction at "ip" is expected to be a 6-byte "brcl" whose mask
 * encodes enabled (0xf) vs. disabled (0) state; old_addr/new_addr NULL
 * means disabled. Unless the branch already points directly at the
 * destination, the call goes through a PLT, whose stored target address
 * is verified against old_addr and rewritten to new_addr.
 *
 * Returns 0 on success, -EINVAL if the code does not look as expected,
 * or a negative error from reading kernel memory.
 */
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	struct {
		u16 opc;
		s32 disp;
	} __packed insn;
	char expected_plt[BPF_PLT_SIZE];
	char current_plt[BPF_PLT_SIZE];
	char new_plt[BPF_PLT_SIZE];
	char *plt;
	char *ret;
	int err;

	/* Verify the branch to be patched. */
	err = copy_from_kernel_nofault(&insn, ip, sizeof(insn));
	if (err < 0)
		return err;
	/* Expect "brcl 0xf,..." when enabled, "brcl 0,..." when disabled. */
	if (insn.opc != (0xc004 | (old_addr ? 0xf0 : 0)))
		return -EINVAL;

	if (t == BPF_MOD_JUMP &&
	    insn.disp == ((char *)new_addr - (char *)ip) >> 1) {
		/*
		 * The branch already points to the destination,
		 * there is no PLT.
		 */
	} else {
		/* Verify the PLT. */
		plt = (char *)ip + (insn.disp << 1);
		err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE);
		if (err < 0)
			return err;
		/* Return address is right behind the 6-byte brcl. */
		ret = (char *)ip + 6;
		bpf_jit_plt(expected_plt, ret, old_addr);
		if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE))
			return -EINVAL;
		/* Adjust the call address. */
		bpf_jit_plt(new_plt, ret, new_addr);
		s390_kernel_write(plt + (bpf_plt_target - bpf_plt),
				  new_plt + (bpf_plt_target - bpf_plt),
				  sizeof(void *));
	}

	/* Adjust the mask of the branch. */
	insn.opc = 0xc004 | (new_addr ? 0xf0 : 0);
	s390_kernel_write((char *)ip + 1, (char *)&insn.opc + 1, 1);

	/* Make the new code visible to the other CPUs. */
	text_poke_sync_lock();

	return 0;
}
2201
/*
 * Trampoline JIT state: the common JIT context plus the layout of the
 * trampoline's stack frame. All *_off fields are byte offsets relative
 * to %r15 after the trampoline prologue has adjusted it.
 */
struct bpf_tramp_jit {
	struct bpf_jit common;
	int orig_stack_args_off;/* Offset of arguments placed on stack by the
				 * func_addr's original caller
				 */
	int stack_size;		/* Trampoline stack size */
	int backchain_off;	/* Offset of backchain */
	int stack_args_off;	/* Offset of stack arguments for calling
				 * func_addr, has to be at the top
				 */
	int reg_args_off;	/* Offset of register arguments for calling
				 * func_addr
				 */
	int ip_off;		/* For bpf_get_func_ip(), has to be at
				 * (ctx - 16)
				 */
	int arg_cnt_off;	/* For bpf_get_func_arg_cnt(), has to be at
				 * (ctx - 8)
				 */
	int bpf_args_off;	/* Offset of BPF_PROG context, which consists
				 * of BPF arguments followed by return value
				 */
	int retval_off;		/* Offset of return value (see above) */
	int r7_r8_off;		/* Offset of saved %r7 and %r8, which are used
				 * for __bpf_prog_enter() return value and
				 * func_addr respectively
				 */
	int run_ctx_off;	/* Offset of struct bpf_tramp_run_ctx */
	int tccnt_off;		/* Offset of saved tailcall counter */
	int r14_off;		/* Offset of saved %r14, has to be at the
				 * bottom */
	int do_fexit;		/* do_fexit: label */
};
2235
/* Load the 64-bit immediate "val" into dst_reg in two 32-bit halves. */
static void load_imm64(struct bpf_jit *jit, int dst_reg, u64 val)
{
	/* llihf %dst_reg,val_hi */
	EMIT6_IMM(0xc00e0000, dst_reg, (val >> 32));
	/* oilf %dst_reg,val_lo */
	EMIT6_IMM(0xc00d0000, dst_reg, val);
}
2243
/*
 * Emit trampoline code that runs one attached BPF program:
 * store the link's cookie into run_ctx, call __bpf_prog_enter(), run the
 * program unless enter returned 0, optionally store the (sign-extended)
 * return value, then call __bpf_prog_exit().
 *
 * Returns 0 on success, -1 if sign extension of the return value fails.
 */
static int invoke_bpf_prog(struct bpf_tramp_jit *tjit,
			   const struct btf_func_model *m,
			   struct bpf_tramp_link *tlink, bool save_ret)
{
	struct bpf_jit *jit = &tjit->common;
	int cookie_off = tjit->run_ctx_off +
			 offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
	struct bpf_prog *p = tlink->link.prog;
	int patch;

	/*
	 * run_ctx.cookie = tlink->cookie;
	 */

	/* %r0 = tlink->cookie */
	load_imm64(jit, REG_W0, tlink->cookie);
	/* stg %r0,cookie_off(%r15) */
	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, REG_0, REG_15, cookie_off);

	/*
	 * if ((start = __bpf_prog_enter(p, &run_ctx)) == 0)
	 *         goto skip;
	 */

	/* %r1 = __bpf_prog_enter */
	load_imm64(jit, REG_1, (u64)bpf_trampoline_enter(p));
	/* %r2 = p */
	load_imm64(jit, REG_2, (u64)p);
	/* la %r3,run_ctx_off(%r15) */
	EMIT4_DISP(0x41000000, REG_3, REG_15, tjit->run_ctx_off);
	/* %r1() */
	call_r1(jit);
	/* %r7 holds the "start" timestamp across the program call. */
	/* ltgr %r7,%r2 */
	EMIT4(0xb9020000, REG_7, REG_2);
	/* brcl 8,skip - branch target patched below once known */
	patch = jit->prg;
	EMIT6_PCREL_RILC(0xc0040000, 8, 0);

	/*
	 * retval = bpf_func(args, p->insnsi);
	 */

	/* %r1 = p->bpf_func */
	load_imm64(jit, REG_1, (u64)p->bpf_func);
	/* la %r2,bpf_args_off(%r15) */
	EMIT4_DISP(0x41000000, REG_2, REG_15, tjit->bpf_args_off);
	/* %r3 = p->insnsi - only the interpreter needs the insns */
	if (!p->jited)
		load_imm64(jit, REG_3, (u64)p->insnsi);
	/* %r1() */
	call_r1(jit);
	if (save_ret) {
		if (sign_extend(jit, REG_2, m->ret_size, m->ret_flags))
			return -1;
		/* stg %r2,retval_off(%r15) */
		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
			      tjit->retval_off);
	}

	/* skip: - fix up the forward branch emitted above */
	if (jit->prg_buf)
		*(u32 *)&jit->prg_buf[patch + 2] = (jit->prg - patch) >> 1;

	/*
	 * __bpf_prog_exit(p, start, &run_ctx);
	 */

	/* %r1 = __bpf_prog_exit */
	load_imm64(jit, REG_1, (u64)bpf_trampoline_exit(p));
	/* %r2 = p */
	load_imm64(jit, REG_2, (u64)p);
	/* lgr %r3,%r7 */
	EMIT4(0xb9040000, REG_3, REG_7);
	/* la %r4,run_ctx_off(%r15) */
	EMIT4_DISP(0x41000000, REG_4, REG_15, tjit->run_ctx_off);
	/* %r1() */
	call_r1(jit);

	return 0;
}
2324
2325static int alloc_stack(struct bpf_tramp_jit *tjit, size_t size)
2326{
2327	int stack_offset = tjit->stack_size;
2328
2329	tjit->stack_size += size;
2330	return stack_offset;
2331}
2332
/* ABI uses %r2 - %r6 for parameter passing. */
#define MAX_NR_REG_ARGS 5

/* The "L" field of the "mvc" instruction is 8 bits. */
#define MAX_MVC_SIZE 256
/* Stack arguments are copied with a single "mvc", limiting their count. */
#define MAX_NR_STACK_ARGS (MAX_MVC_SIZE / sizeof(u64))

/* -mfentry generates a 6-byte nop on s390x. */
#define S390X_PATCH_SIZE 6
2342
/*
 * Generate a BPF trampoline for "func_addr" into tjit->common.
 *
 * When tjit->common.prg_buf is NULL, no code is written and only the
 * size is computed (available afterwards in tjit->common.prg); callers
 * use this for a sizing pass before the real emission pass.
 *
 * Returns 0 on success, -ENOTSUPP for unsupported argument layouts, or
 * -EINVAL if emitting an attached program fails.
 */
static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
					 struct bpf_tramp_jit *tjit,
					 const struct btf_func_model *m,
					 u32 flags,
					 struct bpf_tramp_links *tlinks,
					 void *func_addr)
{
	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
	int nr_bpf_args, nr_reg_args, nr_stack_args;
	struct bpf_jit *jit = &tjit->common;
	int arg, bpf_arg_off;
	int i, j;

	/* Support as many stack arguments as "mvc" instruction can handle. */
	nr_reg_args = min_t(int, m->nr_args, MAX_NR_REG_ARGS);
	nr_stack_args = m->nr_args - nr_reg_args;
	if (nr_stack_args > MAX_NR_STACK_ARGS)
		return -ENOTSUPP;

	/* Return to %r14, since func_addr and %r0 are not available. */
	if ((!func_addr && !(flags & BPF_TRAMP_F_ORIG_STACK)) ||
	    (flags & BPF_TRAMP_F_INDIRECT))
		flags |= BPF_TRAMP_F_SKIP_FRAME;

	/*
	 * Compute how many arguments we need to pass to BPF programs.
	 * BPF ABI mirrors that of x86_64: arguments that are 16 bytes or
	 * smaller are packed into 1 or 2 registers; larger arguments are
	 * passed via pointers.
	 * In s390x ABI, arguments that are 8 bytes or smaller are packed into
	 * a register; larger arguments are passed via pointers.
	 * We need to deal with this difference.
	 */
	nr_bpf_args = 0;
	for (i = 0; i < m->nr_args; i++) {
		if (m->arg_size[i] <= 8)
			nr_bpf_args += 1;
		else if (m->arg_size[i] <= 16)
			nr_bpf_args += 2;
		else
			return -ENOTSUPP;
	}

	/*
	 * Calculate the stack layout.
	 */

	/*
	 * Allocate STACK_FRAME_OVERHEAD bytes for the callees. As the s390x
	 * ABI requires, put our backchain at the end of the allocated memory.
	 */
	tjit->stack_size = STACK_FRAME_OVERHEAD;
	tjit->backchain_off = tjit->stack_size - sizeof(u64);
	tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64));
	tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64));
	tjit->ip_off = alloc_stack(tjit, sizeof(u64));
	tjit->arg_cnt_off = alloc_stack(tjit, sizeof(u64));
	tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64));
	tjit->retval_off = alloc_stack(tjit, sizeof(u64));
	tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64));
	tjit->run_ctx_off = alloc_stack(tjit,
					sizeof(struct bpf_tramp_run_ctx));
	tjit->tccnt_off = alloc_stack(tjit, sizeof(u64));
	tjit->r14_off = alloc_stack(tjit, sizeof(u64) * 2);
	/*
	 * In accordance with the s390x ABI, the caller has allocated
	 * STACK_FRAME_OVERHEAD bytes for us. 8 of them contain the caller's
	 * backchain, and the rest we can use.
	 */
	tjit->stack_size -= STACK_FRAME_OVERHEAD - sizeof(u64);
	tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;

	/* Prologue: create the trampoline stack frame. */

	/* lgr %r1,%r15 */
	EMIT4(0xb9040000, REG_1, REG_15);
	/* aghi %r15,-stack_size */
	EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
	/* stg %r1,backchain_off(%r15) */
	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15,
		      tjit->backchain_off);
	/* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
	_EMIT6(0xd203f000 | tjit->tccnt_off,
	       0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
	/* stmg %r2,%rN,fwd_reg_args_off(%r15) */
	if (nr_reg_args)
		EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2,
			      REG_2 + (nr_reg_args - 1), REG_15,
			      tjit->reg_args_off);
	/* Flatten all arguments into the BPF_PROG context area. */
	for (i = 0, j = 0; i < m->nr_args; i++) {
		if (i < MAX_NR_REG_ARGS)
			arg = REG_2 + i;
		else
			arg = tjit->orig_stack_args_off +
			      (i - MAX_NR_REG_ARGS) * sizeof(u64);
		bpf_arg_off = tjit->bpf_args_off + j * sizeof(u64);
		if (m->arg_size[i] <= 8) {
			if (i < MAX_NR_REG_ARGS)
				/* stg %arg,bpf_arg_off(%r15) */
				EMIT6_DISP_LH(0xe3000000, 0x0024, arg,
					      REG_0, REG_15, bpf_arg_off);
			else
				/* mvc bpf_arg_off(8,%r15),arg(%r15) */
				_EMIT6(0xd207f000 | bpf_arg_off,
				       0xf000 | arg);
			j += 1;
		} else {
			/* 9-16 byte args arrive by pointer; copy the data. */
			if (i < MAX_NR_REG_ARGS) {
				/* mvc bpf_arg_off(16,%r15),0(%arg) */
				_EMIT6(0xd20ff000 | bpf_arg_off,
				       reg2hex[arg] << 12);
			} else {
				/* lg %r1,arg(%r15) */
				EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_0,
					      REG_15, arg);
				/* mvc bpf_arg_off(16,%r15),0(%r1) */
				_EMIT6(0xd20ff000 | bpf_arg_off, 0x1000);
			}
			j += 2;
		}
	}
	/* stmg %r7,%r8,r7_r8_off(%r15) */
	EMIT6_DISP_LH(0xeb000000, 0x0024, REG_7, REG_8, REG_15,
		      tjit->r7_r8_off);
	/* stg %r14,r14_off(%r15) */
	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_14, REG_0, REG_15, tjit->r14_off);

	if (flags & BPF_TRAMP_F_ORIG_STACK) {
		/*
		 * The ftrace trampoline puts the return address (which is the
		 * address of the original function + S390X_PATCH_SIZE) into
		 * %r0; see ftrace_shared_hotpatch_trampoline_br and
		 * ftrace_init_nop() for details.
		 */

		/* lgr %r8,%r0 */
		EMIT4(0xb9040000, REG_8, REG_0);
	} else {
		/* %r8 = func_addr + S390X_PATCH_SIZE */
		load_imm64(jit, REG_8, (u64)func_addr + S390X_PATCH_SIZE);
	}

	/*
	 * ip = func_addr;
	 * arg_cnt = m->nr_args;
	 */

	if (flags & BPF_TRAMP_F_IP_ARG) {
		/* %r0 = func_addr */
		load_imm64(jit, REG_0, (u64)func_addr);
		/* stg %r0,ip_off(%r15) */
		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15,
			      tjit->ip_off);
	}
	/* lghi %r0,nr_bpf_args */
	EMIT4_IMM(0xa7090000, REG_0, nr_bpf_args);
	/* stg %r0,arg_cnt_off(%r15) */
	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15,
		      tjit->arg_cnt_off);

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		/*
		 * __bpf_tramp_enter(im);
		 */

		/* %r1 = __bpf_tramp_enter */
		load_imm64(jit, REG_1, (u64)__bpf_tramp_enter);
		/* %r2 = im */
		load_imm64(jit, REG_2, (u64)im);
		/* %r1() */
		call_r1(jit);
	}

	/* Run all fentry programs. */
	for (i = 0; i < fentry->nr_links; i++)
		if (invoke_bpf_prog(tjit, m, fentry->links[i],
				    flags & BPF_TRAMP_F_RET_FENTRY_RET))
			return -EINVAL;

	if (fmod_ret->nr_links) {
		/*
		 * retval = 0;
		 */

		/* xc retval_off(8,%r15),retval_off(%r15) */
		_EMIT6(0xd707f000 | tjit->retval_off,
		       0xf000 | tjit->retval_off);

		for (i = 0; i < fmod_ret->nr_links; i++) {
			if (invoke_bpf_prog(tjit, m, fmod_ret->links[i], true))
				return -EINVAL;

			/*
			 * if (retval)
			 *         goto do_fexit;
			 */

			/* ltg %r0,retval_off(%r15) */
			EMIT6_DISP_LH(0xe3000000, 0x0002, REG_0, REG_0, REG_15,
				      tjit->retval_off);
			/* brcl 7,do_fexit */
			EMIT6_PCREL_RILC(0xc0040000, 7, tjit->do_fexit);
		}
	}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		/*
		 * retval = func_addr(args);
		 */

		/* lmg %r2,%rN,reg_args_off(%r15) */
		if (nr_reg_args)
			EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2,
				      REG_2 + (nr_reg_args - 1), REG_15,
				      tjit->reg_args_off);
		/* mvc stack_args_off(N,%r15),orig_stack_args_off(%r15) */
		if (nr_stack_args)
			_EMIT6(0xd200f000 |
				       (nr_stack_args * sizeof(u64) - 1) << 16 |
				       tjit->stack_args_off,
			       0xf000 | tjit->orig_stack_args_off);
		/* mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
		_EMIT6(0xd203f000 | STK_OFF_TCCNT, 0xf000 | tjit->tccnt_off);
		/* lgr %r1,%r8 */
		EMIT4(0xb9040000, REG_1, REG_8);
		/* %r1() */
		call_r1(jit);
		/* stg %r2,retval_off(%r15) */
		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
			      tjit->retval_off);

		im->ip_after_call = jit->prg_buf + jit->prg;

		/*
		 * The following nop will be patched by bpf_tramp_image_put().
		 */

		/* brcl 0,im->ip_epilogue */
		EMIT6_PCREL_RILC(0xc0040000, 0, (u64)im->ip_epilogue);
	}

	/* do_fexit: */
	tjit->do_fexit = jit->prg;
	for (i = 0; i < fexit->nr_links; i++)
		if (invoke_bpf_prog(tjit, m, fexit->links[i], false))
			return -EINVAL;

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		im->ip_epilogue = jit->prg_buf + jit->prg;

		/*
		 * __bpf_tramp_exit(im);
		 */

		/* %r1 = __bpf_tramp_exit */
		load_imm64(jit, REG_1, (u64)__bpf_tramp_exit);
		/* %r2 = im */
		load_imm64(jit, REG_2, (u64)im);
		/* %r1() */
		call_r1(jit);
	}

	/* Epilogue: restore state and return to caller or original func. */

	/* lmg %r2,%rN,reg_args_off(%r15) */
	if ((flags & BPF_TRAMP_F_RESTORE_REGS) && nr_reg_args)
		EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2,
			      REG_2 + (nr_reg_args - 1), REG_15,
			      tjit->reg_args_off);
	/* lgr %r1,%r8 */
	if (!(flags & BPF_TRAMP_F_SKIP_FRAME))
		EMIT4(0xb9040000, REG_1, REG_8);
	/* lmg %r7,%r8,r7_r8_off(%r15) */
	EMIT6_DISP_LH(0xeb000000, 0x0004, REG_7, REG_8, REG_15,
		      tjit->r7_r8_off);
	/* lg %r14,r14_off(%r15) */
	EMIT6_DISP_LH(0xe3000000, 0x0004, REG_14, REG_0, REG_15, tjit->r14_off);
	/* lg %r2,retval_off(%r15) */
	if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET))
		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15,
			      tjit->retval_off);
	/* mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
	_EMIT6(0xd203f000 | (tjit->stack_size + STK_OFF_TCCNT),
	       0xf000 | tjit->tccnt_off);
	/* aghi %r15,stack_size */
	EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
	/* Emit an expoline for the following indirect jump. */
	if (nospec_uses_trampoline())
		emit_expoline(jit);
	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* br %r14 */
		_EMIT2(0x07fe);
	else
		/* br %r1 */
		_EMIT2(0x07f1);

	emit_r1_thunk(jit);

	return 0;
}
2640
/*
 * Compute the size of the trampoline that arch_prepare_bpf_trampoline()
 * would emit, by running a sizing pass with a NULL prg_buf.
 * Returns the size in bytes, or a negative error code.
 */
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
			     struct bpf_tramp_links *tlinks, void *orig_call)
{
	struct bpf_tramp_image im;
	struct bpf_tramp_jit tjit;
	int ret;

	/* prg_buf stays NULL, so only sizes/offsets are computed. */
	memset(&tjit, 0, sizeof(tjit));

	ret = __arch_prepare_bpf_trampoline(&im, &tjit, m, flags,
					    tlinks, orig_call);

	return ret < 0 ? ret : tjit.common.prg;
}
2655
/*
 * Emit a BPF trampoline into [image, image_end).
 * Runs a sizing pass first and fails with -E2BIG if the code would not
 * fit, then emits for real. Returns the emitted size or a negative
 * error code.
 */
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
				void *image_end, const struct btf_func_model *m,
				u32 flags, struct bpf_tramp_links *tlinks,
				void *func_addr)
{
	struct bpf_tramp_jit tjit;
	int ret;

	/* Compute offsets, check whether the code fits. */
	memset(&tjit, 0, sizeof(tjit));
	ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
					    tlinks, func_addr);

	if (ret < 0)
		return ret;
	if (tjit.common.prg > (char *)image_end - (char *)image)
		/*
		 * Use the same error code as for exceeding
		 * BPF_MAX_TRAMP_LINKS.
		 */
		return -E2BIG;

	/* Second pass: actually emit the code into the image. */
	tjit.common.prg = 0;
	tjit.common.prg_buf = image;
	ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
					    tlinks, func_addr);

	return ret < 0 ? ret : tjit.common.prg;
}
2685
/* Tail calls are supported from within BPF subprograms as well. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
	return true;
}
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * BPF Jit compiler for s390.
   4 *
   5 * Minimum build requirements:
   6 *
   7 *  - HAVE_MARCH_Z196_FEATURES: laal, laalg
   8 *  - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
   9 *  - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
  10 *  - PACK_STACK
  11 *  - 64BIT
  12 *
  13 * Copyright IBM Corp. 2012,2015
  14 *
  15 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  16 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
  17 */
  18
  19#define KMSG_COMPONENT "bpf_jit"
  20#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  21
  22#include <linux/netdevice.h>
  23#include <linux/filter.h>
  24#include <linux/init.h>
  25#include <linux/bpf.h>
 
 
  26#include <asm/cacheflush.h>
 
  27#include <asm/dis.h>
  28#include <asm/facility.h>
  29#include <asm/nospec-branch.h>
  30#include <asm/set_memory.h>
 
  31#include "bpf_jit.h"
  32
/* Per-program JIT compiler state (v4.17 layout: single literal pool). */
struct bpf_jit {
	u32 seen;		/* Flags to remember seen eBPF instructions */
	u32 seen_reg[16];	/* Array to remember which registers are used */
	u32 *addrs;		/* Array with relative instruction addresses */
	u8 *prg_buf;		/* Start of program */
	int size;		/* Size of program and literal pool */
	int size_prg;		/* Size of program */
	int prg;		/* Current position in program */
	int lit_start;		/* Start of literal pool */
	int lit;		/* Current position in literal pool */
	int base_ip;		/* Base address for literal pool */
	int ret0_ip;		/* Address of return 0 */
	int exit_ip;		/* Address of exit */
	int r1_thunk_ip;	/* Address of expoline thunk for 'br %r1' */
	int r14_thunk_ip;	/* Address of expoline thunk for 'br %r14' */
	int tail_call_start;	/* Tail call start offset */
	int labels[1];		/* Labels for local jumps */
};
  51
#define BPF_SIZE_MAX	0xffff	/* Max size for program (16 bit branches) */

/* Bits for bpf_jit.seen, recording which features a program uses. */
#define SEEN_SKB	1	/* skb access */
#define SEEN_MEM	2	/* use mem[] for temporary storage */
#define SEEN_RET0	4	/* ret0_ip points to a valid return 0 */
#define SEEN_LITERAL	8	/* code uses literals */
#define SEEN_FUNC	16	/* calls C functions */
#define SEEN_TAIL_CALL	32	/* code uses tail calls */
#define SEEN_REG_AX	64	/* code uses constant blinding */
#define SEEN_STACK	(SEEN_FUNC | SEEN_MEM | SEEN_SKB)
  62
/*
 * s390 registers (pseudo register numbers beyond the BPF register set)
 */
#define REG_W0		(MAX_BPF_JIT_REG + 0)	/* Work register 1 (even) */
#define REG_W1		(MAX_BPF_JIT_REG + 1)	/* Work register 2 (odd) */
#define REG_SKB_DATA	(MAX_BPF_JIT_REG + 2)	/* SKB data register */
#define REG_L		(MAX_BPF_JIT_REG + 3)	/* Literal pool register */
#define REG_15		(MAX_BPF_JIT_REG + 4)	/* Register 15 */
#define REG_0		REG_W0			/* Register 0 */
#define REG_1		REG_W1			/* Register 1 */
#define REG_2		BPF_REG_1		/* Register 2 */
#define REG_14		BPF_REG_0		/* Register 14 */
  75
/*
 * Mapping of BPF registers to s390 registers
 */
static const int reg2hex[] = {
	/* Return code */
	[BPF_REG_0]	= 14,
	/* Function parameters */
	[BPF_REG_1]	= 2,
	[BPF_REG_2]	= 3,
	[BPF_REG_3]	= 4,
	[BPF_REG_4]	= 5,
	[BPF_REG_5]	= 6,
	/* Call saved registers */
	[BPF_REG_6]	= 7,
	[BPF_REG_7]	= 8,
	[BPF_REG_8]	= 9,
	[BPF_REG_9]	= 10,
	/* BPF stack pointer */
	[BPF_REG_FP]	= 13,
	/* Register for blinding (shared with REG_SKB_DATA) */
	[BPF_REG_AX]	= 12,
	/* SKB data pointer */
	[REG_SKB_DATA]	= 12,
	/* Work registers for s390x backend */
	[REG_W0]	= 0,
	[REG_W1]	= 1,
	[REG_L]		= 11,
	[REG_15]	= 15,
};
 105
/* Encode two registers as an instruction register field (dst in bits 4-7). */
static inline u32 reg(u32 dst_reg, u32 src_reg)
{
	return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
}
 110
/* Encode a single register in the high nibble of a register field. */
static inline u32 reg_high(u32 reg)
{
	return reg2hex[reg] << 4;
}
 115
/* Record usage of a call-saved register (%r6-%r15) in jit->seen_reg. */
static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
{
	u32 r1 = reg2hex[b1];

	/* Only %r6-%r15 need saving/restoring, so only those are tracked. */
	if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
		jit->seen_reg[r1] = 1;
}
 123
 124#define REG_SET_SEEN(b1)					\
 125({								\
 126	reg_set_seen(jit, b1);					\
 127})
 128
 129#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
 130
 131/*
 132 * EMIT macros for code generation
 133 */
 134
 135#define _EMIT2(op)						\
 136({								\
 137	if (jit->prg_buf)					\
 138		*(u16 *) (jit->prg_buf + jit->prg) = op;	\
 139	jit->prg += 2;						\
 140})
 141
 142#define EMIT2(op, b1, b2)					\
 143({								\
 144	_EMIT2(op | reg(b1, b2));				\
 145	REG_SET_SEEN(b1);					\
 146	REG_SET_SEEN(b2);					\
 147})
 148
 149#define _EMIT4(op)						\
 150({								\
 151	if (jit->prg_buf)					\
 152		*(u32 *) (jit->prg_buf + jit->prg) = op;	\
 153	jit->prg += 4;						\
 154})
 155
 156#define EMIT4(op, b1, b2)					\
 157({								\
 158	_EMIT4(op | reg(b1, b2));				\
 159	REG_SET_SEEN(b1);					\
 160	REG_SET_SEEN(b2);					\
 161})
 162
 163#define EMIT4_RRF(op, b1, b2, b3)				\
 164({								\
 165	_EMIT4(op | reg_high(b3) << 8 | reg(b1, b2));		\
 166	REG_SET_SEEN(b1);					\
 167	REG_SET_SEEN(b2);					\
 168	REG_SET_SEEN(b3);					\
 169})
 170
 171#define _EMIT4_DISP(op, disp)					\
 172({								\
 173	unsigned int __disp = (disp) & 0xfff;			\
 174	_EMIT4(op | __disp);					\
 175})
 176
 177#define EMIT4_DISP(op, b1, b2, disp)				\
 178({								\
 179	_EMIT4_DISP(op | reg_high(b1) << 16 |			\
 180		    reg_high(b2) << 8, disp);			\
 181	REG_SET_SEEN(b1);					\
 182	REG_SET_SEEN(b2);					\
 183})
 184
 185#define EMIT4_IMM(op, b1, imm)					\
 186({								\
 187	unsigned int __imm = (imm) & 0xffff;			\
 188	_EMIT4(op | reg_high(b1) << 16 | __imm);		\
 189	REG_SET_SEEN(b1);					\
 190})
 191
 192#define EMIT4_PCREL(op, pcrel)					\
 193({								\
 194	long __pcrel = ((pcrel) >> 1) & 0xffff;			\
 195	_EMIT4(op | __pcrel);					\
 
 
 
 
 
 
 196})
 197
 198#define _EMIT6(op1, op2)					\
 199({								\
 200	if (jit->prg_buf) {					\
 201		*(u32 *) (jit->prg_buf + jit->prg) = op1;	\
 202		*(u16 *) (jit->prg_buf + jit->prg + 4) = op2;	\
 203	}							\
 204	jit->prg += 6;						\
 205})
 206
 207#define _EMIT6_DISP(op1, op2, disp)				\
 208({								\
 209	unsigned int __disp = (disp) & 0xfff;			\
 210	_EMIT6(op1 | __disp, op2);				\
 211})
 212
 213#define _EMIT6_DISP_LH(op1, op2, disp)				\
 214({								\
 215	u32 _disp = (u32) disp;					\
 216	unsigned int __disp_h = _disp & 0xff000;		\
 217	unsigned int __disp_l = _disp & 0x00fff;		\
 218	_EMIT6(op1 | __disp_l, op2 | __disp_h >> 4);		\
 219})
 220
 221#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp)		\
 222({								\
 223	_EMIT6_DISP_LH(op1 | reg(b1, b2) << 16 |		\
 224		       reg_high(b3) << 8, op2, disp);		\
 225	REG_SET_SEEN(b1);					\
 226	REG_SET_SEEN(b2);					\
 227	REG_SET_SEEN(b3);					\
 228})
 229
 230#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask)	\
 231({								\
 232	int rel = (jit->labels[label] - jit->prg) >> 1;		\
 233	_EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff),	\
 234	       op2 | mask << 12);				\
 235	REG_SET_SEEN(b1);					\
 236	REG_SET_SEEN(b2);					\
 237})
 238
 239#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask)	\
 240({								\
 241	int rel = (jit->labels[label] - jit->prg) >> 1;		\
 242	_EMIT6(op1 | (reg_high(b1) | mask) << 16 |		\
 243		(rel & 0xffff), op2 | (imm & 0xff) << 8);	\
 244	REG_SET_SEEN(b1);					\
 245	BUILD_BUG_ON(((unsigned long) imm) > 0xff);		\
 246})
 247
 248#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)		\
 249({								\
 250	/* Branch instruction needs 6 bytes */			\
 251	int rel = (addrs[i + off + 1] - (addrs[i + 1] - 6)) / 2;\
 252	_EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), op2 | mask);	\
 253	REG_SET_SEEN(b1);					\
 254	REG_SET_SEEN(b2);					\
 255})
 256
 257#define EMIT6_PCREL_RILB(op, b, target)				\
 258({								\
 259	int rel = (target - jit->prg) / 2;			\
 260	_EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff);	\
 261	REG_SET_SEEN(b);					\
 262})
 263
 264#define EMIT6_PCREL_RIL(op, target)				\
 265({								\
 266	int rel = (target - jit->prg) / 2;			\
 267	_EMIT6(op | rel >> 16, rel & 0xffff);			\
 
 
 
 
 
 268})
 269
 270#define _EMIT6_IMM(op, imm)					\
 271({								\
 272	unsigned int __imm = (imm);				\
 273	_EMIT6(op | (__imm >> 16), __imm & 0xffff);		\
 274})
 275
 276#define EMIT6_IMM(op, b1, imm)					\
 277({								\
 278	_EMIT6_IMM(op | reg_high(b1) << 16, imm);		\
 279	REG_SET_SEEN(b1);					\
 280})
 281
 
 
 
 
 
 
 
 
 
 
 282#define EMIT_CONST_U32(val)					\
 283({								\
 
 
 
 
 
 
 284	unsigned int ret;					\
 285	ret = jit->lit - jit->base_ip;				\
 286	jit->seen |= SEEN_LITERAL;				\
 287	if (jit->prg_buf)					\
 288		*(u32 *) (jit->prg_buf + jit->lit) = (u32) val;	\
 289	jit->lit += 4;						\
 290	ret;							\
 291})
 292
 293#define EMIT_CONST_U64(val)					\
 294({								\
 295	unsigned int ret;					\
 296	ret = jit->lit - jit->base_ip;				\
 297	jit->seen |= SEEN_LITERAL;				\
 298	if (jit->prg_buf)					\
 299		*(u64 *) (jit->prg_buf + jit->lit) = (u64) val;	\
 300	jit->lit += 8;						\
 301	ret;							\
 302})
 303
 304#define EMIT_ZERO(b1)						\
 305({								\
 306	/* llgfr %dst,%dst (zero extend to 64 bit) */		\
 307	EMIT4(0xb9160000, b1, b1);				\
 308	REG_SET_SEEN(b1);					\
 
 
 309})
 310
 311/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 312 * Fill whole space with illegal instructions
 313 */
static void jit_fill_hole(void *area, unsigned int size)
{
	/* Zero bytes decode as illegal instructions on s390 (see above). */
	memset(area, 0, size);
}
 318
/*
 * Save registers from "rs" (register start) to "re" (register end) on stack
 *
 * Runs before the prologue decrements %r15, so no STK_OFF adjustment is
 * needed here (contrast restore_regs()).
 */
static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
	u32 off = STK_OFF_R6 + (rs - 6) * 8;

	if (rs == re)
		/* stg %rs,off(%r15) */
		_EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
	else
		/* stmg %rs,%re,off(%r15) */
		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
}
 333
/*
 * Restore registers from "rs" (register start) to "re" (register end) on stack
 *
 * When a stack frame was created (SEEN_STACK), %r15 is still lowered at
 * this point, so the save area sits STK_OFF + stack_depth higher.
 */
static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
{
	u32 off = STK_OFF_R6 + (rs - 6) * 8;

	if (jit->seen & SEEN_STACK)
		off += STK_OFF + stack_depth;

	if (rs == re)
		/* lg %rs,off(%r15) */
		_EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
	else
		/* lmg %rs,%re,off(%r15) */
		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
}
 351
 352/*
 353 * Return first seen register (from start)
 354 */
 355static int get_start(struct bpf_jit *jit, int start)
 356{
 357	int i;
 358
 359	for (i = start; i <= 15; i++) {
 360		if (jit->seen_reg[i])
 361			return i;
 362	}
 363	return 0;
 364}
 365
/*
 * Return last seen register (from start) (gap >= 2)
 *
 * Scans forward until it finds two consecutive unseen registers; the
 * register before that gap ends the current chunk. This trades a few
 * extra stores for fewer stmg/lmg instructions.
 */
static int get_end(struct bpf_jit *jit, int start)
{
	int i;

	for (i = start; i < 15; i++) {
		if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
			return i - 1;
	}
	/* No gap found: the chunk runs up to %r15 or %r14. */
	return jit->seen_reg[15] ? 15 : 14;
}
 379
#define REGS_SAVE	1
#define REGS_RESTORE	0
/*
 * Save and restore clobbered registers (6-15) on stack.
 * We save/restore registers in chunks with gap >= 2 registers.
 */
static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
{
	int re = 6, rs;

	do {
		/* Find the next chunk [rs, re] of seen registers. */
		rs = get_start(jit, re);
		if (!rs)
			break;
		re = get_end(jit, rs + 1);
		if (op == REGS_SAVE)
			save_regs(jit, rs, re);
		else
			restore_regs(jit, rs, re, stack_depth);
		re++;
	} while (re <= 15);
}
 403
/*
 * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
 * we store the SKB header length on the stack and the SKB data
 * pointer in REG_SKB_DATA if BPF_REG_AX is not used.
 */
static void emit_load_skb_data_hlen(struct bpf_jit *jit)
{
	/* Header length: llgf %w1,<len>(%b1) */
	EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
		      offsetof(struct sk_buff, len));
	/* Subtract paged data length: s %w1,<data_len>(%b1) */
	EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
		   offsetof(struct sk_buff, data_len));
	/* stg %w1,ST_OFF_HLEN(%r0,%r15) */
	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN);
	/* %r12 is shared with constant blinding; skip if AX is in use. */
	if (!(jit->seen & SEEN_REG_AX))
		/* lg %skb_data,data_off(%b1) */
		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
			      BPF_REG_1, offsetof(struct sk_buff, data));
}
 424
/*
 * Emit function prologue
 *
 * Save registers and create stack frame if necessary.
 * See stack frame layout description in "bpf_jit.h"!
 */
static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
{
	if (jit->seen & SEEN_TAIL_CALL) {
		/* Zero the tail call counter on entry:
		 * xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15)
		 */
		_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
	} else {
		/* Emit same-size filler so tail_call_start is constant:
		 * j tail_call_start: NOP if no tail calls are used
		 */
		EMIT4_PCREL(0xa7f40000, 6);
		_EMIT2(0);
	}
	/* Tail calls have to skip above initialization */
	jit->tail_call_start = jit->prg;
	/* Save registers */
	save_restore_regs(jit, REGS_SAVE, stack_depth);
	/* Setup literal pool */
	if (jit->seen & SEEN_LITERAL) {
		/* Literals are addressed relative to %r13 (REG_L):
		 * basr %r13,0
		 */
		EMIT2(0x0d00, REG_L, REG_0);
		jit->base_ip = jit->prg;
	}
	/* Setup stack and backchain */
	if (jit->seen & SEEN_STACK) {
		if (jit->seen & SEEN_FUNC)
			/* lgr %w1,%r15 (backchain) */
			EMIT4(0xb9040000, REG_W1, REG_15);
		/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
		/* aghi %r15,-STK_OFF */
		EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
		if (jit->seen & SEEN_FUNC)
			/* stg %w1,152(%r15) (backchain) */
			EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
				      REG_15, 152);
	}
	if (jit->seen & SEEN_SKB) {
		emit_load_skb_data_hlen(jit);
		/* Preserve the skb pointer for reload after helper calls:
		 * stg %b1,ST_OFF_SKBP(%r0,%r15)
		 */
		EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
			      STK_OFF_SKBP);
	}
}
 472
/*
 * Function epilogue
 *
 * Emits the shared "return 0" target (if needed), the exit sequence,
 * and - when expolines are enabled - the indirect-branch thunks used
 * by the generated code.
 */
static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
{
	/* Return 0 */
	if (jit->seen & SEEN_RET0) {
		jit->ret0_ip = jit->prg;
		/* lghi %b0,0 */
		EMIT4_IMM(0xa7090000, BPF_REG_0, 0);
	}
	jit->exit_ip = jit->prg;
	/* Load exit code: lgr %r2,%b0 */
	EMIT4(0xb9040000, REG_2, BPF_REG_0);
	/* Restore registers */
	save_restore_regs(jit, REGS_RESTORE, stack_depth);
	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
		jit->r14_thunk_ip = jit->prg;
		/* Generate __s390_indirect_jump_r14 thunk */
		if (test_facility(35)) {
			/* Facility 35 provides exrl (execute relative long) */
			/* exrl %r0,.+10 */
			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
		} else {
			/* larl %r1,.+14 */
			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
			/* ex 0,0(%r1) */
			EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
		}
		/* j . */
		EMIT4_PCREL(0xa7f40000, 0);
	}
	/* br %r14 */
	_EMIT2(0x07fe);

	/* The r1 thunk is only needed when helper calls were emitted */
	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
	    (jit->seen & SEEN_FUNC)) {
		jit->r1_thunk_ip = jit->prg;
		/* Generate __s390_indirect_jump_r1 thunk */
		if (test_facility(35)) {
			/* exrl %r0,.+10 */
			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
			/* j . */
			EMIT4_PCREL(0xa7f40000, 0);
			/* br %r1 */
			_EMIT2(0x07f1);
		} else {
			/* larl %r1,.+14 */
			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
			/* ex 0,S390_lowcore.br_r1_trampoline */
			EMIT4_DISP(0x44000000, REG_0, REG_0,
				   offsetof(struct lowcore, br_r1_trampoline));
			/* j . */
			EMIT4_PCREL(0xa7f40000, 0);
		}
	}
}
 529
/*
 * Compile one eBPF instruction into s390x code
 *
 * Returns the number of eBPF instructions consumed (1, or 2 for the
 * 16-byte BPF_LD | BPF_IMM | BPF_DW), or -1 on an unknown opcode.
 *
 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
 * stack space for the large switch statement.
 */
static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
{
	struct bpf_insn *insn = &fp->insnsi[i];
	int jmp_off, last, insn_count = 1;
	unsigned int func_addr, mask;
	u32 dst_reg = insn->dst_reg;
	u32 src_reg = insn->src_reg;
	u32 *addrs = jit->addrs;
	s32 imm = insn->imm;
	s16 off = insn->off;

	if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
		jit->seen |= SEEN_REG_AX;
	switch (insn->code) {
	/*
	 * BPF_MOV
	 */
	case BPF_ALU | BPF_MOV | BPF_X: /* dst = (u32) src */
		/* llgfr %dst,%src (zero-extends, so no EMIT_ZERO needed) */
		EMIT4(0xb9160000, dst_reg, src_reg);
		break;
	case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
		/* lgr %dst,%src */
		EMIT4(0xb9040000, dst_reg, src_reg);
		break;
	case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
		/* llilf %dst,imm (zero-extends) */
		EMIT6_IMM(0xc00f0000, dst_reg, imm);
		break;
	case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
		/* lgfi %dst,imm (sign-extends the 32-bit immediate) */
		EMIT6_IMM(0xc0010000, dst_reg, imm);
		break;
	/*
	 * BPF_LD 64
	 */
	case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
	{
		/* 16 byte instruction that uses two 'struct bpf_insn' */
		u64 imm64;

		imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
		/* lg %dst,<d(imm)>(%l) */
		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, REG_0, REG_L,
			      EMIT_CONST_U64(imm64));
		insn_count = 2;
		break;
	}
	/*
	 * BPF_ADD
	 */
	case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
		/* ar %dst,%src */
		EMIT2(0x1a00, dst_reg, src_reg);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
		/* agr %dst,%src */
		EMIT4(0xb9080000, dst_reg, src_reg);
		break;
	case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
		if (!imm)
			break;
		/* alfi %dst,imm */
		EMIT6_IMM(0xc20b0000, dst_reg, imm);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
		if (!imm)
			break;
		/* agfi %dst,imm */
		EMIT6_IMM(0xc2080000, dst_reg, imm);
		break;
	/*
	 * BPF_SUB
	 */
	case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
		/* sr %dst,%src */
		EMIT2(0x1b00, dst_reg, src_reg);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
		/* sgr %dst,%src */
		EMIT4(0xb9090000, dst_reg, src_reg);
		break;
	case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
		if (!imm)
			break;
		/* NOTE(review): -imm is signed-overflow UB for imm == INT_MIN */
		/* alfi %dst,-imm */
		EMIT6_IMM(0xc20b0000, dst_reg, -imm);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
		if (!imm)
			break;
		/* NOTE(review): -imm is signed-overflow UB for imm == INT_MIN */
		/* agfi %dst,-imm */
		EMIT6_IMM(0xc2080000, dst_reg, -imm);
		break;
	/*
	 * BPF_MUL
	 */
	case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
		/* msr %dst,%src */
		EMIT4(0xb2520000, dst_reg, src_reg);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
		/* msgr %dst,%src */
		EMIT4(0xb90c0000, dst_reg, src_reg);
		break;
	case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
		if (imm == 1)
			break;
		/* msfi %dst,imm */
		EMIT6_IMM(0xc2010000, dst_reg, imm);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
		if (imm == 1)
			break;
		/* msgfi %dst,imm */
		EMIT6_IMM(0xc2000000, dst_reg, imm);
		break;
	/*
	 * BPF_DIV / BPF_MOD
	 *
	 * dlr/dlgr leave the remainder in %w0 and the quotient in %w1,
	 * hence rc_reg selects the result for DIV vs MOD.
	 */
	case BPF_ALU | BPF_DIV | BPF_X: /* dst = (u32) dst / (u32) src */
	case BPF_ALU | BPF_MOD | BPF_X: /* dst = (u32) dst % (u32) src */
	{
		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;

		/* lhi %w0,0 */
		EMIT4_IMM(0xa7080000, REG_W0, 0);
		/* lr %w1,%dst */
		EMIT2(0x1800, REG_W1, dst_reg);
		/* dlr %w0,%src */
		EMIT4(0xb9970000, REG_W0, src_reg);
		/* llgfr %dst,%rc */
		EMIT4(0xb9160000, dst_reg, rc_reg);
		break;
	}
	case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
	case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
	{
		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;

		/* lghi %w0,0 */
		EMIT4_IMM(0xa7090000, REG_W0, 0);
		/* lgr %w1,%dst */
		EMIT4(0xb9040000, REG_W1, dst_reg);
		/* dlgr %w0,%src */
		EMIT4(0xb9870000, REG_W0, src_reg);
		/* lgr %dst,%rc */
		EMIT4(0xb9040000, dst_reg, rc_reg);
		break;
	}
	case BPF_ALU | BPF_DIV | BPF_K: /* dst = (u32) dst / (u32) imm */
	case BPF_ALU | BPF_MOD | BPF_K: /* dst = (u32) dst % (u32) imm */
	{
		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;

		if (imm == 1) {
			/* dst /= 1 is a no-op; dst %= 1 is 0 */
			if (BPF_OP(insn->code) == BPF_MOD)
				/* lghi %dst,0 */
				EMIT4_IMM(0xa7090000, dst_reg, 0);
			break;
		}
		/* lhi %w0,0 */
		EMIT4_IMM(0xa7080000, REG_W0, 0);
		/* lr %w1,%dst */
		EMIT2(0x1800, REG_W1, dst_reg);
		/* dl %w0,<d(imm)>(%l) */
		EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
			      EMIT_CONST_U32(imm));
		/* llgfr %dst,%rc */
		EMIT4(0xb9160000, dst_reg, rc_reg);
		break;
	}
	case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
	case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
	{
		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;

		if (imm == 1) {
			/* dst /= 1 is a no-op; dst %= 1 is 0 */
			if (BPF_OP(insn->code) == BPF_MOD)
				/* lghi %dst,0 */
				EMIT4_IMM(0xa7090000, dst_reg, 0);
			break;
		}
		/* lghi %w0,0 */
		EMIT4_IMM(0xa7090000, REG_W0, 0);
		/* lgr %w1,%dst */
		EMIT4(0xb9040000, REG_W1, dst_reg);
		/* dlg %w0,<d(imm)>(%l) */
		EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
			      EMIT_CONST_U64(imm));
		/* lgr %dst,%rc */
		EMIT4(0xb9040000, dst_reg, rc_reg);
		break;
	}
	/*
	 * BPF_AND
	 */
	case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
		/* nr %dst,%src */
		EMIT2(0x1400, dst_reg, src_reg);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
		/* ngr %dst,%src */
		EMIT4(0xb9800000, dst_reg, src_reg);
		break;
	case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
		/* nilf %dst,imm */
		EMIT6_IMM(0xc00b0000, dst_reg, imm);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
		/* 64-bit immediate comes from the literal pool:
		 * ng %dst,<d(imm)>(%l)
		 */
		EMIT6_DISP_LH(0xe3000000, 0x0080, dst_reg, REG_0, REG_L,
			      EMIT_CONST_U64(imm));
		break;
	/*
	 * BPF_OR
	 */
	case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		/* or %dst,%src */
		EMIT2(0x1600, dst_reg, src_reg);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
		/* ogr %dst,%src */
		EMIT4(0xb9810000, dst_reg, src_reg);
		break;
	case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
		/* oilf %dst,imm */
		EMIT6_IMM(0xc00d0000, dst_reg, imm);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
		/* og %dst,<d(imm)>(%l) */
		EMIT6_DISP_LH(0xe3000000, 0x0081, dst_reg, REG_0, REG_L,
			      EMIT_CONST_U64(imm));
		break;
	/*
	 * BPF_XOR
	 */
	case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
		/* xr %dst,%src */
		EMIT2(0x1700, dst_reg, src_reg);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
		/* xgr %dst,%src */
		EMIT4(0xb9820000, dst_reg, src_reg);
		break;
	case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
		if (!imm)
			break;
		/* xilf %dst,imm */
		EMIT6_IMM(0xc0070000, dst_reg, imm);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
		/* xg %dst,<d(imm)>(%l) */
		EMIT6_DISP_LH(0xe3000000, 0x0082, dst_reg, REG_0, REG_L,
			      EMIT_CONST_U64(imm));
		break;
	/*
	 * BPF_LSH
	 */
	case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
		/* sll %dst,0(%src) */
		EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
		/* sllg %dst,%dst,0(%src) */
		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
		break;
	case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
		if (imm == 0)
			break;
		/* sll %dst,imm(%r0) */
		EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
		if (imm == 0)
			break;
		/* sllg %dst,%dst,imm(%r0) */
		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
		break;
	/*
	 * BPF_RSH
	 */
	case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
		/* srl %dst,0(%src) */
		EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
		/* srlg %dst,%dst,0(%src) */
		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
		break;
	case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
		if (imm == 0)
			break;
		/* srl %dst,imm(%r0) */
		EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
		if (imm == 0)
			break;
		/* srlg %dst,%dst,imm(%r0) */
		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
		break;
	/*
	 * BPF_ARSH
	 */
	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
		/* srag %dst,%dst,0(%src) */
		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
		break;
	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
		if (imm == 0)
			break;
		/* srag %dst,%dst,imm(%r0) */
		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
		break;
	/*
	 * BPF_NEG
	 */
	case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
		/* lcr %dst,%dst */
		EMIT2(0x1300, dst_reg, dst_reg);
		EMIT_ZERO(dst_reg);
		break;
	case BPF_ALU64 | BPF_NEG: /* dst = -dst */
		/* lcgr %dst,%dst */
		EMIT4(0xb9130000, dst_reg, dst_reg);
		break;
	/*
	 * BPF_FROM_BE/LE
	 */
	case BPF_ALU | BPF_END | BPF_FROM_BE:
		/* s390 is big endian, therefore only clear high order bytes */
		switch (imm) {
		case 16: /* dst = (u16) cpu_to_be16(dst) */
			/* llghr %dst,%dst */
			EMIT4(0xb9850000, dst_reg, dst_reg);
			break;
		case 32: /* dst = (u32) cpu_to_be32(dst) */
			/* llgfr %dst,%dst */
			EMIT4(0xb9160000, dst_reg, dst_reg);
			break;
		case 64: /* dst = (u64) cpu_to_be64(dst) */
			break;
		}
		break;
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		switch (imm) {
		case 16: /* dst = (u16) cpu_to_le16(dst) */
			/* lrvr %dst,%dst */
			EMIT4(0xb91f0000, dst_reg, dst_reg);
			/* srl %dst,16(%r0) */
			EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
			/* llghr %dst,%dst */
			EMIT4(0xb9850000, dst_reg, dst_reg);
			break;
		case 32: /* dst = (u32) cpu_to_le32(dst) */
			/* lrvr %dst,%dst */
			EMIT4(0xb91f0000, dst_reg, dst_reg);
			/* llgfr %dst,%dst */
			EMIT4(0xb9160000, dst_reg, dst_reg);
			break;
		case 64: /* dst = (u64) cpu_to_le64(dst) */
			/* lrvgr %dst,%dst */
			EMIT4(0xb90f0000, dst_reg, dst_reg);
			break;
		}
		break;
	/*
	 * BPF_ST(X)
	 */
	case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
		/* stcy %src,off(%dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		/* sthy %src,off(%dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		/* sty %src,off(%dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		/* stg %src,off(%dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
		/* lhi %w0,imm */
		EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
		/* stcy %w0,off(dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
		/* lhi %w0,imm */
		EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
		/* sthy %w0,off(dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
		/* llilf %w0,imm  */
		EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
		/* sty %w0,off(%dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
		/* lgfi %w0,imm */
		EMIT6_IMM(0xc0010000, REG_W0, imm);
		/* stg %w0,off(%dst) */
		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	/*
	 * BPF_STX XADD (atomic_add)
	 */
	case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
		/* laal %w0,%src,off(%dst) */
		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W0, src_reg,
			      dst_reg, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
		/* laalg %w0,%src,off(%dst) */
		EMIT6_DISP_LH(0xeb000000, 0x00ea, REG_W0, src_reg,
			      dst_reg, off);
		jit->seen |= SEEN_MEM;
		break;
	/*
	 * BPF_LDX
	 */
	case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
		/* llgc %dst,0(off,%src) */
		EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
		/* llgh %dst,0(off,%src) */
		EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
		jit->seen |= SEEN_MEM;
		break;
	case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
		/* llgf %dst,off(%src) */
		jit->seen |= SEEN_MEM;
		EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
		break;
	case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
		/* lg %dst,0(off,%src) */
		jit->seen |= SEEN_MEM;
		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
		break;
	/*
	 * BPF_JMP / CALL
	 */
	case BPF_JMP | BPF_CALL:
	{
		/*
		 * b0 = (__bpf_call_base + imm)(b1, b2, b3, b4, b5)
		 */
		const u64 func = (u64)__bpf_call_base + imm;

		REG_SET_SEEN(BPF_REG_5);
		jit->seen |= SEEN_FUNC;
		/* Load helper address from literal pool:
		 * lg %w1,<d(imm)>(%l)
		 */
		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
			      EMIT_CONST_U64(func));
		if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
			/* brasl %r14,__s390_indirect_jump_r1 */
			EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
		} else {
			/* basr %r14,%w1 */
			EMIT2(0x0d00, REG_14, REG_W1);
		}
		/* lgr %b0,%r2: load return value into %b0 */
		EMIT4(0xb9040000, BPF_REG_0, REG_2);
		/* Helper may have invalidated the cached skb data/hlen */
		if ((jit->seen & SEEN_SKB) &&
		    bpf_helper_changes_pkt_data((void *)func)) {
			/* lg %b1,ST_OFF_SKBP(%r15) */
			EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
				      REG_15, STK_OFF_SKBP);
			emit_load_skb_data_hlen(jit);
		}
		break;
	}
	case BPF_JMP | BPF_TAIL_CALL:
		/*
		 * Implicit input:
		 *  B1: pointer to ctx
		 *  B2: pointer to bpf_array
		 *  B3: index in bpf_array
		 */
		jit->seen |= SEEN_TAIL_CALL;

		/*
		 * if (index >= array->map.max_entries)
		 *         goto out;
		 */

		/* llgf %w1,map.max_entries(%b2) */
		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
			      offsetof(struct bpf_array, map.max_entries));
		/* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
		EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
				  REG_W1, 0, 0xa);

		/*
		 * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
		 *         goto out;
		 */

		if (jit->seen & SEEN_STACK)
			off = STK_OFF_TCCNT + STK_OFF + fp->aux->stack_depth;
		else
			off = STK_OFF_TCCNT;
		/* lhi %w0,1 */
		EMIT4_IMM(0xa7080000, REG_W0, 1);
		/* Atomically bump the on-stack counter:
		 * laal %w1,%w0,off(%r15)
		 */
		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
		/* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
		EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
				      MAX_TAIL_CALL_CNT, 0, 0x2);

		/*
		 * prog = array->ptrs[index];
		 * if (prog == NULL)
		 *         goto out;
		 */

		/* sllg %r1,%b3,3: %r1 = index * 8 */
		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
		/* lg %r1,prog(%b2,%r1) */
		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
			      REG_1, offsetof(struct bpf_array, ptrs));
		/* clgij %r1,0,0x8,label0 */
		EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8);

		/*
		 * Restore registers before calling function
		 */
		save_restore_regs(jit, REGS_RESTORE, fp->aux->stack_depth);

		/*
		 * goto *(prog->bpf_func + tail_call_start);
		 */

		/* lg %r1,bpf_func(%r1) */
		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
			      offsetof(struct bpf_prog, bpf_func));
		/* Skip the target's counter-reset preamble:
		 * bc 0xf,tail_call_start(%r1)
		 */
		_EMIT4(0x47f01000 + jit->tail_call_start);
		/* out: */
		jit->labels[0] = jit->prg;
		break;
	case BPF_JMP | BPF_EXIT: /* return b0 */
		/* Fall through to the epilogue when this is the last insn */
		last = (i == fp->len - 1) ? 1 : 0;
		if (last && !(jit->seen & SEEN_RET0))
			break;
		/* j <exit> */
		EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
		break;
	/*
	 * Branch relative (number of skipped instructions) to offset on
	 * condition.
	 *
	 * Condition code to mask mapping:
	 *
	 * CC | Description	   | Mask
	 * ------------------------------
	 * 0  | Operands equal	   |	8
	 * 1  | First operand low  |	4
	 * 2  | First operand high |	2
	 * 3  | Unused		   |	1
	 *
	 * For s390x relative branches: ip = ip + off_bytes
	 * For BPF relative branches:	insn = insn + off_insns + 1
	 *
	 * For example for s390x with offset 0 we jump to the branch
	 * instruction itself (loop) and for BPF with offset 0 we
	 * branch to the instruction behind the branch.
	 */
	case BPF_JMP | BPF_JA: /* if (true) */
		mask = 0xf000; /* j */
		goto branch_oc;
	case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
		mask = 0x2000; /* jh */
		goto branch_ks;
	case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
		mask = 0x4000; /* jl */
		goto branch_ks;
	case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
		mask = 0xa000; /* jhe */
		goto branch_ks;
	case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
		mask = 0xc000; /* jle */
		goto branch_ks;
	case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
		mask = 0x2000; /* jh */
		goto branch_ku;
	case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
		mask = 0x4000; /* jl */
		goto branch_ku;
	case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
		mask = 0xa000; /* jhe */
		goto branch_ku;
	case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
		mask = 0xc000; /* jle */
		goto branch_ku;
	case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
		mask = 0x7000; /* jne */
		goto branch_ku;
	case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
		mask = 0x8000; /* je */
		goto branch_ku;
	case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
		mask = 0x7000; /* jnz */
		/* lgfi %w1,imm (load sign extend imm) */
		EMIT6_IMM(0xc0010000, REG_W1, imm);
		/* ngr %w1,%dst */
		EMIT4(0xb9800000, REG_W1, dst_reg);
		goto branch_oc;

	case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
		mask = 0x2000; /* jh */
		goto branch_xs;
	case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
		mask = 0x4000; /* jl */
		goto branch_xs;
	case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
		mask = 0xa000; /* jhe */
		goto branch_xs;
	case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
		mask = 0xc000; /* jle */
		goto branch_xs;
	case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
		mask = 0x2000; /* jh */
		goto branch_xu;
	case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
		mask = 0x4000; /* jl */
		goto branch_xu;
	case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
		mask = 0xa000; /* jhe */
		goto branch_xu;
	case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
		mask = 0xc000; /* jle */
		goto branch_xu;
	case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
		mask = 0x7000; /* jne */
		goto branch_xu;
	case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
		mask = 0x8000; /* je */
		goto branch_xu;
	case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
		mask = 0x7000; /* jnz */
		/* ngrk %w1,%dst,%src */
		EMIT4_RRF(0xb9e40000, REG_W1, dst_reg, src_reg);
		goto branch_oc;
branch_ks:	/* signed compare against immediate */
		/* lgfi %w1,imm (load sign extend imm) */
		EMIT6_IMM(0xc0010000, REG_W1, imm);
		/* cgrj %dst,%w1,mask,off */
		EMIT6_PCREL(0xec000000, 0x0064, dst_reg, REG_W1, i, off, mask);
		break;
branch_ku:	/* unsigned compare against immediate */
		/* lgfi %w1,imm (load sign extend imm) */
		EMIT6_IMM(0xc0010000, REG_W1, imm);
		/* clgrj %dst,%w1,mask,off */
		EMIT6_PCREL(0xec000000, 0x0065, dst_reg, REG_W1, i, off, mask);
		break;
branch_xs:	/* signed compare against register */
		/* cgrj %dst,%src,mask,off */
		EMIT6_PCREL(0xec000000, 0x0064, dst_reg, src_reg, i, off, mask);
		break;
branch_xu:	/* unsigned compare against register */
		/* clgrj %dst,%src,mask,off */
		EMIT6_PCREL(0xec000000, 0x0065, dst_reg, src_reg, i, off, mask);
		break;
branch_oc:	/* branch on condition code already set */
		/* brc mask,jmp_off (branch instruction needs 4 bytes) */
		jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4);
		EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off);
		break;
	/*
	 * BPF_LD
	 */
	case BPF_LD | BPF_ABS | BPF_B: /* b0 = *(u8 *) (skb->data+imm) */
	case BPF_LD | BPF_IND | BPF_B: /* b0 = *(u8 *) (skb->data+imm+src) */
		if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
			func_addr = __pa(sk_load_byte_pos);
		else
			func_addr = __pa(sk_load_byte);
		goto call_fn;
	case BPF_LD | BPF_ABS | BPF_H: /* b0 = *(u16 *) (skb->data+imm) */
	case BPF_LD | BPF_IND | BPF_H: /* b0 = *(u16 *) (skb->data+imm+src) */
		if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
			func_addr = __pa(sk_load_half_pos);
		else
			func_addr = __pa(sk_load_half);
		goto call_fn;
	case BPF_LD | BPF_ABS | BPF_W: /* b0 = *(u32 *) (skb->data+imm) */
	case BPF_LD | BPF_IND | BPF_W: /* b0 = *(u32 *) (skb->data+imm+src) */
		if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
			func_addr = __pa(sk_load_word_pos);
		else
			func_addr = __pa(sk_load_word);
		goto call_fn;
call_fn:
		jit->seen |= SEEN_SKB | SEEN_RET0 | SEEN_FUNC;
		REG_SET_SEEN(REG_14); /* Return address of possible func call */

		/*
		 * Implicit input:
		 *  BPF_REG_6	 (R7) : skb pointer
		 *  REG_SKB_DATA (R12): skb data pointer (if no BPF_REG_AX)
		 *
		 * Calculated input:
		 *  BPF_REG_2	 (R3) : offset of byte(s) to fetch in skb
		 *  BPF_REG_5	 (R6) : return address
		 *
		 * Output:
		 *  BPF_REG_0	 (R14): data read from skb
		 *
		 * Scratch registers (BPF_REG_1-5)
		 */

		/* Call function: llilf %w1,func_addr  */
		EMIT6_IMM(0xc00f0000, REG_W1, func_addr);

		/* Offset: lgfi %b2,imm */
		EMIT6_IMM(0xc0010000, BPF_REG_2, imm);
		if (BPF_MODE(insn->code) == BPF_IND)
			/* agfr %b2,%src (%src is s32 here) */
			EMIT4(0xb9180000, BPF_REG_2, src_reg);

		/* Reload REG_SKB_DATA if BPF_REG_AX is used */
		if (jit->seen & SEEN_REG_AX)
			/* lg %skb_data,data_off(%b6) */
			EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
				      BPF_REG_6, offsetof(struct sk_buff, data));
		/* basr %b5,%w1 (%b5 is call saved) */
		EMIT2(0x0d00, BPF_REG_5, REG_W1);

		/*
		 * Note: For fast access we jump directly after the
		 * jnz instruction from bpf_jit.S
		 */
		/* jnz <ret0> */
		EMIT4_PCREL(0xa7740000, jit->ret0_ip - jit->prg);
		break;
	default: /* too complex, give up */
		pr_err("Unknown opcode %02x\n", insn->code);
		return -1;
	}
	return insn_count;
}
1311
1312/*
1313 * Compile eBPF program into s390x code
1314 */
1315static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
 
1316{
1317	int i, insn_count;
1318
1319	jit->lit = jit->lit_start;
 
1320	jit->prg = 0;
 
1321
1322	bpf_jit_prologue(jit, fp->aux->stack_depth);
 
 
1323	for (i = 0; i < fp->len; i += insn_count) {
1324		insn_count = bpf_jit_insn(jit, fp, i);
1325		if (insn_count < 0)
1326			return -1;
1327		/* Next instruction address */
1328		jit->addrs[i + insn_count] = jit->prg;
 
1329	}
1330	bpf_jit_epilogue(jit, fp->aux->stack_depth);
1331
1332	jit->lit_start = jit->prg;
1333	jit->size = jit->lit;
 
 
 
 
 
 
 
1334	jit->size_prg = jit->prg;
 
 
 
 
 
 
1335	return 0;
1336}
1337
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1338/*
1339 * Compile eBPF program "fp"
1340 */
1341struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
1342{
 
1343	struct bpf_prog *tmp, *orig_fp = fp;
1344	struct bpf_binary_header *header;
 
1345	bool tmp_blinded = false;
 
1346	struct bpf_jit jit;
1347	int pass;
1348
 
 
 
1349	if (!fp->jit_requested)
1350		return orig_fp;
1351
1352	tmp = bpf_jit_blind_constants(fp);
1353	/*
1354	 * If blinding was requested and we failed during blinding,
1355	 * we must fall back to the interpreter.
1356	 */
1357	if (IS_ERR(tmp))
1358		return orig_fp;
1359	if (tmp != fp) {
1360		tmp_blinded = true;
1361		fp = tmp;
1362	}
1363
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1364	memset(&jit, 0, sizeof(jit));
1365	jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
1366	if (jit.addrs == NULL) {
1367		fp = orig_fp;
1368		goto out;
1369	}
1370	/*
1371	 * Three initial passes:
1372	 *   - 1/2: Determine clobbered registers
1373	 *   - 3:   Calculate program size and addrs arrray
1374	 */
1375	for (pass = 1; pass <= 3; pass++) {
1376		if (bpf_jit_prog(&jit, fp)) {
1377			fp = orig_fp;
1378			goto free_addrs;
1379		}
1380	}
1381	/*
1382	 * Final pass: Allocate and generate program
1383	 */
1384	if (jit.size >= BPF_SIZE_MAX) {
1385		fp = orig_fp;
1386		goto free_addrs;
1387	}
1388	header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole);
1389	if (!header) {
1390		fp = orig_fp;
1391		goto free_addrs;
1392	}
1393	if (bpf_jit_prog(&jit, fp)) {
 
 
1394		fp = orig_fp;
1395		goto free_addrs;
1396	}
1397	if (bpf_jit_enable > 1) {
1398		bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
1399		print_fn_code(jit.prg_buf, jit.size_prg);
1400	}
1401	bpf_jit_binary_lock_ro(header);
 
 
 
 
 
 
1402	fp->bpf_func = (void *) jit.prg_buf;
1403	fp->jited = 1;
1404	fp->jited_len = jit.size;
 
 
 
1405free_addrs:
1406	kfree(jit.addrs);
 
 
 
1407out:
1408	if (tmp_blinded)
1409		bpf_jit_prog_release_other(fp, fp == orig_fp ?
1410					   tmp : orig_fp);
1411	return fp;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1412}