v4.10.11
   1/*
   2 * BPF Jit compiler for s390.
   3 *
   4 * Minimum build requirements:
   5 *
   6 *  - HAVE_MARCH_Z196_FEATURES: laal, laalg
   7 *  - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
   8 *  - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
   9 *  - PACK_STACK
  10 *  - 64BIT
  11 *
  12 * Copyright IBM Corp. 2012,2015
  13 *
  14 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  15 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
  16 */
  17
  18#define KMSG_COMPONENT "bpf_jit"
  19#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  20
  21#include <linux/netdevice.h>
  22#include <linux/filter.h>
  23#include <linux/init.h>
  24#include <linux/bpf.h>
  25#include <asm/cacheflush.h>
  26#include <asm/dis.h>
  27#include "bpf_jit.h"
  28
  29int bpf_jit_enable __read_mostly;
  30
  31struct bpf_jit {
  32	u32 seen;		/* Flags to remember seen eBPF instructions */
  33	u32 seen_reg[16];	/* Array to remember which registers are used */
  34	u32 *addrs;		/* Array with relative instruction addresses */
  35	u8 *prg_buf;		/* Start of program */
  36	int size;		/* Size of program and literal pool */
  37	int size_prg;		/* Size of program */
  38	int prg;		/* Current position in program */
  39	int lit_start;		/* Start of literal pool */
  40	int lit;		/* Current position in literal pool */
  41	int base_ip;		/* Base address for literal pool */
  42	int ret0_ip;		/* Address of return 0 */
  43	int exit_ip;		/* Address of exit */
  44	int tail_call_start;	/* Tail call start offset */
  45	int labels[1];		/* Labels for local jumps */
  46};
  47
  48#define BPF_SIZE_MAX	0xffff	/* Max size for program (16 bit branches) */
  49
  50#define SEEN_SKB	1	/* skb access */
  51#define SEEN_MEM	2	/* use mem[] for temporary storage */
  52#define SEEN_RET0	4	/* ret0_ip points to a valid return 0 */
  53#define SEEN_LITERAL	8	/* code uses literals */
  54#define SEEN_FUNC	16	/* calls C functions */
  55#define SEEN_TAIL_CALL	32	/* code uses tail calls */
  56#define SEEN_SKB_CHANGE	64	/* code changes skb data */
  57#define SEEN_REG_AX	128	/* code uses constant blinding */
  58#define SEEN_STACK	(SEEN_FUNC | SEEN_MEM | SEEN_SKB)
  59
  60/*
  61 * s390 registers
  62 */
  63#define REG_W0		(MAX_BPF_JIT_REG + 0)	/* Work register 1 (even) */
  64#define REG_W1		(MAX_BPF_JIT_REG + 1)	/* Work register 2 (odd) */
  65#define REG_SKB_DATA	(MAX_BPF_JIT_REG + 2)	/* SKB data register */
  66#define REG_L		(MAX_BPF_JIT_REG + 3)	/* Literal pool register */
  67#define REG_15		(MAX_BPF_JIT_REG + 4)	/* Register 15 */
  68#define REG_0		REG_W0			/* Register 0 */
  69#define REG_1		REG_W1			/* Register 1 */
  70#define REG_2		BPF_REG_1		/* Register 2 */
  71#define REG_14		BPF_REG_0		/* Register 14 */
  72
  73/*
  74 * Mapping of BPF registers to s390 registers
  75 */
  76static const int reg2hex[] = {
  77	/* Return code */
  78	[BPF_REG_0]	= 14,
  79	/* Function parameters */
  80	[BPF_REG_1]	= 2,
  81	[BPF_REG_2]	= 3,
  82	[BPF_REG_3]	= 4,
  83	[BPF_REG_4]	= 5,
  84	[BPF_REG_5]	= 6,
  85	/* Call saved registers */
  86	[BPF_REG_6]	= 7,
  87	[BPF_REG_7]	= 8,
  88	[BPF_REG_8]	= 9,
  89	[BPF_REG_9]	= 10,
  90	/* BPF stack pointer */
  91	[BPF_REG_FP]	= 13,
  92	/* Register for blinding (shared with REG_SKB_DATA) */
  93	[BPF_REG_AX]	= 12,
  94	/* SKB data pointer */
  95	[REG_SKB_DATA]	= 12,
  96	/* Work registers for s390x backend */
  97	[REG_W0]	= 0,
  98	[REG_W1]	= 1,
  99	[REG_L]		= 11,
 100	[REG_15]	= 15,
 101};
 102
 103static inline u32 reg(u32 dst_reg, u32 src_reg)
 104{
 105	return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
 106}
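/*
 * Illustrative example (added; not part of the kernel source): reg() packs
 * the mapped s390 register numbers of two BPF registers into the R1/R2
 * nibble pair of an opcode. With the reg2hex[] table above:
 *
 *	reg(BPF_REG_1, BPF_REG_2) == (2 << 4) | 3 == 0x23
 *
 * so EMIT4(0xb9080000, BPF_REG_1, BPF_REG_2) stores the four bytes
 * 0xb9080023, i.e. "agr %r2,%r3".
 */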
 107
 108static inline u32 reg_high(u32 reg)
 109{
 110	return reg2hex[reg] << 4;
 111}
 112
 113static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 114{
 115	u32 r1 = reg2hex[b1];
 116
 117	if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
 118		jit->seen_reg[r1] = 1;
 119}
 120
 121#define REG_SET_SEEN(b1)					\
 122({								\
 123	reg_set_seen(jit, b1);					\
 124})
 125
 126#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
 127
 128/*
 129 * EMIT macros for code generation
 130 */
 131
 132#define _EMIT2(op)						\
 133({								\
 134	if (jit->prg_buf)					\
 135		*(u16 *) (jit->prg_buf + jit->prg) = op;	\
 136	jit->prg += 2;						\
 137})
 138
 139#define EMIT2(op, b1, b2)					\
 140({								\
 141	_EMIT2(op | reg(b1, b2));				\
 142	REG_SET_SEEN(b1);					\
 143	REG_SET_SEEN(b2);					\
 144})
 145
 146#define _EMIT4(op)						\
 147({								\
 148	if (jit->prg_buf)					\
 149		*(u32 *) (jit->prg_buf + jit->prg) = op;	\
 150	jit->prg += 4;						\
 151})
 152
 153#define EMIT4(op, b1, b2)					\
 154({								\
 155	_EMIT4(op | reg(b1, b2));				\
 156	REG_SET_SEEN(b1);					\
 157	REG_SET_SEEN(b2);					\
 158})
 159
 160#define EMIT4_RRF(op, b1, b2, b3)				\
 161({								\
 162	_EMIT4(op | reg_high(b3) << 8 | reg(b1, b2));		\
 163	REG_SET_SEEN(b1);					\
 164	REG_SET_SEEN(b2);					\
 165	REG_SET_SEEN(b3);					\
 166})
 167
 168#define _EMIT4_DISP(op, disp)					\
 169({								\
 170	unsigned int __disp = (disp) & 0xfff;			\
 171	_EMIT4(op | __disp);					\
 172})
 173
 174#define EMIT4_DISP(op, b1, b2, disp)				\
 175({								\
 176	_EMIT4_DISP(op | reg_high(b1) << 16 |			\
 177		    reg_high(b2) << 8, disp);			\
 178	REG_SET_SEEN(b1);					\
 179	REG_SET_SEEN(b2);					\
 180})
 181
 182#define EMIT4_IMM(op, b1, imm)					\
 183({								\
 184	unsigned int __imm = (imm) & 0xffff;			\
 185	_EMIT4(op | reg_high(b1) << 16 | __imm);		\
 186	REG_SET_SEEN(b1);					\
 187})
 188
 189#define EMIT4_PCREL(op, pcrel)					\
 190({								\
 191	long __pcrel = ((pcrel) >> 1) & 0xffff;			\
 192	_EMIT4(op | __pcrel);					\
 193})
 194
 195#define _EMIT6(op1, op2)					\
 196({								\
 197	if (jit->prg_buf) {					\
 198		*(u32 *) (jit->prg_buf + jit->prg) = op1;	\
 199		*(u16 *) (jit->prg_buf + jit->prg + 4) = op2;	\
 200	}							\
 201	jit->prg += 6;						\
 202})
 203
 204#define _EMIT6_DISP(op1, op2, disp)				\
 205({								\
 206	unsigned int __disp = (disp) & 0xfff;			\
 207	_EMIT6(op1 | __disp, op2);				\
 208})
 209
 210#define _EMIT6_DISP_LH(op1, op2, disp)				\
 211({								\
 212	u32 _disp = (u32) disp;					\
 213	unsigned int __disp_h = _disp & 0xff000;		\
 214	unsigned int __disp_l = _disp & 0x00fff;		\
 215	_EMIT6(op1 | __disp_l, op2 | __disp_h >> 4);		\
 216})
 217
 218#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp)		\
 219({								\
 220	_EMIT6_DISP_LH(op1 | reg(b1, b2) << 16 |		\
 221		       reg_high(b3) << 8, op2, disp);		\
 222	REG_SET_SEEN(b1);					\
 223	REG_SET_SEEN(b2);					\
 224	REG_SET_SEEN(b3);					\
 225})
 226
 227#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask)	\
 228({								\
 229	int rel = (jit->labels[label] - jit->prg) >> 1;		\
 230	_EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff),	\
 231	       op2 | mask << 12);				\
 232	REG_SET_SEEN(b1);					\
 233	REG_SET_SEEN(b2);					\
 234})
 235
 236#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask)	\
 237({								\
 238	int rel = (jit->labels[label] - jit->prg) >> 1;		\
 239	_EMIT6(op1 | (reg_high(b1) | mask) << 16 |		\
 240		(rel & 0xffff), op2 | (imm & 0xff) << 8);	\
 241	REG_SET_SEEN(b1);					\
 242	BUILD_BUG_ON(((unsigned long) imm) > 0xff);		\
 243})
 244
 245#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)		\
 246({								\
 247	/* Branch instruction needs 6 bytes */			\
 248	int rel = (addrs[i + off + 1] - (addrs[i + 1] - 6)) / 2;\
 249	_EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), op2 | mask);	\
 250	REG_SET_SEEN(b1);					\
 251	REG_SET_SEEN(b2);					\
 252})
 253
 254#define _EMIT6_IMM(op, imm)					\
 255({								\
 256	unsigned int __imm = (imm);				\
 257	_EMIT6(op | (__imm >> 16), __imm & 0xffff);		\
 258})
 259
 260#define EMIT6_IMM(op, b1, imm)					\
 261({								\
 262	_EMIT6_IMM(op | reg_high(b1) << 16, imm);		\
 263	REG_SET_SEEN(b1);					\
 264})
 265
 266#define EMIT_CONST_U32(val)					\
 267({								\
 268	unsigned int ret;					\
 269	ret = jit->lit - jit->base_ip;				\
 270	jit->seen |= SEEN_LITERAL;				\
 271	if (jit->prg_buf)					\
 272		*(u32 *) (jit->prg_buf + jit->lit) = (u32) val;	\
 273	jit->lit += 4;						\
 274	ret;							\
 275})
 276
 277#define EMIT_CONST_U64(val)					\
 278({								\
 279	unsigned int ret;					\
 280	ret = jit->lit - jit->base_ip;				\
 281	jit->seen |= SEEN_LITERAL;				\
 282	if (jit->prg_buf)					\
 283		*(u64 *) (jit->prg_buf + jit->lit) = (u64) val;	\
 284	jit->lit += 8;						\
 285	ret;							\
 286})
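/*
 * Sketch (added for illustration; assumes SEEN_LITERAL handling as above):
 * literal pool entries are addressed relative to jit->base_ip, which the
 * prologue records right after "basr" loads the pool base into REG_L
 * (%r11, per reg2hex[] above). A 64-bit constant load thus becomes:
 *
 *	basr	%r11,0		# prologue: %r11 = base_ip
 *	...
 *	lg	%dst,D2(%r11)	# D2 = displacement from EMIT_CONST_U64()
 */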
 287
 288#define EMIT_ZERO(b1)						\
 289({								\
 290	/* llgfr %dst,%dst (zero extend to 64 bit) */		\
 291	EMIT4(0xb9160000, b1, b1);				\
 292	REG_SET_SEEN(b1);					\
 293})
 294
 295/*
 296 * Fill whole space with illegal instructions
 297 */
 298static void jit_fill_hole(void *area, unsigned int size)
 299{
 300	memset(area, 0, size);
 301}
 302
 303/*
 304 * Save registers from "rs" (register start) to "re" (register end) on stack
 305 */
 306static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
 307{
 308	u32 off = STK_OFF_R6 + (rs - 6) * 8;
 309
 310	if (rs == re)
 311		/* stg %rs,off(%r15) */
 312		_EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
 313	else
 314		/* stmg %rs,%re,off(%r15) */
 315		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
 316}
 317
 318/*
 319 * Restore registers from "rs" (register start) to "re" (register end) on stack
 320 */
 321static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re)
 322{
 323	u32 off = STK_OFF_R6 + (rs - 6) * 8;
 324
 325	if (jit->seen & SEEN_STACK)
 326		off += STK_OFF;
 327
 328	if (rs == re)
 329		/* lg %rs,off(%r15) */
 330		_EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
 331	else
 332		/* lmg %rs,%re,off(%r15) */
 333		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
 334}
 335
 336/*
 337 * Return first seen register (from start)
 338 */
 339static int get_start(struct bpf_jit *jit, int start)
 340{
 341	int i;
 342
 343	for (i = start; i <= 15; i++) {
 344		if (jit->seen_reg[i])
 345			return i;
 346	}
 347	return 0;
 348}
 349
 350/*
 351 * Return last seen register (from start) (gap >= 2)
 352 */
 353static int get_end(struct bpf_jit *jit, int start)
 354{
 355	int i;
 356
 357	for (i = start; i < 15; i++) {
 358		if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
 359			return i - 1;
 360	}
 361	return jit->seen_reg[15] ? 15 : 14;
 362}
 363
 364#define REGS_SAVE	1
 365#define REGS_RESTORE	0
 366/*
 367 * Save and restore clobbered registers (6-15) on stack.
 368 * We save/restore registers in chunks with gap >= 2 registers.
 369 */
 370static void save_restore_regs(struct bpf_jit *jit, int op)
 371{
 372
 373	int re = 6, rs;
 374
 375	do {
 376		rs = get_start(jit, re);
 377		if (!rs)
 378			break;
 379		re = get_end(jit, rs + 1);
 380		if (op == REGS_SAVE)
 381			save_regs(jit, rs, re);
 382		else
 383			restore_regs(jit, rs, re);
 384		re++;
 385	} while (re <= 15);
 386}
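/*
 * Worked example (added for illustration): if a program clobbers only
 * %r6, %r7 and %r12, get_start()/get_end() split the registers into the
 * chunks [6-7] and [12-12] (the unused gap %r8-%r11 is >= 2 registers),
 * so REGS_SAVE emits:
 *
 *	stmg	%r6,%r7,STK_OFF_R6(%r15)
 *	stg	%r12,STK_OFF_R6+48(%r15)
 */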
 387
 388/*
 389 * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
 390 * we store the SKB header length on the stack and the SKB data
 391 * pointer in REG_SKB_DATA if BPF_REG_AX is not used.
 392 */
 393static void emit_load_skb_data_hlen(struct bpf_jit *jit)
 394{
 395	/* Header length: llgf %w1,<len>(%b1) */
 396	EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
 397		      offsetof(struct sk_buff, len));
 398	/* s %w1,<data_len>(%b1) */
 399	EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
 400		   offsetof(struct sk_buff, data_len));
 401	/* stg %w1,STK_OFF_HLEN(%r0,%r15) */
 402	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN);
 403	if (!(jit->seen & SEEN_REG_AX))
 404		/* lg %skb_data,data_off(%b1) */
 405		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
 406			      BPF_REG_1, offsetof(struct sk_buff, data));
 407}
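/*
 * Note (added): the three instructions above compute the linear header
 * length, hlen = skb->len - skb->data_len, and spill it to
 * STK_OFF_HLEN(%r15), where the sk_load_* helpers from "bpf_jit.S"
 * expect to find it.
 */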
 408
 409/*
 410 * Emit function prologue
 411 *
 412 * Save registers and create stack frame if necessary.
 413 * See stack frame layout description in "bpf_jit.h"!
 414 */
 415static void bpf_jit_prologue(struct bpf_jit *jit)
 416{
 417	if (jit->seen & SEEN_TAIL_CALL) {
 418		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
 419		_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
 420	} else {
 421		/* j tail_call_start: NOP if no tail calls are used */
 422		EMIT4_PCREL(0xa7f40000, 6);
 423		_EMIT2(0);
 424	}
 425	/* Tail calls have to skip above initialization */
 426	jit->tail_call_start = jit->prg;
 427	/* Save registers */
 428	save_restore_regs(jit, REGS_SAVE);
 429	/* Setup literal pool */
 430	if (jit->seen & SEEN_LITERAL) {
 431		/* basr %r11,0 */
 432		EMIT2(0x0d00, REG_L, REG_0);
 433		jit->base_ip = jit->prg;
 434	}
 435	/* Setup stack and backchain */
 436	if (jit->seen & SEEN_STACK) {
 437		if (jit->seen & SEEN_FUNC)
 438			/* lgr %w1,%r15 (backchain) */
 439			EMIT4(0xb9040000, REG_W1, REG_15);
 440		/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
 441		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
 442		/* aghi %r15,-STK_OFF */
 443		EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF);
 444		if (jit->seen & SEEN_FUNC)
 445			/* stg %w1,152(%r15) (backchain) */
 446			EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
 447				      REG_15, 152);
 448	}
 449	if (jit->seen & SEEN_SKB)
 450		emit_load_skb_data_hlen(jit);
 451	if (jit->seen & SEEN_SKB_CHANGE)
 452		/* stg %b1,STK_OFF_SKBP(%r0,%r15) */
 453		EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
 454			      STK_OFF_SKBP);
 455}
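/*
 * Illustrative prologue (added; a sketch assuming SEEN_STACK and SEEN_FUNC
 * are set, no tail calls and no literals), roughly what the code above emits:
 *
 *	j	<tail_call_start>		# skipped by tail calls
 *	.short	0				# padding
 *	stmg	%r6,%r15,STK_OFF_R6(%r15)	# save clobbered registers
 *	lgr	%r1,%r15			# old %r15 for backchain
 *	la	%r13,STK_160_UNUSED(%r15)	# BPF frame pointer
 *	aghi	%r15,-STK_OFF			# allocate JIT stack frame
 *	stg	%r1,152(%r15)			# store backchain
 */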
 456
 457/*
 458 * Function epilogue
 459 */
 460static void bpf_jit_epilogue(struct bpf_jit *jit)
 461{
 462	/* Return 0 */
 463	if (jit->seen & SEEN_RET0) {
 464		jit->ret0_ip = jit->prg;
 465		/* lghi %b0,0 */
 466		EMIT4_IMM(0xa7090000, BPF_REG_0, 0);
 467	}
 468	jit->exit_ip = jit->prg;
 469	/* Load exit code: lgr %r2,%b0 */
 470	EMIT4(0xb9040000, REG_2, BPF_REG_0);
 471	/* Restore registers */
 472	save_restore_regs(jit, REGS_RESTORE);
 473	/* br %r14 */
 474	_EMIT2(0x07fe);
 475}
 476
 477/*
 478 * Compile one eBPF instruction into s390x code
 479 *
 480 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
 481 * stack space for the large switch statement.
 482 */
 483static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
 484{
 485	struct bpf_insn *insn = &fp->insnsi[i];
 486	int jmp_off, last, insn_count = 1;
 487	unsigned int func_addr, mask;
 488	u32 dst_reg = insn->dst_reg;
 489	u32 src_reg = insn->src_reg;
 490	u32 *addrs = jit->addrs;
 491	s32 imm = insn->imm;
 492	s16 off = insn->off;
 493
 494	if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
 495		jit->seen |= SEEN_REG_AX;
 496	switch (insn->code) {
 497	/*
 498	 * BPF_MOV
 499	 */
 500	case BPF_ALU | BPF_MOV | BPF_X: /* dst = (u32) src */
 501		/* llgfr %dst,%src */
 502		EMIT4(0xb9160000, dst_reg, src_reg);
 503		break;
 504	case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
 505		/* lgr %dst,%src */
 506		EMIT4(0xb9040000, dst_reg, src_reg);
 507		break;
 508	case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
 509		/* llilf %dst,imm */
 510		EMIT6_IMM(0xc00f0000, dst_reg, imm);
 511		break;
 512	case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
 513		/* lgfi %dst,imm */
 514		EMIT6_IMM(0xc0010000, dst_reg, imm);
 515		break;
 516	/*
 517	 * BPF_LD 64
 518	 */
 519	case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
 520	{
 521		/* 16 byte instruction that uses two 'struct bpf_insn' */
 522		u64 imm64;
 523
 524		imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
 525		/* lg %dst,<d(imm)>(%l) */
 526		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, REG_0, REG_L,
 527			      EMIT_CONST_U64(imm64));
 528		insn_count = 2;
 529		break;
 530	}
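/*
 * Note (added): for a 64-bit immediate load the verifier encodes the
 * constant across two struct bpf_insn slots: insn[0].imm holds the low
 * 32 bits and insn[1].imm the high 32 bits. The code above reassembles
 * imm64 and fetches it from the literal pool via REG_L.
 */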
 531	/*
 532	 * BPF_ADD
 533	 */
 534	case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
 535		/* ar %dst,%src */
 536		EMIT2(0x1a00, dst_reg, src_reg);
 537		EMIT_ZERO(dst_reg);
 538		break;
 539	case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
 540		/* agr %dst,%src */
 541		EMIT4(0xb9080000, dst_reg, src_reg);
 542		break;
 543	case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
 544		if (!imm)
 545			break;
 546		/* alfi %dst,imm */
 547		EMIT6_IMM(0xc20b0000, dst_reg, imm);
 548		EMIT_ZERO(dst_reg);
 549		break;
 550	case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
 551		if (!imm)
 552			break;
 553		/* agfi %dst,imm */
 554		EMIT6_IMM(0xc2080000, dst_reg, imm);
 555		break;
 556	/*
 557	 * BPF_SUB
 558	 */
 559	case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
 560		/* sr %dst,%src */
 561		EMIT2(0x1b00, dst_reg, src_reg);
 562		EMIT_ZERO(dst_reg);
 563		break;
 564	case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
 565		/* sgr %dst,%src */
 566		EMIT4(0xb9090000, dst_reg, src_reg);
 567		break;
 568	case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
 569		if (!imm)
 570			break;
 571		/* alfi %dst,-imm */
 572		EMIT6_IMM(0xc20b0000, dst_reg, -imm);
 573		EMIT_ZERO(dst_reg);
 574		break;
 575	case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
 576		if (!imm)
 577			break;
 578		/* agfi %dst,-imm */
 579		EMIT6_IMM(0xc2080000, dst_reg, -imm);
 580		break;
 581	/*
 582	 * BPF_MUL
 583	 */
 584	case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
 585		/* msr %dst,%src */
 586		EMIT4(0xb2520000, dst_reg, src_reg);
 587		EMIT_ZERO(dst_reg);
 588		break;
 589	case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
 590		/* msgr %dst,%src */
 591		EMIT4(0xb90c0000, dst_reg, src_reg);
 592		break;
 593	case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
 594		if (imm == 1)
 595			break;
 596		/* msfi %dst,imm */
 597		EMIT6_IMM(0xc2010000, dst_reg, imm);
 598		EMIT_ZERO(dst_reg);
 599		break;
 600	case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
 601		if (imm == 1)
 602			break;
 603		/* msgfi %dst,imm */
 604		EMIT6_IMM(0xc2000000, dst_reg, imm);
 605		break;
 606	/*
 607	 * BPF_DIV / BPF_MOD
 608	 */
 609	case BPF_ALU | BPF_DIV | BPF_X: /* dst = (u32) dst / (u32) src */
 610	case BPF_ALU | BPF_MOD | BPF_X: /* dst = (u32) dst % (u32) src */
 611	{
 612		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 613
 614		jit->seen |= SEEN_RET0;
 615		/* ltr %src,%src (if src == 0 goto fail) */
 616		EMIT2(0x1200, src_reg, src_reg);
 617		/* jz <ret0> */
 618		EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
 619		/* lhi %w0,0 */
 620		EMIT4_IMM(0xa7080000, REG_W0, 0);
 621		/* lr %w1,%dst */
 622		EMIT2(0x1800, REG_W1, dst_reg);
 623		/* dlr %w0,%src */
 624		EMIT4(0xb9970000, REG_W0, src_reg);
 625		/* llgfr %dst,%rc */
 626		EMIT4(0xb9160000, dst_reg, rc_reg);
 627		break;
 628	}
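/*
 * Note (added): "dlr" divides the 64-bit dividend in the even/odd pair
 * %w0:%w1 by %src, leaving the remainder in the even register (%w0) and
 * the quotient in the odd register (%w1); rc_reg above therefore selects
 * %w1 for BPF_DIV and %w0 for BPF_MOD.
 */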
 629	case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
 630	case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
 631	{
 632		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 633
 634		jit->seen |= SEEN_RET0;
 635		/* ltgr %src,%src (if src == 0 goto fail) */
 636		EMIT4(0xb9020000, src_reg, src_reg);
 637		/* jz <ret0> */
 638		EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
 639		/* lghi %w0,0 */
 640		EMIT4_IMM(0xa7090000, REG_W0, 0);
 641		/* lgr %w1,%dst */
 642		EMIT4(0xb9040000, REG_W1, dst_reg);
 643		/* dlgr %w0,%src */
 644		EMIT4(0xb9870000, REG_W0, src_reg);
 645		/* lgr %dst,%rc */
 646		EMIT4(0xb9040000, dst_reg, rc_reg);
 647		break;
 648	}
 649	case BPF_ALU | BPF_DIV | BPF_K: /* dst = (u32) dst / (u32) imm */
 650	case BPF_ALU | BPF_MOD | BPF_K: /* dst = (u32) dst % (u32) imm */
 651	{
 652		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 653
 654		if (imm == 1) {
 655			if (BPF_OP(insn->code) == BPF_MOD)
 656				/* lghi %dst,0 */
 657				EMIT4_IMM(0xa7090000, dst_reg, 0);
 658			break;
 659		}
 660		/* lhi %w0,0 */
 661		EMIT4_IMM(0xa7080000, REG_W0, 0);
 662		/* lr %w1,%dst */
 663		EMIT2(0x1800, REG_W1, dst_reg);
 664		/* dl %w0,<d(imm)>(%l) */
 665		EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
 666			      EMIT_CONST_U32(imm));
 667		/* llgfr %dst,%rc */
 668		EMIT4(0xb9160000, dst_reg, rc_reg);
 669		break;
 670	}
 671	case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
 672	case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
 673	{
 674		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 675
 676		if (imm == 1) {
 677			if (BPF_OP(insn->code) == BPF_MOD)
 678				/* lghi %dst,0 */
 679				EMIT4_IMM(0xa7090000, dst_reg, 0);
 680			break;
 681		}
 682		/* lghi %w0,0 */
 683		EMIT4_IMM(0xa7090000, REG_W0, 0);
 684		/* lgr %w1,%dst */
 685		EMIT4(0xb9040000, REG_W1, dst_reg);
 686		/* dlg %w0,<d(imm)>(%l) */
 687		EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
 688			      EMIT_CONST_U64(imm));
 689		/* lgr %dst,%rc */
 690		EMIT4(0xb9040000, dst_reg, rc_reg);
 691		break;
 692	}
 693	/*
 694	 * BPF_AND
 695	 */
 696	case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
 697		/* nr %dst,%src */
 698		EMIT2(0x1400, dst_reg, src_reg);
 699		EMIT_ZERO(dst_reg);
 700		break;
 701	case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
 702		/* ngr %dst,%src */
 703		EMIT4(0xb9800000, dst_reg, src_reg);
 704		break;
 705	case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
 706		/* nilf %dst,imm */
 707		EMIT6_IMM(0xc00b0000, dst_reg, imm);
 708		EMIT_ZERO(dst_reg);
 709		break;
 710	case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
 711		/* ng %dst,<d(imm)>(%l) */
 712		EMIT6_DISP_LH(0xe3000000, 0x0080, dst_reg, REG_0, REG_L,
 713			      EMIT_CONST_U64(imm));
 714		break;
 715	/*
 716	 * BPF_OR
 717	 */
 718	case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
 719		/* or %dst,%src */
 720		EMIT2(0x1600, dst_reg, src_reg);
 721		EMIT_ZERO(dst_reg);
 722		break;
 723	case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
 724		/* ogr %dst,%src */
 725		EMIT4(0xb9810000, dst_reg, src_reg);
 726		break;
 727	case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
 728		/* oilf %dst,imm */
 729		EMIT6_IMM(0xc00d0000, dst_reg, imm);
 730		EMIT_ZERO(dst_reg);
 731		break;
 732	case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
 733		/* og %dst,<d(imm)>(%l) */
 734		EMIT6_DISP_LH(0xe3000000, 0x0081, dst_reg, REG_0, REG_L,
 735			      EMIT_CONST_U64(imm));
 736		break;
 737	/*
 738	 * BPF_XOR
 739	 */
 740	case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
 741		/* xr %dst,%src */
 742		EMIT2(0x1700, dst_reg, src_reg);
 743		EMIT_ZERO(dst_reg);
 744		break;
 745	case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
 746		/* xgr %dst,%src */
 747		EMIT4(0xb9820000, dst_reg, src_reg);
 748		break;
 749	case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
 750		if (!imm)
 751			break;
 752		/* xilf %dst,imm */
 753		EMIT6_IMM(0xc0070000, dst_reg, imm);
 754		EMIT_ZERO(dst_reg);
 755		break;
 756	case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
 757		/* xg %dst,<d(imm)>(%l) */
 758		EMIT6_DISP_LH(0xe3000000, 0x0082, dst_reg, REG_0, REG_L,
 759			      EMIT_CONST_U64(imm));
 760		break;
 761	/*
 762	 * BPF_LSH
 763	 */
 764	case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
 765		/* sll %dst,0(%src) */
 766		EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
 767		EMIT_ZERO(dst_reg);
 768		break;
 769	case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
 770		/* sllg %dst,%dst,0(%src) */
 771		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
 772		break;
 773	case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
 774		if (imm == 0)
 775			break;
 776		/* sll %dst,imm(%r0) */
 777		EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
 778		EMIT_ZERO(dst_reg);
 779		break;
 780	case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
 781		if (imm == 0)
 782			break;
 783		/* sllg %dst,%dst,imm(%r0) */
 784		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
 785		break;
 786	/*
 787	 * BPF_RSH
 788	 */
 789	case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
 790		/* srl %dst,0(%src) */
 791		EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
 792		EMIT_ZERO(dst_reg);
 793		break;
 794	case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
 795		/* srlg %dst,%dst,0(%src) */
 796		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
 797		break;
 798	case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
 799		if (imm == 0)
 800			break;
 801		/* srl %dst,imm(%r0) */
 802		EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
 803		EMIT_ZERO(dst_reg);
 804		break;
 805	case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
 806		if (imm == 0)
 807			break;
 808		/* srlg %dst,%dst,imm(%r0) */
 809		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
 810		break;
 811	/*
 812	 * BPF_ARSH
 813	 */
 814	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
 815		/* srag %dst,%dst,0(%src) */
 816		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
 817		break;
 818	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
 819		if (imm == 0)
 820			break;
 821		/* srag %dst,%dst,imm(%r0) */
 822		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
 823		break;
 824	/*
 825	 * BPF_NEG
 826	 */
 827	case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
 828		/* lcr %dst,%dst */
 829		EMIT2(0x1300, dst_reg, dst_reg);
 830		EMIT_ZERO(dst_reg);
 831		break;
 832	case BPF_ALU64 | BPF_NEG: /* dst = -dst */
 833		/* lcgr %dst,%dst */
 834		EMIT4(0xb9130000, dst_reg, dst_reg);
 835		break;
 836	/*
 837	 * BPF_FROM_BE/LE
 838	 */
 839	case BPF_ALU | BPF_END | BPF_FROM_BE:
 840		/* s390 is big endian, therefore only clear high order bytes */
 841		switch (imm) {
 842		case 16: /* dst = (u16) cpu_to_be16(dst) */
 843			/* llghr %dst,%dst */
 844			EMIT4(0xb9850000, dst_reg, dst_reg);
 845			break;
 846		case 32: /* dst = (u32) cpu_to_be32(dst) */
 847			/* llgfr %dst,%dst */
 848			EMIT4(0xb9160000, dst_reg, dst_reg);
 849			break;
 850		case 64: /* dst = (u64) cpu_to_be64(dst) */
 851			break;
 852		}
 853		break;
 854	case BPF_ALU | BPF_END | BPF_FROM_LE:
 855		switch (imm) {
 856		case 16: /* dst = (u16) cpu_to_le16(dst) */
 857			/* lrvr %dst,%dst */
 858			EMIT4(0xb91f0000, dst_reg, dst_reg);
 859			/* srl %dst,16(%r0) */
 860			EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
 861			/* llghr %dst,%dst */
 862			EMIT4(0xb9850000, dst_reg, dst_reg);
 863			break;
 864		case 32: /* dst = (u32) cpu_to_le32(dst) */
 865			/* lrvr %dst,%dst */
 866			EMIT4(0xb91f0000, dst_reg, dst_reg);
 867			/* llgfr %dst,%dst */
 868			EMIT4(0xb9160000, dst_reg, dst_reg);
 869			break;
 870		case 64: /* dst = (u64) cpu_to_le64(dst) */
 871			/* lrvgr %dst,%dst */
 872			EMIT4(0xb90f0000, dst_reg, dst_reg);
 873			break;
 874		}
 875		break;
 876	/*
 877	 * BPF_ST(X)
 878	 */
 879	case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
 880		/* stcy %src,off(%dst) */
 881		EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
 882		jit->seen |= SEEN_MEM;
 883		break;
 884	case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
 885		/* sthy %src,off(%dst) */
 886		EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
 887		jit->seen |= SEEN_MEM;
 888		break;
 889	case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
 890		/* sty %src,off(%dst) */
 891		EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
 892		jit->seen |= SEEN_MEM;
 893		break;
 894	case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
 895		/* stg %src,off(%dst) */
 896		EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
 897		jit->seen |= SEEN_MEM;
 898		break;
 899	case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
 900		/* lhi %w0,imm */
 901		EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
 902		/* stcy %w0,off(dst) */
 903		EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
 904		jit->seen |= SEEN_MEM;
 905		break;
 906	case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
 907		/* lhi %w0,imm */
 908		EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
 909		/* sthy %w0,off(dst) */
 910		EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
 911		jit->seen |= SEEN_MEM;
 912		break;
 913	case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
 914		/* llilf %w0,imm  */
 915		EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
 916		/* sty %w0,off(%dst) */
 917		EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
 918		jit->seen |= SEEN_MEM;
 919		break;
 920	case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
 921		/* lgfi %w0,imm */
 922		EMIT6_IMM(0xc0010000, REG_W0, imm);
 923		/* stg %w0,off(%dst) */
 924		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
 925		jit->seen |= SEEN_MEM;
 926		break;
 927	/*
 928	 * BPF_STX XADD (atomic_add)
 929	 */
 930	case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
 931		/* laal %w0,%src,off(%dst) */
 932		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W0, src_reg,
 933			      dst_reg, off);
 934		jit->seen |= SEEN_MEM;
 935		break;
 936	case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
 937		/* laalg %w0,%src,off(%dst) */
 938		EMIT6_DISP_LH(0xeb000000, 0x00ea, REG_W0, src_reg,
 939			      dst_reg, off);
 940		jit->seen |= SEEN_MEM;
 941		break;
 942	/*
 943	 * BPF_LDX
 944	 */
 945	case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
 946		/* llgc %dst,0(off,%src) */
 947		EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
 948		jit->seen |= SEEN_MEM;
 949		break;
 950	case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
 951		/* llgh %dst,0(off,%src) */
 952		EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
 953		jit->seen |= SEEN_MEM;
 954		break;
 955	case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
 956		/* llgf %dst,off(%src) */
 957		jit->seen |= SEEN_MEM;
 958		EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
 959		break;
 960	case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
 961		/* lg %dst,0(off,%src) */
 962		jit->seen |= SEEN_MEM;
 963		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
 964		break;
 965	/*
 966	 * BPF_JMP / CALL
 967	 */
 968	case BPF_JMP | BPF_CALL:
 969	{
 970		/*
 971		 * b0 = (__bpf_call_base + imm)(b1, b2, b3, b4, b5)
 972		 */
 973		const u64 func = (u64)__bpf_call_base + imm;
 974
 975		REG_SET_SEEN(BPF_REG_5);
 976		jit->seen |= SEEN_FUNC;
 977		/* lg %w1,<d(imm)>(%l) */
 978		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
 979			      EMIT_CONST_U64(func));
 980		/* basr %r14,%w1 */
 981		EMIT2(0x0d00, REG_14, REG_W1);
 982		/* lgr %b0,%r2: load return value into %b0 */
 983		EMIT4(0xb9040000, BPF_REG_0, REG_2);
 984		if (bpf_helper_changes_pkt_data((void *)func)) {
 985			jit->seen |= SEEN_SKB_CHANGE;
 986			/* lg %b1,STK_OFF_SKBP(%r15) */
 987			EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
 988				      REG_15, STK_OFF_SKBP);
 989			emit_load_skb_data_hlen(jit);
 990		}
 991		break;
 992	}
 993	case BPF_JMP | BPF_CALL | BPF_X:
 994		/*
 995		 * Implicit input:
 996		 *  B1: pointer to ctx
 997		 *  B2: pointer to bpf_array
 998		 *  B3: index in bpf_array
 999		 */
1000		jit->seen |= SEEN_TAIL_CALL;
1001
1002		/*
1003		 * if (index >= array->map.max_entries)
1004		 *         goto out;
1005		 */
1006
1007		/* llgf %w1,map.max_entries(%b2) */
1008		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
1009			      offsetof(struct bpf_array, map.max_entries));
1010		/* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
1011		EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
1012				  REG_W1, 0, 0xa);
1013
1014		/*
1015		 * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
1016		 *         goto out;
1017		 */
1018
1019		if (jit->seen & SEEN_STACK)
1020			off = STK_OFF_TCCNT + STK_OFF;
1021		else
1022			off = STK_OFF_TCCNT;
1023		/* lhi %w0,1 */
1024		EMIT4_IMM(0xa7080000, REG_W0, 1);
1025		/* laal %w1,%w0,off(%r15) */
1026		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
1027		/* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
1028		EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
1029				      MAX_TAIL_CALL_CNT, 0, 0x2);
1030
1031		/*
1032		 * prog = array->ptrs[index];
1033		 * if (prog == NULL)
1034		 *         goto out;
1035		 */
1036
1037		/* sllg %r1,%b3,3: %r1 = index * 8 */
1038		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
1039		/* lg %r1,prog(%b2,%r1) */
1040		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
1041			      REG_1, offsetof(struct bpf_array, ptrs));
1042		/* clgij %r1,0,0x8,label0 */
1043		EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8);
1044
1045		/*
1046		 * Restore registers before calling function
1047		 */
1048		save_restore_regs(jit, REGS_RESTORE);
1049
1050		/*
1051		 * goto *(prog->bpf_func + tail_call_start);
1052		 */
1053
1054		/* lg %r1,bpf_func(%r1) */
1055		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
1056			      offsetof(struct bpf_prog, bpf_func));
1057		/* bc 0xf,tail_call_start(%r1) */
1058		_EMIT4(0x47f01000 + jit->tail_call_start);
1059		/* out: */
1060		jit->labels[0] = jit->prg;
1061		break;
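/*
 * Illustrative summary (added) of the tail call sequence emitted above:
 *
 *	llgf	%w1,map.max_entries(%b2)
 *	clgrj	%b3,%w1,0xa,out			# index >= max_entries?
 *	lhi	%w0,1
 *	laal	%w1,%w0,STK_OFF_TCCNT(%r15)	# tail_call_cnt++
 *	clij	%w1,MAX_TAIL_CALL_CNT,0x2,out	# too many nested calls?
 *	sllg	%r1,%b3,3
 *	lg	%r1,ptrs(%b2,%r1)		# prog = array->ptrs[index]
 *	clgij	%r1,0,0x8,out			# prog == NULL?
 *	lg	%r1,bpf_func(%r1)
 *	bc	0xf,tail_call_start(%r1)	# skip next program's
 * out:						# tail call initialization
 */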
1062	case BPF_JMP | BPF_EXIT: /* return b0 */
1063		last = (i == fp->len - 1) ? 1 : 0;
1064		if (last && !(jit->seen & SEEN_RET0))
1065			break;
1066		/* j <exit> */
1067		EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
1068		break;
1069	/*
1070	 * Branch relative (number of skipped instructions) to offset on
1071	 * condition.
1072	 *
1073	 * Condition code to mask mapping:
1074	 *
1075	 * CC | Description	   | Mask
1076	 * ------------------------------
1077	 * 0  | Operands equal	   |	8
1078	 * 1  | First operand low  |	4
1079	 * 2  | First operand high |	2
1080	 * 3  | Unused		   |	1
1081	 *
1082	 * For s390x relative branches: ip = ip + off_bytes
1083	 * For BPF relative branches:	insn = insn + off_insns + 1
1084	 *
1085	 * For example for s390x with offset 0 we jump to the branch
1086	 * instruction itself (loop) and for BPF with offset 0 we
1087	 * branch to the instruction behind the branch.
1088	 */
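/*
 * Example (added for illustration): "if dst > src goto +5"
 * (BPF_JMP | BPF_JGT | BPF_X) takes the branch_xu path below with
 * mask 0x2000 ("jh") and compiles to a single compare-and-branch:
 *
 *	clgrj	%dst,%src,2,<addrs[i + 5 + 1]>
 */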
1089	case BPF_JMP | BPF_JA: /* if (true) */
1090		mask = 0xf000; /* j */
1091		goto branch_oc;
1092	case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
1093		mask = 0x2000; /* jh */
1094		goto branch_ks;
1095	case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
1096		mask = 0xa000; /* jhe */
1097		goto branch_ks;
1098	case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
1099		mask = 0x2000; /* jh */
1100		goto branch_ku;
1101	case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
1102		mask = 0xa000; /* jhe */
1103		goto branch_ku;
1104	case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
1105		mask = 0x7000; /* jne */
1106		goto branch_ku;
1107	case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
1108		mask = 0x8000; /* je */
1109		goto branch_ku;
1110	case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
1111		mask = 0x7000; /* jnz */
1112		/* lgfi %w1,imm (load sign extend imm) */
1113		EMIT6_IMM(0xc0010000, REG_W1, imm);
1114		/* ngr %w1,%dst */
1115		EMIT4(0xb9800000, REG_W1, dst_reg);
1116		goto branch_oc;
1117
1118	case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
1119		mask = 0x2000; /* jh */
1120		goto branch_xs;
1121	case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
1122		mask = 0xa000; /* jhe */
1123		goto branch_xs;
1124	case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
1125		mask = 0x2000; /* jh */
1126		goto branch_xu;
1127	case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
1128		mask = 0xa000; /* jhe */
1129		goto branch_xu;
1130	case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
1131		mask = 0x7000; /* jne */
1132		goto branch_xu;
1133	case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
1134		mask = 0x8000; /* je */
1135		goto branch_xu;
1136	case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
1137		mask = 0x7000; /* jnz */
1138		/* ngrk %w1,%dst,%src */
1139		EMIT4_RRF(0xb9e40000, REG_W1, dst_reg, src_reg);
1140		goto branch_oc;
1141branch_ks:
1142		/* lgfi %w1,imm (load sign extend imm) */
1143		EMIT6_IMM(0xc0010000, REG_W1, imm);
1144		/* cgrj %dst,%w1,mask,off */
1145		EMIT6_PCREL(0xec000000, 0x0064, dst_reg, REG_W1, i, off, mask);
1146		break;
1147branch_ku:
1148		/* lgfi %w1,imm (load sign extend imm) */
1149		EMIT6_IMM(0xc0010000, REG_W1, imm);
1150		/* clgrj %dst,%w1,mask,off */
1151		EMIT6_PCREL(0xec000000, 0x0065, dst_reg, REG_W1, i, off, mask);
1152		break;
1153branch_xs:
1154		/* cgrj %dst,%src,mask,off */
1155		EMIT6_PCREL(0xec000000, 0x0064, dst_reg, src_reg, i, off, mask);
1156		break;
1157branch_xu:
1158		/* clgrj %dst,%src,mask,off */
1159		EMIT6_PCREL(0xec000000, 0x0065, dst_reg, src_reg, i, off, mask);
1160		break;
1161branch_oc:
1162		/* brc mask,jmp_off (branch instruction needs 4 bytes) */
1163		jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4);
1164		EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off);
1165		break;
1166	/*
1167	 * BPF_LD
1168	 */
1169	case BPF_LD | BPF_ABS | BPF_B: /* b0 = *(u8 *) (skb->data+imm) */
1170	case BPF_LD | BPF_IND | BPF_B: /* b0 = *(u8 *) (skb->data+imm+src) */
1171		if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
1172			func_addr = __pa(sk_load_byte_pos);
1173		else
1174			func_addr = __pa(sk_load_byte);
1175		goto call_fn;
1176	case BPF_LD | BPF_ABS | BPF_H: /* b0 = *(u16 *) (skb->data+imm) */
1177	case BPF_LD | BPF_IND | BPF_H: /* b0 = *(u16 *) (skb->data+imm+src) */
1178		if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
1179			func_addr = __pa(sk_load_half_pos);
1180		else
1181			func_addr = __pa(sk_load_half);
1182		goto call_fn;
1183	case BPF_LD | BPF_ABS | BPF_W: /* b0 = *(u32 *) (skb->data+imm) */
1184	case BPF_LD | BPF_IND | BPF_W: /* b0 = *(u32 *) (skb->data+imm+src) */
1185		if ((BPF_MODE(insn->code) == BPF_ABS) && (imm >= 0))
1186			func_addr = __pa(sk_load_word_pos);
1187		else
1188			func_addr = __pa(sk_load_word);
1189		goto call_fn;
1190call_fn:
1191		jit->seen |= SEEN_SKB | SEEN_RET0 | SEEN_FUNC;
1192		REG_SET_SEEN(REG_14); /* Return address of possible func call */
1193
1194		/*
1195		 * Implicit input:
1196		 *  BPF_REG_6	 (R7) : skb pointer
1197		 *  REG_SKB_DATA (R12): skb data pointer (if no BPF_REG_AX)
1198		 *
1199		 * Calculated input:
1200		 *  BPF_REG_2	 (R3) : offset of byte(s) to fetch in skb
1201		 *  BPF_REG_5	 (R6) : return address
1202		 *
1203		 * Output:
1204		 *  BPF_REG_0	 (R14): data read from skb
1205		 *
1206		 * Scratch registers (BPF_REG_1-5)
1207		 */
1208
1209		/* Call function: llilf %w1,func_addr  */
1210		EMIT6_IMM(0xc00f0000, REG_W1, func_addr);
1211
1212		/* Offset: lgfi %b2,imm */
1213		EMIT6_IMM(0xc0010000, BPF_REG_2, imm);
1214		if (BPF_MODE(insn->code) == BPF_IND)
1215			/* agfr %b2,%src (%src is s32 here) */
1216			EMIT4(0xb9180000, BPF_REG_2, src_reg);
1217
1218		/* Reload REG_SKB_DATA if BPF_REG_AX is used */
1219		if (jit->seen & SEEN_REG_AX)
1220			/* lg %skb_data,data_off(%b6) */
1221			EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
1222				      BPF_REG_6, offsetof(struct sk_buff, data));
1223		/* basr %b5,%w1 (%b5 is call saved) */
1224		EMIT2(0x0d00, BPF_REG_5, REG_W1);
1225
1226		/*
1227		 * Note: For fast access we jump directly after the
1228		 * jnz instruction from bpf_jit.S
1229		 */
1230		/* jnz <ret0> */
1231		EMIT4_PCREL(0xa7740000, jit->ret0_ip - jit->prg);
1232		break;
1233	default: /* too complex, give up */
1234		pr_err("Unknown opcode %02x\n", insn->code);
1235		return -1;
1236	}
1237	return insn_count;
1238}
1239
1240/*
1241 * Compile eBPF program into s390x code
1242 */
1243static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
1244{
1245	int i, insn_count;
1246
1247	jit->lit = jit->lit_start;
1248	jit->prg = 0;
1249
1250	bpf_jit_prologue(jit);
1251	for (i = 0; i < fp->len; i += insn_count) {
1252		insn_count = bpf_jit_insn(jit, fp, i);
1253		if (insn_count < 0)
1254			return -1;
1255		jit->addrs[i + 1] = jit->prg; /* Next instruction address */
1256	}
1257	bpf_jit_epilogue(jit);
1258
1259	jit->lit_start = jit->prg;
1260	jit->size = jit->lit;
1261	jit->size_prg = jit->prg;
1262	return 0;
1263}
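/*
 * Note on the pass protocol (added): while jit->prg_buf is NULL, the
 * _EMIT* macros only advance jit->prg and jit->lit, so calling
 * bpf_jit_prog() merely measures the program. A sizing pass (a sketch;
 * the real driver runs it three times until the sizes converge) is just:
 *
 *	memset(&jit, 0, sizeof(jit));
 *	jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
 *	bpf_jit_prog(&jit, fp);		/* no bytes written yet */
 *
 * Only after bpf_jit_binary_alloc() provides jit.prg_buf does the same
 * call emit actual machine code (see bpf_int_jit_compile() below).
 */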
1264
1265/*
1266 * Classic BPF function stub. BPF programs will be converted into
1267 * eBPF and then bpf_int_jit_compile() will be called.
1268 */
1269void bpf_jit_compile(struct bpf_prog *fp)
1270{
1271}
1272
1273/*
1274 * Compile eBPF program "fp"
1275 */
1276struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
1277{
1278	struct bpf_prog *tmp, *orig_fp = fp;
1279	struct bpf_binary_header *header;
1280	bool tmp_blinded = false;
1281	struct bpf_jit jit;
1282	int pass;
1283
1284	if (!bpf_jit_enable)
1285		return orig_fp;
1286
1287	tmp = bpf_jit_blind_constants(fp);
1288	/*
1289	 * If blinding was requested and we failed during blinding,
1290	 * we must fall back to the interpreter.
1291	 */
1292	if (IS_ERR(tmp))
1293		return orig_fp;
1294	if (tmp != fp) {
1295		tmp_blinded = true;
1296		fp = tmp;
1297	}
1298
1299	memset(&jit, 0, sizeof(jit));
1300	jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
1301	if (jit.addrs == NULL) {
1302		fp = orig_fp;
1303		goto out;
1304	}
1305	/*
1306	 * Three initial passes:
1307	 *   - 1/2: Determine clobbered registers
1308	 *   - 3:   Calculate program size and addrs array
1309	 */
1310	for (pass = 1; pass <= 3; pass++) {
1311		if (bpf_jit_prog(&jit, fp)) {
1312			fp = orig_fp;
1313			goto free_addrs;
1314		}
1315	}
1316	/*
1317	 * Final pass: Allocate and generate program
1318	 */
1319	if (jit.size >= BPF_SIZE_MAX) {
1320		fp = orig_fp;
1321		goto free_addrs;
1322	}
1323	header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole);
1324	if (!header) {
1325		fp = orig_fp;
1326		goto free_addrs;
1327	}
1328	if (bpf_jit_prog(&jit, fp)) {
1329		fp = orig_fp;
1330		goto free_addrs;
1331	}
1332	if (bpf_jit_enable > 1) {
1333		bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
1334		if (jit.prg_buf)
1335			print_fn_code(jit.prg_buf, jit.size_prg);
1336	}
1337	if (jit.prg_buf) {
1338		set_memory_ro((unsigned long)header, header->pages);
1339		fp->bpf_func = (void *) jit.prg_buf;
1340		fp->jited = 1;
1341	}
1342free_addrs:
1343	kfree(jit.addrs);
1344out:
1345	if (tmp_blinded)
1346		bpf_jit_prog_release_other(fp, fp == orig_fp ?
1347					   tmp : orig_fp);
1348	return fp;
1349}
1350
1351/*
1352 * Free eBPF program
1353 */
1354void bpf_jit_free(struct bpf_prog *fp)
1355{
1356	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
1357	struct bpf_binary_header *header = (void *)addr;
1358
1359	if (!fp->jited)
1360		goto free_filter;
1361
1362	set_memory_rw(addr, header->pages);
1363	bpf_jit_binary_free(header);
1364
1365free_filter:
1366	bpf_prog_unlock_free(fp);
1367}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * BPF Jit compiler for s390.
   4 *
   5 * Minimum build requirements:
   6 *
   7 *  - HAVE_MARCH_Z196_FEATURES: laal, laalg
   8 *  - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
   9 *  - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
  10 *  - 64BIT
  11 *
  12 * Copyright IBM Corp. 2012,2015
  13 *
  14 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  15 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
  16 */
  17
  18#define KMSG_COMPONENT "bpf_jit"
  19#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  20
  21#include <linux/netdevice.h>
  22#include <linux/filter.h>
  23#include <linux/init.h>
  24#include <linux/bpf.h>
  25#include <linux/mm.h>
  26#include <linux/kernel.h>
  27#include <asm/cacheflush.h>
  28#include <asm/extable.h>
  29#include <asm/dis.h>
  30#include <asm/facility.h>
  31#include <asm/nospec-branch.h>
  32#include <asm/set_memory.h>
  33#include <asm/text-patching.h>
  34#include <asm/unwind.h>
  35#include "bpf_jit.h"
  36
  37struct bpf_jit {
  38	u32 seen;		/* Flags to remember seen eBPF instructions */
  39	u16 seen_regs;		/* Mask to remember which registers are used */
  40	u32 *addrs;		/* Array with relative instruction addresses */
  41	u8 *prg_buf;		/* Start of program */
  42	int size;		/* Size of program and literal pool */
  43	int size_prg;		/* Size of program */
  44	int prg;		/* Current position in program */
  45	int lit32_start;	/* Start of 32-bit literal pool */
  46	int lit32;		/* Current position in 32-bit literal pool */
  47	int lit64_start;	/* Start of 64-bit literal pool */
  48	int lit64;		/* Current position in 64-bit literal pool */
  49	int base_ip;		/* Base address for literal pool */
  50	int exit_ip;		/* Address of exit */
  51	int r1_thunk_ip;	/* Address of expoline thunk for 'br %r1' */
  52	int r14_thunk_ip;	/* Address of expoline thunk for 'br %r14' */
  53	int tail_call_start;	/* Tail call start offset */
  54	int excnt;		/* Number of exception table entries */
  55	int prologue_plt_ret;	/* Return address for prologue hotpatch PLT */
  56	int prologue_plt;	/* Start of prologue hotpatch PLT */
  57	int kern_arena;		/* Pool offset of kernel arena address */
  58	u64 user_arena;		/* User arena address */
  59};
  60
  61#define SEEN_MEM	BIT(0)		/* use mem[] for temporary storage */
  62#define SEEN_LITERAL	BIT(1)		/* code uses literals */
  63#define SEEN_FUNC	BIT(2)		/* calls C functions */
  64#define SEEN_STACK	(SEEN_FUNC | SEEN_MEM)
  65
  66#define NVREGS		0xffc0		/* %r6-%r15 */
  67
  68/*
  69 * s390 registers
  70 */
  71#define REG_W0		(MAX_BPF_JIT_REG + 0)	/* Work register 1 (even) */
  72#define REG_W1		(MAX_BPF_JIT_REG + 1)	/* Work register 2 (odd) */
  73#define REG_L		(MAX_BPF_JIT_REG + 2)	/* Literal pool register */
  74#define REG_15		(MAX_BPF_JIT_REG + 3)	/* Register 15 */
  75#define REG_0		REG_W0			/* Register 0 */
  76#define REG_1		REG_W1			/* Register 1 */
  77#define REG_2		BPF_REG_1		/* Register 2 */
  78#define REG_3		BPF_REG_2		/* Register 3 */
  79#define REG_4		BPF_REG_3		/* Register 4 */
  80#define REG_7		BPF_REG_6		/* Register 7 */
  81#define REG_8		BPF_REG_7		/* Register 8 */
  82#define REG_14		BPF_REG_0		/* Register 14 */
  83
  84/*
  85 * Mapping of BPF registers to s390 registers
  86 */
  87static const int reg2hex[] = {
  88	/* Return code */
  89	[BPF_REG_0]	= 14,
  90	/* Function parameters */
  91	[BPF_REG_1]	= 2,
  92	[BPF_REG_2]	= 3,
  93	[BPF_REG_3]	= 4,
  94	[BPF_REG_4]	= 5,
  95	[BPF_REG_5]	= 6,
  96	/* Call saved registers */
  97	[BPF_REG_6]	= 7,
  98	[BPF_REG_7]	= 8,
  99	[BPF_REG_8]	= 9,
 100	[BPF_REG_9]	= 10,
 101	/* BPF stack pointer */
 102	[BPF_REG_FP]	= 13,
 103	/* Register for blinding */
 104	[BPF_REG_AX]	= 12,
 105	/* Work registers for s390x backend */
 106	[REG_W0]	= 0,
 107	[REG_W1]	= 1,
 108	[REG_L]		= 11,
 109	[REG_15]	= 15,
 110};
 111
 112static inline u32 reg(u32 dst_reg, u32 src_reg)
 113{
 114	return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
 115}
 116
 117static inline u32 reg_high(u32 reg)
 118{
 119	return reg2hex[reg] << 4;
 120}
 121
 122static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 123{
 124	u32 r1 = reg2hex[b1];
 125
 126	if (r1 >= 6 && r1 <= 15)
 127		jit->seen_regs |= (1 << r1);
 128}
 129
 130#define REG_SET_SEEN(b1)					\
 131({								\
 132	reg_set_seen(jit, b1);					\
 133})
 134
 135/*
 136 * EMIT macros for code generation
 137 */
 138
 139#define _EMIT2(op)						\
 140({								\
 141	if (jit->prg_buf)					\
 142		*(u16 *) (jit->prg_buf + jit->prg) = (op);	\
 143	jit->prg += 2;						\
 144})
 145
 146#define EMIT2(op, b1, b2)					\
 147({								\
 148	_EMIT2((op) | reg(b1, b2));				\
 149	REG_SET_SEEN(b1);					\
 150	REG_SET_SEEN(b2);					\
 151})
 152
 153#define _EMIT4(op)						\
 154({								\
 155	if (jit->prg_buf)					\
 156		*(u32 *) (jit->prg_buf + jit->prg) = (op);	\
 157	jit->prg += 4;						\
 158})
 159
 160#define EMIT4(op, b1, b2)					\
 161({								\
 162	_EMIT4((op) | reg(b1, b2));				\
 163	REG_SET_SEEN(b1);					\
 164	REG_SET_SEEN(b2);					\
 165})
 166
 167#define EMIT4_RRF(op, b1, b2, b3)				\
 168({								\
 169	_EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2));		\
 170	REG_SET_SEEN(b1);					\
 171	REG_SET_SEEN(b2);					\
 172	REG_SET_SEEN(b3);					\
 173})
 174
 175#define _EMIT4_DISP(op, disp)					\
 176({								\
 177	unsigned int __disp = (disp) & 0xfff;			\
 178	_EMIT4((op) | __disp);					\
 179})
 180
 181#define EMIT4_DISP(op, b1, b2, disp)				\
 182({								\
 183	_EMIT4_DISP((op) | reg_high(b1) << 16 |			\
 184		    reg_high(b2) << 8, (disp));			\
 185	REG_SET_SEEN(b1);					\
 186	REG_SET_SEEN(b2);					\
 187})
 188
 189#define EMIT4_IMM(op, b1, imm)					\
 190({								\
 191	unsigned int __imm = (imm) & 0xffff;			\
 192	_EMIT4((op) | reg_high(b1) << 16 | __imm);		\
 193	REG_SET_SEEN(b1);					\
 194})
 195
 196#define EMIT4_PCREL(op, pcrel)					\
 197({								\
 198	long __pcrel = ((pcrel) >> 1) & 0xffff;			\
 199	_EMIT4((op) | __pcrel);					\
 200})
 201
 202#define EMIT4_PCREL_RIC(op, mask, target)			\
 203({								\
 204	int __rel = ((target) - jit->prg) / 2;			\
 205	_EMIT4((op) | (mask) << 20 | (__rel & 0xffff));		\
 206})
 207
 208#define _EMIT6(op1, op2)					\
 209({								\
 210	if (jit->prg_buf) {					\
 211		*(u32 *) (jit->prg_buf + jit->prg) = (op1);	\
 212		*(u16 *) (jit->prg_buf + jit->prg + 4) = (op2);	\
 213	}							\
 214	jit->prg += 6;						\
 215})
 216
 217#define _EMIT6_DISP(op1, op2, disp)				\
 218({								\
 219	unsigned int __disp = (disp) & 0xfff;			\
 220	_EMIT6((op1) | __disp, op2);				\
 221})
 222
 223#define _EMIT6_DISP_LH(op1, op2, disp)				\
 224({								\
 225	u32 _disp = (u32) (disp);				\
 226	unsigned int __disp_h = _disp & 0xff000;		\
 227	unsigned int __disp_l = _disp & 0x00fff;		\
 228	_EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4);	\
 229})
 230
 231#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp)		\
 232({								\
 233	_EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 |		\
 234		       reg_high(b3) << 8, op2, disp);		\
 235	REG_SET_SEEN(b1);					\
 236	REG_SET_SEEN(b2);					\
 237	REG_SET_SEEN(b3);					\
 238})
 239
 240#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target)	\
 241({								\
 242	unsigned int rel = (int)((target) - jit->prg) / 2;	\
 243	_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff),	\
 244	       (op2) | (mask) << 12);				\
 245	REG_SET_SEEN(b1);					\
 246	REG_SET_SEEN(b2);					\
 247})
 248
 249#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target)	\
 250({								\
 251	unsigned int rel = (int)((target) - jit->prg) / 2;	\
 252	_EMIT6((op1) | (reg_high(b1) | (mask)) << 16 |		\
 253		(rel & 0xffff), (op2) | ((imm) & 0xff) << 8);	\
 254	REG_SET_SEEN(b1);					\
 255	BUILD_BUG_ON(((unsigned long) (imm)) > 0xff);		\
 256})
 257
 258#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)		\
 259({								\
 260	int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2;	\
 261	_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
 262	REG_SET_SEEN(b1);					\
 263	REG_SET_SEEN(b2);					\
 264})
 265
 266#define EMIT6_PCREL_RILB(op, b, target)				\
 267({								\
 268	unsigned int rel = (int)((target) - jit->prg) / 2;	\
 269	_EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\
 270	REG_SET_SEEN(b);					\
 271})
 272
 273#define EMIT6_PCREL_RIL(op, target)				\
 274({								\
 275	unsigned int rel = (int)((target) - jit->prg) / 2;	\
 276	_EMIT6((op) | rel >> 16, rel & 0xffff);			\
 277})
 278
 279#define EMIT6_PCREL_RILC(op, mask, target)			\
 280({								\
 281	EMIT6_PCREL_RIL((op) | (mask) << 20, (target));		\
 282})
 283
 284#define _EMIT6_IMM(op, imm)					\
 285({								\
 286	unsigned int __imm = (imm);				\
 287	_EMIT6((op) | (__imm >> 16), __imm & 0xffff);		\
 288})
 289
 290#define EMIT6_IMM(op, b1, imm)					\
 291({								\
 292	_EMIT6_IMM((op) | reg_high(b1) << 16, imm);		\
 293	REG_SET_SEEN(b1);					\
 294})
 295
 296#define _EMIT_CONST_U32(val)					\
 297({								\
 298	unsigned int ret;					\
 299	ret = jit->lit32;					\
 300	if (jit->prg_buf)					\
 301		*(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\
 302	jit->lit32 += 4;					\
 303	ret;							\
 304})
 305
 306#define EMIT_CONST_U32(val)					\
 307({								\
 308	jit->seen |= SEEN_LITERAL;				\
 309	_EMIT_CONST_U32(val) - jit->base_ip;			\
 310})
 311
 312#define _EMIT_CONST_U64(val)					\
 313({								\
 314	unsigned int ret;					\
 315	ret = jit->lit64;					\
 316	if (jit->prg_buf)					\
 317		*(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\
 318	jit->lit64 += 8;					\
 319	ret;							\
 320})
 321
 322#define EMIT_CONST_U64(val)					\
 323({								\
 324	jit->seen |= SEEN_LITERAL;				\
 325	_EMIT_CONST_U64(val) - jit->base_ip;			\
 326})
 327
 328#define EMIT_ZERO(b1)						\
 329({								\
 330	if (!fp->aux->verifier_zext) {				\
 331		/* llgfr %dst,%dst (zero extend to 64 bit) */	\
 332		EMIT4(0xb9160000, b1, b1);			\
 333		REG_SET_SEEN(b1);				\
 334	}							\
 335})
 336
 337/*
 338 * Return whether this is the first pass. The first pass is special, since we
 339 * don't know any sizes yet, and thus must be conservative.
 340 */
 341static bool is_first_pass(struct bpf_jit *jit)
 342{
 343	return jit->size == 0;
 344}
 345
 346/*
 347 * Return whether this is the code generation pass. The code generation pass is
 348 * special, since we should change as little as possible.
 349 */
 350static bool is_codegen_pass(struct bpf_jit *jit)
 351{
 352	return jit->prg_buf;
 353}
 354
 355/*
 356 * Return whether "rel" can be encoded as a short PC-relative offset
 357 */
 358static bool is_valid_rel(int rel)
 359{
 360	return rel >= -65536 && rel <= 65534;
 361}
 362
 363/*
 364 * Return whether "off" can be reached using a short PC-relative offset
 365 */
 366static bool can_use_rel(struct bpf_jit *jit, int off)
 367{
 368	return is_valid_rel(off - jit->prg);
 369}
 370
 371/*
 372 * Return whether given displacement can be encoded using
 373 * Long-Displacement Facility
 374 */
 375static bool is_valid_ldisp(int disp)
 376{
 377	return disp >= -524288 && disp <= 524287;
 378}
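/*
 * Long-displacement instructions encode a signed 20-bit displacement,
 * hence the +-512 KiB window. A literal at base_ip + 524288, for example,
 * is out of range and forces the PC-relative forms (lrl/lgrl) instead of
 * a base + displacement access.
 */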
 379
 380/*
 381 * Return whether the next 32-bit literal pool entry can be referenced using
 382 * Long-Displacement Facility
 383 */
 384static bool can_use_ldisp_for_lit32(struct bpf_jit *jit)
 385{
 386	return is_valid_ldisp(jit->lit32 - jit->base_ip);
 387}
 388
 389/*
 390 * Return whether the next 64-bit literal pool entry can be referenced using
 391 * Long-Displacement Facility
 392 */
 393static bool can_use_ldisp_for_lit64(struct bpf_jit *jit)
 394{
 395	return is_valid_ldisp(jit->lit64 - jit->base_ip);
 396}
 397
 398/*
 399 * Fill whole space with illegal instructions (0x0000 is an illegal opcode on s390)
 400 */
 401static void jit_fill_hole(void *area, unsigned int size)
 402{
 403	memset(area, 0, size);
 404}
 405
 406/*
 407 * Save registers from "rs" (register start) to "re" (register end) on stack
 408 */
 409static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
 410{
 411	u32 off = STK_OFF_R6 + (rs - 6) * 8;
 412
 413	if (rs == re)
 414		/* stg %rs,off(%r15) */
 415		_EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
 416	else
 417		/* stmg %rs,%re,off(%r15) */
 418		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
 419}
 420
 421/*
 422 * Restore registers "rs" (register start) through "re" (register end) from the stack
 423 */
 424static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
 425{
 426	u32 off = STK_OFF_R6 + (rs - 6) * 8;
 427
 428	if (jit->seen & SEEN_STACK)
 429		off += STK_OFF + stack_depth;
 430
 431	if (rs == re)
 432		/* lg %rs,off(%r15) */
 433		_EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
 434	else
 435		/* lmg %rs,%re,off(%r15) */
 436		_EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
 437}
 438
 439/*
 440 * Return first seen register (from start)
 441 */
 442static int get_start(u16 seen_regs, int start)
 443{
 444	int i;
 445
 446	for (i = start; i <= 15; i++) {
 447		if (seen_regs & (1 << i))
 448			return i;
 449	}
 450	return 0;
 451}
 452
 453/*
 454 * Return the last register of the group starting at "start"; a gap of >= 2 unused registers ends a group
 455 */
 456static int get_end(u16 seen_regs, int start)
 457{
 458	int i;
 459
 460	for (i = start; i < 15; i++) {
 461		if (!(seen_regs & (3 << i)))
 462			return i - 1;
 463	}
 464	return (seen_regs & (1 << 15)) ? 15 : 14;
 465}
 466
 467#define REGS_SAVE	1
 468#define REGS_RESTORE	0
 469/*
 470 * Save and restore clobbered registers (6-15) on stack.
 471 * We save/restore registers in chunks with gap >= 2 registers.
 472 */
 473static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth,
 474			      u16 extra_regs)
 475{
 476	u16 seen_regs = jit->seen_regs | extra_regs;
 477	const int last = 15, save_restore_size = 6;
 478	int re = 6, rs;
 479
 480	if (is_first_pass(jit)) {
 481		/*
 482		 * We don't know yet which registers are used. Reserve space
 483		 * conservatively.
 484		 */
 485		jit->prg += (last - re + 1) * save_restore_size;
 486		return;
 487	}
 488
 489	do {
 490		rs = get_start(seen_regs, re);
 491		if (!rs)
 492			break;
 493		re = get_end(seen_regs, rs + 1);
 494		if (op == REGS_SAVE)
 495			save_regs(jit, rs, re);
 496		else
 497			restore_regs(jit, rs, re, stack_depth);
 498		re++;
 499	} while (re <= last);
 500}
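/*
 * For example, with %r6, %r7 and %r10-%r15 seen, two chunks are emitted:
 *
 *	stmg	%r6,%r7,STK_OFF_R6(%r15)
 *	stmg	%r10,%r15,STK_OFF_R6+32(%r15)
 *
 * The gap of two unused registers (%r8 and %r9) makes splitting cheaper
 * than a single stmg %r6,%r15, while a gap of only one register would not
 * be worth an extra 6-byte instruction.
 */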
 501
 502static void bpf_skip(struct bpf_jit *jit, int size)
 503{
 504	if (size >= 6 && !is_valid_rel(size)) {
 505		/* brcl 0xf,size */
 506		EMIT6_PCREL_RIL(0xc0f4000000, size);
 507		size -= 6;
 508	} else if (size >= 4 && is_valid_rel(size)) {
 509		/* brc 0xf,size */
 510		EMIT4_PCREL(0xa7f40000, size);
 511		size -= 4;
 512	}
 513	while (size >= 2) {
 514		/* bcr 0,%0 */
 515		_EMIT2(0x0700);
 516		size -= 2;
 517	}
 518}
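/*
 * For example, a 10-byte gap within short-branch range is filled with a
 * 4-byte branch over the remainder followed by three 2-byte nops:
 *
 *	brc	0xf,.+10
 *	bcr	0,%r0
 *	bcr	0,%r0
 *	bcr	0,%r0
 */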
 519
 520/*
 521 * PLT for hotpatchable calls. The calling convention is the same as for the
 522 * ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
 523 */
 524struct bpf_plt {
 525	char code[16];
 526	void *ret;
 527	void *target;
 528} __packed;
 529extern const struct bpf_plt bpf_plt;
 530asm(
 531	".pushsection .rodata\n"
 532	"	.balign 8\n"
 533	"bpf_plt:\n"
 534	"	lgrl %r0,bpf_plt_ret\n"
 535	"	lgrl %r1,bpf_plt_target\n"
 536	"	br %r1\n"
 537	"	.balign 8\n"
 538	"bpf_plt_ret: .quad 0\n"
 539	"bpf_plt_target: .quad 0\n"
 540	"	.popsection\n"
 541);
 542
 543static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
 544{
 545	memcpy(plt, &bpf_plt, sizeof(*plt));
 546	plt->ret = ret;
 547	plt->target = target;
 548}
 549
 550/*
 551 * Emit function prologue
 552 *
 553 * Save registers and create stack frame if necessary.
 554 * See stack frame layout description in "bpf_jit.h"!
 555 */
 556static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
 557			     u32 stack_depth)
 558{
 559	/* No-op for hotpatching */
 560	/* brcl 0,prologue_plt */
 561	EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt);
 562	jit->prologue_plt_ret = jit->prg;
 563
 564	if (!bpf_is_subprog(fp)) {
 565		/* Initialize the tail call counter in the main program. */
 566		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
 567		_EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
 568	} else {
 569		/*
 570		 * Skip the tail call counter initialization in subprograms.
 571		 * Insert nops in order to have tail_call_start at a
 572		 * predictable offset.
 573		 */
 574		bpf_skip(jit, 6);
 575	}
 576	/* Tail calls have to skip the above initialization */
 577	jit->tail_call_start = jit->prg;
 578	if (fp->aux->exception_cb) {
 579		/*
 580		 * Switch stack, the new address is in the 2nd parameter.
 581		 *
 582		 * Arrange the restoration of %r6-%r15 in the epilogue.
 583		 * Do not restore them now, the prog does not need them.
 584		 */
 585		/* lgr %r15,%r3 */
 586		EMIT4(0xb9040000, REG_15, REG_3);
 587		jit->seen_regs |= NVREGS;
 588	} else {
 589		/* Save registers */
 590		save_restore_regs(jit, REGS_SAVE, stack_depth,
 591				  fp->aux->exception_boundary ? NVREGS : 0);
 592	}
 593	/* Setup literal pool */
 594	if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
 595		if (!is_first_pass(jit) &&
 596		    is_valid_ldisp(jit->size - (jit->prg + 2))) {
 597			/* basr %l,0 */
 598			EMIT2(0x0d00, REG_L, REG_0);
 599			jit->base_ip = jit->prg;
 600		} else {
 601			/* larl %l,lit32_start */
 602			EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start);
 603			jit->base_ip = jit->lit32_start;
 604		}
 605	}
 606	/* Setup stack and backchain */
 607	if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
 608		if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
 609			/* lgr %w1,%r15 (backchain) */
 610			EMIT4(0xb9040000, REG_W1, REG_15);
 611		/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
 612		EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
 613		/* aghi %r15,-STK_OFF */
 614		EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
 615		if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
 616			/* stg %w1,152(%r15) (backchain) */
 617			EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
 618				      REG_15, 152);
 619	}
 620}
 621
 622/*
 623 * Emit an expoline for a jump that follows
 624 */
 625static void emit_expoline(struct bpf_jit *jit)
 626{
 627	/* exrl %r0,.+10 */
 628	EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
 629	/* j . */
 630	EMIT4_PCREL(0xa7f40000, 0);
 631}
 632
 633/*
 634 * Emit __s390_indirect_jump_r1 thunk if necessary
 635 */
 636static void emit_r1_thunk(struct bpf_jit *jit)
 637{
 638	if (nospec_uses_trampoline()) {
 639		jit->r1_thunk_ip = jit->prg;
 640		emit_expoline(jit);
 641		/* br %r1 */
 642		_EMIT2(0x07f1);
 643	}
 644}
 645
 646/*
 647 * Call r1 either directly or via __s390_indirect_jump_r1 thunk
 648 */
 649static void call_r1(struct bpf_jit *jit)
 650{
 651	if (nospec_uses_trampoline())
 652		/* brasl %r14,__s390_indirect_jump_r1 */
 653		EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
 654	else
 655		/* basr %r14,%r1 */
 656		EMIT2(0x0d00, REG_14, REG_1);
 657}
 658
 659/*
 660 * Function epilogue
 661 */
 662static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
 663{
 664	jit->exit_ip = jit->prg;
 665	/* Load exit code: lgr %r2,%b0 */
 666	EMIT4(0xb9040000, REG_2, BPF_REG_0);
 667	/* Restore registers */
 668	save_restore_regs(jit, REGS_RESTORE, stack_depth, 0);
 669	if (nospec_uses_trampoline()) {
 670		jit->r14_thunk_ip = jit->prg;
 671		/* Generate __s390_indirect_jump_r14 thunk */
 672		emit_expoline(jit);
 673	}
 674	/* br %r14 */
 675	_EMIT2(0x07fe);
 676
 677	if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
 678		emit_r1_thunk(jit);
 679
 680	jit->prg = ALIGN(jit->prg, 8);
 681	jit->prologue_plt = jit->prg;
 682	if (jit->prg_buf)
 683		bpf_jit_plt((struct bpf_plt *)(jit->prg_buf + jit->prg),
 684			    jit->prg_buf + jit->prologue_plt_ret, NULL);
 685	jit->prg += sizeof(struct bpf_plt);
 686}
 687
 688bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
 689{
 690	regs->psw.addr = extable_fixup(x);
 691	if (x->data != -1)
 692		regs->gprs[x->data] = 0;
 693	return true;
 694}
 695
 696/*
 697 * A single BPF probe instruction
 698 */
 699struct bpf_jit_probe {
 700	int prg;	/* JITed instruction offset */
 701	int nop_prg;	/* JITed nop offset */
 702	int reg;	/* Register to clear on exception */
 703	int arena_reg;	/* Register to use for arena addressing */
 704};
 705
 706static void bpf_jit_probe_init(struct bpf_jit_probe *probe)
 707{
 708	probe->prg = -1;
 709	probe->nop_prg = -1;
 710	probe->reg = -1;
 711	probe->arena_reg = REG_0;
 712}
 713
 714/*
 715 * Handlers of certain exceptions leave psw.addr pointing to the instruction
 716 * directly after the failing one. Therefore, create two exception table
 717 * entries and also add a nop in case two probing instructions come directly
 718 * after each other.
 719 */
 720static void bpf_jit_probe_emit_nop(struct bpf_jit *jit,
 721				   struct bpf_jit_probe *probe)
 722{
 723	if (probe->prg == -1 || probe->nop_prg != -1)
 724		/* The probe is not armed or nop is already emitted. */
 725		return;
 726
 727	probe->nop_prg = jit->prg;
 728	/* bcr 0,%0 */
 729	_EMIT2(0x0700);
 730}
 731
 732static void bpf_jit_probe_load_pre(struct bpf_jit *jit, struct bpf_insn *insn,
 733				   struct bpf_jit_probe *probe)
 734{
 735	if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
 736	    BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
 737	    BPF_MODE(insn->code) != BPF_PROBE_MEM32)
 738		return;
 739
 740	if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
 741		/* lgrl %r1,kern_arena */
 742		EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
 743		probe->arena_reg = REG_W1;
 744	}
 745	probe->prg = jit->prg;
 746	probe->reg = reg2hex[insn->dst_reg];
 747}
 748
 749static void bpf_jit_probe_store_pre(struct bpf_jit *jit, struct bpf_insn *insn,
 750				    struct bpf_jit_probe *probe)
 751{
 752	if (BPF_MODE(insn->code) != BPF_PROBE_MEM32)
 753		return;
 754
 755	/* lgrl %r1,kern_arena */
 756	EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
 757	probe->arena_reg = REG_W1;
 758	probe->prg = jit->prg;
 759}
 760
 761static void bpf_jit_probe_atomic_pre(struct bpf_jit *jit,
 762				     struct bpf_insn *insn,
 763				     struct bpf_jit_probe *probe)
 764{
 765	if (BPF_MODE(insn->code) != BPF_PROBE_ATOMIC)
 766		return;
 767
 768	/* lgrl %r1,kern_arena */
 769	EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
 770	/* agr %r1,%dst */
 771	EMIT4(0xb9080000, REG_W1, insn->dst_reg);
 772	probe->arena_reg = REG_W1;
 773	probe->prg = jit->prg;
 774}
 775
 776static int bpf_jit_probe_post(struct bpf_jit *jit, struct bpf_prog *fp,
 777			      struct bpf_jit_probe *probe)
 778{
 779	struct exception_table_entry *ex;
 780	int i, prg;
 781	s64 delta;
 782	u8 *insn;
 783
 784	if (probe->prg == -1)
 785		/* The probe is not armed. */
 786		return 0;
 787	bpf_jit_probe_emit_nop(jit, probe);
 788	if (!fp->aux->extable)
 789		/* Do nothing during early JIT passes. */
 790		return 0;
 791	insn = jit->prg_buf + probe->prg;
 792	if (WARN_ON_ONCE(probe->prg + insn_length(*insn) != probe->nop_prg))
 793		/* JIT bug - gap between probe and nop instructions. */
 794		return -1;
 795	for (i = 0; i < 2; i++) {
 796		if (WARN_ON_ONCE(jit->excnt >= fp->aux->num_exentries))
 797			/* Verifier bug - not enough entries. */
 798			return -1;
 799		ex = &fp->aux->extable[jit->excnt];
 800		/* Add extable entries for probe and nop instructions. */
 801		prg = i == 0 ? probe->prg : probe->nop_prg;
 802		delta = jit->prg_buf + prg - (u8 *)&ex->insn;
 803		if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
 804			/* JIT bug - code and extable must be close. */
 805			return -1;
 806		ex->insn = delta;
 807		/*
 808		 * Land on the current instruction. Note that the extable
 809		 * infrastructure ignores the fixup field; it is handled by
 810		 * ex_handler_bpf().
 811		 */
 812		delta = jit->prg_buf + jit->prg - (u8 *)&ex->fixup;
 813		if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
 814			/* JIT bug - landing pad and extable must be close. */
 815			return -1;
 816		ex->fixup = delta;
 817		ex->type = EX_TYPE_BPF;
 818		ex->data = probe->reg;
 819		jit->excnt++;
 820	}
 821	return 0;
 822}
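/*
 * The resulting layout for a probed 8-byte load looks like this:
 *
 *	probe->prg:	lg	%dst,off(%src)	<- extable entry 1
 *	probe->nop_prg:	bcr	0,%r0		<- extable entry 2
 *	jit->prg:	...			<- fixup for both entries
 *
 * Whichever of the two addresses the machine reports on a fault, one of
 * the entries matches, and ex_handler_bpf() resumes execution at the
 * common landing pad with the destination register cleared.
 */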
 823
 824/*
 825 * Sign-extend the register if necessary
 826 */
 827static int sign_extend(struct bpf_jit *jit, int r, u8 size, u8 flags)
 828{
 829	if (!(flags & BTF_FMODEL_SIGNED_ARG))
 830		return 0;
 831
 832	switch (size) {
 833	case 1:
 834		/* lgbr %r,%r */
 835		EMIT4(0xb9060000, r, r);
 836		return 0;
 837	case 2:
 838		/* lghr %r,%r */
 839		EMIT4(0xb9070000, r, r);
 840		return 0;
 841	case 4:
 842		/* lgfr %r,%r */
 843		EMIT4(0xb9140000, r, r);
 844		return 0;
 845	case 8:
 846		return 0;
 847	default:
 848		return -1;
 849	}
 850}
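/*
 * For example, a kfunc taking a signed 16-bit argument in BPF_REG_2 (%r3)
 * gets "lghr %r3,%r3" emitted before the call, so that the narrow value
 * arrives sign-extended as the s390x C ABI expects.
 */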
 851
 852/*
 853 * Compile one eBPF instruction into s390x code
 854 *
 855 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
 856 * stack space for the large switch statement.
 857 */
 858static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 859				 int i, bool extra_pass, u32 stack_depth)
 860{
 861	struct bpf_insn *insn = &fp->insnsi[i];
 862	s32 branch_oc_off = insn->off;
 863	u32 dst_reg = insn->dst_reg;
 864	u32 src_reg = insn->src_reg;
 865	struct bpf_jit_probe probe;
 866	int last, insn_count = 1;
 867	u32 *addrs = jit->addrs;
 868	s32 imm = insn->imm;
 869	s16 off = insn->off;
 870	unsigned int mask;
 871	int err;
 872
 873	bpf_jit_probe_init(&probe);
 874
 875	switch (insn->code) {
 876	/*
 877	 * BPF_MOV
 878	 */
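	/*
	 * A non-zero insn->off selects the BPF v4 sign-extending move
	 * (BPF_MOVSX); off gives the width of the source in bits.
	 */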
 879	case BPF_ALU | BPF_MOV | BPF_X:
 880		switch (insn->off) {
 881		case 0: /* DST = (u32) SRC */
 882			/* llgfr %dst,%src */
 883			EMIT4(0xb9160000, dst_reg, src_reg);
 884			if (insn_is_zext(&insn[1]))
 885				insn_count = 2;
 886			break;
 887		case 8: /* DST = (u32)(s8) SRC */
 888			/* lbr %dst,%src */
 889			EMIT4(0xb9260000, dst_reg, src_reg);
 890			/* llgfr %dst,%dst */
 891			EMIT4(0xb9160000, dst_reg, dst_reg);
 892			break;
 893		case 16: /* DST = (u32)(s16) SRC */
 894			/* lhr %dst,%src */
 895			EMIT4(0xb9270000, dst_reg, src_reg);
 896			/* llgfr %dst,%dst */
 897			EMIT4(0xb9160000, dst_reg, dst_reg);
 898			break;
 899		}
 900		break;
 901	case BPF_ALU64 | BPF_MOV | BPF_X:
 902		if (insn_is_cast_user(insn)) {
 903			int patch_brc;
 904
 905			/* ltgr %dst,%src */
 906			EMIT4(0xb9020000, dst_reg, src_reg);
 907			/* brc 8,0f */
 908			patch_brc = jit->prg;
 909			EMIT4_PCREL_RIC(0xa7040000, 8, 0);
 910			/* iihf %dst,user_arena>>32 */
 911			EMIT6_IMM(0xc0080000, dst_reg, jit->user_arena >> 32);
 912			/* 0: */
 913			if (jit->prg_buf)
 914				*(u16 *)(jit->prg_buf + patch_brc + 2) =
 915					(jit->prg - patch_brc) >> 1;
 916			break;
 917		}
 918		switch (insn->off) {
 919		case 0: /* DST = SRC */
 920			/* lgr %dst,%src */
 921			EMIT4(0xb9040000, dst_reg, src_reg);
 922			break;
 923		case 8: /* DST = (s8) SRC */
 924			/* lgbr %dst,%src */
 925			EMIT4(0xb9060000, dst_reg, src_reg);
 926			break;
 927		case 16: /* DST = (s16) SRC */
 928			/* lghr %dst,%src */
 929			EMIT4(0xb9070000, dst_reg, src_reg);
 930			break;
 931		case 32: /* DST = (s32) SRC */
 932			/* lgfr %dst,%src */
 933			EMIT4(0xb9140000, dst_reg, src_reg);
 934			break;
 935		}
 936		break;
 937	case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
 938		/* llilf %dst,imm */
 939		EMIT6_IMM(0xc00f0000, dst_reg, imm);
 940		if (insn_is_zext(&insn[1]))
 941			insn_count = 2;
 942		break;
 943	case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
 944		/* lgfi %dst,imm */
 945		EMIT6_IMM(0xc0010000, dst_reg, imm);
 946		break;
 947	/*
 948	 * BPF_LD 64
 949	 */
 950	case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
 951	{
 952		/* 16 byte instruction that uses two 'struct bpf_insn' */
 953		u64 imm64;
 954
 955		imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
 956		/* lgrl %dst,imm */
 957		EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64));
 958		insn_count = 2;
 959		break;
 960	}
 961	/*
 962	 * BPF_ADD
 963	 */
 964	case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
 965		/* ar %dst,%src */
 966		EMIT2(0x1a00, dst_reg, src_reg);
 967		EMIT_ZERO(dst_reg);
 968		break;
 969	case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
 970		/* agr %dst,%src */
 971		EMIT4(0xb9080000, dst_reg, src_reg);
 972		break;
 973	case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
 974		if (imm != 0) {
 975			/* alfi %dst,imm */
 976			EMIT6_IMM(0xc20b0000, dst_reg, imm);
 977		}
 978		EMIT_ZERO(dst_reg);
 979		break;
 980	case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
 981		if (!imm)
 982			break;
 983		/* agfi %dst,imm */
 984		EMIT6_IMM(0xc2080000, dst_reg, imm);
 985		break;
 986	/*
 987	 * BPF_SUB
 988	 */
 989	case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
 990		/* sr %dst,%src */
 991		EMIT2(0x1b00, dst_reg, src_reg);
 992		EMIT_ZERO(dst_reg);
 993		break;
 994	case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
 995		/* sgr %dst,%src */
 996		EMIT4(0xb9090000, dst_reg, src_reg);
 997		break;
 998	case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
 999		if (imm != 0) {
1000			/* alfi %dst,-imm */
1001			EMIT6_IMM(0xc20b0000, dst_reg, -imm);
1002		}
1003		EMIT_ZERO(dst_reg);
1004		break;
1005	case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
1006		if (!imm)
1007			break;
1008		if (imm == -0x80000000) {
1009			/* algfi %dst,0x80000000 */
1010			EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
1011		} else {
1012			/* agfi %dst,-imm */
1013			EMIT6_IMM(0xc2080000, dst_reg, -imm);
1014		}
1015		break;
1016	/*
1017	 * BPF_MUL
1018	 */
1019	case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
1020		/* msr %dst,%src */
1021		EMIT4(0xb2520000, dst_reg, src_reg);
1022		EMIT_ZERO(dst_reg);
1023		break;
1024	case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
1025		/* msgr %dst,%src */
1026		EMIT4(0xb90c0000, dst_reg, src_reg);
1027		break;
1028	case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
1029		if (imm != 1) {
1030			/* msfi %dst,imm */
1031			EMIT6_IMM(0xc2010000, dst_reg, imm);
1032		}
1033		EMIT_ZERO(dst_reg);
1034		break;
1035	case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
1036		if (imm == 1)
1037			break;
1038		/* msgfi %dst,imm */
1039		EMIT6_IMM(0xc2000000, dst_reg, imm);
1040		break;
1041	/*
1042	 * BPF_DIV / BPF_MOD
1043	 */
1044	case BPF_ALU | BPF_DIV | BPF_X:
1045	case BPF_ALU | BPF_MOD | BPF_X:
1046	{
1047		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
1048
1049		switch (off) {
1050		case 0: /* dst = (u32) dst {/,%} (u32) src */
1051			/* xr %w0,%w0 */
1052			EMIT2(0x1700, REG_W0, REG_W0);
1053			/* lr %w1,%dst */
1054			EMIT2(0x1800, REG_W1, dst_reg);
1055			/* dlr %w0,%src */
1056			EMIT4(0xb9970000, REG_W0, src_reg);
1057			break;
1058		case 1: /* dst = (u32) ((s32) dst {/,%} (s32) src) */
1059			/* lgfr %r1,%dst */
1060			EMIT4(0xb9140000, REG_W1, dst_reg);
1061			/* dsgfr %r0,%src */
1062			EMIT4(0xb91d0000, REG_W0, src_reg);
1063			break;
1064		}
1065		/* llgfr %dst,%rc */
1066		EMIT4(0xb9160000, dst_reg, rc_reg);
1067		if (insn_is_zext(&insn[1]))
1068			insn_count = 2;
1069		break;
1070	}
1071	case BPF_ALU64 | BPF_DIV | BPF_X:
1072	case BPF_ALU64 | BPF_MOD | BPF_X:
1073	{
1074		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
1075
1076		switch (off) {
1077		case 0: /* dst = dst {/,%} src */
1078			/* lghi %w0,0 */
1079			EMIT4_IMM(0xa7090000, REG_W0, 0);
1080			/* lgr %w1,%dst */
1081			EMIT4(0xb9040000, REG_W1, dst_reg);
1082			/* dlgr %w0,%src */
1083			EMIT4(0xb9870000, REG_W0, src_reg);
1084			break;
1085		case 1: /* dst = (s64) dst {/,%} (s64) src */
1086			/* lgr %w1,%dst */
1087			EMIT4(0xb9040000, REG_W1, dst_reg);
1088			/* dsgr %w0,%src */
1089			EMIT4(0xb90d0000, REG_W0, src_reg);
1090			break;
1091		}
1092		/* lgr %dst,%rc */
1093		EMIT4(0xb9040000, dst_reg, rc_reg);
1094		break;
1095	}
1096	case BPF_ALU | BPF_DIV | BPF_K:
1097	case BPF_ALU | BPF_MOD | BPF_K:
1098	{
1099		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
1100
1101		if (imm == 1) {
1102			if (BPF_OP(insn->code) == BPF_MOD)
1103				/* lghi %dst,0 */
1104				EMIT4_IMM(0xa7090000, dst_reg, 0);
1105			else
1106				EMIT_ZERO(dst_reg);
1107			break;
1108		}
1109		if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) {
1110			switch (off) {
1111			case 0: /* dst = (u32) dst {/,%} (u32) imm */
1112				/* xr %w0,%w0 */
1113				EMIT2(0x1700, REG_W0, REG_W0);
1114				/* lr %w1,%dst */
1115				EMIT2(0x1800, REG_W1, dst_reg);
1116				/* dl %w0,<d(imm)>(%l) */
1117				EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0,
1118					      REG_L, EMIT_CONST_U32(imm));
1119				break;
1120			case 1: /* dst = (s32) dst {/,%} (s32) imm */
1121				/* lgfr %r1,%dst */
1122				EMIT4(0xb9140000, REG_W1, dst_reg);
1123				/* dsgf %r0,<d(imm)>(%l) */
1124				EMIT6_DISP_LH(0xe3000000, 0x001d, REG_W0, REG_0,
1125					      REG_L, EMIT_CONST_U32(imm));
1126				break;
1127			}
1128		} else {
1129			switch (off) {
1130			case 0: /* dst = (u32) dst {/,%} (u32) imm */
1131				/* xr %w0,%w0 */
1132				EMIT2(0x1700, REG_W0, REG_W0);
1133				/* lr %w1,%dst */
1134				EMIT2(0x1800, REG_W1, dst_reg);
1135				/* lrl %dst,imm */
1136				EMIT6_PCREL_RILB(0xc40d0000, dst_reg,
1137						 _EMIT_CONST_U32(imm));
1138				jit->seen |= SEEN_LITERAL;
1139				/* dlr %w0,%dst */
1140				EMIT4(0xb9970000, REG_W0, dst_reg);
1141				break;
1142			case 1: /* dst = (s32) dst {/,%} (s32) imm */
1143				/* lgfr %w1,%dst */
1144				EMIT4(0xb9140000, REG_W1, dst_reg);
1145				/* lgfrl %dst,imm */
1146				EMIT6_PCREL_RILB(0xc40c0000, dst_reg,
1147						 _EMIT_CONST_U32(imm));
1148				jit->seen |= SEEN_LITERAL;
1149				/* dsgr %w0,%dst */
1150				EMIT4(0xb90d0000, REG_W0, dst_reg);
1151				break;
1152			}
1153		}
1154		/* llgfr %dst,%rc */
1155		EMIT4(0xb9160000, dst_reg, rc_reg);
1156		if (insn_is_zext(&insn[1]))
1157			insn_count = 2;
1158		break;
1159	}
1160	case BPF_ALU64 | BPF_DIV | BPF_K:
1161	case BPF_ALU64 | BPF_MOD | BPF_K:
1162	{
1163		int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
1164
1165		if (imm == 1) {
1166			if (BPF_OP(insn->code) == BPF_MOD)
1167				/* lghi %dst,0 */
1168				EMIT4_IMM(0xa7090000, dst_reg, 0);
1169			break;
1170		}
1171		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
1172			switch (off) {
1173			case 0: /* dst = dst {/,%} imm */
1174				/* lghi %w0,0 */
1175				EMIT4_IMM(0xa7090000, REG_W0, 0);
1176				/* lgr %w1,%dst */
1177				EMIT4(0xb9040000, REG_W1, dst_reg);
1178				/* dlg %w0,<d(imm)>(%l) */
1179				EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0,
1180					      REG_L, EMIT_CONST_U64(imm));
1181				break;
1182			case 1: /* dst = (s64) dst {/,%} (s64) imm */
1183				/* lgr %w1,%dst */
1184				EMIT4(0xb9040000, REG_W1, dst_reg);
1185				/* dsg %w0,<d(imm)>(%l) */
1186				EMIT6_DISP_LH(0xe3000000, 0x000d, REG_W0, REG_0,
1187					      REG_L, EMIT_CONST_U64(imm));
1188				break;
1189			}
1190		} else {
1191			switch (off) {
1192			case 0: /* dst = dst {/,%} imm */
1193				/* lghi %w0,0 */
1194				EMIT4_IMM(0xa7090000, REG_W0, 0);
1195				/* lgr %w1,%dst */
1196				EMIT4(0xb9040000, REG_W1, dst_reg);
1197				/* lgrl %dst,imm */
1198				EMIT6_PCREL_RILB(0xc4080000, dst_reg,
1199						 _EMIT_CONST_U64(imm));
1200				jit->seen |= SEEN_LITERAL;
1201				/* dlgr %w0,%dst */
1202				EMIT4(0xb9870000, REG_W0, dst_reg);
1203				break;
1204			case 1: /* dst = (s64) dst {/,%} (s64) imm */
1205				/* lgr %w1,%dst */
1206				EMIT4(0xb9040000, REG_W1, dst_reg);
1207				/* lgrl %dst,imm */
1208				EMIT6_PCREL_RILB(0xc4080000, dst_reg,
1209						 _EMIT_CONST_U64(imm));
1210				jit->seen |= SEEN_LITERAL;
1211				/* dsgr %w0,%dst */
1212				EMIT4(0xb90d0000, REG_W0, dst_reg);
1213				break;
1214			}
1215		}
1216		/* lgr %dst,%rc */
1217		EMIT4(0xb9040000, dst_reg, rc_reg);
1218		break;
1219	}
1220	/*
1221	 * BPF_AND
1222	 */
1223	case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
1224		/* nr %dst,%src */
1225		EMIT2(0x1400, dst_reg, src_reg);
1226		EMIT_ZERO(dst_reg);
1227		break;
1228	case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
1229		/* ngr %dst,%src */
1230		EMIT4(0xb9800000, dst_reg, src_reg);
1231		break;
1232	case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
1233		/* nilf %dst,imm */
1234		EMIT6_IMM(0xc00b0000, dst_reg, imm);
1235		EMIT_ZERO(dst_reg);
1236		break;
1237	case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
1238		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
1239			/* ng %dst,<d(imm)>(%l) */
1240			EMIT6_DISP_LH(0xe3000000, 0x0080,
1241				      dst_reg, REG_0, REG_L,
1242				      EMIT_CONST_U64(imm));
1243		} else {
1244			/* lgrl %w0,imm */
1245			EMIT6_PCREL_RILB(0xc4080000, REG_W0,
1246					 _EMIT_CONST_U64(imm));
1247			jit->seen |= SEEN_LITERAL;
1248			/* ngr %dst,%w0 */
1249			EMIT4(0xb9800000, dst_reg, REG_W0);
1250		}
1251		break;
1252	/*
1253	 * BPF_OR
1254	 */
1255	case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
1256		/* or %dst,%src */
1257		EMIT2(0x1600, dst_reg, src_reg);
1258		EMIT_ZERO(dst_reg);
1259		break;
1260	case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
1261		/* ogr %dst,%src */
1262		EMIT4(0xb9810000, dst_reg, src_reg);
1263		break;
1264	case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
1265		/* oilf %dst,imm */
1266		EMIT6_IMM(0xc00d0000, dst_reg, imm);
1267		EMIT_ZERO(dst_reg);
1268		break;
1269	case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
1270		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
1271			/* og %dst,<d(imm)>(%l) */
1272			EMIT6_DISP_LH(0xe3000000, 0x0081,
1273				      dst_reg, REG_0, REG_L,
1274				      EMIT_CONST_U64(imm));
1275		} else {
1276			/* lgrl %w0,imm */
1277			EMIT6_PCREL_RILB(0xc4080000, REG_W0,
1278					 _EMIT_CONST_U64(imm));
1279			jit->seen |= SEEN_LITERAL;
1280			/* ogr %dst,%w0 */
1281			EMIT4(0xb9810000, dst_reg, REG_W0);
1282		}
1283		break;
1284	/*
1285	 * BPF_XOR
1286	 */
1287	case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
1288		/* xr %dst,%src */
1289		EMIT2(0x1700, dst_reg, src_reg);
1290		EMIT_ZERO(dst_reg);
1291		break;
1292	case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
1293		/* xgr %dst,%src */
1294		EMIT4(0xb9820000, dst_reg, src_reg);
1295		break;
1296	case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
1297		if (imm != 0) {
1298			/* xilf %dst,imm */
1299			EMIT6_IMM(0xc0070000, dst_reg, imm);
1300		}
1301		EMIT_ZERO(dst_reg);
1302		break;
1303	case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
1304		if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
1305			/* xg %dst,<d(imm)>(%l) */
1306			EMIT6_DISP_LH(0xe3000000, 0x0082,
1307				      dst_reg, REG_0, REG_L,
1308				      EMIT_CONST_U64(imm));
1309		} else {
1310			/* lgrl %w0,imm */
1311			EMIT6_PCREL_RILB(0xc4080000, REG_W0,
1312					 _EMIT_CONST_U64(imm));
1313			jit->seen |= SEEN_LITERAL;
1314			/* xgr %dst,%w0 */
1315			EMIT4(0xb9820000, dst_reg, REG_W0);
1316		}
1317		break;
1318	/*
1319	 * BPF_LSH
1320	 */
1321	case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
1322		/* sll %dst,0(%src) */
1323		EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
1324		EMIT_ZERO(dst_reg);
1325		break;
1326	case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
1327		/* sllg %dst,%dst,0(%src) */
1328		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
1329		break;
1330	case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
1331		if (imm != 0) {
1332			/* sll %dst,imm(%r0) */
1333			EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
1334		}
1335		EMIT_ZERO(dst_reg);
1336		break;
1337	case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
1338		if (imm == 0)
1339			break;
1340		/* sllg %dst,%dst,imm(%r0) */
1341		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
1342		break;
1343	/*
1344	 * BPF_RSH
1345	 */
1346	case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
1347		/* srl %dst,0(%src) */
1348		EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
1349		EMIT_ZERO(dst_reg);
1350		break;
1351	case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
1352		/* srlg %dst,%dst,0(%src) */
1353		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
1354		break;
1355	case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
1356		if (imm != 0) {
1357			/* srl %dst,imm(%r0) */
1358			EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
1359		}
1360		EMIT_ZERO(dst_reg);
1361		break;
1362	case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
1363		if (imm == 0)
1364			break;
1365		/* srlg %dst,%dst,imm(%r0) */
1366		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
1367		break;
1368	/*
1369	 * BPF_ARSH
1370	 */
1371	case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */
1372		/* sra %dst,0(%src) */
1373		EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0);
1374		EMIT_ZERO(dst_reg);
1375		break;
1376	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
1377		/* srag %dst,%dst,0(%src) */
1378		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
1379		break;
1380	case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst) >>= imm */
1381		if (imm != 0) {
1382			/* sra %dst,imm(%r0) */
1383			EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
1384		}
1385		EMIT_ZERO(dst_reg);
1386		break;
1387	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
1388		if (imm == 0)
1389			break;
1390		/* srag %dst,%dst,imm(%r0) */
1391		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
1392		break;
1393	/*
1394	 * BPF_NEG
1395	 */
1396	case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
1397		/* lcr %dst,%dst */
1398		EMIT2(0x1300, dst_reg, dst_reg);
1399		EMIT_ZERO(dst_reg);
1400		break;
1401	case BPF_ALU64 | BPF_NEG: /* dst = -dst */
1402		/* lcgr %dst,%dst */
1403		EMIT4(0xb9030000, dst_reg, dst_reg);
1404		break;
1405	/*
1406	 * BPF_FROM_BE/LE
1407	 */
1408	case BPF_ALU | BPF_END | BPF_FROM_BE:
1409		/* s390 is big endian, therefore only clear high order bytes */
1410		switch (imm) {
1411		case 16: /* dst = (u16) cpu_to_be16(dst) */
1412			/* llghr %dst,%dst */
1413			EMIT4(0xb9850000, dst_reg, dst_reg);
1414			if (insn_is_zext(&insn[1]))
1415				insn_count = 2;
1416			break;
1417		case 32: /* dst = (u32) cpu_to_be32(dst) */
1418			if (!fp->aux->verifier_zext)
1419				/* llgfr %dst,%dst */
1420				EMIT4(0xb9160000, dst_reg, dst_reg);
1421			break;
1422		case 64: /* dst = (u64) cpu_to_be64(dst) */
1423			break;
1424		}
1425		break;
1426	case BPF_ALU | BPF_END | BPF_FROM_LE:
1427	case BPF_ALU64 | BPF_END | BPF_FROM_LE:
1428		switch (imm) {
1429		case 16: /* dst = (u16) cpu_to_le16(dst) */
1430			/* lrvr %dst,%dst */
1431			EMIT4(0xb91f0000, dst_reg, dst_reg);
1432			/* srl %dst,16(%r0) */
1433			EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
1434			/* llghr %dst,%dst */
1435			EMIT4(0xb9850000, dst_reg, dst_reg);
1436			if (insn_is_zext(&insn[1]))
1437				insn_count = 2;
1438			break;
1439		case 32: /* dst = (u32) cpu_to_le32(dst) */
1440			/* lrvr %dst,%dst */
1441			EMIT4(0xb91f0000, dst_reg, dst_reg);
1442			if (!fp->aux->verifier_zext)
1443				/* llgfr %dst,%dst */
1444				EMIT4(0xb9160000, dst_reg, dst_reg);
1445			break;
1446		case 64: /* dst = (u64) cpu_to_le64(dst) */
1447			/* lrvgr %dst,%dst */
1448			EMIT4(0xb90f0000, dst_reg, dst_reg);
1449			break;
1450		}
1451		break;
1452	/*
1453	 * BPF_NOSPEC (speculation barrier)
1454	 */
1455	case BPF_ST | BPF_NOSPEC:
1456		break;
1457	/*
1458	 * BPF_ST(X)
1459	 */
1460	case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
1461	case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
1462		bpf_jit_probe_store_pre(jit, insn, &probe);
1463		/* stcy %src,off(%dst,%arena) */
1464		EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg,
1465			      probe.arena_reg, off);
1466		err = bpf_jit_probe_post(jit, fp, &probe);
1467		if (err < 0)
1468			return err;
1469		jit->seen |= SEEN_MEM;
1470		break;
1471	case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
1472	case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
1473		bpf_jit_probe_store_pre(jit, insn, &probe);
1474		/* sthy %src,off(%dst,%arena) */
1475		EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg,
1476			      probe.arena_reg, off);
1477		err = bpf_jit_probe_post(jit, fp, &probe);
1478		if (err < 0)
1479			return err;
1480		jit->seen |= SEEN_MEM;
1481		break;
1482	case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
1483	case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
1484		bpf_jit_probe_store_pre(jit, insn, &probe);
1485		/* sty %src,off(%dst,%arena) */
1486		EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg,
1487			      probe.arena_reg, off);
1488		err = bpf_jit_probe_post(jit, fp, &probe);
1489		if (err < 0)
1490			return err;
1491		jit->seen |= SEEN_MEM;
1492		break;
1493	case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
1494	case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
1495		bpf_jit_probe_store_pre(jit, insn, &probe);
1496		/* stg %src,off(%dst,%arena) */
1497		EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg,
1498			      probe.arena_reg, off);
1499		err = bpf_jit_probe_post(jit, fp, &probe);
1500		if (err < 0)
1501			return err;
1502		jit->seen |= SEEN_MEM;
1503		break;
1504	case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
1505	case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
1506		/* lhi %w0,imm */
1507		EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
1508		bpf_jit_probe_store_pre(jit, insn, &probe);
1509		/* stcy %w0,off(%dst,%arena) */
1510		EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg,
1511			      probe.arena_reg, off);
1512		err = bpf_jit_probe_post(jit, fp, &probe);
1513		if (err < 0)
1514			return err;
1515		jit->seen |= SEEN_MEM;
1516		break;
1517	case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
1518	case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
1519		/* lhi %w0,imm */
1520		EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
1521		bpf_jit_probe_store_pre(jit, insn, &probe);
1522		/* sthy %w0,off(%dst,%arena) */
1523		EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg,
1524			      probe.arena_reg, off);
1525		err = bpf_jit_probe_post(jit, fp, &probe);
1526		if (err < 0)
1527			return err;
1528		jit->seen |= SEEN_MEM;
1529		break;
1530	case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
1531	case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
1532		/* llilf %w0,imm  */
1533		EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
1534		bpf_jit_probe_store_pre(jit, insn, &probe);
1535		/* sty %w0,off(%dst,%arena) */
1536		EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg,
1537			      probe.arena_reg, off);
1538		err = bpf_jit_probe_post(jit, fp, &probe);
1539		if (err < 0)
1540			return err;
1541		jit->seen |= SEEN_MEM;
1542		break;
1543	case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
1544	case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
1545		/* lgfi %w0,imm */
1546		EMIT6_IMM(0xc0010000, REG_W0, imm);
1547		bpf_jit_probe_store_pre(jit, insn, &probe);
1548		/* stg %w0,off(%dst,%arena) */
1549		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg,
1550			      probe.arena_reg, off);
1551		err = bpf_jit_probe_post(jit, fp, &probe);
1552		if (err < 0)
1553			return err;
1554		jit->seen |= SEEN_MEM;
1555		break;
1556	/*
1557	 * BPF_ATOMIC
1558	 */
1559	case BPF_STX | BPF_ATOMIC | BPF_DW:
1560	case BPF_STX | BPF_ATOMIC | BPF_W:
1561	case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
1562	case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
1563	{
1564		bool is32 = BPF_SIZE(insn->code) == BPF_W;
1565
1566		/*
1567		 * Unlike loads and stores, atomics have only a base register,
1568		 * but no index register. For the non-arena case, simply use
1569		 * %dst as a base. For the arena case, use the work register
1570		 * %r1: first, load the arena base into it, and then add %dst
1571		 * to it.
1572		 */
1573		probe.arena_reg = dst_reg;
1574
1575		switch (insn->imm) {
1576#define EMIT_ATOMIC(op32, op64) do {					\
1577	bpf_jit_probe_atomic_pre(jit, insn, &probe);			\
1578	/* {op32|op64} {%w0|%src},%src,off(%arena) */			\
1579	EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64),		\
1580		      (insn->imm & BPF_FETCH) ? src_reg : REG_W0,	\
1581		      src_reg, probe.arena_reg, off);			\
1582	err = bpf_jit_probe_post(jit, fp, &probe);			\
1583	if (err < 0)							\
1584		return err;						\
1585	if (insn->imm & BPF_FETCH) {					\
1586		/* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */	\
1587		_EMIT2(0x07e0);						\
1588		if (is32)                                               \
1589			EMIT_ZERO(src_reg);				\
1590	}								\
1591} while (0)
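		/*
		 * For BPF_ADD | BPF_FETCH on a 32-bit operand (non-arena
		 * case) this expands to roughly:
		 *
		 *	laal	%src,%src,off(%dst)
		 *	bcr	14,0
		 *	llgfr	%src,%src	(unless verifier_zext)
		 */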
1592		case BPF_ADD:
1593		case BPF_ADD | BPF_FETCH:
1594			/* {laal|laalg} */
1595			EMIT_ATOMIC(0x00fa, 0x00ea);
1596			break;
1597		case BPF_AND:
1598		case BPF_AND | BPF_FETCH:
1599			/* {lan|lang} */
1600			EMIT_ATOMIC(0x00f4, 0x00e4);
1601			break;
1602		case BPF_OR:
1603		case BPF_OR | BPF_FETCH:
1604			/* {lao|laog} */
1605			EMIT_ATOMIC(0x00f6, 0x00e6);
1606			break;
1607		case BPF_XOR:
1608		case BPF_XOR | BPF_FETCH:
1609			/* {lax|laxg} */
1610			EMIT_ATOMIC(0x00f7, 0x00e7);
1611			break;
1612#undef EMIT_ATOMIC
1613		case BPF_XCHG: {
1614			struct bpf_jit_probe load_probe = probe;
1615			int loop_start;
1616
1617			bpf_jit_probe_atomic_pre(jit, insn, &load_probe);
1618			/* {ly|lg} %w0,off(%arena) */
1619			EMIT6_DISP_LH(0xe3000000,
1620				      is32 ? 0x0058 : 0x0004, REG_W0, REG_0,
1621				      load_probe.arena_reg, off);
1622			bpf_jit_probe_emit_nop(jit, &load_probe);
1623			/* Reuse {ly|lg}'s arena_reg for {csy|csg}. */
1624			if (load_probe.prg != -1) {
1625				probe.prg = jit->prg;
1626				probe.arena_reg = load_probe.arena_reg;
1627			}
1628			loop_start = jit->prg;
1629			/* 0: {csy|csg} %w0,%src,off(%arena) */
1630			EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
1631				      REG_W0, src_reg, probe.arena_reg, off);
1632			bpf_jit_probe_emit_nop(jit, &probe);
1633			/* brc 4,0b */
1634			EMIT4_PCREL_RIC(0xa7040000, 4, loop_start);
1635			/* {llgfr|lgr} %src,%w0 */
1636			EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0);
1637			/* Both probes should land here on exception. */
1638			err = bpf_jit_probe_post(jit, fp, &load_probe);
1639			if (err < 0)
1640				return err;
1641			err = bpf_jit_probe_post(jit, fp, &probe);
1642			if (err < 0)
1643				return err;
1644			if (is32 && insn_is_zext(&insn[1]))
1645				insn_count = 2;
1646			break;
1647		}
1648		case BPF_CMPXCHG:
1649			bpf_jit_probe_atomic_pre(jit, insn, &probe);
1650			/* 0: {csy|csg} %b0,%src,off(%arena) */
1651			EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
1652				      BPF_REG_0, src_reg,
1653				      probe.arena_reg, off);
1654			err = bpf_jit_probe_post(jit, fp, &probe);
1655			if (err < 0)
1656				return err;
1657			break;
1658		default:
1659			pr_err("Unknown atomic operation %02x\n", insn->imm);
1660			return -1;
1661		}
1662
1663		jit->seen |= SEEN_MEM;
1664		break;
1665	}
1666	/*
1667	 * BPF_LDX
1668	 */
1669	case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
1670	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1671	case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
1672		bpf_jit_probe_load_pre(jit, insn, &probe);
1673		/* llgc %dst,off(%src,%arena) */
1674		EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg,
1675			      probe.arena_reg, off);
1676		err = bpf_jit_probe_post(jit, fp, &probe);
1677		if (err < 0)
1678			return err;
1679		jit->seen |= SEEN_MEM;
1680		if (insn_is_zext(&insn[1]))
1681			insn_count = 2;
1682		break;
1683	case BPF_LDX | BPF_MEMSX | BPF_B: /* dst = *(s8 *)(ul) (src + off) */
1684	case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
1685		bpf_jit_probe_load_pre(jit, insn, &probe);
1686		/* lgb %dst,off(%src) */
1687		EMIT6_DISP_LH(0xe3000000, 0x0077, dst_reg, src_reg, REG_0, off);
1688		err = bpf_jit_probe_post(jit, fp, &probe);
1689		if (err < 0)
1690			return err;
1691		jit->seen |= SEEN_MEM;
1692		break;
1693	case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
1694	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1695	case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
1696		bpf_jit_probe_load_pre(jit, insn, &probe);
1697		/* llgh %dst,off(%src,%arena) */
1698		EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg,
1699			      probe.arena_reg, off);
1700		err = bpf_jit_probe_post(jit, fp, &probe);
1701		if (err < 0)
1702			return err;
1703		jit->seen |= SEEN_MEM;
1704		if (insn_is_zext(&insn[1]))
1705			insn_count = 2;
1706		break;
1707	case BPF_LDX | BPF_MEMSX | BPF_H: /* dst = *(s16 *)(ul) (src + off) */
1708	case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
1709		bpf_jit_probe_load_pre(jit, insn, &probe);
1710		/* lgh %dst,off(%src) */
1711		EMIT6_DISP_LH(0xe3000000, 0x0015, dst_reg, src_reg, REG_0, off);
1712		err = bpf_jit_probe_post(jit, fp, &probe);
1713		if (err < 0)
1714			return err;
1715		jit->seen |= SEEN_MEM;
1716		break;
1717	case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
1718	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1719	case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
1720		bpf_jit_probe_load_pre(jit, insn, &probe);
1721		/* llgf %dst,off(%src) */
1722		jit->seen |= SEEN_MEM;
1723		EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg,
1724			      probe.arena_reg, off);
1725		err = bpf_jit_probe_post(jit, fp, &probe);
1726		if (err < 0)
1727			return err;
1728		if (insn_is_zext(&insn[1]))
1729			insn_count = 2;
1730		break;
1731	case BPF_LDX | BPF_MEMSX | BPF_W: /* dst = *(s32 *)(ul) (src + off) */
1732	case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
1733		bpf_jit_probe_load_pre(jit, insn, &probe);
1734		/* lgf %dst,off(%src) */
1735		jit->seen |= SEEN_MEM;
1736		EMIT6_DISP_LH(0xe3000000, 0x0014, dst_reg, src_reg, REG_0, off);
1737		err = bpf_jit_probe_post(jit, fp, &probe);
1738		if (err < 0)
1739			return err;
1740		break;
1741	case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
1742	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1743	case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
1744		bpf_jit_probe_load_pre(jit, insn, &probe);
1745		/* lg %dst,off(%src,%arena) */
1746		jit->seen |= SEEN_MEM;
1747		EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg,
1748			      probe.arena_reg, off);
1749		err = bpf_jit_probe_post(jit, fp, &probe);
1750		if (err < 0)
1751			return err;
1752		break;
1753	/*
1754	 * BPF_JMP / CALL
1755	 */
1756	case BPF_JMP | BPF_CALL:
1757	{
1758		const struct btf_func_model *m;
1759		bool func_addr_fixed;
1760		int j, ret;
1761		u64 func;
1762
1763		ret = bpf_jit_get_func_addr(fp, insn, extra_pass,
1764					    &func, &func_addr_fixed);
1765		if (ret < 0)
1766			return -1;
1767
1768		REG_SET_SEEN(BPF_REG_5);
1769		jit->seen |= SEEN_FUNC;
1770		/*
1771		 * Copy the tail call counter to where the callee expects it.
1772		 *
1773		 * Note 1: The callee can increment the tail call counter, but
1774		 * we do not load it back, since the x86 JIT does not do this
1775		 * either.
1776		 *
1777		 * Note 2: We assume that the verifier does not let us call the
1778		 * main program, which clears the tail call counter on entry.
1779		 */
1780		/* mvc STK_OFF_TCCNT(4,%r15),N(%r15) */
1781		_EMIT6(0xd203f000 | STK_OFF_TCCNT,
1782		       0xf000 | (STK_OFF_TCCNT + STK_OFF + stack_depth));
1783
1784		/* Sign-extend the kfunc arguments. */
1785		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
1786			m = bpf_jit_find_kfunc_model(fp, insn);
1787			if (!m)
1788				return -1;
1789
1790			for (j = 0; j < m->nr_args; j++) {
1791				if (sign_extend(jit, BPF_REG_1 + j,
1792						m->arg_size[j],
1793						m->arg_flags[j]))
1794					return -1;
1795			}
1796		}
1797
1798		/* lgrl %w1,func */
1799		EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
1800		/* %r1() */
1801		call_r1(jit);
1802		/* lgr %b0,%r2: load return value into %b0 */
1803		EMIT4(0xb9040000, BPF_REG_0, REG_2);
1804		break;
1805	}
1806	case BPF_JMP | BPF_TAIL_CALL: {
1807		int patch_1_clrj, patch_2_clij, patch_3_brc;
1808
1809		/*
1810		 * Implicit input:
1811		 *  B1: pointer to ctx
1812		 *  B2: pointer to bpf_array
1813		 *  B3: index in bpf_array
1814		 *
1815		 * if (index >= array->map.max_entries)
1816		 *         goto out;
1817		 */
1818
1819		/* llgf %w1,map.max_entries(%b2) */
1820		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
1821			      offsetof(struct bpf_array, map.max_entries));
1822		/* if ((u32)%b3 >= (u32)%w1) goto out; */
1823		/* clrj %b3,%w1,0xa,out */
1824		patch_1_clrj = jit->prg;
1825		EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa,
1826				 jit->prg);
1827
1828		/*
1829		 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
1830		 *         goto out;
1831		 */
1832
1833		if (jit->seen & SEEN_STACK)
1834			off = STK_OFF_TCCNT + STK_OFF + stack_depth;
1835		else
1836			off = STK_OFF_TCCNT;
1837		/* lhi %w0,1 */
1838		EMIT4_IMM(0xa7080000, REG_W0, 1);
1839		/* laal %w1,%w0,off(%r15) */
1840		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
1841		/* clij %w1,MAX_TAIL_CALL_CNT-1,0x2,out */
1842		patch_2_clij = jit->prg;
1843		EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT - 1,
1844				 2, jit->prg);
1845
1846		/*
1847		 * prog = array->ptrs[index];
1848		 * if (prog == NULL)
1849		 *         goto out;
1850		 */
1851
1852		/* llgfr %r1,%b3: %r1 = (u32) index */
1853		EMIT4(0xb9160000, REG_1, BPF_REG_3);
1854		/* sllg %r1,%r1,3: %r1 *= 8 */
1855		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
1856		/* ltg %r1,prog(%b2,%r1) */
1857		EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
1858			      REG_1, offsetof(struct bpf_array, ptrs));
1859		/* brc 0x8,out */
1860		patch_3_brc = jit->prg;
1861		EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg);
1862
1863		/*
1864		 * Restore registers before calling function
1865		 */
1866		save_restore_regs(jit, REGS_RESTORE, stack_depth, 0);
1867
1868		/*
1869		 * goto *(prog->bpf_func + tail_call_start);
1870		 */
1871
1872		/* lg %r1,bpf_func(%r1) */
1873		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
1874			      offsetof(struct bpf_prog, bpf_func));
1875		if (nospec_uses_trampoline()) {
1876			jit->seen |= SEEN_FUNC;
1877			/* aghi %r1,tail_call_start */
1878			EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start);
1879			/* brcl 0xf,__s390_indirect_jump_r1 */
1880			EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->r1_thunk_ip);
1881		} else {
1882			/* bc 0xf,tail_call_start(%r1) */
1883			_EMIT4(0x47f01000 + jit->tail_call_start);
1884		}
1885		/* out: */
1886		if (jit->prg_buf) {
1887			*(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
1888				(jit->prg - patch_1_clrj) >> 1;
1889			*(u16 *)(jit->prg_buf + patch_2_clij + 2) =
1890				(jit->prg - patch_2_clij) >> 1;
1891			*(u16 *)(jit->prg_buf + patch_3_brc + 2) =
1892				(jit->prg - patch_3_brc) >> 1;
1893		}
1894		break;
1895	}
1896	case BPF_JMP | BPF_EXIT: /* return b0 */
1897		last = (i == fp->len - 1) ? 1 : 0;
1898		if (last)
1899			break;
1900		if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip))
1901			/* brc 0xf, <exit> */
1902			EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip);
1903		else
1904			/* brcl 0xf, <exit> */
1905			EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip);
1906		break;
1907	/*
1908	 * Branch relative (number of skipped instructions) to offset on
1909	 * condition.
1910	 *
1911	 * Condition code to mask mapping:
1912	 *
1913	 * CC | Description	   | Mask
1914	 * ------------------------------
1915	 * 0  | Operands equal	   |	8
1916	 * 1  | First operand low  |	4
1917	 * 2  | First operand high |	2
1918	 * 3  | Unused		   |	1
1919	 *
1920	 * For s390x relative branches: ip = ip + off_bytes
1921	 * For BPF relative branches:	insn = insn + off_insns + 1
1922	 *
1923	 * For example, on s390x an offset of 0 jumps to the branch
1924	 * instruction itself (a loop), while in BPF an offset of 0
1925	 * branches to the instruction following the branch.
1926	 */
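	/*
	 * For example, a conditional jump with off == 3 at BPF instruction i
	 * must land on BPF instruction i + 4, so the JITed target is
	 * addrs[i + 3 + 1] and the encoded halfword displacement is
	 * (addrs[i + 3 + 1] - jit->prg) / 2.
	 */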
1927	case BPF_JMP32 | BPF_JA: /* if (true) */
1928		branch_oc_off = imm;
1929		fallthrough;
1930	case BPF_JMP | BPF_JA: /* if (true) */
1931		mask = 0xf000; /* j */
1932		goto branch_oc;
1933	case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
1934	case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */
1935		mask = 0x2000; /* jh */
1936		goto branch_ks;
1937	case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
1938	case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */
1939		mask = 0x4000; /* jl */
1940		goto branch_ks;
1941	case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
1942	case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */
1943		mask = 0xa000; /* jhe */
1944		goto branch_ks;
1945	case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
1946	case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */
1947		mask = 0xc000; /* jle */
1948		goto branch_ks;
1949	case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
1950	case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */
1951		mask = 0x2000; /* jh */
1952		goto branch_ku;
1953	case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
1954	case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */
1955		mask = 0x4000; /* jl */
1956		goto branch_ku;
1957	case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
1958	case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */
1959		mask = 0xa000; /* jhe */
1960		goto branch_ku;
1961	case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
1962	case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */
1963		mask = 0xc000; /* jle */
1964		goto branch_ku;
1965	case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
1966	case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */
1967		mask = 0x7000; /* jne */
1968		goto branch_ku;
1969	case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
1970	case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */
1971		mask = 0x8000; /* je */
1972		goto branch_ku;
1973	case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
1974	case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */
1975		mask = 0x7000; /* jnz */
1976		if (BPF_CLASS(insn->code) == BPF_JMP32) {
1977			/* llilf %w1,imm (load zero extend imm) */
1978			EMIT6_IMM(0xc00f0000, REG_W1, imm);
1979			/* nr %w1,%dst */
1980			EMIT2(0x1400, REG_W1, dst_reg);
1981		} else {
1982			/* lgfi %w1,imm (load sign extend imm) */
1983			EMIT6_IMM(0xc0010000, REG_W1, imm);
1984			/* ngr %w1,%dst */
1985			EMIT4(0xb9800000, REG_W1, dst_reg);
1986		}
1987		goto branch_oc;
1988
1989	case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
1990	case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */
1991		mask = 0x2000; /* jh */
1992		goto branch_xs;
1993	case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
1994	case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */
1995		mask = 0x4000; /* jl */
1996		goto branch_xs;
1997	case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
1998	case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */
1999		mask = 0xa000; /* jhe */
2000		goto branch_xs;
2001	case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
2002	case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */
2003		mask = 0xc000; /* jle */
2004		goto branch_xs;
2005	case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
2006	case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */
2007		mask = 0x2000; /* jh */
2008		goto branch_xu;
2009	case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
2010	case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */
2011		mask = 0x4000; /* jl */
2012		goto branch_xu;
2013	case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
2014	case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */
2015		mask = 0xa000; /* jhe */
2016		goto branch_xu;
2017	case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
2018	case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */
2019		mask = 0xc000; /* jle */
2020		goto branch_xu;
2021	case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
2022	case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */
2023		mask = 0x7000; /* jne */
2024		goto branch_xu;
2025	case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
2026	case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */
2027		mask = 0x8000; /* je */
2028		goto branch_xu;
2029	case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
2030	case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */
2031	{
2032		bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
2033
2034		mask = 0x7000; /* jnz */
2035		/* nrk or ngrk %w1,%dst,%src */
2036		EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000),
2037			  REG_W1, dst_reg, src_reg);
2038		goto branch_oc;
2039branch_ks:
2040		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
2041		/* cfi or cgfi %dst,imm */
2042		EMIT6_IMM(is_jmp32 ? 0xc20d0000 : 0xc20c0000,
2043			  dst_reg, imm);
2044		if (!is_first_pass(jit) &&
2045		    can_use_rel(jit, addrs[i + off + 1])) {
2046			/* brc mask,off */
2047			EMIT4_PCREL_RIC(0xa7040000,
2048					mask >> 12, addrs[i + off + 1]);
2049		} else {
2050			/* brcl mask,off */
2051			EMIT6_PCREL_RILC(0xc0040000,
2052					 mask >> 12, addrs[i + off + 1]);
2053		}
2054		break;
2055branch_ku:
2056		/* lgfi %w1,imm (load sign extend imm) */
2057		src_reg = REG_1;
2058		EMIT6_IMM(0xc0010000, src_reg, imm);
2059		goto branch_xu;
2060branch_xs:
2061		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
2062		if (!is_first_pass(jit) &&
2063		    can_use_rel(jit, addrs[i + off + 1])) {
2064			/* crj or cgrj %dst,%src,mask,off */
2065			EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
2066				    dst_reg, src_reg, i, off, mask);
2067		} else {
2068			/* cr or cgr %dst,%src */
2069			if (is_jmp32)
2070				EMIT2(0x1900, dst_reg, src_reg);
2071			else
2072				EMIT4(0xb9200000, dst_reg, src_reg);
2073			/* brcl mask,off */
2074			EMIT6_PCREL_RILC(0xc0040000,
2075					 mask >> 12, addrs[i + off + 1]);
2076		}
2077		break;
2078branch_xu:
2079		is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
2080		if (!is_first_pass(jit) &&
2081		    can_use_rel(jit, addrs[i + off + 1])) {
2082			/* clrj or clgrj %dst,%src,mask,off */
2083			EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
2084				    dst_reg, src_reg, i, off, mask);
2085		} else {
2086			/* clr or clgr %dst,%src */
2087			if (is_jmp32)
2088				EMIT2(0x1500, dst_reg, src_reg);
2089			else
2090				EMIT4(0xb9210000, dst_reg, src_reg);
2091			/* brcl mask,off */
2092			EMIT6_PCREL_RILC(0xc0040000,
2093					 mask >> 12, addrs[i + off + 1]);
2094		}
2095		break;
2096branch_oc:
2097		if (!is_first_pass(jit) &&
2098		    can_use_rel(jit, addrs[i + branch_oc_off + 1])) {
2099			/* brc mask,off */
2100			EMIT4_PCREL_RIC(0xa7040000,
2101					mask >> 12,
2102					addrs[i + branch_oc_off + 1]);
2103		} else {
2104			/* brcl mask,off */
2105			EMIT6_PCREL_RILC(0xc0040000,
2106					 mask >> 12,
2107					 addrs[i + branch_oc_off + 1]);
2108		}
2109		break;
2110	}
2111	default: /* too complex, give up */
2112		pr_err("Unknown opcode %02x\n", insn->code);
2113		return -1;
2114	}
2115
2116	return insn_count;
2117}
2118
2119/*
2120 * Return whether the new address of the i-th instruction satisfies all invariants
2121 */
2122static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i)
2123{
2124	/* On the first pass anything goes */
2125	if (is_first_pass(jit))
2126		return true;
2127
2128	/* The codegen pass must not change anything */
2129	if (is_codegen_pass(jit))
2130		return jit->addrs[i] == jit->prg;
2131
2132	/* Passes in between must not increase code size */
2133	return jit->addrs[i] >= jit->prg;
2134}
2135
2136/*
2137 * Update the address of i-th instruction
2138 */
2139static int bpf_set_addr(struct bpf_jit *jit, int i)
2140{
2141	int delta;
2142
2143	if (is_codegen_pass(jit)) {
2144		delta = jit->prg - jit->addrs[i];
2145		if (delta < 0)
2146			bpf_skip(jit, -delta);
2147	}
2148	if (WARN_ON_ONCE(!bpf_is_new_addr_sane(jit, i)))
2149		return -1;
2150	jit->addrs[i] = jit->prg;
2151	return 0;
2152}
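
/*
 * Illustration of the delta handling above: if a previous pass placed
 * instruction i at offset 0x40 but the codegen pass has only reached
 * 0x3c, bpf_skip(jit, 4) pads the 4-byte gap with no-ops, so that
 * jit->addrs[i] == jit->prg still holds and already-resolved branch
 * targets stay correct.
 */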
2153
2154/*
2155 * Compile eBPF program into s390x code
2156 */
2157static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
2158			bool extra_pass, u32 stack_depth)
2159{
2160	int i, insn_count, lit32_size, lit64_size;
2161	u64 kern_arena;
2162
2163	jit->lit32 = jit->lit32_start;
2164	jit->lit64 = jit->lit64_start;
2165	jit->prg = 0;
2166	jit->excnt = 0;
2167
2168	kern_arena = bpf_arena_get_kern_vm_start(fp->aux->arena);
2169	if (kern_arena)
2170		jit->kern_arena = _EMIT_CONST_U64(kern_arena);
2171	jit->user_arena = bpf_arena_get_user_vm_start(fp->aux->arena);
2172
2173	bpf_jit_prologue(jit, fp, stack_depth);
2174	if (bpf_set_addr(jit, 0) < 0)
2175		return -1;
2176	for (i = 0; i < fp->len; i += insn_count) {
2177		insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth);
2178		if (insn_count < 0)
2179			return -1;
2180		/* Next instruction address */
2181		if (bpf_set_addr(jit, i + insn_count) < 0)
2182			return -1;
2183	}
2184	bpf_jit_epilogue(jit, stack_depth);
2185
2186	lit32_size = jit->lit32 - jit->lit32_start;
2187	lit64_size = jit->lit64 - jit->lit64_start;
2188	jit->lit32_start = jit->prg;
2189	if (lit32_size)
2190		jit->lit32_start = ALIGN(jit->lit32_start, 4);
2191	jit->lit64_start = jit->lit32_start + lit32_size;
2192	if (lit64_size)
2193		jit->lit64_start = ALIGN(jit->lit64_start, 8);
2194	jit->size = jit->lit64_start + lit64_size;
2195	jit->size_prg = jit->prg;
2196
2197	if (WARN_ON_ONCE(fp->aux->extable &&
2198			 jit->excnt != fp->aux->num_exentries))
2199		/* Verifier bug - emitted and expected extable entry counts differ. */
2200		return -1;
2201
2202	return 0;
2203}
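
/*
 * Literal pool layout, with illustrative numbers: for jit->prg == 0x4e6,
 * lit32_size == 8 and lit64_size == 16, the code above computes
 * lit32_start = ALIGN(0x4e6, 4) = 0x4e8, lit64_start = ALIGN(0x4e8 + 8, 8)
 * = 0x4f0 and jit->size = 0x4f0 + 16 = 0x500, i.e. the image ends up as
 * [ program | 32-bit literals | 64-bit literals ].
 */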
2204
2205bool bpf_jit_needs_zext(void)
2206{
2207	return true;
2208}
2209
2210struct s390_jit_data {
2211	struct bpf_binary_header *header;
2212	struct bpf_jit ctx;
2213	int pass;
2214};
2215
2216static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
2217					       struct bpf_prog *fp)
2218{
2219	struct bpf_binary_header *header;
2220	struct bpf_insn *insn;
2221	u32 extable_size;
2222	u32 code_size;
2223	int i;
2224
2225	for (i = 0; i < fp->len; i++) {
2226		insn = &fp->insnsi[i];
2227
2228		if (BPF_CLASS(insn->code) == BPF_STX &&
2229		    BPF_MODE(insn->code) == BPF_PROBE_ATOMIC &&
2230		    (BPF_SIZE(insn->code) == BPF_DW ||
2231		     BPF_SIZE(insn->code) == BPF_W) &&
2232		    insn->imm == BPF_XCHG)
2233			/*
2234			 * bpf_jit_insn() emits a load and a compare-and-swap,
2235			 * both of which need to be probed.
2236			 */
2237			fp->aux->num_exentries += 1;
2238	}
2239	/* Each probed insn needs two extable entries. */
2240	fp->aux->num_exentries *= 2;
2241
2242	code_size = roundup(jit->size,
2243			    __alignof__(struct exception_table_entry));
2244	extable_size = fp->aux->num_exentries *
2245		sizeof(struct exception_table_entry);
2246	header = bpf_jit_binary_alloc(code_size + extable_size, &jit->prg_buf,
2247				      8, jit_fill_hole);
2248	if (!header)
2249		return NULL;
2250	fp->aux->extable = (struct exception_table_entry *)
2251		(jit->prg_buf + code_size);
2252	return header;
2253}
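
/*
 * The allocated image is thus laid out as follows (sketch):
 *
 *   prg_buf + 0:         JITed code and literal pools (jit->size bytes)
 *   prg_buf + code_size: exception table, where code_size is jit->size
 *                        rounded up to the extable entry alignment
 */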
2254
2255/*
2256 * Compile eBPF program "fp"
2257 */
2258struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
2259{
2260	u32 stack_depth = round_up(fp->aux->stack_depth, 8);
2261	struct bpf_prog *tmp, *orig_fp = fp;
2262	struct bpf_binary_header *header;
2263	struct s390_jit_data *jit_data;
2264	bool tmp_blinded = false;
2265	bool extra_pass = false;
2266	struct bpf_jit jit;
2267	int pass;
2268
2269	if (!fp->jit_requested)
2270		return orig_fp;
2271
2272	tmp = bpf_jit_blind_constants(fp);
2273	/*
2274	 * If blinding was requested and we failed during blinding,
2275	 * we must fall back to the interpreter.
2276	 */
2277	if (IS_ERR(tmp))
2278		return orig_fp;
2279	if (tmp != fp) {
2280		tmp_blinded = true;
2281		fp = tmp;
2282	}
2283
2284	jit_data = fp->aux->jit_data;
2285	if (!jit_data) {
2286		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2287		if (!jit_data) {
2288			fp = orig_fp;
2289			goto out;
2290		}
2291		fp->aux->jit_data = jit_data;
2292	}
2293	if (jit_data->ctx.addrs) {
2294		jit = jit_data->ctx;
2295		header = jit_data->header;
2296		extra_pass = true;
2297		pass = jit_data->pass + 1;
2298		goto skip_init_ctx;
2299	}
2300
2301	memset(&jit, 0, sizeof(jit));
2302	jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
2303	if (jit.addrs == NULL) {
2304		fp = orig_fp;
2305		goto free_addrs;
2306	}
2307	/*
2308	 * Three initial passes:
2309	 *   - 1/2: Determine clobbered registers
2310	 *   - 3:   Calculate program size and addrs array
2311	 */
2312	for (pass = 1; pass <= 3; pass++) {
2313		if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
2314			fp = orig_fp;
2315			goto free_addrs;
2316		}
2317	}
2318	/*
2319	 * Final pass: Allocate and generate program
2320	 */
2321	header = bpf_jit_alloc(&jit, fp);
2322	if (!header) {
2323		fp = orig_fp;
2324		goto free_addrs;
2325	}
2326skip_init_ctx:
2327	if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
2328		bpf_jit_binary_free(header);
2329		fp = orig_fp;
2330		goto free_addrs;
2331	}
2332	if (bpf_jit_enable > 1) {
2333		bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
2334		print_fn_code(jit.prg_buf, jit.size_prg);
2335	}
2336	if (!fp->is_func || extra_pass) {
2337		if (bpf_jit_binary_lock_ro(header)) {
2338			bpf_jit_binary_free(header);
2339			fp = orig_fp;
2340			goto free_addrs;
2341		}
2342	} else {
2343		jit_data->header = header;
2344		jit_data->ctx = jit;
2345		jit_data->pass = pass;
2346	}
2347	fp->bpf_func = (void *) jit.prg_buf;
2348	fp->jited = 1;
2349	fp->jited_len = jit.size;
2350
2351	if (!fp->is_func || extra_pass) {
2352		bpf_prog_fill_jited_linfo(fp, jit.addrs + 1);
2353free_addrs:
2354		kvfree(jit.addrs);
2355		kfree(jit_data);
2356		fp->aux->jit_data = NULL;
2357	}
2358out:
2359	if (tmp_blinded)
2360		bpf_jit_prog_release_other(fp, fp == orig_fp ?
2361					   tmp : orig_fp);
2362	return fp;
2363}
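
/*
 * Summary of the pass logic above (assuming, as the prg_buf checks in
 * this file suggest, that the EMIT macros only count bytes while
 * prg_buf is NULL): passes 1-3 write no code, and addrs[] may shrink
 * from pass to pass as shorter encodings become usable. The final pass
 * writes real code and must reproduce the pass-3 addresses exactly;
 * bpf_set_addr() no-op-pads any spot where the code came out shorter.
 */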
2364
2365bool bpf_jit_supports_kfunc_call(void)
2366{
2367	return true;
2368}
2369
2370bool bpf_jit_supports_far_kfunc_call(void)
2371{
2372	return true;
2373}
2374
2375int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2376		       void *old_addr, void *new_addr)
2377{
2378	struct bpf_plt expected_plt, current_plt, new_plt, *plt;
2379	struct {
2380		u16 opc;
2381		s32 disp;
2382	} __packed insn;
2383	char *ret;
2384	int err;
2385
2386	/* Verify the branch to be patched. */
2387	err = copy_from_kernel_nofault(&insn, ip, sizeof(insn));
2388	if (err < 0)
2389		return err;
2390	if (insn.opc != (0xc004 | (old_addr ? 0xf0 : 0)))
2391		return -EINVAL;
2392
2393	if (t == BPF_MOD_JUMP &&
2394	    insn.disp == ((char *)new_addr - (char *)ip) >> 1) {
2395		/*
2396		 * The branch already points to the destination,
2397		 * there is no PLT.
2398		 */
2399	} else {
2400		/* Verify the PLT. */
2401		plt = ip + (insn.disp << 1);
2402		err = copy_from_kernel_nofault(&current_plt, plt,
2403					       sizeof(current_plt));
2404		if (err < 0)
2405			return err;
2406		ret = (char *)ip + 6;
2407		bpf_jit_plt(&expected_plt, ret, old_addr);
2408		if (memcmp(&current_plt, &expected_plt, sizeof(current_plt)))
2409			return -EINVAL;
2410		/* Adjust the call address. */
2411		bpf_jit_plt(&new_plt, ret, new_addr);
2412		s390_kernel_write(&plt->target, &new_plt.target,
2413				  sizeof(void *));
2414	}
2415
2416	/* Adjust the mask of the branch. */
2417	insn.opc = 0xc004 | (new_addr ? 0xf0 : 0);
2418	s390_kernel_write((char *)ip + 1, (char *)&insn.opc + 1, 1);
2419
2420	/* Make the new code visible to the other CPUs. */
2421	text_poke_sync_lock();
2422
2423	return 0;
2424}
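
/*
 * Opcode arithmetic used above, spelled out: 0xc004 is "brcl 0,..." (a
 * never-taken branch, i.e. a 6-byte nop) and 0xc004 | 0xf0 == 0xc0f4 is
 * "brcl 15,..." (always taken). Toggling the branch therefore only needs
 * to rewrite the single byte at ip + 1, which holds the mask nibble.
 */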
2425
2426struct bpf_tramp_jit {
2427	struct bpf_jit common;
2428	int orig_stack_args_off;/* Offset of arguments placed on stack by the
2429				 * func_addr's original caller
2430				 */
2431	int stack_size;		/* Trampoline stack size */
2432	int backchain_off;	/* Offset of backchain */
2433	int stack_args_off;	/* Offset of stack arguments for calling
2434				 * func_addr, has to be at the top
2435				 */
2436	int reg_args_off;	/* Offset of register arguments for calling
2437				 * func_addr
2438				 */
2439	int ip_off;		/* For bpf_get_func_ip(), has to be at
2440				 * (ctx - 16)
2441				 */
2442	int arg_cnt_off;	/* For bpf_get_func_arg_cnt(), has to be at
2443				 * (ctx - 8)
2444				 */
2445	int bpf_args_off;	/* Offset of BPF_PROG context, which consists
2446				 * of BPF arguments followed by return value
2447				 */
2448	int retval_off;		/* Offset of return value (see above) */
2449	int r7_r8_off;		/* Offset of saved %r7 and %r8, which are used
2450				 * for __bpf_prog_enter() return value and
2451				 * func_addr respectively
2452				 */
2453	int run_ctx_off;	/* Offset of struct bpf_tramp_run_ctx */
2454	int tccnt_off;		/* Offset of saved tailcall counter */
2455	int r14_off;		/* Offset of saved %r14, has to be at the
2456				 * bottom */
2457	int do_fexit;		/* do_fexit: label */
2458};
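
/*
 * Sketch of the frame that __arch_prepare_bpf_trampoline() derives from
 * these offsets, from the new %r15 upwards (order follows the
 * alloc_stack() calls):
 *
 *   STACK_FRAME_OVERHEAD (backchain in its last 8 bytes) | stack_args |
 *   reg_args | ip | arg_cnt | bpf_args | retval | r7_r8 | run_ctx |
 *   tccnt | r14
 *
 * ip thus lands at bpf_args - 16 and arg_cnt at bpf_args - 8, as
 * bpf_get_func_ip() and bpf_get_func_arg_cnt() expect.
 */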
2459
2460static void load_imm64(struct bpf_jit *jit, int dst_reg, u64 val)
2461{
2462	/* llihf %dst_reg,val_hi */
2463	EMIT6_IMM(0xc00e0000, dst_reg, (val >> 32));
2464	/* oilf %dst_reg,val_lo */
2465	EMIT6_IMM(0xc00d0000, dst_reg, val);
2466}
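
/*
 * Example with illustrative values: load_imm64(jit, dst, 0x0123456789abcdef)
 * emits "llihf %dst,0x01234567", setting the upper 32 bits and clearing
 * the lower ones, followed by "oilf %dst,0x89abcdef", ORing in the lower
 * 32 bits.
 */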
2467
2468static int invoke_bpf_prog(struct bpf_tramp_jit *tjit,
2469			   const struct btf_func_model *m,
2470			   struct bpf_tramp_link *tlink, bool save_ret)
2471{
2472	struct bpf_jit *jit = &tjit->common;
2473	int cookie_off = tjit->run_ctx_off +
2474			 offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
2475	struct bpf_prog *p = tlink->link.prog;
2476	int patch;
2477
2478	/*
2479	 * run_ctx.cookie = tlink->cookie;
2480	 */
2481
2482	/* %r0 = tlink->cookie */
2483	load_imm64(jit, REG_W0, tlink->cookie);
2484	/* stg %r0,cookie_off(%r15) */
2485	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, REG_0, REG_15, cookie_off);
2486
2487	/*
2488	 * if ((start = __bpf_prog_enter(p, &run_ctx)) == 0)
2489	 *         goto skip;
2490	 */
2491
2492	/* %r1 = __bpf_prog_enter */
2493	load_imm64(jit, REG_1, (u64)bpf_trampoline_enter(p));
2494	/* %r2 = p */
2495	load_imm64(jit, REG_2, (u64)p);
2496	/* la %r3,run_ctx_off(%r15) */
2497	EMIT4_DISP(0x41000000, REG_3, REG_15, tjit->run_ctx_off);
2498	/* %r1() */
2499	call_r1(jit);
2500	/* ltgr %r7,%r2 */
2501	EMIT4(0xb9020000, REG_7, REG_2);
2502	/* brcl 8,skip */
2503	patch = jit->prg;
2504	EMIT6_PCREL_RILC(0xc0040000, 8, 0);
2505
2506	/*
2507	 * retval = bpf_func(args, p->insnsi);
2508	 */
2509
2510	/* %r1 = p->bpf_func */
2511	load_imm64(jit, REG_1, (u64)p->bpf_func);
2512	/* la %r2,bpf_args_off(%r15) */
2513	EMIT4_DISP(0x41000000, REG_2, REG_15, tjit->bpf_args_off);
2514	/* %r3 = p->insnsi */
2515	if (!p->jited)
2516		load_imm64(jit, REG_3, (u64)p->insnsi);
2517	/* %r1() */
2518	call_r1(jit);
2519	/* stg %r2,retval_off(%r15) */
2520	if (save_ret) {
2521		if (sign_extend(jit, REG_2, m->ret_size, m->ret_flags))
2522			return -1;
2523		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
2524			      tjit->retval_off);
2525	}
2526
2527	/* skip: */
2528	if (jit->prg_buf)
2529		*(u32 *)&jit->prg_buf[patch + 2] = (jit->prg - patch) >> 1;
2530
2531	/*
2532	 * __bpf_prog_exit(p, start, &run_ctx);
2533	 */
2534
2535	/* %r1 = __bpf_prog_exit */
2536	load_imm64(jit, REG_1, (u64)bpf_trampoline_exit(p));
2537	/* %r2 = p */
2538	load_imm64(jit, REG_2, (u64)p);
2539	/* lgr %r3,%r7 */
2540	EMIT4(0xb9040000, REG_3, REG_7);
2541	/* la %r4,run_ctx_off(%r15) */
2542	EMIT4_DISP(0x41000000, REG_4, REG_15, tjit->run_ctx_off);
2543	/* %r1() */
2544	call_r1(jit);
2545
2546	return 0;
2547}
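
/*
 * How the forward branch above is resolved: "brcl 8,skip" is emitted
 * with a zero displacement and its offset remembered in "patch"; once
 * the skip label's address is known, the signed 32-bit halfword
 * displacement (jit->prg - patch) >> 1 is stored at patch + 2, i.e. into
 * the displacement field of the 6-byte brcl instruction.
 */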
2548
2549static int alloc_stack(struct bpf_tramp_jit *tjit, size_t size)
2550{
2551	int stack_offset = tjit->stack_size;
2552
2553	tjit->stack_size += size;
2554	return stack_offset;
2555}
2556
2557/* ABI uses %r2 - %r6 for parameter passing. */
2558#define MAX_NR_REG_ARGS 5
2559
2560/* The "L" field of the "mvc" instruction is 8 bits. */
2561#define MAX_MVC_SIZE 256
2562#define MAX_NR_STACK_ARGS (MAX_MVC_SIZE / sizeof(u64))
2563
2564/* -mfentry generates a 6-byte nop on s390x. */
2565#define S390X_PATCH_SIZE 6
2566
2567static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
2568					 struct bpf_tramp_jit *tjit,
2569					 const struct btf_func_model *m,
2570					 u32 flags,
2571					 struct bpf_tramp_links *tlinks,
2572					 void *func_addr)
2573{
2574	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
2575	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
2576	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
2577	int nr_bpf_args, nr_reg_args, nr_stack_args;
2578	struct bpf_jit *jit = &tjit->common;
2579	int arg, bpf_arg_off;
2580	int i, j;
2581
2582	/* Support as many stack arguments as "mvc" instruction can handle. */
2583	nr_reg_args = min_t(int, m->nr_args, MAX_NR_REG_ARGS);
2584	nr_stack_args = m->nr_args - nr_reg_args;
2585	if (nr_stack_args > MAX_NR_STACK_ARGS)
2586		return -ENOTSUPP;
2587
2588	/* Return to %r14, since func_addr and %r0 are not available. */
2589	if ((!func_addr && !(flags & BPF_TRAMP_F_ORIG_STACK)) ||
2590	    (flags & BPF_TRAMP_F_INDIRECT))
2591		flags |= BPF_TRAMP_F_SKIP_FRAME;
2592
2593	/*
2594	 * Compute how many arguments we need to pass to BPF programs.
2595	 * The BPF ABI mirrors that of x86_64: arguments that are 16 bytes or
2596	 * smaller are packed into 1 or 2 registers; larger arguments are
2597	 * passed via pointers.
2598	 * In the s390x ABI, arguments that are 8 bytes or smaller are packed
2599	 * into a register; larger arguments are passed via pointers.
2600	 * We need to deal with this difference.
2601	 */
2602	nr_bpf_args = 0;
2603	for (i = 0; i < m->nr_args; i++) {
2604		if (m->arg_size[i] <= 8)
2605			nr_bpf_args += 1;
2606		else if (m->arg_size[i] <= 16)
2607			nr_bpf_args += 2;
2608		else
2609			return -ENOTSUPP;
2610	}
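	/*
	 * Example (illustrative): for func(u64 a, __int128 b), the loop
	 * above yields nr_bpf_args == 3 - one slot for "a", two for "b".
	 */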
2611
2612	/*
2613	 * Calculate the stack layout.
2614	 */
2615
2616	/*
2617	 * Allocate STACK_FRAME_OVERHEAD bytes for the callees. As the s390x
2618	 * ABI requires, put our backchain at the end of the allocated memory.
2619	 */
2620	tjit->stack_size = STACK_FRAME_OVERHEAD;
2621	tjit->backchain_off = tjit->stack_size - sizeof(u64);
2622	tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64));
2623	tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64));
2624	tjit->ip_off = alloc_stack(tjit, sizeof(u64));
2625	tjit->arg_cnt_off = alloc_stack(tjit, sizeof(u64));
2626	tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64));
2627	tjit->retval_off = alloc_stack(tjit, sizeof(u64));
2628	tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64));
2629	tjit->run_ctx_off = alloc_stack(tjit,
2630					sizeof(struct bpf_tramp_run_ctx));
2631	tjit->tccnt_off = alloc_stack(tjit, sizeof(u64));
2632	tjit->r14_off = alloc_stack(tjit, sizeof(u64) * 2);
2633	/*
2634	 * In accordance with the s390x ABI, the caller has allocated
2635	 * STACK_FRAME_OVERHEAD bytes for us. 8 of them contain the caller's
2636	 * backchain, and the rest we can use.
2637	 */
2638	tjit->stack_size -= STACK_FRAME_OVERHEAD - sizeof(u64);
2639	tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;
2640
2641	/* lgr %r1,%r15 */
2642	EMIT4(0xb9040000, REG_1, REG_15);
2643	/* aghi %r15,-stack_size */
2644	EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
2645	/* stg %r1,backchain_off(%r15) */
2646	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15,
2647		      tjit->backchain_off);
2648	/* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
2649	_EMIT6(0xd203f000 | tjit->tccnt_off,
2650	       0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
2651	/* stmg %r2,%rN,reg_args_off(%r15) */
2652	if (nr_reg_args)
2653		EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2,
2654			      REG_2 + (nr_reg_args - 1), REG_15,
2655			      tjit->reg_args_off);
2656	for (i = 0, j = 0; i < m->nr_args; i++) {
2657		if (i < MAX_NR_REG_ARGS)
2658			arg = REG_2 + i;
2659		else
2660			arg = tjit->orig_stack_args_off +
2661			      (i - MAX_NR_REG_ARGS) * sizeof(u64);
2662		bpf_arg_off = tjit->bpf_args_off + j * sizeof(u64);
2663		if (m->arg_size[i] <= 8) {
2664			if (i < MAX_NR_REG_ARGS)
2665				/* stg %arg,bpf_arg_off(%r15) */
2666				EMIT6_DISP_LH(0xe3000000, 0x0024, arg,
2667					      REG_0, REG_15, bpf_arg_off);
2668			else
2669				/* mvc bpf_arg_off(8,%r15),arg(%r15) */
2670				_EMIT6(0xd207f000 | bpf_arg_off,
2671				       0xf000 | arg);
2672			j += 1;
2673		} else {
2674			if (i < MAX_NR_REG_ARGS) {
2675				/* mvc bpf_arg_off(16,%r15),0(%arg) */
2676				_EMIT6(0xd20ff000 | bpf_arg_off,
2677				       reg2hex[arg] << 12);
2678			} else {
2679				/* lg %r1,arg(%r15) */
2680				EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_0,
2681					      REG_15, arg);
2682				/* mvc bpf_arg_off(16,%r15),0(%r1) */
2683				_EMIT6(0xd20ff000 | bpf_arg_off, 0x1000);
2684			}
2685			j += 2;
2686		}
2687	}
2688	/* stmg %r7,%r8,r7_r8_off(%r15) */
2689	EMIT6_DISP_LH(0xeb000000, 0x0024, REG_7, REG_8, REG_15,
2690		      tjit->r7_r8_off);
2691	/* stg %r14,r14_off(%r15) */
2692	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_14, REG_0, REG_15, tjit->r14_off);
2693
2694	if (flags & BPF_TRAMP_F_ORIG_STACK) {
2695		/*
2696		 * The ftrace trampoline puts the return address (which is the
2697		 * address of the original function + S390X_PATCH_SIZE) into
2698		 * %r0; see ftrace_shared_hotpatch_trampoline_br and
2699		 * ftrace_init_nop() for details.
2700		 */
2701
2702		/* lgr %r8,%r0 */
2703		EMIT4(0xb9040000, REG_8, REG_0);
2704	} else {
2705		/* %r8 = func_addr + S390X_PATCH_SIZE */
2706		load_imm64(jit, REG_8, (u64)func_addr + S390X_PATCH_SIZE);
2707	}
2708
2709	/*
2710	 * ip = func_addr;
2711	 * arg_cnt = m->nr_args;
2712	 */
2713
2714	if (flags & BPF_TRAMP_F_IP_ARG) {
2715		/* %r0 = func_addr */
2716		load_imm64(jit, REG_0, (u64)func_addr);
2717		/* stg %r0,ip_off(%r15) */
2718		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15,
2719			      tjit->ip_off);
2720	}
2721	/* lghi %r0,nr_bpf_args */
2722	EMIT4_IMM(0xa7090000, REG_0, nr_bpf_args);
2723	/* stg %r0,arg_cnt_off(%r15) */
2724	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15,
2725		      tjit->arg_cnt_off);
2726
2727	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2728		/*
2729		 * __bpf_tramp_enter(im);
2730		 */
2731
2732		/* %r1 = __bpf_tramp_enter */
2733		load_imm64(jit, REG_1, (u64)__bpf_tramp_enter);
2734		/* %r2 = im */
2735		load_imm64(jit, REG_2, (u64)im);
2736		/* %r1() */
2737		call_r1(jit);
2738	}
2739
2740	for (i = 0; i < fentry->nr_links; i++)
2741		if (invoke_bpf_prog(tjit, m, fentry->links[i],
2742				    flags & BPF_TRAMP_F_RET_FENTRY_RET))
2743			return -EINVAL;
2744
2745	if (fmod_ret->nr_links) {
2746		/*
2747		 * retval = 0;
2748		 */
2749
2750		/* xc retval_off(8,%r15),retval_off(%r15) */
2751		_EMIT6(0xd707f000 | tjit->retval_off,
2752		       0xf000 | tjit->retval_off);
2753
2754		for (i = 0; i < fmod_ret->nr_links; i++) {
2755			if (invoke_bpf_prog(tjit, m, fmod_ret->links[i], true))
2756				return -EINVAL;
2757
2758			/*
2759			 * if (retval)
2760			 *         goto do_fexit;
2761			 */
2762
2763			/* ltg %r0,retval_off(%r15) */
2764			EMIT6_DISP_LH(0xe3000000, 0x0002, REG_0, REG_0, REG_15,
2765				      tjit->retval_off);
2766			/* brcl 7,do_fexit */
2767			EMIT6_PCREL_RILC(0xc0040000, 7, tjit->do_fexit);
2768		}
2769	}
2770
2771	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2772		/*
2773		 * retval = func_addr(args);
2774		 */
2775
2776		/* lmg %r2,%rN,reg_args_off(%r15) */
2777		if (nr_reg_args)
2778			EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2,
2779				      REG_2 + (nr_reg_args - 1), REG_15,
2780				      tjit->reg_args_off);
2781		/* mvc stack_args_off(N,%r15),orig_stack_args_off(%r15) */
2782		if (nr_stack_args)
2783			_EMIT6(0xd200f000 |
2784				       (nr_stack_args * sizeof(u64) - 1) << 16 |
2785				       tjit->stack_args_off,
2786			       0xf000 | tjit->orig_stack_args_off);
2787		/* mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
2788		_EMIT6(0xd203f000 | STK_OFF_TCCNT, 0xf000 | tjit->tccnt_off);
2789		/* lgr %r1,%r8 */
2790		EMIT4(0xb9040000, REG_1, REG_8);
2791		/* %r1() */
2792		call_r1(jit);
2793		/* stg %r2,retval_off(%r15) */
2794		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
2795			      tjit->retval_off);
2796
2797		im->ip_after_call = jit->prg_buf + jit->prg;
2798
2799		/*
2800		 * The following nop will be patched by bpf_tramp_image_put().
2801		 */
2802
2803		/* brcl 0,im->ip_epilogue */
2804		EMIT6_PCREL_RILC(0xc0040000, 0, (u64)im->ip_epilogue);
2805	}
2806
2807	/* do_fexit: */
2808	tjit->do_fexit = jit->prg;
2809	for (i = 0; i < fexit->nr_links; i++)
2810		if (invoke_bpf_prog(tjit, m, fexit->links[i], false))
2811			return -EINVAL;
2812
2813	if (flags & BPF_TRAMP_F_CALL_ORIG) {
2814		im->ip_epilogue = jit->prg_buf + jit->prg;
2815
2816		/*
2817		 * __bpf_tramp_exit(im);
2818		 */
2819
2820		/* %r1 = __bpf_tramp_exit */
2821		load_imm64(jit, REG_1, (u64)__bpf_tramp_exit);
2822		/* %r2 = im */
2823		load_imm64(jit, REG_2, (u64)im);
2824		/* %r1() */
2825		call_r1(jit);
2826	}
2827
2828	/* lmg %r2,%rN,reg_args_off(%r15) */
2829	if ((flags & BPF_TRAMP_F_RESTORE_REGS) && nr_reg_args)
2830		EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2,
2831			      REG_2 + (nr_reg_args - 1), REG_15,
2832			      tjit->reg_args_off);
2833	/* lgr %r1,%r8 */
2834	if (!(flags & BPF_TRAMP_F_SKIP_FRAME))
2835		EMIT4(0xb9040000, REG_1, REG_8);
2836	/* lmg %r7,%r8,r7_r8_off(%r15) */
2837	EMIT6_DISP_LH(0xeb000000, 0x0004, REG_7, REG_8, REG_15,
2838		      tjit->r7_r8_off);
2839	/* lg %r14,r14_off(%r15) */
2840	EMIT6_DISP_LH(0xe3000000, 0x0004, REG_14, REG_0, REG_15, tjit->r14_off);
2841	/* lg %r2,retval_off(%r15) */
2842	if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET))
2843		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15,
2844			      tjit->retval_off);
2845	/* mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
2846	_EMIT6(0xd203f000 | (tjit->stack_size + STK_OFF_TCCNT),
2847	       0xf000 | tjit->tccnt_off);
2848	/* aghi %r15,stack_size */
2849	EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
2850	/* Emit an expoline for the following indirect jump. */
2851	if (nospec_uses_trampoline())
2852		emit_expoline(jit);
2853	if (flags & BPF_TRAMP_F_SKIP_FRAME)
2854		/* br %r14 */
2855		_EMIT2(0x07fe);
2856	else
2857		/* br %r1 */
2858		_EMIT2(0x07f1);
2859
2860	emit_r1_thunk(jit);
2861
2862	return 0;
2863}
2864
2865int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
2866			     struct bpf_tramp_links *tlinks, void *orig_call)
2867{
2868	struct bpf_tramp_image im;
2869	struct bpf_tramp_jit tjit;
2870	int ret;
2871
2872	memset(&tjit, 0, sizeof(tjit));
2873
2874	ret = __arch_prepare_bpf_trampoline(&im, &tjit, m, flags,
2875					    tlinks, orig_call);
2876
2877	return ret < 0 ? ret : tjit.common.prg;
2878}
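
/*
 * This is a dry run for sizing only (assuming, as elsewhere in this
 * file, that the emitters merely advance prg while prg_buf is NULL):
 * no code is written, and the on-stack "im" exists only so that address
 * operands can be formed.
 */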
2879
2880int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
2881				void *image_end, const struct btf_func_model *m,
2882				u32 flags, struct bpf_tramp_links *tlinks,
2883				void *func_addr)
2884{
2885	struct bpf_tramp_jit tjit;
2886	int ret;
2887
2888	/* Compute offsets, check whether the code fits. */
2889	memset(&tjit, 0, sizeof(tjit));
2890	ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
2891					    tlinks, func_addr);
2892
2893	if (ret < 0)
2894		return ret;
2895	if (tjit.common.prg > (char *)image_end - (char *)image)
2896		/*
2897		 * Use the same error code as for exceeding
2898		 * BPF_MAX_TRAMP_LINKS.
2899		 */
2900		return -E2BIG;
2901
2902	tjit.common.prg = 0;
2903	tjit.common.prg_buf = image;
2904	ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
2905					    tlinks, func_addr);
2906
2907	return ret < 0 ? ret : tjit.common.prg;
2908}
2909
2910bool bpf_jit_supports_subprog_tailcalls(void)
2911{
2912	return true;
2913}
2914
2915bool bpf_jit_supports_arena(void)
2916{
2917	return true;
2918}
2919
2920bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
2921{
2922	/*
2923	 * Currently the verifier uses this function only to check which
2924	 * atomic stores to arena are supported, and they all are.
2925	 */
2926	return true;
2927}
2928
2929bool bpf_jit_supports_exceptions(void)
2930{
2931	/*
2932	 * Exceptions require unwinding support, which is always available,
2933	 * because the kernel is always built with backchain.
2934	 */
2935	return true;
2936}
2937
2938void arch_bpf_stack_walk(bool (*consume_fn)(void *, u64, u64, u64),
2939			 void *cookie)
2940{
2941	unsigned long addr, prev_addr = 0;
2942	struct unwind_state state;
2943
2944	unwind_for_each_frame(&state, NULL, NULL, 0) {
2945		addr = unwind_get_return_address(&state);
2946		if (!addr)
2947			break;
2948		/*
2949		 * addr is a return address and state.sp is the value of %r15
2950		 * at this address. exception_cb needs %r15 at entry to the
2951		 * function containing addr, so take the next state.sp.
2952		 *
2953		 * There is no bp, and the exception_cb prog does not need one
2954		 * to perform a quasi-longjmp. The common code requires a
2955		 * non-zero bp, so pass sp there as well.
2956		 */
2957		if (prev_addr && !consume_fn(cookie, prev_addr, state.sp,
2958					     state.sp))
2959			break;
2960		prev_addr = addr;
2961	}
2962}