v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * bpf_jit_comp64.c: eBPF JIT compiler
   4 *
   5 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
   6 *		  IBM Corporation
   7 *
   8 * Based on the powerpc classic BPF JIT compiler by Matt Evans
   9 */
  10#include <linux/moduleloader.h>
  11#include <asm/cacheflush.h>
  12#include <asm/asm-compat.h>
  13#include <linux/netdevice.h>
  14#include <linux/filter.h>
  15#include <linux/if_vlan.h>
  16#include <asm/kprobes.h>
  17#include <linux/bpf.h>
  18#include <asm/security_features.h>
  19
  20#include "bpf_jit.h"
  21
  22/*
  23 * Stack layout:
  24 * Ensure the top half (up to local_tmp_var) stays consistent
  25 * with our redzone usage.
  26 *
  27 *		[	prev sp		] <-------------
  28 *		[   nv gpr save area	] 5*8		|
  29 *		[    tail_call_cnt	] 8		|
  30 *		[    local_tmp_var	] 16		|
  31 * fp (r31) -->	[   ebpf stack space	] up to 512	|
  32 *		[     frame header	] 32/112	|
  33 * sp (r1) --->	[    stack pointer	] --------------
  34 */
  35
  36/* for GPR non-volatile registers BPF_REG_6 to 10 */
  37#define BPF_PPC_STACK_SAVE	(5*8)
  38/* for bpf JIT code internal usage */
  39#define BPF_PPC_STACK_LOCALS	24
  40/* stack frame excluding BPF stack, ensure this is quadword aligned */
  41#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
  42				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
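/*
 * Worked example: with the ELF v2 ABI (STACK_FRAME_MIN_SIZE = 32) this
 * comes to 32 + 24 + 40 = 96 bytes; the program's own stack area
 * (ctx->stack_size, up to 512 bytes) is added on top of this whenever a
 * frame is actually set up.
 */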
  43
  44/* BPF register usage */
  45#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
  46#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)
  47
  48/* BPF to ppc register mappings */
  49void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
  50{
  51	/* function return value */
  52	ctx->b2p[BPF_REG_0] = _R8;
  53	/* function arguments */
  54	ctx->b2p[BPF_REG_1] = _R3;
  55	ctx->b2p[BPF_REG_2] = _R4;
  56	ctx->b2p[BPF_REG_3] = _R5;
  57	ctx->b2p[BPF_REG_4] = _R6;
  58	ctx->b2p[BPF_REG_5] = _R7;
  59	/* non-volatile registers */
  60	ctx->b2p[BPF_REG_6] = _R27;
  61	ctx->b2p[BPF_REG_7] = _R28;
  62	ctx->b2p[BPF_REG_8] = _R29;
  63	ctx->b2p[BPF_REG_9] = _R30;
  64	/* frame pointer aka BPF_REG_10 */
  65	ctx->b2p[BPF_REG_FP] = _R31;
  66	/* eBPF jit internal registers */
  67	ctx->b2p[BPF_REG_AX] = _R12;
  68	ctx->b2p[TMP_REG_1] = _R9;
  69	ctx->b2p[TMP_REG_2] = _R10;
  70}
  71
  72/* PPC NVR range -- update this if we ever use NVRs below r27 */
  73#define BPF_PPC_NVR_MIN		_R27
  74
  75static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
  76{
  77	/*
  78	 * We only need a stack frame if:
  79	 * - we call other functions (kernel helpers), or
  80	 * - the bpf program uses its stack area
  81	 * The latter condition is deduced from the usage of BPF_REG_FP
  82	 */
  83	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
  84}
  85
  86/*
  87 * When not setting up our own stackframe, the redzone usage is:
  88 *
  89 *		[	prev sp		] <-------------
  90 *		[	  ...       	] 		|
  91 * sp (r1) --->	[    stack pointer	] --------------
  92 *		[   nv gpr save area	] 5*8
  93 *		[    tail_call_cnt	] 8
  94 *		[    local_tmp_var	] 16
  95 *		[   unused red zone	] 208 bytes protected
  96 */
  97static int bpf_jit_stack_local(struct codegen_context *ctx)
  98{
  99	if (bpf_has_stack_frame(ctx))
 100		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
 101	else
 102		return -(BPF_PPC_STACK_SAVE + 24);
 103}
 104
 105static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
 106{
 107	return bpf_jit_stack_local(ctx) + 16;
 108}
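/*
 * Worked example for the frameless case: BPF_PPC_STACK_SAVE is 40, so
 * local_tmp_var lives at -64(r1) and tail_call_cnt at -64 + 16 = -48(r1),
 * matching both the redzone picture above and the -(BPF_PPC_STACK_SAVE + 8)
 * store in the prologue.
 */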
 109
 110static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
 111{
 112	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
 113		return (bpf_has_stack_frame(ctx) ?
 114			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
 115				- (8 * (32 - reg));
 116
 117	pr_err("BPF JIT is asking about unknown registers");
 118	BUG();
 119}
 120
 121void bpf_jit_realloc_regs(struct codegen_context *ctx)
 122{
 123}
 124
 125void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 126{
 127	int i;
 128
 129#ifndef CONFIG_PPC_KERNEL_PCREL
 130	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
 131		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
 132#endif
 133
 134	/*
 135	 * Initialize tail_call_cnt if we do tail calls.
 136	 * Otherwise, put in NOPs so that it can be skipped when we are
 137	 * invoked through a tail call.
 138	 */
 139	if (ctx->seen & SEEN_TAILCALL) {
 140		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
 141		/* this goes in the redzone */
 142		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
 143	} else {
 144		EMIT(PPC_RAW_NOP());
 145		EMIT(PPC_RAW_NOP());
 146	}
 147
 148	if (bpf_has_stack_frame(ctx)) {
 149		/*
 150		 * We need a stack frame, but we don't necessarily need to
 151		 * save/restore LR unless we call other functions
 152		 */
 153		if (ctx->seen & SEEN_FUNC) {
 154			EMIT(PPC_RAW_MFLR(_R0));
 155			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
 156		}
 157
 158		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
 159	}
 160
 161	/*
 162	 * Back up non-volatile regs -- BPF registers 6-10
 163	 * If we haven't created our own stack frame, we save these
 164	 * in the protected zone below the previous stack frame
 165	 */
 166	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
 167		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
 168			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
 169
 170	/* Setup frame pointer to point to the bpf stack area */
 171	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
 172		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
 173				STACK_FRAME_MIN_SIZE + ctx->stack_size));
 174}
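/*
 * Roughly, for a program that calls helpers, uses a 512-byte stack and
 * does no tail calls (ELF v2, so a 96 + 512 = 608 byte frame), the
 * prologue above comes out as something like:
 *
 *	ld	r2, <paca kernel_toc offset>(r13)
 *	nop				; tail_call_cnt init if needed
 *	nop
 *	mflr	r0
 *	std	r0, 16(r1)
 *	stdu	r1, -608(r1)
 *	std	r27..r31, ...(r1)	; only the NVRs actually seen
 *	addi	r31, r1, 544		; fp = r1 + 32 + 512
 */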
 175
 176static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
 177{
 178	int i;
 179
 180	/* Restore NVRs */
 181	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
 182		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
 183			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
 184
 185	/* Tear down our stack frame */
 186	if (bpf_has_stack_frame(ctx)) {
 187		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
 188		if (ctx->seen & SEEN_FUNC) {
 189			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
 190			EMIT(PPC_RAW_MTLR(_R0));
 191		}
 192	}
 193}
 194
 195void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 196{
 197	bpf_jit_emit_common_epilogue(image, ctx);
 198
 199	/* Move result to r3 */
 200	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
 201
 202	EMIT(PPC_RAW_BLR());
 203}
 204
 205static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
 206{
 207	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
 208	long reladdr;
 209
 210	if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
 211		return -EINVAL;
 212
 213	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
 214		reladdr = func_addr - CTX_NIA(ctx);
 215
 216		if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
 217			pr_err("eBPF: address of %ps out of range of pcrel address.\n",
 218				(void *)func);
 219			return -ERANGE;
 220		}
 221		/* pla r12,addr */
 222		EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
 223		EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
 224		EMIT(PPC_RAW_MTCTR(_R12));
 225		EMIT(PPC_RAW_BCTR());
 226
 227	} else {
 228		reladdr = func_addr - kernel_toc_addr();
 229		if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
 230			pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
 231			return -ERANGE;
 232		}
 233
 234		EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
 235		EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
 236		EMIT(PPC_RAW_MTCTR(_R12));
 237		EMIT(PPC_RAW_BCTRL());
 238	}
 239
 240	return 0;
 241}
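/*
 * Example of the TOC-relative split above: for reladdr = 0x12345678,
 * PPC_HA() yields 0x1234 and PPC_LO() yields 0x5678, so the emitted pair
 * is "addis r12, r2, 0x1234; addi r12, r12, 0x5678". When the low half
 * has bit 15 set, PPC_HA() adds one to compensate for the sign-extending
 * addi.
 */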
 242
 243int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
 244{
 245	unsigned int i, ctx_idx = ctx->idx;
 246
 247	if (WARN_ON_ONCE(func && is_module_text_address(func)))
 248		return -EINVAL;
 249
 250	/* skip past descriptor if elf v1 */
 251	func += FUNCTION_DESCR_SIZE;
 252
 253	/* Load function address into r12 */
 254	PPC_LI64(_R12, func);
 255
 256	/* For bpf-to-bpf function calls, the callee's address is unknown
 257	 * until the last extra pass. As seen above, we use PPC_LI64() to
 258	 * load the callee's address, but this may optimize the number of
 259	 * instructions required based on the nature of the address.
 260	 *
 261	 * Since we don't want the number of instructions emitted to increase,
 262	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
 263	 * we always have a five-instruction sequence, which is the maximum
 264	 * that PPC_LI64() can emit.
 265	 */
 266	if (!image)
 267		for (i = ctx->idx - ctx_idx; i < 5; i++)
 268			EMIT(PPC_RAW_NOP());
 269
 270	EMIT(PPC_RAW_MTCTR(_R12));
 271	EMIT(PPC_RAW_BCTRL());
 272
 273	return 0;
 274}
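/*
 * The five-instruction maximum referred to above is the generic 64-bit
 * immediate load; for example, 0x1122334455667788 expands to roughly:
 *
 *	lis	r12, 0x1122
 *	ori	r12, r12, 0x3344
 *	sldi	r12, r12, 32
 *	oris	r12, r12, 0x5566
 *	ori	r12, r12, 0x7788
 */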
 275
 276static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 277{
 278	/*
  279	 * By now, the eBPF program has already set up parameters in r3, r4 and r5
 280	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
 281	 * r4/BPF_REG_2 - pointer to bpf_array
 282	 * r5/BPF_REG_3 - index in bpf_array
 283	 */
 284	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
 285	int b2p_index = bpf_to_ppc(BPF_REG_3);
 286	int bpf_tailcall_prologue_size = 8;
 287
 288	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
 289		bpf_tailcall_prologue_size += 4; /* skip past the toc load */
 290
 291	/*
 292	 * if (index >= array->map.max_entries)
 293	 *   goto out;
 294	 */
 295	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
 296	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
 297	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
 298	PPC_BCC_SHORT(COND_GE, out);
 299
 300	/*
 301	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
 302	 *   goto out;
 303	 */
 304	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
 305	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
 306	PPC_BCC_SHORT(COND_GE, out);
 307
 308	/*
 309	 * tail_call_cnt++;
 310	 */
 311	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
 312	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
 313
 314	/* prog = array->ptrs[index]; */
 315	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
 316	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
 317	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
 318
 319	/*
 320	 * if (prog == NULL)
 321	 *   goto out;
 322	 */
 323	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
 324	PPC_BCC_SHORT(COND_EQ, out);
 325
 326	/* goto *(prog->bpf_func + prologue_size); */
 327	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
 328	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
 329			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
 330	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
 331
 332	/* tear down stack, restore NVRs, ... */
 333	bpf_jit_emit_common_epilogue(image, ctx);
 334
 335	EMIT(PPC_RAW_BCTR());
 336
 337	/* out: */
 338	return 0;
 339}
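/*
 * The prologue-skip arithmetic above: the two tail_call_cnt setup
 * instructions are 8 bytes, and on ELF v2 the kernel TOC load adds
 * another 4, so a tail call enters the callee 12 bytes past its entry
 * point, skipping both and thereby preserving the caller's
 * tail_call_cnt.
 */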
 340
 341/*
 342 * We spill into the redzone always, even if the bpf program has its own stackframe.
 343 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 344 */
 345void bpf_stf_barrier(void);
 346
 347asm (
 348"		.global bpf_stf_barrier		;"
 349"	bpf_stf_barrier:			;"
 350"		std	21,-64(1)		;"
 351"		std	22,-56(1)		;"
 352"		sync				;"
 353"		ld	21,-64(1)		;"
 354"		ld	22,-56(1)		;"
 355"		ori	31,31,0			;"
 356"		.rept 14			;"
 357"		b	1f			;"
 358"	1:					;"
 359"		.endr				;"
 360"		blr				;"
 361);
 362
 363/* Assemble the body code between the prologue & epilogue */
 364int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
 365		       u32 *addrs, int pass, bool extra_pass)
 
 366{
 367	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
 368	const struct bpf_insn *insn = fp->insnsi;
 369	int flen = fp->len;
 370	int i, ret;
 371
 372	/* Start of epilogue code - will only be valid 2nd pass onwards */
 373	u32 exit_addr = addrs[flen];
 374
 375	for (i = 0; i < flen; i++) {
 376		u32 code = insn[i].code;
 377		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
 378		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
 379		u32 size = BPF_SIZE(code);
 380		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
 381		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
 382		u32 save_reg, ret_reg;
 383		s16 off = insn[i].off;
 384		s32 imm = insn[i].imm;
 385		bool func_addr_fixed;
 386		u64 func_addr;
 387		u64 imm64;
 
 388		u32 true_cond;
 389		u32 tmp_idx;
 390		int j;
 391
 392		/*
 393		 * addrs[] maps a BPF bytecode address into a real offset from
 394		 * the start of the body code.
 395		 */
 396		addrs[i] = ctx->idx * 4;
 397
 398		/*
 399		 * As an optimization, we note down which non-volatile registers
 400		 * are used so that we can only save/restore those in our
 401		 * prologue and epilogue. We do this here regardless of whether
 402		 * the actual BPF instruction uses src/dst registers or not
 403		 * (for instance, BPF_CALL does not use them). The expectation
 404		 * is that those instructions will have src_reg/dst_reg set to
 405		 * 0. Even otherwise, we just lose some prologue/epilogue
 406		 * optimization but everything else should work without
 407		 * any issues.
 408		 */
 409		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
 410			bpf_set_seen_register(ctx, dst_reg);
 411		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
 412			bpf_set_seen_register(ctx, src_reg);
 413
 414		switch (code) {
 415		/*
 416		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
 417		 */
 418		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
 419		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
 420			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
 421			goto bpf_alu32_trunc;
 422		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
 423		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
 424			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
 425			goto bpf_alu32_trunc;
 426		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
 427		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
 428			if (!imm) {
 429				goto bpf_alu32_trunc;
 430			} else if (imm >= -32768 && imm < 32768) {
 431				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
 432			} else {
 433				PPC_LI32(tmp1_reg, imm);
 434				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
 435			}
 436			goto bpf_alu32_trunc;
 437		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
 
 438		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
 439			if (!imm) {
 440				goto bpf_alu32_trunc;
 441			} else if (imm > -32768 && imm <= 32768) {
 442				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
 443			} else {
 444				PPC_LI32(tmp1_reg, imm);
 445				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
 446			}
 447			goto bpf_alu32_trunc;
 448		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
 449		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
 450			if (BPF_CLASS(code) == BPF_ALU)
 451				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
 452			else
 453				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
 454			goto bpf_alu32_trunc;
 455		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
 456		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
 457			if (imm >= -32768 && imm < 32768)
 458				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
 459			else {
 460				PPC_LI32(tmp1_reg, imm);
 461				if (BPF_CLASS(code) == BPF_ALU)
 462					EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
 
 463				else
 464					EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
 
 465			}
 466			goto bpf_alu32_trunc;
 467		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
 468		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
 469			if (BPF_OP(code) == BPF_MOD) {
 470				EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
 471				EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
 472				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
 
 473			} else
 474				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
 475			goto bpf_alu32_trunc;
 476		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
 477		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
 478			if (BPF_OP(code) == BPF_MOD) {
 479				EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
 480				EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
 481				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
 
 482			} else
 483				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
 484			break;
 485		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
 486		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
 487		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
 488		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
 489			if (imm == 0)
 490				return -EINVAL;
 491			if (imm == 1) {
 492				if (BPF_OP(code) == BPF_DIV) {
 493					goto bpf_alu32_trunc;
 494				} else {
 495					EMIT(PPC_RAW_LI(dst_reg, 0));
 496					break;
 497				}
 498			}
 499
 500			PPC_LI32(tmp1_reg, imm);
 501			switch (BPF_CLASS(code)) {
 502			case BPF_ALU:
 503				if (BPF_OP(code) == BPF_MOD) {
 504					EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
 505					EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
 506					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
 507				} else
 508					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
 
 509				break;
 510			case BPF_ALU64:
 511				if (BPF_OP(code) == BPF_MOD) {
 512					EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
 513					EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
 514					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
 515				} else
 516					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
 
 517				break;
 518			}
 519			goto bpf_alu32_trunc;
 520		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
 521		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
 522			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
 523			goto bpf_alu32_trunc;
 524
 525		/*
 526		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
 527		 */
 528		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
 529		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
 530			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
 531			goto bpf_alu32_trunc;
 532		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
 533		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
 534			if (!IMM_H(imm))
 535				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
 536			else {
 537				/* Sign-extended */
 538				PPC_LI32(tmp1_reg, imm);
 539				EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
 540			}
 541			goto bpf_alu32_trunc;
 542		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
 543		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
 544			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
 545			goto bpf_alu32_trunc;
 546		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
 547		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
 548			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
 549				/* Sign-extended */
 550				PPC_LI32(tmp1_reg, imm);
 551				EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
 552			} else {
 553				if (IMM_L(imm))
 554					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
 555				if (IMM_H(imm))
 556					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
 557			}
 558			goto bpf_alu32_trunc;
 559		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
 560		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
 561			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
 562			goto bpf_alu32_trunc;
 563		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
 564		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
 565			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
 566				/* Sign-extended */
 567				PPC_LI32(tmp1_reg, imm);
 568				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
 569			} else {
 570				if (IMM_L(imm))
 571					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
 572				if (IMM_H(imm))
 573					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
 574			}
 575			goto bpf_alu32_trunc;
 576		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
 577			/* slw clears top 32 bits */
 578			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
 579			/* skip zero extension move, but set address map. */
 580			if (insn_is_zext(&insn[i + 1]))
 581				addrs[++i] = ctx->idx * 4;
 582			break;
 583		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
 584			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
 585			break;
  586		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
 587			/* with imm 0, we still need to clear top 32 bits */
 588			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
 589			if (insn_is_zext(&insn[i + 1]))
 590				addrs[++i] = ctx->idx * 4;
 591			break;
  592		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
 593			if (imm != 0)
 594				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
 595			break;
 596		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
 597			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
 598			if (insn_is_zext(&insn[i + 1]))
 599				addrs[++i] = ctx->idx * 4;
 600			break;
 601		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
 602			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
 603			break;
 604		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
 605			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
 606			if (insn_is_zext(&insn[i + 1]))
 607				addrs[++i] = ctx->idx * 4;
 608			break;
 609		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
 610			if (imm != 0)
 611				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
 612			break;
 613		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
 614			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
 615			goto bpf_alu32_trunc;
 616		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
 617			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
 618			break;
 619		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
 620			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
 621			goto bpf_alu32_trunc;
 622		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
 623			if (imm != 0)
 624				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
 625			break;
 626
 627		/*
 628		 * MOV
 629		 */
 630		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
 631		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
 632			if (imm == 1) {
 633				/* special mov32 for zext */
 634				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
 635				break;
 636			}
 637			EMIT(PPC_RAW_MR(dst_reg, src_reg));
 638			goto bpf_alu32_trunc;
 639		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
 640		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
 641			PPC_LI32(dst_reg, imm);
 642			if (imm < 0)
 643				goto bpf_alu32_trunc;
 644			else if (insn_is_zext(&insn[i + 1]))
 645				addrs[++i] = ctx->idx * 4;
 646			break;
 647
 648bpf_alu32_trunc:
 649		/* Truncate to 32-bits */
 650		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
 651			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
 652		break;
 653
 654		/*
 655		 * BPF_FROM_BE/LE
 656		 */
 657		case BPF_ALU | BPF_END | BPF_FROM_LE:
 658		case BPF_ALU | BPF_END | BPF_FROM_BE:
 659#ifdef __BIG_ENDIAN__
 660			if (BPF_SRC(code) == BPF_FROM_BE)
 661				goto emit_clear;
 662#else /* !__BIG_ENDIAN__ */
 663			if (BPF_SRC(code) == BPF_FROM_LE)
 664				goto emit_clear;
 665#endif
 666			switch (imm) {
 667			case 16:
 668				/* Rotate 8 bits left & mask with 0x0000ff00 */
 669				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
 670				/* Rotate 8 bits right & insert LSB to reg */
 671				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
 672				/* Move result back to dst_reg */
 673				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
 674				break;
 675			case 32:
 676				/*
 677				 * Rotate word left by 8 bits:
 678				 * 2 bytes are already in their final position
 679				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
 680				 */
 681				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
 682				/* Rotate 24 bits and insert byte 1 */
 683				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
 684				/* Rotate 24 bits and insert byte 3 */
 685				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
 686				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
 687				break;
 688			case 64:
 689				/* Store the value to stack and then use byte-reverse loads */
 690				EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
 691				EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
 692				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
 693					EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
 694				} else {
 695					EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
 696					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
 697						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
 698					EMIT(PPC_RAW_LI(tmp2_reg, 4));
 699					EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
 700					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 701						EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
 702					EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
 703				}
 704				break;
 705			}
 706			break;
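			/*
			 * Worked example for the 32-bit swap above: with
			 * dst = 0x11223344, the rlwinm leaves 0x22334411
			 * (0x33 and 0x11 already in place), and the two
			 * rlwimi then insert 0x44 into byte 1 and 0x22 into
			 * byte 3, giving 0x44332211.
			 */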
 707
 708emit_clear:
 709			switch (imm) {
 710			case 16:
 711				/* zero-extend 16 bits into 64 bits */
 712				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
 713				if (insn_is_zext(&insn[i + 1]))
 714					addrs[++i] = ctx->idx * 4;
 715				break;
 716			case 32:
 717				if (!fp->aux->verifier_zext)
 718					/* zero-extend 32 bits into 64 bits */
 719					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
 720				break;
 721			case 64:
 722				/* nop */
 723				break;
 724			}
 725			break;
 726
 727		/*
 728		 * BPF_ST NOSPEC (speculation barrier)
 729		 */
 730		case BPF_ST | BPF_NOSPEC:
 731			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
 732					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
 733				break;
 734
 735			switch (stf_barrier) {
 736			case STF_BARRIER_EIEIO:
 737				EMIT(PPC_RAW_EIEIO() | 0x02000000);
 738				break;
 739			case STF_BARRIER_SYNC_ORI:
 740				EMIT(PPC_RAW_SYNC());
 741				EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
 742				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
 743				break;
 744			case STF_BARRIER_FALLBACK:
 745				ctx->seen |= SEEN_FUNC;
 746				PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
 747				EMIT(PPC_RAW_MTCTR(_R12));
 748				EMIT(PPC_RAW_BCTRL());
 749				break;
 750			case STF_BARRIER_NONE:
 751				break;
 752			}
 753			break;
 754
 755		/*
 756		 * BPF_ST(X)
 757		 */
 758		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
 759		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
 760			if (BPF_CLASS(code) == BPF_ST) {
 761				EMIT(PPC_RAW_LI(tmp1_reg, imm));
 762				src_reg = tmp1_reg;
 763			}
 764			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
 765			break;
 766		case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
 767		case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
 768			if (BPF_CLASS(code) == BPF_ST) {
 769				EMIT(PPC_RAW_LI(tmp1_reg, imm));
 770				src_reg = tmp1_reg;
 771			}
 772			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
 773			break;
 774		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
 775		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
 776			if (BPF_CLASS(code) == BPF_ST) {
 777				PPC_LI32(tmp1_reg, imm);
 778				src_reg = tmp1_reg;
 779			}
 780			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
 781			break;
 782		case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
 783		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
 784			if (BPF_CLASS(code) == BPF_ST) {
 785				PPC_LI32(tmp1_reg, imm);
 786				src_reg = tmp1_reg;
 787			}
 788			if (off % 4) {
 789				EMIT(PPC_RAW_LI(tmp2_reg, off));
 790				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
 791			} else {
 792				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
 793			}
 
 794			break;
 795
 796		/*
 797		 * BPF_STX ATOMIC (atomic ops)
 798		 */
 799		case BPF_STX | BPF_ATOMIC | BPF_W:
 800		case BPF_STX | BPF_ATOMIC | BPF_DW:
 801			save_reg = tmp2_reg;
 802			ret_reg = src_reg;
 803
 804			/* Get offset into TMP_REG_1 */
 805			EMIT(PPC_RAW_LI(tmp1_reg, off));
 806			tmp_idx = ctx->idx * 4;
 
 807			/* load value from memory into TMP_REG_2 */
 808			if (size == BPF_DW)
 809				EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
 810			else
 811				EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
 812
 813			/* Save old value in _R0 */
 814			if (imm & BPF_FETCH)
 815				EMIT(PPC_RAW_MR(_R0, tmp2_reg));
 816
 817			switch (imm) {
 818			case BPF_ADD:
 819			case BPF_ADD | BPF_FETCH:
 820				EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
 821				break;
 822			case BPF_AND:
 823			case BPF_AND | BPF_FETCH:
 824				EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
 825				break;
 826			case BPF_OR:
 827			case BPF_OR | BPF_FETCH:
 828				EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
 829				break;
 830			case BPF_XOR:
 831			case BPF_XOR | BPF_FETCH:
 832				EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
 833				break;
 834			case BPF_CMPXCHG:
 835				/*
 836				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
 837				 * in src_reg for other cases.
 838				 */
 839				ret_reg = bpf_to_ppc(BPF_REG_0);
 840
 841				/* Compare with old value in BPF_R0 */
 842				if (size == BPF_DW)
 843					EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
 844				else
 845					EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
 846				/* Don't set if different from old value */
 847				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
 848				fallthrough;
 849			case BPF_XCHG:
 850				save_reg = src_reg;
 851				break;
 852			default:
 853				pr_err_ratelimited(
 854					"eBPF filter atomic op code %02x (@%d) unsupported\n",
 855					code, i);
 856				return -EOPNOTSUPP;
 857			}
 858
 859			/* store new value */
 860			if (size == BPF_DW)
 861				EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
 862			else
 863				EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
 864			/* we're done if this succeeded */
 865			PPC_BCC_SHORT(COND_NE, tmp_idx);
 866
 867			if (imm & BPF_FETCH) {
 868				EMIT(PPC_RAW_MR(ret_reg, _R0));
 869				/*
 870				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
 871				 * For context, see commit 39491867ace5.
 872				 */
 873				if (size != BPF_DW && imm == BPF_CMPXCHG &&
 874				    insn_is_zext(&insn[i + 1]))
 875					addrs[++i] = ctx->idx * 4;
 876			}
 877			break;
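			/*
			 * The sequence above is the usual larx/stcx. retry
			 * loop; for BPF_ADD it behaves roughly like:
			 *
			 *	do {
			 *		old = *(u64 *)(dst + off);	// ldarx
			 *		new = old + src;
			 *	} while (!store_conditional(dst + off, new));	// stdcx.
			 *	if (fetch)
			 *		src = old;
			 */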
 878
 879		/*
 880		 * BPF_LDX
 881		 */
 882		/* dst = *(u8 *)(ul) (src + off) */
 883		case BPF_LDX | BPF_MEM | BPF_B:
 884		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
 
 885		/* dst = *(u16 *)(ul) (src + off) */
 886		case BPF_LDX | BPF_MEM | BPF_H:
 887		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
 
 888		/* dst = *(u32 *)(ul) (src + off) */
 889		case BPF_LDX | BPF_MEM | BPF_W:
 890		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
 
 891		/* dst = *(u64 *)(ul) (src + off) */
 892		case BPF_LDX | BPF_MEM | BPF_DW:
 893		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
 894			/*
 895			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
 896			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
 897			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
 898			 * set dst_reg=0 and move on.
 899			 */
 900			if (BPF_MODE(code) == BPF_PROBE_MEM) {
 901				EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
 902				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
 903					PPC_LI64(tmp2_reg, 0x8000000000000000ul);
 904				else /* BOOK3S_64 */
 905					PPC_LI64(tmp2_reg, PAGE_OFFSET);
 906				EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
 907				PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
 908				EMIT(PPC_RAW_LI(dst_reg, 0));
 909				/*
 910				 * Check if 'off' is word aligned for BPF_DW, because
 911				 * we might generate two instructions.
 912				 */
 913				if (BPF_SIZE(code) == BPF_DW && (off & 3))
 914					PPC_JMP((ctx->idx + 3) * 4);
 915				else
 916					PPC_JMP((ctx->idx + 2) * 4);
 917			}
 918
 919			switch (size) {
 920			case BPF_B:
 921				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
 922				break;
 923			case BPF_H:
 924				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
 925				break;
 926			case BPF_W:
 927				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
 928				break;
 929			case BPF_DW:
 930				if (off % 4) {
 931					EMIT(PPC_RAW_LI(tmp1_reg, off));
 932					EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
 933				} else {
 934					EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
 935				}
 936				break;
 937			}
 938
 939			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
 940				addrs[++i] = ctx->idx * 4;
 941
 942			if (BPF_MODE(code) == BPF_PROBE_MEM) {
 943				ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
 944							    ctx->idx - 1, 4, dst_reg);
 945				if (ret)
 946					return ret;
 947			}
 948			break;
 949
 950		/*
 951		 * Doubleword load
 952		 * 16 byte instruction that uses two 'struct bpf_insn'
 953		 */
 954		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
 955			imm64 = ((u64)(u32) insn[i].imm) |
 956				    (((u64)(u32) insn[i+1].imm) << 32);
 957			tmp_idx = ctx->idx;
 958			PPC_LI64(dst_reg, imm64);
 959			/* padding to allow full 5 instructions for later patching */
 960			if (!image)
 961				for (j = ctx->idx - tmp_idx; j < 5; j++)
 962					EMIT(PPC_RAW_NOP());
 963			/* Adjust for two bpf instructions */
 964			addrs[++i] = ctx->idx * 4;
 
 965			break;
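		/*
		 * Example of the split encoding handled above: for the BPF
		 * instruction "r1 = 0x1122334455667788 ll", the first
		 * bpf_insn carries imm = 0x55667788 and the second carries
		 * imm = 0x11223344, so imm64 reassembles the full constant
		 * before PPC_LI64() materializes it.
		 */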
 966
 967		/*
 968		 * Return/Exit
 969		 */
 970		case BPF_JMP | BPF_EXIT:
 971			/*
 972			 * If this isn't the very last instruction, branch to
 973			 * the epilogue. If we _are_ the last instruction,
 974			 * we'll just fall through to the epilogue.
 975			 */
 976			if (i != flen - 1) {
 977				ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
 978				if (ret)
 979					return ret;
 980			}
 981			/* else fall through to the epilogue */
 982			break;
 983
 984		/*
 985		 * Call kernel helper or bpf function
 986		 */
 987		case BPF_JMP | BPF_CALL:
 988			ctx->seen |= SEEN_FUNC;
 
 989
 990			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
 991						    &func_addr, &func_addr_fixed);
 992			if (ret < 0)
 993				return ret;
 994
 995			if (func_addr_fixed)
 996				ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
 997			else
 998				ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
 999
1000			if (ret)
1001				return ret;
1002
1003			/* move return value from r3 to BPF_REG_0 */
1004			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
1005			break;
1006
1007		/*
1008		 * Jumps and branches
1009		 */
1010		case BPF_JMP | BPF_JA:
1011			PPC_JMP(addrs[i + 1 + off]);
1012			break;
1013
1014		case BPF_JMP | BPF_JGT | BPF_K:
1015		case BPF_JMP | BPF_JGT | BPF_X:
1016		case BPF_JMP | BPF_JSGT | BPF_K:
1017		case BPF_JMP | BPF_JSGT | BPF_X:
1018		case BPF_JMP32 | BPF_JGT | BPF_K:
1019		case BPF_JMP32 | BPF_JGT | BPF_X:
1020		case BPF_JMP32 | BPF_JSGT | BPF_K:
1021		case BPF_JMP32 | BPF_JSGT | BPF_X:
1022			true_cond = COND_GT;
1023			goto cond_branch;
1024		case BPF_JMP | BPF_JLT | BPF_K:
1025		case BPF_JMP | BPF_JLT | BPF_X:
1026		case BPF_JMP | BPF_JSLT | BPF_K:
1027		case BPF_JMP | BPF_JSLT | BPF_X:
1028		case BPF_JMP32 | BPF_JLT | BPF_K:
1029		case BPF_JMP32 | BPF_JLT | BPF_X:
1030		case BPF_JMP32 | BPF_JSLT | BPF_K:
1031		case BPF_JMP32 | BPF_JSLT | BPF_X:
1032			true_cond = COND_LT;
1033			goto cond_branch;
1034		case BPF_JMP | BPF_JGE | BPF_K:
1035		case BPF_JMP | BPF_JGE | BPF_X:
1036		case BPF_JMP | BPF_JSGE | BPF_K:
1037		case BPF_JMP | BPF_JSGE | BPF_X:
1038		case BPF_JMP32 | BPF_JGE | BPF_K:
1039		case BPF_JMP32 | BPF_JGE | BPF_X:
1040		case BPF_JMP32 | BPF_JSGE | BPF_K:
1041		case BPF_JMP32 | BPF_JSGE | BPF_X:
1042			true_cond = COND_GE;
1043			goto cond_branch;
1044		case BPF_JMP | BPF_JLE | BPF_K:
1045		case BPF_JMP | BPF_JLE | BPF_X:
1046		case BPF_JMP | BPF_JSLE | BPF_K:
1047		case BPF_JMP | BPF_JSLE | BPF_X:
1048		case BPF_JMP32 | BPF_JLE | BPF_K:
1049		case BPF_JMP32 | BPF_JLE | BPF_X:
1050		case BPF_JMP32 | BPF_JSLE | BPF_K:
1051		case BPF_JMP32 | BPF_JSLE | BPF_X:
1052			true_cond = COND_LE;
1053			goto cond_branch;
1054		case BPF_JMP | BPF_JEQ | BPF_K:
1055		case BPF_JMP | BPF_JEQ | BPF_X:
1056		case BPF_JMP32 | BPF_JEQ | BPF_K:
1057		case BPF_JMP32 | BPF_JEQ | BPF_X:
1058			true_cond = COND_EQ;
1059			goto cond_branch;
1060		case BPF_JMP | BPF_JNE | BPF_K:
1061		case BPF_JMP | BPF_JNE | BPF_X:
1062		case BPF_JMP32 | BPF_JNE | BPF_K:
1063		case BPF_JMP32 | BPF_JNE | BPF_X:
1064			true_cond = COND_NE;
1065			goto cond_branch;
1066		case BPF_JMP | BPF_JSET | BPF_K:
1067		case BPF_JMP | BPF_JSET | BPF_X:
1068		case BPF_JMP32 | BPF_JSET | BPF_K:
1069		case BPF_JMP32 | BPF_JSET | BPF_X:
1070			true_cond = COND_NE;
1071			/* Fall through */
1072
1073cond_branch:
1074			switch (code) {
1075			case BPF_JMP | BPF_JGT | BPF_X:
1076			case BPF_JMP | BPF_JLT | BPF_X:
1077			case BPF_JMP | BPF_JGE | BPF_X:
1078			case BPF_JMP | BPF_JLE | BPF_X:
1079			case BPF_JMP | BPF_JEQ | BPF_X:
1080			case BPF_JMP | BPF_JNE | BPF_X:
1081			case BPF_JMP32 | BPF_JGT | BPF_X:
1082			case BPF_JMP32 | BPF_JLT | BPF_X:
1083			case BPF_JMP32 | BPF_JGE | BPF_X:
1084			case BPF_JMP32 | BPF_JLE | BPF_X:
1085			case BPF_JMP32 | BPF_JEQ | BPF_X:
1086			case BPF_JMP32 | BPF_JNE | BPF_X:
1087				/* unsigned comparison */
1088				if (BPF_CLASS(code) == BPF_JMP32)
1089					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
1090				else
1091					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
1092				break;
1093			case BPF_JMP | BPF_JSGT | BPF_X:
1094			case BPF_JMP | BPF_JSLT | BPF_X:
1095			case BPF_JMP | BPF_JSGE | BPF_X:
1096			case BPF_JMP | BPF_JSLE | BPF_X:
1097			case BPF_JMP32 | BPF_JSGT | BPF_X:
1098			case BPF_JMP32 | BPF_JSLT | BPF_X:
1099			case BPF_JMP32 | BPF_JSGE | BPF_X:
1100			case BPF_JMP32 | BPF_JSLE | BPF_X:
1101				/* signed comparison */
1102				if (BPF_CLASS(code) == BPF_JMP32)
1103					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
1104				else
1105					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
1106				break;
1107			case BPF_JMP | BPF_JSET | BPF_X:
1108			case BPF_JMP32 | BPF_JSET | BPF_X:
1109				if (BPF_CLASS(code) == BPF_JMP) {
1110					EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
1111				} else {
1112					EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
1113					EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
1114				}
1115				break;
1116			case BPF_JMP | BPF_JNE | BPF_K:
1117			case BPF_JMP | BPF_JEQ | BPF_K:
1118			case BPF_JMP | BPF_JGT | BPF_K:
1119			case BPF_JMP | BPF_JLT | BPF_K:
1120			case BPF_JMP | BPF_JGE | BPF_K:
1121			case BPF_JMP | BPF_JLE | BPF_K:
1122			case BPF_JMP32 | BPF_JNE | BPF_K:
1123			case BPF_JMP32 | BPF_JEQ | BPF_K:
1124			case BPF_JMP32 | BPF_JGT | BPF_K:
1125			case BPF_JMP32 | BPF_JLT | BPF_K:
1126			case BPF_JMP32 | BPF_JGE | BPF_K:
1127			case BPF_JMP32 | BPF_JLE | BPF_K:
1128			{
1129				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1130
1131				/*
1132				 * Need sign-extended load, so only positive
1133				 * values can be used as imm in cmpldi
1134				 */
1135				if (imm >= 0 && imm < 32768) {
1136					if (is_jmp32)
1137						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
1138					else
1139						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
1140				} else {
1141					/* sign-extending load */
1142					PPC_LI32(tmp1_reg, imm);
1143					/* ... but unsigned comparison */
1144					if (is_jmp32)
1145						EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
1146					else
1147						EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
1148				}
1149				break;
1150			}
1151			case BPF_JMP | BPF_JSGT | BPF_K:
1152			case BPF_JMP | BPF_JSLT | BPF_K:
1153			case BPF_JMP | BPF_JSGE | BPF_K:
1154			case BPF_JMP | BPF_JSLE | BPF_K:
1155			case BPF_JMP32 | BPF_JSGT | BPF_K:
1156			case BPF_JMP32 | BPF_JSLT | BPF_K:
1157			case BPF_JMP32 | BPF_JSGE | BPF_K:
1158			case BPF_JMP32 | BPF_JSLE | BPF_K:
1159			{
1160				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1161
1162				/*
1163				 * signed comparison, so any 16-bit value
1164				 * can be used in cmpdi
1165				 */
1166				if (imm >= -32768 && imm < 32768) {
1167					if (is_jmp32)
1168						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
1169					else
1170						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
1171				} else {
1172					PPC_LI32(tmp1_reg, imm);
1173					if (is_jmp32)
1174						EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
1175					else
1176						EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
1177				}
1178				break;
1179			}
1180			case BPF_JMP | BPF_JSET | BPF_K:
1181			case BPF_JMP32 | BPF_JSET | BPF_K:
1182				/* andi does not sign-extend the immediate */
1183				if (imm >= 0 && imm < 32768)
1184					/* PPC_ANDI is _only/always_ dot-form */
1185					EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
1186				else {
1187					PPC_LI32(tmp1_reg, imm);
1188					if (BPF_CLASS(code) == BPF_JMP) {
1189						EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
1190								     tmp1_reg));
1191					} else {
1192						EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
1193						EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
1194									0, 0, 31));
1195					}
1196				}
1197				break;
1198			}
1199			PPC_BCC(true_cond, addrs[i + 1 + off]);
1200			break;
1201
1202		/*
1203		 * Tail call
1204		 */
1205		case BPF_JMP | BPF_TAIL_CALL:
1206			ctx->seen |= SEEN_TAILCALL;
1207			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
1208			if (ret < 0)
1209				return ret;
1210			break;
1211
1212		default:
1213			/*
1214			 * The filter contains something cruel & unusual.
1215			 * We don't handle it, but also there shouldn't be
1216			 * anything missing from our list.
1217			 */
1218			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1219					code, i);
1220			return -ENOTSUPP;
1221		}
1222	}
1223
1224	/* Set end-of-body-code address for exit. */
1225	addrs[i] = ctx->idx * 4;
1226
1227	return 0;
1228}
v4.17
 
   1/*
   2 * bpf_jit_comp64.c: eBPF JIT compiler
   3 *
   4 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
   5 *		  IBM Corporation
   6 *
   7 * Based on the powerpc classic BPF JIT compiler by Matt Evans
   8 *
   9 * This program is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU General Public License
  11 * as published by the Free Software Foundation; version 2
  12 * of the License.
  13 */
  14#include <linux/moduleloader.h>
  15#include <asm/cacheflush.h>
 
  16#include <linux/netdevice.h>
  17#include <linux/filter.h>
  18#include <linux/if_vlan.h>
  19#include <asm/kprobes.h>
  20#include <linux/bpf.h>
 
  21
  22#include "bpf_jit64.h"
  23
  24static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
  25{
  26	memset32(area, BREAKPOINT_INSTRUCTION, size/4);
  27}
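/*
 * Filling the image with the breakpoint instruction means a stray jump
 * into not-yet-written parts of the JIT buffer traps instead of
 * executing leftover data.
 */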
  28
  29static inline void bpf_flush_icache(void *start, void *end)
  30{
  31	smp_wmb();
  32	flush_icache_range((unsigned long)start, (unsigned long)end);
  33}
  34
  35static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
 
  36{
  37	return (ctx->seen & (1 << (31 - b2p[i])));
  38}
  39
  40static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
  41{
  42	ctx->seen |= (1 << (31 - b2p[i]));
  43}
  44
  45static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
  46{
  47	/*
  48	 * We only need a stack frame if:
  49	 * - we call other functions (kernel helpers), or
  50	 * - the bpf program uses its stack area
  51	 * The latter condition is deduced from the usage of BPF_REG_FP
  52	 */
  53	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
  54}
  55
  56/*
  57 * When not setting up our own stackframe, the redzone usage is:
  58 *
  59 *		[	prev sp		] <-------------
  60 *		[	  ...       	] 		|
  61 * sp (r1) --->	[    stack pointer	] --------------
  62 *		[   nv gpr save area	] 8*8
  63 *		[    tail_call_cnt	] 8
  64 *		[    local_tmp_var	] 8
  65 *		[   unused red zone	] 208 bytes protected
  66 */
  67static int bpf_jit_stack_local(struct codegen_context *ctx)
  68{
  69	if (bpf_has_stack_frame(ctx))
  70		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
  71	else
  72		return -(BPF_PPC_STACK_SAVE + 16);
  73}
  74
  75static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
  76{
  77	return bpf_jit_stack_local(ctx) + 8;
  78}
  79
  80static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
  81{
  82	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
  83		return (bpf_has_stack_frame(ctx) ?
  84			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
  85				- (8 * (32 - reg));
  86
  87	pr_err("BPF JIT is asking about unknown registers");
  88	BUG();
  89}
  90
  91static void bpf_jit_emit_skb_loads(u32 *image, struct codegen_context *ctx)
  92{
  93	/*
  94	 * Load skb->len and skb->data_len
  95	 * r3 points to skb
  96	 */
  97	PPC_LWZ(b2p[SKB_HLEN_REG], 3, offsetof(struct sk_buff, len));
  98	PPC_LWZ(b2p[TMP_REG_1], 3, offsetof(struct sk_buff, data_len));
  99	/* header_len = len - data_len */
 100	PPC_SUB(b2p[SKB_HLEN_REG], b2p[SKB_HLEN_REG], b2p[TMP_REG_1]);
 101
 102	/* skb->data pointer */
 103	PPC_BPF_LL(b2p[SKB_DATA_REG], 3, offsetof(struct sk_buff, data));
 104}
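/*
 * For example, a 1500-byte skb with 200 bytes held in paged fragments has
 * skb->len = 1500 and skb->data_len = 200, so SKB_HLEN_REG ends up with
 * the 1300 bytes of linear data that the skb load helpers may read
 * directly.
 */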
 105
 106static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 107{
 108	int i;
 109
 110	/*
 111	 * Initialize tail_call_cnt if we do tail calls.
 112	 * Otherwise, put in NOPs so that it can be skipped when we are
 113	 * invoked through a tail call.
 114	 */
 115	if (ctx->seen & SEEN_TAILCALL) {
 116		PPC_LI(b2p[TMP_REG_1], 0);
 117		/* this goes in the redzone */
 118		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
 119	} else {
 120		PPC_NOP();
 121		PPC_NOP();
 122	}
 123
 124#define BPF_TAILCALL_PROLOGUE_SIZE	8
 125
 126	if (bpf_has_stack_frame(ctx)) {
 127		/*
 128		 * We need a stack frame, but we don't necessarily need to
 129		 * save/restore LR unless we call other functions
 130		 */
 131		if (ctx->seen & SEEN_FUNC) {
 132			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
 133			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
 134		}
 135
 136		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
 137	}
 138
 139	/*
 140	 * Back up non-volatile regs -- BPF registers 6-10
 141	 * If we haven't created our own stack frame, we save these
 142	 * in the protected zone below the previous stack frame
 143	 */
 144	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
 145		if (bpf_is_seen_register(ctx, i))
 146			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
 147
 148	/*
 149	 * Save additional non-volatile regs if we cache skb
 150	 * Also, setup skb data
 151	 */
 152	if (ctx->seen & SEEN_SKB) {
 153		PPC_BPF_STL(b2p[SKB_HLEN_REG], 1,
 154				bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
 155		PPC_BPF_STL(b2p[SKB_DATA_REG], 1,
 156				bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
 157		bpf_jit_emit_skb_loads(image, ctx);
 158	}
 159
 160	/* Setup frame pointer to point to the bpf stack area */
 161	if (bpf_is_seen_register(ctx, BPF_REG_FP))
 162		PPC_ADDI(b2p[BPF_REG_FP], 1,
 163				STACK_FRAME_MIN_SIZE + ctx->stack_size);
 164}
 165
 166static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
 167{
 168	int i;
 169
 170	/* Restore NVRs */
 171	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
 172		if (bpf_is_seen_register(ctx, i))
 173			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
 174
 175	/* Restore non-volatile registers used for skb cache */
 176	if (ctx->seen & SEEN_SKB) {
 177		PPC_BPF_LL(b2p[SKB_HLEN_REG], 1,
 178				bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
 179		PPC_BPF_LL(b2p[SKB_DATA_REG], 1,
 180				bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
 181	}
 182
 183	/* Tear down our stack frame */
 184	if (bpf_has_stack_frame(ctx)) {
 185		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
 186		if (ctx->seen & SEEN_FUNC) {
 187			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
 188			PPC_MTLR(0);
 189		}
 190	}
 191}
 192
 193static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 194{
 195	bpf_jit_emit_common_epilogue(image, ctx);
 196
 197	/* Move result to r3 */
 198	PPC_MR(3, b2p[BPF_REG_0]);
 199
 200	PPC_BLR();
 201}
 202
 203static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
 204{
 205#ifdef PPC64_ELF_ABI_v1
 206	/* func points to the function descriptor */
 207	PPC_LI64(b2p[TMP_REG_2], func);
 208	/* Load actual entry point from function descriptor */
 209	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
 210	/* ... and move it to LR */
 211	PPC_MTLR(b2p[TMP_REG_1]);
 212	/*
 213	 * Load TOC from function descriptor at offset 8.
 214	 * We can clobber r2 since we get called through a
 215	 * function pointer (so caller will save/restore r2)
  216	 * and since we don't use a TOC ourselves.
 217	 */
 218	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
 219#else
 220	/* We can clobber r12 */
 221	PPC_FUNC_ADDR(12, func);
 222	PPC_MTLR(12);
 223#endif
 224	PPC_BLRL();
 
 225}
 226
 227static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 228{
 229	/*
  230	 * By now, the eBPF program has already set up parameters in r3, r4 and r5
 231	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
 232	 * r4/BPF_REG_2 - pointer to bpf_array
 233	 * r5/BPF_REG_3 - index in bpf_array
 234	 */
 235	int b2p_bpf_array = b2p[BPF_REG_2];
 236	int b2p_index = b2p[BPF_REG_3];
 237
 238	/*
 239	 * if (index >= array->map.max_entries)
 240	 *   goto out;
 241	 */
 242	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
 243	PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
 244	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
 245	PPC_BCC(COND_GE, out);
 246
 247	/*
 248	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 249	 *   goto out;
 250	 */
 251	PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
 252	PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
 253	PPC_BCC(COND_GT, out);
 254
 255	/*
 256	 * tail_call_cnt++;
 257	 */
 258	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
 259	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
 260
 261	/* prog = array->ptrs[index]; */
 262	PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
 263	PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
 264	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
 265
 266	/*
 267	 * if (prog == NULL)
 268	 *   goto out;
 269	 */
 270	PPC_CMPLDI(b2p[TMP_REG_1], 0);
 271	PPC_BCC(COND_EQ, out);
 272
 273	/* goto *(prog->bpf_func + prologue_size); */
 274	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
 275#ifdef PPC64_ELF_ABI_v1
 276	/* skip past the function descriptor */
 277	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
 278			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
 279#else
 280	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
 281#endif
 282	PPC_MTCTR(b2p[TMP_REG_1]);
 283
 284	/* tear down stack, restore NVRs, ... */
 285	bpf_jit_emit_common_epilogue(image, ctx);
 286
 287	PPC_BCTR();
 
 288	/* out: */
 
 289}
 290
 291/* Assemble the body code between the prologue & epilogue */
 292static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 293			      struct codegen_context *ctx,
 294			      u32 *addrs)
 295{
 
 296	const struct bpf_insn *insn = fp->insnsi;
 297	int flen = fp->len;
 298	int i;
 299
 300	/* Start of epilogue code - will only be valid 2nd pass onwards */
 301	u32 exit_addr = addrs[flen];
 302
 303	for (i = 0; i < flen; i++) {
 304		u32 code = insn[i].code;
 305		u32 dst_reg = b2p[insn[i].dst_reg];
 306		u32 src_reg = b2p[insn[i].src_reg];
 307		s16 off = insn[i].off;
 308		s32 imm = insn[i].imm;
 309		u64 imm64;
 310		u8 *func;
 311		u32 true_cond;
 312
 313		/*
 314		 * addrs[] maps a BPF bytecode address into a real offset from
 315		 * the start of the body code.
 316		 */
 317		addrs[i] = ctx->idx * 4;
 318
 319		/*
 320		 * As an optimization, we note down which non-volatile registers
 321		 * are used so that we can only save/restore those in our
 322		 * prologue and epilogue. We do this here regardless of whether
 323		 * the actual BPF instruction uses src/dst registers or not
 324		 * (for instance, BPF_CALL does not use them). The expectation
 325		 * is that those instructions will have src_reg/dst_reg set to
 326		 * 0. Even otherwise, we just lose some prologue/epilogue
 327		 * optimization but everything else should work without
 328		 * any issues.
 329		 */
 330		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
 331			bpf_set_seen_register(ctx, insn[i].dst_reg);
 332		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
 333			bpf_set_seen_register(ctx, insn[i].src_reg);
 334
 335		switch (code) {
 336		/*
 337		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
 338		 */
 339		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
 340		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
 341			PPC_ADD(dst_reg, dst_reg, src_reg);
 342			goto bpf_alu32_trunc;
 343		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
 344		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
 345			PPC_SUB(dst_reg, dst_reg, src_reg);
 346			goto bpf_alu32_trunc;
 347		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
 348		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
 349		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
 350		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
 351			if (BPF_OP(code) == BPF_SUB)
 352				imm = -imm;
 353			if (imm) {
 354				if (imm >= -32768 && imm < 32768)
 355					PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
 356				else {
 357					PPC_LI32(b2p[TMP_REG_1], imm);
 358					PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
 359				}
 360			}
 361			goto bpf_alu32_trunc;
 362		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
 363		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
 364			if (BPF_CLASS(code) == BPF_ALU)
 365				PPC_MULW(dst_reg, dst_reg, src_reg);
 366			else
 367				PPC_MULD(dst_reg, dst_reg, src_reg);
 368			goto bpf_alu32_trunc;
 369		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
 370		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
 371			if (imm >= -32768 && imm < 32768)
 372				PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
 373			else {
 374				PPC_LI32(b2p[TMP_REG_1], imm);
 375				if (BPF_CLASS(code) == BPF_ALU)
 376					PPC_MULW(dst_reg, dst_reg,
 377							b2p[TMP_REG_1]);
 378				else
 379					PPC_MULD(dst_reg, dst_reg,
 380							b2p[TMP_REG_1]);
 381			}
 382			goto bpf_alu32_trunc;
 383		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
 384		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
 385			if (BPF_OP(code) == BPF_MOD) {
 386				PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
 387				PPC_MULW(b2p[TMP_REG_1], src_reg,
 388						b2p[TMP_REG_1]);
 389				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
 390			} else
 391				PPC_DIVWU(dst_reg, dst_reg, src_reg);
 392			goto bpf_alu32_trunc;
 393		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
 394		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
 395			if (BPF_OP(code) == BPF_MOD) {
 396				PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
 397				PPC_MULD(b2p[TMP_REG_1], src_reg,
 398						b2p[TMP_REG_1]);
 399				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
 400			} else
 401				PPC_DIVD(dst_reg, dst_reg, src_reg);
 402			break;
 403		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
 404		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
 405		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
 406		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
 407			if (imm == 0)
 408				return -EINVAL;
 409			else if (imm == 1)
 410				goto bpf_alu32_trunc;
 411
 412			PPC_LI32(b2p[TMP_REG_1], imm);
 413			switch (BPF_CLASS(code)) {
 414			case BPF_ALU:
 415				if (BPF_OP(code) == BPF_MOD) {
 416					PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
 417							b2p[TMP_REG_1]);
 418					PPC_MULW(b2p[TMP_REG_1],
 419							b2p[TMP_REG_1],
 420							b2p[TMP_REG_2]);
 421					PPC_SUB(dst_reg, dst_reg,
 422							b2p[TMP_REG_1]);
 423				} else
 424					PPC_DIVWU(dst_reg, dst_reg,
 425							b2p[TMP_REG_1]);
 426				break;
 427			case BPF_ALU64:
 428				if (BPF_OP(code) == BPF_MOD) {
 429					PPC_DIVD(b2p[TMP_REG_2], dst_reg,
 430							b2p[TMP_REG_1]);
 431					PPC_MULD(b2p[TMP_REG_1],
 432							b2p[TMP_REG_1],
 433							b2p[TMP_REG_2]);
 434					PPC_SUB(dst_reg, dst_reg,
 435							b2p[TMP_REG_1]);
 436				} else
 437					PPC_DIVD(dst_reg, dst_reg,
 438							b2p[TMP_REG_1]);
 439				break;
 440			}
 441			goto bpf_alu32_trunc;
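			/*
			 * No separate remainder instruction is used here:
			 * BPF_MOD is derived from the quotient, roughly
			 *	tmp2 = dst / imm
			 *	dst  = dst - tmp2 * imm
			 * with divwu/mullw for the 32-bit case and
			 * divd/mulld for the 64-bit case.
			 */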
 442		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
 443		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
 444			PPC_NEG(dst_reg, dst_reg);
 445			goto bpf_alu32_trunc;
 446
 447		/*
 448		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
 449		 */
 450		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
 451		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
 452			PPC_AND(dst_reg, dst_reg, src_reg);
 453			goto bpf_alu32_trunc;
 454		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
 455		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
 456			if (!IMM_H(imm))
 457				PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
 458			else {
 459				/* Sign-extended */
 460				PPC_LI32(b2p[TMP_REG_1], imm);
 461				PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
 462			}
 463			goto bpf_alu32_trunc;
 464		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
 465		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
 466			PPC_OR(dst_reg, dst_reg, src_reg);
 467			goto bpf_alu32_trunc;
 468		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
 469		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
 470			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
 471				/* Sign-extended */
 472				PPC_LI32(b2p[TMP_REG_1], imm);
 473				PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
 474			} else {
 475				if (IMM_L(imm))
 476					PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
 477				if (IMM_H(imm))
 478					PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
 479			}
 480			goto bpf_alu32_trunc;
 481		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
 482		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
 483			PPC_XOR(dst_reg, dst_reg, src_reg);
 484			goto bpf_alu32_trunc;
 485		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
 486		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
 487			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
 488				/* Sign-extended */
 489				PPC_LI32(b2p[TMP_REG_1], imm);
 490				PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
 491			} else {
 492				if (IMM_L(imm))
 493					PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
 494				if (IMM_H(imm))
 495					PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
 496			}
 497			goto bpf_alu32_trunc;
 498		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
 499			/* slw clears top 32 bits */
 500			PPC_SLW(dst_reg, dst_reg, src_reg);
 501			break;
 502		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
 503			PPC_SLD(dst_reg, dst_reg, src_reg);
 504			break;
  505		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
 506			/* with imm 0, we still need to clear top 32 bits */
 507			PPC_SLWI(dst_reg, dst_reg, imm);
 508			break;
  509		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
 510			if (imm != 0)
 511				PPC_SLDI(dst_reg, dst_reg, imm);
 512			break;
 513		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
 514			PPC_SRW(dst_reg, dst_reg, src_reg);
 515			break;
 516		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
 517			PPC_SRD(dst_reg, dst_reg, src_reg);
 518			break;
 519		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
 520			PPC_SRWI(dst_reg, dst_reg, imm);
 521			break;
 522		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
 523			if (imm != 0)
 524				PPC_SRDI(dst_reg, dst_reg, imm);
 525			break;
 526		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
 527			PPC_SRAD(dst_reg, dst_reg, src_reg);
 528			break;
 529		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
 530			if (imm != 0)
 531				PPC_SRADI(dst_reg, dst_reg, imm);
 532			break;
 533
 534		/*
 535		 * MOV
 536		 */
 537		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
 538		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
 539			PPC_MR(dst_reg, src_reg);
 540			goto bpf_alu32_trunc;
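		/*
		 * Note for the immediate form below: PPC_LI32() sign-extends
		 * into the full 64-bit register, so a negative imm leaves
		 * ones in the upper word and the 32-bit BPF_MOV must branch
		 * to bpf_alu32_trunc to clear them; a non-negative imm is
		 * already zero-extended and can simply break out.
		 */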
 541		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
 542		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
 543			PPC_LI32(dst_reg, imm);
 544			if (imm < 0)
 545				goto bpf_alu32_trunc;
 546			break;
 547
 548bpf_alu32_trunc:
 549		/* Truncate to 32-bits */
 550		if (BPF_CLASS(code) == BPF_ALU)
 551			PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
 552		break;
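		/*
		 * The rlwinm above with a 0..31 mask keeps only the low
		 * 32 bits and zeroes the upper word, which matches the eBPF
		 * requirement that 32-bit ALU results are zero-extended.
		 */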
 553
 554		/*
 555		 * BPF_FROM_BE/LE
 556		 */
 557		case BPF_ALU | BPF_END | BPF_FROM_LE:
 558		case BPF_ALU | BPF_END | BPF_FROM_BE:
 559#ifdef __BIG_ENDIAN__
 560			if (BPF_SRC(code) == BPF_FROM_BE)
 561				goto emit_clear;
 562#else /* !__BIG_ENDIAN__ */
 563			if (BPF_SRC(code) == BPF_FROM_LE)
 564				goto emit_clear;
 565#endif
 566			switch (imm) {
 567			case 16:
 568				/* Rotate 8 bits left & mask with 0x0000ff00 */
 569				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
 570				/* Rotate 8 bits right & insert LSB to reg */
 571				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
 572				/* Move result back to dst_reg */
 573				PPC_MR(dst_reg, b2p[TMP_REG_1]);
 574				break;
 575			case 32:
 576				/*
 577				 * Rotate word left by 8 bits:
 578				 * 2 bytes are already in their final position
 579				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
 580				 */
 581				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
 582				/* Rotate 24 bits and insert byte 1 */
 583				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
 584				/* Rotate 24 bits and insert byte 3 */
 585				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
 586				PPC_MR(dst_reg, b2p[TMP_REG_1]);
 587				break;
 588			case 64:
 589				/*
 590				 * Way easier and faster(?) to store the value
 591				 * into stack and then use ldbrx
 592				 *
 593				 * ctx->seen will be reliable in pass2, but
 594				 * the instructions generated will remain the
 595				 * same across all passes
 596				 */
 597				PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
 598				PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
 599				PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
 600				break;
 601			}
 602			break;
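			/*
			 * Worked example (illustrative): for imm == 16 with
			 * 0xaabb in the low halfword,
			 *	rlwinm tmp, dst, 8, 16, 23	-> 0x0000bb00
			 *	rlwimi tmp, dst, 24, 24, 31	-> 0x0000bbaa
			 * and for imm == 32 with dst = 0x11223344,
			 *	rotate left 8			-> 0x22334411
			 *	insert byte 1			-> 0x44334411
			 *	insert byte 3			-> 0x44332211
			 */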
 603
 604emit_clear:
 605			switch (imm) {
 606			case 16:
 607				/* zero-extend 16 bits into 64 bits */
 608				PPC_RLDICL(dst_reg, dst_reg, 0, 48);
 609				break;
 610			case 32:
 611				/* zero-extend 32 bits into 64 bits */
 612				PPC_RLDICL(dst_reg, dst_reg, 0, 32);
 613				break;
 614			case 64:
 615				/* nop */
 616				break;
 617			}
 618			break;
 619
 620		/*
 621		 * BPF_ST(X)
 622		 */
 623		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
 624		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
 625			if (BPF_CLASS(code) == BPF_ST) {
 626				PPC_LI(b2p[TMP_REG_1], imm);
 627				src_reg = b2p[TMP_REG_1];
 628			}
 629			PPC_STB(src_reg, dst_reg, off);
 630			break;
  631		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
  632		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
 633			if (BPF_CLASS(code) == BPF_ST) {
 634				PPC_LI(b2p[TMP_REG_1], imm);
 635				src_reg = b2p[TMP_REG_1];
 636			}
 637			PPC_STH(src_reg, dst_reg, off);
 638			break;
 639		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
 640		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
 641			if (BPF_CLASS(code) == BPF_ST) {
 642				PPC_LI32(b2p[TMP_REG_1], imm);
 643				src_reg = b2p[TMP_REG_1];
 644			}
 645			PPC_STW(src_reg, dst_reg, off);
 646			break;
  647		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
 648		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
 649			if (BPF_CLASS(code) == BPF_ST) {
 650				PPC_LI32(b2p[TMP_REG_1], imm);
 651				src_reg = b2p[TMP_REG_1];
 652			}
 653			PPC_STD(src_reg, dst_reg, off);
 654			break;
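			/*
			 * BPF_ST stores an immediate rather than a register,
			 * so the immediate is first materialized in
			 * b2p[TMP_REG_1] and the same store instruction is
			 * reused; e.g. "*(u32 *)(dst + 8) = 42" becomes
			 * roughly "li tmp, 42; stw tmp, 8(dst)".
			 */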
 655
 656		/*
 657		 * BPF_STX XADD (atomic_add)
 658		 */
 659		/* *(u32 *)(dst + off) += src */
 660		case BPF_STX | BPF_XADD | BPF_W:
 661			/* Get EA into TMP_REG_1 */
 662			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
 663			/* error if EA is not word-aligned */
 664			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
 665			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
 666			PPC_LI(b2p[BPF_REG_0], 0);
 667			PPC_JMP(exit_addr);
 668			/* load value from memory into TMP_REG_2 */
 669			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
 670			/* add value from src_reg into this */
 671			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
 672			/* store result back */
 673			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
 674			/* we're done if this succeeded */
 675			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
 676			/* otherwise, let's try once more */
 677			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
 678			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
 679			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
 680			/* exit if the store was not successful */
 681			PPC_LI(b2p[BPF_REG_0], 0);
 682			PPC_BCC(COND_NE, exit_addr);
 683			break;
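			/*
			 * lwarx/stwcx. form a load-reserve/store-conditional
			 * pair: stwcx. sets CR0[EQ] only if the reservation
			 * taken by lwarx is still held, so a clear EQ means
			 * the word was modified concurrently. The sequence
			 * above retries once and otherwise bails out through
			 * the exit path.
			 */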
 684		/* *(u64 *)(dst + off) += src */
 685		case BPF_STX | BPF_XADD | BPF_DW:
 686			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
 687			/* error if EA is not doubleword-aligned */
 688			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
 689			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
 690			PPC_LI(b2p[BPF_REG_0], 0);
 691			PPC_JMP(exit_addr);
 692			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
 693			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
 694			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
 695			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
 696			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
 697			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
 698			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
 699			PPC_LI(b2p[BPF_REG_0], 0);
 700			PPC_BCC(COND_NE, exit_addr);
 701			break;
 702
 703		/*
 704		 * BPF_LDX
 705		 */
 706		/* dst = *(u8 *)(ul) (src + off) */
 707		case BPF_LDX | BPF_MEM | BPF_B:
 708			PPC_LBZ(dst_reg, src_reg, off);
 709			break;
 710		/* dst = *(u16 *)(ul) (src + off) */
 711		case BPF_LDX | BPF_MEM | BPF_H:
 712			PPC_LHZ(dst_reg, src_reg, off);
 713			break;
 714		/* dst = *(u32 *)(ul) (src + off) */
 715		case BPF_LDX | BPF_MEM | BPF_W:
 716			PPC_LWZ(dst_reg, src_reg, off);
 717			break;
 718		/* dst = *(u64 *)(ul) (src + off) */
 719		case BPF_LDX | BPF_MEM | BPF_DW:
 720			PPC_LD(dst_reg, src_reg, off);
 721			break;
 722
 723		/*
 724		 * Doubleword load
 725		 * 16 byte instruction that uses two 'struct bpf_insn'
 726		 */
 727		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
 728			imm64 = ((u64)(u32) insn[i].imm) |
 729				    (((u64)(u32) insn[i+1].imm) << 32);
 730			/* Adjust for two bpf instructions */
 731			addrs[++i] = ctx->idx * 4;
 732			PPC_LI64(dst_reg, imm64);
 733			break;
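			/*
			 * The 64-bit immediate is split across two
			 * struct bpf_insn slots: the low 32 bits come from
			 * insn[i].imm and the high 32 bits from
			 * insn[i + 1].imm. addrs[++i] records an address for
			 * the second (pseudo) slot and skips it in this loop.
			 */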
 734
 735		/*
 736		 * Return/Exit
 737		 */
 738		case BPF_JMP | BPF_EXIT:
 739			/*
 740			 * If this isn't the very last instruction, branch to
 741			 * the epilogue. If we _are_ the last instruction,
 742			 * we'll just fall through to the epilogue.
 743			 */
 744			if (i != flen - 1)
 745				PPC_JMP(exit_addr);
 746			/* else fall through to the epilogue */
 747			break;
 748
 749		/*
 750		 * Call kernel helper
 751		 */
 752		case BPF_JMP | BPF_CALL:
 753			ctx->seen |= SEEN_FUNC;
 754			func = (u8 *) __bpf_call_base + imm;
 755
 756			/* Save skb pointer if we need to re-cache skb data */
 757			if ((ctx->seen & SEEN_SKB) &&
 758			    bpf_helper_changes_pkt_data(func))
 759				PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));
 760
 761			bpf_jit_emit_func_call(image, ctx, (u64)func);
 762
 763			/* move return value from r3 to BPF_REG_0 */
 764			PPC_MR(b2p[BPF_REG_0], 3);
 765
 766			/* refresh skb cache */
 767			if ((ctx->seen & SEEN_SKB) &&
 768			    bpf_helper_changes_pkt_data(func)) {
 769				/* reload skb pointer to r3 */
 770				PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
 771				bpf_jit_emit_skb_loads(image, ctx);
 772			}
 773			break;
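			/*
			 * For helper calls, imm is the helper's offset from
			 * __bpf_call_base. BPF argument registers R1-R5 are
			 * already mapped onto the ABI argument registers
			 * r3-r7, so no argument shuffling is needed; only the
			 * return value is copied from r3 into the register
			 * backing BPF_REG_0.
			 */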
 774
 775		/*
 776		 * Jumps and branches
 777		 */
 778		case BPF_JMP | BPF_JA:
 779			PPC_JMP(addrs[i + 1 + off]);
 780			break;
 781
 782		case BPF_JMP | BPF_JGT | BPF_K:
 783		case BPF_JMP | BPF_JGT | BPF_X:
 784		case BPF_JMP | BPF_JSGT | BPF_K:
 785		case BPF_JMP | BPF_JSGT | BPF_X:
 786			true_cond = COND_GT;
 787			goto cond_branch;
 788		case BPF_JMP | BPF_JLT | BPF_K:
 789		case BPF_JMP | BPF_JLT | BPF_X:
 790		case BPF_JMP | BPF_JSLT | BPF_K:
 791		case BPF_JMP | BPF_JSLT | BPF_X:
 792			true_cond = COND_LT;
 793			goto cond_branch;
 794		case BPF_JMP | BPF_JGE | BPF_K:
 795		case BPF_JMP | BPF_JGE | BPF_X:
 796		case BPF_JMP | BPF_JSGE | BPF_K:
 797		case BPF_JMP | BPF_JSGE | BPF_X:
 798			true_cond = COND_GE;
 799			goto cond_branch;
 800		case BPF_JMP | BPF_JLE | BPF_K:
 801		case BPF_JMP | BPF_JLE | BPF_X:
 802		case BPF_JMP | BPF_JSLE | BPF_K:
 803		case BPF_JMP | BPF_JSLE | BPF_X:
 804			true_cond = COND_LE;
 805			goto cond_branch;
 806		case BPF_JMP | BPF_JEQ | BPF_K:
 807		case BPF_JMP | BPF_JEQ | BPF_X:
 808			true_cond = COND_EQ;
 809			goto cond_branch;
 810		case BPF_JMP | BPF_JNE | BPF_K:
 811		case BPF_JMP | BPF_JNE | BPF_X:
 812			true_cond = COND_NE;
 813			goto cond_branch;
 814		case BPF_JMP | BPF_JSET | BPF_K:
 815		case BPF_JMP | BPF_JSET | BPF_X:
 816			true_cond = COND_NE;
 817			/* Fall through */
 818
 819cond_branch:
 820			switch (code) {
 821			case BPF_JMP | BPF_JGT | BPF_X:
 822			case BPF_JMP | BPF_JLT | BPF_X:
 823			case BPF_JMP | BPF_JGE | BPF_X:
 824			case BPF_JMP | BPF_JLE | BPF_X:
 825			case BPF_JMP | BPF_JEQ | BPF_X:
 826			case BPF_JMP | BPF_JNE | BPF_X:
 827				/* unsigned comparison */
 828				PPC_CMPLD(dst_reg, src_reg);
 829				break;
 830			case BPF_JMP | BPF_JSGT | BPF_X:
 831			case BPF_JMP | BPF_JSLT | BPF_X:
 832			case BPF_JMP | BPF_JSGE | BPF_X:
 833			case BPF_JMP | BPF_JSLE | BPF_X:
 834				/* signed comparison */
 835				PPC_CMPD(dst_reg, src_reg);
 836				break;
 837			case BPF_JMP | BPF_JSET | BPF_X:
 838				PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
 839				break;
 840			case BPF_JMP | BPF_JNE | BPF_K:
 841			case BPF_JMP | BPF_JEQ | BPF_K:
 842			case BPF_JMP | BPF_JGT | BPF_K:
 843			case BPF_JMP | BPF_JLT | BPF_K:
 844			case BPF_JMP | BPF_JGE | BPF_K:
 845			case BPF_JMP | BPF_JLE | BPF_K:
 846				/*
 847				 * Need sign-extended load, so only positive
 848				 * values can be used as imm in cmpldi
 849				 */
 850				if (imm >= 0 && imm < 32768)
 851					PPC_CMPLDI(dst_reg, imm);
 852				else {
 853					/* sign-extending load */
 854					PPC_LI32(b2p[TMP_REG_1], imm);
 855					/* ... but unsigned comparison */
 856					PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
 857				}
 858				break;
 859			case BPF_JMP | BPF_JSGT | BPF_K:
 860			case BPF_JMP | BPF_JSLT | BPF_K:
 861			case BPF_JMP | BPF_JSGE | BPF_K:
 862			case BPF_JMP | BPF_JSLE | BPF_K:
 863				/*
 864				 * signed comparison, so any 16-bit value
 865				 * can be used in cmpdi
 866				 */
 867				if (imm >= -32768 && imm < 32768)
 868					PPC_CMPDI(dst_reg, imm);
 869				else {
 870					PPC_LI32(b2p[TMP_REG_1], imm);
 871					PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
 872				}
 873				break;
 874			case BPF_JMP | BPF_JSET | BPF_K:
 875				/* andi does not sign-extend the immediate */
 876				if (imm >= 0 && imm < 32768)
 877					/* PPC_ANDI is _only/always_ dot-form */
 878					PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
 879				else {
 880					PPC_LI32(b2p[TMP_REG_1], imm);
 881					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
 882						    b2p[TMP_REG_1]);
 883				}
 884				break;
 885			}
 886			PPC_BCC(true_cond, addrs[i + 1 + off]);
 887			break;
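		/*
		 * The conditional jumps above are emitted in two steps: the
		 * first switch picks the branch condition (true_cond), the
		 * second emits the compare -- cmpld/cmpldi for unsigned,
		 * cmpd/cmpdi for signed, and a dot-form "and." for BPF_JSET,
		 * which sets CR0 directly from the masked result. PPC_BCC()
		 * then branches to the addrs[] entry of the jump target.
		 */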
 888
 889		/*
 890		 * Loads from packet header/data
 891		 * Assume 32-bit input value in imm and X (src_reg)
 892		 */
 893
 894		/* Absolute loads */
 895		case BPF_LD | BPF_W | BPF_ABS:
 896			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_word);
 897			goto common_load_abs;
 898		case BPF_LD | BPF_H | BPF_ABS:
 899			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_half);
 900			goto common_load_abs;
 901		case BPF_LD | BPF_B | BPF_ABS:
 902			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_byte);
 903common_load_abs:
 904			/*
 905			 * Load from [imm]
 906			 * Load into r4, which can just be passed onto
 907			 *  skb load helpers as the second parameter
 908			 */
 909			PPC_LI32(4, imm);
 910			goto common_load;
 911
 912		/* Indirect loads */
 913		case BPF_LD | BPF_W | BPF_IND:
 914			func = (u8 *)sk_load_word;
 915			goto common_load_ind;
 916		case BPF_LD | BPF_H | BPF_IND:
 917			func = (u8 *)sk_load_half;
 918			goto common_load_ind;
 919		case BPF_LD | BPF_B | BPF_IND:
 920			func = (u8 *)sk_load_byte;
 921common_load_ind:
 922			/*
 923			 * Load from [src_reg + imm]
 924			 * Treat src_reg as a 32-bit value
 925			 */
 926			PPC_EXTSW(4, src_reg);
 927			if (imm) {
 928				if (imm >= -32768 && imm < 32768)
 929					PPC_ADDI(4, 4, IMM_L(imm));
 930				else {
 931					PPC_LI32(b2p[TMP_REG_1], imm);
 932					PPC_ADD(4, 4, b2p[TMP_REG_1]);
 933				}
 934			}
 935
 936common_load:
 937			ctx->seen |= SEEN_SKB;
 938			ctx->seen |= SEEN_FUNC;
 939			bpf_jit_emit_func_call(image, ctx, (u64)func);
 940
 941			/*
 942			 * Helper returns 'lt' condition on error, and an
 943			 * appropriate return value in BPF_REG_0
 944			 */
 945			PPC_BCC(COND_LT, exit_addr);
 946			break;
 947
 948		/*
 949		 * Tail call
 950		 */
 951		case BPF_JMP | BPF_TAIL_CALL:
 952			ctx->seen |= SEEN_TAILCALL;
 953			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
 954			break;
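			/*
			 * bpf_jit_emit_tail_call() bounds the chain via the
			 * tail_call_cnt slot in the stack frame, validates
			 * the index against the prog array and jumps into the
			 * target program; addrs[i + 1] is the fall-through
			 * address used when the tail call cannot be taken.
			 */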
 955
 956		default:
 957			/*
 958			 * The filter contains something cruel & unusual.
 959			 * We don't handle it, but also there shouldn't be
 960			 * anything missing from our list.
 961			 */
 962			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
 963					code, i);
 964			return -ENOTSUPP;
 965		}
 966	}
 967
 968	/* Set end-of-body-code address for exit. */
 969	addrs[i] = ctx->idx * 4;
 970
 971	return 0;
 972}
 973
 974struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 975{
 976	u32 proglen;
 977	u32 alloclen;
 978	u8 *image = NULL;
 979	u32 *code_base;
 980	u32 *addrs;
 981	struct codegen_context cgctx;
 982	int pass;
 983	int flen;
 984	struct bpf_binary_header *bpf_hdr;
 985	struct bpf_prog *org_fp = fp;
 986	struct bpf_prog *tmp_fp;
 987	bool bpf_blinded = false;
 988
 989	if (!fp->jit_requested)
 990		return org_fp;
 991
 992	tmp_fp = bpf_jit_blind_constants(org_fp);
 993	if (IS_ERR(tmp_fp))
 994		return org_fp;
 995
 996	if (tmp_fp != org_fp) {
 997		bpf_blinded = true;
 998		fp = tmp_fp;
 999	}
1000
1001	flen = fp->len;
1002	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
1003	if (addrs == NULL) {
1004		fp = org_fp;
1005		goto out;
1006	}
1007
1008	memset(&cgctx, 0, sizeof(struct codegen_context));
1009
1010	/* Make sure that the stack is quadword aligned. */
1011	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
1012
1013	/* Scouting faux-generate pass 0 */
1014	if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) {
1015		/* We hit something illegal or unsupported. */
1016		fp = org_fp;
1017		goto out;
1018	}
1019
1020	/*
1021	 * Pretend to build prologue, given the features we've seen.  This will
 1022	 * update cgctx.idx as it pretends to output instructions, then we can
1023	 * calculate total size from idx.
1024	 */
1025	bpf_jit_build_prologue(0, &cgctx);
1026	bpf_jit_build_epilogue(0, &cgctx);
1027
1028	proglen = cgctx.idx * 4;
1029	alloclen = proglen + FUNCTION_DESCR_SIZE;
1030
1031	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
1032			bpf_jit_fill_ill_insns);
1033	if (!bpf_hdr) {
1034		fp = org_fp;
1035		goto out;
1036	}
1037
1038	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
1039
1040	/* Code generation passes 1-2 */
1041	for (pass = 1; pass < 3; pass++) {
1042		/* Now build the prologue, body code & epilogue for real. */
1043		cgctx.idx = 0;
1044		bpf_jit_build_prologue(code_base, &cgctx);
1045		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
1046		bpf_jit_build_epilogue(code_base, &cgctx);
1047
1048		if (bpf_jit_enable > 1)
1049			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
1050				proglen - (cgctx.idx * 4), cgctx.seen);
1051	}
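	/*
	 * Two real passes are needed because pass 1 still resolves forward
	 * branches against addrs[] offsets computed during the scouting pass
	 * (which did not account for the prologue); pass 1 refreshes addrs[],
	 * and pass 2 re-emits everything against the final offsets.
	 */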
1052
1053	if (bpf_jit_enable > 1)
1054		/*
 1055		 * Note that we output code_base rather than image, since
 1056		 * opcodes are in code_base.
1057		 */
1058		bpf_jit_dump(flen, proglen, pass, code_base);
1059
1060#ifdef PPC64_ELF_ABI_v1
1061	/* Function descriptor nastiness: Address + TOC */
1062	((u64 *)image)[0] = (u64)code_base;
1063	((u64 *)image)[1] = local_paca->kernel_toc;
1064#endif
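	/*
	 * Under ELF ABI v1 a function "pointer" is really a descriptor
	 * holding the entry address and the TOC pointer, so image[] begins
	 * with that descriptor and the actual instructions start at
	 * code_base.
	 */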
1065
1066	fp->bpf_func = (void *)image;
1067	fp->jited = 1;
1068	fp->jited_len = alloclen;
1069
1070	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
1071
1072out:
1073	kfree(addrs);
1074
1075	if (bpf_blinded)
1076		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
1077
1078	return fp;
1079}
1080
1081/* Overriding bpf_jit_free() as we don't set images read-only. */
1082void bpf_jit_free(struct bpf_prog *fp)
1083{
1084	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
1085	struct bpf_binary_header *bpf_hdr = (void *)addr;
1086
1087	if (fp->jited)
1088		bpf_jit_binary_free(bpf_hdr);
1089
1090	bpf_prog_unlock_free(fp);
1091}