v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * bpf_jit_comp64.c: eBPF JIT compiler
   4 *
   5 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
   6 *		  IBM Corporation
   7 *
   8 * Based on the powerpc classic BPF JIT compiler by Matt Evans
   9 */
  10#include <linux/moduleloader.h>
  11#include <asm/cacheflush.h>
  12#include <asm/asm-compat.h>
  13#include <linux/netdevice.h>
  14#include <linux/filter.h>
  15#include <linux/if_vlan.h>
  16#include <asm/kprobes.h>
  17#include <linux/bpf.h>
  18#include <asm/security_features.h>
  19
  20#include "bpf_jit.h"
  21
  22/*
  23 * Stack layout:
  24 * Ensure the top half (up to local_tmp_var) stays consistent
  25 * with our redzone usage.
  26 *
  27 *		[	prev sp		] <-------------
  28 *		[   nv gpr save area	] 5*8		|
  29 *		[    tail_call_cnt	] 8		|
  30 *		[    local_tmp_var	] 16		|
  31 * fp (r31) -->	[   ebpf stack space	] up to 512	|
  32 *		[     frame header	] 32/112	|
  33 * sp (r1) --->	[    stack pointer	] --------------
  34 */
  35
  36/* for gpr non-volatile registers BPF_REG_6 to 10 */
  37#define BPF_PPC_STACK_SAVE	(5*8)
  38/* for bpf JIT code internal usage */
  39#define BPF_PPC_STACK_LOCALS	24
  40/* stack frame excluding BPF stack, ensure this is quadword aligned */
  41#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
  42				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
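  43
  44/*
  45 * Worked example: with the frame header sizes shown in the layout above
  46 * (STACK_FRAME_MIN_SIZE of 32 bytes on ELFv2, 112 bytes on ELFv1),
  47 * BPF_PPC_STACKFRAME comes to 32 + 24 + 40 = 96 bytes or
  48 * 112 + 24 + 40 = 176 bytes respectively -- both multiples of 16, i.e.
  49 * quadword aligned, before the BPF stack itself is added.
  50 */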
  43
  44/* BPF register usage */
  45#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
  46#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)
  47
  48/* BPF to ppc register mappings */
  49void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
  50{
  51	/* function return value */
  52	ctx->b2p[BPF_REG_0] = _R8;
  53	/* function arguments */
  54	ctx->b2p[BPF_REG_1] = _R3;
  55	ctx->b2p[BPF_REG_2] = _R4;
  56	ctx->b2p[BPF_REG_3] = _R5;
  57	ctx->b2p[BPF_REG_4] = _R6;
  58	ctx->b2p[BPF_REG_5] = _R7;
  59	/* non volatile registers */
  60	ctx->b2p[BPF_REG_6] = _R27;
  61	ctx->b2p[BPF_REG_7] = _R28;
  62	ctx->b2p[BPF_REG_8] = _R29;
  63	ctx->b2p[BPF_REG_9] = _R30;
  64	/* frame pointer aka BPF_REG_10 */
  65	ctx->b2p[BPF_REG_FP] = _R31;
  66	/* eBPF jit internal registers */
  67	ctx->b2p[BPF_REG_AX] = _R12;
  68	ctx->b2p[TMP_REG_1] = _R9;
  69	ctx->b2p[TMP_REG_2] = _R10;
  70}
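/*
 * Note: BPF_REG_0 (the BPF return value) lives in r8 while the program
 * runs; r3 already carries BPF_REG_1 (the context) in the argument
 * mapping above, and the epilogue copies BPF_REG_0 into r3 just before
 * the blr to produce the ABI return value (see bpf_jit_build_epilogue()).
 */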
  71
  72/* PPC NVR range -- update this if we ever use NVRs below r27 */
  73#define BPF_PPC_NVR_MIN		_R27
  74
  75static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
  76{
  77	/*
  78	 * We only need a stack frame if:
  79	 * - we call other functions (kernel helpers), or
  80	 * - the bpf program uses its stack area
  81	 * The latter condition is deduced from the usage of BPF_REG_FP
  82	 */
  83	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
  84}
  85
  86/*
  87 * When not setting up our own stackframe, the redzone usage is:
  88 *
  89 *		[	prev sp		] <-------------
  90 *		[	  ...       	] 		|
  91 * sp (r1) --->	[    stack pointer	] --------------
  92 *		[   nv gpr save area	] 5*8
  93 *		[    tail_call_cnt	] 8
  94 *		[    local_tmp_var	] 16
  95 *		[   unused red zone	] 224 bytes protected
  96 */
  97static int bpf_jit_stack_local(struct codegen_context *ctx)
  98{
  99	if (bpf_has_stack_frame(ctx))
 100		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
 101	else
 102		return -(BPF_PPC_STACK_SAVE + 24);
 103}
 104
 105static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
 106{
 107	return bpf_jit_stack_local(ctx) + 16;
 108}
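/*
 * Worked example for the no-stack-frame case: bpf_jit_stack_local()
 * returns -(40 + 24) = -64, so local_tmp_var occupies -64(r1)..-49(r1)
 * in the redzone, and bpf_jit_stack_tailcallcnt() returns -64 + 16 = -48,
 * matching the prologue's initial store of tail_call_cnt at
 * -(BPF_PPC_STACK_SAVE + 8) = -48(r1). Of the 288-byte redzone, only
 * 40 + 8 + 16 = 64 bytes are used, leaving 224 bytes untouched.
 */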
 109
 110static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
 111{
 112	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
 113		return (bpf_has_stack_frame(ctx) ?
 114			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
 115				- (8 * (32 - reg));
 116
 117	pr_err("BPF JIT is asking about unknown registers");
 118	BUG();
 119}
 120
 121void bpf_jit_realloc_regs(struct codegen_context *ctx)
 122{
 123}
 124
 125void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 126{
 127	int i;
 128
 129#ifndef CONFIG_PPC_KERNEL_PCREL
 130	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
 131		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
 132#endif
 133
 134	/*
 135	 * Initialize tail_call_cnt if we do tail calls.
 136	 * Otherwise, put in NOPs so that it can be skipped when we are
 137	 * invoked through a tail call.
 138	 */
 139	if (ctx->seen & SEEN_TAILCALL) {
 140		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
 141		/* this goes in the redzone */
 142		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
 143	} else {
 144		EMIT(PPC_RAW_NOP());
 145		EMIT(PPC_RAW_NOP());
 146	}
 147
 148	if (bpf_has_stack_frame(ctx)) {
 149		/*
 150		 * We need a stack frame, but we don't necessarily need to
 151		 * save/restore LR unless we call other functions
 152		 */
 153		if (ctx->seen & SEEN_FUNC) {
 154			EMIT(PPC_RAW_MFLR(_R0));
 155			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
 156		}
 157
 158		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
 159	}
 160
 161	/*
 162	 * Back up non-volatile regs -- BPF registers 6-10
 163	 * If we haven't created our own stack frame, we save these
 164	 * in the protected zone below the previous stack frame
 165	 */
 166	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
 167		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
 168			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
 169
 170	/* Setup frame pointer to point to the bpf stack area */
 171	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
 172		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
 173				STACK_FRAME_MIN_SIZE + ctx->stack_size));
 174}
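/*
 * Illustrative sketch (not emitted verbatim) of the prologue for an
 * ELFv2 program that does tail calls, calls helpers, and uses the BPF
 * stack plus r27, with the register mapping above:
 *
 *	ld	r2, <paca kernel_toc>(r13)	; TOC setup (non-PCREL)
 *	li	r9, 0				; tail_call_cnt = 0 ...
 *	std	r9, -48(r1)			; ... kept in the redzone
 *	mflr	r0				; only when SEEN_FUNC
 *	std	r0, PPC_LR_STKOFF(r1)
 *	stdu	r1, -(96 + stack_size)(r1)
 *	std	r27, <nvr save offset>(r1)	; one store per seen NVR
 *	addi	r31, r1, 32 + stack_size	; FP -> BPF stack area
 */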
 175
 176static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
 177{
 178	int i;
 179
 180	/* Restore NVRs */
 181	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
 182		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
 183			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
 184
 185	/* Tear down our stack frame */
 186	if (bpf_has_stack_frame(ctx)) {
 187		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
 188		if (ctx->seen & SEEN_FUNC) {
 189			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
 190			EMIT(PPC_RAW_MTLR(_R0));
 191		}
 192	}
 193}
 194
 195void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 196{
 197	bpf_jit_emit_common_epilogue(image, ctx);
 198
 199	/* Move result to r3 */
 200	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
 201
 202	EMIT(PPC_RAW_BLR());
 203}
 204
 205static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
 206{
 207	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
 208	long reladdr;
 209
 210	if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
 211		return -EINVAL;
 212
 213	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
 214		reladdr = func_addr - CTX_NIA(ctx);
 215
 216		if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
 217			pr_err("eBPF: address of %ps out of range of pcrel address.\n",
 218				(void *)func);
 219			return -ERANGE;
 220		}
 221		/* pla r12,addr */
 222		EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
 223		EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
 224		EMIT(PPC_RAW_MTCTR(_R12));
 225		EMIT(PPC_RAW_BCTR());
 226
 227	} else {
 228		reladdr = func_addr - kernel_toc_addr();
 229		if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
 230			pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
 231			return -ERANGE;
 232		}
 233
 234		EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
 235		EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
 236		EMIT(PPC_RAW_MTCTR(_R12));
 237		EMIT(PPC_RAW_BCTRL());
 238	}
 239
 240	return 0;
 241}
 242
 243int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
 244{
 245	unsigned int i, ctx_idx = ctx->idx;
 246
 247	if (WARN_ON_ONCE(func && is_module_text_address(func)))
 248		return -EINVAL;
 249
 250	/* skip past descriptor if elf v1 */
 251	func += FUNCTION_DESCR_SIZE;
 252
 253	/* Load function address into r12 */
 254	PPC_LI64(_R12, func);
 255
 256	/* For bpf-to-bpf function calls, the callee's address is unknown
 257	 * until the last extra pass. As seen above, we use PPC_LI64() to
 258	 * load the callee's address, but this may optimize the number of
 259	 * instructions required based on the nature of the address.
 260	 *
 261	 * Since we don't want the number of instructions emitted to increase,
 262	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
 263	 * we always have a five-instruction sequence, which is the maximum
 264	 * that PPC_LI64() can emit.
 265	 */
 266	if (!image)
 267		for (i = ctx->idx - ctx_idx; i < 5; i++)
 268			EMIT(PPC_RAW_NOP());
 269
 270	EMIT(PPC_RAW_MTCTR(_R12));
 271	EMIT(PPC_RAW_BCTRL());
 272
 273	return 0;
 274}
 275
 276static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 277{
 278	/*
 279	 * By now, the eBPF program has already set up parameters in r3, r4 and r5
 280	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
 281	 * r4/BPF_REG_2 - pointer to bpf_array
 282	 * r5/BPF_REG_3 - index in bpf_array
 283	 */
 284	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
 285	int b2p_index = bpf_to_ppc(BPF_REG_3);
 286	int bpf_tailcall_prologue_size = 8;
 287
 288	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
 289		bpf_tailcall_prologue_size += 4; /* skip past the toc load */
 290
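	/*
	 * Note: the 8 bytes accounted above are the two prologue
	 * instructions that initialize tail_call_cnt; the tail call
	 * branches past them (and past the TOC load on ELFv2) so that
	 * the count accumulated so far is not reset in the callee.
	 */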
 291	/*
 292	 * if (index >= array->map.max_entries)
 293	 *   goto out;
 294	 */
 295	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
 296	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
 297	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
 298	PPC_BCC_SHORT(COND_GE, out);
 299
 300	/*
 301	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
 302	 *   goto out;
 303	 */
 304	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
 305	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
 306	PPC_BCC_SHORT(COND_GE, out);
 307
 308	/*
 309	 * tail_call_cnt++;
 310	 */
 311	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
 312	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
 313
 314	/* prog = array->ptrs[index]; */
 315	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
 316	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
 317	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
 318
 319	/*
 320	 * if (prog == NULL)
 321	 *   goto out;
 322	 */
 323	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
 324	PPC_BCC_SHORT(COND_EQ, out);
 325
 326	/* goto *(prog->bpf_func + prologue_size); */
 327	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
 328	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
 329			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
 330	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
 331
 332	/* tear down stack, restore NVRs, ... */
 333	bpf_jit_emit_common_epilogue(image, ctx);
 334
 335	EMIT(PPC_RAW_BCTR());
 336
 337	/* out: */
 338	return 0;
 339}
 340
 341/*
 342 * We spill into the redzone always, even if the bpf program has its own stackframe.
 343 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 344 */
 345void bpf_stf_barrier(void);
 346
 347asm (
 348"		.global bpf_stf_barrier		;"
 349"	bpf_stf_barrier:			;"
 350"		std	21,-64(1)		;"
 351"		std	22,-56(1)		;"
 352"		sync				;"
 353"		ld	21,-64(1)		;"
 354"		ld	22,-56(1)		;"
 355"		ori	31,31,0			;"
 356"		.rept 14			;"
 357"		b	1f			;"
 358"	1:					;"
 359"		.endr				;"
 360"		blr				;"
 361);
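/*
 * Note: the -64(r1)/-56(r1) slots spilled above are the local_tmp_var
 * area that bpf_jit_stack_local() hands out in the no-stack-frame case,
 * which is why the stack layout comment at the top of this file insists
 * the top half stays consistent with redzone usage. The barrier body is
 * a spill/sync/reload followed by an ori no-op and 14 taken branches
 * before the blr.
 */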
 362
 363/* Assemble the body code between the prologue & epilogue */
 364int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
 365		       u32 *addrs, int pass, bool extra_pass)
 366{
 367	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
 368	const struct bpf_insn *insn = fp->insnsi;
 369	int flen = fp->len;
 370	int i, ret;
 371
 372	/* Start of epilogue code - will only be valid 2nd pass onwards */
 373	u32 exit_addr = addrs[flen];
 374
 375	for (i = 0; i < flen; i++) {
 376		u32 code = insn[i].code;
 377		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
 378		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
 379		u32 size = BPF_SIZE(code);
 380		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
 381		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
 382		u32 save_reg, ret_reg;
 383		s16 off = insn[i].off;
 384		s32 imm = insn[i].imm;
 385		bool func_addr_fixed;
 386		u64 func_addr;
 387		u64 imm64;
 388		u32 true_cond;
 389		u32 tmp_idx;
 390		int j;
 391
 392		/*
 393		 * addrs[] maps a BPF bytecode address into a real offset from
 394		 * the start of the body code.
 395		 */
 396		addrs[i] = ctx->idx * 4;
 397
 398		/*
 399		 * As an optimization, we note down which non-volatile registers
 400		 * are used so that we can only save/restore those in our
 401		 * prologue and epilogue. We do this here regardless of whether
 402		 * the actual BPF instruction uses src/dst registers or not
 403		 * (for instance, BPF_CALL does not use them). The expectation
 404		 * is that those instructions will have src_reg/dst_reg set to
 405		 * 0. Even otherwise, we just lose some prologue/epilogue
 406		 * optimization but everything else should work without
 407		 * any issues.
 408		 */
 409		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
 410			bpf_set_seen_register(ctx, dst_reg);
 411		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
 412			bpf_set_seen_register(ctx, src_reg);
 413
 414		switch (code) {
 415		/*
 416		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
 417		 */
 418		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
 419		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
 420			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
 421			goto bpf_alu32_trunc;
 422		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
 423		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
 424			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
 425			goto bpf_alu32_trunc;
 426		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
 427		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
 428			if (!imm) {
 429				goto bpf_alu32_trunc;
 430			} else if (imm >= -32768 && imm < 32768) {
 431				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
 432			} else {
 433				PPC_LI32(tmp1_reg, imm);
 434				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
 435			}
 436			goto bpf_alu32_trunc;
 437		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
 438		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
 439			if (!imm) {
 440				goto bpf_alu32_trunc;
 441			} else if (imm > -32768 && imm <= 32768) {
 442				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
 443			} else {
 444				PPC_LI32(tmp1_reg, imm);
 445				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
 446			}
 447			goto bpf_alu32_trunc;
 448		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
 449		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
 450			if (BPF_CLASS(code) == BPF_ALU)
 451				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
 452			else
 453				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
 454			goto bpf_alu32_trunc;
 455		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
 456		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
 457			if (imm >= -32768 && imm < 32768)
 458				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
 459			else {
 460				PPC_LI32(tmp1_reg, imm);
 461				if (BPF_CLASS(code) == BPF_ALU)
 462					EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
 463				else
 464					EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
 465			}
 466			goto bpf_alu32_trunc;
 467		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
 468		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
 469			if (BPF_OP(code) == BPF_MOD) {
 470				EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
 471				EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
 472				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
 473			} else
 474				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
 475			goto bpf_alu32_trunc;
 476		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
 477		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
 478			if (BPF_OP(code) == BPF_MOD) {
 479				EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
 480				EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
 481				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
 482			} else
 483				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
 484			break;
 485		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
 486		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
 487		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
 488		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
 489			if (imm == 0)
 490				return -EINVAL;
 491			if (imm == 1) {
 492				if (BPF_OP(code) == BPF_DIV) {
 493					goto bpf_alu32_trunc;
 494				} else {
 495					EMIT(PPC_RAW_LI(dst_reg, 0));
 496					break;
 497				}
 498			}
 499
 500			PPC_LI32(tmp1_reg, imm);
 501			switch (BPF_CLASS(code)) {
 502			case BPF_ALU:
 503				if (BPF_OP(code) == BPF_MOD) {
 504					EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
 505					EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
 506					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
 507				} else
 508					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
 509				break;
 510			case BPF_ALU64:
 511				if (BPF_OP(code) == BPF_MOD) {
 512					EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
 513					EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
 514					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
 515				} else
 516					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
 517				break;
 518			}
 519			goto bpf_alu32_trunc;
 520		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
 521		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
 522			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
 523			goto bpf_alu32_trunc;
 524
 525		/*
 526		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
 527		 */
 528		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
 529		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
 530			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
 531			goto bpf_alu32_trunc;
 532		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
 533		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
 534			if (!IMM_H(imm))
 535				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
 536			else {
 537				/* Sign-extended */
 538				PPC_LI32(tmp1_reg, imm);
 539				EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
 540			}
 541			goto bpf_alu32_trunc;
 542		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
 543		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
 544			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
 545			goto bpf_alu32_trunc;
 546		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
 547		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
 548			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
 549				/* Sign-extended */
 550				PPC_LI32(tmp1_reg, imm);
 551				EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
 552			} else {
 553				if (IMM_L(imm))
 554					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
 555				if (IMM_H(imm))
 556					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
 557			}
 558			goto bpf_alu32_trunc;
 559		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
 560		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
 561			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
 562			goto bpf_alu32_trunc;
 563		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
 564		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
 565			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
 566				/* Sign-extended */
 567				PPC_LI32(tmp1_reg, imm);
 568				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
 569			} else {
 570				if (IMM_L(imm))
 571					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
 572				if (IMM_H(imm))
 573					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
 574			}
 575			goto bpf_alu32_trunc;
 576		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
 577			/* slw clears top 32 bits */
 578			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
 579			/* skip zero extension move, but set address map. */
 580			if (insn_is_zext(&insn[i + 1]))
 581				addrs[++i] = ctx->idx * 4;
 582			break;
 583		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
 584			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
 585			break;
 586		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
 587			/* with imm 0, we still need to clear top 32 bits */
 588			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
 589			if (insn_is_zext(&insn[i + 1]))
 590				addrs[++i] = ctx->idx * 4;
 591			break;
 592		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
 593			if (imm != 0)
 594				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
 595			break;
 596		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
 597			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
 598			if (insn_is_zext(&insn[i + 1]))
 599				addrs[++i] = ctx->idx * 4;
 600			break;
 601		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
 602			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
 603			break;
 604		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
 605			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
 606			if (insn_is_zext(&insn[i + 1]))
 607				addrs[++i] = ctx->idx * 4;
 608			break;
 609		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
 610			if (imm != 0)
 611				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
 612			break;
 613		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
 614			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
 615			goto bpf_alu32_trunc;
 616		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
 617			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
 618			break;
 619		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
 620			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
 621			goto bpf_alu32_trunc;
 622		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
 623			if (imm != 0)
 624				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
 625			break;
 626
 627		/*
 628		 * MOV
 629		 */
 630		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
 631		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
 632			if (imm == 1) {
 633				/* special mov32 for zext */
 634				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
 635				break;
 636			}
 637			EMIT(PPC_RAW_MR(dst_reg, src_reg));
 638			goto bpf_alu32_trunc;
 639		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
 640		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
 641			PPC_LI32(dst_reg, imm);
 642			if (imm < 0)
 643				goto bpf_alu32_trunc;
 644			else if (insn_is_zext(&insn[i + 1]))
 645				addrs[++i] = ctx->idx * 4;
 646			break;
 647
 648bpf_alu32_trunc:
 649		/* Truncate to 32-bits */
 650		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
 651			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
 652		break;
 653
 654		/*
 655		 * BPF_FROM_BE/LE
 656		 */
 657		case BPF_ALU | BPF_END | BPF_FROM_LE:
 658		case BPF_ALU | BPF_END | BPF_FROM_BE:
 659#ifdef __BIG_ENDIAN__
 660			if (BPF_SRC(code) == BPF_FROM_BE)
 661				goto emit_clear;
 662#else /* !__BIG_ENDIAN__ */
 663			if (BPF_SRC(code) == BPF_FROM_LE)
 664				goto emit_clear;
 665#endif
 666			switch (imm) {
 667			case 16:
 668				/* Rotate 8 bits left & mask with 0x0000ff00 */
 669				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
 670				/* Rotate 8 bits right & insert LSB to reg */
 671				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
 672				/* Move result back to dst_reg */
 673				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
 674				break;
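			/*
			 * Worked example (illustrative): if the low 32 bits of
			 * dst_reg are 0x0000AABB, the rlwinm above leaves
			 * 0x0000BB00 in tmp1_reg and the rlwimi inserts 0xAA
			 * into the low byte, giving 0x0000BBAA -- the halfword
			 * byte-swapped with the upper bits cleared.
			 */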
 675			case 32:
 676				/*
 677				 * Rotate word left by 8 bits:
 678				 * 2 bytes are already in their final position
 679				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
 680				 */
 681				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
 682				/* Rotate 24 bits and insert byte 1 */
 683				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
 684				/* Rotate 24 bits and insert byte 3 */
 685				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
 686				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
 687				break;
 688			case 64:
 689				/* Store the value to stack and then use byte-reverse loads */
 690				EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
 691				EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
 692				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
 693					EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
 694				} else {
 695					EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
 696					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
 697						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
 698					EMIT(PPC_RAW_LI(tmp2_reg, 4));
 699					EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
 700					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 701						EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
 702					EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
 703				}
 704				break;
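				/*
				 * Note (illustrative): the 64-bit swap stages the
				 * value through the JIT's local_tmp_var slot so a
				 * byte-reversed load can do the work: one ldbrx on
				 * ISA 2.06+ CPUs, otherwise two lwbrx loads whose
				 * halves are shifted and OR-ed together.
				 */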
 705			}
 706			break;
 707
 708emit_clear:
 709			switch (imm) {
 710			case 16:
 711				/* zero-extend 16 bits into 64 bits */
 712				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
 713				if (insn_is_zext(&insn[i + 1]))
 714					addrs[++i] = ctx->idx * 4;
 715				break;
 716			case 32:
 717				if (!fp->aux->verifier_zext)
 718					/* zero-extend 32 bits into 64 bits */
 719					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
 720				break;
 721			case 64:
 722				/* nop */
 723				break;
 724			}
 725			break;
 726
 727		/*
 728		 * BPF_ST NOSPEC (speculation barrier)
 729		 */
 730		case BPF_ST | BPF_NOSPEC:
 731			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
 732					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
 733				break;
 734
 735			switch (stf_barrier) {
 736			case STF_BARRIER_EIEIO:
 737				EMIT(PPC_RAW_EIEIO() | 0x02000000);
 738				break;
 739			case STF_BARRIER_SYNC_ORI:
 740				EMIT(PPC_RAW_SYNC());
 741				EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
 742				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
 743				break;
 744			case STF_BARRIER_FALLBACK:
 745				ctx->seen |= SEEN_FUNC;
 746				PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
 747				EMIT(PPC_RAW_MTCTR(_R12));
 748				EMIT(PPC_RAW_BCTRL());
 749				break;
 750			case STF_BARRIER_NONE:
 751				break;
 752			}
 753			break;
 754
 755		/*
 756		 * BPF_ST(X)
 757		 */
 758		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
 759		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
 760			if (BPF_CLASS(code) == BPF_ST) {
 761				EMIT(PPC_RAW_LI(tmp1_reg, imm));
 762				src_reg = tmp1_reg;
 763			}
 764			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
 765			break;
 766		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
 767		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
 768			if (BPF_CLASS(code) == BPF_ST) {
 769				EMIT(PPC_RAW_LI(tmp1_reg, imm));
 770				src_reg = tmp1_reg;
 771			}
 772			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
 773			break;
 774		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
 775		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
 776			if (BPF_CLASS(code) == BPF_ST) {
 777				PPC_LI32(tmp1_reg, imm);
 778				src_reg = tmp1_reg;
 779			}
 780			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
 781			break;
 782		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
 783		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
 784			if (BPF_CLASS(code) == BPF_ST) {
 785				PPC_LI32(tmp1_reg, imm);
 786				src_reg = tmp1_reg;
 787			}
 788			if (off % 4) {
 789				EMIT(PPC_RAW_LI(tmp2_reg, off));
 790				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
 791			} else {
 792				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
 793			}
 794			break;
 795
 796		/*
 797		 * BPF_STX ATOMIC (atomic ops)
 798		 */
 799		case BPF_STX | BPF_ATOMIC | BPF_W:
 800		case BPF_STX | BPF_ATOMIC | BPF_DW:
 801			save_reg = tmp2_reg;
 802			ret_reg = src_reg;
 803
 804			/* Get offset into TMP_REG_1 */
 805			EMIT(PPC_RAW_LI(tmp1_reg, off));
 806			tmp_idx = ctx->idx * 4;
 807			/* load value from memory into TMP_REG_2 */
 808			if (size == BPF_DW)
 809				EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
 810			else
 811				EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
 812
 813			/* Save old value in _R0 */
 814			if (imm & BPF_FETCH)
 815				EMIT(PPC_RAW_MR(_R0, tmp2_reg));
 816
 817			switch (imm) {
 818			case BPF_ADD:
 819			case BPF_ADD | BPF_FETCH:
 820				EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
 821				break;
 822			case BPF_AND:
 823			case BPF_AND | BPF_FETCH:
 824				EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
 825				break;
 826			case BPF_OR:
 827			case BPF_OR | BPF_FETCH:
 828				EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
 829				break;
 830			case BPF_XOR:
 831			case BPF_XOR | BPF_FETCH:
 832				EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
 833				break;
 834			case BPF_CMPXCHG:
 835				/*
 836				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
 837				 * in src_reg for other cases.
 838				 */
 839				ret_reg = bpf_to_ppc(BPF_REG_0);
 840
 841				/* Compare with old value in BPF_R0 */
 842				if (size == BPF_DW)
 843					EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
 844				else
 845					EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
 846				/* Don't set if different from old value */
 847				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
 848				fallthrough;
 849			case BPF_XCHG:
 850				save_reg = src_reg;
 851				break;
 852			default:
 853				pr_err_ratelimited(
 854					"eBPF filter atomic op code %02x (@%d) unsupported\n",
 855					code, i);
 856				return -EOPNOTSUPP;
 857			}
 858
 859			/* store new value */
 860			if (size == BPF_DW)
 861				EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
 862			else
 863				EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
 864			/* we're done if this succeeded */
 865			PPC_BCC_SHORT(COND_NE, tmp_idx);
 866
 867			if (imm & BPF_FETCH) {
 868				EMIT(PPC_RAW_MR(ret_reg, _R0));
 869				/*
 870				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
 871				 * For context, see commit 39491867ace5.
 872				 */
 873				if (size != BPF_DW && imm == BPF_CMPXCHG &&
 874				    insn_is_zext(&insn[i + 1]))
 875					addrs[++i] = ctx->idx * 4;
 876			}
 877			break;
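		/*
		 * Illustrative sketch of the sequence emitted above for
		 * BPF_ADD | BPF_FETCH on a 32-bit operand (tmp regs per the
		 * mapping above):
		 *
		 *	li	r9, off
		 * 0:	lwarx	r10, r9, dst	; load-reserve from dst + off
		 *	mr	r0, r10		; keep old value (BPF_FETCH)
		 *	add	r10, r10, src
		 *	stwcx.	r10, r9, dst	; store-conditional
		 *	bne	0b		; reservation lost, retry
		 *	mr	src, r0		; old value returned in src_reg
		 */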
 878
 879		/*
 880		 * BPF_LDX
 881		 */
 882		/* dst = *(u8 *)(ul) (src + off) */
 883		case BPF_LDX | BPF_MEM | BPF_B:
 884		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
 885		/* dst = *(u16 *)(ul) (src + off) */
 886		case BPF_LDX | BPF_MEM | BPF_H:
 887		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
 888		/* dst = *(u32 *)(ul) (src + off) */
 889		case BPF_LDX | BPF_MEM | BPF_W:
 890		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
 891		/* dst = *(u64 *)(ul) (src + off) */
 892		case BPF_LDX | BPF_MEM | BPF_DW:
 893		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
 894			/*
 895			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
 896			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
 897			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
 898			 * set dst_reg=0 and move on.
 899			 */
 900			if (BPF_MODE(code) == BPF_PROBE_MEM) {
 901				EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
 902				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
 903					PPC_LI64(tmp2_reg, 0x8000000000000000ul);
 904				else /* BOOK3S_64 */
 905					PPC_LI64(tmp2_reg, PAGE_OFFSET);
 906				EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
 907				PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
 908				EMIT(PPC_RAW_LI(dst_reg, 0));
 909				/*
 910				 * Check if 'off' is word aligned for BPF_DW, because
 911				 * we might generate two instructions.
 912				 */
 913				if (BPF_SIZE(code) == BPF_DW && (off & 3))
 914					PPC_JMP((ctx->idx + 3) * 4);
 915				else
 916					PPC_JMP((ctx->idx + 2) * 4);
 917			}
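			/*
			 * Illustrative sketch of the guard emitted above for a
			 * BPF_W probe load (tmp regs per the mapping above):
			 *
			 *	addi	r9, src, off	; effective address
			 *	<PPC_LI64 r10, PAGE_OFFSET>
			 *	cmpld	r9, r10
			 *	bgt	1f		; kernel address: do the load
			 *	li	dst, 0		; else fake a zero result
			 *	b	2f		; ... and skip the load
			 * 1:	lwz	dst, off(src)	; may fault, handled via extable
			 * 2:
			 */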
 918
 919			switch (size) {
 920			case BPF_B:
 921				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
 922				break;
 923			case BPF_H:
 924				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
 925				break;
 926			case BPF_W:
 927				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
 928				break;
 929			case BPF_DW:
 930				if (off % 4) {
 931					EMIT(PPC_RAW_LI(tmp1_reg, off));
 932					EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
 933				} else {
 934					EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
 935				}
 936				break;
 937			}
 938
 939			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
 940				addrs[++i] = ctx->idx * 4;
 941
 942			if (BPF_MODE(code) == BPF_PROBE_MEM) {
 943				ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
 944							    ctx->idx - 1, 4, dst_reg);
 945				if (ret)
 946					return ret;
 947			}
 948			break;
 949
 950		/*
 951		 * Doubleword load
 952		 * 16 byte instruction that uses two 'struct bpf_insn'
 953		 */
 954		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
 955			imm64 = ((u64)(u32) insn[i].imm) |
 956				    (((u64)(u32) insn[i+1].imm) << 32);
 957			tmp_idx = ctx->idx;
 958			PPC_LI64(dst_reg, imm64);
 959			/* padding to allow full 5 instructions for later patching */
 960			if (!image)
 961				for (j = ctx->idx - tmp_idx; j < 5; j++)
 962					EMIT(PPC_RAW_NOP());
 963			/* Adjust for two bpf instructions */
 964			addrs[++i] = ctx->idx * 4;
 965			break;
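			/*
			 * Note (illustrative): PPC_LI64() emits between one and
			 * five instructions depending on the constant (a full
			 * 64-bit value typically needs lis/ori/sldi/oris/ori);
			 * the nop padding in the sizing pass keeps addrs[]
			 * stable so the immediate can be patched later.
			 */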
 966
 967		/*
 968		 * Return/Exit
 969		 */
 970		case BPF_JMP | BPF_EXIT:
 971			/*
 972			 * If this isn't the very last instruction, branch to
 973			 * the epilogue. If we _are_ the last instruction,
 974			 * we'll just fall through to the epilogue.
 975			 */
 976			if (i != flen - 1) {
 977				ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
 978				if (ret)
 979					return ret;
 980			}
 981			/* else fall through to the epilogue */
 982			break;
 983
 984		/*
 985		 * Call kernel helper or bpf function
 986		 */
 987		case BPF_JMP | BPF_CALL:
 988			ctx->seen |= SEEN_FUNC;
 989
 990			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
 991						    &func_addr, &func_addr_fixed);
 992			if (ret < 0)
 993				return ret;
 994
 995			if (func_addr_fixed)
 996				ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
 997			else
 998				ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
 999
1000			if (ret)
1001				return ret;
1002
1003			/* move return value from r3 to BPF_REG_0 */
1004			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
1005			break;
1006
1007		/*
1008		 * Jumps and branches
1009		 */
1010		case BPF_JMP | BPF_JA:
1011			PPC_JMP(addrs[i + 1 + off]);
1012			break;
1013
1014		case BPF_JMP | BPF_JGT | BPF_K:
1015		case BPF_JMP | BPF_JGT | BPF_X:
1016		case BPF_JMP | BPF_JSGT | BPF_K:
1017		case BPF_JMP | BPF_JSGT | BPF_X:
1018		case BPF_JMP32 | BPF_JGT | BPF_K:
1019		case BPF_JMP32 | BPF_JGT | BPF_X:
1020		case BPF_JMP32 | BPF_JSGT | BPF_K:
1021		case BPF_JMP32 | BPF_JSGT | BPF_X:
1022			true_cond = COND_GT;
1023			goto cond_branch;
1024		case BPF_JMP | BPF_JLT | BPF_K:
1025		case BPF_JMP | BPF_JLT | BPF_X:
1026		case BPF_JMP | BPF_JSLT | BPF_K:
1027		case BPF_JMP | BPF_JSLT | BPF_X:
1028		case BPF_JMP32 | BPF_JLT | BPF_K:
1029		case BPF_JMP32 | BPF_JLT | BPF_X:
1030		case BPF_JMP32 | BPF_JSLT | BPF_K:
1031		case BPF_JMP32 | BPF_JSLT | BPF_X:
1032			true_cond = COND_LT;
1033			goto cond_branch;
1034		case BPF_JMP | BPF_JGE | BPF_K:
1035		case BPF_JMP | BPF_JGE | BPF_X:
1036		case BPF_JMP | BPF_JSGE | BPF_K:
1037		case BPF_JMP | BPF_JSGE | BPF_X:
1038		case BPF_JMP32 | BPF_JGE | BPF_K:
1039		case BPF_JMP32 | BPF_JGE | BPF_X:
1040		case BPF_JMP32 | BPF_JSGE | BPF_K:
1041		case BPF_JMP32 | BPF_JSGE | BPF_X:
1042			true_cond = COND_GE;
1043			goto cond_branch;
1044		case BPF_JMP | BPF_JLE | BPF_K:
1045		case BPF_JMP | BPF_JLE | BPF_X:
1046		case BPF_JMP | BPF_JSLE | BPF_K:
1047		case BPF_JMP | BPF_JSLE | BPF_X:
1048		case BPF_JMP32 | BPF_JLE | BPF_K:
1049		case BPF_JMP32 | BPF_JLE | BPF_X:
1050		case BPF_JMP32 | BPF_JSLE | BPF_K:
1051		case BPF_JMP32 | BPF_JSLE | BPF_X:
1052			true_cond = COND_LE;
1053			goto cond_branch;
1054		case BPF_JMP | BPF_JEQ | BPF_K:
1055		case BPF_JMP | BPF_JEQ | BPF_X:
1056		case BPF_JMP32 | BPF_JEQ | BPF_K:
1057		case BPF_JMP32 | BPF_JEQ | BPF_X:
1058			true_cond = COND_EQ;
1059			goto cond_branch;
1060		case BPF_JMP | BPF_JNE | BPF_K:
1061		case BPF_JMP | BPF_JNE | BPF_X:
1062		case BPF_JMP32 | BPF_JNE | BPF_K:
1063		case BPF_JMP32 | BPF_JNE | BPF_X:
1064			true_cond = COND_NE;
1065			goto cond_branch;
1066		case BPF_JMP | BPF_JSET | BPF_K:
1067		case BPF_JMP | BPF_JSET | BPF_X:
1068		case BPF_JMP32 | BPF_JSET | BPF_K:
1069		case BPF_JMP32 | BPF_JSET | BPF_X:
1070			true_cond = COND_NE;
1071			/* Fall through */
1072
1073cond_branch:
1074			switch (code) {
1075			case BPF_JMP | BPF_JGT | BPF_X:
1076			case BPF_JMP | BPF_JLT | BPF_X:
1077			case BPF_JMP | BPF_JGE | BPF_X:
1078			case BPF_JMP | BPF_JLE | BPF_X:
1079			case BPF_JMP | BPF_JEQ | BPF_X:
1080			case BPF_JMP | BPF_JNE | BPF_X:
1081			case BPF_JMP32 | BPF_JGT | BPF_X:
1082			case BPF_JMP32 | BPF_JLT | BPF_X:
1083			case BPF_JMP32 | BPF_JGE | BPF_X:
1084			case BPF_JMP32 | BPF_JLE | BPF_X:
1085			case BPF_JMP32 | BPF_JEQ | BPF_X:
1086			case BPF_JMP32 | BPF_JNE | BPF_X:
1087				/* unsigned comparison */
1088				if (BPF_CLASS(code) == BPF_JMP32)
1089					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
1090				else
1091					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
1092				break;
1093			case BPF_JMP | BPF_JSGT | BPF_X:
1094			case BPF_JMP | BPF_JSLT | BPF_X:
1095			case BPF_JMP | BPF_JSGE | BPF_X:
1096			case BPF_JMP | BPF_JSLE | BPF_X:
1097			case BPF_JMP32 | BPF_JSGT | BPF_X:
1098			case BPF_JMP32 | BPF_JSLT | BPF_X:
1099			case BPF_JMP32 | BPF_JSGE | BPF_X:
1100			case BPF_JMP32 | BPF_JSLE | BPF_X:
1101				/* signed comparison */
1102				if (BPF_CLASS(code) == BPF_JMP32)
1103					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
1104				else
1105					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
1106				break;
1107			case BPF_JMP | BPF_JSET | BPF_X:
1108			case BPF_JMP32 | BPF_JSET | BPF_X:
1109				if (BPF_CLASS(code) == BPF_JMP) {
1110					EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
1111				} else {
1112					EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
1113					EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
1114				}
1115				break;
1116			case BPF_JMP | BPF_JNE | BPF_K:
1117			case BPF_JMP | BPF_JEQ | BPF_K:
1118			case BPF_JMP | BPF_JGT | BPF_K:
1119			case BPF_JMP | BPF_JLT | BPF_K:
1120			case BPF_JMP | BPF_JGE | BPF_K:
1121			case BPF_JMP | BPF_JLE | BPF_K:
1122			case BPF_JMP32 | BPF_JNE | BPF_K:
1123			case BPF_JMP32 | BPF_JEQ | BPF_K:
1124			case BPF_JMP32 | BPF_JGT | BPF_K:
1125			case BPF_JMP32 | BPF_JLT | BPF_K:
1126			case BPF_JMP32 | BPF_JGE | BPF_K:
1127			case BPF_JMP32 | BPF_JLE | BPF_K:
1128			{
1129				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1130
1131				/*
1132				 * Need sign-extended load, so only positive
1133				 * values can be used as imm in cmpldi
1134				 */
1135				if (imm >= 0 && imm < 32768) {
1136					if (is_jmp32)
1137						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
1138					else
1139						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
1140				} else {
1141					/* sign-extending load */
1142					PPC_LI32(tmp1_reg, imm);
1143					/* ... but unsigned comparison */
1144					if (is_jmp32)
1145						EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
1146					else
1147						EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
1148				}
1149				break;
1150			}
1151			case BPF_JMP | BPF_JSGT | BPF_K:
1152			case BPF_JMP | BPF_JSLT | BPF_K:
1153			case BPF_JMP | BPF_JSGE | BPF_K:
1154			case BPF_JMP | BPF_JSLE | BPF_K:
1155			case BPF_JMP32 | BPF_JSGT | BPF_K:
1156			case BPF_JMP32 | BPF_JSLT | BPF_K:
1157			case BPF_JMP32 | BPF_JSGE | BPF_K:
1158			case BPF_JMP32 | BPF_JSLE | BPF_K:
1159			{
1160				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1161
1162				/*
1163				 * signed comparison, so any 16-bit value
1164				 * can be used in cmpdi
1165				 */
1166				if (imm >= -32768 && imm < 32768) {
1167					if (is_jmp32)
1168						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
1169					else
1170						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
1171				} else {
1172					PPC_LI32(tmp1_reg, imm);
1173					if (is_jmp32)
1174						EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
1175					else
1176						EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
1177				}
1178				break;
1179			}
1180			case BPF_JMP | BPF_JSET | BPF_K:
1181			case BPF_JMP32 | BPF_JSET | BPF_K:
1182				/* andi does not sign-extend the immediate */
1183				if (imm >= 0 && imm < 32768)
1184					/* PPC_ANDI is _only/always_ dot-form */
1185					EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
1186				else {
1187					PPC_LI32(tmp1_reg, imm);
1188					if (BPF_CLASS(code) == BPF_JMP) {
1189						EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
1190								     tmp1_reg));
1191					} else {
1192						EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
1193						EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
1194									0, 0, 31));
1195					}
1196				}
1197				break;
1198			}
1199			PPC_BCC(true_cond, addrs[i + 1 + off]);
1200			break;
1201
1202		/*
1203		 * Tail call
1204		 */
1205		case BPF_JMP | BPF_TAIL_CALL:
1206			ctx->seen |= SEEN_TAILCALL;
1207			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
1208			if (ret < 0)
1209				return ret;
1210			break;
1211
1212		default:
1213			/*
1214			 * The filter contains something cruel & unusual.
1215			 * We don't handle it, but also there shouldn't be
1216			 * anything missing from our list.
1217			 */
1218			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1219					code, i);
1220			return -ENOTSUPP;
1221		}
1222	}
1223
1224	/* Set end-of-body-code address for exit. */
1225	addrs[i] = ctx->idx * 4;
1226
1227	return 0;
1228}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * bpf_jit_comp64.c: eBPF JIT compiler
   4 *
   5 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
   6 *		  IBM Corporation
   7 *
   8 * Based on the powerpc classic BPF JIT compiler by Matt Evans
   9 */
  10#include <linux/moduleloader.h>
  11#include <asm/cacheflush.h>
  12#include <asm/asm-compat.h>
  13#include <linux/netdevice.h>
  14#include <linux/filter.h>
  15#include <linux/if_vlan.h>
  16#include <asm/kprobes.h>
  17#include <linux/bpf.h>
  18#include <asm/security_features.h>
  19
  20#include "bpf_jit.h"
  21
  22/*
  23 * Stack layout:
  24 * Ensure the top half (up to local_tmp_var) stays consistent
  25 * with our redzone usage.
  26 *
  27 *		[	prev sp		] <-------------
  28 *		[   nv gpr save area	] 5*8		|
  29 *		[    tail_call_cnt	] 8		|
  30 *		[    local_tmp_var	] 16		|
  31 * fp (r31) -->	[   ebpf stack space	] up to 512	|
  32 *		[     frame header	] 32/112	|
  33 * sp (r1) --->	[    stack pointer	] --------------
  34 */
  35
  36/* for gpr non-volatile registers BPF_REG_6 to 10 */
  37#define BPF_PPC_STACK_SAVE	(5*8)
  38/* for bpf JIT code internal usage */
  39#define BPF_PPC_STACK_LOCALS	24
  40/* stack frame excluding BPF stack, ensure this is quadword aligned */
  41#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
  42				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
  43
  44/* BPF register usage */
  45#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
  46#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)
  47
  48/* BPF to ppc register mappings */
  49void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
  50{
  51	/* function return value */
  52	ctx->b2p[BPF_REG_0] = _R8;
  53	/* function arguments */
  54	ctx->b2p[BPF_REG_1] = _R3;
  55	ctx->b2p[BPF_REG_2] = _R4;
  56	ctx->b2p[BPF_REG_3] = _R5;
  57	ctx->b2p[BPF_REG_4] = _R6;
  58	ctx->b2p[BPF_REG_5] = _R7;
  59	/* non volatile registers */
  60	ctx->b2p[BPF_REG_6] = _R27;
  61	ctx->b2p[BPF_REG_7] = _R28;
  62	ctx->b2p[BPF_REG_8] = _R29;
  63	ctx->b2p[BPF_REG_9] = _R30;
  64	/* frame pointer aka BPF_REG_10 */
  65	ctx->b2p[BPF_REG_FP] = _R31;
  66	/* eBPF jit internal registers */
  67	ctx->b2p[BPF_REG_AX] = _R12;
  68	ctx->b2p[TMP_REG_1] = _R9;
  69	ctx->b2p[TMP_REG_2] = _R10;
  70}
  71
  72/* PPC NVR range -- update this if we ever use NVRs below r27 */
  73#define BPF_PPC_NVR_MIN		_R27
  74
  75static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
  76{
  77	/*
  78	 * We only need a stack frame if:
  79	 * - we call other functions (kernel helpers), or
  80	 * - the bpf program uses its stack area
  81	 * The latter condition is deduced from the usage of BPF_REG_FP
  82	 */
  83	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
  84}
  85
  86/*
  87 * When not setting up our own stackframe, the redzone (288 bytes) usage is:
  88 *
  89 *		[	prev sp		] <-------------
  90 *		[	  ...       	] 		|
  91 * sp (r1) --->	[    stack pointer	] --------------
  92 *		[   nv gpr save area	] 5*8
  93 *		[    tail_call_cnt	] 8
  94 *		[    local_tmp_var	] 16
  95 *		[   unused red zone	] 224
  96 */
  97static int bpf_jit_stack_local(struct codegen_context *ctx)
  98{
  99	if (bpf_has_stack_frame(ctx))
 100		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
 101	else
 102		return -(BPF_PPC_STACK_SAVE + 24);
 103}
 104
 105static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
 106{
 107	return bpf_jit_stack_local(ctx) + 16;
 108}
 109
 110static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
 111{
 112	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
 113		return (bpf_has_stack_frame(ctx) ?
 114			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
 115				- (8 * (32 - reg));
 116
 117	pr_err("BPF JIT is asking about unknown registers");
 118	BUG();
 119}
 120
 121void bpf_jit_realloc_regs(struct codegen_context *ctx)
 122{
 123}
 124
 125void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 126{
 127	int i;
 128
 129	/* Instruction for trampoline attach */
 130	EMIT(PPC_RAW_NOP());
 131
 132#ifndef CONFIG_PPC_KERNEL_PCREL
 133	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
 134		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
 135#endif
 136
 137	/*
 138	 * Initialize tail_call_cnt if we do tail calls.
 139	 * Otherwise, put in NOPs so that it can be skipped when we are
 140	 * invoked through a tail call.
 141	 */
 142	if (ctx->seen & SEEN_TAILCALL) {
 143		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
 144		/* this goes in the redzone */
 145		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
 146	} else {
 147		EMIT(PPC_RAW_NOP());
 148		EMIT(PPC_RAW_NOP());
 149	}
 150
 151	if (bpf_has_stack_frame(ctx)) {
 152		/*
 153		 * We need a stack frame, but we don't necessarily need to
 154		 * save/restore LR unless we call other functions
 155		 */
 156		if (ctx->seen & SEEN_FUNC) {
 157			EMIT(PPC_RAW_MFLR(_R0));
 158			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
 159		}
 160
 161		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
 162	}
 163
 164	/*
 165	 * Back up non-volatile regs -- BPF registers 6-10
 166	 * If we haven't created our own stack frame, we save these
 167	 * in the protected zone below the previous stack frame
 168	 */
 169	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
 170		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
 171			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
 172
 173	/* Setup frame pointer to point to the bpf stack area */
 174	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
 175		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
 176				STACK_FRAME_MIN_SIZE + ctx->stack_size));
 177}
 178
 179static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
 180{
 181	int i;
 182
 183	/* Restore NVRs */
 184	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
 185		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
 186			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
 187
 188	/* Tear down our stack frame */
 189	if (bpf_has_stack_frame(ctx)) {
 190		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
 191		if (ctx->seen & SEEN_FUNC) {
 192			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
 193			EMIT(PPC_RAW_MTLR(_R0));
 194		}
 195	}
 196}
 197
 198void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 199{
 200	bpf_jit_emit_common_epilogue(image, ctx);
 201
 202	/* Move result to r3 */
 203	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
 204
 205	EMIT(PPC_RAW_BLR());
 206
 207	bpf_jit_build_fentry_stubs(image, ctx);
 208}
 209
 210int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
 211{
 212	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
 213	long reladdr;
 214
 215	/* bpf to bpf call, func is not known in the initial pass. Emit 5 nops as a placeholder */
 216	if (!func) {
 217		for (int i = 0; i < 5; i++)
 218			EMIT(PPC_RAW_NOP());
 219		/* elfv1 needs an additional instruction to load addr from descriptor */
 220		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
 221			EMIT(PPC_RAW_NOP());
 222		EMIT(PPC_RAW_MTCTR(_R12));
 223		EMIT(PPC_RAW_BCTRL());
 224		return 0;
 225	}
 226
 227#ifdef CONFIG_PPC_KERNEL_PCREL
 228	reladdr = func_addr - local_paca->kernelbase;
 229
 230	if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
 231		EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
 232		/* Align for subsequent prefix instruction */
 233		if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
 234			EMIT(PPC_RAW_NOP());
 235		/* paddi r12,r12,addr */
 236		EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
 237		EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
 238	} else {
 239		unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx);
 240		bool alignment_needed = !IS_ALIGNED(pc, 8);
 241
 242		reladdr = func_addr - (alignment_needed ? pc + 4 :  pc);
 243
 244		if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
 245			if (alignment_needed)
 246				EMIT(PPC_RAW_NOP());
 247			/* pla r12,addr */
 248			EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
 249			EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
 250		} else {
 251			/* We can clobber r12 */
 252			PPC_LI64(_R12, func);
 253		}
 254	}
 255	EMIT(PPC_RAW_MTCTR(_R12));
 256	EMIT(PPC_RAW_BCTRL());
 257#else
 258	if (core_kernel_text(func_addr)) {
 259		reladdr = func_addr - kernel_toc_addr();
 260		if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
 261			pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
 262			return -ERANGE;
 263		}
 264
 265		EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
 266		EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
 267		EMIT(PPC_RAW_MTCTR(_R12));
 268		EMIT(PPC_RAW_BCTRL());
 269	} else {
 270		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) {
 271			/* func points to the function descriptor */
 272			PPC_LI64(bpf_to_ppc(TMP_REG_2), func);
 273			/* Load actual entry point from function descriptor */
 274			EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
 275			/* ... and move it to CTR */
 276			EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
 277			/*
 278			 * Load TOC from function descriptor at offset 8.
 279			 * We can clobber r2 since we get called through a
 280			 * function pointer (so caller will save/restore r2).
 281			 */
 282			if (is_module_text_address(func_addr))
 283				EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
 284		} else {
 285			PPC_LI64(_R12, func);
 286			EMIT(PPC_RAW_MTCTR(_R12));
 287		}
 288		EMIT(PPC_RAW_BCTRL());
 289		/*
 290		 * Load r2 with kernel TOC as kernel TOC is used if function address falls
 291		 * within core kernel text.
 292		 */
 293		if (is_module_text_address(func_addr))
 294			EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
 295	}
 296#endif
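	/*
	 * Summary of the call strategies above (illustrative): with PCREL,
	 * a prefixed paddi off the paca kernelbase or a pla is used when the
	 * target is within 8GB, falling back to a full PPC_LI64; without
	 * PCREL, core kernel text is reached TOC-relative via addis/addi off
	 * r2, while other targets are loaded with PPC_LI64 (through the
	 * function descriptor on ELFv1) and the kernel TOC is restored into
	 * r2 after the call when the target is module text.
	 */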
 297
 298	return 0;
 299}
 300
 301static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 302{
 303	/*
 304	 * By now, the eBPF program has already set up parameters in r3, r4 and r5
 305	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
 306	 * r4/BPF_REG_2 - pointer to bpf_array
 307	 * r5/BPF_REG_3 - index in bpf_array
 308	 */
 309	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
 310	int b2p_index = bpf_to_ppc(BPF_REG_3);
 311	int bpf_tailcall_prologue_size = 12;
 312
 313	if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
 314		bpf_tailcall_prologue_size += 4; /* skip past the toc load */
 315
 316	/*
 317	 * if (index >= array->map.max_entries)
 318	 *   goto out;
 319	 */
 320	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
 321	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
 322	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
 323	PPC_BCC_SHORT(COND_GE, out);
 324
 325	/*
 326	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
 327	 *   goto out;
 328	 */
 329	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
 330	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
 331	PPC_BCC_SHORT(COND_GE, out);
 332
 333	/*
 334	 * tail_call_cnt++;
 335	 */
 336	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
 337	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
 338
 339	/* prog = array->ptrs[index]; */
 340	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
 341	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
 342	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
 343
 344	/*
 345	 * if (prog == NULL)
 346	 *   goto out;
 347	 */
 348	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
 349	PPC_BCC_SHORT(COND_EQ, out);
 350
 351	/* goto *(prog->bpf_func + prologue_size); */
 352	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
 353	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
 354			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
 355	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
 356
 357	/* tear down stack, restore NVRs, ... */
 358	bpf_jit_emit_common_epilogue(image, ctx);
 359
 360	EMIT(PPC_RAW_BCTR());
 361
 362	/* out: */
 363	return 0;
 364}
 365
 366/*
 367 * We spill into the redzone always, even if the bpf program has its own stackframe.
 368 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 369 */
 370void bpf_stf_barrier(void);
 371
 372asm (
 373"		.global bpf_stf_barrier		;"
 374"	bpf_stf_barrier:			;"
 375"		std	21,-64(1)		;"
 376"		std	22,-56(1)		;"
 377"		sync				;"
 378"		ld	21,-64(1)		;"
 379"		ld	22,-56(1)		;"
 380"		ori	31,31,0			;"
 381"		.rept 14			;"
 382"		b	1f			;"
 383"	1:					;"
 384"		.endr				;"
 385"		blr				;"
 386);
 387
 388/* Assemble the body code between the prologue & epilogue */
 389int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
 390		       u32 *addrs, int pass, bool extra_pass)
 391{
 392	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
 393	const struct bpf_insn *insn = fp->insnsi;
 394	int flen = fp->len;
 395	int i, ret;
 396
 397	/* Start of epilogue code - will only be valid 2nd pass onwards */
 398	u32 exit_addr = addrs[flen];
 399
 400	for (i = 0; i < flen; i++) {
 401		u32 code = insn[i].code;
 402		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
 403		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
 404		u32 size = BPF_SIZE(code);
 405		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
 406		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
 407		u32 save_reg, ret_reg;
 408		s16 off = insn[i].off;
 409		s32 imm = insn[i].imm;
 410		bool func_addr_fixed;
 411		u64 func_addr;
 412		u64 imm64;
 413		u32 true_cond;
 414		u32 tmp_idx;
 415		int j;
 416
 417		/*
 418		 * addrs[] maps a BPF bytecode address into a real offset from
 419		 * the start of the body code.
 420		 */
 421		addrs[i] = ctx->idx * 4;
 422
 423		/*
 424		 * As an optimization, we note down which non-volatile registers
 425		 * are used so that we can only save/restore those in our
 426		 * prologue and epilogue. We do this here regardless of whether
 427		 * the actual BPF instruction uses src/dst registers or not
 428		 * (for instance, BPF_CALL does not use them). The expectation
 429		 * is that those instructions will have src_reg/dst_reg set to
 430		 * 0. Even if they are not, we only lose some prologue/epilogue
 431		 * optimization; everything else still works without
 432		 * any issues.
 433		 */
 434		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
 435			bpf_set_seen_register(ctx, dst_reg);
 436		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
 437			bpf_set_seen_register(ctx, src_reg);
 438
 439		switch (code) {
 440		/*
 441		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
 442		 */
 443		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
 444		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
 445			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
 446			goto bpf_alu32_trunc;
 447		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
 448		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
 449			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
 450			goto bpf_alu32_trunc;
 451		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
 452		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
 453			if (!imm) {
 454				goto bpf_alu32_trunc;
 455			} else if (imm >= -32768 && imm < 32768) {
 456				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
 457			} else {
 458				PPC_LI32(tmp1_reg, imm);
 459				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
 460			}
 461			goto bpf_alu32_trunc;
 462		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
 463		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
 464			if (!imm) {
 465				goto bpf_alu32_trunc;
 466			} else if (imm > -32768 && imm <= 32768) {
 467				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
 468			} else {
 469				PPC_LI32(tmp1_reg, imm);
 470				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
 471			}
 472			goto bpf_alu32_trunc;
 473		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
 474		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
 475			if (BPF_CLASS(code) == BPF_ALU)
 476				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
 477			else
 478				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
 479			goto bpf_alu32_trunc;
 480		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
 481		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
 482			if (imm >= -32768 && imm < 32768)
 483				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
 484			else {
 485				PPC_LI32(tmp1_reg, imm);
 486				if (BPF_CLASS(code) == BPF_ALU)
 487					EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
 488				else
 489					EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
 490			}
 491			goto bpf_alu32_trunc;
 492		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
 493		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
 494			if (BPF_OP(code) == BPF_MOD) {
 495				if (off)
 496					EMIT(PPC_RAW_DIVW(tmp1_reg, dst_reg, src_reg));
 497				else
 498					EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
 499
 500				EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
 501				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
 502			} else
 503				if (off)
 504					EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, src_reg));
 505				else
 506					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
 507			goto bpf_alu32_trunc;
 508		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
 509		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
 510			if (BPF_OP(code) == BPF_MOD) {
 511				if (off)
 512					EMIT(PPC_RAW_DIVD(tmp1_reg, dst_reg, src_reg));
 513				else
 514					EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
 515				EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
 516				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
 517			} else
 518				if (off)
 519					EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, src_reg));
 520				else
 521					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
 522			break;
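    		/*
    		 * For BPF_DIV/BPF_MOD, a non-zero insn->off selects the signed
    		 * variants (divw/divd); otherwise the unsigned divwu/divdu forms are
    		 * used. The modulo result is computed as dst - (dst / src) * src,
    		 * since remainder instructions only appeared in ISA v3.0.
    		 */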
 523		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
 524		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
 525		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
 526		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
 527			if (imm == 0)
 528				return -EINVAL;
 529			if (imm == 1) {
 530				if (BPF_OP(code) == BPF_DIV) {
 531					goto bpf_alu32_trunc;
 532				} else {
 533					EMIT(PPC_RAW_LI(dst_reg, 0));
 534					break;
 535				}
 536			}
 537
 538			PPC_LI32(tmp1_reg, imm);
 539			switch (BPF_CLASS(code)) {
 540			case BPF_ALU:
 541				if (BPF_OP(code) == BPF_MOD) {
 542					if (off)
 543						EMIT(PPC_RAW_DIVW(tmp2_reg, dst_reg, tmp1_reg));
 544					else
 545						EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
 546					EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
 547					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
 548				} else
 549					if (off)
 550						EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, tmp1_reg));
 551					else
 552						EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
 553				break;
 554			case BPF_ALU64:
 555				if (BPF_OP(code) == BPF_MOD) {
 556					if (off)
 557						EMIT(PPC_RAW_DIVD(tmp2_reg, dst_reg, tmp1_reg));
 558					else
 559						EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
 560					EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
 561					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
 562				} else
 563					if (off)
 564						EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, tmp1_reg));
 565					else
 566						EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
 567				break;
 568			}
 569			goto bpf_alu32_trunc;
 570		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
 571		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
 572			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
 573			goto bpf_alu32_trunc;
 574
 575		/*
 576		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
 577		 */
 578		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
 579		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
 580			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
 581			goto bpf_alu32_trunc;
 582		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
 583		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
 584			if (!IMM_H(imm))
 585				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
 586			else {
 587				/* Sign-extended */
 588				PPC_LI32(tmp1_reg, imm);
 589				EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
 590			}
 591			goto bpf_alu32_trunc;
 592		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
 593		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
 594			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
 595			goto bpf_alu32_trunc;
 596		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
 597		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
 598			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
 599				/* Sign-extended */
 600				PPC_LI32(tmp1_reg, imm);
 601				EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
 602			} else {
 603				if (IMM_L(imm))
 604					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
 605				if (IMM_H(imm))
 606					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
 607			}
 608			goto bpf_alu32_trunc;
 609		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
 610		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
 611			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
 612			goto bpf_alu32_trunc;
 613		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
 614		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
 615			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
 616				/* Sign-extended */
 617				PPC_LI32(tmp1_reg, imm);
 618				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
 619			} else {
 620				if (IMM_L(imm))
 621					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
 622				if (IMM_H(imm))
 623					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
 624			}
 625			goto bpf_alu32_trunc;
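    		/*
    		 * ori/oris and xori/xoris take unsigned 16-bit immediates and do not
    		 * sign-extend, so a non-negative (or 32-bit) immediate can be applied
    		 * as separate operations on its low and high halfwords; a negative
    		 * immediate under BPF_ALU64 must instead be materialized sign-extended
    		 * into a temporary and combined with a register-register op.
    		 */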
 626		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
 627			/* slw clears top 32 bits */
 628			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
 629			/* skip zero extension move, but set address map. */
 630			if (insn_is_zext(&insn[i + 1]))
 631				addrs[++i] = ctx->idx * 4;
 632			break;
 633		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
 634			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
 635			break;
 636		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
 637			/* with imm 0, we still need to clear top 32 bits */
 638			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
 639			if (insn_is_zext(&insn[i + 1]))
 640				addrs[++i] = ctx->idx * 4;
 641			break;
 642		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
 643			if (imm != 0)
 644				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
 645			break;
 646		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
 647			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
 648			if (insn_is_zext(&insn[i + 1]))
 649				addrs[++i] = ctx->idx * 4;
 650			break;
 651		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
 652			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
 653			break;
 654		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
 655			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
 656			if (insn_is_zext(&insn[i + 1]))
 657				addrs[++i] = ctx->idx * 4;
 658			break;
 659		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
 660			if (imm != 0)
 661				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
 662			break;
 663		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
 664			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
 665			goto bpf_alu32_trunc;
 666		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
 667			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
 668			break;
 669		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
 670			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
 671			goto bpf_alu32_trunc;
 672		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
 673			if (imm != 0)
 674				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
 675			break;
 676
 677		/*
 678		 * MOV
 679		 */
 680		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
 681		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
 682			if (imm == 1) {
 683				/* special mov32 for zext */
 684				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
 685				break;
 686			} else if (off == 8) {
 687				EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
 688			} else if (off == 16) {
 689				EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
 690			} else if (off == 32) {
 691				EMIT(PPC_RAW_EXTSW(dst_reg, src_reg));
 692			} else if (dst_reg != src_reg)
 693				EMIT(PPC_RAW_MR(dst_reg, src_reg));
 694			goto bpf_alu32_trunc;
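    		/*
    		 * A non-zero insn->off (8/16/32) selects the sign-extending move
    		 * (BPF movsx), mapped to extsb/extsh/extsw above; imm == 1 marks the
    		 * special zero-extending mov32 inserted by the verifier.
    		 */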
 695		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
 696		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
 697			PPC_LI32(dst_reg, imm);
 698			if (imm < 0)
 699				goto bpf_alu32_trunc;
 700			else if (insn_is_zext(&insn[i + 1]))
 701				addrs[++i] = ctx->idx * 4;
 702			break;
 703
 704bpf_alu32_trunc:
 705		/* Truncate to 32-bits */
 706		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
 707			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
 708		break;
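    		/*
    		 * rlwinm dst, dst, 0, 0, 31 keeps the low 32 bits and clears the
    		 * upper 32, giving the zero-extension that 32-bit BPF_ALU results
    		 * require. It is skipped when the verifier has already guaranteed
    		 * zero-extension (fp->aux->verifier_zext).
    		 */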
 709
 710		/*
 711		 * BPF_FROM_BE/LE
 712		 */
 713		case BPF_ALU | BPF_END | BPF_FROM_LE:
 714		case BPF_ALU | BPF_END | BPF_FROM_BE:
 715		case BPF_ALU64 | BPF_END | BPF_FROM_LE:
 716#ifdef __BIG_ENDIAN__
 717			if (BPF_SRC(code) == BPF_FROM_BE)
 718				goto emit_clear;
 719#else /* !__BIG_ENDIAN__ */
 720			if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_LE)
 721				goto emit_clear;
 722#endif
 723			switch (imm) {
 724			case 16:
 725				/* Rotate 8 bits left & mask with 0x0000ff00 */
 726				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
 727				/* Rotate 8 bits right & insert LSB to reg */
 728				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
 729				/* Move result back to dst_reg */
 730				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
 731				break;
 732			case 32:
 733				/*
 734				 * Rotate word left by 8 bits:
 735				 * 2 bytes are already in their final position
 736				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
 737				 */
 738				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
 739				/* Rotate 24 bits and insert byte 1 */
 740				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
 741				/* Rotate 24 bits and insert byte 3 */
 742				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
 743				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
 744				break;
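    			/*
    			 * Illustrative trace for the 32-bit swap: with dst = 0x11223344,
    			 * the rotate-left-by-8 gives 0x22334411; inserting bits 0-7 of
    			 * the rotate-by-24 value (0x44) gives 0x44334411; inserting bits
    			 * 16-23 (0x22) yields the byte-reversed 0x44332211.
    			 */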
 745			case 64:
 746				/* Store the value to stack and then use byte-reverse loads */
 747				EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
 748				EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
 749				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
 750					EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
 751				} else {
 752					EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
 753					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
 754						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
 755					EMIT(PPC_RAW_LI(tmp2_reg, 4));
 756					EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
 757					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 758						EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
 759					EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
 760				}
 761				break;
 762			}
 763			break;
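    		/*
    		 * For the 64-bit swap, ldbrx (byte-reversed doubleword load) is only
    		 * available from ISA 2.06 (the CPU feature checked above); older CPUs
    		 * do two byte-reversed word loads and shift/OR the halves together,
    		 * with the shifted half chosen by the kernel endianness.
    		 */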
 764
 765emit_clear:
 766			switch (imm) {
 767			case 16:
 768				/* zero-extend 16 bits into 64 bits */
 769				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
 770				if (insn_is_zext(&insn[i + 1]))
 771					addrs[++i] = ctx->idx * 4;
 772				break;
 773			case 32:
 774				if (!fp->aux->verifier_zext)
 775					/* zero-extend 32 bits into 64 bits */
 776					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
 777				break;
 778			case 64:
 779				/* nop */
 780				break;
 781			}
 782			break;
 783
 784		/*
 785		 * BPF_ST NOSPEC (speculation barrier)
 786		 */
 787		case BPF_ST | BPF_NOSPEC:
 788			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
 789					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
 790				break;
 791
 792			switch (stf_barrier) {
 793			case STF_BARRIER_EIEIO:
 794				EMIT(PPC_RAW_EIEIO() | 0x02000000);
 795				break;
 796			case STF_BARRIER_SYNC_ORI:
 797				EMIT(PPC_RAW_SYNC());
 798				EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
 799				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
 800				break;
 801			case STF_BARRIER_FALLBACK:
 802				ctx->seen |= SEEN_FUNC;
 803				PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
 804				EMIT(PPC_RAW_MTCTR(_R12));
 805				EMIT(PPC_RAW_BCTRL());
 806				break;
 807			case STF_BARRIER_NONE:
 808				break;
 809			}
 810			break;
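    		/*
    		 * The barrier flavour mirrors the STF barrier mitigation selected at
    		 * boot: an eieio variant, a sync plus a load from the PACA (r13) and
    		 * an ori no-op, or a call through the CTR to the bpf_stf_barrier()
    		 * fallback defined earlier in this file.
    		 */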
 811
 812		/*
 813		 * BPF_ST(X)
 814		 */
 815		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
 816		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
 817			if (BPF_CLASS(code) == BPF_ST) {
 818				EMIT(PPC_RAW_LI(tmp1_reg, imm));
 819				src_reg = tmp1_reg;
 820			}
 821			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
 822			break;
 823		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
 824		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
 825			if (BPF_CLASS(code) == BPF_ST) {
 826				EMIT(PPC_RAW_LI(tmp1_reg, imm));
 827				src_reg = tmp1_reg;
 828			}
 829			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
 830			break;
 831		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
 832		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
 833			if (BPF_CLASS(code) == BPF_ST) {
 834				PPC_LI32(tmp1_reg, imm);
 835				src_reg = tmp1_reg;
 836			}
 837			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
 838			break;
 839		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
 840		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
 841			if (BPF_CLASS(code) == BPF_ST) {
 842				PPC_LI32(tmp1_reg, imm);
 843				src_reg = tmp1_reg;
 844			}
 845			if (off % 4) {
 846				EMIT(PPC_RAW_LI(tmp2_reg, off));
 847				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
 848			} else {
 849				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
 850			}
 851			break;
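    		/*
    		 * std (and ld below) are DS-form instructions whose displacement must
    		 * be a multiple of 4; for other offsets the offset is loaded into a
    		 * register and the indexed form (stdx/ldx) is used instead.
    		 */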
 852
 853		/*
 854		 * BPF_STX ATOMIC (atomic ops)
 855		 */
 856		case BPF_STX | BPF_ATOMIC | BPF_W:
 857		case BPF_STX | BPF_ATOMIC | BPF_DW:
 858			save_reg = tmp2_reg;
 859			ret_reg = src_reg;
 860
 861			/* Get offset into TMP_REG_1 */
 862			EMIT(PPC_RAW_LI(tmp1_reg, off));
 863			/*
 864			 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
 865			 * before and after the operation.
 866			 *
 867			 * This is a requirement in the Linux Kernel Memory Model.
 868			 * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
 869			 */
 870			if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
 871				EMIT(PPC_RAW_SYNC());
 872			tmp_idx = ctx->idx * 4;
 873			/* load value from memory into TMP_REG_2 */
 874			if (size == BPF_DW)
 875				EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
 876			else
 877				EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
 878
 879			/* Save old value in _R0 */
 880			if (imm & BPF_FETCH)
 881				EMIT(PPC_RAW_MR(_R0, tmp2_reg));
 882
 883			switch (imm) {
 884			case BPF_ADD:
 885			case BPF_ADD | BPF_FETCH:
 886				EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
 887				break;
 888			case BPF_AND:
 889			case BPF_AND | BPF_FETCH:
 890				EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
 891				break;
 892			case BPF_OR:
 893			case BPF_OR | BPF_FETCH:
 894				EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
 895				break;
 896			case BPF_XOR:
 897			case BPF_XOR | BPF_FETCH:
 898				EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
 899				break;
 900			case BPF_CMPXCHG:
 901				/*
 902				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
 903				 * in src_reg for other cases.
 904				 */
 905				ret_reg = bpf_to_ppc(BPF_REG_0);
 906
 907				/* Compare with old value in BPF_R0 */
 908				if (size == BPF_DW)
 909					EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
 910				else
 911					EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
 912				/* Don't set if different from old value */
 913				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
 914				fallthrough;
 915			case BPF_XCHG:
 916				save_reg = src_reg;
 917				break;
 918			default:
 919				pr_err_ratelimited(
 920					"eBPF filter atomic op code %02x (@%d) unsupported\n",
 921					code, i);
 922				return -EOPNOTSUPP;
 923			}
 924
 925			/* store new value */
 926			if (size == BPF_DW)
 927				EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
 928			else
 929				EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
 930			/* we're done if this succeeded */
 931			PPC_BCC_SHORT(COND_NE, tmp_idx);
 932
 933			if (imm & BPF_FETCH) {
 934				/* Emit 'sync' to enforce full ordering */
 935				if (IS_ENABLED(CONFIG_SMP))
 936					EMIT(PPC_RAW_SYNC());
 937				EMIT(PPC_RAW_MR(ret_reg, _R0));
 938				/*
 939				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
 940				 * For context, see commit 39491867ace5.
 941				 */
 942				if (size != BPF_DW && imm == BPF_CMPXCHG &&
 943				    insn_is_zext(&insn[i + 1]))
 944					addrs[++i] = ctx->idx * 4;
 945			}
 946			break;
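    		/*
    		 * The sequence above forms the usual larx/stcx. retry loop:
    		 * lwarx/ldarx establishes a reservation, stwcx./stdcx. succeeds only
    		 * if it still holds (setting CR0), and the bne back to tmp_idx
    		 * retries the whole read-modify-write when the store-conditional
    		 * fails.
    		 */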
 947
 948		/*
 949		 * BPF_LDX
 950		 */
 951		/* dst = *(u8 *)(ul) (src + off) */
 952		case BPF_LDX | BPF_MEM | BPF_B:
 953		case BPF_LDX | BPF_MEMSX | BPF_B:
 954		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
 955		case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
 956		/* dst = *(u16 *)(ul) (src + off) */
 957		case BPF_LDX | BPF_MEM | BPF_H:
 958		case BPF_LDX | BPF_MEMSX | BPF_H:
 959		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
 960		case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
 961		/* dst = *(u32 *)(ul) (src + off) */
 962		case BPF_LDX | BPF_MEM | BPF_W:
 963		case BPF_LDX | BPF_MEMSX | BPF_W:
 964		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
 965		case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
 966		/* dst = *(u64 *)(ul) (src + off) */
 967		case BPF_LDX | BPF_MEM | BPF_DW:
 968		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
 969			/*
 970			 * Since a PTR_TO_BTF_ID pointer used with BPF_PROBE_MEM mode can be a
 971			 * valid kernel pointer or NULL, but never a userspace address, execute
 972			 * the BPF_PROBE_MEM load only if addr is a kernel address (see
 973			 * is_kernel_addr()); otherwise set dst_reg = 0 and move on.
 974			 */
 975			if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
 976				EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
 977				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
 978					PPC_LI64(tmp2_reg, 0x8000000000000000ul);
 979				else /* BOOK3S_64 */
 980					PPC_LI64(tmp2_reg, PAGE_OFFSET);
 981				EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
 982				PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
 983				EMIT(PPC_RAW_LI(dst_reg, 0));
 984				/*
 985				 * Check if 'off' is word aligned for BPF_DW, because
 986				 * we might generate two instructions.
 987				 */
 988				if ((BPF_SIZE(code) == BPF_DW ||
 989				    (BPF_SIZE(code) == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX)) &&
 990						(off & 3))
 991					PPC_JMP((ctx->idx + 3) * 4);
 992				else
 993					PPC_JMP((ctx->idx + 2) * 4);
 994			}
 995
 996			if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) {
 997				switch (size) {
 998				case BPF_B:
 999					EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
1000					EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg));
1001					break;
1002				case BPF_H:
1003					EMIT(PPC_RAW_LHA(dst_reg, src_reg, off));
1004					break;
1005				case BPF_W:
1006					EMIT(PPC_RAW_LWA(dst_reg, src_reg, off));
1007					break;
1008				}
1009			} else {
1010				switch (size) {
1011				case BPF_B:
1012					EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
1013					break;
1014				case BPF_H:
1015					EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
1016					break;
1017				case BPF_W:
1018					EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
1019					break;
1020				case BPF_DW:
1021					if (off % 4) {
1022						EMIT(PPC_RAW_LI(tmp1_reg, off));
1023						EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
1024					} else {
1025						EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
1026					}
1027					break;
 1028				}
 1029			}
1030
1031			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
1032				addrs[++i] = ctx->idx * 4;
1033
1034			if (BPF_MODE(code) == BPF_PROBE_MEM) {
1035				ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
1036							    ctx->idx - 1, 4, dst_reg);
1037				if (ret)
1038					return ret;
1039			}
1040			break;
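    		/*
    		 * For BPF_PROBE_MEM, an exception table entry is recorded for the
    		 * load just emitted so that a fault while probing is fixed up (the
    		 * destination register is cleared and execution continues past the
    		 * load) instead of oopsing.
    		 */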
1041
1042		/*
1043		 * Doubleword load
1044		 * 16 byte instruction that uses two 'struct bpf_insn'
1045		 */
1046		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
1047			imm64 = ((u64)(u32) insn[i].imm) |
1048				    (((u64)(u32) insn[i+1].imm) << 32);
1049			tmp_idx = ctx->idx;
1050			PPC_LI64(dst_reg, imm64);
1051			/* padding to allow full 5 instructions for later patching */
1052			if (!image)
1053				for (j = ctx->idx - tmp_idx; j < 5; j++)
1054					EMIT(PPC_RAW_NOP());
1055			/* Adjust for two bpf instructions */
1056			addrs[++i] = ctx->idx * 4;
1057			break;
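    		/*
    		 * PPC_LI64() can emit anywhere from one to five instructions
    		 * depending on the constant; padding with nops on the initial pass
    		 * keeps the code size, and hence all recorded offsets, stable when
    		 * the constant is materialized differently on a later pass.
    		 */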
1058
1059		/*
1060		 * Return/Exit
1061		 */
1062		case BPF_JMP | BPF_EXIT:
1063			/*
1064			 * If this isn't the very last instruction, branch to
1065			 * the epilogue. If we _are_ the last instruction,
1066			 * we'll just fall through to the epilogue.
1067			 */
1068			if (i != flen - 1) {
1069				ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
1070				if (ret)
1071					return ret;
1072			}
1073			/* else fall through to the epilogue */
1074			break;
1075
1076		/*
1077		 * Call kernel helper or bpf function
1078		 */
1079		case BPF_JMP | BPF_CALL:
1080			ctx->seen |= SEEN_FUNC;
1081
1082			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
1083						    &func_addr, &func_addr_fixed);
1084			if (ret < 0)
1085				return ret;
1086
 1087			ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
 1088			if (ret)
1089				return ret;
1090
1091			/* move return value from r3 to BPF_REG_0 */
1092			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
1093			break;
1094
1095		/*
1096		 * Jumps and branches
1097		 */
1098		case BPF_JMP | BPF_JA:
1099			PPC_JMP(addrs[i + 1 + off]);
1100			break;
1101		case BPF_JMP32 | BPF_JA:
1102			PPC_JMP(addrs[i + 1 + imm]);
1103			break;
1104
1105		case BPF_JMP | BPF_JGT | BPF_K:
1106		case BPF_JMP | BPF_JGT | BPF_X:
1107		case BPF_JMP | BPF_JSGT | BPF_K:
1108		case BPF_JMP | BPF_JSGT | BPF_X:
1109		case BPF_JMP32 | BPF_JGT | BPF_K:
1110		case BPF_JMP32 | BPF_JGT | BPF_X:
1111		case BPF_JMP32 | BPF_JSGT | BPF_K:
1112		case BPF_JMP32 | BPF_JSGT | BPF_X:
1113			true_cond = COND_GT;
1114			goto cond_branch;
1115		case BPF_JMP | BPF_JLT | BPF_K:
1116		case BPF_JMP | BPF_JLT | BPF_X:
1117		case BPF_JMP | BPF_JSLT | BPF_K:
1118		case BPF_JMP | BPF_JSLT | BPF_X:
1119		case BPF_JMP32 | BPF_JLT | BPF_K:
1120		case BPF_JMP32 | BPF_JLT | BPF_X:
1121		case BPF_JMP32 | BPF_JSLT | BPF_K:
1122		case BPF_JMP32 | BPF_JSLT | BPF_X:
1123			true_cond = COND_LT;
1124			goto cond_branch;
1125		case BPF_JMP | BPF_JGE | BPF_K:
1126		case BPF_JMP | BPF_JGE | BPF_X:
1127		case BPF_JMP | BPF_JSGE | BPF_K:
1128		case BPF_JMP | BPF_JSGE | BPF_X:
1129		case BPF_JMP32 | BPF_JGE | BPF_K:
1130		case BPF_JMP32 | BPF_JGE | BPF_X:
1131		case BPF_JMP32 | BPF_JSGE | BPF_K:
1132		case BPF_JMP32 | BPF_JSGE | BPF_X:
1133			true_cond = COND_GE;
1134			goto cond_branch;
1135		case BPF_JMP | BPF_JLE | BPF_K:
1136		case BPF_JMP | BPF_JLE | BPF_X:
1137		case BPF_JMP | BPF_JSLE | BPF_K:
1138		case BPF_JMP | BPF_JSLE | BPF_X:
1139		case BPF_JMP32 | BPF_JLE | BPF_K:
1140		case BPF_JMP32 | BPF_JLE | BPF_X:
1141		case BPF_JMP32 | BPF_JSLE | BPF_K:
1142		case BPF_JMP32 | BPF_JSLE | BPF_X:
1143			true_cond = COND_LE;
1144			goto cond_branch;
1145		case BPF_JMP | BPF_JEQ | BPF_K:
1146		case BPF_JMP | BPF_JEQ | BPF_X:
1147		case BPF_JMP32 | BPF_JEQ | BPF_K:
1148		case BPF_JMP32 | BPF_JEQ | BPF_X:
1149			true_cond = COND_EQ;
1150			goto cond_branch;
1151		case BPF_JMP | BPF_JNE | BPF_K:
1152		case BPF_JMP | BPF_JNE | BPF_X:
1153		case BPF_JMP32 | BPF_JNE | BPF_K:
1154		case BPF_JMP32 | BPF_JNE | BPF_X:
1155			true_cond = COND_NE;
1156			goto cond_branch;
1157		case BPF_JMP | BPF_JSET | BPF_K:
1158		case BPF_JMP | BPF_JSET | BPF_X:
1159		case BPF_JMP32 | BPF_JSET | BPF_K:
1160		case BPF_JMP32 | BPF_JSET | BPF_X:
1161			true_cond = COND_NE;
1162			/* Fall through */
1163
1164cond_branch:
1165			switch (code) {
1166			case BPF_JMP | BPF_JGT | BPF_X:
1167			case BPF_JMP | BPF_JLT | BPF_X:
1168			case BPF_JMP | BPF_JGE | BPF_X:
1169			case BPF_JMP | BPF_JLE | BPF_X:
1170			case BPF_JMP | BPF_JEQ | BPF_X:
1171			case BPF_JMP | BPF_JNE | BPF_X:
1172			case BPF_JMP32 | BPF_JGT | BPF_X:
1173			case BPF_JMP32 | BPF_JLT | BPF_X:
1174			case BPF_JMP32 | BPF_JGE | BPF_X:
1175			case BPF_JMP32 | BPF_JLE | BPF_X:
1176			case BPF_JMP32 | BPF_JEQ | BPF_X:
1177			case BPF_JMP32 | BPF_JNE | BPF_X:
1178				/* unsigned comparison */
1179				if (BPF_CLASS(code) == BPF_JMP32)
1180					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
1181				else
1182					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
1183				break;
1184			case BPF_JMP | BPF_JSGT | BPF_X:
1185			case BPF_JMP | BPF_JSLT | BPF_X:
1186			case BPF_JMP | BPF_JSGE | BPF_X:
1187			case BPF_JMP | BPF_JSLE | BPF_X:
1188			case BPF_JMP32 | BPF_JSGT | BPF_X:
1189			case BPF_JMP32 | BPF_JSLT | BPF_X:
1190			case BPF_JMP32 | BPF_JSGE | BPF_X:
1191			case BPF_JMP32 | BPF_JSLE | BPF_X:
1192				/* signed comparison */
1193				if (BPF_CLASS(code) == BPF_JMP32)
1194					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
1195				else
1196					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
1197				break;
1198			case BPF_JMP | BPF_JSET | BPF_X:
1199			case BPF_JMP32 | BPF_JSET | BPF_X:
1200				if (BPF_CLASS(code) == BPF_JMP) {
1201					EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
1202				} else {
1203					EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
1204					EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
1205				}
1206				break;
1207			case BPF_JMP | BPF_JNE | BPF_K:
1208			case BPF_JMP | BPF_JEQ | BPF_K:
1209			case BPF_JMP | BPF_JGT | BPF_K:
1210			case BPF_JMP | BPF_JLT | BPF_K:
1211			case BPF_JMP | BPF_JGE | BPF_K:
1212			case BPF_JMP | BPF_JLE | BPF_K:
1213			case BPF_JMP32 | BPF_JNE | BPF_K:
1214			case BPF_JMP32 | BPF_JEQ | BPF_K:
1215			case BPF_JMP32 | BPF_JGT | BPF_K:
1216			case BPF_JMP32 | BPF_JLT | BPF_K:
1217			case BPF_JMP32 | BPF_JGE | BPF_K:
1218			case BPF_JMP32 | BPF_JLE | BPF_K:
1219			{
1220				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1221
1222				/*
1223				 * Need sign-extended load, so only positive
1224				 * values can be used as imm in cmpldi
1225				 */
1226				if (imm >= 0 && imm < 32768) {
1227					if (is_jmp32)
1228						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
1229					else
1230						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
1231				} else {
1232					/* sign-extending load */
1233					PPC_LI32(tmp1_reg, imm);
1234					/* ... but unsigned comparison */
1235					if (is_jmp32)
1236						EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
1237					else
1238						EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
1239				}
1240				break;
1241			}
1242			case BPF_JMP | BPF_JSGT | BPF_K:
1243			case BPF_JMP | BPF_JSLT | BPF_K:
1244			case BPF_JMP | BPF_JSGE | BPF_K:
1245			case BPF_JMP | BPF_JSLE | BPF_K:
1246			case BPF_JMP32 | BPF_JSGT | BPF_K:
1247			case BPF_JMP32 | BPF_JSLT | BPF_K:
1248			case BPF_JMP32 | BPF_JSGE | BPF_K:
1249			case BPF_JMP32 | BPF_JSLE | BPF_K:
1250			{
1251				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1252
1253				/*
1254				 * signed comparison, so any 16-bit value
1255				 * can be used in cmpdi
1256				 */
1257				if (imm >= -32768 && imm < 32768) {
1258					if (is_jmp32)
1259						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
1260					else
1261						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
1262				} else {
1263					PPC_LI32(tmp1_reg, imm);
1264					if (is_jmp32)
1265						EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
1266					else
1267						EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
1268				}
1269				break;
1270			}
1271			case BPF_JMP | BPF_JSET | BPF_K:
1272			case BPF_JMP32 | BPF_JSET | BPF_K:
1273				/* andi does not sign-extend the immediate */
1274				if (imm >= 0 && imm < 32768)
1275					/* PPC_ANDI is _only/always_ dot-form */
1276					EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
1277				else {
1278					PPC_LI32(tmp1_reg, imm);
1279					if (BPF_CLASS(code) == BPF_JMP) {
1280						EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
1281								     tmp1_reg));
1282					} else {
1283						EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
1284						EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
1285									0, 0, 31));
1286					}
1287				}
1288				break;
1289			}
1290			PPC_BCC(true_cond, addrs[i + 1 + off]);
1291			break;
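    		/*
    		 * Every conditional jump thus becomes a CR0-setting compare
    		 * (cmp/cmpl, or and./rlwinm. for BPF_JSET) followed by a single
    		 * conditional branch on true_cond to the already-resolved target
    		 * address.
    		 */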
1292
1293		/*
1294		 * Tail call
1295		 */
1296		case BPF_JMP | BPF_TAIL_CALL:
1297			ctx->seen |= SEEN_TAILCALL;
1298			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
1299			if (ret < 0)
1300				return ret;
1301			break;
1302
1303		default:
1304			/*
1305			 * The filter contains something cruel & unusual.
1306			 * We don't handle it, but also there shouldn't be
1307			 * anything missing from our list.
1308			 */
1309			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1310					code, i);
1311			return -ENOTSUPP;
1312		}
1313	}
1314
1315	/* Set end-of-body-code address for exit. */
1316	addrs[i] = ctx->idx * 4;
1317
1318	return 0;
1319}