// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit.h"

/*
 * Stack layout:
 * Ensure the top half (up to local_tmp_var) stays consistent
 * with our redzone usage.
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 5*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 16		|
 * fp (r31) -->	[   ebpf stack space	] up to 512	|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 */
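/*
 * For illustration only (assuming ELFv2's 32-byte minimal frame and a
 * 512-byte BPF stack): the prologue allocates BPF_PPC_STACKFRAME +
 * stack_size = (32 + 24 + 40) + 512 = 608 bytes, and r31 is set to
 * r1 + 544, the high end of the BPF stack area, which BPF frame-pointer
 * accesses then index with negative offsets.
 */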

/* for gpr non volatile registers BPF_REG_6 to 10 */
#define BPF_PPC_STACK_SAVE	(5*8)
/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS	24
/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)

/* BPF register usage */
#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)

/* BPF to ppc register mappings */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
	/* function return value */
	ctx->b2p[BPF_REG_0] = _R8;
	/* function arguments */
	ctx->b2p[BPF_REG_1] = _R3;
	ctx->b2p[BPF_REG_2] = _R4;
	ctx->b2p[BPF_REG_3] = _R5;
	ctx->b2p[BPF_REG_4] = _R6;
	ctx->b2p[BPF_REG_5] = _R7;
	/* non volatile registers */
	ctx->b2p[BPF_REG_6] = _R27;
	ctx->b2p[BPF_REG_7] = _R28;
	ctx->b2p[BPF_REG_8] = _R29;
	ctx->b2p[BPF_REG_9] = _R30;
	/* frame pointer aka BPF_REG_10 */
	ctx->b2p[BPF_REG_FP] = _R31;
	/* eBPF jit internal registers */
	ctx->b2p[BPF_REG_AX] = _R12;
	ctx->b2p[TMP_REG_1] = _R9;
	ctx->b2p[TMP_REG_2] = _R10;
}

/* PPC NVR range -- update this if we ever use NVRs below r27 */
#define BPF_PPC_NVR_MIN	_R27

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}

/*
 * When not setting up our own stackframe, the redzone (288 bytes) usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 5*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 16
 *		[   unused red zone	] 224
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 24);
}

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 16;
}
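
/*
 * Worked example of the two helpers above: without a stack frame, locals sit
 * in the redzone at r1 - 64 (i.e. -(40 + 24)) and tail_call_cnt at r1 - 48;
 * with a frame and, say, a 512-byte BPF stack on ELFv2, they move to
 * r1 + 544 and r1 + 560 respectively.
 */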

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers\n");
	BUG();
}

void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}
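
/*
 * Note: register reallocation is intentionally a no-op here -- the 64-bit
 * mapping above already fits comfortably in the available GPRs. The 32-bit
 * JIT is the one that implements this hook to remap registers.
 */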

void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
		/* this goes in the redzone */
		EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
	} else {
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	}

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_MFLR(_R0));
			EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
		}

		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
				  STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
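
/*
 * For illustration, a program that calls helpers and uses its stack would
 * get a prologue along these lines (ELFv2, 512-byte BPF stack; mnemonics
 * are a sketch, not emitted verbatim):
 *
 *	ld	r2,kernel_toc(r13)	; kernel TOC
 *	nop				; tail_call_cnt slot (2 insns)
 *	nop
 *	mflr	r0
 *	std	r0,16(r1)		; save LR
 *	stdu	r1,-608(r1)		; new frame
 *	std	r27..r31,...(r1)	; seen NVRs only
 *	addi	r31,r1,544		; BPF frame pointer
 */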

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
			EMIT(PPC_RAW_MTLR(_R0));
		}
	}
}

void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));

	EMIT(PPC_RAW_BLR());
}

static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
	long reladdr;

	if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
		return -EINVAL;

	reladdr = func_addr - kernel_toc_addr();
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
		return -ERANGE;
	}

	EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
	EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_BCTRL());

	return 0;
}
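
/*
 * The addis/addi pair above materializes r12 = r2 + reladdr, so helpers are
 * reachable only within +/-2GB of the kernel TOC -- hence the range check.
 * PPC_HA() adjusts the high half to compensate for addi sign-extending its
 * low 16-bit operand.
 */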

int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	if (WARN_ON_ONCE(func && is_module_text_address(func)))
		return -EINVAL;

	/* skip past descriptor if elf v1 */
	func += FUNCTION_DESCR_SIZE;

	/* Load function address into r12 */
	PPC_LI64(_R12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to change,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
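	/*
	 * For example, the worst-case PPC_LI64() expansion for an arbitrary
	 * 64-bit constant is the five-instruction sequence:
	 *
	 *	lis	r12,imm[63:48]
	 *	ori	r12,r12,imm[47:32]
	 *	sldi	r12,r12,32
	 *	oris	r12,r12,imm[31:16]
	 *	ori	r12,r12,imm[15:0]
	 *
	 * while a small constant may need just one 'li', leaving four NOPs.
	 */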
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		EMIT(PPC_RAW_NOP());

	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_BCTRL());

	return 0;
}

static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
	int b2p_index = bpf_to_ppc(BPF_REG_3);
	int bpf_tailcall_prologue_size = 8;

	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
		bpf_tailcall_prologue_size += 4; /* skip past the toc load */

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
	EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
	EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * tail_call_cnt++;
	 */
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
	EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
	EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
	PPC_BCC_SHORT(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
	EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
			  FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
	EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}
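
/*
 * Note on the jump target above: the branch lands bpf_tailcall_prologue_size
 * bytes into the callee, past the TOC load and the two tail_call_cnt init
 * slots, so the incremented count carried in the redzone stays live across
 * the call. 'out' is supplied by the caller as the address of the BPF
 * instruction following the tail call.
 */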

/*
 * We spill into the redzone always, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

asm (
"		.global bpf_stf_barrier		;"
"	bpf_stf_barrier:			;"
"		std	21,-64(1)		;"
"		std	22,-56(1)		;"
"		sync				;"
"		ld	21,-64(1)		;"
"		ld	22,-56(1)		;"
"		ori	31,31,0			;"
"		.rept 14			;"
"		b	1f			;"
"	1:					;"
"		.endr				;"
"		blr				;"
);
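
/*
 * The -64/-56 slots above fall within the 16-byte local_tmp_var area (see
 * bpf_jit_stack_local() in the no-frame case), so r21/r22 can be spilled and
 * reloaded without setting up a frame. The sync, the ori 31,31,0 hint and
 * the 14 taken branches make up the store-forwarding (STF) barrier fallback
 * sequence.
 */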

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, int pass)
{
	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
		u32 size = BPF_SIZE(code);
		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
		u32 save_reg, ret_reg;
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u64 imm64;
		u32 true_cond;
		u32 tmp_idx;
		int j;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			} else {
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm > -32768 && imm <= 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
			} else {
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			else
				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				PPC_LI32(tmp1_reg, imm);
				if (BPF_CLASS(code) == BPF_ALU)
					EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
				else
					EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else
				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else
				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			if (imm == 1) {
				if (BPF_OP(code) == BPF_DIV) {
					goto bpf_alu32_trunc;
				} else {
					EMIT(PPC_RAW_LI(dst_reg, 0));
					break;
				}
			}

			PPC_LI32(tmp1_reg, imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
					EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
				} else
					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
					EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
				} else
					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(tmp1_reg, imm);
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			/* skip zero extension move, but set address map. */
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src */
			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (imm == 1) {
				/* special mov32 for zext */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
				break;
			}
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			else if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;

bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
		break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
				/* Rotate 8 bits right & insert LSB to reg */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
				/* Move result back to dst_reg */
				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
				break;
			case 64:
				/* Store the value to stack and then use byte-reverse loads */
				EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
				EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
					EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
				} else {
					EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
					EMIT(PPC_RAW_LI(tmp2_reg, 4));
					EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
						EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
					EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
				}
				break;
			}
			break;
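		/*
		 * Worked example for the 16-bit swap above: with dst = 0x1234,
		 * the rlwinm leaves 0x3400 (0x34 rotated into byte 2), then
		 * the rlwimi inserts 0x12 into the low byte, giving 0x3412.
		 */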

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
				if (insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
				break;
			case 32:
				if (!fp->aux->verifier_zext)
					/* zero-extend 32 bits into 64 bits */
					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
			    !security_ftr_enabled(SEC_FTR_STF_BARRIER))
				break;

			switch (stf_barrier) {
			case STF_BARRIER_EIEIO:
				EMIT(PPC_RAW_EIEIO() | 0x02000000);
				break;
			case STF_BARRIER_SYNC_ORI:
				EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
				break;
			case STF_BARRIER_FALLBACK:
				ctx->seen |= SEEN_FUNC;
				PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
				EMIT(PPC_RAW_MTCTR(_R12));
				EMIT(PPC_RAW_BCTRL());
				break;
			case STF_BARRIER_NONE:
				break;
			}
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(tmp1_reg, imm));
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(tmp1_reg, imm));
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(tmp1_reg, imm);
				src_reg = tmp1_reg;
			}
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(tmp1_reg, imm);
				src_reg = tmp1_reg;
			}
			/* std is DS-form: its displacement must be a multiple of 4 */
			if (off % 4) {
				EMIT(PPC_RAW_LI(tmp2_reg, off));
				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
			} else {
				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
			}
			break;

		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			save_reg = tmp2_reg;
			ret_reg = src_reg;

			/* Get offset into TMP_REG_1 */
			EMIT(PPC_RAW_LI(tmp1_reg, off));
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			if (size == BPF_DW)
				EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
			else
				EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));

			/* Save old value in _R0 */
			if (imm & BPF_FETCH)
				EMIT(PPC_RAW_MR(_R0, tmp2_reg));

			switch (imm) {
			case BPF_ADD:
			case BPF_ADD | BPF_FETCH:
				EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_AND:
			case BPF_AND | BPF_FETCH:
				EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_OR:
			case BPF_OR | BPF_FETCH:
				EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_XOR:
			case BPF_XOR | BPF_FETCH:
				EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
				break;
			case BPF_CMPXCHG:
				/*
				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
				 * in src_reg for other cases.
				 */
				ret_reg = bpf_to_ppc(BPF_REG_0);

				/* Compare with old value in BPF_R0 */
				if (size == BPF_DW)
					EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
				else
					EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
				/* Don't set if different from old value */
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
				fallthrough;
			case BPF_XCHG:
				save_reg = src_reg;
				break;
			default:
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -EOPNOTSUPP;
			}

			/* store new value */
			if (size == BPF_DW)
				EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
			else
				EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);

			if (imm & BPF_FETCH) {
				EMIT(PPC_RAW_MR(ret_reg, _R0));
				/*
				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
				 * For context, see commit 39491867ace5.
				 */
				if (size != BPF_DW && imm == BPF_CMPXCHG &&
				    insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
			}
			break;
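		/*
		 * The sequence above is a classic larx/stcx. retry loop: the
		 * reservation taken by l[dw]arx is lost if another CPU writes
		 * the location, making stcx. fail (CR0.EQ clear) and sending
		 * control back to tmp_idx to re-read and redo the operation.
		 */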

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
			 * set dst_reg=0 and move on.
			 */
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
					PPC_LI64(tmp2_reg, 0x8000000000000000ul);
				else /* BOOK3S_64 */
					PPC_LI64(tmp2_reg, PAGE_OFFSET);
				EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
				PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * Check if 'off' is word aligned for BPF_DW, because
				 * we might generate two instructions.
				 */
				if (BPF_SIZE(code) == BPF_DW && (off & 3))
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
			}

			switch (size) {
			case BPF_B:
				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
				break;
			case BPF_H:
				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
				break;
			case BPF_W:
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
				break;
			case BPF_DW:
				if (off % 4) {
					EMIT(PPC_RAW_LI(tmp1_reg, off));
					EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
				} else {
					EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
				}
				break;
			}

			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;

			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
							    4, dst_reg);
				if (ret)
					return ret;
			}
			break;
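		/*
		 * For PROBE_MEM, the guard above only filters out non-kernel
		 * addresses; a bad kernel pointer is handled via the extable
		 * entry just added -- if the load faults, the fixup clears
		 * dst_reg and execution resumes at the next instruction.
		 */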

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				(((u64)(u32) insn[i+1].imm) << 32);
			tmp_idx = ctx->idx;
			PPC_LI64(dst_reg, imm64);
			/* padding to allow full 5 instructions for later patching */
			for (j = ctx->idx - tmp_idx; j < 5; j++)
				EMIT(PPC_RAW_NOP());
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1) {
				ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
				if (ret)
					return ret;
			}
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], false,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (func_addr_fixed)
				ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
			else
				ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);

			if (ret)
				return ret;

			/* move return value from r3 to BPF_REG_0 */
			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
			case BPF_JMP32 | BPF_JSET | BPF_X:
				if (BPF_CLASS(code) == BPF_JMP) {
					EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
				} else {
					EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
					EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
				}
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(tmp1_reg, imm);
					/* ... but unsigned comparison */
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
				}
				break;
			}
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
				} else {
					PPC_LI32(tmp1_reg, imm);
					if (is_jmp32)
						EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
					else
						EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
				}
				break;
			}
			case BPF_JMP | BPF_JSET | BPF_K:
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
				else {
					PPC_LI32(tmp1_reg, imm);
					if (BPF_CLASS(code) == BPF_JMP) {
						EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
								     tmp1_reg));
					} else {
						EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
						EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
									0, 0, 31));
					}
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					   code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * bpf_jit_comp64.c: eBPF JIT compiler
4 *
5 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
6 * IBM Corporation
7 *
8 * Based on the powerpc classic BPF JIT compiler by Matt Evans
9 */
10#include <linux/moduleloader.h>
11#include <asm/cacheflush.h>
12#include <asm/asm-compat.h>
13#include <linux/netdevice.h>
14#include <linux/filter.h>
15#include <linux/if_vlan.h>
16#include <asm/kprobes.h>
17#include <linux/bpf.h>
18#include <asm/security_features.h>
19
20#include "bpf_jit.h"
21
22/*
23 * Stack layout:
24 * Ensure the top half (upto local_tmp_var) stays consistent
25 * with our redzone usage.
26 *
27 * [ prev sp ] <-------------
28 * [ nv gpr save area ] 5*8 |
29 * [ tail_call_cnt ] 8 |
30 * [ local_tmp_var ] 16 |
31 * fp (r31) --> [ ebpf stack space ] upto 512 |
32 * [ frame header ] 32/112 |
33 * sp (r1) ---> [ stack pointer ] --------------
34 */
35
36/* for gpr non volatile registers BPG_REG_6 to 10 */
37#define BPF_PPC_STACK_SAVE (5*8)
38/* for bpf JIT code internal usage */
39#define BPF_PPC_STACK_LOCALS 24
40/* stack frame excluding BPF stack, ensure this is quadword aligned */
41#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
42 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
43
44/* BPF register usage */
45#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
46#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
47
48/* BPF to ppc register mappings */
49void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
50{
51 /* function return value */
52 ctx->b2p[BPF_REG_0] = _R8;
53 /* function arguments */
54 ctx->b2p[BPF_REG_1] = _R3;
55 ctx->b2p[BPF_REG_2] = _R4;
56 ctx->b2p[BPF_REG_3] = _R5;
57 ctx->b2p[BPF_REG_4] = _R6;
58 ctx->b2p[BPF_REG_5] = _R7;
59 /* non volatile registers */
60 ctx->b2p[BPF_REG_6] = _R27;
61 ctx->b2p[BPF_REG_7] = _R28;
62 ctx->b2p[BPF_REG_8] = _R29;
63 ctx->b2p[BPF_REG_9] = _R30;
64 /* frame pointer aka BPF_REG_10 */
65 ctx->b2p[BPF_REG_FP] = _R31;
66 /* eBPF jit internal registers */
67 ctx->b2p[BPF_REG_AX] = _R12;
68 ctx->b2p[TMP_REG_1] = _R9;
69 ctx->b2p[TMP_REG_2] = _R10;
70}
71
72/* PPC NVR range -- update this if we ever use NVRs below r27 */
73#define BPF_PPC_NVR_MIN _R27
74
75static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
76{
77 /*
78 * We only need a stack frame if:
79 * - we call other functions (kernel helpers), or
80 * - the bpf program uses its stack area
81 * The latter condition is deduced from the usage of BPF_REG_FP
82 */
83 return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
84}
85
86/*
87 * When not setting up our own stackframe, the redzone (288 bytes) usage is:
88 *
89 * [ prev sp ] <-------------
90 * [ ... ] |
91 * sp (r1) ---> [ stack pointer ] --------------
92 * [ nv gpr save area ] 5*8
93 * [ tail_call_cnt ] 8
94 * [ local_tmp_var ] 16
95 * [ unused red zone ] 224
96 */
97static int bpf_jit_stack_local(struct codegen_context *ctx)
98{
99 if (bpf_has_stack_frame(ctx))
100 return STACK_FRAME_MIN_SIZE + ctx->stack_size;
101 else
102 return -(BPF_PPC_STACK_SAVE + 24);
103}
104
105static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
106{
107 return bpf_jit_stack_local(ctx) + 16;
108}
109
110static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
111{
112 if (reg >= BPF_PPC_NVR_MIN && reg < 32)
113 return (bpf_has_stack_frame(ctx) ?
114 (BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
115 - (8 * (32 - reg));
116
117 pr_err("BPF JIT is asking about unknown registers");
118 BUG();
119}
120
121void bpf_jit_realloc_regs(struct codegen_context *ctx)
122{
123}
124
125void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
126{
127 int i;
128
129 /* Instruction for trampoline attach */
130 EMIT(PPC_RAW_NOP());
131
132#ifndef CONFIG_PPC_KERNEL_PCREL
133 if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
134 EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
135#endif
136
137 /*
138 * Initialize tail_call_cnt if we do tail calls.
139 * Otherwise, put in NOPs so that it can be skipped when we are
140 * invoked through a tail call.
141 */
142 if (ctx->seen & SEEN_TAILCALL) {
143 EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
144 /* this goes in the redzone */
145 EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
146 } else {
147 EMIT(PPC_RAW_NOP());
148 EMIT(PPC_RAW_NOP());
149 }
150
151 if (bpf_has_stack_frame(ctx)) {
152 /*
153 * We need a stack frame, but we don't necessarily need to
154 * save/restore LR unless we call other functions
155 */
156 if (ctx->seen & SEEN_FUNC) {
157 EMIT(PPC_RAW_MFLR(_R0));
158 EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
159 }
160
161 EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
162 }
163
164 /*
165 * Back up non-volatile regs -- BPF registers 6-10
166 * If we haven't created our own stack frame, we save these
167 * in the protected zone below the previous stack frame
168 */
169 for (i = BPF_REG_6; i <= BPF_REG_10; i++)
170 if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
171 EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
172
173 /* Setup frame pointer to point to the bpf stack area */
174 if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
175 EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
176 STACK_FRAME_MIN_SIZE + ctx->stack_size));
177}
178
179static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
180{
181 int i;
182
183 /* Restore NVRs */
184 for (i = BPF_REG_6; i <= BPF_REG_10; i++)
185 if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
186 EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
187
188 /* Tear down our stack frame */
189 if (bpf_has_stack_frame(ctx)) {
190 EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
191 if (ctx->seen & SEEN_FUNC) {
192 EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
193 EMIT(PPC_RAW_MTLR(_R0));
194 }
195 }
196}
197
198void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
199{
200 bpf_jit_emit_common_epilogue(image, ctx);
201
202 /* Move result to r3 */
203 EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
204
205 EMIT(PPC_RAW_BLR());
206
207 bpf_jit_build_fentry_stubs(image, ctx);
208}
209
210int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
211{
212 unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
213 long reladdr;
214
215 /* bpf to bpf call, func is not known in the initial pass. Emit 5 nops as a placeholder */
216 if (!func) {
217 for (int i = 0; i < 5; i++)
218 EMIT(PPC_RAW_NOP());
219 /* elfv1 needs an additional instruction to load addr from descriptor */
220 if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
221 EMIT(PPC_RAW_NOP());
222 EMIT(PPC_RAW_MTCTR(_R12));
223 EMIT(PPC_RAW_BCTRL());
224 return 0;
225 }
226
227#ifdef CONFIG_PPC_KERNEL_PCREL
228 reladdr = func_addr - local_paca->kernelbase;
229
230 if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
231 EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
232 /* Align for subsequent prefix instruction */
233 if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
234 EMIT(PPC_RAW_NOP());
235 /* paddi r12,r12,addr */
236 EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
237 EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
238 } else {
239 unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx);
240 bool alignment_needed = !IS_ALIGNED(pc, 8);
241
242 reladdr = func_addr - (alignment_needed ? pc + 4 : pc);
243
244 if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
245 if (alignment_needed)
246 EMIT(PPC_RAW_NOP());
247 /* pla r12,addr */
248 EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
249 EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
250 } else {
251 /* We can clobber r12 */
252 PPC_LI64(_R12, func);
253 }
254 }
255 EMIT(PPC_RAW_MTCTR(_R12));
256 EMIT(PPC_RAW_BCTRL());
257#else
258 if (core_kernel_text(func_addr)) {
259 reladdr = func_addr - kernel_toc_addr();
260 if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
261 pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
262 return -ERANGE;
263 }
264
265 EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
266 EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
267 EMIT(PPC_RAW_MTCTR(_R12));
268 EMIT(PPC_RAW_BCTRL());
269 } else {
270 if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) {
271 /* func points to the function descriptor */
272 PPC_LI64(bpf_to_ppc(TMP_REG_2), func);
273 /* Load actual entry point from function descriptor */
274 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
275 /* ... and move it to CTR */
276 EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
277 /*
278 * Load TOC from function descriptor at offset 8.
279 * We can clobber r2 since we get called through a
280 * function pointer (so caller will save/restore r2).
281 */
282 if (is_module_text_address(func_addr))
283 EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
284 } else {
285 PPC_LI64(_R12, func);
286 EMIT(PPC_RAW_MTCTR(_R12));
287 }
288 EMIT(PPC_RAW_BCTRL());
289 /*
290 * Load r2 with kernel TOC as kernel TOC is used if function address falls
291 * within core kernel text.
292 */
293 if (is_module_text_address(func_addr))
294 EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
295 }
296#endif
297
298 return 0;
299}
300
301static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
302{
303 /*
304 * By now, the eBPF program has already setup parameters in r3, r4 and r5
305 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
306 * r4/BPF_REG_2 - pointer to bpf_array
307 * r5/BPF_REG_3 - index in bpf_array
308 */
309 int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
310 int b2p_index = bpf_to_ppc(BPF_REG_3);
311 int bpf_tailcall_prologue_size = 12;
312
313 if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
314 bpf_tailcall_prologue_size += 4; /* skip past the toc load */
315
316 /*
317 * if (index >= array->map.max_entries)
318 * goto out;
319 */
320 EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
321 EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
322 EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
323 PPC_BCC_SHORT(COND_GE, out);
324
325 /*
326 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
327 * goto out;
328 */
329 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
330 EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
331 PPC_BCC_SHORT(COND_GE, out);
332
333 /*
334 * tail_call_cnt++;
335 */
336 EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
337 EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
338
339 /* prog = array->ptrs[index]; */
340 EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
341 EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
342 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
343
344 /*
345 * if (prog == NULL)
346 * goto out;
347 */
348 EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
349 PPC_BCC_SHORT(COND_EQ, out);
350
351 /* goto *(prog->bpf_func + prologue_size); */
352 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
353 EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
354 FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
355 EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
356
357 /* tear down stack, restore NVRs, ... */
358 bpf_jit_emit_common_epilogue(image, ctx);
359
360 EMIT(PPC_RAW_BCTR());
361
362 /* out: */
363 return 0;
364}
365
366/*
367 * We spill into the redzone always, even if the bpf program has its own stackframe.
368 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
369 */
370void bpf_stf_barrier(void);
371
372asm (
373" .global bpf_stf_barrier ;"
374" bpf_stf_barrier: ;"
375" std 21,-64(1) ;"
376" std 22,-56(1) ;"
377" sync ;"
378" ld 21,-64(1) ;"
379" ld 22,-56(1) ;"
380" ori 31,31,0 ;"
381" .rept 14 ;"
382" b 1f ;"
383" 1: ;"
384" .endr ;"
385" blr ;"
386);
387
388/* Assemble the body code between the prologue & epilogue */
389int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
390 u32 *addrs, int pass, bool extra_pass)
391{
392 enum stf_barrier_type stf_barrier = stf_barrier_type_get();
393 const struct bpf_insn *insn = fp->insnsi;
394 int flen = fp->len;
395 int i, ret;
396
397 /* Start of epilogue code - will only be valid 2nd pass onwards */
398 u32 exit_addr = addrs[flen];
399
400 for (i = 0; i < flen; i++) {
401 u32 code = insn[i].code;
402 u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
403 u32 src_reg = bpf_to_ppc(insn[i].src_reg);
404 u32 size = BPF_SIZE(code);
405 u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
406 u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
407 u32 save_reg, ret_reg;
408 s16 off = insn[i].off;
409 s32 imm = insn[i].imm;
410 bool func_addr_fixed;
411 u64 func_addr;
412 u64 imm64;
413 u32 true_cond;
414 u32 tmp_idx;
415 int j;
416
417 /*
418 * addrs[] maps a BPF bytecode address into a real offset from
419 * the start of the body code.
420 */
421 addrs[i] = ctx->idx * 4;
422
423 /*
424 * As an optimization, we note down which non-volatile registers
425 * are used so that we can only save/restore those in our
426 * prologue and epilogue. We do this here regardless of whether
427 * the actual BPF instruction uses src/dst registers or not
428 * (for instance, BPF_CALL does not use them). The expectation
429 * is that those instructions will have src_reg/dst_reg set to
430 * 0. Even otherwise, we just lose some prologue/epilogue
431 * optimization but everything else should work without
432 * any issues.
433 */
434 if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
435 bpf_set_seen_register(ctx, dst_reg);
436 if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
437 bpf_set_seen_register(ctx, src_reg);
438
439 switch (code) {
440 /*
441 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
442 */
443 case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
444 case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
445 EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
446 goto bpf_alu32_trunc;
447 case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
448 case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
449 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
450 goto bpf_alu32_trunc;
451 case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
452 case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
453 if (!imm) {
454 goto bpf_alu32_trunc;
455 } else if (imm >= -32768 && imm < 32768) {
456 EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
457 } else {
458 PPC_LI32(tmp1_reg, imm);
459 EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
460 }
461 goto bpf_alu32_trunc;
462 case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
463 case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
464 if (!imm) {
465 goto bpf_alu32_trunc;
466 } else if (imm > -32768 && imm <= 32768) {
467 EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
468 } else {
469 PPC_LI32(tmp1_reg, imm);
470 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
471 }
472 goto bpf_alu32_trunc;
473 case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
474 case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
475 if (BPF_CLASS(code) == BPF_ALU)
476 EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
477 else
478 EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
479 goto bpf_alu32_trunc;
480 case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
481 case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
482 if (imm >= -32768 && imm < 32768)
483 EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
484 else {
485 PPC_LI32(tmp1_reg, imm);
486 if (BPF_CLASS(code) == BPF_ALU)
487 EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
488 else
489 EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
490 }
491 goto bpf_alu32_trunc;
492 case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
493 case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
494 if (BPF_OP(code) == BPF_MOD) {
495 if (off)
496 EMIT(PPC_RAW_DIVW(tmp1_reg, dst_reg, src_reg));
497 else
498 EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
499
500 EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
501 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
502 } else
503 if (off)
504 EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, src_reg));
505 else
506 EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
507 goto bpf_alu32_trunc;
508 case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
509 case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
510 if (BPF_OP(code) == BPF_MOD) {
511 if (off)
512 EMIT(PPC_RAW_DIVD(tmp1_reg, dst_reg, src_reg));
513 else
514 EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
515 EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
516 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
517 } else
518 if (off)
519 EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, src_reg));
520 else
521 EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
522 break;
523 case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
524 case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
525 case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
526 case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
		if (imm == 0)
			return -EINVAL;
		if (imm == 1) {
			if (BPF_OP(code) == BPF_DIV) {
				goto bpf_alu32_trunc;
			} else {
				EMIT(PPC_RAW_LI(dst_reg, 0));
				break;
			}
		}

		PPC_LI32(tmp1_reg, imm);
		switch (BPF_CLASS(code)) {
		case BPF_ALU:
			if (BPF_OP(code) == BPF_MOD) {
				if (off)
					EMIT(PPC_RAW_DIVW(tmp2_reg, dst_reg, tmp1_reg));
				else
					EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
				EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (off)
					EMIT(PPC_RAW_DIVW(dst_reg, dst_reg, tmp1_reg));
				else
					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
			}
			break;
		case BPF_ALU64:
			if (BPF_OP(code) == BPF_MOD) {
				if (off)
					EMIT(PPC_RAW_DIVD(tmp2_reg, dst_reg, tmp1_reg));
				else
					EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
				EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
			} else {
				if (off)
					EMIT(PPC_RAW_DIVD(dst_reg, dst_reg, tmp1_reg));
				else
					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
			}
			break;
		}
		goto bpf_alu32_trunc;
	case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
	case BPF_ALU64 | BPF_NEG: /* dst = -dst */
		EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
		goto bpf_alu32_trunc;

	/*
	 * Logical operations: AND/OR/XOR/LSH/RSH/ARSH
	 */
	case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
	case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
		EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
		goto bpf_alu32_trunc;
	case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
	case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
		if (!IMM_H(imm))
			EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
		else {
			/* Sign-extended */
			PPC_LI32(tmp1_reg, imm);
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
		}
		goto bpf_alu32_trunc;
	case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
	case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
		EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
		goto bpf_alu32_trunc;
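	/*
	 * ori/oris zero-extend their 16-bit immediate, so a negative imm
	 * under BPF_ALU64 cannot be assembled from the two halves and must
	 * be loaded sign-extended instead.
	 */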
	case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
	case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
		if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
			/* Sign-extended */
			PPC_LI32(tmp1_reg, imm);
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
		} else {
			if (IMM_L(imm))
				EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
			if (IMM_H(imm))
				EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
		}
		goto bpf_alu32_trunc;
	case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
	case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
		EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
		goto bpf_alu32_trunc;
	case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
	case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
		if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
			/* Sign-extended */
			PPC_LI32(tmp1_reg, imm);
			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
		} else {
			if (IMM_L(imm))
				EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
			if (IMM_H(imm))
				EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
		}
		goto bpf_alu32_trunc;
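	/*
	 * 32-bit shifts already leave the upper 32 bits clear (slw, srw,
	 * slwi and srwi all zero them), so a zero-extension insn that the
	 * verifier placed right after can be skipped -- only its address
	 * map entry needs to be recorded.
	 */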
	case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
		/* slw clears top 32 bits */
		EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
		/* skip the verifier's zero-extension insn, but record its address */
		if (insn_is_zext(&insn[i + 1]))
			addrs[++i] = ctx->idx * 4;
		break;
	case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src */
		EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
		break;
	case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
		/* with imm 0, we still need to clear top 32 bits */
		EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
		if (insn_is_zext(&insn[i + 1]))
			addrs[++i] = ctx->idx * 4;
		break;
	case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
		if (imm != 0)
			EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
		break;
	case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
		EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
		if (insn_is_zext(&insn[i + 1]))
			addrs[++i] = ctx->idx * 4;
		break;
	case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
		EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
		break;
	case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
		EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
		if (insn_is_zext(&insn[i + 1]))
			addrs[++i] = ctx->idx * 4;
		break;
	case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
		if (imm != 0)
			EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
		break;
	case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
		EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
		goto bpf_alu32_trunc;
	case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
		EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
		break;
	case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
		EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
		goto bpf_alu32_trunc;
	case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
		if (imm != 0)
			EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
		break;

	/*
	 * MOV
	 */
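	/*
	 * With BPF cpu v4, a non-zero 'off' (8/16/32) makes MOV a
	 * sign-extending move from that width. imm == 1 on a 32-bit
	 * register-to-register move is the verifier's explicit
	 * zero-extension marker.
	 */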
	case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
	case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
		if (imm == 1) {
			/* special mov32 for zext */
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
			break;
		} else if (off == 8) {
			EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
		} else if (off == 16) {
			EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
		} else if (off == 32) {
			EMIT(PPC_RAW_EXTSW(dst_reg, src_reg));
		} else if (dst_reg != src_reg)
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
		goto bpf_alu32_trunc;
	case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
	case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
		PPC_LI32(dst_reg, imm);
		if (imm < 0)
			goto bpf_alu32_trunc;
		else if (insn_is_zext(&insn[i + 1]))
			addrs[++i] = ctx->idx * 4;
		break;

bpf_alu32_trunc:
		/* Truncate to 32 bits: BPF_ALU results must leave the upper 32 bits zero */
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
		break;

	/*
	 * BPF_FROM_BE/LE
	 */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
	case BPF_ALU64 | BPF_END | BPF_FROM_LE:
#ifdef __BIG_ENDIAN__
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_clear;
#else /* !__BIG_ENDIAN__ */
		if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_LE)
			goto emit_clear;
#endif
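		/* 'imm' carries the width being swapped, in bits: 16, 32 or 64 */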
		switch (imm) {
		case 16:
			/* Rotate 8 bits left & mask with 0x0000ff00 */
			EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
			/* Rotate 8 bits right & insert LSB to reg */
			EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
			/* Move result back to dst_reg */
			EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
			break;
		case 32:
			/*
			 * Rotate word left by 8 bits:
			 * 2 bytes are already in their final position
			 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
			 */
			EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
			/* Rotate 24 bits and insert byte 1 */
			EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
			/* Rotate 24 bits and insert byte 3 */
			EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
			EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
			break;
		case 64:
			/* Store the value to stack and then use byte-reverse loads */
			EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
			EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
			if (cpu_has_feature(CPU_FTR_ARCH_206)) {
				EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
			} else {
				EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
				if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
					EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
				EMIT(PPC_RAW_LI(tmp2_reg, 4));
				EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
				if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
					EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
				EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
			}
			break;
		}
		break;

emit_clear:
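		/*
		 * A byte swap to the host's own endianness degenerates into
		 * a zero-extension of the low 16/32 bits (and a no-op for
		 * the 64-bit case).
		 */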
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case 32:
			if (!fp->aux->verifier_zext)
				/* zero-extend 32 bits into 64 bits */
				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
			break;
		case 64:
			/* nop */
			break;
		}
		break;

	/*
	 * BPF_ST NOSPEC (speculation barrier)
	 */
	case BPF_ST | BPF_NOSPEC:
		if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
		    !security_ftr_enabled(SEC_FTR_STF_BARRIER))
			break;

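		/*
		 * Emit the same store-forwarding barrier sequence that the
		 * kernel selected for its own STF mitigation at boot.
		 */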
		switch (stf_barrier) {
		case STF_BARRIER_EIEIO:
			EMIT(PPC_RAW_EIEIO() | 0x02000000);
			break;
		case STF_BARRIER_SYNC_ORI:
			EMIT(PPC_RAW_SYNC());
			EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
			EMIT(PPC_RAW_ORI(_R31, _R31, 0));
			break;
		case STF_BARRIER_FALLBACK:
			ctx->seen |= SEEN_FUNC;
			PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
			EMIT(PPC_RAW_MTCTR(_R12));
			EMIT(PPC_RAW_BCTRL());
			break;
		case STF_BARRIER_NONE:
			break;
		}
		break;

	/*
	 * BPF_ST(X)
	 */
	case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
	case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
		if (BPF_CLASS(code) == BPF_ST) {
			EMIT(PPC_RAW_LI(tmp1_reg, imm));
			src_reg = tmp1_reg;
		}
		EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
		break;
	case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
	case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
		if (BPF_CLASS(code) == BPF_ST) {
			EMIT(PPC_RAW_LI(tmp1_reg, imm));
			src_reg = tmp1_reg;
		}
		EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
		break;
	case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
	case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
		if (BPF_CLASS(code) == BPF_ST) {
			PPC_LI32(tmp1_reg, imm);
			src_reg = tmp1_reg;
		}
		EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
		break;
	case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
	case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
		if (BPF_CLASS(code) == BPF_ST) {
			PPC_LI32(tmp1_reg, imm);
			src_reg = tmp1_reg;
		}
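		/*
		 * 'std' can only encode word-aligned displacements, so fall
		 * back to the indexed form (stdx) when off is not a
		 * multiple of 4.
		 */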
		if (off % 4) {
			EMIT(PPC_RAW_LI(tmp2_reg, off));
			EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
		} else {
			EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
		}
		break;

	/*
	 * BPF_STX ATOMIC (atomic ops)
	 */
	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		save_reg = tmp2_reg;
		ret_reg = src_reg;

		/* Get offset into TMP_REG_1 */
		EMIT(PPC_RAW_LI(tmp1_reg, off));
		/*
		 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
		 * before and after the operation.
		 *
		 * This is a requirement in the Linux Kernel Memory Model.
		 * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
		 */
		if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
			EMIT(PPC_RAW_SYNC());
		tmp_idx = ctx->idx * 4;
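		/*
		 * tmp_idx marks the larx below: a failed stcx. branches back
		 * here to retry the whole load-reserve/store-conditional
		 * sequence.
		 */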
		/* load value from memory into TMP_REG_2 */
		if (size == BPF_DW)
			EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
		else
			EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));

		/* Save old value in _R0 */
		if (imm & BPF_FETCH)
			EMIT(PPC_RAW_MR(_R0, tmp2_reg));

		switch (imm) {
		case BPF_ADD:
		case BPF_ADD | BPF_FETCH:
			EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
			break;
		case BPF_AND:
		case BPF_AND | BPF_FETCH:
			EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
			break;
		case BPF_OR:
		case BPF_OR | BPF_FETCH:
			EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
			break;
		case BPF_XOR:
		case BPF_XOR | BPF_FETCH:
			EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
			break;
		case BPF_CMPXCHG:
			/*
			 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
			 * in src_reg for other cases.
			 */
			ret_reg = bpf_to_ppc(BPF_REG_0);

			/* Compare with old value in BPF_REG_0 */
			if (size == BPF_DW)
				EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
			else
				EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
			/* Don't set if different from old value */
			PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
			fallthrough;
		case BPF_XCHG:
			save_reg = src_reg;
			break;
		default:
			pr_err_ratelimited(
				"eBPF filter atomic op code %02x (@%d) unsupported\n",
				code, i);
			return -EOPNOTSUPP;
		}

		/* store new value */
		if (size == BPF_DW)
			EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
		else
			EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
		/* retry from the larx if stcx. failed (the reservation was lost) */
		PPC_BCC_SHORT(COND_NE, tmp_idx);

		if (imm & BPF_FETCH) {
			/* Emit 'sync' to enforce full ordering */
			if (IS_ENABLED(CONFIG_SMP))
				EMIT(PPC_RAW_SYNC());
			EMIT(PPC_RAW_MR(ret_reg, _R0));
			/*
			 * Skip unnecessary zero-extension for 32-bit cmpxchg.
			 * For context, see commit 39491867ace5.
			 */
			if (size != BPF_DW && imm == BPF_CMPXCHG &&
			    insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
		}
		break;

	/*
	 * BPF_LDX
	 */
	/* dst = *(u8 *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEMSX | BPF_B:
	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
	case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
	/* dst = *(u16 *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEMSX | BPF_H:
	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
	case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
	/* dst = *(u32 *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEMSX | BPF_W:
	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
	case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
	/* dst = *(u64 *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
		/*
		 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
		 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
		 * load only if addr is kernel address (see is_kernel_addr()), otherwise
		 * set dst_reg=0 and move on.
		 */
		if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
			EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
			if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
				PPC_LI64(tmp2_reg, 0x8000000000000000ul);
			else /* BOOK3S_64 */
				PPC_LI64(tmp2_reg, PAGE_OFFSET);
			EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
			PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
			EMIT(PPC_RAW_LI(dst_reg, 0));
			/*
			 * Check if 'off' is word aligned for BPF_DW, because
			 * we might generate two instructions.
			 */
			if ((BPF_SIZE(code) == BPF_DW ||
			     (BPF_SIZE(code) == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX)) &&
			    (off & 3))
				PPC_JMP((ctx->idx + 3) * 4);
			else
				PPC_JMP((ctx->idx + 2) * 4);
		}

		if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) {
			switch (size) {
			case BPF_B:
				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
				EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg));
				break;
			case BPF_H:
				EMIT(PPC_RAW_LHA(dst_reg, src_reg, off));
				break;
			case BPF_W:
				EMIT(PPC_RAW_LWA(dst_reg, src_reg, off));
				break;
			}
		} else {
			switch (size) {
			case BPF_B:
				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
				break;
			case BPF_H:
				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
				break;
			case BPF_W:
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
				break;
			case BPF_DW:
				if (off % 4) {
					EMIT(PPC_RAW_LI(tmp1_reg, off));
					EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
				} else {
					EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
				}
				break;
			}
		}

		if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
			addrs[++i] = ctx->idx * 4;

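		/*
		 * For PROBE_MEM, record the just-emitted load in the extable
		 * so that a fault on a bad kernel address is fixed up by
		 * zeroing dst_reg and continuing after the load.
		 */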
		if (BPF_MODE(code) == BPF_PROBE_MEM) {
			ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
						    ctx->idx - 1, 4, dst_reg);
			if (ret)
				return ret;
		}
		break;

	/*
	 * Doubleword load
	 * 16 byte instruction that uses two 'struct bpf_insn'
	 */
	case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
		imm64 = ((u64)(u32) insn[i].imm) |
			(((u64)(u32) insn[i+1].imm) << 32);
		tmp_idx = ctx->idx;
		PPC_LI64(dst_reg, imm64);
		/* padding to allow full 5 instructions for later patching */
		if (!image)
			for (j = ctx->idx - tmp_idx; j < 5; j++)
				EMIT(PPC_RAW_NOP());
		/* Adjust for two bpf instructions */
		addrs[++i] = ctx->idx * 4;
		break;

	/*
	 * Return/Exit
	 */
	case BPF_JMP | BPF_EXIT:
		/*
		 * If this isn't the very last instruction, branch to
		 * the epilogue. If we _are_ the last instruction,
		 * we'll just fall through to the epilogue.
		 */
		if (i != flen - 1) {
			ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
			if (ret)
				return ret;
		}
		/* else fall through to the epilogue */
		break;

	/*
	 * Call kernel helper or bpf function
	 */
	case BPF_JMP | BPF_CALL:
		ctx->seen |= SEEN_FUNC;

		ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;

		ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
		if (ret)
			return ret;

		/* move return value from r3 to BPF_REG_0 */
		EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
		break;

	/*
	 * Jumps and branches
	 */
	case BPF_JMP | BPF_JA:
		PPC_JMP(addrs[i + 1 + off]);
		break;
	case BPF_JMP32 | BPF_JA:
		PPC_JMP(addrs[i + 1 + imm]);
		break;

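	/*
	 * Conditional branches are emitted in two steps: each case below
	 * only picks the condition code to branch on, then cond_branch
	 * emits the appropriate compare followed by a conditional branch
	 * to the BPF target.
	 */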
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
		true_cond = COND_GT;
		goto cond_branch;
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
		true_cond = COND_LT;
		goto cond_branch;
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
		true_cond = COND_GE;
		goto cond_branch;
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		true_cond = COND_LE;
		goto cond_branch;
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
		true_cond = COND_EQ;
		goto cond_branch;
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_X:
		true_cond = COND_NE;
		goto cond_branch;
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		true_cond = COND_NE;
		/* Fall through */

cond_branch:
		switch (code) {
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			/* unsigned comparison */
			if (BPF_CLASS(code) == BPF_JMP32)
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
			else
				EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
			break;
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* signed comparison */
			if (BPF_CLASS(code) == BPF_JMP32)
				EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
			else
				EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
			break;
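		/* JSET branches when (dst & src) != 0; the dot-form ops set CR0 */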
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			if (BPF_CLASS(code) == BPF_JMP) {
				EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
			} else {
				EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
			}
			break;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		{
			bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

			/*
			 * Need sign-extended load, so only positive
			 * values can be used as imm in cmpldi
			 */
			if (imm >= 0 && imm < 32768) {
				if (is_jmp32)
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				else
					EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
			} else {
				/* sign-extending load */
				PPC_LI32(tmp1_reg, imm);
				/* ... but unsigned comparison */
				if (is_jmp32)
					EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
				else
					EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
			}
			break;
		}
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		{
			bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

			/*
			 * signed comparison, so any 16-bit value
			 * can be used in cmpdi
			 */
			if (imm >= -32768 && imm < 32768) {
				if (is_jmp32)
					EMIT(PPC_RAW_CMPWI(dst_reg, imm));
				else
					EMIT(PPC_RAW_CMPDI(dst_reg, imm));
			} else {
				PPC_LI32(tmp1_reg, imm);
				if (is_jmp32)
					EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
				else
					EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
			}
			break;
		}
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* andi does not sign-extend the immediate */
			if (imm >= 0 && imm < 32768)
				/* PPC_ANDI is _only/always_ dot-form */
				EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
			else {
				PPC_LI32(tmp1_reg, imm);
				if (BPF_CLASS(code) == BPF_JMP) {
					EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
							     tmp1_reg));
				} else {
					EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
					EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
								0, 0, 31));
				}
			}
			break;
		}
		PPC_BCC(true_cond, addrs[i + 1 + off]);
		break;

	/*
	 * Tail call
	 */
	case BPF_JMP | BPF_TAIL_CALL:
		ctx->seen |= SEEN_TAILCALL;
		ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
		if (ret < 0)
			return ret;
		break;

	default:
		/*
		 * The filter contains something cruel & unusual.
		 * We don't handle it, but also there shouldn't be
		 * anything missing from our list.
		 */
		pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
				   code, i);
		return -ENOTSUPP;
	}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}