1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * BPF JIT compiler for ARM64
4 *
5 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
6 */
7
8#define pr_fmt(fmt) "bpf_jit: " fmt
9
10#include <linux/bitfield.h>
11#include <linux/bpf.h>
12#include <linux/filter.h>
13#include <linux/memory.h>
14#include <linux/printk.h>
15#include <linux/slab.h>
16
17#include <asm/asm-extable.h>
18#include <asm/byteorder.h>
19#include <asm/cacheflush.h>
20#include <asm/debug-monitors.h>
21#include <asm/insn.h>
22#include <asm/patching.h>
23#include <asm/set_memory.h>
24
25#include "bpf_jit.h"
26
27#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
28#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
29#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
30#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
31#define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
32
33#define check_imm(bits, imm) do { \
34 if ((((imm) > 0) && ((imm) >> (bits))) || \
35 (((imm) < 0) && (~(imm) >> (bits)))) { \
36 pr_info("[%2d] imm=%d(0x%x) out of range\n", \
37 i, imm, imm); \
38 return -EINVAL; \
39 } \
40} while (0)
41#define check_imm19(imm) check_imm(19, imm)
42#define check_imm26(imm) check_imm(26, imm)
43
44/* Map BPF registers to A64 registers */
45static const int bpf2a64[] = {
46 /* return value from in-kernel function, and exit value from eBPF */
47 [BPF_REG_0] = A64_R(7),
48 /* arguments from eBPF program to in-kernel function */
49 [BPF_REG_1] = A64_R(0),
50 [BPF_REG_2] = A64_R(1),
51 [BPF_REG_3] = A64_R(2),
52 [BPF_REG_4] = A64_R(3),
53 [BPF_REG_5] = A64_R(4),
54 /* callee saved registers that in-kernel function will preserve */
55 [BPF_REG_6] = A64_R(19),
56 [BPF_REG_7] = A64_R(20),
57 [BPF_REG_8] = A64_R(21),
58 [BPF_REG_9] = A64_R(22),
59 /* read-only frame pointer to access stack */
60 [BPF_REG_FP] = A64_R(25),
61 /* temporary registers for BPF JIT */
62 [TMP_REG_1] = A64_R(10),
63 [TMP_REG_2] = A64_R(11),
64 [TMP_REG_3] = A64_R(12),
65 /* tail_call_cnt */
66 [TCALL_CNT] = A64_R(26),
67 /* temporary register for blinding constants */
68 [BPF_REG_AX] = A64_R(9),
69 [FP_BOTTOM] = A64_R(27),
70};
71
72struct jit_ctx {
73 const struct bpf_prog *prog;
74 int idx;
75 int epilogue_offset;
76 int *offset;
77 int exentry_idx;
78 __le32 *image;
79 u32 stack_size;
80 int fpb_offset;
81};
82
83struct bpf_plt {
84 u32 insn_ldr; /* load target */
85 u32 insn_br; /* branch to target */
86 u64 target; /* target value */
87};
88
89#define PLT_TARGET_SIZE sizeof_field(struct bpf_plt, target)
90#define PLT_TARGET_OFFSET offsetof(struct bpf_plt, target)
91
92static inline void emit(const u32 insn, struct jit_ctx *ctx)
93{
94 if (ctx->image != NULL)
95 ctx->image[ctx->idx] = cpu_to_le32(insn);
96
97 ctx->idx++;
98}
99
100static inline void emit_a64_mov_i(const int is64, const int reg,
101 const s32 val, struct jit_ctx *ctx)
102{
103 u16 hi = val >> 16;
104 u16 lo = val & 0xffff;
105
106 if (hi & 0x8000) {
107 if (hi == 0xffff) {
108 emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
109 } else {
110 emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
111 if (lo != 0xffff)
112 emit(A64_MOVK(is64, reg, lo, 0), ctx);
113 }
114 } else {
115 emit(A64_MOVZ(is64, reg, lo, 0), ctx);
116 if (hi)
117 emit(A64_MOVK(is64, reg, hi, 16), ctx);
118 }
119}
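/*
 * Illustrative expansions of the 32-bit immediate mover above (not part of
 * the generated code): for val = 0x12345678 the high half is positive, so
 * it emits "movz reg, #0x5678" followed by "movk reg, #0x1234, lsl #16";
 * for val = 0xffff8000 the high half is 0xffff, so a single
 * "movn reg, #0x7fff" is enough.
 */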
120
121static int i64_i16_blocks(const u64 val, bool inverse)
122{
123 return (((val >> 0) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
124 (((val >> 16) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
125 (((val >> 32) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
126 (((val >> 48) & 0xffff) != (inverse ? 0xffff : 0x0000));
127}
128
129static inline void emit_a64_mov_i64(const int reg, const u64 val,
130 struct jit_ctx *ctx)
131{
132 u64 nrm_tmp = val, rev_tmp = ~val;
133 bool inverse;
134 int shift;
135
136 if (!(nrm_tmp >> 32))
137 return emit_a64_mov_i(0, reg, (u32)val, ctx);
138
139 inverse = i64_i16_blocks(nrm_tmp, true) < i64_i16_blocks(nrm_tmp, false);
140 shift = max(round_down((inverse ? (fls64(rev_tmp) - 1) :
141 (fls64(nrm_tmp) - 1)), 16), 0);
142 if (inverse)
143 emit(A64_MOVN(1, reg, (rev_tmp >> shift) & 0xffff, shift), ctx);
144 else
145 emit(A64_MOVZ(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
146 shift -= 16;
147 while (shift >= 0) {
148 if (((nrm_tmp >> shift) & 0xffff) != (inverse ? 0xffff : 0x0000))
149 emit(A64_MOVK(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
150 shift -= 16;
151 }
152}
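/*
 * Illustrative example: for val = 0xffffffff00001234, only two 16-bit
 * blocks differ from 0xffff while three differ from 0x0000, so the
 * inverted form is chosen and the value is built with
 * "movn reg, #0xffff, lsl #16" followed by "movk reg, #0x1234".
 */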
153
154static inline void emit_bti(u32 insn, struct jit_ctx *ctx)
155{
156 if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
157 emit(insn, ctx);
158}
159
160/*
161 * Kernel addresses in the vmalloc space use at most 48 bits, and the
162 * remaining bits are guaranteed to be 0x1. So we can compose the address
163 * with a fixed length movn/movk/movk sequence.
164 */
165static inline void emit_addr_mov_i64(const int reg, const u64 val,
166 struct jit_ctx *ctx)
167{
168 u64 tmp = val;
169 int shift = 0;
170
171 emit(A64_MOVN(1, reg, ~tmp & 0xffff, shift), ctx);
172 while (shift < 32) {
173 tmp >>= 16;
174 shift += 16;
175 emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
176 }
177}
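/*
 * Illustrative example: a vmalloc address such as 0xffff800012345678 is
 * built with the fixed three-instruction sequence
 * "movn reg, #0xa987" (yielding 0xffffffffffff5678),
 * "movk reg, #0x1234, lsl #16" and "movk reg, #0x8000, lsl #32";
 * the top 16 bits are left as ones by the movn.
 */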
178
179static inline void emit_call(u64 target, struct jit_ctx *ctx)
180{
181 u8 tmp = bpf2a64[TMP_REG_1];
182
183 emit_addr_mov_i64(tmp, target, ctx);
184 emit(A64_BLR(tmp), ctx);
185}
186
187static inline int bpf2a64_offset(int bpf_insn, int off,
188 const struct jit_ctx *ctx)
189{
190 /* BPF JMP offset is relative to the next instruction */
191 bpf_insn++;
192 /*
193 * Whereas arm64 branch instructions encode the offset
194 * from the branch itself, so we must subtract 1 from the
195 * instruction offset.
196 */
197 return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
198}
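/*
 * Worked example (illustrative): for a conditional jump at BPF index 5
 * with off = 3, the target is BPF instruction 5 + 1 + 3 = 9. The A64
 * branch is the last instruction emitted for BPF insn 5, i.e. at index
 * ctx->offset[6] - 1, so the value returned above is
 * ctx->offset[9] - (ctx->offset[6] - 1).
 */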
199
200static void jit_fill_hole(void *area, unsigned int size)
201{
202 __le32 *ptr;
203 /* We are guaranteed to have aligned memory. */
204 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
205 *ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
206}
207
208static inline int epilogue_offset(const struct jit_ctx *ctx)
209{
210 int to = ctx->epilogue_offset;
211 int from = ctx->idx;
212
213 return to - from;
214}
215
216static bool is_addsub_imm(u32 imm)
217{
218 /* Either imm12 or shifted imm12. */
219 return !(imm & ~0xfff) || !(imm & ~0xfff000);
220}
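/*
 * Examples (illustrative): 0xfff fits in a plain imm12 and 0x123000 in a
 * shifted imm12, so both are accepted; 0x1001 straddles the two encodings
 * and is rejected, forcing the caller to materialize it in a register.
 */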
221
222/*
223 * There are 3 types of AArch64 LDR/STR (immediate) instruction:
224 * Post-index, Pre-index, Unsigned offset.
225 *
226 * For BPF ldr/str, the "unsigned offset" type is sufficient.
227 *
228 * "Unsigned offset" type LDR(immediate) format:
229 *
230 * 3 2 1 0
231 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
232 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
233 * |x x|1 1 1 0 0 1 0 1| imm12 | Rn | Rt |
234 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
235 * scale
236 *
237 * "Unsigned offset" type STR(immediate) format:
238 * 3 2 1 0
239 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
240 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
241 * |x x|1 1 1 0 0 1 0 0| imm12 | Rn | Rt |
242 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
243 * scale
244 *
245 * The offset is calculated from imm12 and scale in the following way:
246 *
247 * offset = (u64)imm12 << scale
248 */
249static bool is_lsi_offset(int offset, int scale)
250{
251 if (offset < 0)
252 return false;
253
254 if (offset > (0xFFF << scale))
255 return false;
256
257 if (offset & ((1 << scale) - 1))
258 return false;
259
260 return true;
261}
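/*
 * Example (illustrative): for a 64-bit access (scale = 3) the offset must
 * be a non-negative multiple of 8 no larger than 0xfff << 3 = 32760;
 * 32760 is therefore accepted, while 32761 (misaligned) and 32768 (out of
 * range) make the JIT fall back to a register-offset load/store.
 */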
262
263/* generated prologue:
264 * bti c // if CONFIG_ARM64_BTI_KERNEL
265 * mov x9, lr
266 * nop // POKE_OFFSET
267 * paciasp // if CONFIG_ARM64_PTR_AUTH_KERNEL
268 * stp x29, lr, [sp, #-16]!
269 * mov x29, sp
270 * stp x19, x20, [sp, #-16]!
271 * stp x21, x22, [sp, #-16]!
272 * stp x25, x26, [sp, #-16]!
273 * stp x27, x28, [sp, #-16]!
274 * mov x25, sp
275 * mov tcc, #0
276 * // PROLOGUE_OFFSET
277 */
278
279#define BTI_INSNS (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) ? 1 : 0)
280#define PAC_INSNS (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) ? 1 : 0)
281
282/* Offset of nop instruction in bpf prog entry to be poked */
283#define POKE_OFFSET (BTI_INSNS + 1)
284
285/* Tail call offset to jump into */
286#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)
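/*
 * Illustrative count: with BTI and PAC both enabled this evaluates to
 * 1 + 2 + 1 + 8 = 12, i.e. "bti c", "mov x9, lr", "nop", "paciasp", the
 * five stp's, the two moves of SP into x29/x25 and "mov tcc, #0". A tail
 * call branches to bpf_func + 4 * PROLOGUE_OFFSET, landing just past the
 * tail_call_cnt initialization (on the "bti j" pad when BTI is enabled).
 */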
287
288static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
289{
290 const struct bpf_prog *prog = ctx->prog;
291 const bool is_main_prog = prog->aux->func_idx == 0;
292 const u8 r6 = bpf2a64[BPF_REG_6];
293 const u8 r7 = bpf2a64[BPF_REG_7];
294 const u8 r8 = bpf2a64[BPF_REG_8];
295 const u8 r9 = bpf2a64[BPF_REG_9];
296 const u8 fp = bpf2a64[BPF_REG_FP];
297 const u8 tcc = bpf2a64[TCALL_CNT];
298 const u8 fpb = bpf2a64[FP_BOTTOM];
299 const int idx0 = ctx->idx;
300 int cur_offset;
301
302 /*
303 * BPF prog stack layout
304 *
305 * high
306 * original A64_SP => 0:+-----+ BPF prologue
307 * |FP/LR|
308 * current A64_FP => -16:+-----+
309 * | ... | callee saved registers
310 * BPF fp register => -64:+-----+ <= (BPF_FP)
311 * | |
312 * | ... | BPF prog stack
313 * | |
314 * +-----+ <= (BPF_FP - prog->aux->stack_depth)
315 * |RSVD | padding
316 * current A64_SP => +-----+ <= (BPF_FP - ctx->stack_size)
317 * | |
318 * | ... | Function call stack
319 * | |
320 * +-----+
321 * low
322 *
323 */
324
325 emit_bti(A64_BTI_C, ctx);
326
327 emit(A64_MOV(1, A64_R(9), A64_LR), ctx);
328 emit(A64_NOP, ctx);
329
330 /* Sign lr */
331 if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
332 emit(A64_PACIASP, ctx);
333
334 /* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
335 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
336 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
337
338 /* Save callee-saved registers */
339 emit(A64_PUSH(r6, r7, A64_SP), ctx);
340 emit(A64_PUSH(r8, r9, A64_SP), ctx);
341 emit(A64_PUSH(fp, tcc, A64_SP), ctx);
342 emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx);
343
344 /* Set up BPF prog stack base register */
345 emit(A64_MOV(1, fp, A64_SP), ctx);
346
347 if (!ebpf_from_cbpf && is_main_prog) {
348 /* Initialize tail_call_cnt */
349 emit(A64_MOVZ(1, tcc, 0, 0), ctx);
350
351 cur_offset = ctx->idx - idx0;
352 if (cur_offset != PROLOGUE_OFFSET) {
353 pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
354 cur_offset, PROLOGUE_OFFSET);
355 return -1;
356 }
357
358 /* BTI landing pad for the tail call, done with a BR */
359 emit_bti(A64_BTI_J, ctx);
360 }
361
362 emit(A64_SUB_I(1, fpb, fp, ctx->fpb_offset), ctx);
363
364 /* Stack must be a multiple of 16B */
365 ctx->stack_size = round_up(prog->aux->stack_depth, 16);
366
367 /* Set up function call stack */
368 emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
369 return 0;
370}
371
372static int out_offset = -1; /* initialized on the first pass of build_body() */
373static int emit_bpf_tail_call(struct jit_ctx *ctx)
374{
375 /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
376 const u8 r2 = bpf2a64[BPF_REG_2];
377 const u8 r3 = bpf2a64[BPF_REG_3];
378
379 const u8 tmp = bpf2a64[TMP_REG_1];
380 const u8 prg = bpf2a64[TMP_REG_2];
381 const u8 tcc = bpf2a64[TCALL_CNT];
382 const int idx0 = ctx->idx;
383#define cur_offset (ctx->idx - idx0)
384#define jmp_offset (out_offset - (cur_offset))
385 size_t off;
386
387 /* if (index >= array->map.max_entries)
388 * goto out;
389 */
390 off = offsetof(struct bpf_array, map.max_entries);
391 emit_a64_mov_i64(tmp, off, ctx);
392 emit(A64_LDR32(tmp, r2, tmp), ctx);
393 emit(A64_MOV(0, r3, r3), ctx);
394 emit(A64_CMP(0, r3, tmp), ctx);
395 emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
396
397 /*
398 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
399 * goto out;
400 * tail_call_cnt++;
401 */
402 emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
403 emit(A64_CMP(1, tcc, tmp), ctx);
404 emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
405 emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
406
407 /* prog = array->ptrs[index];
408 * if (prog == NULL)
409 * goto out;
410 */
411 off = offsetof(struct bpf_array, ptrs);
412 emit_a64_mov_i64(tmp, off, ctx);
413 emit(A64_ADD(1, tmp, r2, tmp), ctx);
414 emit(A64_LSL(1, prg, r3, 3), ctx);
415 emit(A64_LDR64(prg, tmp, prg), ctx);
416 emit(A64_CBZ(1, prg, jmp_offset), ctx);
417
418 /* goto *(prog->bpf_func + prologue_offset); */
419 off = offsetof(struct bpf_prog, bpf_func);
420 emit_a64_mov_i64(tmp, off, ctx);
421 emit(A64_LDR64(tmp, prg, tmp), ctx);
422 emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
423 emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
424 emit(A64_BR(tmp), ctx);
425
426 /* out: */
427 if (out_offset == -1)
428 out_offset = cur_offset;
429 if (cur_offset != out_offset) {
430 pr_err_once("tail_call out_offset = %d, expected %d!\n",
431 cur_offset, out_offset);
432 return -1;
433 }
434 return 0;
435#undef cur_offset
436#undef jmp_offset
437}
438
439#ifdef CONFIG_ARM64_LSE_ATOMICS
440static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
441{
442 const u8 code = insn->code;
443 const u8 dst = bpf2a64[insn->dst_reg];
444 const u8 src = bpf2a64[insn->src_reg];
445 const u8 tmp = bpf2a64[TMP_REG_1];
446 const u8 tmp2 = bpf2a64[TMP_REG_2];
447 const bool isdw = BPF_SIZE(code) == BPF_DW;
448 const s16 off = insn->off;
449 u8 reg;
450
451 if (!off) {
452 reg = dst;
453 } else {
454 emit_a64_mov_i(1, tmp, off, ctx);
455 emit(A64_ADD(1, tmp, tmp, dst), ctx);
456 reg = tmp;
457 }
458
459 switch (insn->imm) {
460 /* lock *(u32/u64 *)(dst_reg + off) <op>= src_reg */
461 case BPF_ADD:
462 emit(A64_STADD(isdw, reg, src), ctx);
463 break;
464 case BPF_AND:
465 emit(A64_MVN(isdw, tmp2, src), ctx);
466 emit(A64_STCLR(isdw, reg, tmp2), ctx);
467 break;
468 case BPF_OR:
469 emit(A64_STSET(isdw, reg, src), ctx);
470 break;
471 case BPF_XOR:
472 emit(A64_STEOR(isdw, reg, src), ctx);
473 break;
474 /* src_reg = atomic_fetch_<op>(dst_reg + off, src_reg) */
475 case BPF_ADD | BPF_FETCH:
476 emit(A64_LDADDAL(isdw, src, reg, src), ctx);
477 break;
478 case BPF_AND | BPF_FETCH:
479 emit(A64_MVN(isdw, tmp2, src), ctx);
480 emit(A64_LDCLRAL(isdw, src, reg, tmp2), ctx);
481 break;
482 case BPF_OR | BPF_FETCH:
483 emit(A64_LDSETAL(isdw, src, reg, src), ctx);
484 break;
485 case BPF_XOR | BPF_FETCH:
486 emit(A64_LDEORAL(isdw, src, reg, src), ctx);
487 break;
488 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
489 case BPF_XCHG:
490 emit(A64_SWPAL(isdw, src, reg, src), ctx);
491 break;
492 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
493 case BPF_CMPXCHG:
494 emit(A64_CASAL(isdw, src, reg, bpf2a64[BPF_REG_0]), ctx);
495 break;
496 default:
497 pr_err_once("unknown atomic op code %02x\n", insn->imm);
498 return -EINVAL;
499 }
500
501 return 0;
502}
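/*
 * Illustrative mapping: a BPF "lock *(u64 *)(dst + off) += src" becomes a
 * single "stadd src, [reg]", while the BPF_FETCH variant becomes
 * "ldaddal src, src, [reg]", leaving the old memory value in src as the
 * BPF semantics require.
 */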
503#else
504static inline int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
505{
506 return -EINVAL;
507}
508#endif
509
510static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
511{
512 const u8 code = insn->code;
513 const u8 dst = bpf2a64[insn->dst_reg];
514 const u8 src = bpf2a64[insn->src_reg];
515 const u8 tmp = bpf2a64[TMP_REG_1];
516 const u8 tmp2 = bpf2a64[TMP_REG_2];
517 const u8 tmp3 = bpf2a64[TMP_REG_3];
518 const int i = insn - ctx->prog->insnsi;
519 const s32 imm = insn->imm;
520 const s16 off = insn->off;
521 const bool isdw = BPF_SIZE(code) == BPF_DW;
522 u8 reg;
523 s32 jmp_offset;
524
525 if (!off) {
526 reg = dst;
527 } else {
528 emit_a64_mov_i(1, tmp, off, ctx);
529 emit(A64_ADD(1, tmp, tmp, dst), ctx);
530 reg = tmp;
531 }
532
533 if (imm == BPF_ADD || imm == BPF_AND ||
534 imm == BPF_OR || imm == BPF_XOR) {
535 /* lock *(u32/u64 *)(dst_reg + off) <op>= src_reg */
536 emit(A64_LDXR(isdw, tmp2, reg), ctx);
537 if (imm == BPF_ADD)
538 emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
539 else if (imm == BPF_AND)
540 emit(A64_AND(isdw, tmp2, tmp2, src), ctx);
541 else if (imm == BPF_OR)
542 emit(A64_ORR(isdw, tmp2, tmp2, src), ctx);
543 else
544 emit(A64_EOR(isdw, tmp2, tmp2, src), ctx);
545 emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
546 jmp_offset = -3;
547 check_imm19(jmp_offset);
548 emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
549 } else if (imm == (BPF_ADD | BPF_FETCH) ||
550 imm == (BPF_AND | BPF_FETCH) ||
551 imm == (BPF_OR | BPF_FETCH) ||
552 imm == (BPF_XOR | BPF_FETCH)) {
553 /* src_reg = atomic_fetch_<op>(dst_reg + off, src_reg) */
554 const u8 ax = bpf2a64[BPF_REG_AX];
555
556 emit(A64_MOV(isdw, ax, src), ctx);
557 emit(A64_LDXR(isdw, src, reg), ctx);
558 if (imm == (BPF_ADD | BPF_FETCH))
559 emit(A64_ADD(isdw, tmp2, src, ax), ctx);
560 else if (imm == (BPF_AND | BPF_FETCH))
561 emit(A64_AND(isdw, tmp2, src, ax), ctx);
562 else if (imm == (BPF_OR | BPF_FETCH))
563 emit(A64_ORR(isdw, tmp2, src, ax), ctx);
564 else
565 emit(A64_EOR(isdw, tmp2, src, ax), ctx);
566 emit(A64_STLXR(isdw, tmp2, reg, tmp3), ctx);
567 jmp_offset = -3;
568 check_imm19(jmp_offset);
569 emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
570 emit(A64_DMB_ISH, ctx);
571 } else if (imm == BPF_XCHG) {
572 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
573 emit(A64_MOV(isdw, tmp2, src), ctx);
574 emit(A64_LDXR(isdw, src, reg), ctx);
575 emit(A64_STLXR(isdw, tmp2, reg, tmp3), ctx);
576 jmp_offset = -2;
577 check_imm19(jmp_offset);
578 emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
579 emit(A64_DMB_ISH, ctx);
580 } else if (imm == BPF_CMPXCHG) {
581 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
582 const u8 r0 = bpf2a64[BPF_REG_0];
583
584 emit(A64_MOV(isdw, tmp2, r0), ctx);
585 emit(A64_LDXR(isdw, r0, reg), ctx);
586 emit(A64_EOR(isdw, tmp3, r0, tmp2), ctx);
587 jmp_offset = 4;
588 check_imm19(jmp_offset);
589 emit(A64_CBNZ(isdw, tmp3, jmp_offset), ctx);
590 emit(A64_STLXR(isdw, src, reg, tmp3), ctx);
591 jmp_offset = -4;
592 check_imm19(jmp_offset);
593 emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
594 emit(A64_DMB_ISH, ctx);
595 } else {
596 pr_err_once("unknown atomic op code %02x\n", imm);
597 return -EINVAL;
598 }
599
600 return 0;
601}
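/*
 * Illustrative expansion: without LSE, "lock *(u64 *)(dst + off) += src"
 * becomes the retry loop "1: ldxr tmp2, [reg]; add tmp2, tmp2, src;
 * stxr w<tmp3>, tmp2, [reg]; cbnz w<tmp3>, 1b", where the cbnz offset of
 * -3 instructions points back at the ldxr.
 */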
602
603void dummy_tramp(void);
604
605asm (
606" .pushsection .text, \"ax\", @progbits\n"
607" .global dummy_tramp\n"
608" .type dummy_tramp, %function\n"
609"dummy_tramp:"
610#if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)
611" bti j\n" /* dummy_tramp is called via "br x10" */
612#endif
613" mov x10, x30\n"
614" mov x30, x9\n"
615" ret x10\n"
616" .size dummy_tramp, .-dummy_tramp\n"
617" .popsection\n"
618);
619
620/* build a plt initialized like this:
621 *
622 * plt:
623 * ldr tmp, target
624 * br tmp
625 * target:
626 * .quad dummy_tramp
627 *
628 * when a long jump trampoline is attached, target is filled with the
629 * trampoline address, and when the trampoline is removed, target is
630 * restored to dummy_tramp address.
631 */
632static void build_plt(struct jit_ctx *ctx)
633{
634 const u8 tmp = bpf2a64[TMP_REG_1];
635 struct bpf_plt *plt = NULL;
636
637 /* make sure target is 64-bit aligned */
638 if ((ctx->idx + PLT_TARGET_OFFSET / AARCH64_INSN_SIZE) % 2)
639 emit(A64_NOP, ctx);
640
641 plt = (struct bpf_plt *)(ctx->image + ctx->idx);
642 /* plt is called via bl, no BTI needed here */
643 emit(A64_LDR64LIT(tmp, 2 * AARCH64_INSN_SIZE), ctx);
644 emit(A64_BR(tmp), ctx);
645
646 if (ctx->image)
647 plt->target = (u64)&dummy_tramp;
648}
649
650static void build_epilogue(struct jit_ctx *ctx)
651{
652 const u8 r0 = bpf2a64[BPF_REG_0];
653 const u8 r6 = bpf2a64[BPF_REG_6];
654 const u8 r7 = bpf2a64[BPF_REG_7];
655 const u8 r8 = bpf2a64[BPF_REG_8];
656 const u8 r9 = bpf2a64[BPF_REG_9];
657 const u8 fp = bpf2a64[BPF_REG_FP];
658 const u8 fpb = bpf2a64[FP_BOTTOM];
659
660 /* We're done with BPF stack */
661 emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
662
663 /* Restore x27 and x28 */
664 emit(A64_POP(fpb, A64_R(28), A64_SP), ctx);
665 /* Restore fp (x25) and x26 */
666 emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
667
668 /* Restore callee-saved registers */
669 emit(A64_POP(r8, r9, A64_SP), ctx);
670 emit(A64_POP(r6, r7, A64_SP), ctx);
671
672 /* Restore FP/LR registers */
673 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
674
675 /* Set return value */
676 emit(A64_MOV(1, A64_R(0), r0), ctx);
677
678 /* Authenticate lr */
679 if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
680 emit(A64_AUTIASP, ctx);
681
682 emit(A64_RET(A64_LR), ctx);
683}
684
685#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
686#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
687
688bool ex_handler_bpf(const struct exception_table_entry *ex,
689 struct pt_regs *regs)
690{
691 off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
692 int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
693
694 regs->regs[dst_reg] = 0;
695 regs->pc = (unsigned long)&ex->fixup - offset;
696 return true;
697}
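/*
 * Illustrative decoding: add_exception_handler() below stores
 * &ex->fixup - (load_pc + 4) in the offset field, so the subtraction above
 * recovers load_pc + 4; execution resumes at the instruction following the
 * faulting load with the destination register cleared to zero.
 */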
698
699/* For accesses to BTF pointers, add an entry to the exception table */
700static int add_exception_handler(const struct bpf_insn *insn,
701 struct jit_ctx *ctx,
702 int dst_reg)
703{
704 off_t offset;
705 unsigned long pc;
706 struct exception_table_entry *ex;
707
708 if (!ctx->image)
709 /* First pass */
710 return 0;
711
712 if (BPF_MODE(insn->code) != BPF_PROBE_MEM)
713 return 0;
714
715 if (!ctx->prog->aux->extable ||
716 WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries))
717 return -EINVAL;
718
719 ex = &ctx->prog->aux->extable[ctx->exentry_idx];
720 pc = (unsigned long)&ctx->image[ctx->idx - 1];
721
722 offset = pc - (long)&ex->insn;
723 if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
724 return -ERANGE;
725 ex->insn = offset;
726
727 /*
728 * Since the extable follows the program, the fixup offset is always
729 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
730 * to keep things simple, and put the destination register in the upper
731 * bits. We don't need to worry about buildtime or runtime sort
732 * modifying the upper bits because the table is already sorted, and
733 * isn't part of the main exception table.
734 */
735 offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
736 if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
737 return -ERANGE;
738
739 ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
740 FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
741
742 ex->type = EX_TYPE_BPF;
743
744 ctx->exentry_idx++;
745 return 0;
746}
747
748/* JITs an eBPF instruction.
749 * Returns:
750 * 0 - successfully JITed an 8-byte eBPF instruction.
751 * >0 - successfully JITed a 16-byte eBPF instruction.
752 * <0 - failed to JIT.
753 */
754static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
755 bool extra_pass)
756{
757 const u8 code = insn->code;
758 const u8 dst = bpf2a64[insn->dst_reg];
759 const u8 src = bpf2a64[insn->src_reg];
760 const u8 tmp = bpf2a64[TMP_REG_1];
761 const u8 tmp2 = bpf2a64[TMP_REG_2];
762 const u8 fp = bpf2a64[BPF_REG_FP];
763 const u8 fpb = bpf2a64[FP_BOTTOM];
764 const s16 off = insn->off;
765 const s32 imm = insn->imm;
766 const int i = insn - ctx->prog->insnsi;
767 const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
768 BPF_CLASS(code) == BPF_JMP;
769 u8 jmp_cond;
770 s32 jmp_offset;
771 u32 a64_insn;
772 u8 src_adj;
773 u8 dst_adj;
774 int off_adj;
775 int ret;
776
777 switch (code) {
778 /* dst = src */
779 case BPF_ALU | BPF_MOV | BPF_X:
780 case BPF_ALU64 | BPF_MOV | BPF_X:
781 emit(A64_MOV(is64, dst, src), ctx);
782 break;
783 /* dst = dst OP src */
784 case BPF_ALU | BPF_ADD | BPF_X:
785 case BPF_ALU64 | BPF_ADD | BPF_X:
786 emit(A64_ADD(is64, dst, dst, src), ctx);
787 break;
788 case BPF_ALU | BPF_SUB | BPF_X:
789 case BPF_ALU64 | BPF_SUB | BPF_X:
790 emit(A64_SUB(is64, dst, dst, src), ctx);
791 break;
792 case BPF_ALU | BPF_AND | BPF_X:
793 case BPF_ALU64 | BPF_AND | BPF_X:
794 emit(A64_AND(is64, dst, dst, src), ctx);
795 break;
796 case BPF_ALU | BPF_OR | BPF_X:
797 case BPF_ALU64 | BPF_OR | BPF_X:
798 emit(A64_ORR(is64, dst, dst, src), ctx);
799 break;
800 case BPF_ALU | BPF_XOR | BPF_X:
801 case BPF_ALU64 | BPF_XOR | BPF_X:
802 emit(A64_EOR(is64, dst, dst, src), ctx);
803 break;
804 case BPF_ALU | BPF_MUL | BPF_X:
805 case BPF_ALU64 | BPF_MUL | BPF_X:
806 emit(A64_MUL(is64, dst, dst, src), ctx);
807 break;
808 case BPF_ALU | BPF_DIV | BPF_X:
809 case BPF_ALU64 | BPF_DIV | BPF_X:
810 emit(A64_UDIV(is64, dst, dst, src), ctx);
811 break;
812 case BPF_ALU | BPF_MOD | BPF_X:
813 case BPF_ALU64 | BPF_MOD | BPF_X:
814 emit(A64_UDIV(is64, tmp, dst, src), ctx);
815 emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
816 break;
817 case BPF_ALU | BPF_LSH | BPF_X:
818 case BPF_ALU64 | BPF_LSH | BPF_X:
819 emit(A64_LSLV(is64, dst, dst, src), ctx);
820 break;
821 case BPF_ALU | BPF_RSH | BPF_X:
822 case BPF_ALU64 | BPF_RSH | BPF_X:
823 emit(A64_LSRV(is64, dst, dst, src), ctx);
824 break;
825 case BPF_ALU | BPF_ARSH | BPF_X:
826 case BPF_ALU64 | BPF_ARSH | BPF_X:
827 emit(A64_ASRV(is64, dst, dst, src), ctx);
828 break;
829 /* dst = -dst */
830 case BPF_ALU | BPF_NEG:
831 case BPF_ALU64 | BPF_NEG:
832 emit(A64_NEG(is64, dst, dst), ctx);
833 break;
834 /* dst = BSWAP##imm(dst) */
835 case BPF_ALU | BPF_END | BPF_FROM_LE:
836 case BPF_ALU | BPF_END | BPF_FROM_BE:
837#ifdef CONFIG_CPU_BIG_ENDIAN
838 if (BPF_SRC(code) == BPF_FROM_BE)
839 goto emit_bswap_uxt;
840#else /* !CONFIG_CPU_BIG_ENDIAN */
841 if (BPF_SRC(code) == BPF_FROM_LE)
842 goto emit_bswap_uxt;
843#endif
844 switch (imm) {
845 case 16:
846 emit(A64_REV16(is64, dst, dst), ctx);
847 /* zero-extend 16 bits into 64 bits */
848 emit(A64_UXTH(is64, dst, dst), ctx);
849 break;
850 case 32:
851 emit(A64_REV32(is64, dst, dst), ctx);
852 /* upper 32 bits already cleared */
853 break;
854 case 64:
855 emit(A64_REV64(dst, dst), ctx);
856 break;
857 }
858 break;
859emit_bswap_uxt:
860 switch (imm) {
861 case 16:
862 /* zero-extend 16 bits into 64 bits */
863 emit(A64_UXTH(is64, dst, dst), ctx);
864 break;
865 case 32:
866 /* zero-extend 32 bits into 64 bits */
867 emit(A64_UXTW(is64, dst, dst), ctx);
868 break;
869 case 64:
870 /* nop */
871 break;
872 }
873 break;
874 /* dst = imm */
875 case BPF_ALU | BPF_MOV | BPF_K:
876 case BPF_ALU64 | BPF_MOV | BPF_K:
877 emit_a64_mov_i(is64, dst, imm, ctx);
878 break;
879 /* dst = dst OP imm */
880 case BPF_ALU | BPF_ADD | BPF_K:
881 case BPF_ALU64 | BPF_ADD | BPF_K:
882 if (is_addsub_imm(imm)) {
883 emit(A64_ADD_I(is64, dst, dst, imm), ctx);
884 } else if (is_addsub_imm(-imm)) {
885 emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
886 } else {
887 emit_a64_mov_i(is64, tmp, imm, ctx);
888 emit(A64_ADD(is64, dst, dst, tmp), ctx);
889 }
890 break;
891 case BPF_ALU | BPF_SUB | BPF_K:
892 case BPF_ALU64 | BPF_SUB | BPF_K:
893 if (is_addsub_imm(imm)) {
894 emit(A64_SUB_I(is64, dst, dst, imm), ctx);
895 } else if (is_addsub_imm(-imm)) {
896 emit(A64_ADD_I(is64, dst, dst, -imm), ctx);
897 } else {
898 emit_a64_mov_i(is64, tmp, imm, ctx);
899 emit(A64_SUB(is64, dst, dst, tmp), ctx);
900 }
901 break;
902 case BPF_ALU | BPF_AND | BPF_K:
903 case BPF_ALU64 | BPF_AND | BPF_K:
904 a64_insn = A64_AND_I(is64, dst, dst, imm);
905 if (a64_insn != AARCH64_BREAK_FAULT) {
906 emit(a64_insn, ctx);
907 } else {
908 emit_a64_mov_i(is64, tmp, imm, ctx);
909 emit(A64_AND(is64, dst, dst, tmp), ctx);
910 }
911 break;
912 case BPF_ALU | BPF_OR | BPF_K:
913 case BPF_ALU64 | BPF_OR | BPF_K:
914 a64_insn = A64_ORR_I(is64, dst, dst, imm);
915 if (a64_insn != AARCH64_BREAK_FAULT) {
916 emit(a64_insn, ctx);
917 } else {
918 emit_a64_mov_i(is64, tmp, imm, ctx);
919 emit(A64_ORR(is64, dst, dst, tmp), ctx);
920 }
921 break;
922 case BPF_ALU | BPF_XOR | BPF_K:
923 case BPF_ALU64 | BPF_XOR | BPF_K:
924 a64_insn = A64_EOR_I(is64, dst, dst, imm);
925 if (a64_insn != AARCH64_BREAK_FAULT) {
926 emit(a64_insn, ctx);
927 } else {
928 emit_a64_mov_i(is64, tmp, imm, ctx);
929 emit(A64_EOR(is64, dst, dst, tmp), ctx);
930 }
931 break;
932 case BPF_ALU | BPF_MUL | BPF_K:
933 case BPF_ALU64 | BPF_MUL | BPF_K:
934 emit_a64_mov_i(is64, tmp, imm, ctx);
935 emit(A64_MUL(is64, dst, dst, tmp), ctx);
936 break;
937 case BPF_ALU | BPF_DIV | BPF_K:
938 case BPF_ALU64 | BPF_DIV | BPF_K:
939 emit_a64_mov_i(is64, tmp, imm, ctx);
940 emit(A64_UDIV(is64, dst, dst, tmp), ctx);
941 break;
942 case BPF_ALU | BPF_MOD | BPF_K:
943 case BPF_ALU64 | BPF_MOD | BPF_K:
944 emit_a64_mov_i(is64, tmp2, imm, ctx);
945 emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
946 emit(A64_MSUB(is64, dst, dst, tmp, tmp2), ctx);
947 break;
948 case BPF_ALU | BPF_LSH | BPF_K:
949 case BPF_ALU64 | BPF_LSH | BPF_K:
950 emit(A64_LSL(is64, dst, dst, imm), ctx);
951 break;
952 case BPF_ALU | BPF_RSH | BPF_K:
953 case BPF_ALU64 | BPF_RSH | BPF_K:
954 emit(A64_LSR(is64, dst, dst, imm), ctx);
955 break;
956 case BPF_ALU | BPF_ARSH | BPF_K:
957 case BPF_ALU64 | BPF_ARSH | BPF_K:
958 emit(A64_ASR(is64, dst, dst, imm), ctx);
959 break;
960
961 /* JUMP off */
962 case BPF_JMP | BPF_JA:
963 jmp_offset = bpf2a64_offset(i, off, ctx);
964 check_imm26(jmp_offset);
965 emit(A64_B(jmp_offset), ctx);
966 break;
967 /* IF (dst COND src) JUMP off */
968 case BPF_JMP | BPF_JEQ | BPF_X:
969 case BPF_JMP | BPF_JGT | BPF_X:
970 case BPF_JMP | BPF_JLT | BPF_X:
971 case BPF_JMP | BPF_JGE | BPF_X:
972 case BPF_JMP | BPF_JLE | BPF_X:
973 case BPF_JMP | BPF_JNE | BPF_X:
974 case BPF_JMP | BPF_JSGT | BPF_X:
975 case BPF_JMP | BPF_JSLT | BPF_X:
976 case BPF_JMP | BPF_JSGE | BPF_X:
977 case BPF_JMP | BPF_JSLE | BPF_X:
978 case BPF_JMP32 | BPF_JEQ | BPF_X:
979 case BPF_JMP32 | BPF_JGT | BPF_X:
980 case BPF_JMP32 | BPF_JLT | BPF_X:
981 case BPF_JMP32 | BPF_JGE | BPF_X:
982 case BPF_JMP32 | BPF_JLE | BPF_X:
983 case BPF_JMP32 | BPF_JNE | BPF_X:
984 case BPF_JMP32 | BPF_JSGT | BPF_X:
985 case BPF_JMP32 | BPF_JSLT | BPF_X:
986 case BPF_JMP32 | BPF_JSGE | BPF_X:
987 case BPF_JMP32 | BPF_JSLE | BPF_X:
988 emit(A64_CMP(is64, dst, src), ctx);
989emit_cond_jmp:
990 jmp_offset = bpf2a64_offset(i, off, ctx);
991 check_imm19(jmp_offset);
992 switch (BPF_OP(code)) {
993 case BPF_JEQ:
994 jmp_cond = A64_COND_EQ;
995 break;
996 case BPF_JGT:
997 jmp_cond = A64_COND_HI;
998 break;
999 case BPF_JLT:
1000 jmp_cond = A64_COND_CC;
1001 break;
1002 case BPF_JGE:
1003 jmp_cond = A64_COND_CS;
1004 break;
1005 case BPF_JLE:
1006 jmp_cond = A64_COND_LS;
1007 break;
1008 case BPF_JSET:
1009 case BPF_JNE:
1010 jmp_cond = A64_COND_NE;
1011 break;
1012 case BPF_JSGT:
1013 jmp_cond = A64_COND_GT;
1014 break;
1015 case BPF_JSLT:
1016 jmp_cond = A64_COND_LT;
1017 break;
1018 case BPF_JSGE:
1019 jmp_cond = A64_COND_GE;
1020 break;
1021 case BPF_JSLE:
1022 jmp_cond = A64_COND_LE;
1023 break;
1024 default:
1025 return -EFAULT;
1026 }
1027 emit(A64_B_(jmp_cond, jmp_offset), ctx);
1028 break;
1029 case BPF_JMP | BPF_JSET | BPF_X:
1030 case BPF_JMP32 | BPF_JSET | BPF_X:
1031 emit(A64_TST(is64, dst, src), ctx);
1032 goto emit_cond_jmp;
1033 /* IF (dst COND imm) JUMP off */
1034 case BPF_JMP | BPF_JEQ | BPF_K:
1035 case BPF_JMP | BPF_JGT | BPF_K:
1036 case BPF_JMP | BPF_JLT | BPF_K:
1037 case BPF_JMP | BPF_JGE | BPF_K:
1038 case BPF_JMP | BPF_JLE | BPF_K:
1039 case BPF_JMP | BPF_JNE | BPF_K:
1040 case BPF_JMP | BPF_JSGT | BPF_K:
1041 case BPF_JMP | BPF_JSLT | BPF_K:
1042 case BPF_JMP | BPF_JSGE | BPF_K:
1043 case BPF_JMP | BPF_JSLE | BPF_K:
1044 case BPF_JMP32 | BPF_JEQ | BPF_K:
1045 case BPF_JMP32 | BPF_JGT | BPF_K:
1046 case BPF_JMP32 | BPF_JLT | BPF_K:
1047 case BPF_JMP32 | BPF_JGE | BPF_K:
1048 case BPF_JMP32 | BPF_JLE | BPF_K:
1049 case BPF_JMP32 | BPF_JNE | BPF_K:
1050 case BPF_JMP32 | BPF_JSGT | BPF_K:
1051 case BPF_JMP32 | BPF_JSLT | BPF_K:
1052 case BPF_JMP32 | BPF_JSGE | BPF_K:
1053 case BPF_JMP32 | BPF_JSLE | BPF_K:
1054 if (is_addsub_imm(imm)) {
1055 emit(A64_CMP_I(is64, dst, imm), ctx);
1056 } else if (is_addsub_imm(-imm)) {
1057 emit(A64_CMN_I(is64, dst, -imm), ctx);
1058 } else {
1059 emit_a64_mov_i(is64, tmp, imm, ctx);
1060 emit(A64_CMP(is64, dst, tmp), ctx);
1061 }
1062 goto emit_cond_jmp;
1063 case BPF_JMP | BPF_JSET | BPF_K:
1064 case BPF_JMP32 | BPF_JSET | BPF_K:
1065 a64_insn = A64_TST_I(is64, dst, imm);
1066 if (a64_insn != AARCH64_BREAK_FAULT) {
1067 emit(a64_insn, ctx);
1068 } else {
1069 emit_a64_mov_i(is64, tmp, imm, ctx);
1070 emit(A64_TST(is64, dst, tmp), ctx);
1071 }
1072 goto emit_cond_jmp;
1073 /* function call */
1074 case BPF_JMP | BPF_CALL:
1075 {
1076 const u8 r0 = bpf2a64[BPF_REG_0];
1077 bool func_addr_fixed;
1078 u64 func_addr;
1079
1080 ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
1081 &func_addr, &func_addr_fixed);
1082 if (ret < 0)
1083 return ret;
1084 emit_call(func_addr, ctx);
1085 emit(A64_MOV(1, r0, A64_R(0)), ctx);
1086 break;
1087 }
1088 /* tail call */
1089 case BPF_JMP | BPF_TAIL_CALL:
1090 if (emit_bpf_tail_call(ctx))
1091 return -EFAULT;
1092 break;
1093 /* function return */
1094 case BPF_JMP | BPF_EXIT:
1095 /* Optimization: when the last instruction is EXIT,
1096 simply fall through to the epilogue. */
1097 if (i == ctx->prog->len - 1)
1098 break;
1099 jmp_offset = epilogue_offset(ctx);
1100 check_imm26(jmp_offset);
1101 emit(A64_B(jmp_offset), ctx);
1102 break;
1103
1104 /* dst = imm64 */
1105 case BPF_LD | BPF_IMM | BPF_DW:
1106 {
1107 const struct bpf_insn insn1 = insn[1];
1108 u64 imm64;
1109
1110 imm64 = (u64)insn1.imm << 32 | (u32)imm;
1111 if (bpf_pseudo_func(insn))
1112 emit_addr_mov_i64(dst, imm64, ctx);
1113 else
1114 emit_a64_mov_i64(dst, imm64, ctx);
1115
1116 return 1;
1117 }
1118
1119 /* LDX: dst = *(size *)(src + off) */
1120 case BPF_LDX | BPF_MEM | BPF_W:
1121 case BPF_LDX | BPF_MEM | BPF_H:
1122 case BPF_LDX | BPF_MEM | BPF_B:
1123 case BPF_LDX | BPF_MEM | BPF_DW:
1124 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1125 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1126 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1127 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1128 if (ctx->fpb_offset > 0 && src == fp) {
1129 src_adj = fpb;
1130 off_adj = off + ctx->fpb_offset;
1131 } else {
1132 src_adj = src;
1133 off_adj = off;
1134 }
1135 switch (BPF_SIZE(code)) {
1136 case BPF_W:
1137 if (is_lsi_offset(off_adj, 2)) {
1138 emit(A64_LDR32I(dst, src_adj, off_adj), ctx);
1139 } else {
1140 emit_a64_mov_i(1, tmp, off, ctx);
1141 emit(A64_LDR32(dst, src, tmp), ctx);
1142 }
1143 break;
1144 case BPF_H:
1145 if (is_lsi_offset(off_adj, 1)) {
1146 emit(A64_LDRHI(dst, src_adj, off_adj), ctx);
1147 } else {
1148 emit_a64_mov_i(1, tmp, off, ctx);
1149 emit(A64_LDRH(dst, src, tmp), ctx);
1150 }
1151 break;
1152 case BPF_B:
1153 if (is_lsi_offset(off_adj, 0)) {
1154 emit(A64_LDRBI(dst, src_adj, off_adj), ctx);
1155 } else {
1156 emit_a64_mov_i(1, tmp, off, ctx);
1157 emit(A64_LDRB(dst, src, tmp), ctx);
1158 }
1159 break;
1160 case BPF_DW:
1161 if (is_lsi_offset(off_adj, 3)) {
1162 emit(A64_LDR64I(dst, src_adj, off_adj), ctx);
1163 } else {
1164 emit_a64_mov_i(1, tmp, off, ctx);
1165 emit(A64_LDR64(dst, src, tmp), ctx);
1166 }
1167 break;
1168 }
1169
1170 ret = add_exception_handler(insn, ctx, dst);
1171 if (ret)
1172 return ret;
1173 break;
1174
1175 /* speculation barrier */
1176 case BPF_ST | BPF_NOSPEC:
1177 /*
1178 * Nothing required here.
1179 *
1180 * In case of arm64, we rely on the firmware mitigation of
1181 * Speculative Store Bypass as controlled via the ssbd kernel
1182 * parameter. Whenever the mitigation is enabled, it works
1183 * for all of the kernel code with no need to provide any
1184 * additional instructions.
1185 */
1186 break;
1187
1188 /* ST: *(size *)(dst + off) = imm */
1189 case BPF_ST | BPF_MEM | BPF_W:
1190 case BPF_ST | BPF_MEM | BPF_H:
1191 case BPF_ST | BPF_MEM | BPF_B:
1192 case BPF_ST | BPF_MEM | BPF_DW:
1193 if (ctx->fpb_offset > 0 && dst == fp) {
1194 dst_adj = fpb;
1195 off_adj = off + ctx->fpb_offset;
1196 } else {
1197 dst_adj = dst;
1198 off_adj = off;
1199 }
1200 /* Load imm to a register then store it */
1201 emit_a64_mov_i(1, tmp, imm, ctx);
1202 switch (BPF_SIZE(code)) {
1203 case BPF_W:
1204 if (is_lsi_offset(off_adj, 2)) {
1205 emit(A64_STR32I(tmp, dst_adj, off_adj), ctx);
1206 } else {
1207 emit_a64_mov_i(1, tmp2, off, ctx);
1208 emit(A64_STR32(tmp, dst, tmp2), ctx);
1209 }
1210 break;
1211 case BPF_H:
1212 if (is_lsi_offset(off_adj, 1)) {
1213 emit(A64_STRHI(tmp, dst_adj, off_adj), ctx);
1214 } else {
1215 emit_a64_mov_i(1, tmp2, off, ctx);
1216 emit(A64_STRH(tmp, dst, tmp2), ctx);
1217 }
1218 break;
1219 case BPF_B:
1220 if (is_lsi_offset(off_adj, 0)) {
1221 emit(A64_STRBI(tmp, dst_adj, off_adj), ctx);
1222 } else {
1223 emit_a64_mov_i(1, tmp2, off, ctx);
1224 emit(A64_STRB(tmp, dst, tmp2), ctx);
1225 }
1226 break;
1227 case BPF_DW:
1228 if (is_lsi_offset(off_adj, 3)) {
1229 emit(A64_STR64I(tmp, dst_adj, off_adj), ctx);
1230 } else {
1231 emit_a64_mov_i(1, tmp2, off, ctx);
1232 emit(A64_STR64(tmp, dst, tmp2), ctx);
1233 }
1234 break;
1235 }
1236 break;
1237
1238 /* STX: *(size *)(dst + off) = src */
1239 case BPF_STX | BPF_MEM | BPF_W:
1240 case BPF_STX | BPF_MEM | BPF_H:
1241 case BPF_STX | BPF_MEM | BPF_B:
1242 case BPF_STX | BPF_MEM | BPF_DW:
1243 if (ctx->fpb_offset > 0 && dst == fp) {
1244 dst_adj = fpb;
1245 off_adj = off + ctx->fpb_offset;
1246 } else {
1247 dst_adj = dst;
1248 off_adj = off;
1249 }
1250 switch (BPF_SIZE(code)) {
1251 case BPF_W:
1252 if (is_lsi_offset(off_adj, 2)) {
1253 emit(A64_STR32I(src, dst_adj, off_adj), ctx);
1254 } else {
1255 emit_a64_mov_i(1, tmp, off, ctx);
1256 emit(A64_STR32(src, dst, tmp), ctx);
1257 }
1258 break;
1259 case BPF_H:
1260 if (is_lsi_offset(off_adj, 1)) {
1261 emit(A64_STRHI(src, dst_adj, off_adj), ctx);
1262 } else {
1263 emit_a64_mov_i(1, tmp, off, ctx);
1264 emit(A64_STRH(src, dst, tmp), ctx);
1265 }
1266 break;
1267 case BPF_B:
1268 if (is_lsi_offset(off_adj, 0)) {
1269 emit(A64_STRBI(src, dst_adj, off_adj), ctx);
1270 } else {
1271 emit_a64_mov_i(1, tmp, off, ctx);
1272 emit(A64_STRB(src, dst, tmp), ctx);
1273 }
1274 break;
1275 case BPF_DW:
1276 if (is_lsi_offset(off_adj, 3)) {
1277 emit(A64_STR64I(src, dst_adj, off_adj), ctx);
1278 } else {
1279 emit_a64_mov_i(1, tmp, off, ctx);
1280 emit(A64_STR64(src, dst, tmp), ctx);
1281 }
1282 break;
1283 }
1284 break;
1285
1286 case BPF_STX | BPF_ATOMIC | BPF_W:
1287 case BPF_STX | BPF_ATOMIC | BPF_DW:
1288 if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
1289 ret = emit_lse_atomic(insn, ctx);
1290 else
1291 ret = emit_ll_sc_atomic(insn, ctx);
1292 if (ret)
1293 return ret;
1294 break;
1295
1296 default:
1297 pr_err_once("unknown opcode %02x\n", code);
1298 return -EINVAL;
1299 }
1300
1301 return 0;
1302}
1303
1304/*
1305 * Return 0 if FP may change at runtime, otherwise find the minimum negative
1306 * offset to FP, convert it to a positive number, and align it down to 8 bytes.
1307 */
1308static int find_fpb_offset(struct bpf_prog *prog)
1309{
1310 int i;
1311 int offset = 0;
1312
1313 for (i = 0; i < prog->len; i++) {
1314 const struct bpf_insn *insn = &prog->insnsi[i];
1315 const u8 class = BPF_CLASS(insn->code);
1316 const u8 mode = BPF_MODE(insn->code);
1317 const u8 src = insn->src_reg;
1318 const u8 dst = insn->dst_reg;
1319 const s32 imm = insn->imm;
1320 const s16 off = insn->off;
1321
1322 switch (class) {
1323 case BPF_STX:
1324 case BPF_ST:
1325 /* fp holds atomic operation result */
1326 if (class == BPF_STX && mode == BPF_ATOMIC &&
1327 ((imm == BPF_XCHG ||
1328 imm == (BPF_FETCH | BPF_ADD) ||
1329 imm == (BPF_FETCH | BPF_AND) ||
1330 imm == (BPF_FETCH | BPF_XOR) ||
1331 imm == (BPF_FETCH | BPF_OR)) &&
1332 src == BPF_REG_FP))
1333 return 0;
1334
1335 if (mode == BPF_MEM && dst == BPF_REG_FP &&
1336 off < offset)
1337 offset = insn->off;
1338 break;
1339
1340 case BPF_JMP32:
1341 case BPF_JMP:
1342 break;
1343
1344 case BPF_LDX:
1345 case BPF_LD:
1346 /* fp holds load result */
1347 if (dst == BPF_REG_FP)
1348 return 0;
1349
1350 if (class == BPF_LDX && mode == BPF_MEM &&
1351 src == BPF_REG_FP && off < offset)
1352 offset = off;
1353 break;
1354
1355 case BPF_ALU:
1356 case BPF_ALU64:
1357 default:
1358 /* fp holds ALU result */
1359 if (dst == BPF_REG_FP)
1360 return 0;
1361 }
1362 }
1363
1364 if (offset < 0) {
1365 /*
1366 * 'offset' is negative here and can safely be converted to a positive
1367 * 'int', since insn->off is an 's16'
1368 */
1369 offset = -offset;
1370 /* align down to 8 bytes */
1371 offset = ALIGN_DOWN(offset, 8);
1372 }
1373
1374 return offset;
1375}
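/*
 * Worked example (illustrative): if a program's only FP-relative accesses
 * are *(u64 *)(r10 - 64) = r1 and r2 = *(u32 *)(r10 - 16), the minimum
 * offset is -64, so fpb_offset becomes 64 and both slots can be reached
 * with the positive immediates 0 and 48 from fpb = FP - 64.
 */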
1376
1377static int build_body(struct jit_ctx *ctx, bool extra_pass)
1378{
1379 const struct bpf_prog *prog = ctx->prog;
1380 int i;
1381
1382 /*
1383 * - offset[0] - offset of the end of the prologue,
1384 * start of the 1st instruction.
1385 * - offset[1] - offset of the end of 1st instruction,
1386 * start of the 2nd instruction
1387 * [....]
1388 * - offset[3] - offset of the end of 3rd instruction,
1389 * start of 4th instruction
1390 */
1391 for (i = 0; i < prog->len; i++) {
1392 const struct bpf_insn *insn = &prog->insnsi[i];
1393 int ret;
1394
1395 if (ctx->image == NULL)
1396 ctx->offset[i] = ctx->idx;
1397 ret = build_insn(insn, ctx, extra_pass);
1398 if (ret > 0) {
1399 i++;
1400 if (ctx->image == NULL)
1401 ctx->offset[i] = ctx->idx;
1402 continue;
1403 }
1404 if (ret)
1405 return ret;
1406 }
1407 /*
1408 * offset is allocated with prog->len + 1 so fill in
1409 * the last element with the offset after the last
1410 * instruction (end of program)
1411 */
1412 if (ctx->image == NULL)
1413 ctx->offset[i] = ctx->idx;
1414
1415 return 0;
1416}
1417
1418static int validate_code(struct jit_ctx *ctx)
1419{
1420 int i;
1421
1422 for (i = 0; i < ctx->idx; i++) {
1423 u32 a64_insn = le32_to_cpu(ctx->image[i]);
1424
1425 if (a64_insn == AARCH64_BREAK_FAULT)
1426 return -1;
1427 }
1428 return 0;
1429}
1430
1431static int validate_ctx(struct jit_ctx *ctx)
1432{
1433 if (validate_code(ctx))
1434 return -1;
1435
1436 if (WARN_ON_ONCE(ctx->exentry_idx != ctx->prog->aux->num_exentries))
1437 return -1;
1438
1439 return 0;
1440}
1441
1442static inline void bpf_flush_icache(void *start, void *end)
1443{
1444 flush_icache_range((unsigned long)start, (unsigned long)end);
1445}
1446
1447struct arm64_jit_data {
1448 struct bpf_binary_header *header;
1449 u8 *image;
1450 struct jit_ctx ctx;
1451};
1452
1453struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1454{
1455 int image_size, prog_size, extable_size, extable_align, extable_offset;
1456 struct bpf_prog *tmp, *orig_prog = prog;
1457 struct bpf_binary_header *header;
1458 struct arm64_jit_data *jit_data;
1459 bool was_classic = bpf_prog_was_classic(prog);
1460 bool tmp_blinded = false;
1461 bool extra_pass = false;
1462 struct jit_ctx ctx;
1463 u8 *image_ptr;
1464
1465 if (!prog->jit_requested)
1466 return orig_prog;
1467
1468 tmp = bpf_jit_blind_constants(prog);
1469 /* If blinding was requested and we failed during blinding,
1470 * we must fall back to the interpreter.
1471 */
1472 if (IS_ERR(tmp))
1473 return orig_prog;
1474 if (tmp != prog) {
1475 tmp_blinded = true;
1476 prog = tmp;
1477 }
1478
1479 jit_data = prog->aux->jit_data;
1480 if (!jit_data) {
1481 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
1482 if (!jit_data) {
1483 prog = orig_prog;
1484 goto out;
1485 }
1486 prog->aux->jit_data = jit_data;
1487 }
1488 if (jit_data->ctx.offset) {
1489 ctx = jit_data->ctx;
1490 image_ptr = jit_data->image;
1491 header = jit_data->header;
1492 extra_pass = true;
1493 prog_size = sizeof(u32) * ctx.idx;
1494 goto skip_init_ctx;
1495 }
1496 memset(&ctx, 0, sizeof(ctx));
1497 ctx.prog = prog;
1498
1499 ctx.offset = kvcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
1500 if (ctx.offset == NULL) {
1501 prog = orig_prog;
1502 goto out_off;
1503 }
1504
1505 ctx.fpb_offset = find_fpb_offset(prog);
1506
1507 /*
1508 * 1. Initial fake pass to compute ctx->idx and ctx->offset.
1509 *
1510 * BPF line info needs ctx->offset[i] to be the offset of
1511 * instruction[i] in jited image, so build prologue first.
1512 */
1513 if (build_prologue(&ctx, was_classic)) {
1514 prog = orig_prog;
1515 goto out_off;
1516 }
1517
1518 if (build_body(&ctx, extra_pass)) {
1519 prog = orig_prog;
1520 goto out_off;
1521 }
1522
1523 ctx.epilogue_offset = ctx.idx;
1524 build_epilogue(&ctx);
1525 build_plt(&ctx);
1526
1527 extable_align = __alignof__(struct exception_table_entry);
1528 extable_size = prog->aux->num_exentries *
1529 sizeof(struct exception_table_entry);
1530
1531 /* Now we know the actual image size. */
1532 prog_size = sizeof(u32) * ctx.idx;
1533 /* also allocate space for plt target */
1534 extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
1535 image_size = extable_offset + extable_size;
1536 header = bpf_jit_binary_alloc(image_size, &image_ptr,
1537 sizeof(u32), jit_fill_hole);
1538 if (header == NULL) {
1539 prog = orig_prog;
1540 goto out_off;
1541 }
1542
1543 /* 2. Now, the actual pass. */
1544
1545 ctx.image = (__le32 *)image_ptr;
1546 if (extable_size)
1547 prog->aux->extable = (void *)image_ptr + extable_offset;
1548skip_init_ctx:
1549 ctx.idx = 0;
1550 ctx.exentry_idx = 0;
1551
1552 build_prologue(&ctx, was_classic);
1553
1554 if (build_body(&ctx, extra_pass)) {
1555 bpf_jit_binary_free(header);
1556 prog = orig_prog;
1557 goto out_off;
1558 }
1559
1560 build_epilogue(&ctx);
1561 build_plt(&ctx);
1562
1563 /* 3. Extra pass to validate JITed code. */
1564 if (validate_ctx(&ctx)) {
1565 bpf_jit_binary_free(header);
1566 prog = orig_prog;
1567 goto out_off;
1568 }
1569
1570 /* And we're done. */
1571 if (bpf_jit_enable > 1)
1572 bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
1573
1574 bpf_flush_icache(header, ctx.image + ctx.idx);
1575
1576 if (!prog->is_func || extra_pass) {
1577 if (extra_pass && ctx.idx != jit_data->ctx.idx) {
1578 pr_err_once("multi-func JIT bug %d != %d\n",
1579 ctx.idx, jit_data->ctx.idx);
1580 bpf_jit_binary_free(header);
1581 prog->bpf_func = NULL;
1582 prog->jited = 0;
1583 prog->jited_len = 0;
1584 goto out_off;
1585 }
1586 bpf_jit_binary_lock_ro(header);
1587 } else {
1588 jit_data->ctx = ctx;
1589 jit_data->image = image_ptr;
1590 jit_data->header = header;
1591 }
1592 prog->bpf_func = (void *)ctx.image;
1593 prog->jited = 1;
1594 prog->jited_len = prog_size;
1595
1596 if (!prog->is_func || extra_pass) {
1597 int i;
1598
1599 /* offset[prog->len] is the size of program */
1600 for (i = 0; i <= prog->len; i++)
1601 ctx.offset[i] *= AARCH64_INSN_SIZE;
1602 bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
1603out_off:
1604 kvfree(ctx.offset);
1605 kfree(jit_data);
1606 prog->aux->jit_data = NULL;
1607 }
1608out:
1609 if (tmp_blinded)
1610 bpf_jit_prog_release_other(prog, prog == orig_prog ?
1611 tmp : orig_prog);
1612 return prog;
1613}
1614
1615bool bpf_jit_supports_kfunc_call(void)
1616{
1617 return true;
1618}
1619
1620u64 bpf_jit_alloc_exec_limit(void)
1621{
1622 return VMALLOC_END - VMALLOC_START;
1623}
1624
1625void *bpf_jit_alloc_exec(unsigned long size)
1626{
1627 /* Memory is intended to be executable, reset the pointer tag. */
1628 return kasan_reset_tag(vmalloc(size));
1629}
1630
1631void bpf_jit_free_exec(void *addr)
1632{
1633 return vfree(addr);
1634}
1635
1636/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
1637bool bpf_jit_supports_subprog_tailcalls(void)
1638{
1639 return true;
1640}
1641
1642static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
1643 int args_off, int retval_off, int run_ctx_off,
1644 bool save_ret)
1645{
1646 __le32 *branch;
1647 u64 enter_prog;
1648 u64 exit_prog;
1649 struct bpf_prog *p = l->link.prog;
1650 int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
1651
1652 enter_prog = (u64)bpf_trampoline_enter(p);
1653 exit_prog = (u64)bpf_trampoline_exit(p);
1654
1655 if (l->cookie == 0) {
1656 /* if cookie is zero, one instruction is enough to store it */
1657 emit(A64_STR64I(A64_ZR, A64_SP, run_ctx_off + cookie_off), ctx);
1658 } else {
1659 emit_a64_mov_i64(A64_R(10), l->cookie, ctx);
1660 emit(A64_STR64I(A64_R(10), A64_SP, run_ctx_off + cookie_off),
1661 ctx);
1662 }
1663
1664 /* save p to callee saved register x19 to avoid loading p with mov_i64
1665 * each time.
1666 */
1667 emit_addr_mov_i64(A64_R(19), (const u64)p, ctx);
1668
1669 /* arg1: prog */
1670 emit(A64_MOV(1, A64_R(0), A64_R(19)), ctx);
1671 /* arg2: &run_ctx */
1672 emit(A64_ADD_I(1, A64_R(1), A64_SP, run_ctx_off), ctx);
1673
1674 emit_call(enter_prog, ctx);
1675
1676 /* if (__bpf_prog_enter(prog) == 0)
1677 * goto skip_exec_of_prog;
1678 */
1679 branch = ctx->image + ctx->idx;
1680 emit(A64_NOP, ctx);
1681
1682 /* save return value to callee saved register x20 */
1683 emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx);
1684
1685 emit(A64_ADD_I(1, A64_R(0), A64_SP, args_off), ctx);
1686 if (!p->jited)
1687 emit_addr_mov_i64(A64_R(1), (const u64)p->insnsi, ctx);
1688
1689 emit_call((const u64)p->bpf_func, ctx);
1690
1691 if (save_ret)
1692 emit(A64_STR64I(A64_R(0), A64_SP, retval_off), ctx);
1693
1694 if (ctx->image) {
1695 int offset = &ctx->image[ctx->idx] - branch;
1696 *branch = cpu_to_le32(A64_CBZ(1, A64_R(0), offset));
1697 }
1698
1699 /* arg1: prog */
1700 emit(A64_MOV(1, A64_R(0), A64_R(19)), ctx);
1701 /* arg2: start time */
1702 emit(A64_MOV(1, A64_R(1), A64_R(20)), ctx);
1703 /* arg3: &run_ctx */
1704 emit(A64_ADD_I(1, A64_R(2), A64_SP, run_ctx_off), ctx);
1705
1706 emit_call(exit_prog, ctx);
1707}
1708
1709static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl,
1710 int args_off, int retval_off, int run_ctx_off,
1711 __le32 **branches)
1712{
1713 int i;
1714
1715 /* The first fmod_ret program will receive a garbage return value.
1716 * Set this to 0 to avoid confusing the program.
1717 */
1718 emit(A64_STR64I(A64_ZR, A64_SP, retval_off), ctx);
1719 for (i = 0; i < tl->nr_links; i++) {
1720 invoke_bpf_prog(ctx, tl->links[i], args_off, retval_off,
1721 run_ctx_off, true);
1722 /* if (*(u64 *)(sp + retval_off) != 0)
1723 * goto do_fexit;
1724 */
1725 emit(A64_LDR64I(A64_R(10), A64_SP, retval_off), ctx);
1726 /* Save the location of branch, and generate a nop.
1727 * This nop will be replaced with a cbnz later.
1728 */
1729 branches[i] = ctx->image + ctx->idx;
1730 emit(A64_NOP, ctx);
1731 }
1732}
1733
1734static void save_args(struct jit_ctx *ctx, int args_off, int nargs)
1735{
1736 int i;
1737
1738 for (i = 0; i < nargs; i++) {
1739 emit(A64_STR64I(i, A64_SP, args_off), ctx);
1740 args_off += 8;
1741 }
1742}
1743
1744static void restore_args(struct jit_ctx *ctx, int args_off, int nargs)
1745{
1746 int i;
1747
1748 for (i = 0; i < nargs; i++) {
1749 emit(A64_LDR64I(i, A64_SP, args_off), ctx);
1750 args_off += 8;
1751 }
1752}
1753
1754/* Based on the x86's implementation of arch_prepare_bpf_trampoline().
1755 *
1756 * bpf prog and function entry before bpf trampoline hooked:
1757 * mov x9, lr
1758 * nop
1759 *
1760 * bpf prog and function entry after bpf trampoline hooked:
1761 * mov x9, lr
1762 * bl <bpf_trampoline or plt>
1763 *
1764 */
1765static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
1766 struct bpf_tramp_links *tlinks, void *orig_call,
1767 int nargs, u32 flags)
1768{
1769 int i;
1770 int stack_size;
1771 int retaddr_off;
1772 int regs_off;
1773 int retval_off;
1774 int args_off;
1775 int nargs_off;
1776 int ip_off;
1777 int run_ctx_off;
1778 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
1779 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
1780 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
1781 bool save_ret;
1782 __le32 **branches = NULL;
1783
1784 /* trampoline stack layout:
1785 * [ parent ip ]
1786 * [ FP ]
1787 * SP + retaddr_off [ self ip ]
1788 * [ FP ]
1789 *
1790 * [ padding ] align SP to multiples of 16
1791 *
1792 * [ x20 ] callee saved reg x20
1793 * SP + regs_off [ x19 ] callee saved reg x19
1794 *
1795 * SP + retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or
1796 * BPF_TRAMP_F_RET_FENTRY_RET
1797 *
1798 * [ argN ]
1799 * [ ... ]
1800 * SP + args_off [ arg1 ]
1801 *
1802 * SP + nargs_off [ args count ]
1803 *
1804 * SP + ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
1805 *
1806 * SP + run_ctx_off [ bpf_tramp_run_ctx ]
1807 */
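 /* Illustrative sizing, assuming sizeof(struct bpf_tramp_run_ctx) rounds up
  * to 24 bytes here: with two u64 args, BPF_TRAMP_F_CALL_ORIG set and no
  * BPF_TRAMP_F_IP_ARG, the offsets become run_ctx_off = 0, nargs_off = 24,
  * args_off = 32, retval_off = 48 and regs_off = 56; stack_size rounds up
  * from 72 to 80 and retaddr_off = 88.
  */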
1808
1809 stack_size = 0;
1810 run_ctx_off = stack_size;
1811 /* room for bpf_tramp_run_ctx */
1812 stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
1813
1814 ip_off = stack_size;
1815 /* room for IP address argument */
1816 if (flags & BPF_TRAMP_F_IP_ARG)
1817 stack_size += 8;
1818
1819 nargs_off = stack_size;
1820 /* room for args count */
1821 stack_size += 8;
1822
1823 args_off = stack_size;
1824 /* room for args */
1825 stack_size += nargs * 8;
1826
1827 /* room for return value */
1828 retval_off = stack_size;
1829 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
1830 if (save_ret)
1831 stack_size += 8;
1832
1833 /* room for callee saved registers, currently x19 and x20 are used */
1834 regs_off = stack_size;
1835 stack_size += 16;
1836
1837 /* round up to multiples of 16 to avoid SPAlignmentFault */
1838 stack_size = round_up(stack_size, 16);
1839
1840 /* return address locates above FP */
1841 /* the return address is located above FP */
1842
1843 /* bpf trampoline may be invoked by 3 instruction types:
1844 * 1. bl, attached to bpf prog or kernel function via short jump
1845 * 2. br, attached to bpf prog or kernel function via long jump
1846 * 3. blr, working as a function pointer, used by struct_ops.
1847 * So BTI_JC should be used here to support both br and blr.
1848 */
1849 emit_bti(A64_BTI_JC, ctx);
1850
1851 /* frame for parent function */
1852 emit(A64_PUSH(A64_FP, A64_R(9), A64_SP), ctx);
1853 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
1854
1855 /* frame for patched function */
1856 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
1857 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
1858
1859 /* allocate stack space */
1860 emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
1861
1862 if (flags & BPF_TRAMP_F_IP_ARG) {
1863 /* save ip address of the traced function */
1864 emit_addr_mov_i64(A64_R(10), (const u64)orig_call, ctx);
1865 emit(A64_STR64I(A64_R(10), A64_SP, ip_off), ctx);
1866 }
1867
1868 /* save args count */
1869 emit(A64_MOVZ(1, A64_R(10), nargs, 0), ctx);
1870 emit(A64_STR64I(A64_R(10), A64_SP, nargs_off), ctx);
1871
1872 /* save args */
1873 save_args(ctx, args_off, nargs);
1874
1875 /* save callee saved registers */
1876 emit(A64_STR64I(A64_R(19), A64_SP, regs_off), ctx);
1877 emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx);
1878
1879 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1880 emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
1881 emit_call((const u64)__bpf_tramp_enter, ctx);
1882 }
1883
1884 for (i = 0; i < fentry->nr_links; i++)
1885 invoke_bpf_prog(ctx, fentry->links[i], args_off,
1886 retval_off, run_ctx_off,
1887 flags & BPF_TRAMP_F_RET_FENTRY_RET);
1888
1889 if (fmod_ret->nr_links) {
1890 branches = kcalloc(fmod_ret->nr_links, sizeof(__le32 *),
1891 GFP_KERNEL);
1892 if (!branches)
1893 return -ENOMEM;
1894
1895 invoke_bpf_mod_ret(ctx, fmod_ret, args_off, retval_off,
1896 run_ctx_off, branches);
1897 }
1898
1899 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1900 restore_args(ctx, args_off, nargs);
1901 /* call original func */
1902 emit(A64_LDR64I(A64_R(10), A64_SP, retaddr_off), ctx);
1903 emit(A64_BLR(A64_R(10)), ctx);
1904 /* store return value */
1905 emit(A64_STR64I(A64_R(0), A64_SP, retval_off), ctx);
1906 /* reserve a nop for bpf_tramp_image_put */
1907 im->ip_after_call = ctx->image + ctx->idx;
1908 emit(A64_NOP, ctx);
1909 }
1910
1911 /* update the branches saved in invoke_bpf_mod_ret with cbnz */
1912 for (i = 0; i < fmod_ret->nr_links && ctx->image != NULL; i++) {
1913 int offset = &ctx->image[ctx->idx] - branches[i];
1914 *branches[i] = cpu_to_le32(A64_CBNZ(1, A64_R(10), offset));
1915 }
1916
1917 for (i = 0; i < fexit->nr_links; i++)
1918 invoke_bpf_prog(ctx, fexit->links[i], args_off, retval_off,
1919 run_ctx_off, false);
1920
1921 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1922 im->ip_epilogue = ctx->image + ctx->idx;
1923 emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
1924 emit_call((const u64)__bpf_tramp_exit, ctx);
1925 }
1926
1927 if (flags & BPF_TRAMP_F_RESTORE_REGS)
1928 restore_args(ctx, args_off, nargs);
1929
1930 /* restore callee saved register x19 and x20 */
1931 emit(A64_LDR64I(A64_R(19), A64_SP, regs_off), ctx);
1932 emit(A64_LDR64I(A64_R(20), A64_SP, regs_off + 8), ctx);
1933
1934 if (save_ret)
1935 emit(A64_LDR64I(A64_R(0), A64_SP, retval_off), ctx);
1936
1937 /* reset SP */
1938 emit(A64_MOV(1, A64_SP, A64_FP), ctx);
1939
1940 /* pop frames */
1941 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
1942 emit(A64_POP(A64_FP, A64_R(9), A64_SP), ctx);
1943
1944 if (flags & BPF_TRAMP_F_SKIP_FRAME) {
1945 /* skip patched function, return to parent */
1946 emit(A64_MOV(1, A64_LR, A64_R(9)), ctx);
1947 emit(A64_RET(A64_R(9)), ctx);
1948 } else {
1949 /* return to patched function */
1950 emit(A64_MOV(1, A64_R(10), A64_LR), ctx);
1951 emit(A64_MOV(1, A64_LR, A64_R(9)), ctx);
1952 emit(A64_RET(A64_R(10)), ctx);
1953 }
1954
1955 if (ctx->image)
1956 bpf_flush_icache(ctx->image, ctx->image + ctx->idx);
1957
1958 kfree(branches);
1959
1960 return ctx->idx;
1961}
1962
1963int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
1964 void *image_end, const struct btf_func_model *m,
1965 u32 flags, struct bpf_tramp_links *tlinks,
1966 void *orig_call)
1967{
1968 int i, ret;
1969 int nargs = m->nr_args;
1970 int max_insns = ((long)image_end - (long)image) / AARCH64_INSN_SIZE;
1971 struct jit_ctx ctx = {
1972 .image = NULL,
1973 .idx = 0,
1974 };
1975
1976 /* the first 8 arguments are passed by registers */
1977 if (nargs > 8)
1978 return -ENOTSUPP;
1979
1980	/* struct arguments are not supported */
1981 for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
1982 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
1983 return -ENOTSUPP;
1984 }
1985
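	/* first pass: ctx.image is NULL, only count the instructions needed */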
1986 ret = prepare_trampoline(&ctx, im, tlinks, orig_call, nargs, flags);
1987 if (ret < 0)
1988 return ret;
1989
1990 if (ret > max_insns)
1991 return -EFBIG;
1992
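	/* second pass: emit the instructions into the real image */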
1993 ctx.image = image;
1994 ctx.idx = 0;
1995
1996 jit_fill_hole(image, (unsigned int)(image_end - image));
1997 ret = prepare_trampoline(&ctx, im, tlinks, orig_call, nargs, flags);
1998
1999 if (ret > 0 && validate_code(&ctx) < 0)
2000 ret = -EINVAL;
2001
2002 if (ret > 0)
2003 ret *= AARCH64_INSN_SIZE;
2004
2005 return ret;
2006}
2007
2008static bool is_long_jump(void *ip, void *target)
2009{
2010 long offset;
2011
2012 /* NULL target means this is a NOP */
2013 if (!target)
2014 return false;
2015
2016 offset = (long)target - (long)ip;
2017 return offset < -SZ_128M || offset >= SZ_128M;
2018}
2019
2020static int gen_branch_or_nop(enum aarch64_insn_branch_type type, void *ip,
2021 void *addr, void *plt, u32 *insn)
2022{
2023 void *target;
2024
2025 if (!addr) {
2026 *insn = aarch64_insn_gen_nop();
2027 return 0;
2028 }
2029
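	/* branch directly when the target is in range, otherwise go through the plt */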
2030 if (is_long_jump(ip, addr))
2031 target = plt;
2032 else
2033 target = addr;
2034
2035 *insn = aarch64_insn_gen_branch_imm((unsigned long)ip,
2036 (unsigned long)target,
2037 type);
2038
2039 return *insn != AARCH64_BREAK_FAULT ? 0 : -EFAULT;
2040}
2041
2042/* Replace the branch instruction from @ip to @old_addr in a bpf prog or a bpf
2043 * trampoline with the branch instruction from @ip to @new_addr. If @old_addr
2044 * or @new_addr is NULL, the old or new instruction is NOP.
2045 *
2046 * When @ip is the bpf prog entry, a bpf trampoline is being attached or
2047 * detached. Since bpf trampoline and bpf prog are allocated separately with
2048 * vmalloc, the address distance may exceed 128MB, the maximum branch range,
2049 * so long jumps must be handled.
2050 *
2051 * When a bpf prog is constructed, a plt pointing to empty trampoline
2052 * dummy_tramp is placed at the end:
2053 *
2054 * bpf_prog:
2055 * mov x9, lr
2056 * nop // patchsite
2057 * ...
2058 * ret
2059 *
2060 * plt:
2061 * ldr x10, target
2062 * br x10
2063 * target:
2064 * .quad dummy_tramp // plt target
2065 *
2066 * This is also the state when no trampoline is attached.
2067 *
2068 * When a short-jump bpf trampoline is attached, the patchsite is patched
2069 * to a bl instruction to the trampoline directly:
2070 *
2071 * bpf_prog:
2072 * mov x9, lr
2073 * bl <short-jump bpf trampoline address> // patchsite
2074 * ...
2075 * ret
2076 *
2077 * plt:
2078 * ldr x10, target
2079 * br x10
2080 * target:
2081 * .quad dummy_tramp // plt target
2082 *
2083 * When a long-jump bpf trampoline is attached, the plt target is filled with
2084 * the trampoline address and the patchsite is patched to a bl instruction to
2085 * the plt:
2086 *
2087 * bpf_prog:
2088 * mov x9, lr
2089 * bl plt // patchsite
2090 * ...
2091 * ret
2092 *
2093 * plt:
2094 * ldr x10, target
2095 * br x10
2096 * target:
2097 * .quad <long-jump bpf trampoline address> // plt target
2098 *
2099 * dummy_tramp is used to prevent another CPU from jumping to an unknown
2100 * location during patching, which keeps the patching process simple.
2101 */
2102int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
2103 void *old_addr, void *new_addr)
2104{
2105 int ret;
2106 u32 old_insn;
2107 u32 new_insn;
2108 u32 replaced;
2109 struct bpf_plt *plt = NULL;
2110 unsigned long size = 0UL;
2111 unsigned long offset = ~0UL;
2112 enum aarch64_insn_branch_type branch_type;
2113 char namebuf[KSYM_NAME_LEN];
2114 void *image = NULL;
2115 u64 plt_target = 0ULL;
2116 bool poking_bpf_entry;
2117
2118 if (!__bpf_address_lookup((unsigned long)ip, &size, &offset, namebuf))
2119 /* Only poking bpf text is supported. Since kernel function
2120		 * entry is set up by ftrace, we rely on ftrace to poke kernel
2121 * functions.
2122 */
2123 return -ENOTSUPP;
2124
2125 image = ip - offset;
2126 /* zero offset means we're poking bpf prog entry */
2127 poking_bpf_entry = (offset == 0UL);
2128
2129 /* bpf prog entry, find plt and the real patchsite */
2130 if (poking_bpf_entry) {
2131		/* the plt is located at the end of the bpf prog */
2132 plt = image + size - PLT_TARGET_OFFSET;
2133
2134 /* skip to the nop instruction in bpf prog entry:
2135 * bti c // if BTI enabled
2136 * mov x9, x30
2137 * nop
2138 */
2139 ip = image + POKE_OFFSET * AARCH64_INSN_SIZE;
2140 }
2141
2142 /* long jump is only possible at bpf prog entry */
2143 if (WARN_ON((is_long_jump(ip, new_addr) || is_long_jump(ip, old_addr)) &&
2144 !poking_bpf_entry))
2145 return -EINVAL;
2146
2147 if (poke_type == BPF_MOD_CALL)
2148 branch_type = AARCH64_INSN_BRANCH_LINK;
2149 else
2150 branch_type = AARCH64_INSN_BRANCH_NOLINK;
2151
2152 if (gen_branch_or_nop(branch_type, ip, old_addr, plt, &old_insn) < 0)
2153 return -EFAULT;
2154
2155 if (gen_branch_or_nop(branch_type, ip, new_addr, plt, &new_insn) < 0)
2156 return -EFAULT;
2157
2158 if (is_long_jump(ip, new_addr))
2159 plt_target = (u64)new_addr;
2160 else if (is_long_jump(ip, old_addr))
2161 /* if the old target is a long jump and the new target is not,
2162 * restore the plt target to dummy_tramp, so there is always a
2163 * legal and harmless address stored in plt target, and we'll
2164 * never jump from plt to an unknown place.
2165 */
2166 plt_target = (u64)&dummy_tramp;
2167
2168 if (plt_target) {
2169 /* non-zero plt_target indicates we're patching a bpf prog,
2170 * which is read only.
2171 */
2172 if (set_memory_rw(PAGE_MASK & ((uintptr_t)&plt->target), 1))
2173 return -EFAULT;
2174 WRITE_ONCE(plt->target, plt_target);
2175 set_memory_ro(PAGE_MASK & ((uintptr_t)&plt->target), 1);
2176 /* since plt target points to either the new trampoline
2177 * or dummy_tramp, even if another CPU reads the old plt
2178 * target value before fetching the bl instruction to plt,
2179 * it will be brought back by dummy_tramp, so no barrier is
2180 * required here.
2181 */
2182 }
2183
2184	/* if both the old and the new target are long jumps, the patchsite
2185	 * branches to the plt in either case, so no patching is required
2186	 */
2187 if (old_insn == new_insn)
2188 return 0;
2189
2190 mutex_lock(&text_mutex);
2191 if (aarch64_insn_read(ip, &replaced)) {
2192 ret = -EFAULT;
2193 goto out;
2194 }
2195
2196 if (replaced != old_insn) {
2197 ret = -EFAULT;
2198 goto out;
2199 }
2200
2201 /* We call aarch64_insn_patch_text_nosync() to replace instruction
2202 * atomically, so no other CPUs will fetch a half-new and half-old
2203	 * instruction. But there is a chance that another CPU executes the
2204 * old instruction after the patching operation finishes (e.g.,
2205 * pipeline not flushed, or icache not synchronized yet).
2206 *
2207 * 1. when a new trampoline is attached, it is not a problem for
2208 * different CPUs to jump to different trampolines temporarily.
2209 *
2210	 * 2. when an old trampoline is freed, we should wait for all other
2211	 * CPUs to exit the trampoline and make sure it is no longer
2212	 * reachable. Since bpf_tramp_image_put() already uses percpu_ref
2213	 * and task-based rcu for that synchronization, there is no need to
2214	 * call the sync variant here; see bpf_tramp_image_put() for details.
2215 */
2216 ret = aarch64_insn_patch_text_nosync(ip, new_insn);
2217out:
2218 mutex_unlock(&text_mutex);
2219
2220 return ret;
2221}
1/*
2 * BPF JIT compiler for ARM64
3 *
4 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#define pr_fmt(fmt) "bpf_jit: " fmt
20
21#include <linux/filter.h>
22#include <linux/printk.h>
23#include <linux/skbuff.h>
24#include <linux/slab.h>
25
26#include <asm/byteorder.h>
27#include <asm/cacheflush.h>
28#include <asm/debug-monitors.h>
29
30#include "bpf_jit.h"
31
32int bpf_jit_enable __read_mostly;
33
34#define TMP_REG_1 (MAX_BPF_REG + 0)
35#define TMP_REG_2 (MAX_BPF_REG + 1)
36
37/* Map BPF registers to A64 registers */
38static const int bpf2a64[] = {
39 /* return value from in-kernel function, and exit value from eBPF */
40 [BPF_REG_0] = A64_R(7),
41 /* arguments from eBPF program to in-kernel function */
42 [BPF_REG_1] = A64_R(0),
43 [BPF_REG_2] = A64_R(1),
44 [BPF_REG_3] = A64_R(2),
45 [BPF_REG_4] = A64_R(3),
46 [BPF_REG_5] = A64_R(4),
47 /* callee saved registers that in-kernel function will preserve */
48 [BPF_REG_6] = A64_R(19),
49 [BPF_REG_7] = A64_R(20),
50 [BPF_REG_8] = A64_R(21),
51 [BPF_REG_9] = A64_R(22),
52 /* read-only frame pointer to access stack */
53 [BPF_REG_FP] = A64_R(25),
54	/* temporary registers for internal BPF JIT */
55 [TMP_REG_1] = A64_R(23),
56 [TMP_REG_2] = A64_R(24),
57};
58
59struct jit_ctx {
60 const struct bpf_prog *prog;
61 int idx;
62 int tmp_used;
63 int epilogue_offset;
64 int *offset;
65 u32 *image;
66};
67
68static inline void emit(const u32 insn, struct jit_ctx *ctx)
69{
70 if (ctx->image != NULL)
71 ctx->image[ctx->idx] = cpu_to_le32(insn);
72
73 ctx->idx++;
74}
75
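/* Move a 64-bit immediate into a register: MOVZ for the low 16 bits, then
 * MOVK for each remaining non-zero 16-bit chunk.
 */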
76static inline void emit_a64_mov_i64(const int reg, const u64 val,
77 struct jit_ctx *ctx)
78{
79 u64 tmp = val;
80 int shift = 0;
81
82 emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
83 tmp >>= 16;
84 shift += 16;
85 while (tmp) {
86 if (tmp & 0xffff)
87 emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
88 tmp >>= 16;
89 shift += 16;
90 }
91}
92
93static inline void emit_a64_mov_i(const int is64, const int reg,
94 const s32 val, struct jit_ctx *ctx)
95{
96 u16 hi = val >> 16;
97 u16 lo = val & 0xffff;
98
99 if (hi & 0x8000) {
100 if (hi == 0xffff) {
101 emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
102 } else {
103 emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
104 emit(A64_MOVK(is64, reg, lo, 0), ctx);
105 }
106 } else {
107 emit(A64_MOVZ(is64, reg, lo, 0), ctx);
108 if (hi)
109 emit(A64_MOVK(is64, reg, hi, 16), ctx);
110 }
111}
112
113static inline int bpf2a64_offset(int bpf_to, int bpf_from,
114 const struct jit_ctx *ctx)
115{
116 int to = ctx->offset[bpf_to];
117 /* -1 to account for the Branch instruction */
118 int from = ctx->offset[bpf_from] - 1;
119
120 return to - from;
121}
122
123static void jit_fill_hole(void *area, unsigned int size)
124{
125 u32 *ptr;
126 /* We are guaranteed to have aligned memory. */
127 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
128 *ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
129}
130
131static inline int epilogue_offset(const struct jit_ctx *ctx)
132{
133 int to = ctx->epilogue_offset;
134 int from = ctx->idx;
135
136 return to - from;
137}
138
139/* Stack must be a multiple of 16 bytes */
140#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
141
142#define _STACK_SIZE \
143 (MAX_BPF_STACK \
144 + 4 /* extra for skb_copy_bits buffer */)
145
146#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
147
148static void build_prologue(struct jit_ctx *ctx)
149{
150 const u8 r6 = bpf2a64[BPF_REG_6];
151 const u8 r7 = bpf2a64[BPF_REG_7];
152 const u8 r8 = bpf2a64[BPF_REG_8];
153 const u8 r9 = bpf2a64[BPF_REG_9];
154 const u8 fp = bpf2a64[BPF_REG_FP];
155 const u8 tmp1 = bpf2a64[TMP_REG_1];
156 const u8 tmp2 = bpf2a64[TMP_REG_2];
157
158 /*
159 * BPF prog stack layout
160 *
161 * high
162 * original A64_SP => 0:+-----+ BPF prologue
163 * |FP/LR|
164 * current A64_FP => -16:+-----+
165 * | ... | callee saved registers
166 * +-----+
167 * | | x25/x26
168 * BPF fp register => -80:+-----+ <= (BPF_FP)
169 * | |
170 * | ... | BPF prog stack
171 * | |
172 * +-----+ <= (BPF_FP - MAX_BPF_STACK)
173 * |RSVD | JIT scratchpad
174 * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE)
175 * | |
176 * | ... | Function call stack
177 * | |
178 * +-----+
179 * low
180 *
181 */
182
183	/* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
184 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
185 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
186
187	/* Save callee-saved registers */
188 emit(A64_PUSH(r6, r7, A64_SP), ctx);
189 emit(A64_PUSH(r8, r9, A64_SP), ctx);
190 if (ctx->tmp_used)
191 emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);
192
193	/* Save fp (x25) and x26. SP requires 16-byte alignment */
194 emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);
195
196 /* Set up BPF prog stack base register (x25) */
197 emit(A64_MOV(1, fp, A64_SP), ctx);
198
199 /* Set up function call stack */
200 emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
201}
202
203static void build_epilogue(struct jit_ctx *ctx)
204{
205 const u8 r0 = bpf2a64[BPF_REG_0];
206 const u8 r6 = bpf2a64[BPF_REG_6];
207 const u8 r7 = bpf2a64[BPF_REG_7];
208 const u8 r8 = bpf2a64[BPF_REG_8];
209 const u8 r9 = bpf2a64[BPF_REG_9];
210 const u8 fp = bpf2a64[BPF_REG_FP];
211 const u8 tmp1 = bpf2a64[TMP_REG_1];
212 const u8 tmp2 = bpf2a64[TMP_REG_2];
213
214	/* We're done with the BPF stack */
215 emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
216
217	/* Restore fp (x25) and x26 */
218 emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
219
220	/* Restore callee-saved registers */
221 if (ctx->tmp_used)
222 emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
223 emit(A64_POP(r8, r9, A64_SP), ctx);
224 emit(A64_POP(r6, r7, A64_SP), ctx);
225
226 /* Restore FP/LR registers */
227 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
228
229 /* Set return value */
230 emit(A64_MOV(1, A64_R(0), r0), ctx);
231
232 emit(A64_RET(A64_LR), ctx);
233}
234
235/* JITs an eBPF instruction.
236 * Returns:
237 * 0 - successfully JITed an 8-byte eBPF instruction.
238 * >0 - successfully JITed a 16-byte eBPF instruction.
239 * <0 - failed to JIT.
240 */
241static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
242{
243 const u8 code = insn->code;
244 const u8 dst = bpf2a64[insn->dst_reg];
245 const u8 src = bpf2a64[insn->src_reg];
246 const u8 tmp = bpf2a64[TMP_REG_1];
247 const u8 tmp2 = bpf2a64[TMP_REG_2];
248 const s16 off = insn->off;
249 const s32 imm = insn->imm;
250 const int i = insn - ctx->prog->insnsi;
251 const bool is64 = BPF_CLASS(code) == BPF_ALU64;
252 u8 jmp_cond;
253 s32 jmp_offset;
254
255#define check_imm(bits, imm) do { \
256 if ((((imm) > 0) && ((imm) >> (bits))) || \
257 (((imm) < 0) && (~(imm) >> (bits)))) { \
258 pr_info("[%2d] imm=%d(0x%x) out of range\n", \
259 i, imm, imm); \
260 return -EINVAL; \
261 } \
262} while (0)
263#define check_imm19(imm) check_imm(19, imm)
264#define check_imm26(imm) check_imm(26, imm)
265
266 switch (code) {
267 /* dst = src */
268 case BPF_ALU | BPF_MOV | BPF_X:
269 case BPF_ALU64 | BPF_MOV | BPF_X:
270 emit(A64_MOV(is64, dst, src), ctx);
271 break;
272 /* dst = dst OP src */
273 case BPF_ALU | BPF_ADD | BPF_X:
274 case BPF_ALU64 | BPF_ADD | BPF_X:
275 emit(A64_ADD(is64, dst, dst, src), ctx);
276 break;
277 case BPF_ALU | BPF_SUB | BPF_X:
278 case BPF_ALU64 | BPF_SUB | BPF_X:
279 emit(A64_SUB(is64, dst, dst, src), ctx);
280 break;
281 case BPF_ALU | BPF_AND | BPF_X:
282 case BPF_ALU64 | BPF_AND | BPF_X:
283 emit(A64_AND(is64, dst, dst, src), ctx);
284 break;
285 case BPF_ALU | BPF_OR | BPF_X:
286 case BPF_ALU64 | BPF_OR | BPF_X:
287 emit(A64_ORR(is64, dst, dst, src), ctx);
288 break;
289 case BPF_ALU | BPF_XOR | BPF_X:
290 case BPF_ALU64 | BPF_XOR | BPF_X:
291 emit(A64_EOR(is64, dst, dst, src), ctx);
292 break;
293 case BPF_ALU | BPF_MUL | BPF_X:
294 case BPF_ALU64 | BPF_MUL | BPF_X:
295 emit(A64_MUL(is64, dst, dst, src), ctx);
296 break;
297 case BPF_ALU | BPF_DIV | BPF_X:
298 case BPF_ALU64 | BPF_DIV | BPF_X:
299 case BPF_ALU | BPF_MOD | BPF_X:
300 case BPF_ALU64 | BPF_MOD | BPF_X:
301 {
302 const u8 r0 = bpf2a64[BPF_REG_0];
303
304 /* if (src == 0) return 0 */
305 jmp_offset = 3; /* skip ahead to else path */
306 check_imm19(jmp_offset);
307 emit(A64_CBNZ(is64, src, jmp_offset), ctx);
308 emit(A64_MOVZ(1, r0, 0, 0), ctx);
309 jmp_offset = epilogue_offset(ctx);
310 check_imm26(jmp_offset);
311 emit(A64_B(jmp_offset), ctx);
312 /* else */
313 switch (BPF_OP(code)) {
314 case BPF_DIV:
315 emit(A64_UDIV(is64, dst, dst, src), ctx);
316 break;
317 case BPF_MOD:
318 ctx->tmp_used = 1;
319 emit(A64_UDIV(is64, tmp, dst, src), ctx);
320 emit(A64_MUL(is64, tmp, tmp, src), ctx);
321 emit(A64_SUB(is64, dst, dst, tmp), ctx);
322 break;
323 }
324 break;
325 }
326 case BPF_ALU | BPF_LSH | BPF_X:
327 case BPF_ALU64 | BPF_LSH | BPF_X:
328 emit(A64_LSLV(is64, dst, dst, src), ctx);
329 break;
330 case BPF_ALU | BPF_RSH | BPF_X:
331 case BPF_ALU64 | BPF_RSH | BPF_X:
332 emit(A64_LSRV(is64, dst, dst, src), ctx);
333 break;
334 case BPF_ALU | BPF_ARSH | BPF_X:
335 case BPF_ALU64 | BPF_ARSH | BPF_X:
336 emit(A64_ASRV(is64, dst, dst, src), ctx);
337 break;
338 /* dst = -dst */
339 case BPF_ALU | BPF_NEG:
340 case BPF_ALU64 | BPF_NEG:
341 emit(A64_NEG(is64, dst, dst), ctx);
342 break;
343 /* dst = BSWAP##imm(dst) */
344 case BPF_ALU | BPF_END | BPF_FROM_LE:
345 case BPF_ALU | BPF_END | BPF_FROM_BE:
346#ifdef CONFIG_CPU_BIG_ENDIAN
347 if (BPF_SRC(code) == BPF_FROM_BE)
348 goto emit_bswap_uxt;
349#else /* !CONFIG_CPU_BIG_ENDIAN */
350 if (BPF_SRC(code) == BPF_FROM_LE)
351 goto emit_bswap_uxt;
352#endif
353 switch (imm) {
354 case 16:
355 emit(A64_REV16(is64, dst, dst), ctx);
356 /* zero-extend 16 bits into 64 bits */
357 emit(A64_UXTH(is64, dst, dst), ctx);
358 break;
359 case 32:
360 emit(A64_REV32(is64, dst, dst), ctx);
361 /* upper 32 bits already cleared */
362 break;
363 case 64:
364 emit(A64_REV64(dst, dst), ctx);
365 break;
366 }
367 break;
368emit_bswap_uxt:
369 switch (imm) {
370 case 16:
371 /* zero-extend 16 bits into 64 bits */
372 emit(A64_UXTH(is64, dst, dst), ctx);
373 break;
374 case 32:
375 /* zero-extend 32 bits into 64 bits */
376 emit(A64_UXTW(is64, dst, dst), ctx);
377 break;
378 case 64:
379 /* nop */
380 break;
381 }
382 break;
383 /* dst = imm */
384 case BPF_ALU | BPF_MOV | BPF_K:
385 case BPF_ALU64 | BPF_MOV | BPF_K:
386 emit_a64_mov_i(is64, dst, imm, ctx);
387 break;
388 /* dst = dst OP imm */
389 case BPF_ALU | BPF_ADD | BPF_K:
390 case BPF_ALU64 | BPF_ADD | BPF_K:
391 ctx->tmp_used = 1;
392 emit_a64_mov_i(is64, tmp, imm, ctx);
393 emit(A64_ADD(is64, dst, dst, tmp), ctx);
394 break;
395 case BPF_ALU | BPF_SUB | BPF_K:
396 case BPF_ALU64 | BPF_SUB | BPF_K:
397 ctx->tmp_used = 1;
398 emit_a64_mov_i(is64, tmp, imm, ctx);
399 emit(A64_SUB(is64, dst, dst, tmp), ctx);
400 break;
401 case BPF_ALU | BPF_AND | BPF_K:
402 case BPF_ALU64 | BPF_AND | BPF_K:
403 ctx->tmp_used = 1;
404 emit_a64_mov_i(is64, tmp, imm, ctx);
405 emit(A64_AND(is64, dst, dst, tmp), ctx);
406 break;
407 case BPF_ALU | BPF_OR | BPF_K:
408 case BPF_ALU64 | BPF_OR | BPF_K:
409 ctx->tmp_used = 1;
410 emit_a64_mov_i(is64, tmp, imm, ctx);
411 emit(A64_ORR(is64, dst, dst, tmp), ctx);
412 break;
413 case BPF_ALU | BPF_XOR | BPF_K:
414 case BPF_ALU64 | BPF_XOR | BPF_K:
415 ctx->tmp_used = 1;
416 emit_a64_mov_i(is64, tmp, imm, ctx);
417 emit(A64_EOR(is64, dst, dst, tmp), ctx);
418 break;
419 case BPF_ALU | BPF_MUL | BPF_K:
420 case BPF_ALU64 | BPF_MUL | BPF_K:
421 ctx->tmp_used = 1;
422 emit_a64_mov_i(is64, tmp, imm, ctx);
423 emit(A64_MUL(is64, dst, dst, tmp), ctx);
424 break;
425 case BPF_ALU | BPF_DIV | BPF_K:
426 case BPF_ALU64 | BPF_DIV | BPF_K:
427 ctx->tmp_used = 1;
428 emit_a64_mov_i(is64, tmp, imm, ctx);
429 emit(A64_UDIV(is64, dst, dst, tmp), ctx);
430 break;
431 case BPF_ALU | BPF_MOD | BPF_K:
432 case BPF_ALU64 | BPF_MOD | BPF_K:
433 ctx->tmp_used = 1;
434 emit_a64_mov_i(is64, tmp2, imm, ctx);
435 emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
436 emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
437 emit(A64_SUB(is64, dst, dst, tmp), ctx);
438 break;
439 case BPF_ALU | BPF_LSH | BPF_K:
440 case BPF_ALU64 | BPF_LSH | BPF_K:
441 emit(A64_LSL(is64, dst, dst, imm), ctx);
442 break;
443 case BPF_ALU | BPF_RSH | BPF_K:
444 case BPF_ALU64 | BPF_RSH | BPF_K:
445 emit(A64_LSR(is64, dst, dst, imm), ctx);
446 break;
447 case BPF_ALU | BPF_ARSH | BPF_K:
448 case BPF_ALU64 | BPF_ARSH | BPF_K:
449 emit(A64_ASR(is64, dst, dst, imm), ctx);
450 break;
451
452 /* JUMP off */
453 case BPF_JMP | BPF_JA:
454 jmp_offset = bpf2a64_offset(i + off, i, ctx);
455 check_imm26(jmp_offset);
456 emit(A64_B(jmp_offset), ctx);
457 break;
458 /* IF (dst COND src) JUMP off */
459 case BPF_JMP | BPF_JEQ | BPF_X:
460 case BPF_JMP | BPF_JGT | BPF_X:
461 case BPF_JMP | BPF_JGE | BPF_X:
462 case BPF_JMP | BPF_JNE | BPF_X:
463 case BPF_JMP | BPF_JSGT | BPF_X:
464 case BPF_JMP | BPF_JSGE | BPF_X:
465 emit(A64_CMP(1, dst, src), ctx);
466emit_cond_jmp:
467 jmp_offset = bpf2a64_offset(i + off, i, ctx);
468 check_imm19(jmp_offset);
469 switch (BPF_OP(code)) {
470 case BPF_JEQ:
471 jmp_cond = A64_COND_EQ;
472 break;
473 case BPF_JGT:
474 jmp_cond = A64_COND_HI;
475 break;
476 case BPF_JGE:
477 jmp_cond = A64_COND_CS;
478 break;
479 case BPF_JSET:
480 case BPF_JNE:
481 jmp_cond = A64_COND_NE;
482 break;
483 case BPF_JSGT:
484 jmp_cond = A64_COND_GT;
485 break;
486 case BPF_JSGE:
487 jmp_cond = A64_COND_GE;
488 break;
489 default:
490 return -EFAULT;
491 }
492 emit(A64_B_(jmp_cond, jmp_offset), ctx);
493 break;
494 case BPF_JMP | BPF_JSET | BPF_X:
495 emit(A64_TST(1, dst, src), ctx);
496 goto emit_cond_jmp;
497 /* IF (dst COND imm) JUMP off */
498 case BPF_JMP | BPF_JEQ | BPF_K:
499 case BPF_JMP | BPF_JGT | BPF_K:
500 case BPF_JMP | BPF_JGE | BPF_K:
501 case BPF_JMP | BPF_JNE | BPF_K:
502 case BPF_JMP | BPF_JSGT | BPF_K:
503 case BPF_JMP | BPF_JSGE | BPF_K:
504 ctx->tmp_used = 1;
505 emit_a64_mov_i(1, tmp, imm, ctx);
506 emit(A64_CMP(1, dst, tmp), ctx);
507 goto emit_cond_jmp;
508 case BPF_JMP | BPF_JSET | BPF_K:
509 ctx->tmp_used = 1;
510 emit_a64_mov_i(1, tmp, imm, ctx);
511 emit(A64_TST(1, dst, tmp), ctx);
512 goto emit_cond_jmp;
513 /* function call */
514 case BPF_JMP | BPF_CALL:
515 {
516 const u8 r0 = bpf2a64[BPF_REG_0];
517 const u64 func = (u64)__bpf_call_base + imm;
518
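		/* load the helper address into tmp and call it, preserving FP/LR across the call */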
519 ctx->tmp_used = 1;
520 emit_a64_mov_i64(tmp, func, ctx);
521 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
522 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
523 emit(A64_BLR(tmp), ctx);
524 emit(A64_MOV(1, r0, A64_R(0)), ctx);
525 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
526 break;
527 }
528 /* function return */
529 case BPF_JMP | BPF_EXIT:
530		/* Optimization: when the last instruction is EXIT,
531		 * simply fall through to the epilogue. */
532 if (i == ctx->prog->len - 1)
533 break;
534 jmp_offset = epilogue_offset(ctx);
535 check_imm26(jmp_offset);
536 emit(A64_B(jmp_offset), ctx);
537 break;
538
539 /* dst = imm64 */
540 case BPF_LD | BPF_IMM | BPF_DW:
541 {
542 const struct bpf_insn insn1 = insn[1];
543 u64 imm64;
544
545 if (insn1.code != 0 || insn1.src_reg != 0 ||
546 insn1.dst_reg != 0 || insn1.off != 0) {
547 /* Note: verifier in BPF core must catch invalid
548 * instructions.
549 */
550 pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
551 return -EINVAL;
552 }
553
554 imm64 = (u64)insn1.imm << 32 | (u32)imm;
555 emit_a64_mov_i64(dst, imm64, ctx);
556
557 return 1;
558 }
559
560 /* LDX: dst = *(size *)(src + off) */
561 case BPF_LDX | BPF_MEM | BPF_W:
562 case BPF_LDX | BPF_MEM | BPF_H:
563 case BPF_LDX | BPF_MEM | BPF_B:
564 case BPF_LDX | BPF_MEM | BPF_DW:
565 ctx->tmp_used = 1;
566 emit_a64_mov_i(1, tmp, off, ctx);
567 switch (BPF_SIZE(code)) {
568 case BPF_W:
569 emit(A64_LDR32(dst, src, tmp), ctx);
570 break;
571 case BPF_H:
572 emit(A64_LDRH(dst, src, tmp), ctx);
573 break;
574 case BPF_B:
575 emit(A64_LDRB(dst, src, tmp), ctx);
576 break;
577 case BPF_DW:
578 emit(A64_LDR64(dst, src, tmp), ctx);
579 break;
580 }
581 break;
582
583 /* ST: *(size *)(dst + off) = imm */
584 case BPF_ST | BPF_MEM | BPF_W:
585 case BPF_ST | BPF_MEM | BPF_H:
586 case BPF_ST | BPF_MEM | BPF_B:
587 case BPF_ST | BPF_MEM | BPF_DW:
588 /* Load imm to a register then store it */
589 ctx->tmp_used = 1;
590 emit_a64_mov_i(1, tmp2, off, ctx);
591 emit_a64_mov_i(1, tmp, imm, ctx);
592 switch (BPF_SIZE(code)) {
593 case BPF_W:
594 emit(A64_STR32(tmp, dst, tmp2), ctx);
595 break;
596 case BPF_H:
597 emit(A64_STRH(tmp, dst, tmp2), ctx);
598 break;
599 case BPF_B:
600 emit(A64_STRB(tmp, dst, tmp2), ctx);
601 break;
602 case BPF_DW:
603 emit(A64_STR64(tmp, dst, tmp2), ctx);
604 break;
605 }
606 break;
607
608 /* STX: *(size *)(dst + off) = src */
609 case BPF_STX | BPF_MEM | BPF_W:
610 case BPF_STX | BPF_MEM | BPF_H:
611 case BPF_STX | BPF_MEM | BPF_B:
612 case BPF_STX | BPF_MEM | BPF_DW:
613 ctx->tmp_used = 1;
614 emit_a64_mov_i(1, tmp, off, ctx);
615 switch (BPF_SIZE(code)) {
616 case BPF_W:
617 emit(A64_STR32(src, dst, tmp), ctx);
618 break;
619 case BPF_H:
620 emit(A64_STRH(src, dst, tmp), ctx);
621 break;
622 case BPF_B:
623 emit(A64_STRB(src, dst, tmp), ctx);
624 break;
625 case BPF_DW:
626 emit(A64_STR64(src, dst, tmp), ctx);
627 break;
628 }
629 break;
630 /* STX XADD: lock *(u32 *)(dst + off) += src */
631 case BPF_STX | BPF_XADD | BPF_W:
632 /* STX XADD: lock *(u64 *)(dst + off) += src */
633 case BPF_STX | BPF_XADD | BPF_DW:
634 goto notyet;
635
636 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
637 case BPF_LD | BPF_ABS | BPF_W:
638 case BPF_LD | BPF_ABS | BPF_H:
639 case BPF_LD | BPF_ABS | BPF_B:
640 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
641 case BPF_LD | BPF_IND | BPF_W:
642 case BPF_LD | BPF_IND | BPF_H:
643 case BPF_LD | BPF_IND | BPF_B:
644 {
645 const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
646 const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
647 const u8 fp = bpf2a64[BPF_REG_FP];
648 const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
649 const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
650 const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
651 const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
652 const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
653 int size;
654
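		/* set up arguments and call bpf_load_pointer(skb, k, size, buffer) */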
655 emit(A64_MOV(1, r1, r6), ctx);
656 emit_a64_mov_i(0, r2, imm, ctx);
657 if (BPF_MODE(code) == BPF_IND)
658 emit(A64_ADD(0, r2, r2, src), ctx);
659 switch (BPF_SIZE(code)) {
660 case BPF_W:
661 size = 4;
662 break;
663 case BPF_H:
664 size = 2;
665 break;
666 case BPF_B:
667 size = 1;
668 break;
669 default:
670 return -EINVAL;
671 }
672 emit_a64_mov_i64(r3, size, ctx);
673 emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
674 emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
675 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
676 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
677 emit(A64_BLR(r5), ctx);
678 emit(A64_MOV(1, r0, A64_R(0)), ctx);
679 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
680
681 jmp_offset = epilogue_offset(ctx);
682 check_imm19(jmp_offset);
683 emit(A64_CBZ(1, r0, jmp_offset), ctx);
684 emit(A64_MOV(1, r5, r0), ctx);
685 switch (BPF_SIZE(code)) {
686 case BPF_W:
687 emit(A64_LDR32(r0, r5, A64_ZR), ctx);
688#ifndef CONFIG_CPU_BIG_ENDIAN
689 emit(A64_REV32(0, r0, r0), ctx);
690#endif
691 break;
692 case BPF_H:
693 emit(A64_LDRH(r0, r5, A64_ZR), ctx);
694#ifndef CONFIG_CPU_BIG_ENDIAN
695 emit(A64_REV16(0, r0, r0), ctx);
696#endif
697 break;
698 case BPF_B:
699 emit(A64_LDRB(r0, r5, A64_ZR), ctx);
700 break;
701 }
702 break;
703 }
704notyet:
705 pr_info_once("*** NOT YET: opcode %02x ***\n", code);
706 return -EFAULT;
707
708 default:
709 pr_err_once("unknown opcode %02x\n", code);
710 return -EINVAL;
711 }
712
713 return 0;
714}
715
716static int build_body(struct jit_ctx *ctx)
717{
718 const struct bpf_prog *prog = ctx->prog;
719 int i;
720
721 for (i = 0; i < prog->len; i++) {
722 const struct bpf_insn *insn = &prog->insnsi[i];
723 int ret;
724
725 ret = build_insn(insn, ctx);
726
727 if (ctx->image == NULL)
728 ctx->offset[i] = ctx->idx;
729
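		/* ret > 0 means a 16-byte instruction (BPF_LD_IMM64) was JITed, skip its second half */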
730 if (ret > 0) {
731 i++;
732 continue;
733 }
734 if (ret)
735 return ret;
736 }
737
738 return 0;
739}
740
741static int validate_code(struct jit_ctx *ctx)
742{
743 int i;
744
745 for (i = 0; i < ctx->idx; i++) {
746 u32 a64_insn = le32_to_cpu(ctx->image[i]);
747
748 if (a64_insn == AARCH64_BREAK_FAULT)
749 return -1;
750 }
751
752 return 0;
753}
754
755static inline void bpf_flush_icache(void *start, void *end)
756{
757 flush_icache_range((unsigned long)start, (unsigned long)end);
758}
759
760void bpf_jit_compile(struct bpf_prog *prog)
761{
762 /* Nothing to do here. We support Internal BPF. */
763}
764
765void bpf_int_jit_compile(struct bpf_prog *prog)
766{
767 struct bpf_binary_header *header;
768 struct jit_ctx ctx;
769 int image_size;
770 u8 *image_ptr;
771
772 if (!bpf_jit_enable)
773 return;
774
775 if (!prog || !prog->len)
776 return;
777
778 memset(&ctx, 0, sizeof(ctx));
779 ctx.prog = prog;
780
781 ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
782 if (ctx.offset == NULL)
783 return;
784
785	/* 1. Initial fake pass to compute ctx->idx, and to fill in
786	 * ctx->offset and ctx->tmp_used.
787	 */
788 if (build_body(&ctx))
789 goto out;
790
791 build_prologue(&ctx);
792
793 ctx.epilogue_offset = ctx.idx;
794 build_epilogue(&ctx);
795
796 /* Now we know the actual image size. */
797 image_size = sizeof(u32) * ctx.idx;
798 header = bpf_jit_binary_alloc(image_size, &image_ptr,
799 sizeof(u32), jit_fill_hole);
800 if (header == NULL)
801 goto out;
802
803 /* 2. Now, the actual pass. */
804
805 ctx.image = (u32 *)image_ptr;
806 ctx.idx = 0;
807
808 build_prologue(&ctx);
809
810 if (build_body(&ctx)) {
811 bpf_jit_binary_free(header);
812 goto out;
813 }
814
815 build_epilogue(&ctx);
816
817 /* 3. Extra pass to validate JITed code. */
818 if (validate_code(&ctx)) {
819 bpf_jit_binary_free(header);
820 goto out;
821 }
822
823 /* And we're done. */
824 if (bpf_jit_enable > 1)
825 bpf_jit_dump(prog->len, image_size, 2, ctx.image);
826
827 bpf_flush_icache(header, ctx.image + ctx.idx);
828
829 set_memory_ro((unsigned long)header, header->pages);
830 prog->bpf_func = (void *)ctx.image;
831 prog->jited = 1;
832out:
833 kfree(ctx.offset);
834}
835
836void bpf_jit_free(struct bpf_prog *prog)
837{
838 unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
839 struct bpf_binary_header *header = (void *)addr;
840
841 if (!prog->jited)
842 goto free_filter;
843
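	/* make the image writable again before freeing it */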
844 set_memory_rw(addr, header->pages);
845 bpf_jit_binary_free(header);
846
847free_filter:
848 bpf_prog_unlock_free(prog);
849}