Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Just-In-Time compiler for eBPF filters on 32bit ARM
4 *
5 * Copyright (c) 2023 Puranjay Mohan <puranjay12@gmail.com>
6 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
7 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
8 */
9
10#include <linux/bpf.h>
11#include <linux/bitops.h>
12#include <linux/compiler.h>
13#include <linux/errno.h>
14#include <linux/filter.h>
15#include <linux/netdevice.h>
16#include <linux/string.h>
17#include <linux/slab.h>
18#include <linux/if_vlan.h>
19#include <linux/math64.h>
20
21#include <asm/cacheflush.h>
22#include <asm/hwcap.h>
23#include <asm/opcodes.h>
24#include <asm/system_info.h>
25
26#include "bpf_jit_32.h"
27
28/*
29 * eBPF prog stack layout:
30 *
31 * high
32 * original ARM_SP => +-----+
33 * | | callee saved registers
34 * +-----+ <= (BPF_FP + SCRATCH_SIZE)
35 * | ... | eBPF JIT scratch space
36 * eBPF fp register => +-----+
37 * (BPF_FP) | ... | eBPF prog stack
38 * +-----+
39 * |RSVD | JIT scratchpad
40 * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
41 * | ... | caller-saved registers
42 * +-----+
43 * | ... | arguments passed on stack
44 * ARM_SP during call => +-----|
45 * | |
46 * | ... | Function call stack
47 * | |
48 * +-----+
49 * low
50 *
51 * The callee saved registers depends on whether frame pointers are enabled.
52 * With frame pointers (to be compliant with the ABI):
53 *
54 * high
55 * original ARM_SP => +--------------+ \
56 * | pc | |
57 * current ARM_FP => +--------------+ } callee saved registers
58 * |r4-r9,fp,ip,lr| |
59 * +--------------+ /
60 * low
61 *
62 * Without frame pointers:
63 *
64 * high
65 * original ARM_SP => +--------------+
66 * | r4-r9,fp,lr | callee saved registers
67 * current ARM_FP => +--------------+
68 * low
69 *
70 * When popping registers off the stack at the end of a BPF function, we
71 * reference them via the current ARM_FP register.
72 *
73 * Some eBPF operations are implemented via a call to a helper function.
74 * Such calls are "invisible" in the eBPF code, so it is up to the calling
75 * program to preserve any caller-saved ARM registers during the call. The
76 * JIT emits code to push and pop those registers onto the stack, immediately
77 * above the callee stack frame.
78 */
79#define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
80 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
81 1 << ARM_FP)
82#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
83#define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC)
84
85#define CALLER_MASK (1 << ARM_R0 | 1 << ARM_R1 | 1 << ARM_R2 | 1 << ARM_R3)
86
87enum {
88 /* Stack layout - these are offsets from (top of stack - 4) */
89 BPF_R2_HI,
90 BPF_R2_LO,
91 BPF_R3_HI,
92 BPF_R3_LO,
93 BPF_R4_HI,
94 BPF_R4_LO,
95 BPF_R5_HI,
96 BPF_R5_LO,
97 BPF_R7_HI,
98 BPF_R7_LO,
99 BPF_R8_HI,
100 BPF_R8_LO,
101 BPF_R9_HI,
102 BPF_R9_LO,
103 BPF_FP_HI,
104 BPF_FP_LO,
105 BPF_TC_HI,
106 BPF_TC_LO,
107 BPF_AX_HI,
108 BPF_AX_LO,
109 /* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
110 * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
111 * BPF_REG_FP and Tail call counts.
112 */
113 BPF_JIT_SCRATCH_REGS,
114};
115
116/*
117 * Negative "register" values indicate the register is stored on the stack
118 * and are the offset from the top of the eBPF JIT scratch space.
119 */
120#define STACK_OFFSET(k) (-4 - (k) * 4)
121#define SCRATCH_SIZE (BPF_JIT_SCRATCH_REGS * 4)
122
123#ifdef CONFIG_FRAME_POINTER
124#define EBPF_SCRATCH_TO_ARM_FP(x) ((x) - 4 * hweight16(CALLEE_PUSH_MASK) - 4)
125#else
126#define EBPF_SCRATCH_TO_ARM_FP(x) (x)
127#endif
128
129#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */
130#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */
131#define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */
132
133#define FLAG_IMM_OVERFLOW (1 << 0)
134
135/*
136 * Map eBPF registers to ARM 32bit registers or stack scratch space.
137 *
138 * 1. First argument is passed using the arm 32bit registers and rest of the
139 * arguments are passed on stack scratch space.
140 * 2. First callee-saved argument is mapped to arm 32 bit registers and rest
141 * arguments are mapped to scratch space on stack.
142 * 3. We need two 64 bit temp registers to do complex operations on eBPF
143 * registers.
144 *
145 * As the eBPF registers are all 64 bit registers and arm has only 32 bit
146 * registers, we have to map each eBPF registers with two arm 32 bit regs or
147 * scratch memory space and we have to build eBPF 64 bit register from those.
148 *
149 */
150static const s8 bpf2a32[][2] = {
151 /* return value from in-kernel function, and exit value from eBPF */
152 [BPF_REG_0] = {ARM_R1, ARM_R0},
153 /* arguments from eBPF program to in-kernel function */
154 [BPF_REG_1] = {ARM_R3, ARM_R2},
155 /* Stored on stack scratch space */
156 [BPF_REG_2] = {STACK_OFFSET(BPF_R2_HI), STACK_OFFSET(BPF_R2_LO)},
157 [BPF_REG_3] = {STACK_OFFSET(BPF_R3_HI), STACK_OFFSET(BPF_R3_LO)},
158 [BPF_REG_4] = {STACK_OFFSET(BPF_R4_HI), STACK_OFFSET(BPF_R4_LO)},
159 [BPF_REG_5] = {STACK_OFFSET(BPF_R5_HI), STACK_OFFSET(BPF_R5_LO)},
160 /* callee saved registers that in-kernel function will preserve */
161 [BPF_REG_6] = {ARM_R5, ARM_R4},
162 /* Stored on stack scratch space */
163 [BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)},
164 [BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
165 [BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
166 /* Read only Frame Pointer to access Stack */
167 [BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)},
168 /* Temporary Register for BPF JIT, can be used
169 * for constant blindings and others.
170 */
171 [TMP_REG_1] = {ARM_R7, ARM_R6},
172 [TMP_REG_2] = {ARM_R9, ARM_R8},
173 /* Tail call count. Stored on stack scratch space. */
174 [TCALL_CNT] = {STACK_OFFSET(BPF_TC_HI), STACK_OFFSET(BPF_TC_LO)},
175 /* temporary register for blinding constants.
176 * Stored on stack scratch space.
177 */
178 [BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
179};
180
181#define dst_lo dst[1]
182#define dst_hi dst[0]
183#define src_lo src[1]
184#define src_hi src[0]
185
186/*
187 * JIT Context:
188 *
189 * prog : bpf_prog
190 * idx : index of current last JITed instruction.
191 * prologue_bytes : bytes used in prologue.
192 * epilogue_offset : offset of epilogue starting.
193 * offsets : array of eBPF instruction offsets in
194 * JITed code.
195 * target : final JITed code.
196 * epilogue_bytes : no of bytes used in epilogue.
197 * imm_count : no of immediate counts used for global
198 * variables.
199 * imms : array of global variable addresses.
200 */
201
202struct jit_ctx {
203 const struct bpf_prog *prog;
204 unsigned int idx;
205 unsigned int prologue_bytes;
206 unsigned int epilogue_offset;
207 unsigned int cpu_architecture;
208 u32 flags;
209 u32 *offsets;
210 u32 *target;
211 u32 stack_size;
212#if __LINUX_ARM_ARCH__ < 7
213 u16 epilogue_bytes;
214 u16 imm_count;
215 u32 *imms;
216#endif
217};
218
219/*
220 * Wrappers which handle both OABI and EABI and assures Thumb2 interworking
221 * (where the assembly routines like __aeabi_uidiv could cause problems).
222 */
223static u32 jit_udiv32(u32 dividend, u32 divisor)
224{
225 return dividend / divisor;
226}
227
228static u32 jit_mod32(u32 dividend, u32 divisor)
229{
230 return dividend % divisor;
231}
232
233static s32 jit_sdiv32(s32 dividend, s32 divisor)
234{
235 return dividend / divisor;
236}
237
238static s32 jit_smod32(s32 dividend, s32 divisor)
239{
240 return dividend % divisor;
241}
242
243/* Wrappers for 64-bit div/mod */
244static u64 jit_udiv64(u64 dividend, u64 divisor)
245{
246 return div64_u64(dividend, divisor);
247}
248
249static u64 jit_mod64(u64 dividend, u64 divisor)
250{
251 u64 rem;
252
253 div64_u64_rem(dividend, divisor, &rem);
254 return rem;
255}
256
257static s64 jit_sdiv64(s64 dividend, s64 divisor)
258{
259 return div64_s64(dividend, divisor);
260}
261
262static s64 jit_smod64(s64 dividend, s64 divisor)
263{
264 u64 q;
265
266 q = div64_s64(dividend, divisor);
267
268 return dividend - q * divisor;
269}
270
271static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
272{
273 inst |= (cond << 28);
274 inst = __opcode_to_mem_arm(inst);
275
276 if (ctx->target != NULL)
277 ctx->target[ctx->idx] = inst;
278
279 ctx->idx++;
280}
281
282/*
283 * Emit an instruction that will be executed unconditionally.
284 */
285static inline void emit(u32 inst, struct jit_ctx *ctx)
286{
287 _emit(ARM_COND_AL, inst, ctx);
288}
289
290/*
291 * This is rather horrid, but necessary to convert an integer constant
292 * to an immediate operand for the opcodes, and be able to detect at
293 * build time whether the constant can't be converted (iow, usable in
294 * BUILD_BUG_ON()).
295 */
296#define imm12val(v, s) (rol32(v, (s)) | (s) << 7)
297#define const_imm8m(x) \
298 ({ int r; \
299 u32 v = (x); \
300 if (!(v & ~0x000000ff)) \
301 r = imm12val(v, 0); \
302 else if (!(v & ~0xc000003f)) \
303 r = imm12val(v, 2); \
304 else if (!(v & ~0xf000000f)) \
305 r = imm12val(v, 4); \
306 else if (!(v & ~0xfc000003)) \
307 r = imm12val(v, 6); \
308 else if (!(v & ~0xff000000)) \
309 r = imm12val(v, 8); \
310 else if (!(v & ~0x3fc00000)) \
311 r = imm12val(v, 10); \
312 else if (!(v & ~0x0ff00000)) \
313 r = imm12val(v, 12); \
314 else if (!(v & ~0x03fc0000)) \
315 r = imm12val(v, 14); \
316 else if (!(v & ~0x00ff0000)) \
317 r = imm12val(v, 16); \
318 else if (!(v & ~0x003fc000)) \
319 r = imm12val(v, 18); \
320 else if (!(v & ~0x000ff000)) \
321 r = imm12val(v, 20); \
322 else if (!(v & ~0x0003fc00)) \
323 r = imm12val(v, 22); \
324 else if (!(v & ~0x0000ff00)) \
325 r = imm12val(v, 24); \
326 else if (!(v & ~0x00003fc0)) \
327 r = imm12val(v, 26); \
328 else if (!(v & ~0x00000ff0)) \
329 r = imm12val(v, 28); \
330 else if (!(v & ~0x000003fc)) \
331 r = imm12val(v, 30); \
332 else \
333 r = -1; \
334 r; })
335
336/*
337 * Checks if immediate value can be converted to imm12(12 bits) value.
338 */
339static int imm8m(u32 x)
340{
341 u32 rot;
342
343 for (rot = 0; rot < 16; rot++)
344 if ((x & ~ror32(0xff, 2 * rot)) == 0)
345 return rol32(x, 2 * rot) | (rot << 8);
346 return -1;
347}
348
349#define imm8m(x) (__builtin_constant_p(x) ? const_imm8m(x) : imm8m(x))
350
351static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12)
352{
353 op |= rt << 12 | rn << 16;
354 if (imm12 >= 0)
355 op |= ARM_INST_LDST__U;
356 else
357 imm12 = -imm12;
358 return op | (imm12 & ARM_INST_LDST__IMM12);
359}
360
361static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8)
362{
363 op |= rt << 12 | rn << 16;
364 if (imm8 >= 0)
365 op |= ARM_INST_LDST__U;
366 else
367 imm8 = -imm8;
368 return op | (imm8 & 0xf0) << 4 | (imm8 & 0x0f);
369}
370
371#define ARM_LDR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off)
372#define ARM_LDRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off)
373#define ARM_LDRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off)
374#define ARM_LDRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off)
375
376#define ARM_LDRSH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRSH_I, rt, rn, off)
377#define ARM_LDRSB_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRSB_I, rt, rn, off)
378
379#define ARM_STR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off)
380#define ARM_STRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off)
381#define ARM_STRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off)
382#define ARM_STRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off)
383
384/*
385 * Initializes the JIT space with undefined instructions.
386 */
387static void jit_fill_hole(void *area, unsigned int size)
388{
389 u32 *ptr;
390 /* We are guaranteed to have aligned memory. */
391 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
392 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
393}
394
395#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
396/* EABI requires the stack to be aligned to 64-bit boundaries */
397#define STACK_ALIGNMENT 8
398#else
399/* Stack must be aligned to 32-bit boundaries */
400#define STACK_ALIGNMENT 4
401#endif
402
403/* total stack size used in JITed code */
404#define _STACK_SIZE (ctx->prog->aux->stack_depth + SCRATCH_SIZE)
405#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
406
407#if __LINUX_ARM_ARCH__ < 7
408
409static u16 imm_offset(u32 k, struct jit_ctx *ctx)
410{
411 unsigned int i = 0, offset;
412 u16 imm;
413
414 /* on the "fake" run we just count them (duplicates included) */
415 if (ctx->target == NULL) {
416 ctx->imm_count++;
417 return 0;
418 }
419
420 while ((i < ctx->imm_count) && ctx->imms[i]) {
421 if (ctx->imms[i] == k)
422 break;
423 i++;
424 }
425
426 if (ctx->imms[i] == 0)
427 ctx->imms[i] = k;
428
429 /* constants go just after the epilogue */
430 offset = ctx->offsets[ctx->prog->len - 1] * 4;
431 offset += ctx->prologue_bytes;
432 offset += ctx->epilogue_bytes;
433 offset += i * 4;
434
435 ctx->target[offset / 4] = k;
436
437 /* PC in ARM mode == address of the instruction + 8 */
438 imm = offset - (8 + ctx->idx * 4);
439
440 if (imm & ~0xfff) {
441 /*
442 * literal pool is too far, signal it into flags. we
443 * can only detect it on the second pass unfortunately.
444 */
445 ctx->flags |= FLAG_IMM_OVERFLOW;
446 return 0;
447 }
448
449 return imm;
450}
451
452#endif /* __LINUX_ARM_ARCH__ */
453
454static inline int bpf2a32_offset(int bpf_to, int bpf_from,
455 const struct jit_ctx *ctx) {
456 int to, from;
457
458 if (ctx->target == NULL)
459 return 0;
460 to = ctx->offsets[bpf_to];
461 from = ctx->offsets[bpf_from];
462
463 return to - from - 1;
464}
465
466/*
467 * Move an immediate that's not an imm8m to a core register.
468 */
469static inline void emit_mov_i_no8m(const u8 rd, u32 val, struct jit_ctx *ctx)
470{
471#if __LINUX_ARM_ARCH__ < 7
472 emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
473#else
474 emit(ARM_MOVW(rd, val & 0xffff), ctx);
475 if (val > 0xffff)
476 emit(ARM_MOVT(rd, val >> 16), ctx);
477#endif
478}
479
480static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
481{
482 int imm12 = imm8m(val);
483
484 if (imm12 >= 0)
485 emit(ARM_MOV_I(rd, imm12), ctx);
486 else
487 emit_mov_i_no8m(rd, val, ctx);
488}
489
490static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
491{
492 if (elf_hwcap & HWCAP_THUMB)
493 emit(ARM_BX(tgt_reg), ctx);
494 else
495 emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
496}
497
498static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
499{
500#if __LINUX_ARM_ARCH__ < 5
501 emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
502 emit_bx_r(tgt_reg, ctx);
503#else
504 emit(ARM_BLX_R(tgt_reg), ctx);
505#endif
506}
507
508static inline int epilogue_offset(const struct jit_ctx *ctx)
509{
510 int to, from;
511 /* No need for 1st dummy run */
512 if (ctx->target == NULL)
513 return 0;
514 to = ctx->epilogue_offset;
515 from = ctx->idx;
516
517 return to - from - 2;
518}
519
520static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op, u8 sign)
521{
522 const int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1);
523 const s8 *tmp = bpf2a32[TMP_REG_1];
524 u32 dst;
525
526#if __LINUX_ARM_ARCH__ == 7
527 if (elf_hwcap & HWCAP_IDIVA) {
528 if (op == BPF_DIV) {
529 emit(sign ? ARM_SDIV(rd, rm, rn) : ARM_UDIV(rd, rm, rn), ctx);
530 } else {
531 emit(sign ? ARM_SDIV(ARM_IP, rm, rn) : ARM_UDIV(ARM_IP, rm, rn), ctx);
532 emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx);
533 }
534 return;
535 }
536#endif
537
538 /*
539 * For BPF_ALU | BPF_DIV | BPF_K instructions
540 * As ARM_R1 and ARM_R0 contains 1st argument of bpf
541 * function, we need to save it on caller side to save
542 * it from getting destroyed within callee.
543 * After the return from the callee, we restore ARM_R0
544 * ARM_R1.
545 */
546 if (rn != ARM_R1) {
547 emit(ARM_MOV_R(tmp[0], ARM_R1), ctx);
548 emit(ARM_MOV_R(ARM_R1, rn), ctx);
549 }
550 if (rm != ARM_R0) {
551 emit(ARM_MOV_R(tmp[1], ARM_R0), ctx);
552 emit(ARM_MOV_R(ARM_R0, rm), ctx);
553 }
554
555 /* Push caller-saved registers on stack */
556 emit(ARM_PUSH(CALLER_MASK & ~exclude_mask), ctx);
557
558 /* Call appropriate function */
559 if (sign) {
560 if (op == BPF_DIV)
561 dst = (u32)jit_sdiv32;
562 else
563 dst = (u32)jit_smod32;
564 } else {
565 if (op == BPF_DIV)
566 dst = (u32)jit_udiv32;
567 else
568 dst = (u32)jit_mod32;
569 }
570
571 emit_mov_i(ARM_IP, dst, ctx);
572 emit_blx_r(ARM_IP, ctx);
573
574 /* Restore caller-saved registers from stack */
575 emit(ARM_POP(CALLER_MASK & ~exclude_mask), ctx);
576
577 /* Save return value */
578 if (rd != ARM_R0)
579 emit(ARM_MOV_R(rd, ARM_R0), ctx);
580
581 /* Restore ARM_R0 and ARM_R1 */
582 if (rn != ARM_R1)
583 emit(ARM_MOV_R(ARM_R1, tmp[0]), ctx);
584 if (rm != ARM_R0)
585 emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
586}
587
588static inline void emit_udivmod64(const s8 *rd, const s8 *rm, const s8 *rn, struct jit_ctx *ctx,
589 u8 op, u8 sign)
590{
591 u32 dst;
592
593 /* Push caller-saved registers on stack */
594 emit(ARM_PUSH(CALLER_MASK), ctx);
595
596 /*
597 * As we are implementing 64-bit div/mod as function calls, We need to put the dividend in
598 * R0-R1 and the divisor in R2-R3. As we have already pushed these registers on the stack,
599 * we can recover them later after returning from the function call.
600 */
601 if (rm[1] != ARM_R0 || rn[1] != ARM_R2) {
602 /*
603 * Move Rm to {R1, R0} if it is not already there.
604 */
605 if (rm[1] != ARM_R0) {
606 if (rn[1] == ARM_R0)
607 emit(ARM_PUSH(BIT(ARM_R0) | BIT(ARM_R1)), ctx);
608 emit(ARM_MOV_R(ARM_R1, rm[0]), ctx);
609 emit(ARM_MOV_R(ARM_R0, rm[1]), ctx);
610 if (rn[1] == ARM_R0) {
611 emit(ARM_POP(BIT(ARM_R2) | BIT(ARM_R3)), ctx);
612 goto cont;
613 }
614 }
615 /*
616 * Move Rn to {R3, R2} if it is not already there.
617 */
618 if (rn[1] != ARM_R2) {
619 emit(ARM_MOV_R(ARM_R3, rn[0]), ctx);
620 emit(ARM_MOV_R(ARM_R2, rn[1]), ctx);
621 }
622 }
623
624cont:
625
626 /* Call appropriate function */
627 if (sign) {
628 if (op == BPF_DIV)
629 dst = (u32)jit_sdiv64;
630 else
631 dst = (u32)jit_smod64;
632 } else {
633 if (op == BPF_DIV)
634 dst = (u32)jit_udiv64;
635 else
636 dst = (u32)jit_mod64;
637 }
638
639 emit_mov_i(ARM_IP, dst, ctx);
640 emit_blx_r(ARM_IP, ctx);
641
642 /* Save return value */
643 if (rd[1] != ARM_R0) {
644 emit(ARM_MOV_R(rd[0], ARM_R1), ctx);
645 emit(ARM_MOV_R(rd[1], ARM_R0), ctx);
646 }
647
648 /* Recover {R3, R2} and {R1, R0} from stack if they are not Rd */
649 if (rd[1] != ARM_R0 && rd[1] != ARM_R2) {
650 emit(ARM_POP(CALLER_MASK), ctx);
651 } else if (rd[1] != ARM_R0) {
652 emit(ARM_POP(BIT(ARM_R0) | BIT(ARM_R1)), ctx);
653 emit(ARM_ADD_I(ARM_SP, ARM_SP, 8), ctx);
654 } else {
655 emit(ARM_ADD_I(ARM_SP, ARM_SP, 8), ctx);
656 emit(ARM_POP(BIT(ARM_R2) | BIT(ARM_R3)), ctx);
657 }
658}
659
660/* Is the translated BPF register on stack? */
661static bool is_stacked(s8 reg)
662{
663 return reg < 0;
664}
665
666/* If a BPF register is on the stack (stk is true), load it to the
667 * supplied temporary register and return the temporary register
668 * for subsequent operations, otherwise just use the CPU register.
669 */
670static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx)
671{
672 if (is_stacked(reg)) {
673 emit(ARM_LDR_I(tmp, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
674 reg = tmp;
675 }
676 return reg;
677}
678
679static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp,
680 struct jit_ctx *ctx)
681{
682 if (is_stacked(reg[1])) {
683 if (__LINUX_ARM_ARCH__ >= 6 ||
684 ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
685 emit(ARM_LDRD_I(tmp[1], ARM_FP,
686 EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
687 } else {
688 emit(ARM_LDR_I(tmp[1], ARM_FP,
689 EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
690 emit(ARM_LDR_I(tmp[0], ARM_FP,
691 EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
692 }
693 reg = tmp;
694 }
695 return reg;
696}
697
698/* If a BPF register is on the stack (stk is true), save the register
699 * back to the stack. If the source register is not the same, then
700 * move it into the correct register.
701 */
702static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx)
703{
704 if (is_stacked(reg))
705 emit(ARM_STR_I(src, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
706 else if (reg != src)
707 emit(ARM_MOV_R(reg, src), ctx);
708}
709
710static void arm_bpf_put_reg64(const s8 *reg, const s8 *src,
711 struct jit_ctx *ctx)
712{
713 if (is_stacked(reg[1])) {
714 if (__LINUX_ARM_ARCH__ >= 6 ||
715 ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
716 emit(ARM_STRD_I(src[1], ARM_FP,
717 EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
718 } else {
719 emit(ARM_STR_I(src[1], ARM_FP,
720 EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
721 emit(ARM_STR_I(src[0], ARM_FP,
722 EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
723 }
724 } else {
725 if (reg[1] != src[1])
726 emit(ARM_MOV_R(reg[1], src[1]), ctx);
727 if (reg[0] != src[0])
728 emit(ARM_MOV_R(reg[0], src[0]), ctx);
729 }
730}
731
732static inline void emit_a32_mov_i(const s8 dst, const u32 val,
733 struct jit_ctx *ctx)
734{
735 const s8 *tmp = bpf2a32[TMP_REG_1];
736
737 if (is_stacked(dst)) {
738 emit_mov_i(tmp[1], val, ctx);
739 arm_bpf_put_reg32(dst, tmp[1], ctx);
740 } else {
741 emit_mov_i(dst, val, ctx);
742 }
743}
744
745static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx)
746{
747 const s8 *tmp = bpf2a32[TMP_REG_1];
748 const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
749
750 emit_mov_i(rd[1], (u32)val, ctx);
751 emit_mov_i(rd[0], val >> 32, ctx);
752
753 arm_bpf_put_reg64(dst, rd, ctx);
754}
755
756/* Sign extended move */
757static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[],
758 const u32 val, struct jit_ctx *ctx) {
759 u64 val64 = val;
760
761 if (is64 && (val & (1<<31)))
762 val64 |= 0xffffffff00000000ULL;
763 emit_a32_mov_i64(dst, val64, ctx);
764}
765
766static inline void emit_a32_add_r(const u8 dst, const u8 src,
767 const bool is64, const bool hi,
768 struct jit_ctx *ctx) {
769 /* 64 bit :
770 * adds dst_lo, dst_lo, src_lo
771 * adc dst_hi, dst_hi, src_hi
772 * 32 bit :
773 * add dst_lo, dst_lo, src_lo
774 */
775 if (!hi && is64)
776 emit(ARM_ADDS_R(dst, dst, src), ctx);
777 else if (hi && is64)
778 emit(ARM_ADC_R(dst, dst, src), ctx);
779 else
780 emit(ARM_ADD_R(dst, dst, src), ctx);
781}
782
783static inline void emit_a32_sub_r(const u8 dst, const u8 src,
784 const bool is64, const bool hi,
785 struct jit_ctx *ctx) {
786 /* 64 bit :
787 * subs dst_lo, dst_lo, src_lo
788 * sbc dst_hi, dst_hi, src_hi
789 * 32 bit :
790 * sub dst_lo, dst_lo, src_lo
791 */
792 if (!hi && is64)
793 emit(ARM_SUBS_R(dst, dst, src), ctx);
794 else if (hi && is64)
795 emit(ARM_SBC_R(dst, dst, src), ctx);
796 else
797 emit(ARM_SUB_R(dst, dst, src), ctx);
798}
799
800static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
801 const bool hi, const u8 op, struct jit_ctx *ctx){
802 switch (BPF_OP(op)) {
803 /* dst = dst + src */
804 case BPF_ADD:
805 emit_a32_add_r(dst, src, is64, hi, ctx);
806 break;
807 /* dst = dst - src */
808 case BPF_SUB:
809 emit_a32_sub_r(dst, src, is64, hi, ctx);
810 break;
811 /* dst = dst | src */
812 case BPF_OR:
813 emit(ARM_ORR_R(dst, dst, src), ctx);
814 break;
815 /* dst = dst & src */
816 case BPF_AND:
817 emit(ARM_AND_R(dst, dst, src), ctx);
818 break;
819 /* dst = dst ^ src */
820 case BPF_XOR:
821 emit(ARM_EOR_R(dst, dst, src), ctx);
822 break;
823 /* dst = dst * src */
824 case BPF_MUL:
825 emit(ARM_MUL(dst, dst, src), ctx);
826 break;
827 /* dst = dst << src */
828 case BPF_LSH:
829 emit(ARM_LSL_R(dst, dst, src), ctx);
830 break;
831 /* dst = dst >> src */
832 case BPF_RSH:
833 emit(ARM_LSR_R(dst, dst, src), ctx);
834 break;
835 /* dst = dst >> src (signed)*/
836 case BPF_ARSH:
837 emit(ARM_MOV_SR(dst, dst, SRTYPE_ASR, src), ctx);
838 break;
839 }
840}
841
842/* ALU operation (64 bit) */
843static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
844 const s8 src[], struct jit_ctx *ctx,
845 const u8 op) {
846 const s8 *tmp = bpf2a32[TMP_REG_1];
847 const s8 *tmp2 = bpf2a32[TMP_REG_2];
848 const s8 *rd;
849
850 rd = arm_bpf_get_reg64(dst, tmp, ctx);
851 if (is64) {
852 const s8 *rs;
853
854 rs = arm_bpf_get_reg64(src, tmp2, ctx);
855
856 /* ALU operation */
857 emit_alu_r(rd[1], rs[1], true, false, op, ctx);
858 emit_alu_r(rd[0], rs[0], true, true, op, ctx);
859 } else {
860 s8 rs;
861
862 rs = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
863
864 /* ALU operation */
865 emit_alu_r(rd[1], rs, true, false, op, ctx);
866 if (!ctx->prog->aux->verifier_zext)
867 emit_a32_mov_i(rd[0], 0, ctx);
868 }
869
870 arm_bpf_put_reg64(dst, rd, ctx);
871}
872
873/* dst = src (4 bytes)*/
874static inline void emit_a32_mov_r(const s8 dst, const s8 src, struct jit_ctx *ctx) {
875 const s8 *tmp = bpf2a32[TMP_REG_1];
876 s8 rt;
877
878 rt = arm_bpf_get_reg32(src, tmp[0], ctx);
879 arm_bpf_put_reg32(dst, rt, ctx);
880}
881
882/* dst = src */
883static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
884 const s8 src[],
885 struct jit_ctx *ctx) {
886 if (!is64) {
887 emit_a32_mov_r(dst_lo, src_lo, ctx);
888 if (!ctx->prog->aux->verifier_zext)
889 /* Zero out high 4 bytes */
890 emit_a32_mov_i(dst_hi, 0, ctx);
891 } else if (__LINUX_ARM_ARCH__ < 6 &&
892 ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
893 /* complete 8 byte move */
894 emit_a32_mov_r(dst_lo, src_lo, ctx);
895 emit_a32_mov_r(dst_hi, src_hi, ctx);
896 } else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
897 const u8 *tmp = bpf2a32[TMP_REG_1];
898
899 emit(ARM_LDRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
900 emit(ARM_STRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
901 } else if (is_stacked(src_lo)) {
902 emit(ARM_LDRD_I(dst[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
903 } else if (is_stacked(dst_lo)) {
904 emit(ARM_STRD_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
905 } else {
906 emit(ARM_MOV_R(dst[0], src[0]), ctx);
907 emit(ARM_MOV_R(dst[1], src[1]), ctx);
908 }
909}
910
911/* dst = (signed)src */
912static inline void emit_a32_movsx_r64(const bool is64, const u8 off, const s8 dst[], const s8 src[],
913 struct jit_ctx *ctx) {
914 const s8 *tmp = bpf2a32[TMP_REG_1];
915 s8 rs;
916 s8 rd;
917
918 if (is_stacked(dst_lo))
919 rd = tmp[1];
920 else
921 rd = dst_lo;
922 rs = arm_bpf_get_reg32(src_lo, rd, ctx);
923 /* rs may be one of src[1], dst[1], or tmp[1] */
924
925 /* Sign extend rs if needed. If off == 32, lower 32-bits of src are moved to dst and sign
926 * extension only happens in the upper 64 bits.
927 */
928 if (off != 32) {
929 /* Sign extend rs into rd */
930 emit(ARM_LSL_I(rd, rs, 32 - off), ctx);
931 emit(ARM_ASR_I(rd, rd, 32 - off), ctx);
932 } else {
933 rd = rs;
934 }
935
936 /* Write rd to dst_lo
937 *
938 * Optimization:
939 * Assume:
940 * 1. dst == src and stacked.
941 * 2. off == 32
942 *
943 * In this case src_lo was loaded into rd(tmp[1]) but rd was not sign extended as off==32.
944 * So, we don't need to write rd back to dst_lo as they have the same value.
945 * This saves us one str instruction.
946 */
947 if (dst_lo != src_lo || off != 32)
948 arm_bpf_put_reg32(dst_lo, rd, ctx);
949
950 if (!is64) {
951 if (!ctx->prog->aux->verifier_zext)
952 /* Zero out high 4 bytes */
953 emit_a32_mov_i(dst_hi, 0, ctx);
954 } else {
955 if (is_stacked(dst_hi)) {
956 emit(ARM_ASR_I(tmp[0], rd, 31), ctx);
957 arm_bpf_put_reg32(dst_hi, tmp[0], ctx);
958 } else {
959 emit(ARM_ASR_I(dst_hi, rd, 31), ctx);
960 }
961 }
962}
963
964/* Shift operations */
965static inline void emit_a32_alu_i(const s8 dst, const u32 val,
966 struct jit_ctx *ctx, const u8 op) {
967 const s8 *tmp = bpf2a32[TMP_REG_1];
968 s8 rd;
969
970 rd = arm_bpf_get_reg32(dst, tmp[0], ctx);
971
972 /* Do shift operation */
973 switch (op) {
974 case BPF_LSH:
975 emit(ARM_LSL_I(rd, rd, val), ctx);
976 break;
977 case BPF_RSH:
978 emit(ARM_LSR_I(rd, rd, val), ctx);
979 break;
980 case BPF_ARSH:
981 emit(ARM_ASR_I(rd, rd, val), ctx);
982 break;
983 case BPF_NEG:
984 emit(ARM_RSB_I(rd, rd, val), ctx);
985 break;
986 }
987
988 arm_bpf_put_reg32(dst, rd, ctx);
989}
990
991/* dst = ~dst (64 bit) */
992static inline void emit_a32_neg64(const s8 dst[],
993 struct jit_ctx *ctx){
994 const s8 *tmp = bpf2a32[TMP_REG_1];
995 const s8 *rd;
996
997 /* Setup Operand */
998 rd = arm_bpf_get_reg64(dst, tmp, ctx);
999
1000 /* Do Negate Operation */
1001 emit(ARM_RSBS_I(rd[1], rd[1], 0), ctx);
1002 emit(ARM_RSC_I(rd[0], rd[0], 0), ctx);
1003
1004 arm_bpf_put_reg64(dst, rd, ctx);
1005}
1006
1007/* dst = dst << src */
1008static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[],
1009 struct jit_ctx *ctx) {
1010 const s8 *tmp = bpf2a32[TMP_REG_1];
1011 const s8 *tmp2 = bpf2a32[TMP_REG_2];
1012 const s8 *rd;
1013 s8 rt;
1014
1015 /* Setup Operands */
1016 rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
1017 rd = arm_bpf_get_reg64(dst, tmp, ctx);
1018
1019 /* Do LSH operation */
1020 emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
1021 emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
1022 emit(ARM_MOV_SR(ARM_LR, rd[0], SRTYPE_ASL, rt), ctx);
1023 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[1], SRTYPE_ASL, ARM_IP), ctx);
1024 emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd[1], SRTYPE_LSR, tmp2[0]), ctx);
1025 emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_ASL, rt), ctx);
1026
1027 arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
1028 arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
1029}
1030
1031/* dst = dst >> src (signed)*/
1032static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[],
1033 struct jit_ctx *ctx) {
1034 const s8 *tmp = bpf2a32[TMP_REG_1];
1035 const s8 *tmp2 = bpf2a32[TMP_REG_2];
1036 const s8 *rd;
1037 s8 rt;
1038
1039 /* Setup Operands */
1040 rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
1041 rd = arm_bpf_get_reg64(dst, tmp, ctx);
1042
1043 /* Do the ARSH operation */
1044 emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
1045 emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
1046 emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
1047 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
1048 _emit(ARM_COND_PL,
1049 ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx);
1050 emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx);
1051
1052 arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
1053 arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
1054}
1055
1056/* dst = dst >> src */
1057static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[],
1058 struct jit_ctx *ctx) {
1059 const s8 *tmp = bpf2a32[TMP_REG_1];
1060 const s8 *tmp2 = bpf2a32[TMP_REG_2];
1061 const s8 *rd;
1062 s8 rt;
1063
1064 /* Setup Operands */
1065 rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
1066 rd = arm_bpf_get_reg64(dst, tmp, ctx);
1067
1068 /* Do RSH operation */
1069 emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
1070 emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
1071 emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
1072 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
1073 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_LSR, tmp2[0]), ctx);
1074 emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_LSR, rt), ctx);
1075
1076 arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
1077 arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
1078}
1079
1080/* dst = dst << val */
1081static inline void emit_a32_lsh_i64(const s8 dst[],
1082 const u32 val, struct jit_ctx *ctx){
1083 const s8 *tmp = bpf2a32[TMP_REG_1];
1084 const s8 *tmp2 = bpf2a32[TMP_REG_2];
1085 const s8 *rd;
1086
1087 /* Setup operands */
1088 rd = arm_bpf_get_reg64(dst, tmp, ctx);
1089
1090 /* Do LSH operation */
1091 if (val < 32) {
1092 emit(ARM_MOV_SI(tmp2[0], rd[0], SRTYPE_ASL, val), ctx);
1093 emit(ARM_ORR_SI(rd[0], tmp2[0], rd[1], SRTYPE_LSR, 32 - val), ctx);
1094 emit(ARM_MOV_SI(rd[1], rd[1], SRTYPE_ASL, val), ctx);
1095 } else {
1096 if (val == 32)
1097 emit(ARM_MOV_R(rd[0], rd[1]), ctx);
1098 else
1099 emit(ARM_MOV_SI(rd[0], rd[1], SRTYPE_ASL, val - 32), ctx);
1100 emit(ARM_EOR_R(rd[1], rd[1], rd[1]), ctx);
1101 }
1102
1103 arm_bpf_put_reg64(dst, rd, ctx);
1104}
1105
1106/* dst = dst >> val */
1107static inline void emit_a32_rsh_i64(const s8 dst[],
1108 const u32 val, struct jit_ctx *ctx) {
1109 const s8 *tmp = bpf2a32[TMP_REG_1];
1110 const s8 *tmp2 = bpf2a32[TMP_REG_2];
1111 const s8 *rd;
1112
1113 /* Setup operands */
1114 rd = arm_bpf_get_reg64(dst, tmp, ctx);
1115
1116 /* Do LSR operation */
1117 if (val == 0) {
1118 /* An immediate value of 0 encodes a shift amount of 32
1119 * for LSR. To shift by 0, don't do anything.
1120 */
1121 } else if (val < 32) {
1122 emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
1123 emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
1124 emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
1125 } else if (val == 32) {
1126 emit(ARM_MOV_R(rd[1], rd[0]), ctx);
1127 emit(ARM_MOV_I(rd[0], 0), ctx);
1128 } else {
1129 emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_LSR, val - 32), ctx);
1130 emit(ARM_MOV_I(rd[0], 0), ctx);
1131 }
1132
1133 arm_bpf_put_reg64(dst, rd, ctx);
1134}
1135
1136/* dst = dst >> val (signed) */
1137static inline void emit_a32_arsh_i64(const s8 dst[],
1138 const u32 val, struct jit_ctx *ctx){
1139 const s8 *tmp = bpf2a32[TMP_REG_1];
1140 const s8 *tmp2 = bpf2a32[TMP_REG_2];
1141 const s8 *rd;
1142
1143 /* Setup operands */
1144 rd = arm_bpf_get_reg64(dst, tmp, ctx);
1145
1146 /* Do ARSH operation */
1147 if (val == 0) {
1148 /* An immediate value of 0 encodes a shift amount of 32
1149 * for ASR. To shift by 0, don't do anything.
1150 */
1151 } else if (val < 32) {
1152 emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
1153 emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
1154 emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
1155 } else if (val == 32) {
1156 emit(ARM_MOV_R(rd[1], rd[0]), ctx);
1157 emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
1158 } else {
1159 emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_ASR, val - 32), ctx);
1160 emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
1161 }
1162
1163 arm_bpf_put_reg64(dst, rd, ctx);
1164}
1165
1166static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
1167 struct jit_ctx *ctx) {
1168 const s8 *tmp = bpf2a32[TMP_REG_1];
1169 const s8 *tmp2 = bpf2a32[TMP_REG_2];
1170 const s8 *rd, *rt;
1171
1172 /* Setup operands for multiplication */
1173 rd = arm_bpf_get_reg64(dst, tmp, ctx);
1174 rt = arm_bpf_get_reg64(src, tmp2, ctx);
1175
1176 /* Do Multiplication */
1177 emit(ARM_MUL(ARM_IP, rd[1], rt[0]), ctx);
1178 emit(ARM_MUL(ARM_LR, rd[0], rt[1]), ctx);
1179 emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);
1180
1181 emit(ARM_UMULL(ARM_IP, rd[0], rd[1], rt[1]), ctx);
1182 emit(ARM_ADD_R(rd[0], ARM_LR, rd[0]), ctx);
1183
1184 arm_bpf_put_reg32(dst_lo, ARM_IP, ctx);
1185 arm_bpf_put_reg32(dst_hi, rd[0], ctx);
1186}
1187
1188static bool is_ldst_imm(s16 off, const u8 size)
1189{
1190 s16 off_max = 0;
1191
1192 switch (size) {
1193 case BPF_B:
1194 case BPF_W:
1195 off_max = 0xfff;
1196 break;
1197 case BPF_H:
1198 off_max = 0xff;
1199 break;
1200 case BPF_DW:
1201 /* Need to make sure off+4 does not overflow. */
1202 off_max = 0xfff - 4;
1203 break;
1204 }
1205 return -off_max <= off && off <= off_max;
1206}
1207
1208static bool is_ldst_imm8(s16 off, const u8 size)
1209{
1210 s16 off_max = 0;
1211
1212 switch (size) {
1213 case BPF_B:
1214 off_max = 0xff;
1215 break;
1216 case BPF_W:
1217 off_max = 0xfff;
1218 break;
1219 case BPF_H:
1220 off_max = 0xff;
1221 break;
1222 }
1223 return -off_max <= off && off <= off_max;
1224}
1225
1226/* *(size *)(dst + off) = src */
1227static inline void emit_str_r(const s8 dst, const s8 src[],
1228 s16 off, struct jit_ctx *ctx, const u8 sz){
1229 const s8 *tmp = bpf2a32[TMP_REG_1];
1230 s8 rd;
1231
1232 rd = arm_bpf_get_reg32(dst, tmp[1], ctx);
1233
1234 if (!is_ldst_imm(off, sz)) {
1235 emit_a32_mov_i(tmp[0], off, ctx);
1236 emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
1237 rd = tmp[0];
1238 off = 0;
1239 }
1240 switch (sz) {
1241 case BPF_B:
1242 /* Store a Byte */
1243 emit(ARM_STRB_I(src_lo, rd, off), ctx);
1244 break;
1245 case BPF_H:
1246 /* Store a HalfWord */
1247 emit(ARM_STRH_I(src_lo, rd, off), ctx);
1248 break;
1249 case BPF_W:
1250 /* Store a Word */
1251 emit(ARM_STR_I(src_lo, rd, off), ctx);
1252 break;
1253 case BPF_DW:
1254 /* Store a Double Word */
1255 emit(ARM_STR_I(src_lo, rd, off), ctx);
1256 emit(ARM_STR_I(src_hi, rd, off + 4), ctx);
1257 break;
1258 }
1259}
1260
1261/* dst = *(size*)(src + off) */
1262static inline void emit_ldx_r(const s8 dst[], const s8 src,
1263 s16 off, struct jit_ctx *ctx, const u8 sz){
1264 const s8 *tmp = bpf2a32[TMP_REG_1];
1265 const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
1266 s8 rm = src;
1267
1268 if (!is_ldst_imm(off, sz)) {
1269 emit_a32_mov_i(tmp[0], off, ctx);
1270 emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
1271 rm = tmp[0];
1272 off = 0;
1273 } else if (rd[1] == rm) {
1274 emit(ARM_MOV_R(tmp[0], rm), ctx);
1275 rm = tmp[0];
1276 }
1277 switch (sz) {
1278 case BPF_B:
1279 /* Load a Byte */
1280 emit(ARM_LDRB_I(rd[1], rm, off), ctx);
1281 if (!ctx->prog->aux->verifier_zext)
1282 emit_a32_mov_i(rd[0], 0, ctx);
1283 break;
1284 case BPF_H:
1285 /* Load a HalfWord */
1286 emit(ARM_LDRH_I(rd[1], rm, off), ctx);
1287 if (!ctx->prog->aux->verifier_zext)
1288 emit_a32_mov_i(rd[0], 0, ctx);
1289 break;
1290 case BPF_W:
1291 /* Load a Word */
1292 emit(ARM_LDR_I(rd[1], rm, off), ctx);
1293 if (!ctx->prog->aux->verifier_zext)
1294 emit_a32_mov_i(rd[0], 0, ctx);
1295 break;
1296 case BPF_DW:
1297 /* Load a Double Word */
1298 emit(ARM_LDR_I(rd[1], rm, off), ctx);
1299 emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
1300 break;
1301 }
1302 arm_bpf_put_reg64(dst, rd, ctx);
1303}
1304
1305/* dst = *(signed size*)(src + off) */
1306static inline void emit_ldsx_r(const s8 dst[], const s8 src,
1307 s16 off, struct jit_ctx *ctx, const u8 sz){
1308 const s8 *tmp = bpf2a32[TMP_REG_1];
1309 const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
1310 s8 rm = src;
1311 int add_off;
1312
1313 if (!is_ldst_imm8(off, sz)) {
1314 /*
1315 * offset does not fit in the load/store immediate,
1316 * construct an ADD instruction to apply the offset.
1317 */
1318 add_off = imm8m(off);
1319 if (add_off > 0) {
1320 emit(ARM_ADD_I(tmp[0], src, add_off), ctx);
1321 rm = tmp[0];
1322 } else {
1323 emit_a32_mov_i(tmp[0], off, ctx);
1324 emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
1325 rm = tmp[0];
1326 }
1327 off = 0;
1328 }
1329
1330 switch (sz) {
1331 case BPF_B:
1332 /* Load a Byte with sign extension*/
1333 emit(ARM_LDRSB_I(rd[1], rm, off), ctx);
1334 break;
1335 case BPF_H:
1336 /* Load a HalfWord with sign extension*/
1337 emit(ARM_LDRSH_I(rd[1], rm, off), ctx);
1338 break;
1339 case BPF_W:
1340 /* Load a Word*/
1341 emit(ARM_LDR_I(rd[1], rm, off), ctx);
1342 break;
1343 }
1344 /* Carry the sign extension to upper 32 bits */
1345 emit(ARM_ASR_I(rd[0], rd[1], 31), ctx);
1346 arm_bpf_put_reg64(dst, rd, ctx);
1347}
1348
1349/* Arithmatic Operation */
1350static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
1351 const u8 rn, struct jit_ctx *ctx, u8 op,
1352 bool is_jmp64) {
1353 switch (op) {
1354 case BPF_JSET:
1355 if (is_jmp64) {
1356 emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
1357 emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
1358 emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
1359 } else {
1360 emit(ARM_ANDS_R(ARM_IP, rt, rn), ctx);
1361 }
1362 break;
1363 case BPF_JEQ:
1364 case BPF_JNE:
1365 case BPF_JGT:
1366 case BPF_JGE:
1367 case BPF_JLE:
1368 case BPF_JLT:
1369 if (is_jmp64) {
1370 emit(ARM_CMP_R(rd, rm), ctx);
1371 /* Only compare low halve if high halve are equal. */
1372 _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
1373 } else {
1374 emit(ARM_CMP_R(rt, rn), ctx);
1375 }
1376 break;
1377 case BPF_JSLE:
1378 case BPF_JSGT:
1379 emit(ARM_CMP_R(rn, rt), ctx);
1380 if (is_jmp64)
1381 emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
1382 break;
1383 case BPF_JSLT:
1384 case BPF_JSGE:
1385 emit(ARM_CMP_R(rt, rn), ctx);
1386 if (is_jmp64)
1387 emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
1388 break;
1389 }
1390}
1391
1392static int out_offset = -1; /* initialized on the first pass of build_body() */
1393static int emit_bpf_tail_call(struct jit_ctx *ctx)
1394{
1395
1396 /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
1397 const s8 *r2 = bpf2a32[BPF_REG_2];
1398 const s8 *r3 = bpf2a32[BPF_REG_3];
1399 const s8 *tmp = bpf2a32[TMP_REG_1];
1400 const s8 *tmp2 = bpf2a32[TMP_REG_2];
1401 const s8 *tcc = bpf2a32[TCALL_CNT];
1402 const s8 *tc;
1403 const int idx0 = ctx->idx;
1404#define cur_offset (ctx->idx - idx0)
1405#define jmp_offset (out_offset - (cur_offset) - 2)
1406 u32 lo, hi;
1407 s8 r_array, r_index;
1408 int off;
1409
1410 /* if (index >= array->map.max_entries)
1411 * goto out;
1412 */
1413 BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) >
1414 ARM_INST_LDST__IMM12);
1415 off = offsetof(struct bpf_array, map.max_entries);
1416 r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx);
1417 /* index is 32-bit for arrays */
1418 r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx);
1419 /* array->map.max_entries */
1420 emit(ARM_LDR_I(tmp[1], r_array, off), ctx);
1421 /* index >= array->map.max_entries */
1422 emit(ARM_CMP_R(r_index, tmp[1]), ctx);
1423 _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
1424
1425 /* tmp2[0] = array, tmp2[1] = index */
1426
1427 /*
1428 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
1429 * goto out;
1430 * tail_call_cnt++;
1431 */
1432 lo = (u32)MAX_TAIL_CALL_CNT;
1433 hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
1434 tc = arm_bpf_get_reg64(tcc, tmp, ctx);
1435 emit(ARM_CMP_I(tc[0], hi), ctx);
1436 _emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx);
1437 _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
1438 emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx);
1439 emit(ARM_ADC_I(tc[0], tc[0], 0), ctx);
1440 arm_bpf_put_reg64(tcc, tmp, ctx);
1441
1442 /* prog = array->ptrs[index]
1443 * if (prog == NULL)
1444 * goto out;
1445 */
1446 BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0);
1447 off = imm8m(offsetof(struct bpf_array, ptrs));
1448 emit(ARM_ADD_I(tmp[1], r_array, off), ctx);
1449 emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx);
1450 emit(ARM_CMP_I(tmp[1], 0), ctx);
1451 _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
1452
1453 /* goto *(prog->bpf_func + prologue_size); */
1454 BUILD_BUG_ON(offsetof(struct bpf_prog, bpf_func) >
1455 ARM_INST_LDST__IMM12);
1456 off = offsetof(struct bpf_prog, bpf_func);
1457 emit(ARM_LDR_I(tmp[1], tmp[1], off), ctx);
1458 emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
1459 emit_bx_r(tmp[1], ctx);
1460
1461 /* out: */
1462 if (out_offset == -1)
1463 out_offset = cur_offset;
1464 if (cur_offset != out_offset) {
1465 pr_err_once("tail_call out_offset = %d, expected %d!\n",
1466 cur_offset, out_offset);
1467 return -1;
1468 }
1469 return 0;
1470#undef cur_offset
1471#undef jmp_offset
1472}
1473
1474/* 0xabcd => 0xcdab */
1475static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
1476{
1477#if __LINUX_ARM_ARCH__ < 6
1478 const s8 *tmp2 = bpf2a32[TMP_REG_2];
1479
1480 emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
1481 emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx);
1482 emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
1483 emit(ARM_ORR_SI(rd, tmp2[0], tmp2[1], SRTYPE_LSL, 8), ctx);
1484#else /* ARMv6+ */
1485 emit(ARM_REV16(rd, rn), ctx);
1486#endif
1487}
1488
1489/* 0xabcdefgh => 0xghefcdab */
1490static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
1491{
1492#if __LINUX_ARM_ARCH__ < 6
1493 const s8 *tmp2 = bpf2a32[TMP_REG_2];
1494
1495 emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
1496 emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx);
1497 emit(ARM_ORR_SI(ARM_IP, tmp2[0], tmp2[1], SRTYPE_LSL, 24), ctx);
1498
1499 emit(ARM_MOV_SI(tmp2[1], rn, SRTYPE_LSR, 8), ctx);
1500 emit(ARM_AND_I(tmp2[1], tmp2[1], 0xff), ctx);
1501 emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 16), ctx);
1502 emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
1503 emit(ARM_MOV_SI(tmp2[0], tmp2[0], SRTYPE_LSL, 8), ctx);
1504 emit(ARM_ORR_SI(tmp2[0], tmp2[0], tmp2[1], SRTYPE_LSL, 16), ctx);
1505 emit(ARM_ORR_R(rd, ARM_IP, tmp2[0]), ctx);
1506
1507#else /* ARMv6+ */
1508 emit(ARM_REV(rd, rn), ctx);
1509#endif
1510}
1511
1512// push the scratch stack register on top of the stack
1513static inline void emit_push_r64(const s8 src[], struct jit_ctx *ctx)
1514{
1515 const s8 *tmp2 = bpf2a32[TMP_REG_2];
1516 const s8 *rt;
1517 u16 reg_set = 0;
1518
1519 rt = arm_bpf_get_reg64(src, tmp2, ctx);
1520
1521 reg_set = (1 << rt[1]) | (1 << rt[0]);
1522 emit(ARM_PUSH(reg_set), ctx);
1523}
1524
1525static void build_prologue(struct jit_ctx *ctx)
1526{
1527 const s8 arm_r0 = bpf2a32[BPF_REG_0][1];
1528 const s8 *bpf_r1 = bpf2a32[BPF_REG_1];
1529 const s8 *bpf_fp = bpf2a32[BPF_REG_FP];
1530 const s8 *tcc = bpf2a32[TCALL_CNT];
1531
1532 /* Save callee saved registers. */
1533#ifdef CONFIG_FRAME_POINTER
1534 u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
1535 emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
1536 emit(ARM_PUSH(reg_set), ctx);
1537 emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
1538#else
1539 emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
1540 emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
1541#endif
1542 /* mov r3, #0 */
1543 /* sub r2, sp, #SCRATCH_SIZE */
1544 emit(ARM_MOV_I(bpf_r1[0], 0), ctx);
1545 emit(ARM_SUB_I(bpf_r1[1], ARM_SP, SCRATCH_SIZE), ctx);
1546
1547 ctx->stack_size = imm8m(STACK_SIZE);
1548
1549 /* Set up function call stack */
1550 emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
1551
1552 /* Set up BPF prog stack base register */
1553 emit_a32_mov_r64(true, bpf_fp, bpf_r1, ctx);
1554
1555 /* Initialize Tail Count */
1556 emit(ARM_MOV_I(bpf_r1[1], 0), ctx);
1557 emit_a32_mov_r64(true, tcc, bpf_r1, ctx);
1558
1559 /* Move BPF_CTX to BPF_R1 */
1560 emit(ARM_MOV_R(bpf_r1[1], arm_r0), ctx);
1561
1562 /* end of prologue */
1563}
1564
1565/* restore callee saved registers. */
1566static void build_epilogue(struct jit_ctx *ctx)
1567{
1568#ifdef CONFIG_FRAME_POINTER
1569 /* When using frame pointers, some additional registers need to
1570 * be loaded. */
1571 u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
1572 emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
1573 emit(ARM_LDM(ARM_SP, reg_set), ctx);
1574#else
1575 /* Restore callee saved registers. */
1576 emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
1577 emit(ARM_POP(CALLEE_POP_MASK), ctx);
1578#endif
1579}
1580
1581/*
1582 * Convert an eBPF instruction to native instruction, i.e
1583 * JITs an eBPF instruction.
1584 * Returns :
1585 * 0 - Successfully JITed an 8-byte eBPF instruction
1586 * >0 - Successfully JITed a 16-byte eBPF instruction
1587 * <0 - Failed to JIT.
1588 */
1589static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1590{
1591 const u8 code = insn->code;
1592 const s8 *dst = bpf2a32[insn->dst_reg];
1593 const s8 *src = bpf2a32[insn->src_reg];
1594 const s8 *tmp = bpf2a32[TMP_REG_1];
1595 const s8 *tmp2 = bpf2a32[TMP_REG_2];
1596 const s16 off = insn->off;
1597 const s32 imm = insn->imm;
1598 const int i = insn - ctx->prog->insnsi;
1599 const bool is64 = BPF_CLASS(code) == BPF_ALU64;
1600 const s8 *rd, *rs;
1601 s8 rd_lo, rt, rm, rn;
1602 s32 jmp_offset;
1603
1604#define check_imm(bits, imm) do { \
1605 if ((imm) >= (1 << ((bits) - 1)) || \
1606 (imm) < -(1 << ((bits) - 1))) { \
1607 pr_info("[%2d] imm=%d(0x%x) out of range\n", \
1608 i, imm, imm); \
1609 return -EINVAL; \
1610 } \
1611} while (0)
1612#define check_imm24(imm) check_imm(24, imm)
1613
1614 switch (code) {
1615 /* ALU operations */
1616
1617 /* dst = src */
1618 case BPF_ALU | BPF_MOV | BPF_K:
1619 case BPF_ALU | BPF_MOV | BPF_X:
1620 case BPF_ALU64 | BPF_MOV | BPF_K:
1621 case BPF_ALU64 | BPF_MOV | BPF_X:
1622 switch (BPF_SRC(code)) {
1623 case BPF_X:
1624 if (imm == 1) {
1625 /* Special mov32 for zext */
1626 emit_a32_mov_i(dst_hi, 0, ctx);
1627 break;
1628 }
1629 if (insn->off)
1630 emit_a32_movsx_r64(is64, insn->off, dst, src, ctx);
1631 else
1632 emit_a32_mov_r64(is64, dst, src, ctx);
1633 break;
1634 case BPF_K:
1635 /* Sign-extend immediate value to destination reg */
1636 emit_a32_mov_se_i64(is64, dst, imm, ctx);
1637 break;
1638 }
1639 break;
1640 /* dst = dst + src/imm */
1641 /* dst = dst - src/imm */
1642 /* dst = dst | src/imm */
1643 /* dst = dst & src/imm */
1644 /* dst = dst ^ src/imm */
1645 /* dst = dst * src/imm */
1646 /* dst = dst << src */
1647 /* dst = dst >> src */
1648 case BPF_ALU | BPF_ADD | BPF_K:
1649 case BPF_ALU | BPF_ADD | BPF_X:
1650 case BPF_ALU | BPF_SUB | BPF_K:
1651 case BPF_ALU | BPF_SUB | BPF_X:
1652 case BPF_ALU | BPF_OR | BPF_K:
1653 case BPF_ALU | BPF_OR | BPF_X:
1654 case BPF_ALU | BPF_AND | BPF_K:
1655 case BPF_ALU | BPF_AND | BPF_X:
1656 case BPF_ALU | BPF_XOR | BPF_K:
1657 case BPF_ALU | BPF_XOR | BPF_X:
1658 case BPF_ALU | BPF_MUL | BPF_K:
1659 case BPF_ALU | BPF_MUL | BPF_X:
1660 case BPF_ALU | BPF_LSH | BPF_X:
1661 case BPF_ALU | BPF_RSH | BPF_X:
1662 case BPF_ALU | BPF_ARSH | BPF_X:
1663 case BPF_ALU64 | BPF_ADD | BPF_K:
1664 case BPF_ALU64 | BPF_ADD | BPF_X:
1665 case BPF_ALU64 | BPF_SUB | BPF_K:
1666 case BPF_ALU64 | BPF_SUB | BPF_X:
1667 case BPF_ALU64 | BPF_OR | BPF_K:
1668 case BPF_ALU64 | BPF_OR | BPF_X:
1669 case BPF_ALU64 | BPF_AND | BPF_K:
1670 case BPF_ALU64 | BPF_AND | BPF_X:
1671 case BPF_ALU64 | BPF_XOR | BPF_K:
1672 case BPF_ALU64 | BPF_XOR | BPF_X:
1673 switch (BPF_SRC(code)) {
1674 case BPF_X:
1675 emit_a32_alu_r64(is64, dst, src, ctx, BPF_OP(code));
1676 break;
1677 case BPF_K:
1678 /* Move immediate value to the temporary register
1679 * and then do the ALU operation on the temporary
1680 * register as this will sign-extend the immediate
1681 * value into temporary reg and then it would be
1682 * safe to do the operation on it.
1683 */
1684 emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
1685 emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code));
1686 break;
1687 }
1688 break;
1689 /* dst = dst / src(imm) */
1690 /* dst = dst % src(imm) */
1691 case BPF_ALU | BPF_DIV | BPF_K:
1692 case BPF_ALU | BPF_DIV | BPF_X:
1693 case BPF_ALU | BPF_MOD | BPF_K:
1694 case BPF_ALU | BPF_MOD | BPF_X:
1695 rd_lo = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx);
1696 switch (BPF_SRC(code)) {
1697 case BPF_X:
1698 rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx);
1699 break;
1700 case BPF_K:
1701 rt = tmp2[0];
1702 emit_a32_mov_i(rt, imm, ctx);
1703 break;
1704 default:
1705 rt = src_lo;
1706 break;
1707 }
1708 emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code), off);
1709 arm_bpf_put_reg32(dst_lo, rd_lo, ctx);
1710 if (!ctx->prog->aux->verifier_zext)
1711 emit_a32_mov_i(dst_hi, 0, ctx);
1712 break;
1713 case BPF_ALU64 | BPF_DIV | BPF_K:
1714 case BPF_ALU64 | BPF_DIV | BPF_X:
1715 case BPF_ALU64 | BPF_MOD | BPF_K:
1716 case BPF_ALU64 | BPF_MOD | BPF_X:
1717 rd = arm_bpf_get_reg64(dst, tmp2, ctx);
1718 switch (BPF_SRC(code)) {
1719 case BPF_X:
1720 rs = arm_bpf_get_reg64(src, tmp, ctx);
1721 break;
1722 case BPF_K:
1723 rs = tmp;
1724 emit_a32_mov_se_i64(is64, rs, imm, ctx);
1725 break;
1726 }
1727 emit_udivmod64(rd, rd, rs, ctx, BPF_OP(code), off);
1728 arm_bpf_put_reg64(dst, rd, ctx);
1729 break;
1730 /* dst = dst << imm */
1731 /* dst = dst >> imm */
1732 /* dst = dst >> imm (signed) */
1733 case BPF_ALU | BPF_LSH | BPF_K:
1734 case BPF_ALU | BPF_RSH | BPF_K:
1735 case BPF_ALU | BPF_ARSH | BPF_K:
1736 if (unlikely(imm > 31))
1737 return -EINVAL;
1738 if (imm)
1739 emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code));
1740 if (!ctx->prog->aux->verifier_zext)
1741 emit_a32_mov_i(dst_hi, 0, ctx);
1742 break;
1743 /* dst = dst << imm */
1744 case BPF_ALU64 | BPF_LSH | BPF_K:
1745 if (unlikely(imm > 63))
1746 return -EINVAL;
1747 emit_a32_lsh_i64(dst, imm, ctx);
1748 break;
1749 /* dst = dst >> imm */
1750 case BPF_ALU64 | BPF_RSH | BPF_K:
1751 if (unlikely(imm > 63))
1752 return -EINVAL;
1753 emit_a32_rsh_i64(dst, imm, ctx);
1754 break;
1755 /* dst = dst << src */
1756 case BPF_ALU64 | BPF_LSH | BPF_X:
1757 emit_a32_lsh_r64(dst, src, ctx);
1758 break;
1759 /* dst = dst >> src */
1760 case BPF_ALU64 | BPF_RSH | BPF_X:
1761 emit_a32_rsh_r64(dst, src, ctx);
1762 break;
1763 /* dst = dst >> src (signed) */
1764 case BPF_ALU64 | BPF_ARSH | BPF_X:
1765 emit_a32_arsh_r64(dst, src, ctx);
1766 break;
1767 /* dst = dst >> imm (signed) */
1768 case BPF_ALU64 | BPF_ARSH | BPF_K:
1769 if (unlikely(imm > 63))
1770 return -EINVAL;
1771 emit_a32_arsh_i64(dst, imm, ctx);
1772 break;
1773 /* dst = ~dst */
1774 case BPF_ALU | BPF_NEG:
1775 emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code));
1776 if (!ctx->prog->aux->verifier_zext)
1777 emit_a32_mov_i(dst_hi, 0, ctx);
1778 break;
1779 /* dst = ~dst (64 bit) */
1780 case BPF_ALU64 | BPF_NEG:
1781 emit_a32_neg64(dst, ctx);
1782 break;
1783 /* dst = dst * src/imm */
1784 case BPF_ALU64 | BPF_MUL | BPF_X:
1785 case BPF_ALU64 | BPF_MUL | BPF_K:
1786 switch (BPF_SRC(code)) {
1787 case BPF_X:
1788 emit_a32_mul_r64(dst, src, ctx);
1789 break;
1790 case BPF_K:
1791 /* Move immediate value to the temporary register
1792 * and then do the multiplication on it as this
1793 * will sign-extend the immediate value into temp
1794 * reg then it would be safe to do the operation
1795 * on it.
1796 */
1797 emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
1798 emit_a32_mul_r64(dst, tmp2, ctx);
1799 break;
1800 }
1801 break;
1802 /* dst = htole(dst) */
1803 /* dst = htobe(dst) */
1804 case BPF_ALU | BPF_END | BPF_FROM_LE: /* also BPF_TO_LE */
1805 case BPF_ALU | BPF_END | BPF_FROM_BE: /* also BPF_TO_BE */
1806 /* dst = bswap(dst) */
1807 case BPF_ALU64 | BPF_END | BPF_FROM_LE: /* also BPF_TO_LE */
1808 rd = arm_bpf_get_reg64(dst, tmp, ctx);
1809 if (BPF_SRC(code) == BPF_FROM_LE && BPF_CLASS(code) != BPF_ALU64)
1810 goto emit_bswap_uxt;
1811 switch (imm) {
1812 case 16:
1813 emit_rev16(rd[1], rd[1], ctx);
1814 goto emit_bswap_uxt;
1815 case 32:
1816 emit_rev32(rd[1], rd[1], ctx);
1817 goto emit_bswap_uxt;
1818 case 64:
1819 emit_rev32(ARM_LR, rd[1], ctx);
1820 emit_rev32(rd[1], rd[0], ctx);
1821 emit(ARM_MOV_R(rd[0], ARM_LR), ctx);
1822 break;
1823 }
1824 goto exit;
1825emit_bswap_uxt:
1826 switch (imm) {
1827 case 16:
1828 /* zero-extend 16 bits into 64 bits */
1829#if __LINUX_ARM_ARCH__ < 6
1830 emit_a32_mov_i(tmp2[1], 0xffff, ctx);
1831 emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx);
1832#else /* ARMv6+ */
1833 emit(ARM_UXTH(rd[1], rd[1]), ctx);
1834#endif
1835 if (!ctx->prog->aux->verifier_zext)
1836 emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
1837 break;
1838 case 32:
1839 /* zero-extend 32 bits into 64 bits */
1840 if (!ctx->prog->aux->verifier_zext)
1841 emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
1842 break;
1843 case 64:
1844 /* nop */
1845 break;
1846 }
1847exit:
1848 arm_bpf_put_reg64(dst, rd, ctx);
1849 break;
1850 /* dst = imm64 */
1851 case BPF_LD | BPF_IMM | BPF_DW:
1852 {
1853 u64 val = (u32)imm | (u64)insn[1].imm << 32;
1854
1855 emit_a32_mov_i64(dst, val, ctx);
1856
1857 return 1;
1858 }
1859 /* LDX: dst = *(size *)(src + off) */
1860 case BPF_LDX | BPF_MEM | BPF_W:
1861 case BPF_LDX | BPF_MEM | BPF_H:
1862 case BPF_LDX | BPF_MEM | BPF_B:
1863 case BPF_LDX | BPF_MEM | BPF_DW:
1864 /* LDSX: dst = *(signed size *)(src + off) */
1865 case BPF_LDX | BPF_MEMSX | BPF_B:
1866 case BPF_LDX | BPF_MEMSX | BPF_H:
1867 case BPF_LDX | BPF_MEMSX | BPF_W:
1868 rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
1869 if (BPF_MODE(insn->code) == BPF_MEMSX)
1870 emit_ldsx_r(dst, rn, off, ctx, BPF_SIZE(code));
1871 else
1872 emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
1873 break;
1874 /* speculation barrier */
1875 case BPF_ST | BPF_NOSPEC:
1876 break;
1877 /* ST: *(size *)(dst + off) = imm */
1878 case BPF_ST | BPF_MEM | BPF_W:
1879 case BPF_ST | BPF_MEM | BPF_H:
1880 case BPF_ST | BPF_MEM | BPF_B:
1881 case BPF_ST | BPF_MEM | BPF_DW:
1882 switch (BPF_SIZE(code)) {
1883 case BPF_DW:
1884 /* Sign-extend immediate value into temp reg */
1885 emit_a32_mov_se_i64(true, tmp2, imm, ctx);
1886 break;
1887 case BPF_W:
1888 case BPF_H:
1889 case BPF_B:
1890 emit_a32_mov_i(tmp2[1], imm, ctx);
1891 break;
1892 }
1893 emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
1894 break;
1895 /* Atomic ops */
1896 case BPF_STX | BPF_ATOMIC | BPF_W:
1897 case BPF_STX | BPF_ATOMIC | BPF_DW:
1898 goto notyet;
1899 /* STX: *(size *)(dst + off) = src */
1900 case BPF_STX | BPF_MEM | BPF_W:
1901 case BPF_STX | BPF_MEM | BPF_H:
1902 case BPF_STX | BPF_MEM | BPF_B:
1903 case BPF_STX | BPF_MEM | BPF_DW:
1904 rs = arm_bpf_get_reg64(src, tmp2, ctx);
1905 emit_str_r(dst_lo, rs, off, ctx, BPF_SIZE(code));
1906 break;
1907 /* PC += off if dst == src */
1908 /* PC += off if dst > src */
1909 /* PC += off if dst >= src */
1910 /* PC += off if dst < src */
1911 /* PC += off if dst <= src */
1912 /* PC += off if dst != src */
1913 /* PC += off if dst > src (signed) */
1914 /* PC += off if dst >= src (signed) */
1915 /* PC += off if dst < src (signed) */
1916 /* PC += off if dst <= src (signed) */
1917 /* PC += off if dst & src */
1918 case BPF_JMP | BPF_JEQ | BPF_X:
1919 case BPF_JMP | BPF_JGT | BPF_X:
1920 case BPF_JMP | BPF_JGE | BPF_X:
1921 case BPF_JMP | BPF_JNE | BPF_X:
1922 case BPF_JMP | BPF_JSGT | BPF_X:
1923 case BPF_JMP | BPF_JSGE | BPF_X:
1924 case BPF_JMP | BPF_JSET | BPF_X:
1925 case BPF_JMP | BPF_JLE | BPF_X:
1926 case BPF_JMP | BPF_JLT | BPF_X:
1927 case BPF_JMP | BPF_JSLT | BPF_X:
1928 case BPF_JMP | BPF_JSLE | BPF_X:
1929 case BPF_JMP32 | BPF_JEQ | BPF_X:
1930 case BPF_JMP32 | BPF_JGT | BPF_X:
1931 case BPF_JMP32 | BPF_JGE | BPF_X:
1932 case BPF_JMP32 | BPF_JNE | BPF_X:
1933 case BPF_JMP32 | BPF_JSGT | BPF_X:
1934 case BPF_JMP32 | BPF_JSGE | BPF_X:
1935 case BPF_JMP32 | BPF_JSET | BPF_X:
1936 case BPF_JMP32 | BPF_JLE | BPF_X:
1937 case BPF_JMP32 | BPF_JLT | BPF_X:
1938 case BPF_JMP32 | BPF_JSLT | BPF_X:
1939 case BPF_JMP32 | BPF_JSLE | BPF_X:
1940 /* Setup source registers */
1941 rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx);
1942 rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
1943 goto go_jmp;
1944 /* PC += off if dst == imm */
1945 /* PC += off if dst > imm */
1946 /* PC += off if dst >= imm */
1947 /* PC += off if dst < imm */
1948 /* PC += off if dst <= imm */
1949 /* PC += off if dst != imm */
1950 /* PC += off if dst > imm (signed) */
1951 /* PC += off if dst >= imm (signed) */
1952 /* PC += off if dst < imm (signed) */
1953 /* PC += off if dst <= imm (signed) */
1954 /* PC += off if dst & imm */
1955 case BPF_JMP | BPF_JEQ | BPF_K:
1956 case BPF_JMP | BPF_JGT | BPF_K:
1957 case BPF_JMP | BPF_JGE | BPF_K:
1958 case BPF_JMP | BPF_JNE | BPF_K:
1959 case BPF_JMP | BPF_JSGT | BPF_K:
1960 case BPF_JMP | BPF_JSGE | BPF_K:
1961 case BPF_JMP | BPF_JSET | BPF_K:
1962 case BPF_JMP | BPF_JLT | BPF_K:
1963 case BPF_JMP | BPF_JLE | BPF_K:
1964 case BPF_JMP | BPF_JSLT | BPF_K:
1965 case BPF_JMP | BPF_JSLE | BPF_K:
1966 case BPF_JMP32 | BPF_JEQ | BPF_K:
1967 case BPF_JMP32 | BPF_JGT | BPF_K:
1968 case BPF_JMP32 | BPF_JGE | BPF_K:
1969 case BPF_JMP32 | BPF_JNE | BPF_K:
1970 case BPF_JMP32 | BPF_JSGT | BPF_K:
1971 case BPF_JMP32 | BPF_JSGE | BPF_K:
1972 case BPF_JMP32 | BPF_JSET | BPF_K:
1973 case BPF_JMP32 | BPF_JLT | BPF_K:
1974 case BPF_JMP32 | BPF_JLE | BPF_K:
1975 case BPF_JMP32 | BPF_JSLT | BPF_K:
1976 case BPF_JMP32 | BPF_JSLE | BPF_K:
1977 if (off == 0)
1978 break;
1979 rm = tmp2[0];
1980 rn = tmp2[1];
1981 /* Sign-extend immediate value */
1982 emit_a32_mov_se_i64(true, tmp2, imm, ctx);
1983go_jmp:
1984 /* Setup destination register */
1985 rd = arm_bpf_get_reg64(dst, tmp, ctx);
1986
1987 /* Check for the condition */
1988 emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code),
1989 BPF_CLASS(code) == BPF_JMP);
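		/* The flags now reflect the 64-bit (or, for JMP32, 32-bit)
		 * comparison; pick the ARM condition code matching the BPF
		 * operation.  For the signed operations the operand order
		 * chosen by emit_ar_r() is what makes LT/GE correct below.
		 */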
1990
1991 /* Setup JUMP instruction */
1992 jmp_offset = bpf2a32_offset(i+off, i, ctx);
1993 switch (BPF_OP(code)) {
1994 case BPF_JNE:
1995 case BPF_JSET:
1996 _emit(ARM_COND_NE, ARM_B(jmp_offset), ctx);
1997 break;
1998 case BPF_JEQ:
1999 _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
2000 break;
2001 case BPF_JGT:
2002 _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
2003 break;
2004 case BPF_JGE:
2005 _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
2006 break;
2007 case BPF_JSGT:
2008 _emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
2009 break;
2010 case BPF_JSGE:
2011 _emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
2012 break;
2013 case BPF_JLE:
2014 _emit(ARM_COND_LS, ARM_B(jmp_offset), ctx);
2015 break;
2016 case BPF_JLT:
2017 _emit(ARM_COND_CC, ARM_B(jmp_offset), ctx);
2018 break;
2019 case BPF_JSLT:
2020 _emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
2021 break;
2022 case BPF_JSLE:
2023 _emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
2024 break;
2025 }
2026 break;
2027 /* JMP OFF */
2028 case BPF_JMP | BPF_JA:
2029 case BPF_JMP32 | BPF_JA:
2030 {
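		/* BPF_JMP32 | BPF_JA ("gotol") carries its jump offset in
		 * imm, while the classic BPF_JMP | BPF_JA uses off.
		 */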
2031 if (BPF_CLASS(code) == BPF_JMP32 && imm != 0)
2032 jmp_offset = bpf2a32_offset(i + imm, i, ctx);
2033 else if (BPF_CLASS(code) == BPF_JMP && off != 0)
2034 jmp_offset = bpf2a32_offset(i + off, i, ctx);
2035 else
2036 break;
2037
2038 check_imm24(jmp_offset);
2039 emit(ARM_B(jmp_offset), ctx);
2040 break;
2041 }
2042 /* tail call */
2043 case BPF_JMP | BPF_TAIL_CALL:
2044 if (emit_bpf_tail_call(ctx))
2045 return -EFAULT;
2046 break;
2047 /* function call */
2048 case BPF_JMP | BPF_CALL:
2049 {
2050 const s8 *r0 = bpf2a32[BPF_REG_0];
2051 const s8 *r1 = bpf2a32[BPF_REG_1];
2052 const s8 *r2 = bpf2a32[BPF_REG_2];
2053 const s8 *r3 = bpf2a32[BPF_REG_3];
2054 const s8 *r4 = bpf2a32[BPF_REG_4];
2055 const s8 *r5 = bpf2a32[BPF_REG_5];
2056 const u32 func = (u32)__bpf_call_base + (u32)imm;
2057
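		/* Marshal the arguments per the 32-bit ARM convention: eBPF
		 * R1 and R2 (64 bits each) go into the four core argument
		 * registers, R3-R5 are pushed onto the stack with R3 ending
		 * up lowest, and the 24 bytes are popped again after the
		 * call returns.
		 */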
2058 emit_a32_mov_r64(true, r0, r1, ctx);
2059 emit_a32_mov_r64(true, r1, r2, ctx);
2060 emit_push_r64(r5, ctx);
2061 emit_push_r64(r4, ctx);
2062 emit_push_r64(r3, ctx);
2063
2064 emit_a32_mov_i(tmp[1], func, ctx);
2065 emit_blx_r(tmp[1], ctx);
2066
2067		emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // caller removes the 24 bytes of stacked args
2068 break;
2069 }
2070 /* function return */
2071 case BPF_JMP | BPF_EXIT:
2072		/* Optimization: when the last instruction is EXIT,
2073		 * simply fall through to the epilogue.
2074		 */
2075 if (i == ctx->prog->len - 1)
2076 break;
2077 jmp_offset = epilogue_offset(ctx);
2078 check_imm24(jmp_offset);
2079 emit(ARM_B(jmp_offset), ctx);
2080 break;
2081notyet:
2082 pr_info_once("*** NOT YET: opcode %02x ***\n", code);
2083 return -EFAULT;
2084 default:
2085 pr_err_once("unknown opcode %02x\n", code);
2086 return -EINVAL;
2087 }
2088
2089 if (ctx->flags & FLAG_IMM_OVERFLOW)
2090 /*
2091 * this instruction generated an overflow when
2092 * trying to access the literal pool, so
2093 * delegate this filter to the kernel interpreter.
2094 */
2095 return -1;
2096 return 0;
2097}
2098
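/* Translate the whole program.  During the first pass (ctx->target ==
 * NULL) nothing is written; only the per-instruction offsets are
 * recorded so branch targets can be resolved in the second pass.
 */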
2099static int build_body(struct jit_ctx *ctx)
2100{
2101 const struct bpf_prog *prog = ctx->prog;
2102 unsigned int i;
2103
2104 for (i = 0; i < prog->len; i++) {
2105 const struct bpf_insn *insn = &(prog->insnsi[i]);
2106 int ret;
2107
2108 ret = build_insn(insn, ctx);
2109
2110		/* ret > 0 means the 64-bit immediate load also consumed the next slot, so skip it. */
2111 if (ret > 0) {
2112 i++;
2113 if (ctx->target == NULL)
2114 ctx->offsets[i] = ctx->idx;
2115 continue;
2116 }
2117
2118 if (ctx->target == NULL)
2119 ctx->offsets[i] = ctx->idx;
2120
2121		/* If unsuccessful, return with error code */
2122 if (ret)
2123 return ret;
2124 }
2125 return 0;
2126}
2127
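/* jit_fill_hole() pre-fills the image with UDF instructions; any UDF
 * still present after code generation means a slot was never written,
 * so the image is rejected.
 */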
2128static int validate_code(struct jit_ctx *ctx)
2129{
2130 int i;
2131
2132 for (i = 0; i < ctx->idx; i++) {
2133 if (ctx->target[i] == __opcode_to_mem_arm(ARM_INST_UDF))
2134 return -1;
2135 }
2136
2137 return 0;
2138}
2139
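/* Ask the verifier to insert explicit zero-extensions after 32-bit
 * operations; the JIT then checks prog->aux->verifier_zext and skips
 * clearing the high word itself.
 */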
2140bool bpf_jit_needs_zext(void)
2141{
2142 return true;
2143}
2144
2145struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2146{
2147 struct bpf_prog *tmp, *orig_prog = prog;
2148 struct bpf_binary_header *header;
2149 bool tmp_blinded = false;
2150 struct jit_ctx ctx;
2151 unsigned int tmp_idx;
2152 unsigned int image_size;
2153 u8 *image_ptr;
2154
2155 /* If BPF JIT was not enabled then we must fall back to
2156 * the interpreter.
2157 */
2158 if (!prog->jit_requested)
2159 return orig_prog;
2160
2161 /* If constant blinding was enabled and we failed during blinding
2162 * then we must fall back to the interpreter. Otherwise, we save
2163 * the new JITed code.
2164 */
2165 tmp = bpf_jit_blind_constants(prog);
2166
2167 if (IS_ERR(tmp))
2168 return orig_prog;
2169 if (tmp != prog) {
2170 tmp_blinded = true;
2171 prog = tmp;
2172 }
2173
2174 memset(&ctx, 0, sizeof(ctx));
2175 ctx.prog = prog;
2176 ctx.cpu_architecture = cpu_architecture();
2177
2178	/* If we cannot allocate memory for offsets[], we must fall
2179	 * back to the interpreter.
2180	 */
2181 ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
2182 if (ctx.offsets == NULL) {
2183 prog = orig_prog;
2184 goto out;
2185 }
2186
2187	/* 1) Fake pass to find the length of the JITed code and to
2188	 * compute ctx->offsets and the other context variables
2189	 * needed to emit the final code.
2190	 * The image allocated later starts at a randomized offset and
2191	 * is prefixed by a random number of fault instructions.
2192	 *
2193	 * If the first pass fails, there is no chance of it
2194	 * succeeding in the second pass, so just fall back
2195	 * to the interpreter.
2196	 */
2197 if (build_body(&ctx)) {
2198 prog = orig_prog;
2199 goto out_off;
2200 }
2201
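	/* Still in pass 1 (ctx.target == NULL): measure the prologue and
	 * epilogue sizes and, on pre-ARMv7, the literal-pool size, so the
	 * total image size is known before allocation.
	 */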
2202 tmp_idx = ctx.idx;
2203 build_prologue(&ctx);
2204 ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
2205
2206 ctx.epilogue_offset = ctx.idx;
2207
2208#if __LINUX_ARM_ARCH__ < 7
2209 tmp_idx = ctx.idx;
2210 build_epilogue(&ctx);
2211 ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
2212
2213 ctx.idx += ctx.imm_count;
2214 if (ctx.imm_count) {
2215 ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL);
2216 if (ctx.imms == NULL) {
2217 prog = orig_prog;
2218 goto out_off;
2219 }
2220 }
2221#else
2222	/* there's nothing after the epilogue on ARMv7 */
2223 build_epilogue(&ctx);
2224#endif
2225	/* Now we know the actual size of the JITed ARM code.
2226	 * Thumb-2 instructions are currently not considered by this
2227	 * JIT, although they could shrink the image.
2228	 *
2229	 * Every ARM instruction is 32 bits wide, so the image size is
2230	 * simply the number of emitted instructions multiplied by the
2231	 * instruction size.
2232	 */
2233 image_size = sizeof(u32) * ctx.idx;
2234
2235 /* Now we know the size of the structure to make */
2236 header = bpf_jit_binary_alloc(image_size, &image_ptr,
2237 sizeof(u32), jit_fill_hole);
2238	/* If we cannot allocate memory for the image, we must
2239	 * fall back to the interpreter.
2240	 */
2241 if (header == NULL) {
2242 prog = orig_prog;
2243 goto out_imms;
2244 }
2245
2246 /* 2.) Actual pass to generate final JIT code */
2247 ctx.target = (u32 *) image_ptr;
2248 ctx.idx = 0;
2249
2250 build_prologue(&ctx);
2251
2252 /* If building the body of the JITed code fails somehow,
2253	 * we fall back to the interpreter.
2254 */
2255 if (build_body(&ctx) < 0)
2256 goto out_free;
2257 build_epilogue(&ctx);
2258
2259 /* 3.) Extra pass to validate JITed Code */
2260 if (validate_code(&ctx))
2261 goto out_free;
2262 flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
2263
2264 if (bpf_jit_enable > 1)
2265 /* there are 2 passes here */
2266 bpf_jit_dump(prog->len, image_size, 2, ctx.target);
2267
2268 if (bpf_jit_binary_lock_ro(header))
2269 goto out_free;
2270 prog->bpf_func = (void *)ctx.target;
2271 prog->jited = 1;
2272 prog->jited_len = image_size;
2273
2274out_imms:
2275#if __LINUX_ARM_ARCH__ < 7
2276 if (ctx.imm_count)
2277 kfree(ctx.imms);
2278#endif
2279out_off:
2280 kfree(ctx.offsets);
2281out:
2282 if (tmp_blinded)
2283 bpf_jit_prog_release_other(prog, prog == orig_prog ?
2284 tmp : orig_prog);
2285 return prog;
2286
2287out_free:
2288 image_ptr = NULL;
2289 bpf_jit_binary_free(header);
2290 prog = orig_prog;
2291 goto out_imms;
2292}
2293
1/*
2 * Just-In-Time compiler for BPF filters on 32bit ARM
3 *
4 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License.
9 */
10
11#include <linux/bitops.h>
12#include <linux/compiler.h>
13#include <linux/errno.h>
14#include <linux/filter.h>
15#include <linux/netdevice.h>
16#include <linux/string.h>
17#include <linux/slab.h>
18#include <linux/if_vlan.h>
19
20#include <asm/cacheflush.h>
21#include <asm/hwcap.h>
22#include <asm/opcodes.h>
23
24#include "bpf_jit_32.h"
25
26/*
27 * ABI:
28 *
29 * r0 scratch register
30 * r4 BPF register A
31 * r5 BPF register X
32 * r6 pointer to the skb
33 * r7 skb->data
34 * r8 skb_headlen(skb)
35 */
36
37#define r_scratch ARM_R0
38/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
39#define r_off ARM_R1
40#define r_A ARM_R4
41#define r_X ARM_R5
42#define r_skb ARM_R6
43#define r_skb_data ARM_R7
44#define r_skb_hl ARM_R8
45
46#define SCRATCH_SP_OFFSET 0
47#define SCRATCH_OFF(k) (SCRATCH_SP_OFFSET + 4 * (k))
48
49#define SEEN_MEM ((1 << BPF_MEMWORDS) - 1)
50#define SEEN_MEM_WORD(k) (1 << (k))
51#define SEEN_X (1 << BPF_MEMWORDS)
52#define SEEN_CALL (1 << (BPF_MEMWORDS + 1))
53#define SEEN_SKB (1 << (BPF_MEMWORDS + 2))
54#define SEEN_DATA (1 << (BPF_MEMWORDS + 3))
55
56#define FLAG_NEED_X_RESET (1 << 0)
57#define FLAG_IMM_OVERFLOW (1 << 1)
58
59struct jit_ctx {
60 const struct bpf_prog *skf;
61 unsigned idx;
62 unsigned prologue_bytes;
63 int ret0_fp_idx;
64 u32 seen;
65 u32 flags;
66 u32 *offsets;
67 u32 *target;
68#if __LINUX_ARM_ARCH__ < 7
69 u16 epilogue_bytes;
70 u16 imm_count;
71 u32 *imms;
72#endif
73};
74
75int bpf_jit_enable __read_mostly;
76
77static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
78 unsigned int size)
79{
80 void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
81
82 if (!ptr)
83 return -EFAULT;
84 memcpy(ret, ptr, size);
85 return 0;
86}
87
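/* The slow-path load helpers below return a packed u64: the error code
 * in the upper 32 bits (r1 under the 32-bit EABI) and the loaded value
 * in the lower 32 bits (r0), so the JITed code can test r1 and use r0
 * directly.
 */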
88static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
89{
90 u8 ret;
91 int err;
92
93 if (offset < 0)
94 err = call_neg_helper(skb, offset, &ret, 1);
95 else
96 err = skb_copy_bits(skb, offset, &ret, 1);
97
98 return (u64)err << 32 | ret;
99}
100
101static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
102{
103 u16 ret;
104 int err;
105
106 if (offset < 0)
107 err = call_neg_helper(skb, offset, &ret, 2);
108 else
109 err = skb_copy_bits(skb, offset, &ret, 2);
110
111 return (u64)err << 32 | ntohs(ret);
112}
113
114static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
115{
116 u32 ret;
117 int err;
118
119 if (offset < 0)
120 err = call_neg_helper(skb, offset, &ret, 4);
121 else
122 err = skb_copy_bits(skb, offset, &ret, 4);
123
124 return (u64)err << 32 | ntohl(ret);
125}
126
127/*
128 * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
129 * (where the assembly routines like __aeabi_uidiv could cause problems).
130 */
131static u32 jit_udiv(u32 dividend, u32 divisor)
132{
133 return dividend / divisor;
134}
135
136static u32 jit_mod(u32 dividend, u32 divisor)
137{
138 return dividend % divisor;
139}
140
141static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
142{
143 inst |= (cond << 28);
144 inst = __opcode_to_mem_arm(inst);
145
146 if (ctx->target != NULL)
147 ctx->target[ctx->idx] = inst;
148
149 ctx->idx++;
150}
151
152/*
153 * Emit an instruction that will be executed unconditionally.
154 */
155static inline void emit(u32 inst, struct jit_ctx *ctx)
156{
157 _emit(ARM_COND_AL, inst, ctx);
158}
159
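/* Build the register mask for the prologue push / epilogue pop based on
 * which features the filter was seen to use (A, X, the skb pointer,
 * packet data, helper calls).
 */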
160static u16 saved_regs(struct jit_ctx *ctx)
161{
162 u16 ret = 0;
163
164 if ((ctx->skf->len > 1) ||
165 (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
166 ret |= 1 << r_A;
167
168#ifdef CONFIG_FRAME_POINTER
169 ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
170#else
171 if (ctx->seen & SEEN_CALL)
172 ret |= 1 << ARM_LR;
173#endif
174 if (ctx->seen & (SEEN_DATA | SEEN_SKB))
175 ret |= 1 << r_skb;
176 if (ctx->seen & SEEN_DATA)
177 ret |= (1 << r_skb_data) | (1 << r_skb_hl);
178 if (ctx->seen & SEEN_X)
179 ret |= 1 << r_X;
180
181 return ret;
182}
183
184static inline int mem_words_used(struct jit_ctx *ctx)
185{
186	/* yes, we do waste some stack space if there are "holes" in the set */
187 return fls(ctx->seen & SEEN_MEM);
188}
189
190static void jit_fill_hole(void *area, unsigned int size)
191{
192 u32 *ptr;
193 /* We are guaranteed to have aligned memory. */
194 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
195 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
196}
197
198static void build_prologue(struct jit_ctx *ctx)
199{
200 u16 reg_set = saved_regs(ctx);
201 u16 off;
202
203#ifdef CONFIG_FRAME_POINTER
204 emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
205 emit(ARM_PUSH(reg_set), ctx);
206 emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
207#else
208 if (reg_set)
209 emit(ARM_PUSH(reg_set), ctx);
210#endif
211
212 if (ctx->seen & (SEEN_DATA | SEEN_SKB))
213 emit(ARM_MOV_R(r_skb, ARM_R0), ctx);
214
215 if (ctx->seen & SEEN_DATA) {
216 off = offsetof(struct sk_buff, data);
217 emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
218 /* headlen = len - data_len */
219 off = offsetof(struct sk_buff, len);
220 emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
221 off = offsetof(struct sk_buff, data_len);
222 emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
223 emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
224 }
225
226 if (ctx->flags & FLAG_NEED_X_RESET)
227 emit(ARM_MOV_I(r_X, 0), ctx);
228
229 /* do not leak kernel data to userspace */
230 if (bpf_needs_clear_a(&ctx->skf->insns[0]))
231 emit(ARM_MOV_I(r_A, 0), ctx);
232
233 /* stack space for the BPF_MEM words */
234 if (ctx->seen & SEEN_MEM)
235 emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
236}
237
238static void build_epilogue(struct jit_ctx *ctx)
239{
240 u16 reg_set = saved_regs(ctx);
241
242 if (ctx->seen & SEEN_MEM)
243 emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
244
245 reg_set &= ~(1 << ARM_LR);
246
247#ifdef CONFIG_FRAME_POINTER
248 /* the first instruction of the prologue was: mov ip, sp */
249 reg_set &= ~(1 << ARM_IP);
250 reg_set |= (1 << ARM_SP);
251 emit(ARM_LDM(ARM_SP, reg_set), ctx);
252#else
253 if (reg_set) {
254 if (ctx->seen & SEEN_CALL)
255 reg_set |= 1 << ARM_PC;
256 emit(ARM_POP(reg_set), ctx);
257 }
258
259 if (!(ctx->seen & SEEN_CALL))
260 emit(ARM_BX(ARM_LR), ctx);
261#endif
262}
263
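/* Return the ARM "rotated 8-bit" encoding of x, or -1 if x cannot be
 * encoded that way.  For example, imm8m(0x00ff0000) == 0x8ff: the byte
 * 0xff rotated right by 16 bits (rot = 8), as used by emit_swap16()
 * below on pre-ARMv6 cores.
 */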
264static int16_t imm8m(u32 x)
265{
266 u32 rot;
267
268 for (rot = 0; rot < 16; rot++)
269 if ((x & ~ror32(0xff, 2 * rot)) == 0)
270 return rol32(x, 2 * rot) | (rot << 8);
271
272 return -1;
273}
274
275#if __LINUX_ARM_ARCH__ < 7
276
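/* Pre-ARMv7 cores lack MOVW/MOVT, so constants that are not imm8m
 * encodable are kept in a literal pool placed right after the epilogue
 * and loaded PC-relative.  On the first pass imm_offset() only counts
 * the entries; on the second pass it fills the pool and returns the
 * LDR offset for constant k.
 */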
277static u16 imm_offset(u32 k, struct jit_ctx *ctx)
278{
279 unsigned i = 0, offset;
280 u16 imm;
281
282 /* on the "fake" run we just count them (duplicates included) */
283 if (ctx->target == NULL) {
284 ctx->imm_count++;
285 return 0;
286 }
287
288 while ((i < ctx->imm_count) && ctx->imms[i]) {
289 if (ctx->imms[i] == k)
290 break;
291 i++;
292 }
293
294 if (ctx->imms[i] == 0)
295 ctx->imms[i] = k;
296
297 /* constants go just after the epilogue */
298 offset = ctx->offsets[ctx->skf->len];
299 offset += ctx->prologue_bytes;
300 offset += ctx->epilogue_bytes;
301 offset += i * 4;
302
303 ctx->target[offset / 4] = k;
304
305 /* PC in ARM mode == address of the instruction + 8 */
306 imm = offset - (8 + ctx->idx * 4);
307
308 if (imm & ~0xfff) {
309 /*
310		 * the literal pool is too far away, so record it in the flags;
311		 * unfortunately we can only detect this on the second pass.
312 */
313 ctx->flags |= FLAG_IMM_OVERFLOW;
314 return 0;
315 }
316
317 return imm;
318}
319
320#endif /* __LINUX_ARM_ARCH__ */
321
322/*
323 * Move an immediate that's not an imm8m to a core register.
324 */
325static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
326{
327#if __LINUX_ARM_ARCH__ < 7
328 emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
329#else
330 emit(ARM_MOVW(rd, val & 0xffff), ctx);
331 if (val > 0xffff)
332 emit(ARM_MOVT(rd, val >> 16), ctx);
333#endif
334}
335
336static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
337{
338 int imm12 = imm8m(val);
339
340 if (imm12 >= 0)
341 emit(ARM_MOV_I(rd, imm12), ctx);
342 else
343 emit_mov_i_no8m(rd, val, ctx);
344}
345
346#if __LINUX_ARM_ARCH__ < 6
347
348static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
349{
350 _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
351 _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
352 _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
353 _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
354 _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
355 _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
356 _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
357 _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
358}
359
360static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
361{
362 _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
363 _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
364 _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
365}
366
367static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
368{
369 /* r_dst = (r_src << 8) | (r_src >> 8) */
370 emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
371 emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);
372
373 /*
374 * we need to mask out the bits set in r_dst[23:16] due to
375 * the first shift instruction.
376 *
377 * note that 0x8ff is the encoded immediate 0x00ff0000.
378 */
379 emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
380}
381
382#else /* ARMv6+ */
383
384static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
385{
386 _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
387#ifdef __LITTLE_ENDIAN
388 _emit(cond, ARM_REV(r_res, r_res), ctx);
389#endif
390}
391
392static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
393{
394 _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
395#ifdef __LITTLE_ENDIAN
396 _emit(cond, ARM_REV16(r_res, r_res), ctx);
397#endif
398}
399
400static inline void emit_swap16(u8 r_dst __maybe_unused,
401 u8 r_src __maybe_unused,
402 struct jit_ctx *ctx __maybe_unused)
403{
404#ifdef __LITTLE_ENDIAN
405 emit(ARM_REV16(r_dst, r_src), ctx);
406#endif
407}
408
409#endif /* __LINUX_ARM_ARCH__ < 6 */
410
411
412/* Compute the immediate value for a PC-relative branch. */
413static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
414{
415 u32 imm;
416
417 if (ctx->target == NULL)
418 return 0;
419 /*
420 * BPF allows only forward jumps and the offset of the target is
421 * still the one computed during the first pass.
422 */
423 imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);
424
425 return imm >> 2;
426}
427
428#define OP_IMM3(op, r1, r2, imm_val, ctx) \
429 do { \
430 imm12 = imm8m(imm_val); \
431 if (imm12 < 0) { \
432 emit_mov_i_no8m(r_scratch, imm_val, ctx); \
433 emit(op ## _R((r1), (r2), r_scratch), ctx); \
434 } else { \
435 emit(op ## _I((r1), (r2), imm12), ctx); \
436 } \
437 } while (0)
438
439static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
440{
441 if (ctx->ret0_fp_idx >= 0) {
442 _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
443 /* NOP to keep the size constant between passes */
444 emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
445 } else {
446 _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
447 _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
448 }
449}
450
451static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
452{
453#if __LINUX_ARM_ARCH__ < 5
454 emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
455
456 if (elf_hwcap & HWCAP_THUMB)
457 emit(ARM_BX(tgt_reg), ctx);
458 else
459 emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
460#else
461 emit(ARM_BLX_R(tgt_reg), ctx);
462#endif
463}
464
465static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx,
466 int bpf_op)
467{
468#if __LINUX_ARM_ARCH__ == 7
469 if (elf_hwcap & HWCAP_IDIVA) {
470 if (bpf_op == BPF_DIV)
471 emit(ARM_UDIV(rd, rm, rn), ctx);
472 else {
473 emit(ARM_UDIV(ARM_R3, rm, rn), ctx);
474 emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx);
475 }
476 return;
477 }
478#endif
479
480 /*
481 * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
482 * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
483 * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
484 * before using it as a source for ARM_R1.
485 *
486 * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
487 * ARM_R5 (r_X) so there is no particular register overlap
488 * issues.
489 */
490 if (rn != ARM_R1)
491 emit(ARM_MOV_R(ARM_R1, rn), ctx);
492 if (rm != ARM_R0)
493 emit(ARM_MOV_R(ARM_R0, rm), ctx);
494
495 ctx->seen |= SEEN_CALL;
496 emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? (u32)jit_udiv : (u32)jit_mod,
497 ctx);
498 emit_blx_r(ARM_R3, ctx);
499
500 if (rd != ARM_R0)
501 emit(ARM_MOV_R(rd, ARM_R0), ctx);
502}
503
504static inline void update_on_xread(struct jit_ctx *ctx)
505{
506 if (!(ctx->seen & SEEN_X))
507 ctx->flags |= FLAG_NEED_X_RESET;
508
509 ctx->seen |= SEEN_X;
510}
511
512static int build_body(struct jit_ctx *ctx)
513{
514 void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
515 const struct bpf_prog *prog = ctx->skf;
516 const struct sock_filter *inst;
517 unsigned i, load_order, off, condt;
518 int imm12;
519 u32 k;
520
521 for (i = 0; i < prog->len; i++) {
522 u16 code;
523
524 inst = &(prog->insns[i]);
525 /* K as an immediate value operand */
526 k = inst->k;
527 code = bpf_anc_helper(inst);
528
529 /* compute offsets only in the fake pass */
530 if (ctx->target == NULL)
531 ctx->offsets[i] = ctx->idx * 4;
532
533 switch (code) {
534 case BPF_LD | BPF_IMM:
535 emit_mov_i(r_A, k, ctx);
536 break;
537 case BPF_LD | BPF_W | BPF_LEN:
538 ctx->seen |= SEEN_SKB;
539 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
540 emit(ARM_LDR_I(r_A, r_skb,
541 offsetof(struct sk_buff, len)), ctx);
542 break;
543 case BPF_LD | BPF_MEM:
544 /* A = scratch[k] */
545 ctx->seen |= SEEN_MEM_WORD(k);
546 emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
547 break;
548 case BPF_LD | BPF_W | BPF_ABS:
549 load_order = 2;
550 goto load;
551 case BPF_LD | BPF_H | BPF_ABS:
552 load_order = 1;
553 goto load;
554 case BPF_LD | BPF_B | BPF_ABS:
555 load_order = 0;
556load:
557 emit_mov_i(r_off, k, ctx);
558load_common:
559 ctx->seen |= SEEN_DATA | SEEN_CALL;
560
561 if (load_order > 0) {
562 emit(ARM_SUB_I(r_scratch, r_skb_hl,
563 1 << load_order), ctx);
564 emit(ARM_CMP_R(r_scratch, r_off), ctx);
565 condt = ARM_COND_GE;
566 } else {
567 emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
568 condt = ARM_COND_HI;
569 }
570
571			/*
572			 * Test for a negative offset, but only if we are
573			 * currently scheduled to take the fast path; this
574			 * updates the flags so that the slow-path
575			 * instructions are skipped if the offset is
576			 * negative.
577			 *
578			 * For load_order == 0 the HI condition also makes
579			 * loads at offset 0 take the slow path.
580			 */
581 _emit(condt, ARM_CMP_I(r_off, 0), ctx);
582
583 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
584 ctx);
585
586 if (load_order == 0)
587 _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
588 ctx);
589 else if (load_order == 1)
590 emit_load_be16(condt, r_A, r_scratch, ctx);
591 else if (load_order == 2)
592 emit_load_be32(condt, r_A, r_scratch, ctx);
593
594 _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);
595
596 /* the slowpath */
597 emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
598 emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
599 /* the offset is already in R1 */
600 emit_blx_r(ARM_R3, ctx);
601 /* check the result of skb_copy_bits */
602 emit(ARM_CMP_I(ARM_R1, 0), ctx);
603 emit_err_ret(ARM_COND_NE, ctx);
604 emit(ARM_MOV_R(r_A, ARM_R0), ctx);
605 break;
606 case BPF_LD | BPF_W | BPF_IND:
607 load_order = 2;
608 goto load_ind;
609 case BPF_LD | BPF_H | BPF_IND:
610 load_order = 1;
611 goto load_ind;
612 case BPF_LD | BPF_B | BPF_IND:
613 load_order = 0;
614load_ind:
615 update_on_xread(ctx);
616 OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
617 goto load_common;
618 case BPF_LDX | BPF_IMM:
619 ctx->seen |= SEEN_X;
620 emit_mov_i(r_X, k, ctx);
621 break;
622 case BPF_LDX | BPF_W | BPF_LEN:
623 ctx->seen |= SEEN_X | SEEN_SKB;
624 emit(ARM_LDR_I(r_X, r_skb,
625 offsetof(struct sk_buff, len)), ctx);
626 break;
627 case BPF_LDX | BPF_MEM:
628 ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
629 emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
630 break;
631 case BPF_LDX | BPF_B | BPF_MSH:
632 /* x = ((*(frame + k)) & 0xf) << 2; */
633 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
634 /* the interpreter should deal with the negative K */
635 if ((int)k < 0)
636 return -1;
637 /* offset in r1: we might have to take the slow path */
638 emit_mov_i(r_off, k, ctx);
639 emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
640
641 /* load in r0: common with the slowpath */
642 _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
643 ARM_R1), ctx);
644 /*
645 * emit_mov_i() might generate one or two instructions,
646 * the same holds for emit_blx_r()
647 */
648 _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);
649
650 emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
651 /* r_off is r1 */
652 emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
653 emit_blx_r(ARM_R3, ctx);
654 /* check the return value of skb_copy_bits */
655 emit(ARM_CMP_I(ARM_R1, 0), ctx);
656 emit_err_ret(ARM_COND_NE, ctx);
657
658 emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
659 emit(ARM_LSL_I(r_X, r_X, 2), ctx);
660 break;
661 case BPF_ST:
662 ctx->seen |= SEEN_MEM_WORD(k);
663 emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
664 break;
665 case BPF_STX:
666 update_on_xread(ctx);
667 ctx->seen |= SEEN_MEM_WORD(k);
668 emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
669 break;
670 case BPF_ALU | BPF_ADD | BPF_K:
671 /* A += K */
672 OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
673 break;
674 case BPF_ALU | BPF_ADD | BPF_X:
675 update_on_xread(ctx);
676 emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
677 break;
678 case BPF_ALU | BPF_SUB | BPF_K:
679 /* A -= K */
680 OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
681 break;
682 case BPF_ALU | BPF_SUB | BPF_X:
683 update_on_xread(ctx);
684 emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
685 break;
686 case BPF_ALU | BPF_MUL | BPF_K:
687 /* A *= K */
688 emit_mov_i(r_scratch, k, ctx);
689 emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
690 break;
691 case BPF_ALU | BPF_MUL | BPF_X:
692 update_on_xread(ctx);
693 emit(ARM_MUL(r_A, r_A, r_X), ctx);
694 break;
695 case BPF_ALU | BPF_DIV | BPF_K:
696 if (k == 1)
697 break;
698 emit_mov_i(r_scratch, k, ctx);
699 emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV);
700 break;
701 case BPF_ALU | BPF_DIV | BPF_X:
702 update_on_xread(ctx);
703 emit(ARM_CMP_I(r_X, 0), ctx);
704 emit_err_ret(ARM_COND_EQ, ctx);
705 emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV);
706 break;
707 case BPF_ALU | BPF_MOD | BPF_K:
708 if (k == 1) {
709 emit_mov_i(r_A, 0, ctx);
710 break;
711 }
712 emit_mov_i(r_scratch, k, ctx);
713 emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD);
714 break;
715 case BPF_ALU | BPF_MOD | BPF_X:
716 update_on_xread(ctx);
717 emit(ARM_CMP_I(r_X, 0), ctx);
718 emit_err_ret(ARM_COND_EQ, ctx);
719 emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD);
720 break;
721 case BPF_ALU | BPF_OR | BPF_K:
722 /* A |= K */
723 OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
724 break;
725 case BPF_ALU | BPF_OR | BPF_X:
726 update_on_xread(ctx);
727 emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
728 break;
729 case BPF_ALU | BPF_XOR | BPF_K:
730 /* A ^= K; */
731 OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
732 break;
733 case BPF_ANC | SKF_AD_ALU_XOR_X:
734 case BPF_ALU | BPF_XOR | BPF_X:
735 /* A ^= X */
736 update_on_xread(ctx);
737 emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
738 break;
739 case BPF_ALU | BPF_AND | BPF_K:
740 /* A &= K */
741 OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
742 break;
743 case BPF_ALU | BPF_AND | BPF_X:
744 update_on_xread(ctx);
745 emit(ARM_AND_R(r_A, r_A, r_X), ctx);
746 break;
747 case BPF_ALU | BPF_LSH | BPF_K:
748 if (unlikely(k > 31))
749 return -1;
750 emit(ARM_LSL_I(r_A, r_A, k), ctx);
751 break;
752 case BPF_ALU | BPF_LSH | BPF_X:
753 update_on_xread(ctx);
754 emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
755 break;
756 case BPF_ALU | BPF_RSH | BPF_K:
757 if (unlikely(k > 31))
758 return -1;
759 if (k)
760 emit(ARM_LSR_I(r_A, r_A, k), ctx);
761 break;
762 case BPF_ALU | BPF_RSH | BPF_X:
763 update_on_xread(ctx);
764 emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
765 break;
766 case BPF_ALU | BPF_NEG:
767 /* A = -A */
768 emit(ARM_RSB_I(r_A, r_A, 0), ctx);
769 break;
770 case BPF_JMP | BPF_JA:
771 /* pc += K */
772 emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
773 break;
774 case BPF_JMP | BPF_JEQ | BPF_K:
775 /* pc += (A == K) ? pc->jt : pc->jf */
776 condt = ARM_COND_EQ;
777 goto cmp_imm;
778 case BPF_JMP | BPF_JGT | BPF_K:
779 /* pc += (A > K) ? pc->jt : pc->jf */
780 condt = ARM_COND_HI;
781 goto cmp_imm;
782 case BPF_JMP | BPF_JGE | BPF_K:
783 /* pc += (A >= K) ? pc->jt : pc->jf */
784 condt = ARM_COND_HS;
785cmp_imm:
786 imm12 = imm8m(k);
787 if (imm12 < 0) {
788 emit_mov_i_no8m(r_scratch, k, ctx);
789 emit(ARM_CMP_R(r_A, r_scratch), ctx);
790 } else {
791 emit(ARM_CMP_I(r_A, imm12), ctx);
792 }
793cond_jump:
794 if (inst->jt)
795 _emit(condt, ARM_B(b_imm(i + inst->jt + 1,
796 ctx)), ctx);
797 if (inst->jf)
798 _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
799 ctx)), ctx);
800 break;
801 case BPF_JMP | BPF_JEQ | BPF_X:
802 /* pc += (A == X) ? pc->jt : pc->jf */
803 condt = ARM_COND_EQ;
804 goto cmp_x;
805 case BPF_JMP | BPF_JGT | BPF_X:
806 /* pc += (A > X) ? pc->jt : pc->jf */
807 condt = ARM_COND_HI;
808 goto cmp_x;
809 case BPF_JMP | BPF_JGE | BPF_X:
810 /* pc += (A >= X) ? pc->jt : pc->jf */
811 condt = ARM_COND_CS;
812cmp_x:
813 update_on_xread(ctx);
814 emit(ARM_CMP_R(r_A, r_X), ctx);
815 goto cond_jump;
816 case BPF_JMP | BPF_JSET | BPF_K:
817 /* pc += (A & K) ? pc->jt : pc->jf */
818 condt = ARM_COND_NE;
819 /* not set iff all zeroes iff Z==1 iff EQ */
820
821 imm12 = imm8m(k);
822 if (imm12 < 0) {
823 emit_mov_i_no8m(r_scratch, k, ctx);
824 emit(ARM_TST_R(r_A, r_scratch), ctx);
825 } else {
826 emit(ARM_TST_I(r_A, imm12), ctx);
827 }
828 goto cond_jump;
829 case BPF_JMP | BPF_JSET | BPF_X:
830 /* pc += (A & X) ? pc->jt : pc->jf */
831 update_on_xread(ctx);
832 condt = ARM_COND_NE;
833 emit(ARM_TST_R(r_A, r_X), ctx);
834 goto cond_jump;
835 case BPF_RET | BPF_A:
836 emit(ARM_MOV_R(ARM_R0, r_A), ctx);
837 goto b_epilogue;
838 case BPF_RET | BPF_K:
839 if ((k == 0) && (ctx->ret0_fp_idx < 0))
840 ctx->ret0_fp_idx = i;
841 emit_mov_i(ARM_R0, k, ctx);
842b_epilogue:
843 if (i != ctx->skf->len - 1)
844 emit(ARM_B(b_imm(prog->len, ctx)), ctx);
845 break;
846 case BPF_MISC | BPF_TAX:
847 /* X = A */
848 ctx->seen |= SEEN_X;
849 emit(ARM_MOV_R(r_X, r_A), ctx);
850 break;
851 case BPF_MISC | BPF_TXA:
852 /* A = X */
853 update_on_xread(ctx);
854 emit(ARM_MOV_R(r_A, r_X), ctx);
855 break;
856 case BPF_ANC | SKF_AD_PROTOCOL:
857 /* A = ntohs(skb->protocol) */
858 ctx->seen |= SEEN_SKB;
859 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
860 protocol) != 2);
861 off = offsetof(struct sk_buff, protocol);
862 emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
863 emit_swap16(r_A, r_scratch, ctx);
864 break;
865 case BPF_ANC | SKF_AD_CPU:
866 /* r_scratch = current_thread_info() */
867 OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
868 /* A = current_thread_info()->cpu */
869 BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
870 off = offsetof(struct thread_info, cpu);
871 emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
872 break;
873 case BPF_ANC | SKF_AD_IFINDEX:
874 case BPF_ANC | SKF_AD_HATYPE:
875 /* A = skb->dev->ifindex */
876 /* A = skb->dev->type */
877 ctx->seen |= SEEN_SKB;
878 off = offsetof(struct sk_buff, dev);
879 emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
880
881 emit(ARM_CMP_I(r_scratch, 0), ctx);
882 emit_err_ret(ARM_COND_EQ, ctx);
883
884 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
885 ifindex) != 4);
886 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
887 type) != 2);
888
889 if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
890 off = offsetof(struct net_device, ifindex);
891 emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
892 } else {
893 /*
894 * offset of field "type" in "struct
895 * net_device" is above what can be
896 * used in the ldrh rd, [rn, #imm]
897 * instruction, so load the offset in
898 * a register and use ldrh rd, [rn, rm]
899 */
900 off = offsetof(struct net_device, type);
901 emit_mov_i(ARM_R3, off, ctx);
902 emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
903 }
904 break;
905 case BPF_ANC | SKF_AD_MARK:
906 ctx->seen |= SEEN_SKB;
907 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
908 off = offsetof(struct sk_buff, mark);
909 emit(ARM_LDR_I(r_A, r_skb, off), ctx);
910 break;
911 case BPF_ANC | SKF_AD_RXHASH:
912 ctx->seen |= SEEN_SKB;
913 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
914 off = offsetof(struct sk_buff, hash);
915 emit(ARM_LDR_I(r_A, r_skb, off), ctx);
916 break;
917 case BPF_ANC | SKF_AD_VLAN_TAG:
918 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
919 ctx->seen |= SEEN_SKB;
920 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
921 off = offsetof(struct sk_buff, vlan_tci);
922 emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
923 if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
924 OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
925 else {
926 OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
927 OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
928 }
929 break;
930 case BPF_ANC | SKF_AD_PKTTYPE:
931 ctx->seen |= SEEN_SKB;
932 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
933 __pkt_type_offset[0]) != 1);
934 off = PKT_TYPE_OFFSET();
935 emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
936 emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
937#ifdef __BIG_ENDIAN_BITFIELD
938 emit(ARM_LSR_I(r_A, r_A, 5), ctx);
939#endif
940 break;
941 case BPF_ANC | SKF_AD_QUEUE:
942 ctx->seen |= SEEN_SKB;
943 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
944 queue_mapping) != 2);
945 BUILD_BUG_ON(offsetof(struct sk_buff,
946 queue_mapping) > 0xff);
947 off = offsetof(struct sk_buff, queue_mapping);
948 emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
949 break;
950 case BPF_ANC | SKF_AD_PAY_OFFSET:
951 ctx->seen |= SEEN_SKB | SEEN_CALL;
952
953 emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
954 emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
955 emit_blx_r(ARM_R3, ctx);
956 emit(ARM_MOV_R(r_A, ARM_R0), ctx);
957 break;
958 case BPF_LDX | BPF_W | BPF_ABS:
959 /*
960 * load a 32bit word from struct seccomp_data.
961 * seccomp_check_filter() will already have checked
962 * that k is 32bit aligned and lies within the
963 * struct seccomp_data.
964 */
965 ctx->seen |= SEEN_SKB;
966 emit(ARM_LDR_I(r_A, r_skb, k), ctx);
967 break;
968 default:
969 return -1;
970 }
971
972 if (ctx->flags & FLAG_IMM_OVERFLOW)
973 /*
974 * this instruction generated an overflow when
975 * trying to access the literal pool, so
976 * delegate this filter to the kernel interpreter.
977 */
978 return -1;
979 }
980
981 /* compute offsets only during the first pass */
982 if (ctx->target == NULL)
983 ctx->offsets[i] = ctx->idx * 4;
984
985 return 0;
986}
987
988
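/* Classic BPF entry point: a fake pass with ctx.target == NULL sizes
 * the image and computes branch offsets, then a real pass emits code
 * into the buffer from bpf_jit_binary_alloc().  Any failure leaves
 * fp->jited unset so the interpreter is used instead.
 */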
989void bpf_jit_compile(struct bpf_prog *fp)
990{
991 struct bpf_binary_header *header;
992 struct jit_ctx ctx;
993 unsigned tmp_idx;
994 unsigned alloc_size;
995 u8 *target_ptr;
996
997 if (!bpf_jit_enable)
998 return;
999
1000 memset(&ctx, 0, sizeof(ctx));
1001 ctx.skf = fp;
1002 ctx.ret0_fp_idx = -1;
1003
1004 ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
1005 if (ctx.offsets == NULL)
1006 return;
1007
1008 /* fake pass to fill in the ctx->seen */
1009 if (unlikely(build_body(&ctx)))
1010 goto out;
1011
1012 tmp_idx = ctx.idx;
1013 build_prologue(&ctx);
1014 ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
1015
1016#if __LINUX_ARM_ARCH__ < 7
1017 tmp_idx = ctx.idx;
1018 build_epilogue(&ctx);
1019 ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
1020
1021 ctx.idx += ctx.imm_count;
1022 if (ctx.imm_count) {
1023 ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
1024 if (ctx.imms == NULL)
1025 goto out;
1026 }
1027#else
1028 /* there's nothing after the epilogue on ARMv7 */
1029 build_epilogue(&ctx);
1030#endif
1031 alloc_size = 4 * ctx.idx;
1032 header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
1033 4, jit_fill_hole);
1034 if (header == NULL)
1035 goto out;
1036
1037 ctx.target = (u32 *) target_ptr;
1038 ctx.idx = 0;
1039
1040 build_prologue(&ctx);
1041 if (build_body(&ctx) < 0) {
1042#if __LINUX_ARM_ARCH__ < 7
1043 if (ctx.imm_count)
1044 kfree(ctx.imms);
1045#endif
1046 bpf_jit_binary_free(header);
1047 goto out;
1048 }
1049 build_epilogue(&ctx);
1050
1051 flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
1052
1053#if __LINUX_ARM_ARCH__ < 7
1054 if (ctx.imm_count)
1055 kfree(ctx.imms);
1056#endif
1057
1058 if (bpf_jit_enable > 1)
1059 /* there are 2 passes here */
1060 bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
1061
1062 set_memory_ro((unsigned long)header, header->pages);
1063 fp->bpf_func = (void *)ctx.target;
1064 fp->jited = 1;
1065out:
1066 kfree(ctx.offsets);
1067 return;
1068}
1069
1070void bpf_jit_free(struct bpf_prog *fp)
1071{
1072 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
1073 struct bpf_binary_header *header = (void *)addr;
1074
1075 if (!fp->jited)
1076 goto free_filter;
1077
1078 set_memory_rw(addr, header->pages);
1079 bpf_jit_binary_free(header);
1080
1081free_filter:
1082 bpf_prog_unlock_free(fp);
1083}