1/*
2 * Just-In-Time compiler for BPF filters on 32bit ARM
3 *
4 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License.
9 */
10
11#include <linux/bitops.h>
12#include <linux/compiler.h>
13#include <linux/errno.h>
14#include <linux/filter.h>
15#include <linux/netdevice.h>
16#include <linux/string.h>
17#include <linux/slab.h>
18#include <linux/if_vlan.h>
19
20#include <asm/cacheflush.h>
21#include <asm/hwcap.h>
22#include <asm/opcodes.h>
23
24#include "bpf_jit_32.h"
25
26/*
27 * ABI:
28 *
29 * r0 scratch register
30 * r4 BPF register A
31 * r5 BPF register X
32 * r6 pointer to the skb
33 * r7 skb->data
34 * r8 skb_headlen(skb)
35 */
36
37#define r_scratch ARM_R0
38/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
39#define r_off ARM_R1
40#define r_A ARM_R4
41#define r_X ARM_R5
42#define r_skb ARM_R6
43#define r_skb_data ARM_R7
44#define r_skb_hl ARM_R8
45
46#define SCRATCH_SP_OFFSET 0
47#define SCRATCH_OFF(k) (SCRATCH_SP_OFFSET + 4 * (k))
48
49#define SEEN_MEM ((1 << BPF_MEMWORDS) - 1)
50#define SEEN_MEM_WORD(k) (1 << (k))
51#define SEEN_X (1 << BPF_MEMWORDS)
52#define SEEN_CALL (1 << (BPF_MEMWORDS + 1))
53#define SEEN_SKB (1 << (BPF_MEMWORDS + 2))
54#define SEEN_DATA (1 << (BPF_MEMWORDS + 3))
55
56#define FLAG_NEED_X_RESET (1 << 0)
57#define FLAG_IMM_OVERFLOW (1 << 1)
58
59struct jit_ctx {
60 const struct bpf_prog *skf;
61 unsigned idx;
62 unsigned prologue_bytes;
63 int ret0_fp_idx;
64 u32 seen;
65 u32 flags;
66 u32 *offsets;
67 u32 *target;
68#if __LINUX_ARM_ARCH__ < 7
69 u16 epilogue_bytes;
70 u16 imm_count;
71 u32 *imms;
72#endif
73};
74
75int bpf_jit_enable __read_mostly;
76
77static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
78 unsigned int size)
79{
80 void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
81
82 if (!ptr)
83 return -EFAULT;
84 memcpy(ret, ptr, size);
85 return 0;
86}
87
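/*
 * The jit_get_skb_{b,h,w} helpers below pack the error code from
 * call_neg_helper()/skb_copy_bits() into the upper 32 bits of the u64
 * return value and the (byte-swapped) loaded data into the lower 32 bits.
 * Per the AAPCS the 64-bit result comes back in r0/r1, so the generated
 * slow path can test the error in ARM_R1 and read the value from ARM_R0,
 * which is exactly what build_body() emits after calling them.
 */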
88static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
89{
90 u8 ret;
91 int err;
92
93 if (offset < 0)
94 err = call_neg_helper(skb, offset, &ret, 1);
95 else
96 err = skb_copy_bits(skb, offset, &ret, 1);
97
98 return (u64)err << 32 | ret;
99}
100
101static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
102{
103 u16 ret;
104 int err;
105
106 if (offset < 0)
107 err = call_neg_helper(skb, offset, &ret, 2);
108 else
109 err = skb_copy_bits(skb, offset, &ret, 2);
110
111 return (u64)err << 32 | ntohs(ret);
112}
113
114static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
115{
116 u32 ret;
117 int err;
118
119 if (offset < 0)
120 err = call_neg_helper(skb, offset, &ret, 4);
121 else
122 err = skb_copy_bits(skb, offset, &ret, 4);
123
124 return (u64)err << 32 | ntohl(ret);
125}
126
127/*
128 * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
129 * (where the assembly routines like __aeabi_uidiv could cause problems).
130 */
131static u32 jit_udiv(u32 dividend, u32 divisor)
132{
133 return dividend / divisor;
134}
135
136static u32 jit_mod(u32 dividend, u32 divisor)
137{
138 return dividend % divisor;
139}
140
141static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
142{
143 inst |= (cond << 28);
144 inst = __opcode_to_mem_arm(inst);
145
146 if (ctx->target != NULL)
147 ctx->target[ctx->idx] = inst;
148
149 ctx->idx++;
150}
151
152/*
153 * Emit an instruction that will be executed unconditionally.
154 */
155static inline void emit(u32 inst, struct jit_ctx *ctx)
156{
157 _emit(ARM_COND_AL, inst, ctx);
158}
159
160static u16 saved_regs(struct jit_ctx *ctx)
161{
162 u16 ret = 0;
163
164 if ((ctx->skf->len > 1) ||
165 (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
166 ret |= 1 << r_A;
167
168#ifdef CONFIG_FRAME_POINTER
169 ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
170#else
171 if (ctx->seen & SEEN_CALL)
172 ret |= 1 << ARM_LR;
173#endif
174 if (ctx->seen & (SEEN_DATA | SEEN_SKB))
175 ret |= 1 << r_skb;
176 if (ctx->seen & SEEN_DATA)
177 ret |= (1 << r_skb_data) | (1 << r_skb_hl);
178 if (ctx->seen & SEEN_X)
179 ret |= 1 << r_X;
180
181 return ret;
182}
183
184static inline int mem_words_used(struct jit_ctx *ctx)
185{
186 /* yes, we do waste some stack space IF there are "holes" in the set */
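 /* e.g. if only M[3] is referenced, SEEN_MEM = 0b1000 and fls() returns 4 */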
187 return fls(ctx->seen & SEEN_MEM);
188}
189
190static void jit_fill_hole(void *area, unsigned int size)
191{
192 u32 *ptr;
193 /* We are guaranteed to have aligned memory. */
194 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
195 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
196}
197
198static void build_prologue(struct jit_ctx *ctx)
199{
200 u16 reg_set = saved_regs(ctx);
201 u16 off;
202
203#ifdef CONFIG_FRAME_POINTER
204 emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
205 emit(ARM_PUSH(reg_set), ctx);
206 emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
207#else
208 if (reg_set)
209 emit(ARM_PUSH(reg_set), ctx);
210#endif
211
212 if (ctx->seen & (SEEN_DATA | SEEN_SKB))
213 emit(ARM_MOV_R(r_skb, ARM_R0), ctx);
214
215 if (ctx->seen & SEEN_DATA) {
216 off = offsetof(struct sk_buff, data);
217 emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
218 /* headlen = len - data_len */
219 off = offsetof(struct sk_buff, len);
220 emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
221 off = offsetof(struct sk_buff, data_len);
222 emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
223 emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
224 }
225
226 if (ctx->flags & FLAG_NEED_X_RESET)
227 emit(ARM_MOV_I(r_X, 0), ctx);
228
229 /* do not leak kernel data to userspace */
230 if (bpf_needs_clear_a(&ctx->skf->insns[0]))
231 emit(ARM_MOV_I(r_A, 0), ctx);
232
233 /* stack space for the BPF_MEM words */
234 if (ctx->seen & SEEN_MEM)
235 emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
236}
237
238static void build_epilogue(struct jit_ctx *ctx)
239{
240 u16 reg_set = saved_regs(ctx);
241
242 if (ctx->seen & SEEN_MEM)
243 emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
244
245 reg_set &= ~(1 << ARM_LR);
246
247#ifdef CONFIG_FRAME_POINTER
248 /* the first instruction of the prologue was: mov ip, sp */
249 reg_set &= ~(1 << ARM_IP);
250 reg_set |= (1 << ARM_SP);
251 emit(ARM_LDM(ARM_SP, reg_set), ctx);
252#else
253 if (reg_set) {
254 if (ctx->seen & SEEN_CALL)
255 reg_set |= 1 << ARM_PC;
256 emit(ARM_POP(reg_set), ctx);
257 }
258
259 if (!(ctx->seen & SEEN_CALL))
260 emit(ARM_BX(ARM_LR), ctx);
261#endif
262}
263
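/*
 * Try to encode a 32-bit value as an ARM "modified immediate": an 8-bit
 * value rotated right by an even amount, packed into a 12-bit operand.
 * Returns the 12-bit encoding, or -1 if the value cannot be encoded.
 * For example, 0x00ff0000 encodes as 0x8ff (0xff with rotation field 8,
 * i.e. rotated right by 16).
 */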
264static int16_t imm8m(u32 x)
265{
266 u32 rot;
267
268 for (rot = 0; rot < 16; rot++)
269 if ((x & ~ror32(0xff, 2 * rot)) == 0)
270 return rol32(x, 2 * rot) | (rot << 8);
271
272 return -1;
273}
274
275#if __LINUX_ARM_ARCH__ < 7
276
277static u16 imm_offset(u32 k, struct jit_ctx *ctx)
278{
279 unsigned i = 0, offset;
280 u16 imm;
281
282 /* on the "fake" run we just count them (duplicates included) */
283 if (ctx->target == NULL) {
284 ctx->imm_count++;
285 return 0;
286 }
287
288 while ((i < ctx->imm_count) && ctx->imms[i]) {
289 if (ctx->imms[i] == k)
290 break;
291 i++;
292 }
293
294 if (ctx->imms[i] == 0)
295 ctx->imms[i] = k;
296
297 /* constants go just after the epilogue */
298 offset = ctx->offsets[ctx->skf->len];
299 offset += ctx->prologue_bytes;
300 offset += ctx->epilogue_bytes;
301 offset += i * 4;
302
303 ctx->target[offset / 4] = k;
304
305 /* PC in ARM mode == address of the instruction + 8 */
306 imm = offset - (8 + ctx->idx * 4);
307
308 if (imm & ~0xfff) {
309 /*
310 * literal pool is too far, signal it into flags. we
311 * can only detect it on the second pass unfortunately.
312 */
313 ctx->flags |= FLAG_IMM_OVERFLOW;
314 return 0;
315 }
316
317 return imm;
318}
319
320#endif /* __LINUX_ARM_ARCH__ */
321
322/*
323 * Move an immediate that's not an imm8m to a core register.
324 */
325static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
326{
327#if __LINUX_ARM_ARCH__ < 7
328 emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
329#else
330 emit(ARM_MOVW(rd, val & 0xffff), ctx);
331 if (val > 0xffff)
332 emit(ARM_MOVT(rd, val >> 16), ctx);
333#endif
334}
335
336static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
337{
338 int imm12 = imm8m(val);
339
340 if (imm12 >= 0)
341 emit(ARM_MOV_I(rd, imm12), ctx);
342 else
343 emit_mov_i_no8m(rd, val, ctx);
344}
345
346#if __LINUX_ARM_ARCH__ < 6
347
348static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
349{
350 _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
351 _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
352 _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
353 _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
354 _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
355 _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
356 _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
357 _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
358}
359
360static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
361{
362 _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
363 _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
364 _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
365}
366
367static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
368{
369 /* r_dst = (r_src << 8) | (r_src >> 8) */
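 /* e.g. r_src = 0x0000aabb yields r_dst = 0x0000bbaa after the BIC below */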
370 emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
371 emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);
372
373 /*
374 * we need to mask out the bits set in r_dst[23:16] due to
375 * the first shift instruction.
376 *
377 * note that 0x8ff is the encoded immediate 0x00ff0000.
378 */
379 emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
380}
381
382#else /* ARMv6+ */
383
384static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
385{
386 _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
387#ifdef __LITTLE_ENDIAN
388 _emit(cond, ARM_REV(r_res, r_res), ctx);
389#endif
390}
391
392static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
393{
394 _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
395#ifdef __LITTLE_ENDIAN
396 _emit(cond, ARM_REV16(r_res, r_res), ctx);
397#endif
398}
399
400static inline void emit_swap16(u8 r_dst __maybe_unused,
401 u8 r_src __maybe_unused,
402 struct jit_ctx *ctx __maybe_unused)
403{
404#ifdef __LITTLE_ENDIAN
405 emit(ARM_REV16(r_dst, r_src), ctx);
406#endif
407}
408
409#endif /* __LINUX_ARM_ARCH__ < 6 */
410
411
412/* Compute the immediate value for a PC-relative branch. */
413static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
414{
415 u32 imm;
416
417 if (ctx->target == NULL)
418 return 0;
419 /*
420 * BPF allows only forward jumps and the offset of the target is
421 * still the one computed during the first pass.
422 */
423 imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);
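 /*
 * ARM branch offsets are expressed in words relative to the PC, which
 * in ARM state reads as the address of the branch plus 8: hence the
 * 8 subtracted above and the ">> 2" below.
 */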
424
425 return imm >> 2;
426}
427
428#define OP_IMM3(op, r1, r2, imm_val, ctx) \
429 do { \
430 imm12 = imm8m(imm_val); \
431 if (imm12 < 0) { \
432 emit_mov_i_no8m(r_scratch, imm_val, ctx); \
433 emit(op ## _R((r1), (r2), r_scratch), ctx); \
434 } else { \
435 emit(op ## _I((r1), (r2), imm12), ctx); \
436 } \
437 } while (0)
438
439static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
440{
441 if (ctx->ret0_fp_idx >= 0) {
442 _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
443 /* NOP to keep the size constant between passes */
444 emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
445 } else {
446 _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
447 _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
448 }
449}
450
451static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
452{
453#if __LINUX_ARM_ARCH__ < 5
454 emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
455
456 if (elf_hwcap & HWCAP_THUMB)
457 emit(ARM_BX(tgt_reg), ctx);
458 else
459 emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
460#else
461 emit(ARM_BLX_R(tgt_reg), ctx);
462#endif
463}
464
465static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx,
466 int bpf_op)
467{
468#if __LINUX_ARM_ARCH__ == 7
469 if (elf_hwcap & HWCAP_IDIVA) {
470 if (bpf_op == BPF_DIV)
471 emit(ARM_UDIV(rd, rm, rn), ctx);
472 else {
473 emit(ARM_UDIV(ARM_R3, rm, rn), ctx);
474 emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx);
475 }
476 return;
477 }
478#endif
479
480 /*
481 * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
482 * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
483 * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
484 * before using it as a source for ARM_R1.
485 *
486 * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
487 * ARM_R5 (r_X) so there are no particular register overlap
488 * issues.
489 */
490 if (rn != ARM_R1)
491 emit(ARM_MOV_R(ARM_R1, rn), ctx);
492 if (rm != ARM_R0)
493 emit(ARM_MOV_R(ARM_R0, rm), ctx);
494
495 ctx->seen |= SEEN_CALL;
496 emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? (u32)jit_udiv : (u32)jit_mod,
497 ctx);
498 emit_blx_r(ARM_R3, ctx);
499
500 if (rd != ARM_R0)
501 emit(ARM_MOV_R(rd, ARM_R0), ctx);
502}
503
504static inline void update_on_xread(struct jit_ctx *ctx)
505{
506 if (!(ctx->seen & SEEN_X))
507 ctx->flags |= FLAG_NEED_X_RESET;
508
509 ctx->seen |= SEEN_X;
510}
511
512static int build_body(struct jit_ctx *ctx)
513{
514 void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
515 const struct bpf_prog *prog = ctx->skf;
516 const struct sock_filter *inst;
517 unsigned i, load_order, off, condt;
518 int imm12;
519 u32 k;
520
521 for (i = 0; i < prog->len; i++) {
522 u16 code;
523
524 inst = &(prog->insns[i]);
525 /* K as an immediate value operand */
526 k = inst->k;
527 code = bpf_anc_helper(inst);
528
529 /* compute offsets only in the fake pass */
530 if (ctx->target == NULL)
531 ctx->offsets[i] = ctx->idx * 4;
532
533 switch (code) {
534 case BPF_LD | BPF_IMM:
535 emit_mov_i(r_A, k, ctx);
536 break;
537 case BPF_LD | BPF_W | BPF_LEN:
538 ctx->seen |= SEEN_SKB;
539 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
540 emit(ARM_LDR_I(r_A, r_skb,
541 offsetof(struct sk_buff, len)), ctx);
542 break;
543 case BPF_LD | BPF_MEM:
544 /* A = scratch[k] */
545 ctx->seen |= SEEN_MEM_WORD(k);
546 emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
547 break;
548 case BPF_LD | BPF_W | BPF_ABS:
549 load_order = 2;
550 goto load;
551 case BPF_LD | BPF_H | BPF_ABS:
552 load_order = 1;
553 goto load;
554 case BPF_LD | BPF_B | BPF_ABS:
555 load_order = 0;
556load:
557 emit_mov_i(r_off, k, ctx);
558load_common:
559 ctx->seen |= SEEN_DATA | SEEN_CALL;
560
561 if (load_order > 0) {
562 emit(ARM_SUB_I(r_scratch, r_skb_hl,
563 1 << load_order), ctx);
564 emit(ARM_CMP_R(r_scratch, r_off), ctx);
565 condt = ARM_COND_GE;
566 } else {
567 emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
568 condt = ARM_COND_HI;
569 }
570
571 /*
572 * test for negative offset, only if we are
573 * currently scheduled to take the fast
574 * path. this will update the flags so that
575 * the slowpath instructions are ignored if the
576 * offset is negative.
577 *
578 * for load_order == 0 the HI condition will
579 * make loads at offset 0 take the slow path too.
580 */
581 _emit(condt, ARM_CMP_I(r_off, 0), ctx);
582
583 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
584 ctx);
585
586 if (load_order == 0)
587 _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
588 ctx);
589 else if (load_order == 1)
590 emit_load_be16(condt, r_A, r_scratch, ctx);
591 else if (load_order == 2)
592 emit_load_be32(condt, r_A, r_scratch, ctx);
593
594 _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);
595
596 /* the slowpath */
597 emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
598 emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
599 /* the offset is already in R1 */
600 emit_blx_r(ARM_R3, ctx);
601 /* check the result of skb_copy_bits */
602 emit(ARM_CMP_I(ARM_R1, 0), ctx);
603 emit_err_ret(ARM_COND_NE, ctx);
604 emit(ARM_MOV_R(r_A, ARM_R0), ctx);
605 break;
606 case BPF_LD | BPF_W | BPF_IND:
607 load_order = 2;
608 goto load_ind;
609 case BPF_LD | BPF_H | BPF_IND:
610 load_order = 1;
611 goto load_ind;
612 case BPF_LD | BPF_B | BPF_IND:
613 load_order = 0;
614load_ind:
615 update_on_xread(ctx);
616 OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
617 goto load_common;
618 case BPF_LDX | BPF_IMM:
619 ctx->seen |= SEEN_X;
620 emit_mov_i(r_X, k, ctx);
621 break;
622 case BPF_LDX | BPF_W | BPF_LEN:
623 ctx->seen |= SEEN_X | SEEN_SKB;
624 emit(ARM_LDR_I(r_X, r_skb,
625 offsetof(struct sk_buff, len)), ctx);
626 break;
627 case BPF_LDX | BPF_MEM:
628 ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
629 emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
630 break;
631 case BPF_LDX | BPF_B | BPF_MSH:
632 /* x = ((*(frame + k)) & 0xf) << 2; */
633 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
634 /* the interpreter should deal with the negative K */
635 if ((int)k < 0)
636 return -1;
637 /* offset in r1: we might have to take the slow path */
638 emit_mov_i(r_off, k, ctx);
639 emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
640
641 /* load in r0: common with the slowpath */
642 _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
643 ARM_R1), ctx);
644 /*
645 * emit_mov_i() might generate one or two instructions,
646 * the same holds for emit_blx_r()
647 */
648 _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);
649
650 emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
651 /* r_off is r1 */
652 emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
653 emit_blx_r(ARM_R3, ctx);
654 /* check the return value of skb_copy_bits */
655 emit(ARM_CMP_I(ARM_R1, 0), ctx);
656 emit_err_ret(ARM_COND_NE, ctx);
657
658 emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
659 emit(ARM_LSL_I(r_X, r_X, 2), ctx);
660 break;
661 case BPF_ST:
662 ctx->seen |= SEEN_MEM_WORD(k);
663 emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
664 break;
665 case BPF_STX:
666 update_on_xread(ctx);
667 ctx->seen |= SEEN_MEM_WORD(k);
668 emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
669 break;
670 case BPF_ALU | BPF_ADD | BPF_K:
671 /* A += K */
672 OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
673 break;
674 case BPF_ALU | BPF_ADD | BPF_X:
675 update_on_xread(ctx);
676 emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
677 break;
678 case BPF_ALU | BPF_SUB | BPF_K:
679 /* A -= K */
680 OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
681 break;
682 case BPF_ALU | BPF_SUB | BPF_X:
683 update_on_xread(ctx);
684 emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
685 break;
686 case BPF_ALU | BPF_MUL | BPF_K:
687 /* A *= K */
688 emit_mov_i(r_scratch, k, ctx);
689 emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
690 break;
691 case BPF_ALU | BPF_MUL | BPF_X:
692 update_on_xread(ctx);
693 emit(ARM_MUL(r_A, r_A, r_X), ctx);
694 break;
695 case BPF_ALU | BPF_DIV | BPF_K:
696 if (k == 1)
697 break;
698 emit_mov_i(r_scratch, k, ctx);
699 emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV);
700 break;
701 case BPF_ALU | BPF_DIV | BPF_X:
702 update_on_xread(ctx);
703 emit(ARM_CMP_I(r_X, 0), ctx);
704 emit_err_ret(ARM_COND_EQ, ctx);
705 emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV);
706 break;
707 case BPF_ALU | BPF_MOD | BPF_K:
708 if (k == 1) {
709 emit_mov_i(r_A, 0, ctx);
710 break;
711 }
712 emit_mov_i(r_scratch, k, ctx);
713 emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD);
714 break;
715 case BPF_ALU | BPF_MOD | BPF_X:
716 update_on_xread(ctx);
717 emit(ARM_CMP_I(r_X, 0), ctx);
718 emit_err_ret(ARM_COND_EQ, ctx);
719 emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD);
720 break;
721 case BPF_ALU | BPF_OR | BPF_K:
722 /* A |= K */
723 OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
724 break;
725 case BPF_ALU | BPF_OR | BPF_X:
726 update_on_xread(ctx);
727 emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
728 break;
729 case BPF_ALU | BPF_XOR | BPF_K:
730 /* A ^= K; */
731 OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
732 break;
733 case BPF_ANC | SKF_AD_ALU_XOR_X:
734 case BPF_ALU | BPF_XOR | BPF_X:
735 /* A ^= X */
736 update_on_xread(ctx);
737 emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
738 break;
739 case BPF_ALU | BPF_AND | BPF_K:
740 /* A &= K */
741 OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
742 break;
743 case BPF_ALU | BPF_AND | BPF_X:
744 update_on_xread(ctx);
745 emit(ARM_AND_R(r_A, r_A, r_X), ctx);
746 break;
747 case BPF_ALU | BPF_LSH | BPF_K:
748 if (unlikely(k > 31))
749 return -1;
750 emit(ARM_LSL_I(r_A, r_A, k), ctx);
751 break;
752 case BPF_ALU | BPF_LSH | BPF_X:
753 update_on_xread(ctx);
754 emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
755 break;
756 case BPF_ALU | BPF_RSH | BPF_K:
757 if (unlikely(k > 31))
758 return -1;
759 if (k)
760 emit(ARM_LSR_I(r_A, r_A, k), ctx);
761 break;
762 case BPF_ALU | BPF_RSH | BPF_X:
763 update_on_xread(ctx);
764 emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
765 break;
766 case BPF_ALU | BPF_NEG:
767 /* A = -A */
768 emit(ARM_RSB_I(r_A, r_A, 0), ctx);
769 break;
770 case BPF_JMP | BPF_JA:
771 /* pc += K */
772 emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
773 break;
774 case BPF_JMP | BPF_JEQ | BPF_K:
775 /* pc += (A == K) ? pc->jt : pc->jf */
776 condt = ARM_COND_EQ;
777 goto cmp_imm;
778 case BPF_JMP | BPF_JGT | BPF_K:
779 /* pc += (A > K) ? pc->jt : pc->jf */
780 condt = ARM_COND_HI;
781 goto cmp_imm;
782 case BPF_JMP | BPF_JGE | BPF_K:
783 /* pc += (A >= K) ? pc->jt : pc->jf */
784 condt = ARM_COND_HS;
785cmp_imm:
786 imm12 = imm8m(k);
787 if (imm12 < 0) {
788 emit_mov_i_no8m(r_scratch, k, ctx);
789 emit(ARM_CMP_R(r_A, r_scratch), ctx);
790 } else {
791 emit(ARM_CMP_I(r_A, imm12), ctx);
792 }
793cond_jump:
794 if (inst->jt)
795 _emit(condt, ARM_B(b_imm(i + inst->jt + 1,
796 ctx)), ctx);
797 if (inst->jf)
798 _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
799 ctx)), ctx);
800 break;
801 case BPF_JMP | BPF_JEQ | BPF_X:
802 /* pc += (A == X) ? pc->jt : pc->jf */
803 condt = ARM_COND_EQ;
804 goto cmp_x;
805 case BPF_JMP | BPF_JGT | BPF_X:
806 /* pc += (A > X) ? pc->jt : pc->jf */
807 condt = ARM_COND_HI;
808 goto cmp_x;
809 case BPF_JMP | BPF_JGE | BPF_X:
810 /* pc += (A >= X) ? pc->jt : pc->jf */
811 condt = ARM_COND_CS;
812cmp_x:
813 update_on_xread(ctx);
814 emit(ARM_CMP_R(r_A, r_X), ctx);
815 goto cond_jump;
816 case BPF_JMP | BPF_JSET | BPF_K:
817 /* pc += (A & K) ? pc->jt : pc->jf */
818 condt = ARM_COND_NE;
819 /* not set iff all zeroes iff Z==1 iff EQ */
820
821 imm12 = imm8m(k);
822 if (imm12 < 0) {
823 emit_mov_i_no8m(r_scratch, k, ctx);
824 emit(ARM_TST_R(r_A, r_scratch), ctx);
825 } else {
826 emit(ARM_TST_I(r_A, imm12), ctx);
827 }
828 goto cond_jump;
829 case BPF_JMP | BPF_JSET | BPF_X:
830 /* pc += (A & X) ? pc->jt : pc->jf */
831 update_on_xread(ctx);
832 condt = ARM_COND_NE;
833 emit(ARM_TST_R(r_A, r_X), ctx);
834 goto cond_jump;
835 case BPF_RET | BPF_A:
836 emit(ARM_MOV_R(ARM_R0, r_A), ctx);
837 goto b_epilogue;
838 case BPF_RET | BPF_K:
839 if ((k == 0) && (ctx->ret0_fp_idx < 0))
840 ctx->ret0_fp_idx = i;
841 emit_mov_i(ARM_R0, k, ctx);
842b_epilogue:
843 if (i != ctx->skf->len - 1)
844 emit(ARM_B(b_imm(prog->len, ctx)), ctx);
845 break;
846 case BPF_MISC | BPF_TAX:
847 /* X = A */
848 ctx->seen |= SEEN_X;
849 emit(ARM_MOV_R(r_X, r_A), ctx);
850 break;
851 case BPF_MISC | BPF_TXA:
852 /* A = X */
853 update_on_xread(ctx);
854 emit(ARM_MOV_R(r_A, r_X), ctx);
855 break;
856 case BPF_ANC | SKF_AD_PROTOCOL:
857 /* A = ntohs(skb->protocol) */
858 ctx->seen |= SEEN_SKB;
859 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
860 protocol) != 2);
861 off = offsetof(struct sk_buff, protocol);
862 emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
863 emit_swap16(r_A, r_scratch, ctx);
864 break;
865 case BPF_ANC | SKF_AD_CPU:
866 /* r_scratch = current_thread_info() */
867 OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
868 /* A = current_thread_info()->cpu */
869 BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
870 off = offsetof(struct thread_info, cpu);
871 emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
872 break;
873 case BPF_ANC | SKF_AD_IFINDEX:
874 case BPF_ANC | SKF_AD_HATYPE:
875 /* A = skb->dev->ifindex */
876 /* A = skb->dev->type */
877 ctx->seen |= SEEN_SKB;
878 off = offsetof(struct sk_buff, dev);
879 emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
880
881 emit(ARM_CMP_I(r_scratch, 0), ctx);
882 emit_err_ret(ARM_COND_EQ, ctx);
883
884 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
885 ifindex) != 4);
886 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
887 type) != 2);
888
889 if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
890 off = offsetof(struct net_device, ifindex);
891 emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
892 } else {
893 /*
894 * offset of field "type" in "struct
895 * net_device" is above what can be
896 * used in the ldrh rd, [rn, #imm]
897 * instruction, so load the offset in
898 * a register and use ldrh rd, [rn, rm]
899 */
900 off = offsetof(struct net_device, type);
901 emit_mov_i(ARM_R3, off, ctx);
902 emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
903 }
904 break;
905 case BPF_ANC | SKF_AD_MARK:
906 ctx->seen |= SEEN_SKB;
907 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
908 off = offsetof(struct sk_buff, mark);
909 emit(ARM_LDR_I(r_A, r_skb, off), ctx);
910 break;
911 case BPF_ANC | SKF_AD_RXHASH:
912 ctx->seen |= SEEN_SKB;
913 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
914 off = offsetof(struct sk_buff, hash);
915 emit(ARM_LDR_I(r_A, r_skb, off), ctx);
916 break;
917 case BPF_ANC | SKF_AD_VLAN_TAG:
918 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
919 ctx->seen |= SEEN_SKB;
920 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
921 off = offsetof(struct sk_buff, vlan_tci);
922 emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
923 if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
924 OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
925 else {
926 OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
927 OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
928 }
929 break;
930 case BPF_ANC | SKF_AD_PKTTYPE:
931 ctx->seen |= SEEN_SKB;
932 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
933 __pkt_type_offset[0]) != 1);
934 off = PKT_TYPE_OFFSET();
935 emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
936 emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
937#ifdef __BIG_ENDIAN_BITFIELD
938 emit(ARM_LSR_I(r_A, r_A, 5), ctx);
939#endif
940 break;
941 case BPF_ANC | SKF_AD_QUEUE:
942 ctx->seen |= SEEN_SKB;
943 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
944 queue_mapping) != 2);
945 BUILD_BUG_ON(offsetof(struct sk_buff,
946 queue_mapping) > 0xff);
947 off = offsetof(struct sk_buff, queue_mapping);
948 emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
949 break;
950 case BPF_ANC | SKF_AD_PAY_OFFSET:
951 ctx->seen |= SEEN_SKB | SEEN_CALL;
952
953 emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
954 emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
955 emit_blx_r(ARM_R3, ctx);
956 emit(ARM_MOV_R(r_A, ARM_R0), ctx);
957 break;
958 case BPF_LDX | BPF_W | BPF_ABS:
959 /*
960 * load a 32bit word from struct seccomp_data.
961 * seccomp_check_filter() will already have checked
962 * that k is 32bit aligned and lies within the
963 * struct seccomp_data.
964 */
965 ctx->seen |= SEEN_SKB;
966 emit(ARM_LDR_I(r_A, r_skb, k), ctx);
967 break;
968 default:
969 return -1;
970 }
971
972 if (ctx->flags & FLAG_IMM_OVERFLOW)
973 /*
974 * this instruction generated an overflow when
975 * trying to access the literal pool, so
976 * delegate this filter to the kernel interpreter.
977 */
978 return -1;
979 }
980
981 /* compute offsets only during the first pass */
982 if (ctx->target == NULL)
983 ctx->offsets[i] = ctx->idx * 4;
984
985 return 0;
986}
987
988
989void bpf_jit_compile(struct bpf_prog *fp)
990{
991 struct bpf_binary_header *header;
992 struct jit_ctx ctx;
993 unsigned tmp_idx;
994 unsigned alloc_size;
995 u8 *target_ptr;
996
997 if (!bpf_jit_enable)
998 return;
999
1000 memset(&ctx, 0, sizeof(ctx));
1001 ctx.skf = fp;
1002 ctx.ret0_fp_idx = -1;
1003
1004 ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
1005 if (ctx.offsets == NULL)
1006 return;
1007
1008 /* fake pass to fill in the ctx->seen */
1009 if (unlikely(build_body(&ctx)))
1010 goto out;
1011
1012 tmp_idx = ctx.idx;
1013 build_prologue(&ctx);
1014 ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
1015
1016#if __LINUX_ARM_ARCH__ < 7
1017 tmp_idx = ctx.idx;
1018 build_epilogue(&ctx);
1019 ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
1020
1021 ctx.idx += ctx.imm_count;
1022 if (ctx.imm_count) {
1023 ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
1024 if (ctx.imms == NULL)
1025 goto out;
1026 }
1027#else
1028 /* there's nothing after the epilogue on ARMv7 */
1029 build_epilogue(&ctx);
1030#endif
1031 alloc_size = 4 * ctx.idx;
1032 header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
1033 4, jit_fill_hole);
1034 if (header == NULL)
1035 goto out;
1036
1037 ctx.target = (u32 *) target_ptr;
1038 ctx.idx = 0;
1039
1040 build_prologue(&ctx);
1041 if (build_body(&ctx) < 0) {
1042#if __LINUX_ARM_ARCH__ < 7
1043 if (ctx.imm_count)
1044 kfree(ctx.imms);
1045#endif
1046 bpf_jit_binary_free(header);
1047 goto out;
1048 }
1049 build_epilogue(&ctx);
1050
1051 flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
1052
1053#if __LINUX_ARM_ARCH__ < 7
1054 if (ctx.imm_count)
1055 kfree(ctx.imms);
1056#endif
1057
1058 if (bpf_jit_enable > 1)
1059 /* there are 2 passes here */
1060 bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
1061
1062 set_memory_ro((unsigned long)header, header->pages);
1063 fp->bpf_func = (void *)ctx.target;
1064 fp->jited = 1;
1065out:
1066 kfree(ctx.offsets);
1067 return;
1068}
1069
1070void bpf_jit_free(struct bpf_prog *fp)
1071{
1072 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
1073 struct bpf_binary_header *header = (void *)addr;
1074
1075 if (!fp->jited)
1076 goto free_filter;
1077
1078 set_memory_rw(addr, header->pages);
1079 bpf_jit_binary_free(header);
1080
1081free_filter:
1082 bpf_prog_unlock_free(fp);
1083}
1/*
2 * Just-In-Time compiler for eBPF filters on 32bit ARM
3 *
4 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
5 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; version 2 of the License.
10 */
11
12#include <linux/bpf.h>
13#include <linux/bitops.h>
14#include <linux/compiler.h>
15#include <linux/errno.h>
16#include <linux/filter.h>
17#include <linux/netdevice.h>
18#include <linux/string.h>
19#include <linux/slab.h>
20#include <linux/if_vlan.h>
21
22#include <asm/cacheflush.h>
23#include <asm/hwcap.h>
24#include <asm/opcodes.h>
25
26#include "bpf_jit_32.h"
27
28/*
29 * eBPF prog stack layout:
30 *
31 * high
32 * original ARM_SP => +-----+
33 * | | callee saved registers
34 * +-----+ <= (BPF_FP + SCRATCH_SIZE)
35 * | ... | eBPF JIT scratch space
36 * eBPF fp register => +-----+
37 * (BPF_FP) | ... | eBPF prog stack
38 * +-----+
39 * |RSVD | JIT scratchpad
40 * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
41 * | |
42 * | ... | Function call stack
43 * | |
44 * +-----+
45 * low
46 *
47 * The callee saved registers depend on whether frame pointers are enabled.
48 * With frame pointers (to be compliant with the ABI):
49 *
50 * high
51 * original ARM_SP => +------------------+ \
52 * | pc | |
53 * current ARM_FP => +------------------+ } callee saved registers
54 * |r4-r8,r10,fp,ip,lr| |
55 * +------------------+ /
56 * low
57 *
58 * Without frame pointers:
59 *
60 * high
61 * original ARM_SP => +------------------+
62 * | r4-r8,r10,fp,lr | callee saved registers
63 * current ARM_FP => +------------------+
64 * low
65 *
66 * When popping registers off the stack at the end of a BPF function, we
67 * reference them via the current ARM_FP register.
68 */
69#define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
70 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \
71 1 << ARM_FP)
72#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
73#define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC)
74
75#define STACK_OFFSET(k) (k)
76#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */
77#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */
78#define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */
79
80#define FLAG_IMM_OVERFLOW (1 << 0)
81
82/*
83 * Map eBPF registers to ARM 32bit registers or stack scratch space.
84 *
85 * 1. The first argument is passed using ARM 32-bit registers; the remaining
86 *    arguments are passed on the stack scratch space.
87 * 2. The first callee-saved eBPF register is mapped to ARM 32-bit registers;
88 *    the rest are mapped to the stack scratch space.
89 * 3. We need two 64-bit temporary registers for complex operations on eBPF
90 *    registers.
91 *
92 * As the eBPF registers are all 64 bits wide and ARM has only 32-bit
93 * registers, we map each eBPF register to a pair of ARM 32-bit registers
94 * or to scratch memory on the stack and build the 64-bit value from those.
95 *
96 */
97static const u8 bpf2a32[][2] = {
98 /* return value from in-kernel function, and exit value from eBPF */
99 [BPF_REG_0] = {ARM_R1, ARM_R0},
100 /* arguments from eBPF program to in-kernel function */
101 [BPF_REG_1] = {ARM_R3, ARM_R2},
102 /* Stored on stack scratch space */
103 [BPF_REG_2] = {STACK_OFFSET(0), STACK_OFFSET(4)},
104 [BPF_REG_3] = {STACK_OFFSET(8), STACK_OFFSET(12)},
105 [BPF_REG_4] = {STACK_OFFSET(16), STACK_OFFSET(20)},
106 [BPF_REG_5] = {STACK_OFFSET(24), STACK_OFFSET(28)},
107 /* callee saved registers that in-kernel function will preserve */
108 [BPF_REG_6] = {ARM_R5, ARM_R4},
109 /* Stored on stack scratch space */
110 [BPF_REG_7] = {STACK_OFFSET(32), STACK_OFFSET(36)},
111 [BPF_REG_8] = {STACK_OFFSET(40), STACK_OFFSET(44)},
112 [BPF_REG_9] = {STACK_OFFSET(48), STACK_OFFSET(52)},
113 /* Read only Frame Pointer to access Stack */
114 [BPF_REG_FP] = {STACK_OFFSET(56), STACK_OFFSET(60)},
115 /* Temporary registers for internal BPF JIT; also used
116 * for constant blinding and other operations.
117 */
118 [TMP_REG_1] = {ARM_R7, ARM_R6},
119 [TMP_REG_2] = {ARM_R10, ARM_R8},
120 /* Tail call count. Stored on stack scratch space. */
121 [TCALL_CNT] = {STACK_OFFSET(64), STACK_OFFSET(68)},
122 /* temporary register for blinding constants.
123 * Stored on stack scratch space.
124 */
125 [BPF_REG_AX] = {STACK_OFFSET(72), STACK_OFFSET(76)},
126};
127
128#define dst_lo dst[1]
129#define dst_hi dst[0]
130#define src_lo src[1]
131#define src_hi src[0]
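/*
 * Each bpf2a32[] pair above is ordered {high word, low word}: index 0
 * holds the ARM register or stack slot for bits 63:32 of the eBPF
 * register and index 1 the one for bits 31:0, which is what the
 * dst_hi/dst_lo and src_hi/src_lo macros encode.
 */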
132
133/*
134 * JIT Context:
135 *
136 * prog : bpf_prog
137 * idx : index of the last JITed instruction.
138 * prologue_bytes : number of bytes used by the prologue.
139 * epilogue_offset : offset at which the epilogue starts.
140 * offsets : array of eBPF instruction offsets in
141 * JITed code.
142 * target : final JITed code.
143 * epilogue_bytes : number of bytes used by the epilogue.
144 * imm_count : number of literal pool entries, counted
145 * on the first pass.
146 * imms : array of literal pool constants (e.g. helper addresses).
147 */
148
149struct jit_ctx {
150 const struct bpf_prog *prog;
151 unsigned int idx;
152 unsigned int prologue_bytes;
153 unsigned int epilogue_offset;
154 u32 flags;
155 u32 *offsets;
156 u32 *target;
157 u32 stack_size;
158#if __LINUX_ARM_ARCH__ < 7
159 u16 epilogue_bytes;
160 u16 imm_count;
161 u32 *imms;
162#endif
163};
164
165/*
166 * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
167 * (where the assembly routines like __aeabi_uidiv could cause problems).
168 */
169static u32 jit_udiv32(u32 dividend, u32 divisor)
170{
171 return dividend / divisor;
172}
173
174static u32 jit_mod32(u32 dividend, u32 divisor)
175{
176 return dividend % divisor;
177}
178
179static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
180{
181 inst |= (cond << 28);
182 inst = __opcode_to_mem_arm(inst);
183
184 if (ctx->target != NULL)
185 ctx->target[ctx->idx] = inst;
186
187 ctx->idx++;
188}
189
190/*
191 * Emit an instruction that will be executed unconditionally.
192 */
193static inline void emit(u32 inst, struct jit_ctx *ctx)
194{
195 _emit(ARM_COND_AL, inst, ctx);
196}
197
198/*
199 * Checks if the immediate can be encoded as an ARM rotated-immediate (imm12) operand; returns the encoding or -1.
200 */
201static int16_t imm8m(u32 x)
202{
203 u32 rot;
204
205 for (rot = 0; rot < 16; rot++)
206 if ((x & ~ror32(0xff, 2 * rot)) == 0)
207 return rol32(x, 2 * rot) | (rot << 8);
208 return -1;
209}
210
211/*
212 * Initializes the JIT space with undefined instructions.
213 */
214static void jit_fill_hole(void *area, unsigned int size)
215{
216 u32 *ptr;
217 /* We are guaranteed to have aligned memory. */
218 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
219 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
220}
221
222#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
223/* EABI requires the stack to be aligned to 64-bit boundaries */
224#define STACK_ALIGNMENT 8
225#else
226/* Stack must be aligned to 32-bit boundaries */
227#define STACK_ALIGNMENT 4
228#endif
229
230/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
231 * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
232 * BPF_REG_FP, BPF_REG_AX and the tail call count.
233 */
234#define SCRATCH_SIZE 80
235
236/* total stack size used in JITed code */
237#define _STACK_SIZE \
238 (ctx->prog->aux->stack_depth + \
239 SCRATCH_SIZE + \
240 4 /* extra for skb_copy_bits buffer */)
241
242#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
243
244/* Get the offset of an eBPF register stored on the scratch space. */
245#define STACK_VAR(off) (STACK_SIZE - (off) - 4)
246
247/* Offset of skb_copy_bits buffer */
248#define SKB_BUFFER STACK_VAR(SCRATCH_SIZE)
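/*
 * STACK_VAR() addresses the scratch slots relative to ARM_SP; assuming the
 * prologue reserves STACK_SIZE bytes, STACK_VAR(0) is the highest word of
 * the reserved area (ARM_SP + STACK_SIZE - 4) and larger STACK_OFFSET()
 * values sit at progressively lower addresses.
 */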
249
250#if __LINUX_ARM_ARCH__ < 7
251
252static u16 imm_offset(u32 k, struct jit_ctx *ctx)
253{
254 unsigned int i = 0, offset;
255 u16 imm;
256
257 /* on the "fake" run we just count them (duplicates included) */
258 if (ctx->target == NULL) {
259 ctx->imm_count++;
260 return 0;
261 }
262
263 while ((i < ctx->imm_count) && ctx->imms[i]) {
264 if (ctx->imms[i] == k)
265 break;
266 i++;
267 }
268
269 if (ctx->imms[i] == 0)
270 ctx->imms[i] = k;
271
272 /* constants go just after the epilogue */
273 offset = ctx->offsets[ctx->prog->len - 1] * 4;
274 offset += ctx->prologue_bytes;
275 offset += ctx->epilogue_bytes;
276 offset += i * 4;
277
278 ctx->target[offset / 4] = k;
279
280 /* PC in ARM mode == address of the instruction + 8 */
281 imm = offset - (8 + ctx->idx * 4);
282
283 if (imm & ~0xfff) {
284 /*
285 * literal pool is too far, signal it into flags. we
286 * can only detect it on the second pass unfortunately.
287 */
288 ctx->flags |= FLAG_IMM_OVERFLOW;
289 return 0;
290 }
291
292 return imm;
293}
294
295#endif /* __LINUX_ARM_ARCH__ */
296
297static inline int bpf2a32_offset(int bpf_to, int bpf_from,
298 const struct jit_ctx *ctx) {
299 int to, from;
300
301 if (ctx->target == NULL)
302 return 0;
303 to = ctx->offsets[bpf_to];
304 from = ctx->offsets[bpf_from];
305
306 return to - from - 1;
307}
308
309/*
310 * Move an immediate that's not an imm8m to a core register.
311 */
312static inline void emit_mov_i_no8m(const u8 rd, u32 val, struct jit_ctx *ctx)
313{
314#if __LINUX_ARM_ARCH__ < 7
315 emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
316#else
317 emit(ARM_MOVW(rd, val & 0xffff), ctx);
318 if (val > 0xffff)
319 emit(ARM_MOVT(rd, val >> 16), ctx);
320#endif
321}
322
323static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
324{
325 int imm12 = imm8m(val);
326
327 if (imm12 >= 0)
328 emit(ARM_MOV_I(rd, imm12), ctx);
329 else
330 emit_mov_i_no8m(rd, val, ctx);
331}
332
333static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
334{
335 if (elf_hwcap & HWCAP_THUMB)
336 emit(ARM_BX(tgt_reg), ctx);
337 else
338 emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
339}
340
341static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
342{
343#if __LINUX_ARM_ARCH__ < 5
344 emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
345 emit_bx_r(tgt_reg, ctx);
346#else
347 emit(ARM_BLX_R(tgt_reg), ctx);
348#endif
349}
350
351static inline int epilogue_offset(const struct jit_ctx *ctx)
352{
353 int to, from;
354 /* No need for 1st dummy run */
355 if (ctx->target == NULL)
356 return 0;
357 to = ctx->epilogue_offset;
358 from = ctx->idx;
359
360 return to - from - 2;
361}
362
363static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
364{
365 const u8 *tmp = bpf2a32[TMP_REG_1];
366
367#if __LINUX_ARM_ARCH__ == 7
368 if (elf_hwcap & HWCAP_IDIVA) {
369 if (op == BPF_DIV)
370 emit(ARM_UDIV(rd, rm, rn), ctx);
371 else {
372 emit(ARM_UDIV(ARM_IP, rm, rn), ctx);
373 emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx);
374 }
375 return;
376 }
377#endif
378
379 /*
380 * For BPF_ALU | BPF_DIV | BPF_K instructions:
381 * as ARM_R1 and ARM_R0 contain the first argument of the bpf
382 * function, we need to save them on the caller side so they
383 * do not get destroyed within the callee.
384 * After the return from the callee, we restore ARM_R0 and
385 * ARM_R1.
386 */
387 if (rn != ARM_R1) {
388 emit(ARM_MOV_R(tmp[0], ARM_R1), ctx);
389 emit(ARM_MOV_R(ARM_R1, rn), ctx);
390 }
391 if (rm != ARM_R0) {
392 emit(ARM_MOV_R(tmp[1], ARM_R0), ctx);
393 emit(ARM_MOV_R(ARM_R0, rm), ctx);
394 }
395
396 /* Call appropriate function */
397 emit_mov_i(ARM_IP, op == BPF_DIV ?
398 (u32)jit_udiv32 : (u32)jit_mod32, ctx);
399 emit_blx_r(ARM_IP, ctx);
400
401 /* Save return value */
402 if (rd != ARM_R0)
403 emit(ARM_MOV_R(rd, ARM_R0), ctx);
404
405 /* Restore ARM_R0 and ARM_R1 */
406 if (rn != ARM_R1)
407 emit(ARM_MOV_R(ARM_R1, tmp[0]), ctx);
408 if (rm != ARM_R0)
409 emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
410}
411
412/* Checks whether a BPF register is stored on the scratch stack space. */
413static inline bool is_on_stack(u8 bpf_reg)
414{
415 static u8 stack_regs[] = {BPF_REG_AX, BPF_REG_3, BPF_REG_4, BPF_REG_5,
416 BPF_REG_7, BPF_REG_8, BPF_REG_9, TCALL_CNT,
417 BPF_REG_2, BPF_REG_FP};
418 int i, reg_len = sizeof(stack_regs);
419
420 for (i = 0 ; i < reg_len ; i++) {
421 if (bpf_reg == stack_regs[i])
422 return true;
423 }
424 return false;
425}
426
427static inline void emit_a32_mov_i(const u8 dst, const u32 val,
428 bool dstk, struct jit_ctx *ctx)
429{
430 const u8 *tmp = bpf2a32[TMP_REG_1];
431
432 if (dstk) {
433 emit_mov_i(tmp[1], val, ctx);
434 emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(dst)), ctx);
435 } else {
436 emit_mov_i(dst, val, ctx);
437 }
438}
439
440/* Sign extended move */
441static inline void emit_a32_mov_i64(const bool is64, const u8 dst[],
442 const u32 val, bool dstk,
443 struct jit_ctx *ctx) {
444 u32 hi = 0;
445
446 if (is64 && (val & (1<<31)))
447 hi = (u32)~0;
448 emit_a32_mov_i(dst_lo, val, dstk, ctx);
449 emit_a32_mov_i(dst_hi, hi, dstk, ctx);
450}
451
452static inline void emit_a32_add_r(const u8 dst, const u8 src,
453 const bool is64, const bool hi,
454 struct jit_ctx *ctx) {
455 /* 64 bit :
456 * adds dst_lo, dst_lo, src_lo
457 * adc dst_hi, dst_hi, src_hi
458 * 32 bit :
459 * add dst_lo, dst_lo, src_lo
460 */
461 if (!hi && is64)
462 emit(ARM_ADDS_R(dst, dst, src), ctx);
463 else if (hi && is64)
464 emit(ARM_ADC_R(dst, dst, src), ctx);
465 else
466 emit(ARM_ADD_R(dst, dst, src), ctx);
467}
468
469static inline void emit_a32_sub_r(const u8 dst, const u8 src,
470 const bool is64, const bool hi,
471 struct jit_ctx *ctx) {
472 /* 64 bit :
473 * subs dst_lo, dst_lo, src_lo
474 * sbc dst_hi, dst_hi, src_hi
475 * 32 bit :
476 * sub dst_lo, dst_lo, src_lo
477 */
478 if (!hi && is64)
479 emit(ARM_SUBS_R(dst, dst, src), ctx);
480 else if (hi && is64)
481 emit(ARM_SBC_R(dst, dst, src), ctx);
482 else
483 emit(ARM_SUB_R(dst, dst, src), ctx);
484}
485
486static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
487 const bool hi, const u8 op, struct jit_ctx *ctx){
488 switch (BPF_OP(op)) {
489 /* dst = dst + src */
490 case BPF_ADD:
491 emit_a32_add_r(dst, src, is64, hi, ctx);
492 break;
493 /* dst = dst - src */
494 case BPF_SUB:
495 emit_a32_sub_r(dst, src, is64, hi, ctx);
496 break;
497 /* dst = dst | src */
498 case BPF_OR:
499 emit(ARM_ORR_R(dst, dst, src), ctx);
500 break;
501 /* dst = dst & src */
502 case BPF_AND:
503 emit(ARM_AND_R(dst, dst, src), ctx);
504 break;
505 /* dst = dst ^ src */
506 case BPF_XOR:
507 emit(ARM_EOR_R(dst, dst, src), ctx);
508 break;
509 /* dst = dst * src */
510 case BPF_MUL:
511 emit(ARM_MUL(dst, dst, src), ctx);
512 break;
513 /* dst = dst << src */
514 case BPF_LSH:
515 emit(ARM_LSL_R(dst, dst, src), ctx);
516 break;
517 /* dst = dst >> src */
518 case BPF_RSH:
519 emit(ARM_LSR_R(dst, dst, src), ctx);
520 break;
521 /* dst = dst >> src (signed)*/
522 case BPF_ARSH:
523 emit(ARM_MOV_SR(dst, dst, SRTYPE_ASR, src), ctx);
524 break;
525 }
526}
527
528/* ALU operation (32 bit)
529 * dst = dst (op) src
530 */
531static inline void emit_a32_alu_r(const u8 dst, const u8 src,
532 bool dstk, bool sstk,
533 struct jit_ctx *ctx, const bool is64,
534 const bool hi, const u8 op) {
535 const u8 *tmp = bpf2a32[TMP_REG_1];
536 u8 rn = sstk ? tmp[1] : src;
537
538 if (sstk)
539 emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src)), ctx);
540
541 /* ALU operation */
542 if (dstk) {
543 emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx);
544 emit_alu_r(tmp[0], rn, is64, hi, op, ctx);
545 emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx);
546 } else {
547 emit_alu_r(dst, rn, is64, hi, op, ctx);
548 }
549}
550
551/* ALU operation (64 bit) */
552static inline void emit_a32_alu_r64(const bool is64, const u8 dst[],
553 const u8 src[], bool dstk,
554 bool sstk, struct jit_ctx *ctx,
555 const u8 op) {
556 emit_a32_alu_r(dst_lo, src_lo, dstk, sstk, ctx, is64, false, op);
557 if (is64)
558 emit_a32_alu_r(dst_hi, src_hi, dstk, sstk, ctx, is64, true, op);
559 else
560 emit_a32_mov_i(dst_hi, 0, dstk, ctx);
561}
562
563/* dst = src (4 bytes) */
564static inline void emit_a32_mov_r(const u8 dst, const u8 src,
565 bool dstk, bool sstk,
566 struct jit_ctx *ctx) {
567 const u8 *tmp = bpf2a32[TMP_REG_1];
568 u8 rt = sstk ? tmp[0] : src;
569
570 if (sstk)
571 emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(src)), ctx);
572 if (dstk)
573 emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst)), ctx);
574 else
575 emit(ARM_MOV_R(dst, rt), ctx);
576}
577
578/* dst = src */
579static inline void emit_a32_mov_r64(const bool is64, const u8 dst[],
580 const u8 src[], bool dstk,
581 bool sstk, struct jit_ctx *ctx) {
582 emit_a32_mov_r(dst_lo, src_lo, dstk, sstk, ctx);
583 if (is64) {
584 /* complete 8 byte move */
585 emit_a32_mov_r(dst_hi, src_hi, dstk, sstk, ctx);
586 } else {
587 /* Zero out high 4 bytes */
588 emit_a32_mov_i(dst_hi, 0, dstk, ctx);
589 }
590}
591
592/* Shift/negate operations with an immediate */
593static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk,
594 struct jit_ctx *ctx, const u8 op) {
595 const u8 *tmp = bpf2a32[TMP_REG_1];
596 u8 rd = dstk ? tmp[0] : dst;
597
598 if (dstk)
599 emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
600
601 /* Do shift operation */
602 switch (op) {
603 case BPF_LSH:
604 emit(ARM_LSL_I(rd, rd, val), ctx);
605 break;
606 case BPF_RSH:
607 emit(ARM_LSR_I(rd, rd, val), ctx);
608 break;
609 case BPF_NEG:
610 emit(ARM_RSB_I(rd, rd, val), ctx);
611 break;
612 }
613
614 if (dstk)
615 emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
616}
617
618/* dst = -dst (64 bit) */
619static inline void emit_a32_neg64(const u8 dst[], bool dstk,
620 struct jit_ctx *ctx){
621 const u8 *tmp = bpf2a32[TMP_REG_1];
622 u8 rd = dstk ? tmp[1] : dst[1];
623 u8 rm = dstk ? tmp[0] : dst[0];
624
625 /* Setup Operand */
626 if (dstk) {
627 emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
628 emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
629 }
630
631 /* Do Negate Operation */
632 emit(ARM_RSBS_I(rd, rd, 0), ctx);
633 emit(ARM_RSC_I(rm, rm, 0), ctx);
634
635 if (dstk) {
636 emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
637 emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
638 }
639}
640
641/* dst = dst << src */
642static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk,
643 bool sstk, struct jit_ctx *ctx) {
644 const u8 *tmp = bpf2a32[TMP_REG_1];
645 const u8 *tmp2 = bpf2a32[TMP_REG_2];
646
647 /* Setup Operands */
648 u8 rt = sstk ? tmp2[1] : src_lo;
649 u8 rd = dstk ? tmp[1] : dst_lo;
650 u8 rm = dstk ? tmp[0] : dst_hi;
651
652 if (sstk)
653 emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
654 if (dstk) {
655 emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
656 emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
657 }
658
659 /* Do LSH operation */
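 /*
 * ARM register-specified LSL/LSR use the bottom byte of the shift
 * register and produce 0 for shift amounts of 32 or more, so whichever
 * of the shifted terms OR-combined below does not apply for the current
 * rt simply contributes 0, giving the correct 64-bit result for any rt.
 */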
660 emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
661 emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
662 emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx);
663 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx);
664 emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx);
665 emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_ASL, rt), ctx);
666
667 if (dstk) {
668 emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
669 emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
670 } else {
671 emit(ARM_MOV_R(rd, ARM_LR), ctx);
672 emit(ARM_MOV_R(rm, ARM_IP), ctx);
673 }
674}
675
676/* dst = dst >> src (signed)*/
677static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
678 bool sstk, struct jit_ctx *ctx) {
679 const u8 *tmp = bpf2a32[TMP_REG_1];
680 const u8 *tmp2 = bpf2a32[TMP_REG_2];
681 /* Setup Operands */
682 u8 rt = sstk ? tmp2[1] : src_lo;
683 u8 rd = dstk ? tmp[1] : dst_lo;
684 u8 rm = dstk ? tmp[0] : dst_hi;
685
686 if (sstk)
687 emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
688 if (dstk) {
689 emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
690 emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
691 }
692
693 /* Do the ARSH operation */
694 emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
695 emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
696 emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
697 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
698 _emit(ARM_COND_MI, ARM_B(0), ctx);
699 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASR, tmp2[0]), ctx);
700 emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_ASR, rt), ctx);
701 if (dstk) {
702 emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
703 emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
704 } else {
705 emit(ARM_MOV_R(rd, ARM_LR), ctx);
706 emit(ARM_MOV_R(rm, ARM_IP), ctx);
707 }
708}
709
710/* dst = dst >> src */
711static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
712 bool sstk, struct jit_ctx *ctx) {
713 const u8 *tmp = bpf2a32[TMP_REG_1];
714 const u8 *tmp2 = bpf2a32[TMP_REG_2];
715 /* Setup Operands */
716 u8 rt = sstk ? tmp2[1] : src_lo;
717 u8 rd = dstk ? tmp[1] : dst_lo;
718 u8 rm = dstk ? tmp[0] : dst_hi;
719
720 if (sstk)
721 emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
722 if (dstk) {
723 emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
724 emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
725 }
726
727 /* Do LSR operation */
728 emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
729 emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
730 emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
731 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
732 emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx);
733 emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_LSR, rt), ctx);
734 if (dstk) {
735 emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
736 emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
737 } else {
738 emit(ARM_MOV_R(rd, ARM_LR), ctx);
739 emit(ARM_MOV_R(rm, ARM_IP), ctx);
740 }
741}
742
743/* dst = dst << val */
744static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk,
745 const u32 val, struct jit_ctx *ctx){
746 const u8 *tmp = bpf2a32[TMP_REG_1];
747 const u8 *tmp2 = bpf2a32[TMP_REG_2];
748 /* Setup operands */
749 u8 rd = dstk ? tmp[1] : dst_lo;
750 u8 rm = dstk ? tmp[0] : dst_hi;
751
752 if (dstk) {
753 emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
754 emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
755 }
756
757 /* Do LSH operation */
758 if (val < 32) {
759 emit(ARM_MOV_SI(tmp2[0], rm, SRTYPE_ASL, val), ctx);
760 emit(ARM_ORR_SI(rm, tmp2[0], rd, SRTYPE_LSR, 32 - val), ctx);
761 emit(ARM_MOV_SI(rd, rd, SRTYPE_ASL, val), ctx);
762 } else {
763 if (val == 32)
764 emit(ARM_MOV_R(rm, rd), ctx);
765 else
766 emit(ARM_MOV_SI(rm, rd, SRTYPE_ASL, val - 32), ctx);
767 emit(ARM_EOR_R(rd, rd, rd), ctx);
768 }
769
770 if (dstk) {
771 emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
772 emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
773 }
774}
775
776/* dst = dst >> val */
777static inline void emit_a32_lsr_i64(const u8 dst[], bool dstk,
778 const u32 val, struct jit_ctx *ctx) {
779 const u8 *tmp = bpf2a32[TMP_REG_1];
780 const u8 *tmp2 = bpf2a32[TMP_REG_2];
781 /* Setup operands */
782 u8 rd = dstk ? tmp[1] : dst_lo;
783 u8 rm = dstk ? tmp[0] : dst_hi;
784
785 if (dstk) {
786 emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
787 emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
788 }
789
790 /* Do LSR operation */
791 if (val < 32) {
792 emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx);
793 emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx);
794 emit(ARM_MOV_SI(rm, rm, SRTYPE_LSR, val), ctx);
795 } else if (val == 32) {
796 emit(ARM_MOV_R(rd, rm), ctx);
797 emit(ARM_MOV_I(rm, 0), ctx);
798 } else {
799 emit(ARM_MOV_SI(rd, rm, SRTYPE_LSR, val - 32), ctx);
800 emit(ARM_MOV_I(rm, 0), ctx);
801 }
802
803 if (dstk) {
804 emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
805 emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
806 }
807}
808
809/* dst = dst >> val (signed) */
810static inline void emit_a32_arsh_i64(const u8 dst[], bool dstk,
811 const u32 val, struct jit_ctx *ctx){
812 const u8 *tmp = bpf2a32[TMP_REG_1];
813 const u8 *tmp2 = bpf2a32[TMP_REG_2];
814 /* Setup operands */
815 u8 rd = dstk ? tmp[1] : dst_lo;
816 u8 rm = dstk ? tmp[0] : dst_hi;
817
818 if (dstk) {
819 emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
820 emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
821 }
822
823 /* Do ARSH operation */
824 if (val < 32) {
825 emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx);
826 emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx);
827 emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, val), ctx);
828 } else if (val == 32) {
829 emit(ARM_MOV_R(rd, rm), ctx);
830 emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx);
831 } else {
832 emit(ARM_MOV_SI(rd, rm, SRTYPE_ASR, val - 32), ctx);
833 emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx);
834 }
835
836 if (dstk) {
837 emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
838 emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
839 }
840}
841
842static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
843 bool sstk, struct jit_ctx *ctx) {
844 const u8 *tmp = bpf2a32[TMP_REG_1];
845 const u8 *tmp2 = bpf2a32[TMP_REG_2];
846 /* Setup operands for multiplication */
847 u8 rd = dstk ? tmp[1] : dst_lo;
848 u8 rm = dstk ? tmp[0] : dst_hi;
849 u8 rt = sstk ? tmp2[1] : src_lo;
850 u8 rn = sstk ? tmp2[0] : src_hi;
851
852 if (dstk) {
853 emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
854 emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
855 }
856 if (sstk) {
857 emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
858 emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_hi)), ctx);
859 }
860
861 /* Do Multiplication */
862 emit(ARM_MUL(ARM_IP, rd, rn), ctx);
863 emit(ARM_MUL(ARM_LR, rm, rt), ctx);
864 emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);
865
866 emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx);
867 emit(ARM_ADD_R(rm, ARM_LR, rm), ctx);
868 if (dstk) {
869 emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_lo)), ctx);
870 emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
871 } else {
872 emit(ARM_MOV_R(rd, ARM_IP), ctx);
873 }
874}
875
876/* *(size *)(dst + off) = src */
877static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
878 const s32 off, struct jit_ctx *ctx, const u8 sz){
879 const u8 *tmp = bpf2a32[TMP_REG_1];
880 u8 rd = dstk ? tmp[1] : dst;
881
882 if (dstk)
883 emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
884 if (off) {
885 emit_a32_mov_i(tmp[0], off, false, ctx);
886 emit(ARM_ADD_R(tmp[0], rd, tmp[0]), ctx);
887 rd = tmp[0];
888 }
889 switch (sz) {
890 case BPF_W:
891 /* Store a Word */
892 emit(ARM_STR_I(src, rd, 0), ctx);
893 break;
894 case BPF_H:
895 /* Store a HalfWord */
896 emit(ARM_STRH_I(src, rd, 0), ctx);
897 break;
898 case BPF_B:
899 /* Store a Byte */
900 emit(ARM_STRB_I(src, rd, 0), ctx);
901 break;
902 }
903}
904
905/* dst = *(size*)(src + off) */
906static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
907 s32 off, struct jit_ctx *ctx, const u8 sz){
908 const u8 *tmp = bpf2a32[TMP_REG_1];
909 const u8 *rd = dstk ? tmp : dst;
910 u8 rm = src;
911 s32 off_max;
912
913 if (sz == BPF_H)
914 off_max = 0xff;
915 else
916 off_max = 0xfff;
917
918 if (off < 0 || off > off_max) {
919 emit_a32_mov_i(tmp[0], off, false, ctx);
920 emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
921 rm = tmp[0];
922 off = 0;
923 } else if (rd[1] == rm) {
924 emit(ARM_MOV_R(tmp[0], rm), ctx);
925 rm = tmp[0];
926 }
927 switch (sz) {
928 case BPF_B:
929 /* Load a Byte */
930 emit(ARM_LDRB_I(rd[1], rm, off), ctx);
931 emit_a32_mov_i(dst[0], 0, dstk, ctx);
932 break;
933 case BPF_H:
934 /* Load a HalfWord */
935 emit(ARM_LDRH_I(rd[1], rm, off), ctx);
936 emit_a32_mov_i(dst[0], 0, dstk, ctx);
937 break;
938 case BPF_W:
939 /* Load a Word */
940 emit(ARM_LDR_I(rd[1], rm, off), ctx);
941 emit_a32_mov_i(dst[0], 0, dstk, ctx);
942 break;
943 case BPF_DW:
944 /* Load a Double Word */
945 emit(ARM_LDR_I(rd[1], rm, off), ctx);
946 emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
947 break;
948 }
949 if (dstk)
950 emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx);
951 if (dstk && sz == BPF_DW)
952 emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx);
953}
954
955 /* Arithmetic operation */
956static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
957 const u8 rn, struct jit_ctx *ctx, u8 op) {
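	/*
	 * rd/rt hold dst_hi/dst_lo and rm/rn hold src_hi/src_lo.
	 * The 64-bit comparisons are built from 32-bit operations:
	 *  - JSET ANDs the two halves separately and ORRs the results,
	 *    so the flags read NE iff some bit of (dst & src) is set.
	 *  - the unsigned compares test the high words and only compare
	 *    the low words when the high words are equal.
	 *  - the signed compares perform a full 64-bit subtraction
	 *    (CMP on the low words feeds its carry into SBCS on the
	 *    high words); only the flags matter, ARM_IP is scratch.
	 */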
958 switch (op) {
959 case BPF_JSET:
960 emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
961 emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
962 emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
963 break;
964 case BPF_JEQ:
965 case BPF_JNE:
966 case BPF_JGT:
967 case BPF_JGE:
968 case BPF_JLE:
969 case BPF_JLT:
970 emit(ARM_CMP_R(rd, rm), ctx);
971 _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
972 break;
973 case BPF_JSLE:
974 case BPF_JSGT:
975 emit(ARM_CMP_R(rn, rt), ctx);
976 emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
977 break;
978 case BPF_JSLT:
979 case BPF_JSGE:
980 emit(ARM_CMP_R(rt, rn), ctx);
981 emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
982 break;
983 }
984}
985
986static int out_offset = -1; /* initialized on the first pass of build_body() */
987static int emit_bpf_tail_call(struct jit_ctx *ctx)
988{
989
990 /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
991 const u8 *r2 = bpf2a32[BPF_REG_2];
992 const u8 *r3 = bpf2a32[BPF_REG_3];
993 const u8 *tmp = bpf2a32[TMP_REG_1];
994 const u8 *tmp2 = bpf2a32[TMP_REG_2];
995 const u8 *tcc = bpf2a32[TCALL_CNT];
996 const int idx0 = ctx->idx;
997#define cur_offset (ctx->idx - idx0)
998#define jmp_offset (out_offset - (cur_offset) - 2)
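/*
 * jmp_offset counts instructions and is relative to PC + 8, i.e. two
 * instructions ahead of the branch, hence the "- 2".
 */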
999 u32 off, lo, hi;
1000
1001 /* if (index >= array->map.max_entries)
1002 * goto out;
1003 */
1004 off = offsetof(struct bpf_array, map.max_entries);
1005 /* array->map.max_entries */
1006 emit_a32_mov_i(tmp[1], off, false, ctx);
1007 emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
1008 emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx);
1009 /* index is 32-bit for arrays */
1010 emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
1011 /* index >= array->map.max_entries */
1012 emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx);
1013 _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
1014
1015 /* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
1016 * goto out;
1017 * tail_call_cnt++;
1018 */
1019 lo = (u32)MAX_TAIL_CALL_CNT;
1020 hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
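	/*
	 * tail_call_cnt is kept as a 64-bit value in two stack slots:
	 * compare the high words first, the low words only when the high
	 * words are equal, then branch out on an unsigned "higher".
	 */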
1021 emit(ARM_LDR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx);
1022 emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx);
1023 emit(ARM_CMP_I(tmp[0], hi), ctx);
1024 _emit(ARM_COND_EQ, ARM_CMP_I(tmp[1], lo), ctx);
1025 _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
1026 emit(ARM_ADDS_I(tmp[1], tmp[1], 1), ctx);
1027 emit(ARM_ADC_I(tmp[0], tmp[0], 0), ctx);
1028 emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx);
1029 emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx);
1030
1031 /* prog = array->ptrs[index]
1032 * if (prog == NULL)
1033 * goto out;
1034 */
1035 off = offsetof(struct bpf_array, ptrs);
1036 emit_a32_mov_i(tmp[1], off, false, ctx);
1037 emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
1038 emit(ARM_ADD_R(tmp[1], tmp2[1], tmp[1]), ctx);
1039 emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
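	/* each entry in ptrs[] is a 32-bit pointer, so scale the index by 4 */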
1040 emit(ARM_MOV_SI(tmp[0], tmp2[1], SRTYPE_ASL, 2), ctx);
1041 emit(ARM_LDR_R(tmp[1], tmp[1], tmp[0]), ctx);
1042 emit(ARM_CMP_I(tmp[1], 0), ctx);
1043 _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
1044
1045 /* goto *(prog->bpf_func + prologue_size); */
1046 off = offsetof(struct bpf_prog, bpf_func);
1047 emit_a32_mov_i(tmp2[1], off, false, ctx);
1048 emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx);
1049 emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
1050 emit_bx_r(tmp[1], ctx);
1051
1052 /* out: */
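	/*
	 * The first pass records how many instructions this sequence
	 * emits; every later pass must emit exactly the same number,
	 * otherwise the branches computed via jmp_offset above would
	 * land in the wrong place.
	 */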
1053 if (out_offset == -1)
1054 out_offset = cur_offset;
1055 if (cur_offset != out_offset) {
1056 pr_err_once("tail_call out_offset = %d, expected %d!\n",
1057 cur_offset, out_offset);
1058 return -1;
1059 }
1060 return 0;
1061#undef cur_offset
1062#undef jmp_offset
1063}
1064
1065/* 0xabcd => 0xcdab */
1066static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
1067{
1068#if __LINUX_ARM_ARCH__ < 6
1069 const u8 *tmp2 = bpf2a32[TMP_REG_2];
1070
1071 emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
1072 emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx);
1073 emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
1074 emit(ARM_ORR_SI(rd, tmp2[0], tmp2[1], SRTYPE_LSL, 8), ctx);
1075#else /* ARMv6+ */
1076 emit(ARM_REV16(rd, rn), ctx);
1077#endif
1078}
1079
1080/* 0xabcdefgh => 0xghefcdab */
1081static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
1082{
1083#if __LINUX_ARM_ARCH__ < 6
1084 const u8 *tmp2 = bpf2a32[TMP_REG_2];
1085
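	/*
	 * No REV instruction before ARMv6: isolate each byte with shifts
	 * and masks and OR them back together in reverse order.
	 */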
1086 emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
1087 emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx);
1088 emit(ARM_ORR_SI(ARM_IP, tmp2[0], tmp2[1], SRTYPE_LSL, 24), ctx);
1089
1090 emit(ARM_MOV_SI(tmp2[1], rn, SRTYPE_LSR, 8), ctx);
1091 emit(ARM_AND_I(tmp2[1], tmp2[1], 0xff), ctx);
1092 emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 16), ctx);
1093 emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
1094 emit(ARM_MOV_SI(tmp2[0], tmp2[0], SRTYPE_LSL, 8), ctx);
1095 emit(ARM_ORR_SI(tmp2[0], tmp2[0], tmp2[1], SRTYPE_LSL, 16), ctx);
1096 emit(ARM_ORR_R(rd, ARM_IP, tmp2[0]), ctx);
1097
1098#else /* ARMv6+ */
1099 emit(ARM_REV(rd, rn), ctx);
1100#endif
1101}
1102
1103 // Load a 64-bit BPF register from its stack slots and push it onto the ARM stack; 'shift' compensates for SP having already moved down by earlier pushes
1104static inline void emit_push_r64(const u8 src[], const u8 shift,
1105 struct jit_ctx *ctx)
1106{
1107 const u8 *tmp2 = bpf2a32[TMP_REG_2];
1108 u16 reg_set = 0;
1109
1110 emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(src[1]+shift)), ctx);
1111 emit(ARM_LDR_I(tmp2[0], ARM_SP, STACK_VAR(src[0]+shift)), ctx);
1112
1113 reg_set = (1 << tmp2[1]) | (1 << tmp2[0]);
1114 emit(ARM_PUSH(reg_set), ctx);
1115}
1116
1117static void build_prologue(struct jit_ctx *ctx)
1118{
1119 const u8 r0 = bpf2a32[BPF_REG_0][1];
1120 const u8 r2 = bpf2a32[BPF_REG_1][1];
1121 const u8 r3 = bpf2a32[BPF_REG_1][0];
1122 const u8 r4 = bpf2a32[BPF_REG_6][1];
1123 const u8 fplo = bpf2a32[BPF_REG_FP][1];
1124 const u8 fphi = bpf2a32[BPF_REG_FP][0];
1125 const u8 *tcc = bpf2a32[TCALL_CNT];
1126
1127 /* Save callee saved registers. */
1128#ifdef CONFIG_FRAME_POINTER
1129 u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
1130 emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
1131 emit(ARM_PUSH(reg_set), ctx);
1132 emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
1133#else
1134 emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
1135 emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
1136#endif
1137 /* Save frame pointer for later */
1138 emit(ARM_SUB_I(ARM_IP, ARM_SP, SCRATCH_SIZE), ctx);
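	/* ARM_IP now points SCRATCH_SIZE bytes below the callee-save frame;
	 * it is installed as the BPF frame pointer (BPF_REG_FP) further down.
	 */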
1139
1140 ctx->stack_size = imm8m(STACK_SIZE);
1141
1142 /* Set up function call stack */
1143 emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
1144
1145 /* Set up BPF prog stack base register */
1146 emit_a32_mov_r(fplo, ARM_IP, true, false, ctx);
1147 emit_a32_mov_i(fphi, 0, true, ctx);
1148
1149 /* mov r4, 0 */
1150 emit(ARM_MOV_I(r4, 0), ctx);
1151
1152 /* Move BPF_CTX to BPF_R1 */
1153 emit(ARM_MOV_R(r3, r4), ctx);
1154 emit(ARM_MOV_R(r2, r0), ctx);
1155 /* Initialize Tail Count */
1156 emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[0])), ctx);
1157 emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[1])), ctx);
1158 /* end of prologue */
1159}
1160
1161/* restore callee saved registers. */
1162static void build_epilogue(struct jit_ctx *ctx)
1163{
1164#ifdef CONFIG_FRAME_POINTER
1165 /* When using frame pointers, some additional registers need to
1166 * be loaded. */
1167 u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
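	/* CALLEE_POP_MASK includes ARM_PC, so restoring the registers
	 * also returns to the caller.
	 */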
1168 emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
1169 emit(ARM_LDM(ARM_SP, reg_set), ctx);
1170#else
1171 /* Restore callee saved registers. */
1172 emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
1173 emit(ARM_POP(CALLEE_POP_MASK), ctx);
1174#endif
1175}
1176
1177/*
1178  * Convert an eBPF instruction to a native (ARM) instruction,
1179  * i.e. JIT one eBPF instruction.
1180  * Returns:
1181 * 0 - Successfully JITed an 8-byte eBPF instruction
1182 * >0 - Successfully JITed a 16-byte eBPF instruction
1183 * <0 - Failed to JIT.
1184 */
1185static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1186{
1187 const u8 code = insn->code;
1188 const u8 *dst = bpf2a32[insn->dst_reg];
1189 const u8 *src = bpf2a32[insn->src_reg];
1190 const u8 *tmp = bpf2a32[TMP_REG_1];
1191 const u8 *tmp2 = bpf2a32[TMP_REG_2];
1192 const s16 off = insn->off;
1193 const s32 imm = insn->imm;
1194 const int i = insn - ctx->prog->insnsi;
1195 const bool is64 = BPF_CLASS(code) == BPF_ALU64;
1196 const bool dstk = is_on_stack(insn->dst_reg);
1197 const bool sstk = is_on_stack(insn->src_reg);
1198 u8 rd, rt, rm, rn;
1199 s32 jmp_offset;
1200
1201#define check_imm(bits, imm) do { \
1202 if ((((imm) > 0) && ((imm) >> (bits))) || \
1203 (((imm) < 0) && (~(imm) >> (bits)))) { \
1204 pr_info("[%2d] imm=%d(0x%x) out of range\n", \
1205 i, imm, imm); \
1206 return -EINVAL; \
1207 } \
1208} while (0)
1209#define check_imm24(imm) check_imm(24, imm)
1210
1211 switch (code) {
1212 /* ALU operations */
1213
1214 /* dst = src */
1215 case BPF_ALU | BPF_MOV | BPF_K:
1216 case BPF_ALU | BPF_MOV | BPF_X:
1217 case BPF_ALU64 | BPF_MOV | BPF_K:
1218 case BPF_ALU64 | BPF_MOV | BPF_X:
1219 switch (BPF_SRC(code)) {
1220 case BPF_X:
1221 emit_a32_mov_r64(is64, dst, src, dstk, sstk, ctx);
1222 break;
1223 case BPF_K:
1224 /* Sign-extend immediate value to destination reg */
1225 emit_a32_mov_i64(is64, dst, imm, dstk, ctx);
1226 break;
1227 }
1228 break;
1229 /* dst = dst + src/imm */
1230 /* dst = dst - src/imm */
1231 /* dst = dst | src/imm */
1232 /* dst = dst & src/imm */
1233 /* dst = dst ^ src/imm */
1234 /* dst = dst * src/imm */
1235 /* dst = dst << src */
1236 /* dst = dst >> src */
1237 case BPF_ALU | BPF_ADD | BPF_K:
1238 case BPF_ALU | BPF_ADD | BPF_X:
1239 case BPF_ALU | BPF_SUB | BPF_K:
1240 case BPF_ALU | BPF_SUB | BPF_X:
1241 case BPF_ALU | BPF_OR | BPF_K:
1242 case BPF_ALU | BPF_OR | BPF_X:
1243 case BPF_ALU | BPF_AND | BPF_K:
1244 case BPF_ALU | BPF_AND | BPF_X:
1245 case BPF_ALU | BPF_XOR | BPF_K:
1246 case BPF_ALU | BPF_XOR | BPF_X:
1247 case BPF_ALU | BPF_MUL | BPF_K:
1248 case BPF_ALU | BPF_MUL | BPF_X:
1249 case BPF_ALU | BPF_LSH | BPF_X:
1250 case BPF_ALU | BPF_RSH | BPF_X:
1251 case BPF_ALU | BPF_ARSH | BPF_K:
1252 case BPF_ALU | BPF_ARSH | BPF_X:
1253 case BPF_ALU64 | BPF_ADD | BPF_K:
1254 case BPF_ALU64 | BPF_ADD | BPF_X:
1255 case BPF_ALU64 | BPF_SUB | BPF_K:
1256 case BPF_ALU64 | BPF_SUB | BPF_X:
1257 case BPF_ALU64 | BPF_OR | BPF_K:
1258 case BPF_ALU64 | BPF_OR | BPF_X:
1259 case BPF_ALU64 | BPF_AND | BPF_K:
1260 case BPF_ALU64 | BPF_AND | BPF_X:
1261 case BPF_ALU64 | BPF_XOR | BPF_K:
1262 case BPF_ALU64 | BPF_XOR | BPF_X:
1263 switch (BPF_SRC(code)) {
1264 case BPF_X:
1265 emit_a32_alu_r64(is64, dst, src, dstk, sstk,
1266 ctx, BPF_OP(code));
1267 break;
1268 case BPF_K:
1269			/* Move the immediate value into the temporary
1270			 * register pair first and then do the ALU
1271			 * operation on it: emit_a32_mov_i64() sign-extends
1272			 * the immediate to 64 bits, so the operation is
1273			 * then safe to perform on the temporaries.
1274 */
1275 emit_a32_mov_i64(is64, tmp2, imm, false, ctx);
1276 emit_a32_alu_r64(is64, dst, tmp2, dstk, false,
1277 ctx, BPF_OP(code));
1278 break;
1279 }
1280 break;
1281 /* dst = dst / src(imm) */
1282 /* dst = dst % src(imm) */
1283 case BPF_ALU | BPF_DIV | BPF_K:
1284 case BPF_ALU | BPF_DIV | BPF_X:
1285 case BPF_ALU | BPF_MOD | BPF_K:
1286 case BPF_ALU | BPF_MOD | BPF_X:
1287 rt = src_lo;
1288 rd = dstk ? tmp2[1] : dst_lo;
1289 if (dstk)
1290 emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
1291 switch (BPF_SRC(code)) {
1292 case BPF_X:
1293 rt = sstk ? tmp2[0] : rt;
1294 if (sstk)
1295 emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)),
1296 ctx);
1297 break;
1298 case BPF_K:
1299 rt = tmp2[0];
1300 emit_a32_mov_i(rt, imm, false, ctx);
1301 break;
1302 }
1303 emit_udivmod(rd, rd, rt, ctx, BPF_OP(code));
1304 if (dstk)
1305 emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
1306 emit_a32_mov_i(dst_hi, 0, dstk, ctx);
1307 break;
1308 case BPF_ALU64 | BPF_DIV | BPF_K:
1309 case BPF_ALU64 | BPF_DIV | BPF_X:
1310 case BPF_ALU64 | BPF_MOD | BPF_K:
1311 case BPF_ALU64 | BPF_MOD | BPF_X:
1312 goto notyet;
1313 /* dst = dst >> imm */
1314 /* dst = dst << imm */
1315 case BPF_ALU | BPF_RSH | BPF_K:
1316 case BPF_ALU | BPF_LSH | BPF_K:
1317 if (unlikely(imm > 31))
1318 return -EINVAL;
1319 if (imm)
1320 emit_a32_alu_i(dst_lo, imm, dstk, ctx, BPF_OP(code));
1321 emit_a32_mov_i(dst_hi, 0, dstk, ctx);
1322 break;
1323 /* dst = dst << imm */
1324 case BPF_ALU64 | BPF_LSH | BPF_K:
1325 if (unlikely(imm > 63))
1326 return -EINVAL;
1327 emit_a32_lsh_i64(dst, dstk, imm, ctx);
1328 break;
1329 /* dst = dst >> imm */
1330 case BPF_ALU64 | BPF_RSH | BPF_K:
1331 if (unlikely(imm > 63))
1332 return -EINVAL;
1333 emit_a32_lsr_i64(dst, dstk, imm, ctx);
1334 break;
1335 /* dst = dst << src */
1336 case BPF_ALU64 | BPF_LSH | BPF_X:
1337 emit_a32_lsh_r64(dst, src, dstk, sstk, ctx);
1338 break;
1339 /* dst = dst >> src */
1340 case BPF_ALU64 | BPF_RSH | BPF_X:
1341 emit_a32_lsr_r64(dst, src, dstk, sstk, ctx);
1342 break;
1343 /* dst = dst >> src (signed) */
1344 case BPF_ALU64 | BPF_ARSH | BPF_X:
1345 emit_a32_arsh_r64(dst, src, dstk, sstk, ctx);
1346 break;
1347 /* dst = dst >> imm (signed) */
1348 case BPF_ALU64 | BPF_ARSH | BPF_K:
1349 if (unlikely(imm > 63))
1350 return -EINVAL;
1351 emit_a32_arsh_i64(dst, dstk, imm, ctx);
1352 break;
1353	/* dst = -dst */
1354 case BPF_ALU | BPF_NEG:
1355 emit_a32_alu_i(dst_lo, 0, dstk, ctx, BPF_OP(code));
1356 emit_a32_mov_i(dst_hi, 0, dstk, ctx);
1357 break;
1358	/* dst = -dst (64 bit) */
1359 case BPF_ALU64 | BPF_NEG:
1360 emit_a32_neg64(dst, dstk, ctx);
1361 break;
1362 /* dst = dst * src/imm */
1363 case BPF_ALU64 | BPF_MUL | BPF_X:
1364 case BPF_ALU64 | BPF_MUL | BPF_K:
1365 switch (BPF_SRC(code)) {
1366 case BPF_X:
1367 emit_a32_mul_r64(dst, src, dstk, sstk, ctx);
1368 break;
1369 case BPF_K:
1370			/* Move the immediate value into the temporary
1371			 * register pair first and then do the
1372			 * multiplication on it: emit_a32_mov_i64()
1373			 * sign-extends the immediate to 64 bits, so the
1374			 * operation is then safe to perform.
1375 */
1376 emit_a32_mov_i64(is64, tmp2, imm, false, ctx);
1377 emit_a32_mul_r64(dst, tmp2, dstk, false, ctx);
1378 break;
1379 }
1380 break;
1381 /* dst = htole(dst) */
1382 /* dst = htobe(dst) */
1383 case BPF_ALU | BPF_END | BPF_FROM_LE:
1384 case BPF_ALU | BPF_END | BPF_FROM_BE:
1385 rd = dstk ? tmp[0] : dst_hi;
1386 rt = dstk ? tmp[1] : dst_lo;
1387 if (dstk) {
1388 emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
1389 emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
1390 }
1391 if (BPF_SRC(code) == BPF_FROM_LE)
1392 goto emit_bswap_uxt;
1393 switch (imm) {
1394 case 16:
1395 emit_rev16(rt, rt, ctx);
1396 goto emit_bswap_uxt;
1397 case 32:
1398 emit_rev32(rt, rt, ctx);
1399 goto emit_bswap_uxt;
1400 case 64:
1401 emit_rev32(ARM_LR, rt, ctx);
1402 emit_rev32(rt, rd, ctx);
1403 emit(ARM_MOV_R(rd, ARM_LR), ctx);
1404 break;
1405 }
1406 goto exit;
1407emit_bswap_uxt:
1408 switch (imm) {
1409 case 16:
1410 /* zero-extend 16 bits into 64 bits */
1411#if __LINUX_ARM_ARCH__ < 6
1412 emit_a32_mov_i(tmp2[1], 0xffff, false, ctx);
1413 emit(ARM_AND_R(rt, rt, tmp2[1]), ctx);
1414#else /* ARMv6+ */
1415 emit(ARM_UXTH(rt, rt), ctx);
1416#endif
1417 emit(ARM_EOR_R(rd, rd, rd), ctx);
1418 break;
1419 case 32:
1420 /* zero-extend 32 bits into 64 bits */
1421 emit(ARM_EOR_R(rd, rd, rd), ctx);
1422 break;
1423 case 64:
1424 /* nop */
1425 break;
1426 }
1427exit:
1428 if (dstk) {
1429 emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
1430 emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
1431 }
1432 break;
1433 /* dst = imm64 */
1434 case BPF_LD | BPF_IMM | BPF_DW:
1435 {
1436 const struct bpf_insn insn1 = insn[1];
1437 u32 hi, lo = imm;
1438
1439 hi = insn1.imm;
1440 emit_a32_mov_i(dst_lo, lo, dstk, ctx);
1441 emit_a32_mov_i(dst_hi, hi, dstk, ctx);
1442
1443 return 1;
1444 }
1445 /* LDX: dst = *(size *)(src + off) */
1446 case BPF_LDX | BPF_MEM | BPF_W:
1447 case BPF_LDX | BPF_MEM | BPF_H:
1448 case BPF_LDX | BPF_MEM | BPF_B:
1449 case BPF_LDX | BPF_MEM | BPF_DW:
1450 rn = sstk ? tmp2[1] : src_lo;
1451 if (sstk)
1452 emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
1453 emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code));
1454 break;
1455 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
1456 case BPF_LD | BPF_ABS | BPF_W:
1457 case BPF_LD | BPF_ABS | BPF_H:
1458 case BPF_LD | BPF_ABS | BPF_B:
1459 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
1460 case BPF_LD | BPF_IND | BPF_W:
1461 case BPF_LD | BPF_IND | BPF_H:
1462 case BPF_LD | BPF_IND | BPF_B:
1463 {
1464 const u8 r4 = bpf2a32[BPF_REG_6][1]; /* r4 = ptr to sk_buff */
1465		const u8 r0 = bpf2a32[BPF_REG_0][1]; /* r0: struct sk_buff *skb */
1466						     /* (also the return value) */
1467 const u8 r1 = bpf2a32[BPF_REG_0][0]; /* r1: int k */
1468 const u8 r2 = bpf2a32[BPF_REG_1][1]; /* r2: unsigned int size */
1469 const u8 r3 = bpf2a32[BPF_REG_1][0]; /* r3: void *buffer */
1470 const u8 r6 = bpf2a32[TMP_REG_1][1]; /* r6: void *(*func)(..) */
1471 int size;
1472
1473 /* Setting up first argument */
1474 emit(ARM_MOV_R(r0, r4), ctx);
1475
1476 /* Setting up second argument */
1477 emit_a32_mov_i(r1, imm, false, ctx);
1478 if (BPF_MODE(code) == BPF_IND)
1479 emit_a32_alu_r(r1, src_lo, false, sstk, ctx,
1480 false, false, BPF_ADD);
1481
1482 /* Setting up third argument */
1483 switch (BPF_SIZE(code)) {
1484 case BPF_W:
1485 size = 4;
1486 break;
1487 case BPF_H:
1488 size = 2;
1489 break;
1490 case BPF_B:
1491 size = 1;
1492 break;
1493 default:
1494 return -EINVAL;
1495 }
1496 emit_a32_mov_i(r2, size, false, ctx);
1497
1498 /* Setting up fourth argument */
1499 emit(ARM_ADD_I(r3, ARM_SP, imm8m(SKB_BUFFER)), ctx);
1500
1501 /* Setting up function pointer to call */
1502 emit_a32_mov_i(r6, (unsigned int)bpf_load_pointer, false, ctx);
1503 emit_blx_r(r6, ctx);
1504
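		/* Clear the high word of R0 now: if the helper returned NULL
		 * we branch straight to the epilogue and the program returns
		 * 0; otherwise the value loaded below is zero-extended.
		 */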
1505 emit(ARM_EOR_R(r1, r1, r1), ctx);
1506		/* Check whether the returned pointer is NULL:
1507		 * if it is, jump to the epilogue (R0 is already 0),
1508		 * otherwise load the value it points to.
1509 */
1510 emit(ARM_CMP_I(r0, 0), ctx);
1511 jmp_offset = epilogue_offset(ctx);
1512 check_imm24(jmp_offset);
1513 _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
1514
1515 /* Load value from the address */
1516 switch (BPF_SIZE(code)) {
1517 case BPF_W:
1518 emit(ARM_LDR_I(r0, r0, 0), ctx);
1519 emit_rev32(r0, r0, ctx);
1520 break;
1521 case BPF_H:
1522 emit(ARM_LDRH_I(r0, r0, 0), ctx);
1523 emit_rev16(r0, r0, ctx);
1524 break;
1525 case BPF_B:
1526 emit(ARM_LDRB_I(r0, r0, 0), ctx);
1527 /* No need to reverse */
1528 break;
1529 }
1530 break;
1531 }
1532 /* ST: *(size *)(dst + off) = imm */
1533 case BPF_ST | BPF_MEM | BPF_W:
1534 case BPF_ST | BPF_MEM | BPF_H:
1535 case BPF_ST | BPF_MEM | BPF_B:
1536 case BPF_ST | BPF_MEM | BPF_DW:
1537 switch (BPF_SIZE(code)) {
1538 case BPF_DW:
1539 /* Sign-extend immediate value into temp reg */
1540 emit_a32_mov_i64(true, tmp2, imm, false, ctx);
1541 emit_str_r(dst_lo, tmp2[1], dstk, off, ctx, BPF_W);
1542 emit_str_r(dst_lo, tmp2[0], dstk, off+4, ctx, BPF_W);
1543 break;
1544 case BPF_W:
1545 case BPF_H:
1546 case BPF_B:
1547 emit_a32_mov_i(tmp2[1], imm, false, ctx);
1548 emit_str_r(dst_lo, tmp2[1], dstk, off, ctx,
1549 BPF_SIZE(code));
1550 break;
1551 }
1552 break;
1553 /* STX XADD: lock *(u32 *)(dst + off) += src */
1554 case BPF_STX | BPF_XADD | BPF_W:
1555 /* STX XADD: lock *(u64 *)(dst + off) += src */
1556 case BPF_STX | BPF_XADD | BPF_DW:
1557 goto notyet;
1558 /* STX: *(size *)(dst + off) = src */
1559 case BPF_STX | BPF_MEM | BPF_W:
1560 case BPF_STX | BPF_MEM | BPF_H:
1561 case BPF_STX | BPF_MEM | BPF_B:
1562 case BPF_STX | BPF_MEM | BPF_DW:
1563 {
1564 u8 sz = BPF_SIZE(code);
1565
1566 rn = sstk ? tmp2[1] : src_lo;
1567 rm = sstk ? tmp2[0] : src_hi;
1568 if (sstk) {
1569 emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
1570 emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx);
1571 }
1572
1573 /* Store the value */
1574 if (BPF_SIZE(code) == BPF_DW) {
1575 emit_str_r(dst_lo, rn, dstk, off, ctx, BPF_W);
1576 emit_str_r(dst_lo, rm, dstk, off+4, ctx, BPF_W);
1577 } else {
1578 emit_str_r(dst_lo, rn, dstk, off, ctx, sz);
1579 }
1580 break;
1581 }
1582 /* PC += off if dst == src */
1583 /* PC += off if dst > src */
1584 /* PC += off if dst >= src */
1585 /* PC += off if dst < src */
1586 /* PC += off if dst <= src */
1587 /* PC += off if dst != src */
1588 /* PC += off if dst > src (signed) */
1589 /* PC += off if dst >= src (signed) */
1590 /* PC += off if dst < src (signed) */
1591 /* PC += off if dst <= src (signed) */
1592 /* PC += off if dst & src */
1593 case BPF_JMP | BPF_JEQ | BPF_X:
1594 case BPF_JMP | BPF_JGT | BPF_X:
1595 case BPF_JMP | BPF_JGE | BPF_X:
1596 case BPF_JMP | BPF_JNE | BPF_X:
1597 case BPF_JMP | BPF_JSGT | BPF_X:
1598 case BPF_JMP | BPF_JSGE | BPF_X:
1599 case BPF_JMP | BPF_JSET | BPF_X:
1600 case BPF_JMP | BPF_JLE | BPF_X:
1601 case BPF_JMP | BPF_JLT | BPF_X:
1602 case BPF_JMP | BPF_JSLT | BPF_X:
1603 case BPF_JMP | BPF_JSLE | BPF_X:
1604 /* Setup source registers */
1605 rm = sstk ? tmp2[0] : src_hi;
1606 rn = sstk ? tmp2[1] : src_lo;
1607 if (sstk) {
1608 emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
1609 emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx);
1610 }
1611 goto go_jmp;
1612 /* PC += off if dst == imm */
1613 /* PC += off if dst > imm */
1614 /* PC += off if dst >= imm */
1615 /* PC += off if dst < imm */
1616 /* PC += off if dst <= imm */
1617 /* PC += off if dst != imm */
1618 /* PC += off if dst > imm (signed) */
1619 /* PC += off if dst >= imm (signed) */
1620 /* PC += off if dst < imm (signed) */
1621 /* PC += off if dst <= imm (signed) */
1622 /* PC += off if dst & imm */
1623 case BPF_JMP | BPF_JEQ | BPF_K:
1624 case BPF_JMP | BPF_JGT | BPF_K:
1625 case BPF_JMP | BPF_JGE | BPF_K:
1626 case BPF_JMP | BPF_JNE | BPF_K:
1627 case BPF_JMP | BPF_JSGT | BPF_K:
1628 case BPF_JMP | BPF_JSGE | BPF_K:
1629 case BPF_JMP | BPF_JSET | BPF_K:
1630 case BPF_JMP | BPF_JLT | BPF_K:
1631 case BPF_JMP | BPF_JLE | BPF_K:
1632 case BPF_JMP | BPF_JSLT | BPF_K:
1633 case BPF_JMP | BPF_JSLE | BPF_K:
1634 if (off == 0)
1635 break;
1636 rm = tmp2[0];
1637 rn = tmp2[1];
1638 /* Sign-extend immediate value */
1639 emit_a32_mov_i64(true, tmp2, imm, false, ctx);
1640go_jmp:
1641 /* Setup destination register */
1642 rd = dstk ? tmp[0] : dst_hi;
1643 rt = dstk ? tmp[1] : dst_lo;
1644 if (dstk) {
1645 emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
1646 emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
1647 }
1648
1649 /* Check for the condition */
1650 emit_ar_r(rd, rt, rm, rn, ctx, BPF_OP(code));
1651
1652 /* Setup JUMP instruction */
1653 jmp_offset = bpf2a32_offset(i+off, i, ctx);
1654 switch (BPF_OP(code)) {
1655 case BPF_JNE:
1656 case BPF_JSET:
1657 _emit(ARM_COND_NE, ARM_B(jmp_offset), ctx);
1658 break;
1659 case BPF_JEQ:
1660 _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
1661 break;
1662 case BPF_JGT:
1663 _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
1664 break;
1665 case BPF_JGE:
1666 _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
1667 break;
1668 case BPF_JSGT:
1669 _emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
1670 break;
1671 case BPF_JSGE:
1672 _emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
1673 break;
1674 case BPF_JLE:
1675 _emit(ARM_COND_LS, ARM_B(jmp_offset), ctx);
1676 break;
1677 case BPF_JLT:
1678 _emit(ARM_COND_CC, ARM_B(jmp_offset), ctx);
1679 break;
1680 case BPF_JSLT:
1681 _emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
1682 break;
1683 case BPF_JSLE:
1684 _emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
1685 break;
1686 }
1687 break;
1688 /* JMP OFF */
1689 case BPF_JMP | BPF_JA:
1690 {
1691 if (off == 0)
1692 break;
1693 jmp_offset = bpf2a32_offset(i+off, i, ctx);
1694 check_imm24(jmp_offset);
1695 emit(ARM_B(jmp_offset), ctx);
1696 break;
1697 }
1698 /* tail call */
1699 case BPF_JMP | BPF_TAIL_CALL:
1700 if (emit_bpf_tail_call(ctx))
1701 return -EFAULT;
1702 break;
1703 /* function call */
1704 case BPF_JMP | BPF_CALL:
1705 {
1706 const u8 *r0 = bpf2a32[BPF_REG_0];
1707 const u8 *r1 = bpf2a32[BPF_REG_1];
1708 const u8 *r2 = bpf2a32[BPF_REG_2];
1709 const u8 *r3 = bpf2a32[BPF_REG_3];
1710 const u8 *r4 = bpf2a32[BPF_REG_4];
1711 const u8 *r5 = bpf2a32[BPF_REG_5];
1712 const u32 func = (u32)__bpf_call_base + (u32)imm;
1713
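		/* Per the AAPCS, BPF R1 and R2 (64 bits each) are passed in
		 * r0-r3; BPF R3-R5 go on the stack, pushed in reverse order
		 * so that R3 ends up at the lowest address. The shift passed
		 * to emit_push_r64() compensates for SP having already moved
		 * down 8 bytes per earlier push.
		 */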
1714 emit_a32_mov_r64(true, r0, r1, false, false, ctx);
1715 emit_a32_mov_r64(true, r1, r2, false, true, ctx);
1716 emit_push_r64(r5, 0, ctx);
1717 emit_push_r64(r4, 8, ctx);
1718 emit_push_r64(r3, 16, ctx);
1719
1720 emit_a32_mov_i(tmp[1], func, false, ctx);
1721 emit_blx_r(tmp[1], ctx);
1722
1723		emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // caller pops the 3 x 8 bytes of stack arguments
1724 break;
1725 }
1726 /* function return */
1727 case BPF_JMP | BPF_EXIT:
1728		/* Optimization: when the last instruction is EXIT,
1729		 * simply fall through to the epilogue.
1730 */
1731 if (i == ctx->prog->len - 1)
1732 break;
1733 jmp_offset = epilogue_offset(ctx);
1734 check_imm24(jmp_offset);
1735 emit(ARM_B(jmp_offset), ctx);
1736 break;
1737notyet:
1738 pr_info_once("*** NOT YET: opcode %02x ***\n", code);
1739 return -EFAULT;
1740 default:
1741 pr_err_once("unknown opcode %02x\n", code);
1742 return -EINVAL;
1743 }
1744
1745 if (ctx->flags & FLAG_IMM_OVERFLOW)
1746 /*
1747 * this instruction generated an overflow when
1748 * trying to access the literal pool, so
1749 * delegate this filter to the kernel interpreter.
1750 */
1751 return -1;
1752 return 0;
1753}
1754
1755static int build_body(struct jit_ctx *ctx)
1756{
1757 const struct bpf_prog *prog = ctx->prog;
1758 unsigned int i;
1759
1760 for (i = 0; i < prog->len; i++) {
1761 const struct bpf_insn *insn = &(prog->insnsi[i]);
1762 int ret;
1763
1764 ret = build_insn(insn, ctx);
1765
1766		/* ret > 0 means a 64-bit immediate (BPF_LD | BPF_IMM | BPF_DW) was loaded and consumed two BPF insns; skip the second slot. */
1767 if (ret > 0) {
1768 i++;
1769 if (ctx->target == NULL)
1770 ctx->offsets[i] = ctx->idx;
1771 continue;
1772 }
1773
1774 if (ctx->target == NULL)
1775 ctx->offsets[i] = ctx->idx;
1776
1777		/* If unsuccessful, return with the error code */
1778 if (ret)
1779 return ret;
1780 }
1781 return 0;
1782}
1783
1784static int validate_code(struct jit_ctx *ctx)
1785{
1786 int i;
1787
1788 for (i = 0; i < ctx->idx; i++) {
1789 if (ctx->target[i] == __opcode_to_mem_arm(ARM_INST_UDF))
1790 return -1;
1791 }
1792
1793 return 0;
1794}
1795
1796void bpf_jit_compile(struct bpf_prog *prog)
1797{
1798 /* Nothing to do here. We support Internal BPF. */
1799}
1800
1801struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1802{
1803 struct bpf_prog *tmp, *orig_prog = prog;
1804 struct bpf_binary_header *header;
1805 bool tmp_blinded = false;
1806 struct jit_ctx ctx;
1807 unsigned int tmp_idx;
1808 unsigned int image_size;
1809 u8 *image_ptr;
1810
1811 /* If BPF JIT was not enabled then we must fall back to
1812 * the interpreter.
1813 */
1814 if (!prog->jit_requested)
1815 return orig_prog;
1816
1817 /* If constant blinding was enabled and we failed during blinding
1818 * then we must fall back to the interpreter. Otherwise, we save
1819 * the new JITed code.
1820 */
1821 tmp = bpf_jit_blind_constants(prog);
1822
1823 if (IS_ERR(tmp))
1824 return orig_prog;
1825 if (tmp != prog) {
1826 tmp_blinded = true;
1827 prog = tmp;
1828 }
1829
1830 memset(&ctx, 0, sizeof(ctx));
1831 ctx.prog = prog;
1832
1833	/* If we cannot allocate memory for offsets[], we must
1834	 * fall back to the interpreter.
1835 */
1836 ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
1837 if (ctx.offsets == NULL) {
1838 prog = orig_prog;
1839 goto out;
1840 }
1841
1842	/* 1) fake pass to find the length of the JITed code,
1843	 * to compute ctx->offsets and other context variables
1844	 * needed to generate the final JITed code.
1845	 * Also, calculate a random starting pointer for the JITed
1846	 * code, which is prefixed by a random number of fault instructions.
1847 *
1848 * If the first pass fails then there is no chance of it
1849 * being successful in the second pass, so just fall back
1850 * to the interpreter.
1851 */
1852 if (build_body(&ctx)) {
1853 prog = orig_prog;
1854 goto out_off;
1855 }
1856
1857 tmp_idx = ctx.idx;
1858 build_prologue(&ctx);
1859 ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
1860
1861 ctx.epilogue_offset = ctx.idx;
1862
1863#if __LINUX_ARM_ARCH__ < 7
1864 tmp_idx = ctx.idx;
1865 build_epilogue(&ctx);
1866 ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
1867
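	/* Reserve image space for the pre-ARMv7 literal pool: the
	 * immediates collected in imms[] are emitted after the epilogue.
	 */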
1868 ctx.idx += ctx.imm_count;
1869 if (ctx.imm_count) {
1870 ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL);
1871 if (ctx.imms == NULL) {
1872 prog = orig_prog;
1873 goto out_off;
1874 }
1875 }
1876#else
1877	/* on ARMv7 there is no literal pool, so the epilogue needs no special handling */
1878 build_epilogue(&ctx);
1879#endif
1880	/* Now we can get the actual image size of the JITed ARM code.
1881	 * Thumb-2 instructions are currently not considered for the
1882	 * JIT, although they could decrease the size of the image.
1883	 *
1884	 * As each ARM instruction is 32 bits long, the number of JITed
1885	 * instructions translates directly into the size required to
1886	 * store the JITed code.
1887 */
1888 image_size = sizeof(u32) * ctx.idx;
1889
1890 /* Now we know the size of the structure to make */
1891 header = bpf_jit_binary_alloc(image_size, &image_ptr,
1892 sizeof(u32), jit_fill_hole);
1893	/* If we cannot allocate memory for the image, we must
1894	 * fall back to the interpreter.
1895 */
1896 if (header == NULL) {
1897 prog = orig_prog;
1898 goto out_imms;
1899 }
1900
1901	/* 2) Actual pass to generate the final JIT code */
1902 ctx.target = (u32 *) image_ptr;
1903 ctx.idx = 0;
1904
1905 build_prologue(&ctx);
1906
1907 /* If building the body of the JITed code fails somehow,
1908	 * we fall back to the interpreter.
1909 */
1910 if (build_body(&ctx) < 0) {
1911 image_ptr = NULL;
1912 bpf_jit_binary_free(header);
1913 prog = orig_prog;
1914 goto out_imms;
1915 }
1916 build_epilogue(&ctx);
1917
1918	/* 3) Extra pass to validate the JITed code */
1919 if (validate_code(&ctx)) {
1920 image_ptr = NULL;
1921 bpf_jit_binary_free(header);
1922 prog = orig_prog;
1923 goto out_imms;
1924 }
1925 flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
1926
1927 if (bpf_jit_enable > 1)
1928 /* there are 2 passes here */
1929 bpf_jit_dump(prog->len, image_size, 2, ctx.target);
1930
1931 set_memory_ro((unsigned long)header, header->pages);
1932 prog->bpf_func = (void *)ctx.target;
1933 prog->jited = 1;
1934 prog->jited_len = image_size;
1935
1936out_imms:
1937#if __LINUX_ARM_ARCH__ < 7
1938 if (ctx.imm_count)
1939 kfree(ctx.imms);
1940#endif
1941out_off:
1942 kfree(ctx.offsets);
1943out:
1944 if (tmp_blinded)
1945 bpf_jit_prog_release_other(prog, prog == orig_prog ?
1946 tmp : orig_prog);
1947 return prog;
1948}
1949