// SPDX-License-Identifier: GPL-2.0-only
/*
 * Just-In-Time compiler for eBPF bytecode on MIPS.
 * Implementation of JIT functions common to 32-bit and 64-bit CPUs.
 *
 * Copyright (c) 2021 Anyfi Networks AB.
 * Author: Johan Almbladh <johan.almbladh@gmail.com>
 *
 * Based on code and ideas from
 * Copyright (c) 2017 Cavium, Inc.
 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 */

/*
 * Code overview
 * =============
 *
 * - bpf_jit_comp.h
 *   Common definitions and utilities.
 *
 * - bpf_jit_comp.c
 *   Implementation of JIT top-level logic and exported JIT API functions.
 *   Implementation of internal operations shared by 32-bit and 64-bit code.
 *   JMP and ALU JIT control code, register control code, shared ALU and
 *   JMP/JMP32 JIT operations.
 *
 * - bpf_jit_comp32.c
 *   Implementation of functions to JIT prologue, epilogue and a single eBPF
 *   instruction for 32-bit MIPS CPUs. The functions use shared operations
 *   where possible, and implement the rest for 32-bit MIPS such as ALU64
 *   operations.
 *
 * - bpf_jit_comp64.c
 *   Ditto, for 64-bit MIPS CPUs.
 *
 * Zero and sign extension
 * =======================
 * 32-bit MIPS instructions on 64-bit MIPS registers use sign extension,
 * but the eBPF instruction set mandates zero extension. We let the verifier
 * insert explicit zero-extensions after 32-bit ALU operations, both for
 * 32-bit and 64-bit MIPS JITs. Conditional JMP32 operations on 64-bit MIPS
 * are JITed with sign extensions inserted where expected.
 *
 * ALU operations
 * ==============
 * ALU operations on 32/64-bit MIPS and ALU64 operations on 64-bit MIPS are
 * JITed in the following steps. ALU64 operations on 32-bit MIPS are more
 * complicated and therefore only processed by special implementations in
 * step (3).
 *
 * 1) valid_alu_i:
 *    Determine if an immediate operation can be emitted as such, or if
 *    we must fall back to the register version.
 *
 * 2) rewrite_alu_i:
 *    Convert BPF operation and immediate value to a canonical form for
 *    JITing. In some degenerate cases this form may be a no-op.
 *
 * 3) emit_alu_{i,i64,r,r64}:
 *    Emit instructions for an ALU or ALU64 immediate or register operation.
 *
 * JMP operations
 * ==============
 * JMP and JMP32 operations require a JIT instruction offset table for
 * translating the jump offset. This table is computed by dry-running the
 * JIT without actually emitting anything. However, the computed PC-relative
 * offset may overflow the 18-bit offset field width of the native MIPS
 * branch instruction. In such cases, the long jump is converted into the
 * following sequence.
 *
 *    <branch> !<cond> +2    Inverted PC-relative branch
 *    nop                    Delay slot
 *    j <offset>             Unconditional absolute long jump
 *    nop                    Delay slot
 *
 * Since this converted sequence alters the offset table, all offsets must
 * be re-calculated. This may in turn trigger new branch conversions, so
 * the process is repeated until no further changes are made. Normally it
 * completes in 1-2 iterations. If JIT_MAX_ITERATIONS is reached, we fall
 * back to converting every remaining jump operation. The branch conversion
 * is independent of how the JMP or JMP32 condition is JITed.
 *
 * JMP32 and JMP operations are JITed as follows.
 *
 * 1) setup_jmp_{i,r}:
 *    Convert jump conditional and offset into a form that can be JITed.
 *    This form may be a no-op, a canonical form, or an inverted PC-relative
 *    jump if branch conversion is necessary.
 *
 * 2) valid_jmp_i:
 *    Determine if an immediate operation can be emitted as such, or if
 *    we must fall back to the register version. Applies to JMP32 for 32-bit
 *    MIPS, and both JMP and JMP32 for 64-bit MIPS.
 *
 * 3) emit_jmp_{i,i64,r,r64}:
 *    Emit instructions for a JMP or JMP32 immediate or register operation.
 *
 * 4) finish_jmp_{i,r}:
 *    Emit any instructions needed to finish the jump. This includes a nop
 *    for the delay slot if a branch was emitted, and a long absolute jump
 *    if the branch was converted.
 */

#include <linux/limits.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/slab.h>
#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/isa-rev.h>
#include <asm/uasm.h>

#include "bpf_jit_comp.h"

/* Convenience macros for descriptor access */
#define CONVERTED(desc)	((desc) & JIT_DESC_CONVERT)
#define INDEX(desc)	((desc) & ~JIT_DESC_CONVERT)

/*
 * Push registers on the stack, starting at a given depth from the stack
 * pointer and increasing. The next depth to be written is returned.
 */
int push_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth)
{
	int reg;

	for (reg = 0; reg < BITS_PER_BYTE * sizeof(mask); reg++)
		if (mask & BIT(reg)) {
			if ((excl & BIT(reg)) == 0) {
				if (sizeof(long) == 4)
					emit(ctx, sw, reg, depth, MIPS_R_SP);
				else /* sizeof(long) == 8 */
					emit(ctx, sd, reg, depth, MIPS_R_SP);
			}
			depth += sizeof(long);
		}

	ctx->stack_used = max((int)ctx->stack_used, depth);
	return depth;
}

/*
 * Pop registers from the stack, starting at a given depth from the stack
 * pointer and increasing. The next depth to be read is returned.
 */
int pop_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth)
{
	int reg;

	for (reg = 0; reg < BITS_PER_BYTE * sizeof(mask); reg++)
		if (mask & BIT(reg)) {
			if ((excl & BIT(reg)) == 0) {
				if (sizeof(long) == 4)
					emit(ctx, lw, reg, depth, MIPS_R_SP);
				else /* sizeof(long) == 8 */
					emit(ctx, ld, reg, depth, MIPS_R_SP);
			}
			depth += sizeof(long);
		}

	return depth;
}

/* Compute the 28-bit jump target address from a BPF program location */
int get_target(struct jit_context *ctx, u32 loc)
{
	u32 index = INDEX(ctx->descriptors[loc]);
	unsigned long pc = (unsigned long)&ctx->target[ctx->jit_index];
	unsigned long addr = (unsigned long)&ctx->target[index];

	if (!ctx->target)
		return 0;

	if ((addr ^ pc) & ~MIPS_JMP_MASK)
		return -1;

	return addr & MIPS_JMP_MASK;
}

/* Compute the PC-relative branch offset to a relative BPF program offset */
int get_offset(const struct jit_context *ctx, int off)
{
	return (INDEX(ctx->descriptors[ctx->bpf_index + off]) -
		ctx->jit_index - 1) * sizeof(u32);
}
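
/*
 * Worked example, for illustration only: if the branch is being emitted
 * at JIT index 10 and the target BPF instruction maps to JIT index 20,
 * the result is (20 - 10 - 1) * 4 = 36 bytes. The extra -1 is there
 * because MIPS branch offsets are relative to the instruction after the
 * branch, i.e. the delay slot.
 */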

/* dst = imm (register width) */
void emit_mov_i(struct jit_context *ctx, u8 dst, s32 imm)
{
	if (imm >= -0x8000 && imm <= 0x7fff) {
		emit(ctx, addiu, dst, MIPS_R_ZERO, imm);
	} else {
		emit(ctx, lui, dst, (s16)((u32)imm >> 16));
		emit(ctx, ori, dst, dst, (u16)(imm & 0xffff));
	}
	clobber_reg(ctx, dst);
}
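
/*
 * Illustration of the two emit paths above. An immediate that does not
 * fit in 16 bits, say 0x12345678, is built in two steps:
 *
 *	lui	dst, 0x1234		; dst = 0x12340000
 *	ori	dst, dst, 0x5678	; dst = 0x12345678
 *
 * while a small immediate such as -5 is emitted as a single
 * addiu dst, zero, -5.
 */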

/* dst = src (register width) */
void emit_mov_r(struct jit_context *ctx, u8 dst, u8 src)
{
	emit(ctx, ori, dst, src, 0);
	clobber_reg(ctx, dst);
}

/* Validate ALU immediate range */
bool valid_alu_i(u8 op, s32 imm)
{
	switch (BPF_OP(op)) {
	case BPF_NEG:
	case BPF_LSH:
	case BPF_RSH:
	case BPF_ARSH:
		/* All legal eBPF values are valid */
		return true;
	case BPF_ADD:
		if (IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS))
			return false;
		/* imm must be 16 bits */
		return imm >= -0x8000 && imm <= 0x7fff;
	case BPF_SUB:
		if (IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS))
			return false;
		/* -imm must be 16 bits */
		return imm >= -0x7fff && imm <= 0x8000;
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* imm must be 16 bits unsigned */
		return imm >= 0 && imm <= 0xffff;
	case BPF_MUL:
		/* imm must be zero or a positive power of two */
		return imm == 0 || (imm > 0 && is_power_of_2(imm));
	case BPF_DIV:
	case BPF_MOD:
		/* imm must be a 17-bit power of two */
		return (u32)imm <= 0x10000 && is_power_of_2((u32)imm);
	}
	return false;
}

/* Rewrite ALU immediate operation */
bool rewrite_alu_i(u8 op, s32 imm, u8 *alu, s32 *val)
{
	bool act = true;

	switch (BPF_OP(op)) {
	case BPF_LSH:
	case BPF_RSH:
	case BPF_ARSH:
	case BPF_ADD:
	case BPF_SUB:
	case BPF_OR:
	case BPF_XOR:
		/* imm == 0 is a no-op */
		act = imm != 0;
		break;
	case BPF_MUL:
		if (imm == 1) {
			/* dst * 1 is a no-op */
			act = false;
		} else if (imm == 0) {
			/* dst * 0 is dst & 0 */
			op = BPF_AND;
		} else {
			/* dst * (1 << n) is dst << n */
			op = BPF_LSH;
			imm = ilog2(abs(imm));
		}
		break;
	case BPF_DIV:
		if (imm == 1) {
			/* dst / 1 is a no-op */
			act = false;
		} else {
			/* dst / (1 << n) is dst >> n */
			op = BPF_RSH;
			imm = ilog2(imm);
		}
		break;
	case BPF_MOD:
		/* dst % (1 << n) is dst & ((1 << n) - 1) */
		op = BPF_AND;
		imm--;
		break;
	}

	*alu = op;
	*val = imm;
	return act;
}
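
/*
 * A few examples of the rewrites performed above, for illustration:
 *
 *	BPF_MUL imm=4	->	BPF_LSH imm=2	(dst * 4  == dst << 2)
 *	BPF_DIV imm=8	->	BPF_RSH imm=3	(dst / 8  == dst >> 3)
 *	BPF_MOD imm=16	->	BPF_AND imm=15	(dst % 16 == dst & 15)
 *	BPF_ADD imm=0	->	no-op		(returns false)
 */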

/* ALU immediate operation (32-bit) */
void emit_alu_i(struct jit_context *ctx, u8 dst, s32 imm, u8 op)
{
	switch (BPF_OP(op)) {
	/* dst = -dst */
	case BPF_NEG:
		emit(ctx, subu, dst, MIPS_R_ZERO, dst);
		break;
	/* dst = dst & imm */
	case BPF_AND:
		emit(ctx, andi, dst, dst, (u16)imm);
		break;
	/* dst = dst | imm */
	case BPF_OR:
		emit(ctx, ori, dst, dst, (u16)imm);
		break;
	/* dst = dst ^ imm */
	case BPF_XOR:
		emit(ctx, xori, dst, dst, (u16)imm);
		break;
	/* dst = dst << imm */
	case BPF_LSH:
		emit(ctx, sll, dst, dst, imm);
		break;
	/* dst = dst >> imm */
	case BPF_RSH:
		emit(ctx, srl, dst, dst, imm);
		break;
	/* dst = dst >> imm (arithmetic) */
	case BPF_ARSH:
		emit(ctx, sra, dst, dst, imm);
		break;
	/* dst = dst + imm */
	case BPF_ADD:
		emit(ctx, addiu, dst, dst, imm);
		break;
	/* dst = dst - imm */
	case BPF_SUB:
		emit(ctx, addiu, dst, dst, -imm);
		break;
	}
	clobber_reg(ctx, dst);
}

/* ALU register operation (32-bit) */
void emit_alu_r(struct jit_context *ctx, u8 dst, u8 src, u8 op)
{
	switch (BPF_OP(op)) {
	/* dst = dst & src */
	case BPF_AND:
		emit(ctx, and, dst, dst, src);
		break;
	/* dst = dst | src */
	case BPF_OR:
		emit(ctx, or, dst, dst, src);
		break;
	/* dst = dst ^ src */
	case BPF_XOR:
		emit(ctx, xor, dst, dst, src);
		break;
	/* dst = dst << src */
	case BPF_LSH:
		emit(ctx, sllv, dst, dst, src);
		break;
	/* dst = dst >> src */
	case BPF_RSH:
		emit(ctx, srlv, dst, dst, src);
		break;
	/* dst = dst >> src (arithmetic) */
	case BPF_ARSH:
		emit(ctx, srav, dst, dst, src);
		break;
	/* dst = dst + src */
	case BPF_ADD:
		emit(ctx, addu, dst, dst, src);
		break;
	/* dst = dst - src */
	case BPF_SUB:
		emit(ctx, subu, dst, dst, src);
		break;
	/* dst = dst * src */
	case BPF_MUL:
		if (cpu_has_mips32r1 || cpu_has_mips32r6) {
			emit(ctx, mul, dst, dst, src);
		} else {
			emit(ctx, multu, dst, src);
			emit(ctx, mflo, dst);
		}
		break;
	/* dst = dst / src */
	case BPF_DIV:
		if (cpu_has_mips32r6) {
			emit(ctx, divu_r6, dst, dst, src);
		} else {
			emit(ctx, divu, dst, src);
			emit(ctx, mflo, dst);
		}
		break;
	/* dst = dst % src */
	case BPF_MOD:
		if (cpu_has_mips32r6) {
			emit(ctx, modu, dst, dst, src);
		} else {
			emit(ctx, divu, dst, src);
			emit(ctx, mfhi, dst);
		}
		break;
	}
	clobber_reg(ctx, dst);
}

/* Atomic read-modify-write (32-bit) */
void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code)
{
	LLSC_sync(ctx);
	emit(ctx, ll, MIPS_R_T9, off, dst);
	switch (code) {
	case BPF_ADD:
	case BPF_ADD | BPF_FETCH:
		emit(ctx, addu, MIPS_R_T8, MIPS_R_T9, src);
		break;
	case BPF_AND:
	case BPF_AND | BPF_FETCH:
		emit(ctx, and, MIPS_R_T8, MIPS_R_T9, src);
		break;
	case BPF_OR:
	case BPF_OR | BPF_FETCH:
		emit(ctx, or, MIPS_R_T8, MIPS_R_T9, src);
		break;
	case BPF_XOR:
	case BPF_XOR | BPF_FETCH:
		emit(ctx, xor, MIPS_R_T8, MIPS_R_T9, src);
		break;
	case BPF_XCHG:
		emit(ctx, move, MIPS_R_T8, src);
		break;
	}
	emit(ctx, sc, MIPS_R_T8, off, dst);
	emit(ctx, LLSC_beqz, MIPS_R_T8, -16 - LLSC_offset);
	emit(ctx, nop); /* Delay slot */

	if (code & BPF_FETCH) {
		emit(ctx, move, src, MIPS_R_T9);
		clobber_reg(ctx, src);
	}
}
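
/*
 * Sketch of the sequence emitted above for code == BPF_ADD, assuming
 * LLSC_sync() emits nothing and LLSC_offset is 0. The branch offset is
 * in bytes, relative to the delay slot:
 *
 *	ll	t9, off(dst)	; t9 = *(dst + off)
 *	addu	t8, t9, src	; t8 = t9 + src
 *	sc	t8, off(dst)	; attempt to store t8
 *	beqz	t8, -16		; retry from the ll if the sc failed
 *	nop			; delay slot
 */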

/* Atomic compare-and-exchange (32-bit) */
void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off)
{
	LLSC_sync(ctx);
	emit(ctx, ll, MIPS_R_T9, off, dst);
	emit(ctx, bne, MIPS_R_T9, res, 12);
	emit(ctx, move, MIPS_R_T8, src); /* Delay slot */
	emit(ctx, sc, MIPS_R_T8, off, dst);
	emit(ctx, LLSC_beqz, MIPS_R_T8, -20 - LLSC_offset);
	emit(ctx, move, res, MIPS_R_T9); /* Delay slot */
	clobber_reg(ctx, res);
}
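
/*
 * Sketch of the sequence emitted above, under the same assumptions as
 * for emit_atomic_r() (no sync barrier, LLSC_offset == 0). Note that
 * the final move is executed on both paths, as the bne target and as
 * the beqz delay slot:
 *
 *	ll	t9, off(dst)	; t9 = *(dst + off)
 *	bne	t9, res, +12	; done if loaded value != expected
 *	move	t8, src		; delay slot: t8 = new value
 *	sc	t8, off(dst)	; attempt to store t8
 *	beqz	t8, -20		; retry from the ll if the sc failed
 *	move	res, t9		; delay slot: return the loaded value
 */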

/* Swap bytes and truncate a register word or half word */
void emit_bswap_r(struct jit_context *ctx, u8 dst, u32 width)
{
	u8 tmp = MIPS_R_T8;
	u8 msk = MIPS_R_T9;

	switch (width) {
	/* Swap bytes in a word */
	case 32:
		if (cpu_has_mips32r2 || cpu_has_mips32r6) {
			emit(ctx, wsbh, dst, dst);
			emit(ctx, rotr, dst, dst, 16);
		} else {
			emit(ctx, sll, tmp, dst, 16);    /* tmp = dst << 16 */
			emit(ctx, srl, dst, dst, 16);    /* dst = dst >> 16 */
			emit(ctx, or, dst, dst, tmp);    /* dst = dst | tmp */

			emit(ctx, lui, msk, 0xff);       /* msk = 0x00ff0000 */
			emit(ctx, ori, msk, msk, 0xff);  /* msk = msk | 0xff */

			emit(ctx, and, tmp, dst, msk);   /* tmp = dst & msk */
			emit(ctx, sll, tmp, tmp, 8);     /* tmp = tmp << 8 */
			emit(ctx, srl, dst, dst, 8);     /* dst = dst >> 8 */
			emit(ctx, and, dst, dst, msk);   /* dst = dst & msk */
			emit(ctx, or, dst, dst, tmp);    /* dst = dst | tmp */
		}
		break;
	/* Swap bytes in a half word */
	case 16:
		if (cpu_has_mips32r2 || cpu_has_mips32r6) {
			emit(ctx, wsbh, dst, dst);
			emit(ctx, andi, dst, dst, 0xffff);
		} else {
			emit(ctx, andi, tmp, dst, 0xff00); /* t = d & 0xff00 */
			emit(ctx, srl, tmp, tmp, 8);       /* t = t >> 8 */
			emit(ctx, andi, dst, dst, 0x00ff); /* d = d & 0x00ff */
			emit(ctx, sll, dst, dst, 8);       /* d = d << 8 */
			emit(ctx, or, dst, dst, tmp);      /* d = d | t */
		}
		break;
	}
	clobber_reg(ctx, dst);
}

/* Validate jump immediate range */
bool valid_jmp_i(u8 op, s32 imm)
{
	switch (op) {
	case JIT_JNOP:
		/* Immediate value not used */
		return true;
	case BPF_JEQ:
	case BPF_JNE:
		/* No immediate operation */
		return false;
	case BPF_JSET:
	case JIT_JNSET:
		/* imm must be 16 bits unsigned */
		return imm >= 0 && imm <= 0xffff;
	case BPF_JGE:
	case BPF_JLT:
	case BPF_JSGE:
	case BPF_JSLT:
		/* imm must be 16 bits */
		return imm >= -0x8000 && imm <= 0x7fff;
	case BPF_JGT:
	case BPF_JLE:
	case BPF_JSGT:
	case BPF_JSLE:
		/* imm + 1 must be 16 bits */
		return imm >= -0x8001 && imm <= 0x7ffe;
	}
	return false;
}
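
/*
 * Example of the imm + 1 constraint above: BPF_JGT with imm == 0x7fff
 * must be rejected, because the JIT would have to emit slti/sltiu with
 * imm + 1 == 0x8000, which does not fit in the signed 16-bit immediate
 * field. The caller then falls back to the register version.
 */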

/* Invert a conditional jump operation */
static u8 invert_jmp(u8 op)
{
	switch (op) {
	case BPF_JA: return JIT_JNOP;
	case BPF_JEQ: return BPF_JNE;
	case BPF_JNE: return BPF_JEQ;
	case BPF_JSET: return JIT_JNSET;
	case BPF_JGT: return BPF_JLE;
	case BPF_JGE: return BPF_JLT;
	case BPF_JLT: return BPF_JGE;
	case BPF_JLE: return BPF_JGT;
	case BPF_JSGT: return BPF_JSLE;
	case BPF_JSGE: return BPF_JSLT;
	case BPF_JSLT: return BPF_JSGE;
	case BPF_JSLE: return BPF_JSGT;
	}
	return 0;
}

/* Prepare a PC-relative jump operation */
static void setup_jmp(struct jit_context *ctx, u8 bpf_op,
		      s16 bpf_off, u8 *jit_op, s32 *jit_off)
{
	u32 *descp = &ctx->descriptors[ctx->bpf_index];
	int op = bpf_op;
	int offset = 0;

	/* Do not compute offsets on the first pass */
	if (INDEX(*descp) == 0)
		goto done;

	/* Skip jumps never taken */
	if (bpf_op == JIT_JNOP)
		goto done;

	/* Convert jumps always taken */
	if (bpf_op == BPF_JA)
		*descp |= JIT_DESC_CONVERT;

	/*
	 * Current ctx->jit_index points to the start of the branch preamble.
	 * Since the preamble differs among different branch conditionals,
	 * the current index cannot be used to compute the branch offset.
	 * Instead, we use the offset table value for the next instruction,
	 * which gives the index immediately after the branch delay slot.
	 */
	if (!CONVERTED(*descp)) {
		int target = ctx->bpf_index + bpf_off + 1;
		int origin = ctx->bpf_index + 1;

		offset = (INDEX(ctx->descriptors[target]) -
			  INDEX(ctx->descriptors[origin]) + 1) * sizeof(u32);
	}

	/*
	 * The PC-relative branch offset field on MIPS is 18 bits signed,
	 * so if the computed offset is larger than this we generate an
	 * absolute jump that we skip with an inverted conditional branch.
	 */
	if (CONVERTED(*descp) || offset < -0x20000 || offset > 0x1ffff) {
		offset = 3 * sizeof(u32);
		op = invert_jmp(bpf_op);
		ctx->changes += !CONVERTED(*descp);
		*descp |= JIT_DESC_CONVERT;
	}

done:
	*jit_off = offset;
	*jit_op = op;
}
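
/*
 * Example of the conversion above, for illustration: a computed offset
 * of 0x40000 bytes falls outside the +/-0x20000 branch range, so the
 * condition is inverted and the offset replaced by 3 instructions
 * (12 bytes), just enough to skip the absolute jump and its delay slot
 * emitted later by finish_jmp().
 */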

/* Prepare a PC-relative jump operation with immediate conditional */
void setup_jmp_i(struct jit_context *ctx, s32 imm, u8 width,
		 u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off)
{
	bool always = false;
	bool never = false;

	switch (bpf_op) {
	case BPF_JEQ:
	case BPF_JNE:
		break;
	case BPF_JSET:
	case BPF_JLT:
		never = imm == 0;
		break;
	case BPF_JGE:
		always = imm == 0;
		break;
	case BPF_JGT:
		never = (u32)imm == U32_MAX;
		break;
	case BPF_JLE:
		always = (u32)imm == U32_MAX;
		break;
	case BPF_JSGT:
		never = imm == S32_MAX && width == 32;
		break;
	case BPF_JSGE:
		always = imm == S32_MIN && width == 32;
		break;
	case BPF_JSLT:
		never = imm == S32_MIN && width == 32;
		break;
	case BPF_JSLE:
		always = imm == S32_MAX && width == 32;
		break;
	}

	if (never)
		bpf_op = JIT_JNOP;
	if (always)
		bpf_op = BPF_JA;
	setup_jmp(ctx, bpf_op, bpf_off, jit_op, jit_off);
}

/* Prepare a PC-relative jump operation with register conditional */
void setup_jmp_r(struct jit_context *ctx, bool same_reg,
		 u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off)
{
	switch (bpf_op) {
	case BPF_JSET:
		break;
	case BPF_JEQ:
	case BPF_JGE:
	case BPF_JLE:
	case BPF_JSGE:
	case BPF_JSLE:
		if (same_reg)
			bpf_op = BPF_JA;
		break;
	case BPF_JNE:
	case BPF_JLT:
	case BPF_JGT:
	case BPF_JSGT:
	case BPF_JSLT:
		if (same_reg)
			bpf_op = JIT_JNOP;
		break;
	}
	setup_jmp(ctx, bpf_op, bpf_off, jit_op, jit_off);
}

/* Finish a PC-relative jump operation */
int finish_jmp(struct jit_context *ctx, u8 jit_op, s16 bpf_off)
{
	/* Emit conditional branch delay slot */
	if (jit_op != JIT_JNOP)
		emit(ctx, nop);
	/*
	 * Emit an absolute long jump with delay slot,
	 * if the PC-relative branch was converted.
	 */
	if (CONVERTED(ctx->descriptors[ctx->bpf_index])) {
		int target = get_target(ctx, ctx->bpf_index + bpf_off + 1);

		if (target < 0)
			return -1;
		emit(ctx, j, target);
		emit(ctx, nop);
	}
	return 0;
}

/* Jump immediate (32-bit) */
void emit_jmp_i(struct jit_context *ctx, u8 dst, s32 imm, s32 off, u8 op)
{
	switch (op) {
	/* No-op, used internally for branch optimization */
	case JIT_JNOP:
		break;
	/* PC += off if dst & imm */
	case BPF_JSET:
		emit(ctx, andi, MIPS_R_T9, dst, (u16)imm);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */
	case JIT_JNSET:
		emit(ctx, andi, MIPS_R_T9, dst, (u16)imm);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst > imm */
	case BPF_JGT:
		emit(ctx, sltiu, MIPS_R_T9, dst, imm + 1);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst >= imm */
	case BPF_JGE:
		emit(ctx, sltiu, MIPS_R_T9, dst, imm);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst < imm */
	case BPF_JLT:
		emit(ctx, sltiu, MIPS_R_T9, dst, imm);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if dst <= imm */
	case BPF_JLE:
		emit(ctx, sltiu, MIPS_R_T9, dst, imm + 1);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if dst > imm (signed) */
	case BPF_JSGT:
		emit(ctx, slti, MIPS_R_T9, dst, imm + 1);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst >= imm (signed) */
	case BPF_JSGE:
		emit(ctx, slti, MIPS_R_T9, dst, imm);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst < imm (signed) */
	case BPF_JSLT:
		emit(ctx, slti, MIPS_R_T9, dst, imm);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if dst <= imm (signed) */
	case BPF_JSLE:
		emit(ctx, slti, MIPS_R_T9, dst, imm + 1);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	}
}

/* Jump register (32-bit) */
void emit_jmp_r(struct jit_context *ctx, u8 dst, u8 src, s32 off, u8 op)
{
	switch (op) {
	/* No-op, used internally for branch optimization */
	case JIT_JNOP:
		break;
	/* PC += off if dst == src */
	case BPF_JEQ:
		emit(ctx, beq, dst, src, off);
		break;
	/* PC += off if dst != src */
	case BPF_JNE:
		emit(ctx, bne, dst, src, off);
		break;
	/* PC += off if dst & src */
	case BPF_JSET:
		emit(ctx, and, MIPS_R_T9, dst, src);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if (dst & src) == 0 (not in BPF, used for long jumps) */
	case JIT_JNSET:
		emit(ctx, and, MIPS_R_T9, dst, src);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst > src */
	case BPF_JGT:
		emit(ctx, sltu, MIPS_R_T9, src, dst);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if dst >= src */
	case BPF_JGE:
		emit(ctx, sltu, MIPS_R_T9, dst, src);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst < src */
	case BPF_JLT:
		emit(ctx, sltu, MIPS_R_T9, dst, src);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if dst <= src */
	case BPF_JLE:
		emit(ctx, sltu, MIPS_R_T9, src, dst);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst > src (signed) */
	case BPF_JSGT:
		emit(ctx, slt, MIPS_R_T9, src, dst);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if dst >= src (signed) */
	case BPF_JSGE:
		emit(ctx, slt, MIPS_R_T9, dst, src);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	/* PC += off if dst < src (signed) */
	case BPF_JSLT:
		emit(ctx, slt, MIPS_R_T9, dst, src);
		emit(ctx, bnez, MIPS_R_T9, off);
		break;
	/* PC += off if dst <= src (signed) */
	case BPF_JSLE:
		emit(ctx, slt, MIPS_R_T9, src, dst);
		emit(ctx, beqz, MIPS_R_T9, off);
		break;
	}
}

/* Jump always */
int emit_ja(struct jit_context *ctx, s16 off)
{
	int target = get_target(ctx, ctx->bpf_index + off + 1);

	if (target < 0)
		return -1;
	emit(ctx, j, target);
	emit(ctx, nop);
	return 0;
}

/* Jump to epilogue */
int emit_exit(struct jit_context *ctx)
{
	int target = get_target(ctx, ctx->program->len);

	if (target < 0)
		return -1;
	emit(ctx, j, target);
	emit(ctx, nop);
	return 0;
}

/* Build the program body from eBPF bytecode */
static int build_body(struct jit_context *ctx)
{
	const struct bpf_prog *prog = ctx->program;
	unsigned int i;

	ctx->stack_used = 0;
	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		u32 *descp = &ctx->descriptors[i];
		int ret;

		access_reg(ctx, insn->src_reg);
		access_reg(ctx, insn->dst_reg);

		ctx->bpf_index = i;
		if (ctx->target == NULL) {
			ctx->changes += INDEX(*descp) != ctx->jit_index;
			*descp &= JIT_DESC_CONVERT;
			*descp |= ctx->jit_index;
		}

		ret = build_insn(insn, ctx);
		if (ret < 0)
			return ret;

		if (ret > 0) {
			i++;
			if (ctx->target == NULL)
				descp[1] = ctx->jit_index;
		}
	}

	/* Store the end offset, where the epilogue begins */
	ctx->descriptors[prog->len] = ctx->jit_index;
	return 0;
}

/* Set the branch conversion flag on all instructions */
static void set_convert_flag(struct jit_context *ctx, bool enable)
{
	const struct bpf_prog *prog = ctx->program;
	u32 flag = enable ? JIT_DESC_CONVERT : 0;
	unsigned int i;

	for (i = 0; i <= prog->len; i++)
		ctx->descriptors[i] = INDEX(ctx->descriptors[i]) | flag;
}

static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *p;

	/* We are guaranteed to have aligned memory. */
	for (p = area; size >= sizeof(u32); size -= sizeof(u32))
		uasm_i_break(&p, BRK_BUG); /* Increments p */
}

bool bpf_jit_needs_zext(void)
{
	return true;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header = NULL;
	struct jit_context ctx;
	bool tmp_blinded = false;
	unsigned int tmp_idx;
	unsigned int image_size;
	u8 *image_ptr;
	int tries;

	/*
	 * If BPF JIT was not enabled then we must fall back to
	 * the interpreter.
	 */
	if (!prog->jit_requested)
		return orig_prog;
	/*
	 * If constant blinding was enabled and we failed during blinding
	 * then we must fall back to the interpreter. Otherwise, we save
	 * the new JITed code.
	 */
	tmp = bpf_jit_blind_constants(prog);
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.program = prog;

	/*
	 * If we are unable to allocate memory for descriptors[],
	 * we must fall back to the interpreter.
	 */
	ctx.descriptors = kcalloc(prog->len + 1, sizeof(*ctx.descriptors),
				  GFP_KERNEL);
	if (ctx.descriptors == NULL)
		goto out_err;

	/* First pass discovers used resources */
	if (build_body(&ctx) < 0)
		goto out_err;
	/*
	 * Second pass computes instruction offsets.
	 * If any PC-relative branches are out of range, a sequence of
	 * a PC-relative branch + a jump is generated, and we have to
	 * try again from the beginning to generate the new offsets.
	 * This is done until no additional conversions are necessary.
	 * The last two iterations are done with all branches being
	 * converted, to guarantee offset table convergence within a
	 * fixed number of iterations.
	 */
	ctx.jit_index = 0;
	build_prologue(&ctx);
	tmp_idx = ctx.jit_index;

	tries = JIT_MAX_ITERATIONS;
	do {
		ctx.jit_index = tmp_idx;
		ctx.changes = 0;
		if (tries == 2)
			set_convert_flag(&ctx, true);
		if (build_body(&ctx) < 0)
			goto out_err;
	} while (ctx.changes > 0 && --tries > 0);

	if (WARN_ONCE(ctx.changes > 0, "JIT offsets failed to converge"))
		goto out_err;

	build_epilogue(&ctx, MIPS_R_RA);

	/* Now we know the size of the image to allocate */
	image_size = sizeof(u32) * ctx.jit_index;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	/*
	 * If we are unable to allocate memory for the image,
	 * we must fall back to the interpreter.
	 */
	if (header == NULL)
		goto out_err;

	/* Actual pass to generate final JIT code */
	ctx.target = (u32 *)image_ptr;
	ctx.jit_index = 0;

	/*
	 * If building the JITed code fails somehow,
	 * we fall back to the interpreter.
	 */
	build_prologue(&ctx);
	if (build_body(&ctx) < 0)
		goto out_err;
	build_epilogue(&ctx, MIPS_R_RA);

	/* Populate line info meta data */
	set_convert_flag(&ctx, false);
	bpf_prog_fill_jited_linfo(prog, &ctx.descriptors[1]);

	/* Set as read-only exec and flush instruction cache */
	bpf_jit_binary_lock_ro(header);
	flush_icache_range((unsigned long)header,
			   (unsigned long)&ctx.target[ctx.jit_index]);

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.target);

	prog->bpf_func = (void *)ctx.target;
	prog->jited = 1;
	prog->jited_len = image_size;

out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	kfree(ctx.descriptors);
	return prog;

out_err:
	prog = orig_prog;
	if (header)
		bpf_jit_binary_free(header);
	goto out;
}