// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/ftrace.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
#include <asm/unwind.h>
#include <asm/cfi.h>

22static bool all_callee_regs_used[4] = {true, true, true, true};
23
24static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
25{
26 if (len == 1)
27 *ptr = bytes;
28 else if (len == 2)
29 *(u16 *)ptr = bytes;
30 else {
31 *(u32 *)ptr = bytes;
32 barrier();
33 }
34 return ptr + len;
35}
36
37#define EMIT(bytes, len) \
38 do { prog = emit_code(prog, bytes, len); } while (0)
39
40#define EMIT1(b1) EMIT(b1, 1)
41#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
42#define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
43#define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
44
45#define EMIT1_off32(b1, off) \
46 do { EMIT1(b1); EMIT(off, 4); } while (0)
47#define EMIT2_off32(b1, b2, off) \
48 do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
49#define EMIT3_off32(b1, b2, b3, off) \
50 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
51#define EMIT4_off32(b1, b2, b3, b4, off) \
52 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
53
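/*
 * A minimal standalone sketch (not part of this file, assuming a little-endian
 * x86-64 host) of what the EMIT*() macros above boil down to: emit_code()
 * stores up to four opcode bytes least-significant-byte first, so EMIT2(0x31,
 * 0xC0) writes 0x31 then 0xC0, and a trailing off32 lands in little-endian
 * order.  emit_code_demo() and image are local stand-ins, not kernel symbols.
 */
#if 0	/* illustrative userspace program, compile separately */
#include <stdio.h>
#include <string.h>

static unsigned char *emit_code_demo(unsigned char *ptr, unsigned int bytes,
				     unsigned int len)
{
	memcpy(ptr, &bytes, len);		/* low byte first, as on x86 */
	return ptr + len;
}

int main(void)
{
	unsigned char image[16], *prog = image;

	/* EMIT2(0x31, 0xC0):             xor eax, eax        */
	prog = emit_code_demo(prog, 0x31 + (0xC0 << 8), 2);
	/* EMIT1_off32(0xB8, 0x12345678): mov eax, 0x12345678 */
	prog = emit_code_demo(prog, 0xB8, 1);
	prog = emit_code_demo(prog, 0x12345678, 4);

	for (unsigned char *p = image; p < prog; p++)
		printf("%02x ", *p);
	printf("\n");				/* 31 c0 b8 78 56 34 12 */
	return 0;
}
#endif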
54#ifdef CONFIG_X86_KERNEL_IBT
55#define EMIT_ENDBR() EMIT(gen_endbr(), 4)
56#define EMIT_ENDBR_POISON() EMIT(gen_endbr_poison(), 4)
57#else
58#define EMIT_ENDBR()
59#define EMIT_ENDBR_POISON()
60#endif
61
62static bool is_imm8(int value)
63{
64 return value <= 127 && value >= -128;
65}
66
67/*
68 * Let us limit the positive offset to be <= 123.
 * This is to ensure eventual jit convergence for the following patterns:
70 * ...
71 * pass4, final_proglen=4391:
72 * ...
73 * 20e: 48 85 ff test rdi,rdi
74 * 211: 74 7d je 0x290
75 * 213: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0]
76 * ...
77 * 289: 48 85 ff test rdi,rdi
78 * 28c: 74 17 je 0x2a5
79 * 28e: e9 7f ff ff ff jmp 0x212
80 * 293: bf 03 00 00 00 mov edi,0x3
81 * Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (-125)
82 * and insn at 0x28e is 5-byte jmp insn with offset -129.
83 *
84 * pass5, final_proglen=4392:
85 * ...
86 * 20e: 48 85 ff test rdi,rdi
87 * 211: 0f 84 80 00 00 00 je 0x297
88 * 217: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0]
89 * ...
90 * 28d: 48 85 ff test rdi,rdi
91 * 290: 74 1a je 0x2ac
92 * 292: eb 84 jmp 0x218
93 * 294: bf 03 00 00 00 mov edi,0x3
94 * Note that insn at 0x211 is 6-byte cond jump insn now since its offset
95 * becomes 0x80 based on previous round (0x293 - 0x213 = 0x80).
96 * At the same time, insn at 0x292 is a 2-byte insn since its offset is
97 * -124.
98 *
99 * pass6 will repeat the same code as in pass4 and this will prevent
100 * eventual convergence.
101 *
102 * To fix this issue, we need to break je (2->6 bytes) <-> jmp (5->2 bytes)
103 * cycle in the above. In the above example je offset <= 0x7c should work.
104 *
 * For other cases, je <-> je needs offset <= 0x7b to avoid the
 * non-convergence issue. For jmp <-> je and jmp <-> jmp cases, jmp offset
 * <= 0x7c should avoid the non-convergence issue.
108 *
109 * Overall, let us limit the positive offset for 8bit cond/uncond jmp insn
110 * to maximum 123 (0x7b). This way, the jit pass can eventually converge.
111 */
112static bool is_imm8_jmp_offset(int value)
113{
114 return value <= 123 && value >= -128;
115}
116
117static bool is_simm32(s64 value)
118{
119 return value == (s64)(s32)value;
120}
121
122static bool is_uimm32(u64 value)
123{
124 return value == (u64)(u32)value;
125}
126
127/* mov dst, src */
128#define EMIT_mov(DST, SRC) \
129 do { \
130 if (DST != SRC) \
131 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
132 } while (0)
133
134static int bpf_size_to_x86_bytes(int bpf_size)
135{
136 if (bpf_size == BPF_W)
137 return 4;
138 else if (bpf_size == BPF_H)
139 return 2;
140 else if (bpf_size == BPF_B)
141 return 1;
142 else if (bpf_size == BPF_DW)
143 return 4; /* imm32 */
144 else
145 return 0;
146}
147
148/*
149 * List of x86 cond jumps opcodes (. + s8)
150 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
151 */
152#define X86_JB 0x72
153#define X86_JAE 0x73
154#define X86_JE 0x74
155#define X86_JNE 0x75
156#define X86_JBE 0x76
157#define X86_JA 0x77
158#define X86_JL 0x7C
159#define X86_JGE 0x7D
160#define X86_JLE 0x7E
161#define X86_JG 0x7F
162
163/* Pick a register outside of BPF range for JIT internal work */
164#define AUX_REG (MAX_BPF_JIT_REG + 1)
165#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
166#define X86_REG_R12 (MAX_BPF_JIT_REG + 3)
167
168/*
169 * The following table maps BPF registers to x86-64 registers.
170 *
171 * x86-64 register R12 is unused, since if used as base address
172 * register in load/store instructions, it always needs an
173 * extra byte of encoding and is callee saved.
174 *
175 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
176 * trampoline. x86-64 register R10 is used for blinding (if enabled).
177 */
178static const int reg2hex[] = {
179 [BPF_REG_0] = 0, /* RAX */
180 [BPF_REG_1] = 7, /* RDI */
181 [BPF_REG_2] = 6, /* RSI */
182 [BPF_REG_3] = 2, /* RDX */
183 [BPF_REG_4] = 1, /* RCX */
184 [BPF_REG_5] = 0, /* R8 */
185 [BPF_REG_6] = 3, /* RBX callee saved */
186 [BPF_REG_7] = 5, /* R13 callee saved */
187 [BPF_REG_8] = 6, /* R14 callee saved */
188 [BPF_REG_9] = 7, /* R15 callee saved */
189 [BPF_REG_FP] = 5, /* RBP readonly */
190 [BPF_REG_AX] = 2, /* R10 temp register */
191 [AUX_REG] = 3, /* R11 temp register */
192 [X86_REG_R9] = 1, /* R9 register, 6th function argument */
193 [X86_REG_R12] = 4, /* R12 callee saved */
194};
195
196static const int reg2pt_regs[] = {
197 [BPF_REG_0] = offsetof(struct pt_regs, ax),
198 [BPF_REG_1] = offsetof(struct pt_regs, di),
199 [BPF_REG_2] = offsetof(struct pt_regs, si),
200 [BPF_REG_3] = offsetof(struct pt_regs, dx),
201 [BPF_REG_4] = offsetof(struct pt_regs, cx),
202 [BPF_REG_5] = offsetof(struct pt_regs, r8),
203 [BPF_REG_6] = offsetof(struct pt_regs, bx),
204 [BPF_REG_7] = offsetof(struct pt_regs, r13),
205 [BPF_REG_8] = offsetof(struct pt_regs, r14),
206 [BPF_REG_9] = offsetof(struct pt_regs, r15),
207};
208
209/*
210 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need an extra byte of encoding.
212 * rax,rcx,...,rbp have simpler encoding
213 */
214static bool is_ereg(u32 reg)
215{
216 return (1 << reg) & (BIT(BPF_REG_5) |
217 BIT(AUX_REG) |
218 BIT(BPF_REG_7) |
219 BIT(BPF_REG_8) |
220 BIT(BPF_REG_9) |
221 BIT(X86_REG_R9) |
222 BIT(X86_REG_R12) |
223 BIT(BPF_REG_AX));
224}
225
226/*
227 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
228 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
229 * of encoding. al,cl,dl,bl have simpler encoding.
230 */
231static bool is_ereg_8l(u32 reg)
232{
233 return is_ereg(reg) ||
234 (1 << reg) & (BIT(BPF_REG_1) |
235 BIT(BPF_REG_2) |
236 BIT(BPF_REG_FP));
237}
238
239static bool is_axreg(u32 reg)
240{
241 return reg == BPF_REG_0;
242}
243
244/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
245static u8 add_1mod(u8 byte, u32 reg)
246{
247 if (is_ereg(reg))
248 byte |= 1;
249 return byte;
250}
251
252static u8 add_2mod(u8 byte, u32 r1, u32 r2)
253{
254 if (is_ereg(r1))
255 byte |= 1;
256 if (is_ereg(r2))
257 byte |= 4;
258 return byte;
259}
260
261static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index)
262{
263 if (is_ereg(r1))
264 byte |= 1;
265 if (is_ereg(index))
266 byte |= 2;
267 if (is_ereg(r2))
268 byte |= 4;
269 return byte;
270}
271
272/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
273static u8 add_1reg(u8 byte, u32 dst_reg)
274{
275 return byte + reg2hex[dst_reg];
276}
277
278/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
279static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
280{
281 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
282}
283
284/* Some 1-byte opcodes for binary ALU operations */
285static u8 simple_alu_opcodes[] = {
286 [BPF_ADD] = 0x01,
287 [BPF_SUB] = 0x29,
288 [BPF_AND] = 0x21,
289 [BPF_OR] = 0x09,
290 [BPF_XOR] = 0x31,
291 [BPF_LSH] = 0xE0,
292 [BPF_RSH] = 0xE8,
293 [BPF_ARSH] = 0xF8,
294};
295
296static void jit_fill_hole(void *area, unsigned int size)
297{
298 /* Fill whole space with INT3 instructions */
299 memset(area, 0xcc, size);
300}
301
302int bpf_arch_text_invalidate(void *dst, size_t len)
303{
304 return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
305}
306
307struct jit_context {
308 int cleanup_addr; /* Epilogue code offset */
309
310 /*
311 * Program specific offsets of labels in the code; these rely on the
312 * JIT doing at least 2 passes, recording the position on the first
313 * pass, only to generate the correct offset on the second pass.
314 */
315 int tail_call_direct_label;
316 int tail_call_indirect_label;
317};
318
319/* Maximum number of bytes emitted while JITing one eBPF insn */
320#define BPF_MAX_INSN_SIZE 128
321#define BPF_INSN_SAFETY 64
322
323/* Number of bytes emit_patch() needs to generate instructions */
324#define X86_PATCH_SIZE 5
325/* Number of bytes that will be skipped on tailcall */
326#define X86_TAIL_CALL_OFFSET (12 + ENDBR_INSN_SIZE)
327
328static void push_r9(u8 **pprog)
329{
330 u8 *prog = *pprog;
331
332 EMIT2(0x41, 0x51); /* push r9 */
333 *pprog = prog;
334}
335
336static void pop_r9(u8 **pprog)
337{
338 u8 *prog = *pprog;
339
340 EMIT2(0x41, 0x59); /* pop r9 */
341 *pprog = prog;
342}
343
344static void push_r12(u8 **pprog)
345{
346 u8 *prog = *pprog;
347
348 EMIT2(0x41, 0x54); /* push r12 */
349 *pprog = prog;
350}
351
352static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
353{
354 u8 *prog = *pprog;
355
356 if (callee_regs_used[0])
357 EMIT1(0x53); /* push rbx */
358 if (callee_regs_used[1])
359 EMIT2(0x41, 0x55); /* push r13 */
360 if (callee_regs_used[2])
361 EMIT2(0x41, 0x56); /* push r14 */
362 if (callee_regs_used[3])
363 EMIT2(0x41, 0x57); /* push r15 */
364 *pprog = prog;
365}
366
367static void pop_r12(u8 **pprog)
368{
369 u8 *prog = *pprog;
370
371 EMIT2(0x41, 0x5C); /* pop r12 */
372 *pprog = prog;
373}
374
375static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
376{
377 u8 *prog = *pprog;
378
379 if (callee_regs_used[3])
380 EMIT2(0x41, 0x5F); /* pop r15 */
381 if (callee_regs_used[2])
382 EMIT2(0x41, 0x5E); /* pop r14 */
383 if (callee_regs_used[1])
384 EMIT2(0x41, 0x5D); /* pop r13 */
385 if (callee_regs_used[0])
386 EMIT1(0x5B); /* pop rbx */
387 *pprog = prog;
388}
389
390static void emit_nops(u8 **pprog, int len)
391{
392 u8 *prog = *pprog;
393 int i, noplen;
394
395 while (len > 0) {
396 noplen = len;
397
398 if (noplen > ASM_NOP_MAX)
399 noplen = ASM_NOP_MAX;
400
401 for (i = 0; i < noplen; i++)
402 EMIT1(x86_nops[noplen][i]);
403 len -= noplen;
404 }
405
406 *pprog = prog;
407}
408
409/*
410 * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT
411 * in arch/x86/kernel/alternative.c
412 */
413
414static void emit_fineibt(u8 **pprog, u32 hash)
415{
416 u8 *prog = *pprog;
417
418 EMIT_ENDBR();
419 EMIT3_off32(0x41, 0x81, 0xea, hash); /* subl $hash, %r10d */
420 EMIT2(0x74, 0x07); /* jz.d8 +7 */
421 EMIT2(0x0f, 0x0b); /* ud2 */
422 EMIT1(0x90); /* nop */
423 EMIT_ENDBR_POISON();
424
425 *pprog = prog;
426}
427
428static void emit_kcfi(u8 **pprog, u32 hash)
429{
430 u8 *prog = *pprog;
431
432 EMIT1_off32(0xb8, hash); /* movl $hash, %eax */
433#ifdef CONFIG_CALL_PADDING
434 EMIT1(0x90);
435 EMIT1(0x90);
436 EMIT1(0x90);
437 EMIT1(0x90);
438 EMIT1(0x90);
439 EMIT1(0x90);
440 EMIT1(0x90);
441 EMIT1(0x90);
442 EMIT1(0x90);
443 EMIT1(0x90);
444 EMIT1(0x90);
445#endif
446 EMIT_ENDBR();
447
448 *pprog = prog;
449}
450
451static void emit_cfi(u8 **pprog, u32 hash)
452{
453 u8 *prog = *pprog;
454
455 switch (cfi_mode) {
456 case CFI_FINEIBT:
457 emit_fineibt(&prog, hash);
458 break;
459
460 case CFI_KCFI:
461 emit_kcfi(&prog, hash);
462 break;
463
464 default:
465 EMIT_ENDBR();
466 break;
467 }
468
469 *pprog = prog;
470}
471
472static void emit_prologue_tail_call(u8 **pprog, bool is_subprog)
473{
474 u8 *prog = *pprog;
475
476 if (!is_subprog) {
477 /* cmp rax, MAX_TAIL_CALL_CNT */
478 EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT);
479 EMIT2(X86_JA, 6); /* ja 6 */
480 /* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT.
481 * case1: entry of main prog.
482 * case2: tail callee of main prog.
483 */
484 EMIT1(0x50); /* push rax */
485 /* Make rax as tail_call_cnt_ptr. */
486 EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */
487 EMIT2(0xEB, 1); /* jmp 1 */
488 /* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT.
489 * case: tail callee of subprog.
490 */
491 EMIT1(0x50); /* push rax */
492 /* push tail_call_cnt_ptr */
493 EMIT1(0x50); /* push rax */
494 } else { /* is_subprog */
495 /* rax is tail_call_cnt_ptr. */
496 EMIT1(0x50); /* push rax */
497 EMIT1(0x50); /* push rax */
498 }
499
500 *pprog = prog;
501}
502
503/*
504 * Emit x86-64 prologue code for BPF program.
505 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
506 * while jumping to another program
507 */
508static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
509 bool tail_call_reachable, bool is_subprog,
510 bool is_exception_cb)
511{
512 u8 *prog = *pprog;
513
514 emit_cfi(&prog, is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash);
515 /* BPF trampoline can be made to work without these nops,
516 * but let's waste 5 bytes for now and optimize later
517 */
518 emit_nops(&prog, X86_PATCH_SIZE);
519 if (!ebpf_from_cbpf) {
520 if (tail_call_reachable && !is_subprog)
521 /* When it's the entry of the whole tailcall context,
522 * zeroing rax means initialising tail_call_cnt.
523 */
524 EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */
525 else
526 /* Keep the same instruction layout. */
527 emit_nops(&prog, 3); /* nop3 */
528 }
529 /* Exception callback receives FP as third parameter */
530 if (is_exception_cb) {
531 EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */
532 EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */
533 /* The main frame must have exception_boundary as true, so we
534 * first restore those callee-saved regs from stack, before
535 * reusing the stack frame.
536 */
537 pop_callee_regs(&prog, all_callee_regs_used);
538 pop_r12(&prog);
539 /* Reset the stack frame. */
540 EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */
541 } else {
542 EMIT1(0x55); /* push rbp */
543 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
544 }
545
546 /* X86_TAIL_CALL_OFFSET is here */
547 EMIT_ENDBR();
548
549 /* sub rsp, rounded_stack_depth */
550 if (stack_depth)
551 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
552 if (tail_call_reachable)
553 emit_prologue_tail_call(&prog, is_subprog);
554 *pprog = prog;
555}
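/*
 * Back-of-the-envelope check (illustrative only, assuming an IBT build so
 * ENDBR_INSN_SIZE is 4) that the fixed part of the common prologue above --
 * 5 patchable nops, the 3-byte xor/nop3 slot, push rbp, mov rbp,rsp, plus the
 * ENDBR at the "X86_TAIL_CALL_OFFSET is here" marker -- adds up to the
 * X86_TAIL_CALL_OFFSET bytes that a tail call skips.
 */
#if 0	/* illustrative userspace program, compile separately */
#include <stdio.h>

int main(void)
{
	int endbr_insn_size = 4;	/* ENDBR_INSN_SIZE with CONFIG_X86_KERNEL_IBT */
	int patch_nops      = 5;	/* X86_PATCH_SIZE                             */
	int tcc_init        = 3;	/* xor rax, rax  (or nop3)                    */
	int push_rbp        = 1;	/* push rbp                                   */
	int mov_rbp_rsp     = 3;	/* mov rbp, rsp                               */

	printf("X86_TAIL_CALL_OFFSET = %d\n",
	       patch_nops + tcc_init + push_rbp + mov_rbp_rsp + endbr_insn_size);
	/* prints 16, i.e. 12 + ENDBR_INSN_SIZE */
	return 0;
}
#endif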
556
557static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
558{
559 u8 *prog = *pprog;
560 s64 offset;
561
562 offset = func - (ip + X86_PATCH_SIZE);
563 if (!is_simm32(offset)) {
564 pr_err("Target call %p is out of range\n", func);
565 return -ERANGE;
566 }
567 EMIT1_off32(opcode, offset);
568 *pprog = prog;
569 return 0;
570}
571
572static int emit_call(u8 **pprog, void *func, void *ip)
573{
574 return emit_patch(pprog, func, ip, 0xE8);
575}
576
577static int emit_rsb_call(u8 **pprog, void *func, void *ip)
578{
579 OPTIMIZER_HIDE_VAR(func);
580 ip += x86_call_depth_emit_accounting(pprog, func, ip);
581 return emit_patch(pprog, func, ip, 0xE8);
582}
583
584static int emit_jump(u8 **pprog, void *func, void *ip)
585{
586 return emit_patch(pprog, func, ip, 0xE9);
587}
588
589static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
590 void *old_addr, void *new_addr)
591{
592 const u8 *nop_insn = x86_nops[5];
593 u8 old_insn[X86_PATCH_SIZE];
594 u8 new_insn[X86_PATCH_SIZE];
595 u8 *prog;
596 int ret;
597
598 memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
599 if (old_addr) {
600 prog = old_insn;
601 ret = t == BPF_MOD_CALL ?
602 emit_call(&prog, old_addr, ip) :
603 emit_jump(&prog, old_addr, ip);
604 if (ret)
605 return ret;
606 }
607
608 memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
609 if (new_addr) {
610 prog = new_insn;
611 ret = t == BPF_MOD_CALL ?
612 emit_call(&prog, new_addr, ip) :
613 emit_jump(&prog, new_addr, ip);
614 if (ret)
615 return ret;
616 }
617
618 ret = -EBUSY;
619 mutex_lock(&text_mutex);
620 if (memcmp(ip, old_insn, X86_PATCH_SIZE))
621 goto out;
622 ret = 1;
623 if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
624 text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
625 ret = 0;
626 }
627out:
628 mutex_unlock(&text_mutex);
629 return ret;
630}
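/*
 * Simplified userspace model (not kernel code) of the safety check in
 * __bpf_arch_text_poke(): the 5 bytes currently at the patch site must match
 * what the caller believes is there (a nop5, or a call/jmp to old_addr)
 * before the new instruction is written.  memcpy() stands in for
 * text_poke_bp(); the return values only mimic the kernel's behaviour.
 */
#if 0	/* illustrative userspace program, compile separately */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define PATCH_SIZE 5

static const unsigned char nop5[PATCH_SIZE] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

static int poke(unsigned char *ip, const unsigned char *old_insn,
		const unsigned char *new_insn)
{
	if (memcmp(ip, old_insn, PATCH_SIZE))
		return -EBUSY;				/* site is not what we expected */
	if (memcmp(ip, new_insn, PATCH_SIZE))
		memcpy(ip, new_insn, PATCH_SIZE);	/* "text_poke_bp()" */
	return 0;
}

int main(void)
{
	unsigned char site[PATCH_SIZE];
	unsigned char call[PATCH_SIZE] = { 0xe8, 0x10, 0x00, 0x00, 0x00 };

	memcpy(site, nop5, PATCH_SIZE);			/* freshly JITed site      */
	printf("attach: %d\n", poke(site, nop5, call));	/* 0: nop5 -> call patched */
	printf("again : %d\n", poke(site, nop5, call));	/* -EBUSY: no longer nop5  */
	return 0;
}
#endif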
631
632int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
633 void *old_addr, void *new_addr)
634{
635 if (!is_kernel_text((long)ip) &&
636 !is_bpf_text_address((long)ip))
637 /* BPF poking in modules is not supported */
638 return -EINVAL;
639
640 /*
641 * See emit_prologue(), for IBT builds the trampoline hook is preceded
642 * with an ENDBR instruction.
643 */
644 if (is_endbr(*(u32 *)ip))
645 ip += ENDBR_INSN_SIZE;
646
647 return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
648}
649
650#define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8)
651
652static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
653{
654 u8 *prog = *pprog;
655
656 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
657 EMIT_LFENCE();
658 EMIT2(0xFF, 0xE0 + reg);
659 } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
660 OPTIMIZER_HIDE_VAR(reg);
661 if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
662 emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip);
663 else
664 emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
665 } else {
666 EMIT2(0xFF, 0xE0 + reg); /* jmp *%\reg */
667 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS))
668 EMIT1(0xCC); /* int3 */
669 }
670
671 *pprog = prog;
672}
673
674static void emit_return(u8 **pprog, u8 *ip)
675{
676 u8 *prog = *pprog;
677
678 if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
679 emit_jump(&prog, x86_return_thunk, ip);
680 } else {
681 EMIT1(0xC3); /* ret */
682 if (IS_ENABLED(CONFIG_MITIGATION_SLS))
683 EMIT1(0xCC); /* int3 */
684 }
685
686 *pprog = prog;
687}
688
689#define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (-16 - round_up(stack, 8))
690
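/*
 * Illustration (not kernel code) of where the tail_call_cnt_ptr slot lands
 * relative to rbp: the prologue first does "sub rsp, round_up(stack_depth, 8)"
 * and emit_prologue_tail_call() then pushes two 8-byte slots below the BPF
 * stack; the lower one always holds tail_call_cnt_ptr and is what the macro
 * above (and LOAD_TAIL_CALL_CNT_PTR later) reads back.  round_up8() is a
 * local stand-in for round_up().
 */
#if 0	/* illustrative userspace program, compile separately */
#include <stdio.h>

static int round_up8(int x)
{
	return (x + 7) & ~7;
}

#define TCC_PTR_STACK_OFF(stack) (-16 - round_up8(stack))

int main(void)
{
	int stack_depth = 20;			/* hypothetical program stack usage */

	printf("BPF stack    : [rbp-%d, rbp)\n", round_up8(stack_depth));
	printf("tcc_ptr slot : rbp%d\n", TCC_PTR_STACK_OFF(stack_depth));
	/* prints rbp-24 for the stack bottom and rbp-40 for the slot */
	return 0;
}
#endif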
691/*
692 * Generate the following code:
693 *
694 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
695 * if (index >= array->map.max_entries)
696 * goto out;
697 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
698 * goto out;
699 * prog = array->ptrs[index];
700 * if (prog == NULL)
701 * goto out;
702 * goto *(prog->bpf_func + prologue_size);
703 * out:
704 */
705static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog,
706 u8 **pprog, bool *callee_regs_used,
707 u32 stack_depth, u8 *ip,
708 struct jit_context *ctx)
709{
710 int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
711 u8 *prog = *pprog, *start = *pprog;
712 int offset;
713
714 /*
715 * rdi - pointer to ctx
716 * rsi - pointer to bpf_array
717 * rdx - index in bpf_array
718 */
719
720 /*
721 * if (index >= array->map.max_entries)
722 * goto out;
723 */
724 EMIT2(0x89, 0xD2); /* mov edx, edx */
725 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
726 offsetof(struct bpf_array, map.max_entries));
727
728 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
729 EMIT2(X86_JBE, offset); /* jbe out */
730
731 /*
732 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
733 * goto out;
734 */
735 EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
736 EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
737
738 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
739 EMIT2(X86_JAE, offset); /* jae out */
740
741 /* prog = array->ptrs[index]; */
742 EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
743 offsetof(struct bpf_array, ptrs));
744
745 /*
746 * if (prog == NULL)
747 * goto out;
748 */
749 EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */
750
751 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
752 EMIT2(X86_JE, offset); /* je out */
753
754 /* Inc tail_call_cnt if the slot is populated. */
755 EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */
756
757 if (bpf_prog->aux->exception_boundary) {
758 pop_callee_regs(&prog, all_callee_regs_used);
759 pop_r12(&prog);
760 } else {
761 pop_callee_regs(&prog, callee_regs_used);
762 if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
763 pop_r12(&prog);
764 }
765
766 /* Pop tail_call_cnt_ptr. */
767 EMIT1(0x58); /* pop rax */
768 /* Pop tail_call_cnt, if it's main prog.
769 * Pop tail_call_cnt_ptr, if it's subprog.
770 */
771 EMIT1(0x58); /* pop rax */
772 if (stack_depth)
773 EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */
774 round_up(stack_depth, 8));
775
776 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
777 EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */
778 offsetof(struct bpf_prog, bpf_func));
779 EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */
780 X86_TAIL_CALL_OFFSET);
781 /*
782 * Now we're ready to jump into next BPF program
783 * rdi == ctx (1st arg)
784 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
785 */
786 emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
787
788 /* out: */
789 ctx->tail_call_indirect_label = prog - start;
790 *pprog = prog;
791}
792
793static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog,
794 struct bpf_jit_poke_descriptor *poke,
795 u8 **pprog, u8 *ip,
796 bool *callee_regs_used, u32 stack_depth,
797 struct jit_context *ctx)
798{
799 int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth);
800 u8 *prog = *pprog, *start = *pprog;
801 int offset;
802
803 /*
804 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
805 * goto out;
806 */
807 EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
808 EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
809
810 offset = ctx->tail_call_direct_label - (prog + 2 - start);
811 EMIT2(X86_JAE, offset); /* jae out */
812
813 poke->tailcall_bypass = ip + (prog - start);
814 poke->adj_off = X86_TAIL_CALL_OFFSET;
815 poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
816 poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
817
818 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
819 poke->tailcall_bypass);
820
821 /* Inc tail_call_cnt if the slot is populated. */
822 EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */
823
824 if (bpf_prog->aux->exception_boundary) {
825 pop_callee_regs(&prog, all_callee_regs_used);
826 pop_r12(&prog);
827 } else {
828 pop_callee_regs(&prog, callee_regs_used);
829 if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))
830 pop_r12(&prog);
831 }
832
833 /* Pop tail_call_cnt_ptr. */
834 EMIT1(0x58); /* pop rax */
835 /* Pop tail_call_cnt, if it's main prog.
836 * Pop tail_call_cnt_ptr, if it's subprog.
837 */
838 EMIT1(0x58); /* pop rax */
839 if (stack_depth)
840 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
841
842 emit_nops(&prog, X86_PATCH_SIZE);
843
844 /* out: */
845 ctx->tail_call_direct_label = prog - start;
846
847 *pprog = prog;
848}
849
850static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
851{
852 struct bpf_jit_poke_descriptor *poke;
853 struct bpf_array *array;
854 struct bpf_prog *target;
855 int i, ret;
856
857 for (i = 0; i < prog->aux->size_poke_tab; i++) {
858 poke = &prog->aux->poke_tab[i];
859 if (poke->aux && poke->aux != prog->aux)
860 continue;
861
862 WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
863
864 if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
865 continue;
866
867 array = container_of(poke->tail_call.map, struct bpf_array, map);
868 mutex_lock(&array->aux->poke_mutex);
869 target = array->ptrs[poke->tail_call.key];
870 if (target) {
871 ret = __bpf_arch_text_poke(poke->tailcall_target,
872 BPF_MOD_JUMP, NULL,
873 (u8 *)target->bpf_func +
874 poke->adj_off);
875 BUG_ON(ret < 0);
876 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
877 BPF_MOD_JUMP,
878 (u8 *)poke->tailcall_target +
879 X86_PATCH_SIZE, NULL);
880 BUG_ON(ret < 0);
881 }
882 WRITE_ONCE(poke->tailcall_target_stable, true);
883 mutex_unlock(&array->aux->poke_mutex);
884 }
885}
886
887static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
888 u32 dst_reg, const u32 imm32)
889{
890 u8 *prog = *pprog;
891 u8 b1, b2, b3;
892
893 /*
894 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
895 * (which zero-extends imm32) to save 2 bytes.
896 */
897 if (sign_propagate && (s32)imm32 < 0) {
898 /* 'mov %rax, imm32' sign extends imm32 */
899 b1 = add_1mod(0x48, dst_reg);
900 b2 = 0xC7;
901 b3 = 0xC0;
902 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
903 goto done;
904 }
905
906 /*
907 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
908 * to save 3 bytes.
909 */
910 if (imm32 == 0) {
911 if (is_ereg(dst_reg))
912 EMIT1(add_2mod(0x40, dst_reg, dst_reg));
913 b2 = 0x31; /* xor */
914 b3 = 0xC0;
915 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
916 goto done;
917 }
918
919 /* mov %eax, imm32 */
920 if (is_ereg(dst_reg))
921 EMIT1(add_1mod(0x40, dst_reg));
922 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
923done:
924 *pprog = prog;
925}
926
927static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
928 const u32 imm32_hi, const u32 imm32_lo)
929{
930 u64 imm64 = ((u64)imm32_hi << 32) | (u32)imm32_lo;
931 u8 *prog = *pprog;
932
933 if (is_uimm32(imm64)) {
934 /*
 * For emitting plain u32, where the sign bit must not be
 * propagated, LLVM tends to load imm64 over mov32
 * directly, so save a couple of bytes by just doing
 * 'mov %eax, imm32' instead.
939 */
940 emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
941 } else if (is_simm32(imm64)) {
942 emit_mov_imm32(&prog, true, dst_reg, imm32_lo);
943 } else {
944 /* movabsq rax, imm64 */
945 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
946 EMIT(imm32_lo, 4);
947 EMIT(imm32_hi, 4);
948 }
949
950 *pprog = prog;
951}
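/*
 * Standalone sketch (not kernel code) of the encoding choice emit_mov_imm64()
 * makes: values that fit in u32 use the 5/6-byte "mov r32, imm32" (the upper
 * half is implicitly zeroed), values that fit in s32 use the 7-byte
 * sign-extending "mov r/m64, imm32", and everything else needs the 10-byte
 * movabs with a full imm64.  The *_demo helpers are local stand-ins.
 */
#if 0	/* illustrative userspace program, compile separately */
#include <stdio.h>
#include <stdint.h>

static int is_uimm32_demo(uint64_t v) { return v == (uint64_t)(uint32_t)v; }
static int is_simm32_demo(int64_t v)  { return v == (int64_t)(int32_t)v; }

static const char *imm64_strategy(uint64_t v)
{
	if (is_uimm32_demo(v))
		return "mov eax, imm32     (zero-extends)";
	if (is_simm32_demo((int64_t)v))
		return "mov rax, imm32     (sign-extends)";
	return "movabs rax, imm64  (full 64-bit)";
}

int main(void)
{
	uint64_t samples[] = { 0x1234, 0xffffffffffffff85ull, 0x123456789abcull };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%#18llx -> %s\n",
		       (unsigned long long)samples[i], imm64_strategy(samples[i]));
	return 0;
}
#endif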
952
953static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
954{
955 u8 *prog = *pprog;
956
957 if (is64) {
958 /* mov dst, src */
959 EMIT_mov(dst_reg, src_reg);
960 } else {
961 /* mov32 dst, src */
962 if (is_ereg(dst_reg) || is_ereg(src_reg))
963 EMIT1(add_2mod(0x40, dst_reg, src_reg));
964 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
965 }
966
967 *pprog = prog;
968}
969
970static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg,
971 u32 src_reg)
972{
973 u8 *prog = *pprog;
974
975 if (is64) {
976 /* movs[b,w,l]q dst, src */
977 if (num_bits == 8)
978 EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe,
979 add_2reg(0xC0, src_reg, dst_reg));
980 else if (num_bits == 16)
981 EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf,
982 add_2reg(0xC0, src_reg, dst_reg));
983 else if (num_bits == 32)
984 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63,
985 add_2reg(0xC0, src_reg, dst_reg));
986 } else {
987 /* movs[b,w]l dst, src */
988 if (num_bits == 8) {
989 EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe,
990 add_2reg(0xC0, src_reg, dst_reg));
991 } else if (num_bits == 16) {
992 if (is_ereg(dst_reg) || is_ereg(src_reg))
993 EMIT1(add_2mod(0x40, src_reg, dst_reg));
994 EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf,
995 add_2reg(0xC0, src_reg, dst_reg));
996 }
997 }
998
999 *pprog = prog;
1000}
1001
1002/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
1003static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
1004{
1005 u8 *prog = *pprog;
1006
1007 if (is_imm8(off)) {
1008 /* 1-byte signed displacement.
1009 *
 * If off == 0 we could skip this and save one extra byte, but the
 * special case of x86 R13, which always needs an offset, is not
 * worth the hassle.
1013 */
1014 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
1015 } else {
1016 /* 4-byte signed displacement */
1017 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
1018 }
1019 *pprog = prog;
1020}
1021
1022static void emit_insn_suffix_SIB(u8 **pprog, u32 ptr_reg, u32 val_reg, u32 index_reg, int off)
1023{
1024 u8 *prog = *pprog;
1025
1026 if (is_imm8(off)) {
1027 EMIT3(add_2reg(0x44, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
1028 } else {
1029 EMIT2_off32(add_2reg(0x84, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off);
1030 }
1031 *pprog = prog;
1032}
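/*
 * Userspace sketch (not kernel code) of the displacement-size choice in
 * emit_insn_suffix(): offsets in [-128, 127] get ModRM mod=01 plus a 1-byte
 * displacement, anything larger gets mod=10 plus a 4-byte displacement.
 * Shown for "mov rax, qword ptr [rdi + off]" (48 8b /r); the encoder below
 * is a local stand-in with rax/rdi hard-coded.
 */
#if 0	/* illustrative userspace program, compile separately */
#include <stdio.h>
#include <string.h>

static int is_imm8_demo(int v) { return v <= 127 && v >= -128; }

static int encode_load_rax_rdi(unsigned char *p, int off)
{
	int n = 0;

	p[n++] = 0x48;				/* REX.W                   */
	p[n++] = 0x8B;				/* mov r64, r/m64          */
	if (is_imm8_demo(off)) {
		p[n++] = 0x47;			/* mod=01, reg=rax, rm=rdi */
		p[n++] = (unsigned char)off;	/* disp8                   */
	} else {
		p[n++] = 0x87;			/* mod=10, reg=rax, rm=rdi */
		memcpy(p + n, &off, 4);		/* disp32, little-endian   */
		n += 4;
	}
	return n;
}

int main(void)
{
	unsigned char buf[8];
	int off[] = { 0x10, 0x1234 };

	for (int i = 0; i < 2; i++) {
		int n = encode_load_rax_rdi(buf, off[i]);

		printf("off=%#6x:", off[i]);
		for (int j = 0; j < n; j++)
			printf(" %02x", buf[j]);
		printf("\n");
	}
	/* off=0x10:   48 8b 47 10
	 * off=0x1234: 48 8b 87 34 12 00 00 */
	return 0;
}
#endif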
1033
1034/*
1035 * Emit a REX byte if it will be necessary to address these registers
1036 */
1037static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
1038{
1039 u8 *prog = *pprog;
1040
1041 if (is64)
1042 EMIT1(add_2mod(0x48, dst_reg, src_reg));
1043 else if (is_ereg(dst_reg) || is_ereg(src_reg))
1044 EMIT1(add_2mod(0x40, dst_reg, src_reg));
1045 *pprog = prog;
1046}
1047
1048/*
1049 * Similar version of maybe_emit_mod() for a single register
1050 */
1051static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
1052{
1053 u8 *prog = *pprog;
1054
1055 if (is64)
1056 EMIT1(add_1mod(0x48, reg));
1057 else if (is_ereg(reg))
1058 EMIT1(add_1mod(0x40, reg));
1059 *pprog = prog;
1060}
1061
1062/* LDX: dst_reg = *(u8*)(src_reg + off) */
1063static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1064{
1065 u8 *prog = *pprog;
1066
1067 switch (size) {
1068 case BPF_B:
1069 /* Emit 'movzx rax, byte ptr [rax + off]' */
1070 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
1071 break;
1072 case BPF_H:
1073 /* Emit 'movzx rax, word ptr [rax + off]' */
1074 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
1075 break;
1076 case BPF_W:
1077 /* Emit 'mov eax, dword ptr [rax+0x14]' */
1078 if (is_ereg(dst_reg) || is_ereg(src_reg))
1079 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
1080 else
1081 EMIT1(0x8B);
1082 break;
1083 case BPF_DW:
1084 /* Emit 'mov rax, qword ptr [rax+0x14]' */
1085 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
1086 break;
1087 }
1088 emit_insn_suffix(&prog, src_reg, dst_reg, off);
1089 *pprog = prog;
1090}
1091
1092/* LDSX: dst_reg = *(s8*)(src_reg + off) */
1093static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1094{
1095 u8 *prog = *pprog;
1096
1097 switch (size) {
1098 case BPF_B:
1099 /* Emit 'movsx rax, byte ptr [rax + off]' */
1100 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE);
1101 break;
1102 case BPF_H:
1103 /* Emit 'movsx rax, word ptr [rax + off]' */
1104 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF);
1105 break;
1106 case BPF_W:
1107 /* Emit 'movsx rax, dword ptr [rax+0x14]' */
1108 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63);
1109 break;
1110 }
1111 emit_insn_suffix(&prog, src_reg, dst_reg, off);
1112 *pprog = prog;
1113}
1114
1115static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1116{
1117 u8 *prog = *pprog;
1118
1119 switch (size) {
1120 case BPF_B:
1121 /* movzx rax, byte ptr [rax + r12 + off] */
1122 EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB6);
1123 break;
1124 case BPF_H:
1125 /* movzx rax, word ptr [rax + r12 + off] */
1126 EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB7);
1127 break;
1128 case BPF_W:
1129 /* mov eax, dword ptr [rax + r12 + off] */
1130 EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x8B);
1131 break;
1132 case BPF_DW:
1133 /* mov rax, qword ptr [rax + r12 + off] */
1134 EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x8B);
1135 break;
1136 }
1137 emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off);
1138 *pprog = prog;
1139}
1140
1141static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1142{
1143 emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
1144}
1145
1146/* STX: *(u8*)(dst_reg + off) = src_reg */
1147static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1148{
1149 u8 *prog = *pprog;
1150
1151 switch (size) {
1152 case BPF_B:
1153 /* Emit 'mov byte ptr [rax + off], al' */
1154 if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
1155 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
1156 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
1157 else
1158 EMIT1(0x88);
1159 break;
1160 case BPF_H:
1161 if (is_ereg(dst_reg) || is_ereg(src_reg))
1162 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
1163 else
1164 EMIT2(0x66, 0x89);
1165 break;
1166 case BPF_W:
1167 if (is_ereg(dst_reg) || is_ereg(src_reg))
1168 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
1169 else
1170 EMIT1(0x89);
1171 break;
1172 case BPF_DW:
1173 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
1174 break;
1175 }
1176 emit_insn_suffix(&prog, dst_reg, src_reg, off);
1177 *pprog = prog;
1178}
1179
1180/* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */
1181static void emit_stx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1182{
1183 u8 *prog = *pprog;
1184
1185 switch (size) {
1186 case BPF_B:
1187 /* mov byte ptr [rax + r12 + off], al */
1188 EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x88);
1189 break;
1190 case BPF_H:
1191 /* mov word ptr [rax + r12 + off], ax */
1192 EMIT3(0x66, add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
1193 break;
1194 case BPF_W:
1195 /* mov dword ptr [rax + r12 + 1], eax */
1196 EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89);
1197 break;
1198 case BPF_DW:
1199 /* mov qword ptr [rax + r12 + 1], rax */
1200 EMIT2(add_3mod(0x48, dst_reg, src_reg, index_reg), 0x89);
1201 break;
1202 }
1203 emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1204 *pprog = prog;
1205}
1206
1207static void emit_stx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
1208{
1209 emit_stx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off);
1210}
1211
1212/* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */
1213static void emit_st_index(u8 **pprog, u32 size, u32 dst_reg, u32 index_reg, int off, int imm)
1214{
1215 u8 *prog = *pprog;
1216
1217 switch (size) {
1218 case BPF_B:
1219 /* mov byte ptr [rax + r12 + off], imm8 */
1220 EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC6);
1221 break;
1222 case BPF_H:
1223 /* mov word ptr [rax + r12 + off], imm16 */
1224 EMIT3(0x66, add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
1225 break;
1226 case BPF_W:
1227 /* mov dword ptr [rax + r12 + 1], imm32 */
1228 EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC7);
1229 break;
1230 case BPF_DW:
1231 /* mov qword ptr [rax + r12 + 1], imm32 */
1232 EMIT2(add_3mod(0x48, dst_reg, 0, index_reg), 0xC7);
1233 break;
1234 }
1235 emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off);
1236 EMIT(imm, bpf_size_to_x86_bytes(size));
1237 *pprog = prog;
1238}
1239
1240static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm)
1241{
1242 emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm);
1243}
1244
1245static int emit_atomic(u8 **pprog, u8 atomic_op,
1246 u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
1247{
1248 u8 *prog = *pprog;
1249
1250 EMIT1(0xF0); /* lock prefix */
1251
1252 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
1253
1254 /* emit opcode */
1255 switch (atomic_op) {
1256 case BPF_ADD:
1257 case BPF_AND:
1258 case BPF_OR:
1259 case BPF_XOR:
1260 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
1261 EMIT1(simple_alu_opcodes[atomic_op]);
1262 break;
1263 case BPF_ADD | BPF_FETCH:
1264 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
1265 EMIT2(0x0F, 0xC1);
1266 break;
1267 case BPF_XCHG:
1268 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
1269 EMIT1(0x87);
1270 break;
1271 case BPF_CMPXCHG:
1272 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
1273 EMIT2(0x0F, 0xB1);
1274 break;
1275 default:
1276 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
1277 return -EFAULT;
1278 }
1279
1280 emit_insn_suffix(&prog, dst_reg, src_reg, off);
1281
1282 *pprog = prog;
1283 return 0;
1284}
1285
1286static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size,
1287 u32 dst_reg, u32 src_reg, u32 index_reg, int off)
1288{
1289 u8 *prog = *pprog;
1290
1291 EMIT1(0xF0); /* lock prefix */
1292 switch (size) {
1293 case BPF_W:
1294 EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg));
1295 break;
1296 case BPF_DW:
1297 EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg));
1298 break;
1299 default:
1300 pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n");
1301 return -EFAULT;
1302 }
1303
1304 /* emit opcode */
1305 switch (atomic_op) {
1306 case BPF_ADD:
1307 case BPF_AND:
1308 case BPF_OR:
1309 case BPF_XOR:
1310 /* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */
1311 EMIT1(simple_alu_opcodes[atomic_op]);
1312 break;
1313 case BPF_ADD | BPF_FETCH:
1314 /* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */
1315 EMIT2(0x0F, 0xC1);
1316 break;
1317 case BPF_XCHG:
1318 /* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */
1319 EMIT1(0x87);
1320 break;
1321 case BPF_CMPXCHG:
1322 /* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */
1323 EMIT2(0x0F, 0xB1);
1324 break;
1325 default:
1326 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
1327 return -EFAULT;
1328 }
1329 emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off);
1330 *pprog = prog;
1331 return 0;
1332}
1333
1334#define DONT_CLEAR 1
1335
1336bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
1337{
1338 u32 reg = x->fixup >> 8;
1339
1340 /* jump over faulting load and clear dest register */
1341 if (reg != DONT_CLEAR)
1342 *(unsigned long *)((void *)regs + reg) = 0;
1343 regs->ip += x->fixup & 0xff;
1344 return true;
1345}
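/*
 * Illustrative decoder (not kernel code) for the ex->fixup word the JIT
 * builds for PROBE_MEM accesses: the low 8 bits hold the length of the JITed
 * x86 load (so the handler can skip past it), the upper bits hold the
 * pt_regs byte offset of the destination register to clear, or DONT_CLEAR
 * for stores.  The offset value 80 below is made up.
 */
#if 0	/* illustrative userspace program, compile separately */
#include <stdio.h>

#define DONT_CLEAR 1

int main(void)
{
	unsigned int insn_len = 4;		/* size of the faulting x86 load    */
	unsigned int reg_off  = 80;		/* hypothetical pt_regs byte offset */
	unsigned int fixup    = insn_len | (reg_off << 8);

	unsigned int skip = fixup & 0xff;
	unsigned int reg  = fixup >> 8;

	printf("advance ip by %u bytes, %s\n", skip,
	       reg == DONT_CLEAR ? "leave registers alone"
				 : "zero the destination register");
	return 0;
}
#endif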
1346
1347static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
1348 bool *regs_used)
1349{
1350 int i;
1351
1352 for (i = 1; i <= insn_cnt; i++, insn++) {
1353 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
1354 regs_used[0] = true;
1355 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
1356 regs_used[1] = true;
1357 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
1358 regs_used[2] = true;
1359 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
1360 regs_used[3] = true;
1361 }
1362}
1363
1364/* emit the 3-byte VEX prefix
1365 *
1366 * r: same as rex.r, extra bit for ModRM reg field
1367 * x: same as rex.x, extra bit for SIB index field
1368 * b: same as rex.b, extra bit for ModRM r/m, or SIB base
1369 * m: opcode map select, encoding escape bytes e.g. 0x0f38
1370 * w: same as rex.w (32 bit or 64 bit) or opcode specific
1371 * src_reg2: additional source reg (encoded as BPF reg)
1372 * l: vector length (128 bit or 256 bit) or reserved
1373 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
1374 */
1375static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
1376 bool w, u8 src_reg2, bool l, u8 pp)
1377{
1378 u8 *prog = *pprog;
1379 const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
1380 u8 b1, b2;
1381 u8 vvvv = reg2hex[src_reg2];
1382
1383 /* reg2hex gives only the lower 3 bit of vvvv */
1384 if (is_ereg(src_reg2))
1385 vvvv |= 1 << 3;
1386
1387 /*
1388 * 2nd byte of 3-byte VEX prefix
1389 * ~ means bit inverted encoding
1390 *
1391 * 7 0
1392 * +---+---+---+---+---+---+---+---+
1393 * |~R |~X |~B | m |
1394 * +---+---+---+---+---+---+---+---+
1395 */
1396 b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
1397 /*
1398 * 3rd byte of 3-byte VEX prefix
1399 *
1400 * 7 0
1401 * +---+---+---+---+---+---+---+---+
1402 * | W | ~vvvv | L | pp |
1403 * +---+---+---+---+---+---+---+---+
1404 */
1405 b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);
1406
1407 EMIT3(b0, b1, b2);
1408 *pprog = prog;
1409}
1410
1411/* emit BMI2 shift instruction */
1412static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
1413{
1414 u8 *prog = *pprog;
1415 bool r = is_ereg(dst_reg);
1416 u8 m = 2; /* escape code 0f38 */
1417
1418 emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
1419 EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
1420 *pprog = prog;
1421}
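/*
 * Standalone check (not kernel code) of the VEX byte packing in emit_3vex()
 * for the BMI2 fast path: encode "shlx rax, rax, rsi", which emit_shiftx()
 * would produce for a 64-bit BPF_LSH with the count in BPF_REG_2 (rsi).
 * emit_3vex_demo() mirrors the bit layout documented above.
 */
#if 0	/* illustrative userspace program, compile separately */
#include <stdio.h>

static void emit_3vex_demo(unsigned char out[3], int r, int x, int b,
			   unsigned char m, int w, unsigned char vvvv,
			   int l, unsigned char pp)
{
	out[0] = 0xc4;					/* 3-byte VEX escape */
	out[1] = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
	out[2] = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);
}

int main(void)
{
	unsigned char vex[3];

	/* dst = rax (not extended), count reg rsi has reg2hex value 6,
	 * m = 2 (0f38 map), w = 1 (64-bit), l = 0, pp = 1 (0x66 => shlx) */
	emit_3vex_demo(vex, 0, 0, 0, 2, 1, 6, 0, 1);

	printf("%02x %02x %02x f7 c0\n", vex[0], vex[1], vex[2]);
	/* c4 e2 c9 f7 c0  ==  shlx rax, rax, rsi */
	return 0;
}
#endif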
1422
1423static void emit_priv_frame_ptr(u8 **pprog, void __percpu *priv_frame_ptr)
1424{
1425 u8 *prog = *pprog;
1426
1427 /* movabs r9, priv_frame_ptr */
1428 emit_mov_imm64(&prog, X86_REG_R9, (__force long) priv_frame_ptr >> 32,
1429 (u32) (__force long) priv_frame_ptr);
1430
1431#ifdef CONFIG_SMP
1432 /* add <r9>, gs:[<off>] */
1433 EMIT2(0x65, 0x4c);
1434 EMIT3(0x03, 0x0c, 0x25);
1435 EMIT((u32)(unsigned long)&this_cpu_off, 4);
1436#endif
1437
1438 *pprog = prog;
1439}
1440
1441#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
1442
1443#define __LOAD_TCC_PTR(off) \
1444 EMIT3_off32(0x48, 0x8B, 0x85, off)
1445/* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */
1446#define LOAD_TAIL_CALL_CNT_PTR(stack) \
1447 __LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))
1448
1449/* Memory size/value to protect private stack overflow/underflow */
1450#define PRIV_STACK_GUARD_SZ 8
1451#define PRIV_STACK_GUARD_VAL 0xEB9F12345678eb9fULL
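/*
 * Layout sketch (not kernel code) for the private-stack case handled in
 * do_jit() below: the per-CPU allocation starts with an 8-byte guard filled
 * with PRIV_STACK_GUARD_VAL, followed by the program's rounded stack, and
 * priv_frame_ptr (loaded into r9 by emit_priv_frame_ptr()) points just past
 * that region so BPF_REG_FP accesses can be redirected to it with negative
 * offsets.  Only the leading guard from the do_jit() formula is shown.
 */
#if 0	/* illustrative userspace program, compile separately */
#include <stdio.h>

#define GUARD_SZ 8

static unsigned int round_up8(unsigned int x) { return (x + 7) & ~7u; }

int main(void)
{
	unsigned int stack_depth = 100;		/* hypothetical */
	unsigned int frame_off = GUARD_SZ + round_up8(stack_depth);

	printf("guard     : bytes [0, %d)\n", GUARD_SZ);
	printf("BPF stack : bytes [%d, %u)\n", GUARD_SZ, frame_off);
	printf("priv_frame_ptr = priv_stack_ptr + %u\n", frame_off);
	return 0;
}
#endif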
1452
1453static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
1454 int oldproglen, struct jit_context *ctx, bool jmp_padding)
1455{
1456 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
1457 struct bpf_insn *insn = bpf_prog->insnsi;
1458 bool callee_regs_used[4] = {};
1459 int insn_cnt = bpf_prog->len;
1460 bool seen_exit = false;
1461 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
1462 void __percpu *priv_frame_ptr = NULL;
1463 u64 arena_vm_start, user_vm_start;
1464 void __percpu *priv_stack_ptr;
1465 int i, excnt = 0;
1466 int ilen, proglen = 0;
1467 u8 *prog = temp;
1468 u32 stack_depth;
1469 int err;
1470
1471 stack_depth = bpf_prog->aux->stack_depth;
1472 priv_stack_ptr = bpf_prog->aux->priv_stack_ptr;
1473 if (priv_stack_ptr) {
1474 priv_frame_ptr = priv_stack_ptr + PRIV_STACK_GUARD_SZ + round_up(stack_depth, 8);
1475 stack_depth = 0;
1476 }
1477
1478 arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);
1479 user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);
1480
1481 detect_reg_usage(insn, insn_cnt, callee_regs_used);
1482
1483 emit_prologue(&prog, stack_depth,
1484 bpf_prog_was_classic(bpf_prog), tail_call_reachable,
1485 bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
1486 /* Exception callback will clobber callee regs for its own use, and
1487 * restore the original callee regs from main prog's stack frame.
1488 */
1489 if (bpf_prog->aux->exception_boundary) {
1490 /* We also need to save r12, which is not mapped to any BPF
1491 * register, as we throw after entry into the kernel, which may
1492 * overwrite r12.
1493 */
1494 push_r12(&prog);
1495 push_callee_regs(&prog, all_callee_regs_used);
1496 } else {
1497 if (arena_vm_start)
1498 push_r12(&prog);
1499 push_callee_regs(&prog, callee_regs_used);
1500 }
1501 if (arena_vm_start)
1502 emit_mov_imm64(&prog, X86_REG_R12,
1503 arena_vm_start >> 32, (u32) arena_vm_start);
1504
1505 if (priv_frame_ptr)
1506 emit_priv_frame_ptr(&prog, priv_frame_ptr);
1507
1508 ilen = prog - temp;
1509 if (rw_image)
1510 memcpy(rw_image + proglen, temp, ilen);
1511 proglen += ilen;
1512 addrs[0] = proglen;
1513 prog = temp;
1514
1515 for (i = 1; i <= insn_cnt; i++, insn++) {
1516 const s32 imm32 = insn->imm;
1517 u32 dst_reg = insn->dst_reg;
1518 u32 src_reg = insn->src_reg;
1519 u8 b2 = 0, b3 = 0;
1520 u8 *start_of_ldx;
1521 s64 jmp_offset;
1522 s16 insn_off;
1523 u8 jmp_cond;
1524 u8 *func;
1525 int nops;
1526
1527 if (priv_frame_ptr) {
1528 if (src_reg == BPF_REG_FP)
1529 src_reg = X86_REG_R9;
1530
1531 if (dst_reg == BPF_REG_FP)
1532 dst_reg = X86_REG_R9;
1533 }
1534
1535 switch (insn->code) {
1536 /* ALU */
1537 case BPF_ALU | BPF_ADD | BPF_X:
1538 case BPF_ALU | BPF_SUB | BPF_X:
1539 case BPF_ALU | BPF_AND | BPF_X:
1540 case BPF_ALU | BPF_OR | BPF_X:
1541 case BPF_ALU | BPF_XOR | BPF_X:
1542 case BPF_ALU64 | BPF_ADD | BPF_X:
1543 case BPF_ALU64 | BPF_SUB | BPF_X:
1544 case BPF_ALU64 | BPF_AND | BPF_X:
1545 case BPF_ALU64 | BPF_OR | BPF_X:
1546 case BPF_ALU64 | BPF_XOR | BPF_X:
1547 maybe_emit_mod(&prog, dst_reg, src_reg,
1548 BPF_CLASS(insn->code) == BPF_ALU64);
1549 b2 = simple_alu_opcodes[BPF_OP(insn->code)];
1550 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
1551 break;
1552
1553 case BPF_ALU64 | BPF_MOV | BPF_X:
1554 if (insn_is_cast_user(insn)) {
1555 if (dst_reg != src_reg)
1556 /* 32-bit mov */
1557 emit_mov_reg(&prog, false, dst_reg, src_reg);
1558 /* shl dst_reg, 32 */
1559 maybe_emit_1mod(&prog, dst_reg, true);
1560 EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32);
1561
1562 /* or dst_reg, user_vm_start */
1563 maybe_emit_1mod(&prog, dst_reg, true);
1564 if (is_axreg(dst_reg))
1565 EMIT1_off32(0x0D, user_vm_start >> 32);
1566 else
1567 EMIT2_off32(0x81, add_1reg(0xC8, dst_reg), user_vm_start >> 32);
1568
1569 /* rol dst_reg, 32 */
1570 maybe_emit_1mod(&prog, dst_reg, true);
1571 EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32);
1572
1573 /* xor r11, r11 */
1574 EMIT3(0x4D, 0x31, 0xDB);
1575
1576 /* test dst_reg32, dst_reg32; check if lower 32-bit are zero */
1577 maybe_emit_mod(&prog, dst_reg, dst_reg, false);
1578 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1579
1580 /* cmove r11, dst_reg; if so, set dst_reg to zero */
1581 /* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */
1582 maybe_emit_mod(&prog, AUX_REG, dst_reg, true);
1583 EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg));
1584 break;
1585 } else if (insn_is_mov_percpu_addr(insn)) {
1586 /* mov <dst>, <src> (if necessary) */
1587 EMIT_mov(dst_reg, src_reg);
1588#ifdef CONFIG_SMP
1589 /* add <dst>, gs:[<off>] */
1590 EMIT2(0x65, add_1mod(0x48, dst_reg));
1591 EMIT3(0x03, add_2reg(0x04, 0, dst_reg), 0x25);
1592 EMIT((u32)(unsigned long)&this_cpu_off, 4);
1593#endif
1594 break;
1595 }
1596 fallthrough;
1597 case BPF_ALU | BPF_MOV | BPF_X:
1598 if (insn->off == 0)
1599 emit_mov_reg(&prog,
1600 BPF_CLASS(insn->code) == BPF_ALU64,
1601 dst_reg, src_reg);
1602 else
1603 emit_movsx_reg(&prog, insn->off,
1604 BPF_CLASS(insn->code) == BPF_ALU64,
1605 dst_reg, src_reg);
1606 break;
1607
1608 /* neg dst */
1609 case BPF_ALU | BPF_NEG:
1610 case BPF_ALU64 | BPF_NEG:
1611 maybe_emit_1mod(&prog, dst_reg,
1612 BPF_CLASS(insn->code) == BPF_ALU64);
1613 EMIT2(0xF7, add_1reg(0xD8, dst_reg));
1614 break;
1615
1616 case BPF_ALU | BPF_ADD | BPF_K:
1617 case BPF_ALU | BPF_SUB | BPF_K:
1618 case BPF_ALU | BPF_AND | BPF_K:
1619 case BPF_ALU | BPF_OR | BPF_K:
1620 case BPF_ALU | BPF_XOR | BPF_K:
1621 case BPF_ALU64 | BPF_ADD | BPF_K:
1622 case BPF_ALU64 | BPF_SUB | BPF_K:
1623 case BPF_ALU64 | BPF_AND | BPF_K:
1624 case BPF_ALU64 | BPF_OR | BPF_K:
1625 case BPF_ALU64 | BPF_XOR | BPF_K:
1626 maybe_emit_1mod(&prog, dst_reg,
1627 BPF_CLASS(insn->code) == BPF_ALU64);
1628
1629 /*
1630 * b3 holds 'normal' opcode, b2 short form only valid
1631 * in case dst is eax/rax.
1632 */
1633 switch (BPF_OP(insn->code)) {
1634 case BPF_ADD:
1635 b3 = 0xC0;
1636 b2 = 0x05;
1637 break;
1638 case BPF_SUB:
1639 b3 = 0xE8;
1640 b2 = 0x2D;
1641 break;
1642 case BPF_AND:
1643 b3 = 0xE0;
1644 b2 = 0x25;
1645 break;
1646 case BPF_OR:
1647 b3 = 0xC8;
1648 b2 = 0x0D;
1649 break;
1650 case BPF_XOR:
1651 b3 = 0xF0;
1652 b2 = 0x35;
1653 break;
1654 }
1655
1656 if (is_imm8(imm32))
1657 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1658 else if (is_axreg(dst_reg))
1659 EMIT1_off32(b2, imm32);
1660 else
1661 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1662 break;
1663
1664 case BPF_ALU64 | BPF_MOV | BPF_K:
1665 case BPF_ALU | BPF_MOV | BPF_K:
1666 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1667 dst_reg, imm32);
1668 break;
1669
1670 case BPF_LD | BPF_IMM | BPF_DW:
1671 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1672 insn++;
1673 i++;
1674 break;
1675
1676 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1677 case BPF_ALU | BPF_MOD | BPF_X:
1678 case BPF_ALU | BPF_DIV | BPF_X:
1679 case BPF_ALU | BPF_MOD | BPF_K:
1680 case BPF_ALU | BPF_DIV | BPF_K:
1681 case BPF_ALU64 | BPF_MOD | BPF_X:
1682 case BPF_ALU64 | BPF_DIV | BPF_X:
1683 case BPF_ALU64 | BPF_MOD | BPF_K:
1684 case BPF_ALU64 | BPF_DIV | BPF_K: {
1685 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1686
1687 if (dst_reg != BPF_REG_0)
1688 EMIT1(0x50); /* push rax */
1689 if (dst_reg != BPF_REG_3)
1690 EMIT1(0x52); /* push rdx */
1691
1692 if (BPF_SRC(insn->code) == BPF_X) {
1693 if (src_reg == BPF_REG_0 ||
1694 src_reg == BPF_REG_3) {
1695 /* mov r11, src_reg */
1696 EMIT_mov(AUX_REG, src_reg);
1697 src_reg = AUX_REG;
1698 }
1699 } else {
1700 /* mov r11, imm32 */
1701 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1702 src_reg = AUX_REG;
1703 }
1704
1705 if (dst_reg != BPF_REG_0)
1706 /* mov rax, dst_reg */
1707 emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1708
1709 if (insn->off == 0) {
1710 /*
1711 * xor edx, edx
1712 * equivalent to 'xor rdx, rdx', but one byte less
1713 */
1714 EMIT2(0x31, 0xd2);
1715
1716 /* div src_reg */
1717 maybe_emit_1mod(&prog, src_reg, is64);
1718 EMIT2(0xF7, add_1reg(0xF0, src_reg));
1719 } else {
1720 if (BPF_CLASS(insn->code) == BPF_ALU)
1721 EMIT1(0x99); /* cdq */
1722 else
1723 EMIT2(0x48, 0x99); /* cqo */
1724
1725 /* idiv src_reg */
1726 maybe_emit_1mod(&prog, src_reg, is64);
1727 EMIT2(0xF7, add_1reg(0xF8, src_reg));
1728 }
1729
1730 if (BPF_OP(insn->code) == BPF_MOD &&
1731 dst_reg != BPF_REG_3)
1732 /* mov dst_reg, rdx */
1733 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1734 else if (BPF_OP(insn->code) == BPF_DIV &&
1735 dst_reg != BPF_REG_0)
1736 /* mov dst_reg, rax */
1737 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1738
1739 if (dst_reg != BPF_REG_3)
1740 EMIT1(0x5A); /* pop rdx */
1741 if (dst_reg != BPF_REG_0)
1742 EMIT1(0x58); /* pop rax */
1743 break;
1744 }
1745
1746 case BPF_ALU | BPF_MUL | BPF_K:
1747 case BPF_ALU64 | BPF_MUL | BPF_K:
1748 maybe_emit_mod(&prog, dst_reg, dst_reg,
1749 BPF_CLASS(insn->code) == BPF_ALU64);
1750
1751 if (is_imm8(imm32))
1752 /* imul dst_reg, dst_reg, imm8 */
1753 EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1754 imm32);
1755 else
1756 /* imul dst_reg, dst_reg, imm32 */
1757 EMIT2_off32(0x69,
1758 add_2reg(0xC0, dst_reg, dst_reg),
1759 imm32);
1760 break;
1761
1762 case BPF_ALU | BPF_MUL | BPF_X:
1763 case BPF_ALU64 | BPF_MUL | BPF_X:
1764 maybe_emit_mod(&prog, src_reg, dst_reg,
1765 BPF_CLASS(insn->code) == BPF_ALU64);
1766
1767 /* imul dst_reg, src_reg */
1768 EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1769 break;
1770
1771 /* Shifts */
1772 case BPF_ALU | BPF_LSH | BPF_K:
1773 case BPF_ALU | BPF_RSH | BPF_K:
1774 case BPF_ALU | BPF_ARSH | BPF_K:
1775 case BPF_ALU64 | BPF_LSH | BPF_K:
1776 case BPF_ALU64 | BPF_RSH | BPF_K:
1777 case BPF_ALU64 | BPF_ARSH | BPF_K:
1778 maybe_emit_1mod(&prog, dst_reg,
1779 BPF_CLASS(insn->code) == BPF_ALU64);
1780
1781 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1782 if (imm32 == 1)
1783 EMIT2(0xD1, add_1reg(b3, dst_reg));
1784 else
1785 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1786 break;
1787
1788 case BPF_ALU | BPF_LSH | BPF_X:
1789 case BPF_ALU | BPF_RSH | BPF_X:
1790 case BPF_ALU | BPF_ARSH | BPF_X:
1791 case BPF_ALU64 | BPF_LSH | BPF_X:
1792 case BPF_ALU64 | BPF_RSH | BPF_X:
1793 case BPF_ALU64 | BPF_ARSH | BPF_X:
1794 /* BMI2 shifts aren't better when shift count is already in rcx */
1795 if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
1796 /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
1797 bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
1798 u8 op;
1799
1800 switch (BPF_OP(insn->code)) {
1801 case BPF_LSH:
1802 op = 1; /* prefix 0x66 */
1803 break;
1804 case BPF_RSH:
1805 op = 3; /* prefix 0xf2 */
1806 break;
1807 case BPF_ARSH:
1808 op = 2; /* prefix 0xf3 */
1809 break;
1810 }
1811
1812 emit_shiftx(&prog, dst_reg, src_reg, w, op);
1813
1814 break;
1815 }
1816
1817 if (src_reg != BPF_REG_4) { /* common case */
1818 /* Check for bad case when dst_reg == rcx */
1819 if (dst_reg == BPF_REG_4) {
1820 /* mov r11, dst_reg */
1821 EMIT_mov(AUX_REG, dst_reg);
1822 dst_reg = AUX_REG;
1823 } else {
1824 EMIT1(0x51); /* push rcx */
1825 }
1826 /* mov rcx, src_reg */
1827 EMIT_mov(BPF_REG_4, src_reg);
1828 }
1829
1830 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1831 maybe_emit_1mod(&prog, dst_reg,
1832 BPF_CLASS(insn->code) == BPF_ALU64);
1833
1834 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1835 EMIT2(0xD3, add_1reg(b3, dst_reg));
1836
1837 if (src_reg != BPF_REG_4) {
1838 if (insn->dst_reg == BPF_REG_4)
1839 /* mov dst_reg, r11 */
1840 EMIT_mov(insn->dst_reg, AUX_REG);
1841 else
1842 EMIT1(0x59); /* pop rcx */
1843 }
1844
1845 break;
1846
1847 case BPF_ALU | BPF_END | BPF_FROM_BE:
1848 case BPF_ALU64 | BPF_END | BPF_FROM_LE:
1849 switch (imm32) {
1850 case 16:
1851 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
1852 EMIT1(0x66);
1853 if (is_ereg(dst_reg))
1854 EMIT1(0x41);
1855 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1856
1857 /* Emit 'movzwl eax, ax' */
1858 if (is_ereg(dst_reg))
1859 EMIT3(0x45, 0x0F, 0xB7);
1860 else
1861 EMIT2(0x0F, 0xB7);
1862 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1863 break;
1864 case 32:
1865 /* Emit 'bswap eax' to swap lower 4 bytes */
1866 if (is_ereg(dst_reg))
1867 EMIT2(0x41, 0x0F);
1868 else
1869 EMIT1(0x0F);
1870 EMIT1(add_1reg(0xC8, dst_reg));
1871 break;
1872 case 64:
1873 /* Emit 'bswap rax' to swap 8 bytes */
1874 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1875 add_1reg(0xC8, dst_reg));
1876 break;
1877 }
1878 break;
1879
1880 case BPF_ALU | BPF_END | BPF_FROM_LE:
1881 switch (imm32) {
1882 case 16:
1883 /*
1884 * Emit 'movzwl eax, ax' to zero extend 16-bit
1885 * into 64 bit
1886 */
1887 if (is_ereg(dst_reg))
1888 EMIT3(0x45, 0x0F, 0xB7);
1889 else
1890 EMIT2(0x0F, 0xB7);
1891 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1892 break;
1893 case 32:
1894 /* Emit 'mov eax, eax' to clear upper 32-bits */
1895 if (is_ereg(dst_reg))
1896 EMIT1(0x45);
1897 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1898 break;
1899 case 64:
1900 /* nop */
1901 break;
1902 }
1903 break;
1904
1905 /* speculation barrier */
1906 case BPF_ST | BPF_NOSPEC:
1907 EMIT_LFENCE();
1908 break;
1909
1910 /* ST: *(u8*)(dst_reg + off) = imm */
1911 case BPF_ST | BPF_MEM | BPF_B:
1912 if (is_ereg(dst_reg))
1913 EMIT2(0x41, 0xC6);
1914 else
1915 EMIT1(0xC6);
1916 goto st;
1917 case BPF_ST | BPF_MEM | BPF_H:
1918 if (is_ereg(dst_reg))
1919 EMIT3(0x66, 0x41, 0xC7);
1920 else
1921 EMIT2(0x66, 0xC7);
1922 goto st;
1923 case BPF_ST | BPF_MEM | BPF_W:
1924 if (is_ereg(dst_reg))
1925 EMIT2(0x41, 0xC7);
1926 else
1927 EMIT1(0xC7);
1928 goto st;
1929 case BPF_ST | BPF_MEM | BPF_DW:
1930 EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1931
1932st: if (is_imm8(insn->off))
1933 EMIT2(add_1reg(0x40, dst_reg), insn->off);
1934 else
1935 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1936
1937 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1938 break;
1939
1940 /* STX: *(u8*)(dst_reg + off) = src_reg */
1941 case BPF_STX | BPF_MEM | BPF_B:
1942 case BPF_STX | BPF_MEM | BPF_H:
1943 case BPF_STX | BPF_MEM | BPF_W:
1944 case BPF_STX | BPF_MEM | BPF_DW:
1945 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1946 break;
1947
1948 case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
1949 case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
1950 case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
1951 case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
1952 start_of_ldx = prog;
1953 emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm);
1954 goto populate_extable;
1955
1956 /* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */
1957 case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
1958 case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
1959 case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
1960 case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
1961 case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
1962 case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
1963 case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
1964 case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
1965 start_of_ldx = prog;
1966 if (BPF_CLASS(insn->code) == BPF_LDX)
1967 emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1968 else
1969 emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1970populate_extable:
1971 {
1972 struct exception_table_entry *ex;
1973 u8 *_insn = image + proglen + (start_of_ldx - temp);
1974 s64 delta;
1975
1976 if (!bpf_prog->aux->extable)
1977 break;
1978
1979 if (excnt >= bpf_prog->aux->num_exentries) {
1980 pr_err("mem32 extable bug\n");
1981 return -EFAULT;
1982 }
1983 ex = &bpf_prog->aux->extable[excnt++];
1984
1985 delta = _insn - (u8 *)&ex->insn;
1986 /* switch ex to rw buffer for writes */
1987 ex = (void *)rw_image + ((void *)ex - (void *)image);
1988
1989 ex->insn = delta;
1990
1991 ex->data = EX_TYPE_BPF;
1992
1993 ex->fixup = (prog - start_of_ldx) |
1994 ((BPF_CLASS(insn->code) == BPF_LDX ? reg2pt_regs[dst_reg] : DONT_CLEAR) << 8);
1995 }
1996 break;
1997
1998 /* LDX: dst_reg = *(u8*)(src_reg + off) */
1999 case BPF_LDX | BPF_MEM | BPF_B:
2000 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
2001 case BPF_LDX | BPF_MEM | BPF_H:
2002 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
2003 case BPF_LDX | BPF_MEM | BPF_W:
2004 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
2005 case BPF_LDX | BPF_MEM | BPF_DW:
2006 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
2007 /* LDXS: dst_reg = *(s8*)(src_reg + off) */
2008 case BPF_LDX | BPF_MEMSX | BPF_B:
2009 case BPF_LDX | BPF_MEMSX | BPF_H:
2010 case BPF_LDX | BPF_MEMSX | BPF_W:
2011 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
2012 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
2013 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
2014 insn_off = insn->off;
2015
2016 if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
2017 BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
2018 /* Conservatively check that src_reg + insn->off is a kernel address:
2019 * src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
2020 * and
2021 * src_reg + insn->off < VSYSCALL_ADDR
2022 */
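			/*
			 * Illustrative shape of the emitted guard (r10/r11 are
			 * the JIT's BPF_REG_AX/AUX_REG scratch registers):
			 *
			 *   movabsq r10, VSYSCALL_ADDR
			 *   mov     r11, src_reg
			 *   add     r11, insn->off      ; only if insn->off != 0
			 *   sub     r11, r10
			 *   movabsq r10, limit
			 *   cmp     r10, r11
			 *   ja      start_of_ldx        ; looks like a kernel address
			 *   xor     dst_reg, dst_reg    ; otherwise dst_reg = 0
			 *   jmp     byte_after_ldx
			 */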
2023
2024 u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
2025 u8 *end_of_jmp;
2026
2027 /* movabsq r10, VSYSCALL_ADDR */
2028 emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
2029 (u32)(long)VSYSCALL_ADDR);
2030
2031 /* mov src_reg, r11 */
2032 EMIT_mov(AUX_REG, src_reg);
2033
2034 if (insn->off) {
2035 /* add r11, insn->off */
2036 maybe_emit_1mod(&prog, AUX_REG, true);
2037 EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
2038 }
2039
2040 /* sub r11, r10 */
2041 maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
2042 EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
2043
2044 /* movabsq r10, limit */
2045 emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
2046 (u32)(long)limit);
2047
2048 /* cmp r10, r11 */
2049 maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
2050 EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
2051
2052 /* if unsigned '>', goto load */
2053 EMIT2(X86_JA, 0);
2054 end_of_jmp = prog;
2055
2056 /* xor dst_reg, dst_reg */
2057 emit_mov_imm32(&prog, false, dst_reg, 0);
2058 /* jmp byte_after_ldx */
2059 EMIT2(0xEB, 0);
2060
2061			/* populate jmp_offset for JA above to jump to start_of_ldx */
2062 start_of_ldx = prog;
2063 end_of_jmp[-1] = start_of_ldx - end_of_jmp;
2064 }
2065 if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
2066 BPF_MODE(insn->code) == BPF_MEMSX)
2067 emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
2068 else
2069 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
2070 if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
2071 BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
2072 struct exception_table_entry *ex;
2073 u8 *_insn = image + proglen + (start_of_ldx - temp);
2074 s64 delta;
2075
2076 /* populate jmp_offset for JMP above */
2077 start_of_ldx[-1] = prog - start_of_ldx;
2078
2079 if (!bpf_prog->aux->extable)
2080 break;
2081
2082 if (excnt >= bpf_prog->aux->num_exentries) {
2083 pr_err("ex gen bug\n");
2084 return -EFAULT;
2085 }
2086 ex = &bpf_prog->aux->extable[excnt++];
2087
2088 delta = _insn - (u8 *)&ex->insn;
2089 if (!is_simm32(delta)) {
2090 pr_err("extable->insn doesn't fit into 32-bit\n");
2091 return -EFAULT;
2092 }
2093 /* switch ex to rw buffer for writes */
2094 ex = (void *)rw_image + ((void *)ex - (void *)image);
2095
2096 ex->insn = delta;
2097
2098 ex->data = EX_TYPE_BPF;
2099
2100 if (dst_reg > BPF_REG_9) {
2101 pr_err("verifier error\n");
2102 return -EFAULT;
2103 }
2104 /*
2105 * Compute size of x86 insn and its target dest x86 register.
2106 * ex_handler_bpf() will use lower 8 bits to adjust
2107 * pt_regs->ip to jump over this x86 instruction
2108 * and upper bits to figure out which pt_regs to zero out.
2109 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
2110 * of 4 bytes will be ignored and rbx will be zero inited.
2111 */
2112 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
2113 }
2114 break;
2115
2116 case BPF_STX | BPF_ATOMIC | BPF_W:
2117 case BPF_STX | BPF_ATOMIC | BPF_DW:
2118 if (insn->imm == (BPF_AND | BPF_FETCH) ||
2119 insn->imm == (BPF_OR | BPF_FETCH) ||
2120 insn->imm == (BPF_XOR | BPF_FETCH)) {
2121 bool is64 = BPF_SIZE(insn->code) == BPF_DW;
2122 u32 real_src_reg = src_reg;
2123 u32 real_dst_reg = dst_reg;
2124 u8 *branch_target;
2125
2126 /*
2127 * Can't be implemented with a single x86 insn.
2128 * Need to do a CMPXCHG loop.
2129 */
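			/*
			 * Illustrative shape of the emitted loop for a 64-bit
			 * "or fetch" with dst_reg = rdi and src_reg = rsi
			 * (registers chosen for illustration only):
			 *
			 *   mov  r10, rax             ; save R0
			 * retry:
			 *   mov  rax, [rdi + off]     ; load old value
			 *   mov  r11, rax
			 *   or   r11, rsi             ; apply the BPF op
			 *   lock cmpxchg [rdi + off], r11
			 *   jne  retry                ; lost the race, try again
			 *   mov  rsi, rax             ; src_reg = old value (FETCH)
			 *   mov  rax, r10             ; restore R0
			 */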
2130
2131 /* Will need RAX as a CMPXCHG operand so save R0 */
2132 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
2133 if (src_reg == BPF_REG_0)
2134 real_src_reg = BPF_REG_AX;
2135 if (dst_reg == BPF_REG_0)
2136 real_dst_reg = BPF_REG_AX;
2137
2138 branch_target = prog;
2139 /* Load old value */
2140 emit_ldx(&prog, BPF_SIZE(insn->code),
2141 BPF_REG_0, real_dst_reg, insn->off);
2142 /*
2143 * Perform the (commutative) operation locally,
2144 * put the result in the AUX_REG.
2145 */
2146 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
2147 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
2148 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
2149 add_2reg(0xC0, AUX_REG, real_src_reg));
2150 /* Attempt to swap in new value */
2151 err = emit_atomic(&prog, BPF_CMPXCHG,
2152 real_dst_reg, AUX_REG,
2153 insn->off,
2154 BPF_SIZE(insn->code));
2155 if (WARN_ON(err))
2156 return err;
2157 /*
2158 * ZF tells us whether we won the race. If it's
2159 * cleared we need to try again.
2160 */
2161 EMIT2(X86_JNE, -(prog - branch_target) - 2);
2162 /* Return the pre-modification value */
2163 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
2164 /* Restore R0 after clobbering RAX */
2165 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
2166 break;
2167 }
2168
2169 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
2170 insn->off, BPF_SIZE(insn->code));
2171 if (err)
2172 return err;
2173 break;
2174
2175 case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
2176 case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
2177 start_of_ldx = prog;
2178 err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code),
2179 dst_reg, src_reg, X86_REG_R12, insn->off);
2180 if (err)
2181 return err;
2182 goto populate_extable;
2183
2184 /* call */
2185 case BPF_JMP | BPF_CALL: {
2186 u8 *ip = image + addrs[i - 1];
2187
2188 func = (u8 *) __bpf_call_base + imm32;
2189 if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) {
2190 LOAD_TAIL_CALL_CNT_PTR(stack_depth);
2191 ip += 7;
2192 }
2193 if (!imm32)
2194 return -EINVAL;
2195 if (priv_frame_ptr) {
2196 push_r9(&prog);
2197 ip += 2;
2198 }
2199 ip += x86_call_depth_emit_accounting(&prog, func, ip);
2200 if (emit_call(&prog, func, ip))
2201 return -EINVAL;
2202 if (priv_frame_ptr)
2203 pop_r9(&prog);
2204 break;
2205 }
2206
2207 case BPF_JMP | BPF_TAIL_CALL:
2208 if (imm32)
2209 emit_bpf_tail_call_direct(bpf_prog,
2210 &bpf_prog->aux->poke_tab[imm32 - 1],
2211 &prog, image + addrs[i - 1],
2212 callee_regs_used,
2213 stack_depth,
2214 ctx);
2215 else
2216 emit_bpf_tail_call_indirect(bpf_prog,
2217 &prog,
2218 callee_regs_used,
2219 stack_depth,
2220 image + addrs[i - 1],
2221 ctx);
2222 break;
2223
2224 /* cond jump */
2225 case BPF_JMP | BPF_JEQ | BPF_X:
2226 case BPF_JMP | BPF_JNE | BPF_X:
2227 case BPF_JMP | BPF_JGT | BPF_X:
2228 case BPF_JMP | BPF_JLT | BPF_X:
2229 case BPF_JMP | BPF_JGE | BPF_X:
2230 case BPF_JMP | BPF_JLE | BPF_X:
2231 case BPF_JMP | BPF_JSGT | BPF_X:
2232 case BPF_JMP | BPF_JSLT | BPF_X:
2233 case BPF_JMP | BPF_JSGE | BPF_X:
2234 case BPF_JMP | BPF_JSLE | BPF_X:
2235 case BPF_JMP32 | BPF_JEQ | BPF_X:
2236 case BPF_JMP32 | BPF_JNE | BPF_X:
2237 case BPF_JMP32 | BPF_JGT | BPF_X:
2238 case BPF_JMP32 | BPF_JLT | BPF_X:
2239 case BPF_JMP32 | BPF_JGE | BPF_X:
2240 case BPF_JMP32 | BPF_JLE | BPF_X:
2241 case BPF_JMP32 | BPF_JSGT | BPF_X:
2242 case BPF_JMP32 | BPF_JSLT | BPF_X:
2243 case BPF_JMP32 | BPF_JSGE | BPF_X:
2244 case BPF_JMP32 | BPF_JSLE | BPF_X:
2245 /* cmp dst_reg, src_reg */
2246 maybe_emit_mod(&prog, dst_reg, src_reg,
2247 BPF_CLASS(insn->code) == BPF_JMP);
2248 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
2249 goto emit_cond_jmp;
2250
2251 case BPF_JMP | BPF_JSET | BPF_X:
2252 case BPF_JMP32 | BPF_JSET | BPF_X:
2253 /* test dst_reg, src_reg */
2254 maybe_emit_mod(&prog, dst_reg, src_reg,
2255 BPF_CLASS(insn->code) == BPF_JMP);
2256 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
2257 goto emit_cond_jmp;
2258
2259 case BPF_JMP | BPF_JSET | BPF_K:
2260 case BPF_JMP32 | BPF_JSET | BPF_K:
2261 /* test dst_reg, imm32 */
2262 maybe_emit_1mod(&prog, dst_reg,
2263 BPF_CLASS(insn->code) == BPF_JMP);
2264 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
2265 goto emit_cond_jmp;
2266
2267 case BPF_JMP | BPF_JEQ | BPF_K:
2268 case BPF_JMP | BPF_JNE | BPF_K:
2269 case BPF_JMP | BPF_JGT | BPF_K:
2270 case BPF_JMP | BPF_JLT | BPF_K:
2271 case BPF_JMP | BPF_JGE | BPF_K:
2272 case BPF_JMP | BPF_JLE | BPF_K:
2273 case BPF_JMP | BPF_JSGT | BPF_K:
2274 case BPF_JMP | BPF_JSLT | BPF_K:
2275 case BPF_JMP | BPF_JSGE | BPF_K:
2276 case BPF_JMP | BPF_JSLE | BPF_K:
2277 case BPF_JMP32 | BPF_JEQ | BPF_K:
2278 case BPF_JMP32 | BPF_JNE | BPF_K:
2279 case BPF_JMP32 | BPF_JGT | BPF_K:
2280 case BPF_JMP32 | BPF_JLT | BPF_K:
2281 case BPF_JMP32 | BPF_JGE | BPF_K:
2282 case BPF_JMP32 | BPF_JLE | BPF_K:
2283 case BPF_JMP32 | BPF_JSGT | BPF_K:
2284 case BPF_JMP32 | BPF_JSLT | BPF_K:
2285 case BPF_JMP32 | BPF_JSGE | BPF_K:
2286 case BPF_JMP32 | BPF_JSLE | BPF_K:
2287 /* test dst_reg, dst_reg to save one extra byte */
2288 if (imm32 == 0) {
2289 maybe_emit_mod(&prog, dst_reg, dst_reg,
2290 BPF_CLASS(insn->code) == BPF_JMP);
2291 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
2292 goto emit_cond_jmp;
2293 }
2294
2295 /* cmp dst_reg, imm8/32 */
2296 maybe_emit_1mod(&prog, dst_reg,
2297 BPF_CLASS(insn->code) == BPF_JMP);
2298
2299 if (is_imm8(imm32))
2300 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
2301 else
2302 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
2303
2304emit_cond_jmp: /* Convert BPF opcode to x86 */
2305 switch (BPF_OP(insn->code)) {
2306 case BPF_JEQ:
2307 jmp_cond = X86_JE;
2308 break;
2309 case BPF_JSET:
2310 case BPF_JNE:
2311 jmp_cond = X86_JNE;
2312 break;
2313 case BPF_JGT:
2314 /* GT is unsigned '>', JA in x86 */
2315 jmp_cond = X86_JA;
2316 break;
2317 case BPF_JLT:
2318 /* LT is unsigned '<', JB in x86 */
2319 jmp_cond = X86_JB;
2320 break;
2321 case BPF_JGE:
2322 /* GE is unsigned '>=', JAE in x86 */
2323 jmp_cond = X86_JAE;
2324 break;
2325 case BPF_JLE:
2326 /* LE is unsigned '<=', JBE in x86 */
2327 jmp_cond = X86_JBE;
2328 break;
2329 case BPF_JSGT:
2330 /* Signed '>', GT in x86 */
2331 jmp_cond = X86_JG;
2332 break;
2333 case BPF_JSLT:
2334 /* Signed '<', LT in x86 */
2335 jmp_cond = X86_JL;
2336 break;
2337 case BPF_JSGE:
2338 /* Signed '>=', GE in x86 */
2339 jmp_cond = X86_JGE;
2340 break;
2341 case BPF_JSLE:
2342 /* Signed '<=', LE in x86 */
2343 jmp_cond = X86_JLE;
2344 break;
2345 default: /* to silence GCC warning */
2346 return -EFAULT;
2347 }
2348 jmp_offset = addrs[i + insn->off] - addrs[i];
2349 if (is_imm8_jmp_offset(jmp_offset)) {
2350 if (jmp_padding) {
2351 /* To keep the jmp_offset valid, the extra bytes are
2352 * padded before the jump insn, so we subtract the
2353 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
2354 *
2355 * If the previous pass already emits an imm8
2356 * jmp_cond, then this BPF insn won't shrink, so
2357 * "nops" is 0.
2358 *
2359 * On the other hand, if the previous pass emits an
2360				 * imm32 jmp_cond, the extra 4 bytes(*) are padded to
2361 * keep the image from shrinking further.
2362 *
2363 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
2364 * is 2 bytes, so the size difference is 4 bytes.
2365 */
2366 nops = INSN_SZ_DIFF - 2;
2367 if (nops != 0 && nops != 4) {
2368 pr_err("unexpected jmp_cond padding: %d bytes\n",
2369 nops);
2370 return -EFAULT;
2371 }
2372 emit_nops(&prog, nops);
2373 }
2374 EMIT2(jmp_cond, jmp_offset);
2375 } else if (is_simm32(jmp_offset)) {
2376 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
2377 } else {
2378 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
2379 return -EFAULT;
2380 }
2381
2382 break;
2383
2384 case BPF_JMP | BPF_JA:
2385 case BPF_JMP32 | BPF_JA:
2386 if (BPF_CLASS(insn->code) == BPF_JMP) {
2387 if (insn->off == -1)
2388 /* -1 jmp instructions will always jump
2389 * backwards two bytes. Explicitly handling
2390 * this case avoids wasting too many passes
2391 * when there are long sequences of replaced
2392 * dead code.
2393 */
2394 jmp_offset = -2;
2395 else
2396 jmp_offset = addrs[i + insn->off] - addrs[i];
2397 } else {
2398 if (insn->imm == -1)
2399 jmp_offset = -2;
2400 else
2401 jmp_offset = addrs[i + insn->imm] - addrs[i];
2402 }
2403
2404 if (!jmp_offset) {
2405 /*
2406 * If jmp_padding is enabled, the extra nops will
2407 * be inserted. Otherwise, optimize out nop jumps.
2408 */
2409 if (jmp_padding) {
2410 /* There are 3 possible conditions.
2411 * (1) This BPF_JA is already optimized out in
2412 * the previous run, so there is no need
2413 * to pad any extra byte (0 byte).
2414 * (2) The previous pass emits an imm8 jmp,
2415 * so we pad 2 bytes to match the previous
2416 * insn size.
2417 * (3) Similarly, the previous pass emits an
2418 * imm32 jmp, and 5 bytes is padded.
2419 */
2420 nops = INSN_SZ_DIFF;
2421 if (nops != 0 && nops != 2 && nops != 5) {
2422 pr_err("unexpected nop jump padding: %d bytes\n",
2423 nops);
2424 return -EFAULT;
2425 }
2426 emit_nops(&prog, nops);
2427 }
2428 break;
2429 }
2430emit_jmp:
2431 if (is_imm8_jmp_offset(jmp_offset)) {
2432 if (jmp_padding) {
2433 /* To avoid breaking jmp_offset, the extra bytes
2434 * are padded before the actual jmp insn, so
2435 * 2 bytes is subtracted from INSN_SZ_DIFF.
2436 *
2437 * If the previous pass already emits an imm8
2438 * jmp, there is nothing to pad (0 byte).
2439 *
2440 * If it emits an imm32 jmp (5 bytes) previously
2441 * and now an imm8 jmp (2 bytes), then we pad
2442 * (5 - 2 = 3) bytes to stop the image from
2443 * shrinking further.
2444 */
2445 nops = INSN_SZ_DIFF - 2;
2446 if (nops != 0 && nops != 3) {
2447 pr_err("unexpected jump padding: %d bytes\n",
2448 nops);
2449 return -EFAULT;
2450 }
2451 emit_nops(&prog, INSN_SZ_DIFF - 2);
2452 }
2453 EMIT2(0xEB, jmp_offset);
2454 } else if (is_simm32(jmp_offset)) {
2455 EMIT1_off32(0xE9, jmp_offset);
2456 } else {
2457 pr_err("jmp gen bug %llx\n", jmp_offset);
2458 return -EFAULT;
2459 }
2460 break;
2461
2462 case BPF_JMP | BPF_EXIT:
2463 if (seen_exit) {
2464 jmp_offset = ctx->cleanup_addr - addrs[i];
2465 goto emit_jmp;
2466 }
2467 seen_exit = true;
2468 /* Update cleanup_addr */
2469 ctx->cleanup_addr = proglen;
2470 if (bpf_prog->aux->exception_boundary) {
2471 pop_callee_regs(&prog, all_callee_regs_used);
2472 pop_r12(&prog);
2473 } else {
2474 pop_callee_regs(&prog, callee_regs_used);
2475 if (arena_vm_start)
2476 pop_r12(&prog);
2477 }
2478 EMIT1(0xC9); /* leave */
2479 emit_return(&prog, image + addrs[i - 1] + (prog - temp));
2480 break;
2481
2482 default:
2483 /*
2484 * By design x86-64 JIT should support all BPF instructions.
2485 * This error will be seen if new instruction was added
2486 * to the interpreter, but not to the JIT, or if there is
2487 * junk in bpf_prog.
2488 */
2489 pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
2490 return -EINVAL;
2491 }
2492
2493 ilen = prog - temp;
2494 if (ilen > BPF_MAX_INSN_SIZE) {
2495 pr_err("bpf_jit: fatal insn size error\n");
2496 return -EFAULT;
2497 }
2498
2499 if (image) {
2500 /*
2501 * When populating the image, assert that:
2502 *
2503 * i) We do not write beyond the allocated space, and
2504 * ii) addrs[i] did not change from the prior run, in order
2505 * to validate assumptions made for computing branch
2506 * displacements.
2507 */
2508 if (unlikely(proglen + ilen > oldproglen ||
2509 proglen + ilen != addrs[i])) {
2510 pr_err("bpf_jit: fatal error\n");
2511 return -EFAULT;
2512 }
2513 memcpy(rw_image + proglen, temp, ilen);
2514 }
2515 proglen += ilen;
2516 addrs[i] = proglen;
2517 prog = temp;
2518 }
2519
2520 if (image && excnt != bpf_prog->aux->num_exentries) {
2521 pr_err("extable is not populated\n");
2522 return -EFAULT;
2523 }
2524 return proglen;
2525}
2526
2527static void clean_stack_garbage(const struct btf_func_model *m,
2528 u8 **pprog, int nr_stack_slots,
2529 int stack_size)
2530{
2531 int arg_size, off;
2532 u8 *prog;
2533
2534	/* Generally speaking, the compiler will pass the arguments
2535	 * on-stack with a "push" instruction, which takes 8 bytes
2536	 * on the stack. In this case, there won't be garbage values
2537	 * when we copy the arguments from the origin stack frame to the
2538	 * current one as BPF_DW.
2539	 *
2540	 * However, sometimes the compiler will only allocate 4 bytes on
2541	 * the stack for the argument. For now, this case only happens
2542	 * when there is a single argument on-stack and its size is
2543	 * no more than 4 bytes. In this case, there will be garbage
2544	 * values in the upper 4 bytes of the slot where we store the
2545	 * argument in the current stack frame.
2546 *
2547 * arguments on origin stack:
2548 *
2549 * stack_arg_1(4-byte) xxx(4-byte)
2550 *
2551 * what we copy:
2552 *
2553 * stack_arg_1(8-byte): stack_arg_1(origin) xxx
2554 *
2555	 * and the xxx is the garbage value which we should clean here.
2556 */
2557 if (nr_stack_slots != 1)
2558 return;
2559
2560 /* the size of the last argument */
2561 arg_size = m->arg_size[m->nr_args - 1];
2562 if (arg_size <= 4) {
2563 off = -(stack_size - 4);
2564 prog = *pprog;
2565 /* mov DWORD PTR [rbp + off], 0 */
2566 if (!is_imm8(off))
2567 EMIT2_off32(0xC7, 0x85, off);
2568 else
2569 EMIT3(0xC7, 0x45, off);
2570 EMIT(0, 4);
2571 *pprog = prog;
2572 }
2573}
2574
2575/* get the count of the regs that are used to pass arguments */
2576static int get_nr_used_regs(const struct btf_func_model *m)
2577{
2578 int i, arg_regs, nr_used_regs = 0;
2579
2580 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2581 arg_regs = (m->arg_size[i] + 7) / 8;
2582 if (nr_used_regs + arg_regs <= 6)
2583 nr_used_regs += arg_regs;
2584
2585 if (nr_used_regs >= 6)
2586 break;
2587 }
2588
2589 return nr_used_regs;
2590}
2591
2592static void save_args(const struct btf_func_model *m, u8 **prog,
2593 int stack_size, bool for_call_origin)
2594{
2595 int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
2596 int i, j;
2597
2598 /* Store function arguments to stack.
2599 * For a function that accepts two pointers the sequence will be:
2600 * mov QWORD PTR [rbp-0x10],rdi
2601 * mov QWORD PTR [rbp-0x8],rsi
2602 */
2603 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2604 arg_regs = (m->arg_size[i] + 7) / 8;
2605
2606 /* According to the research of Yonghong, struct members
2607 * should be all in register or all on the stack.
2608 * Meanwhile, the compiler will pass the argument on regs
2609 * if the remaining regs can hold the argument.
2610 *
2611 * Disorder of the args can happen. For example:
2612 *
2613 * struct foo_struct {
2614 * long a;
2615 * int b;
2616 * };
2617 * int foo(char, char, char, char, char, struct foo_struct,
2618 * char);
2619 *
2620	 * the arg1-5,arg7 will be passed by regs, and arg6 will be
2621	 * passed on the stack.
2622 */
2623 if (nr_regs + arg_regs > 6) {
2624 /* copy function arguments from origin stack frame
2625 * into current stack frame.
2626 *
2627 * The starting address of the arguments on-stack
2628 * is:
2629 * rbp + 8(push rbp) +
2630 * 8(return addr of origin call) +
2631 * 8(return addr of the caller)
2632 * which means: rbp + 24
2633 */
2634 for (j = 0; j < arg_regs; j++) {
2635 emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
2636 nr_stack_slots * 8 + 0x18);
2637 emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
2638 -stack_size);
2639
2640 if (!nr_stack_slots)
2641 first_off = stack_size;
2642 stack_size -= 8;
2643 nr_stack_slots++;
2644 }
2645 } else {
2646			/* Only copy the on-stack arguments into the current
2647			 * 'stack_size' area and ignore the register-passed ones;
2648			 * this prepares the on-stack arguments for the origin call.
2649			 */
2650 if (for_call_origin) {
2651 nr_regs += arg_regs;
2652 continue;
2653 }
2654
2655 /* copy the arguments from regs into stack */
2656 for (j = 0; j < arg_regs; j++) {
2657 emit_stx(prog, BPF_DW, BPF_REG_FP,
2658 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2659 -stack_size);
2660 stack_size -= 8;
2661 nr_regs++;
2662 }
2663 }
2664 }
2665
2666 clean_stack_garbage(m, prog, nr_stack_slots, first_off);
2667}
2668
2669static void restore_regs(const struct btf_func_model *m, u8 **prog,
2670 int stack_size)
2671{
2672 int i, j, arg_regs, nr_regs = 0;
2673
2674 /* Restore function arguments from stack.
2675 * For a function that accepts two pointers the sequence will be:
2676 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
2677 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
2678 *
2679 * The logic here is similar to what we do in save_args()
2680 */
2681 for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
2682 arg_regs = (m->arg_size[i] + 7) / 8;
2683 if (nr_regs + arg_regs <= 6) {
2684 for (j = 0; j < arg_regs; j++) {
2685 emit_ldx(prog, BPF_DW,
2686 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
2687 BPF_REG_FP,
2688 -stack_size);
2689 stack_size -= 8;
2690 nr_regs++;
2691 }
2692 } else {
2693 stack_size -= 8 * arg_regs;
2694 }
2695
2696 if (nr_regs >= 6)
2697 break;
2698 }
2699}
2700
2701static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
2702 struct bpf_tramp_link *l, int stack_size,
2703 int run_ctx_off, bool save_ret,
2704 void *image, void *rw_image)
2705{
2706 u8 *prog = *pprog;
2707 u8 *jmp_insn;
2708 int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
2709 struct bpf_prog *p = l->link.prog;
2710 u64 cookie = l->cookie;
2711
2712 /* mov rdi, cookie */
2713 emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
2714
2715 /* Prepare struct bpf_tramp_run_ctx.
2716 *
2717 * bpf_tramp_run_ctx is already preserved by
2718 * arch_prepare_bpf_trampoline().
2719 *
2720 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
2721 */
2722 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
2723
2724 /* arg1: mov rdi, progs[i] */
2725 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2726	/* arg2: lea rsi, [rbp - run_ctx_off] */
2727 if (!is_imm8(-run_ctx_off))
2728 EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
2729 else
2730 EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
2731
2732 if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image)))
2733 return -EINVAL;
2734 /* remember prog start time returned by __bpf_prog_enter */
2735 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
2736
2737 /* if (__bpf_prog_enter*(prog) == 0)
2738 * goto skip_exec_of_prog;
2739 */
2740 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
2741 /* emit 2 nops that will be replaced with JE insn */
2742 jmp_insn = prog;
2743 emit_nops(&prog, 2);
2744
2745 /* arg1: lea rdi, [rbp - stack_size] */
2746 if (!is_imm8(-stack_size))
2747 EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
2748 else
2749 EMIT4(0x48, 0x8D, 0x7D, -stack_size);
2750 /* arg2: progs[i]->insnsi for interpreter */
2751 if (!p->jited)
2752 emit_mov_imm64(&prog, BPF_REG_2,
2753 (long) p->insnsi >> 32,
2754 (u32) (long) p->insnsi);
2755 /* call JITed bpf program or interpreter */
2756 if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image)))
2757 return -EINVAL;
2758
2759 /*
2760	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return value
2761 * of the previous call which is then passed on the stack to
2762 * the next BPF program.
2763 *
2764 * BPF_TRAMP_FENTRY trampoline may need to return the return
2765 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
2766 */
2767 if (save_ret)
2768 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2769
2770 /* replace 2 nops with JE insn, since jmp target is known */
2771 jmp_insn[0] = X86_JE;
2772 jmp_insn[1] = prog - jmp_insn - 2;
2773
2774 /* arg1: mov rdi, progs[i] */
2775 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
2776 /* arg2: mov rsi, rbx <- start time in nsec */
2777 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
2778 /* arg3: lea rdx, [rbp - run_ctx_off] */
2779 if (!is_imm8(-run_ctx_off))
2780 EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
2781 else
2782 EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
2783 if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image)))
2784 return -EINVAL;
2785
2786 *pprog = prog;
2787 return 0;
2788}
2789
2790static void emit_align(u8 **pprog, u32 align)
2791{
2792 u8 *target, *prog = *pprog;
2793
2794 target = PTR_ALIGN(prog, align);
2795 if (target != prog)
2796 emit_nops(&prog, target - prog);
2797
2798 *pprog = prog;
2799}
2800
2801static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
2802{
2803 u8 *prog = *pprog;
2804 s64 offset;
2805
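	/*
	 * A near conditional jump is encoded as 0F 8x + rel32 (6 bytes), so
	 * the displacement is computed relative to the end of the
	 * instruction, i.e. ip + 2 + 4.
	 */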
2806 offset = func - (ip + 2 + 4);
2807 if (!is_simm32(offset)) {
2808 pr_err("Target %p is out of range\n", func);
2809 return -EINVAL;
2810 }
2811 EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
2812 *pprog = prog;
2813 return 0;
2814}
2815
2816static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
2817 struct bpf_tramp_links *tl, int stack_size,
2818 int run_ctx_off, bool save_ret,
2819 void *image, void *rw_image)
2820{
2821 int i;
2822 u8 *prog = *pprog;
2823
2824 for (i = 0; i < tl->nr_links; i++) {
2825 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
2826 run_ctx_off, save_ret, image, rw_image))
2827 return -EINVAL;
2828 }
2829 *pprog = prog;
2830 return 0;
2831}
2832
2833static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
2834 struct bpf_tramp_links *tl, int stack_size,
2835 int run_ctx_off, u8 **branches,
2836 void *image, void *rw_image)
2837{
2838 u8 *prog = *pprog;
2839 int i;
2840
2841 /* The first fmod_ret program will receive a garbage return value.
2842 * Set this to 0 to avoid confusing the program.
2843 */
2844 emit_mov_imm32(&prog, false, BPF_REG_0, 0);
2845 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2846 for (i = 0; i < tl->nr_links; i++) {
2847 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
2848 image, rw_image))
2849 return -EINVAL;
2850
2851 /* mod_ret prog stored return value into [rbp - 8]. Emit:
2852 * if (*(u64 *)(rbp - 8) != 0)
2853 * goto do_fexit;
2854 */
2855 /* cmp QWORD PTR [rbp - 0x8], 0x0 */
2856 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
2857
2858		/* Save the location of the branch and generate 6 nops
2859		 * (4 bytes for an offset and 2 bytes for the jump). These nops
2860 * are replaced with a conditional jump once do_fexit (i.e. the
2861 * start of the fexit invocation) is finalized.
2862 */
2863 branches[i] = prog;
2864 emit_nops(&prog, 4 + 2);
2865 }
2866
2867 *pprog = prog;
2868 return 0;
2869}
2870
2871/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
2872#define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack) \
2873 __LOAD_TCC_PTR(-round_up(stack, 8) - 8)
2874
2875/* Example:
2876 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
2877 * its 'struct btf_func_model' will be nr_args=2
2878 * The assembly code when eth_type_trans is executing after trampoline:
2879 *
2880 * push rbp
2881 * mov rbp, rsp
2882 * sub rsp, 16 // space for skb and dev
2883 * push rbx // temp regs to pass start time
2884 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
2885 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
2886 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2887 * mov rbx, rax                    // remember start time if bpf stats are enabled
2888 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
2889 * call addr_of_jited_FENTRY_prog
2890 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2891 * mov rsi, rbx // prog start time
2892 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2893 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
2894 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
2895 * pop rbx
2896 * leave
2897 * ret
2898 *
2899 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
2900 * replaced with 'call generated_bpf_trampoline'. When it returns
2901 * eth_type_trans will continue executing with original skb and dev pointers.
2902 *
2903 * The assembly code when eth_type_trans is called from trampoline:
2904 *
2905 * push rbp
2906 * mov rbp, rsp
2907 * sub rsp, 24 // space for skb, dev, return value
2908 * push rbx // temp regs to pass start time
2909 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
2910 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
2911 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2912 * mov rbx, rax // remember start time if bpf stats are enabled
2913 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2914 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
2915 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2916 * mov rsi, rbx // prog start time
2917 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2918 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
2919 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
2920 * call eth_type_trans+5 // execute body of eth_type_trans
2921 * mov qword ptr [rbp - 8], rax // save return value
2922 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2923 * mov rbx, rax                    // remember start time if bpf stats are enabled
2924 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2925 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
2926 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2927 * mov rsi, rbx // prog start time
2928 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2929 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
2930 * pop rbx
2931 * leave
2932 * add rsp, 8 // skip eth_type_trans's frame
2933 * ret // return to its caller
2934 */
2935static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
2936 void *rw_image_end, void *image,
2937 const struct btf_func_model *m, u32 flags,
2938 struct bpf_tramp_links *tlinks,
2939 void *func_addr)
2940{
2941 int i, ret, nr_regs = m->nr_args, stack_size = 0;
2942 int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
2943 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
2944 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
2945 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
2946 void *orig_call = func_addr;
2947 u8 **branches = NULL;
2948 u8 *prog;
2949 bool save_ret;
2950
2951 /*
2952	 * F_INDIRECT is only compatible with F_RET_FENTRY_RET; it is
2953	 * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG
2954	 * because @func_addr is not a traced kernel function in that case.
2955 */
2956 WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) &&
2957 (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET)));
2958
2959 /* extra registers for struct arguments */
2960 for (i = 0; i < m->nr_args; i++) {
2961 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
2962 nr_regs += (m->arg_size[i] + 7) / 8 - 1;
2963 }
2964
2965 /* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. 1-6
2966	 * are passed through regs, the rest are passed on the stack.
2967 */
2968 if (nr_regs > MAX_BPF_FUNC_ARGS)
2969 return -ENOTSUPP;
2970
2971 /* Generated trampoline stack layout:
2972 *
2973 * RBP + 8 [ return address ]
2974 * RBP + 0 [ RBP ]
2975 *
2976 * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or
2977 * BPF_TRAMP_F_RET_FENTRY_RET flags
2978 *
2979 * [ reg_argN ] always
2980 * [ ... ]
2981 * RBP - regs_off [ reg_arg1 ] program's ctx pointer
2982 *
2983 * RBP - nregs_off [ regs count ] always
2984 *
2985 * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
2986 *
2987 * RBP - rbx_off [ rbx value ] always
2988 *
2989 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
2990 *
2991 * [ stack_argN ] BPF_TRAMP_F_CALL_ORIG
2992 * [ ... ]
2993 * [ stack_arg2 ]
2994 * RBP - arg_stack_off [ stack_arg1 ]
2995 * RSP [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX
2996 */
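	/* Worked example (illustrative): a traced function with two register
	 * arguments, BPF_TRAMP_F_CALL_ORIG set and no BPF_TRAMP_F_IP_ARG:
	 *   return value slot  ->  stack_size =  8
	 *   2 reg args         ->  stack_size = 24, regs_off  = 24
	 *   regs count slot    ->  stack_size = 32, nregs_off = 32
	 *   (no IP slot)                            ip_off    = 32
	 *   rbx spill slot     ->  stack_size = 40, rbx_off   = 40
	 *   run_ctx            ->  stack_size = 40 + sizeof(struct
	 *                          bpf_tramp_run_ctx) rounded up to 8,
	 *                          run_ctx_off = stack_size
	 * All *_off values are used as negative displacements from RBP. With
	 * no more than six register arguments, no on-stack argument area is
	 * reserved and arg_stack_off equals run_ctx_off.
	 */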
2997
2998 /* room for return value of orig_call or fentry prog */
2999 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
3000 if (save_ret)
3001 stack_size += 8;
3002
3003 stack_size += nr_regs * 8;
3004 regs_off = stack_size;
3005
3006 /* regs count */
3007 stack_size += 8;
3008 nregs_off = stack_size;
3009
3010 if (flags & BPF_TRAMP_F_IP_ARG)
3011 stack_size += 8; /* room for IP address argument */
3012
3013 ip_off = stack_size;
3014
3015 stack_size += 8;
3016 rbx_off = stack_size;
3017
3018 stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
3019 run_ctx_off = stack_size;
3020
3021 if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
3022		/* the space used to pass arguments on the stack */
3023 stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
3024		/* make sure the stack pointer is 16-byte aligned if we
3025		 * need to pass arguments on the stack, which means
3026		 * [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
3027		 * should be 16-byte aligned. The following code depends on
3028		 * stack_size already being 8-byte aligned.
3029 */
3030 stack_size += (stack_size % 16) ? 0 : 8;
3031 }
3032
3033 arg_stack_off = stack_size;
3034
3035 if (flags & BPF_TRAMP_F_SKIP_FRAME) {
3036 /* skip patched call instruction and point orig_call to actual
3037 * body of the kernel function.
3038 */
3039 if (is_endbr(*(u32 *)orig_call))
3040 orig_call += ENDBR_INSN_SIZE;
3041 orig_call += X86_PATCH_SIZE;
3042 }
3043
3044 prog = rw_image;
3045
3046 if (flags & BPF_TRAMP_F_INDIRECT) {
3047 /*
3048 * Indirect call for bpf_struct_ops
3049 */
3050 emit_cfi(&prog, cfi_get_func_hash(func_addr));
3051 } else {
3052 /*
3053 * Direct-call fentry stub, as such it needs accounting for the
3054 * __fentry__ call.
3055 */
3056 x86_call_depth_emit_accounting(&prog, NULL, image);
3057 }
3058 EMIT1(0x55); /* push rbp */
3059 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
3060 if (!is_imm8(stack_size)) {
3061 /* sub rsp, stack_size */
3062 EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
3063 } else {
3064 /* sub rsp, stack_size */
3065 EMIT4(0x48, 0x83, 0xEC, stack_size);
3066 }
3067 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
3068 EMIT1(0x50); /* push rax */
3069 /* mov QWORD PTR [rbp - rbx_off], rbx */
3070 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
3071
3072 /* Store number of argument registers of the traced function:
3073 * mov rax, nr_regs
3074 * mov QWORD PTR [rbp - nregs_off], rax
3075 */
3076 emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
3077 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);
3078
3079 if (flags & BPF_TRAMP_F_IP_ARG) {
3080 /* Store IP address of the traced function:
3081 * movabsq rax, func_addr
3082 * mov QWORD PTR [rbp - ip_off], rax
3083 */
3084 emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
3085 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
3086 }
3087
3088 save_args(m, &prog, regs_off, false);
3089
3090 if (flags & BPF_TRAMP_F_CALL_ORIG) {
3091 /* arg1: mov rdi, im */
3092 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
3093 if (emit_rsb_call(&prog, __bpf_tramp_enter,
3094 image + (prog - (u8 *)rw_image))) {
3095 ret = -EINVAL;
3096 goto cleanup;
3097 }
3098 }
3099
3100 if (fentry->nr_links) {
3101 if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
3102 flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image))
3103 return -EINVAL;
3104 }
3105
3106 if (fmod_ret->nr_links) {
3107 branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
3108 GFP_KERNEL);
3109 if (!branches)
3110 return -ENOMEM;
3111
3112 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
3113 run_ctx_off, branches, image, rw_image)) {
3114 ret = -EINVAL;
3115 goto cleanup;
3116 }
3117 }
3118
3119 if (flags & BPF_TRAMP_F_CALL_ORIG) {
3120 restore_regs(m, &prog, regs_off);
3121 save_args(m, &prog, arg_stack_off, true);
3122
3123 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
3124 /* Before calling the original function, load the
3125 * tail_call_cnt_ptr from stack to rax.
3126 */
3127 LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
3128 }
3129
3130 if (flags & BPF_TRAMP_F_ORIG_STACK) {
3131 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
3132 EMIT2(0xff, 0xd3); /* call *rbx */
3133 } else {
3134 /* call original function */
3135 if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) {
3136 ret = -EINVAL;
3137 goto cleanup;
3138 }
3139 }
3140		/* remember return value on the stack for bpf prog to access */
3141 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
3142 im->ip_after_call = image + (prog - (u8 *)rw_image);
3143 emit_nops(&prog, X86_PATCH_SIZE);
3144 }
3145
3146 if (fmod_ret->nr_links) {
3147 /* From Intel 64 and IA-32 Architectures Optimization
3148 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3149 * Coding Rule 11: All branch targets should be 16-byte
3150 * aligned.
3151 */
3152 emit_align(&prog, 16);
3153 /* Update the branches saved in invoke_bpf_mod_ret with the
3154 * aligned address of do_fexit.
3155 */
3156 for (i = 0; i < fmod_ret->nr_links; i++) {
3157 emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image),
3158 image + (branches[i] - (u8 *)rw_image), X86_JNE);
3159 }
3160 }
3161
3162 if (fexit->nr_links) {
3163 if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
3164 false, image, rw_image)) {
3165 ret = -EINVAL;
3166 goto cleanup;
3167 }
3168 }
3169
3170 if (flags & BPF_TRAMP_F_RESTORE_REGS)
3171 restore_regs(m, &prog, regs_off);
3172
3173 /* This needs to be done regardless. If there were fmod_ret programs,
3174 * the return value is only updated on the stack and still needs to be
3175 * restored to R0.
3176 */
3177 if (flags & BPF_TRAMP_F_CALL_ORIG) {
3178 im->ip_epilogue = image + (prog - (u8 *)rw_image);
3179 /* arg1: mov rdi, im */
3180 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
3181 if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) {
3182 ret = -EINVAL;
3183 goto cleanup;
3184 }
3185 } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
3186 /* Before running the original function, load the
3187 * tail_call_cnt_ptr from stack to rax.
3188 */
3189 LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
3190 }
3191
3192 /* restore return value of orig_call or fentry prog back into RAX */
3193 if (save_ret)
3194 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
3195
3196 emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
3197 EMIT1(0xC9); /* leave */
3198 if (flags & BPF_TRAMP_F_SKIP_FRAME) {
3199 /* skip our return address and return to parent */
3200 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
3201 }
3202 emit_return(&prog, image + (prog - (u8 *)rw_image));
3203 /* Make sure the trampoline generation logic doesn't overflow */
3204 if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) {
3205 ret = -EFAULT;
3206 goto cleanup;
3207 }
3208 ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY;
3209
3210cleanup:
3211 kfree(branches);
3212 return ret;
3213}
3214
3215void *arch_alloc_bpf_trampoline(unsigned int size)
3216{
3217 return bpf_prog_pack_alloc(size, jit_fill_hole);
3218}
3219
3220void arch_free_bpf_trampoline(void *image, unsigned int size)
3221{
3222 bpf_prog_pack_free(image, size);
3223}
3224
3225int arch_protect_bpf_trampoline(void *image, unsigned int size)
3226{
3227 return 0;
3228}
3229
3230int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
3231 const struct btf_func_model *m, u32 flags,
3232 struct bpf_tramp_links *tlinks,
3233 void *func_addr)
3234{
3235 void *rw_image, *tmp;
3236 int ret;
3237 u32 size = image_end - image;
3238
3239 /* rw_image doesn't need to be in module memory range, so we can
3240 * use kvmalloc.
3241 */
3242 rw_image = kvmalloc(size, GFP_KERNEL);
3243 if (!rw_image)
3244 return -ENOMEM;
3245
3246 ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
3247 flags, tlinks, func_addr);
3248 if (ret < 0)
3249 goto out;
3250
3251 tmp = bpf_arch_text_copy(image, rw_image, size);
3252 if (IS_ERR(tmp))
3253 ret = PTR_ERR(tmp);
3254out:
3255 kvfree(rw_image);
3256 return ret;
3257}
3258
3259int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
3260 struct bpf_tramp_links *tlinks, void *func_addr)
3261{
3262 struct bpf_tramp_image im;
3263 void *image;
3264 int ret;
3265
3266 /* Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
3267 * This will NOT cause fragmentation in direct map, as we do not
3268 * call set_memory_*() on this buffer.
3269 *
3270 * We cannot use kvmalloc here, because we need image to be in
3271 * module memory range.
3272 */
3273 image = bpf_jit_alloc_exec(PAGE_SIZE);
3274 if (!image)
3275 return -ENOMEM;
3276
3277 ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
3278 m, flags, tlinks, func_addr);
3279 bpf_jit_free_exec(image);
3280 return ret;
3281}
3282
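/*
 * Illustrative output of emit_bpf_dispatcher() for two sorted targets
 * f0 < f1 (a leaf falls back to an indirect jump through rdx, possibly via
 * a retpoline thunk, when no target matches):
 *
 *       cmp  rdx, f0          ; pivot of [0..1]
 *       jg   1f
 *       cmp  rdx, f0          ; leaf [0..0]
 *       je   f0
 *       jmp  rdx
 *       <pad to 16-byte alignment>
 *   1:  cmp  rdx, f1          ; leaf [1..1]
 *       je   f1
 *       jmp  rdx
 */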
3283static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
3284{
3285 u8 *jg_reloc, *prog = *pprog;
3286 int pivot, err, jg_bytes = 1;
3287 s64 jg_offset;
3288
3289 if (a == b) {
3290 /* Leaf node of recursion, i.e. not a range of indices
3291 * anymore.
3292 */
3293 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
3294 if (!is_simm32(progs[a]))
3295 return -1;
3296 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
3297 progs[a]);
3298 err = emit_cond_near_jump(&prog, /* je func */
3299 (void *)progs[a], image + (prog - buf),
3300 X86_JE);
3301 if (err)
3302 return err;
3303
3304 emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
3305
3306 *pprog = prog;
3307 return 0;
3308 }
3309
3310 /* Not a leaf node, so we pivot, and recursively descend into
3311 * the lower and upper ranges.
3312 */
3313 pivot = (b - a) / 2;
3314 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
3315 if (!is_simm32(progs[a + pivot]))
3316 return -1;
3317 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
3318
3319 if (pivot > 2) { /* jg upper_part */
3320 /* Require near jump. */
3321 jg_bytes = 4;
3322 EMIT2_off32(0x0F, X86_JG + 0x10, 0);
3323 } else {
3324 EMIT2(X86_JG, 0);
3325 }
3326 jg_reloc = prog;
3327
3328 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
3329 progs, image, buf);
3330 if (err)
3331 return err;
3332
3333 /* From Intel 64 and IA-32 Architectures Optimization
3334 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
3335 * Coding Rule 11: All branch targets should be 16-byte
3336 * aligned.
3337 */
3338 emit_align(&prog, 16);
3339 jg_offset = prog - jg_reloc;
3340 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
3341
3342 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
3343 b, progs, image, buf);
3344 if (err)
3345 return err;
3346
3347 *pprog = prog;
3348 return 0;
3349}
3350
3351static int cmp_ips(const void *a, const void *b)
3352{
3353 const s64 *ipa = a;
3354 const s64 *ipb = b;
3355
3356 if (*ipa > *ipb)
3357 return 1;
3358 if (*ipa < *ipb)
3359 return -1;
3360 return 0;
3361}
3362
3363int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
3364{
3365 u8 *prog = buf;
3366
3367 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
3368 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
3369}
3370
3371static const char *bpf_get_prog_name(struct bpf_prog *prog)
3372{
3373 if (prog->aux->ksym.prog)
3374 return prog->aux->ksym.name;
3375 return prog->aux->name;
3376}
3377
3378static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size)
3379{
3380 int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
3381 u64 *stack_ptr;
3382
3383 for_each_possible_cpu(cpu) {
3384 stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
3385 stack_ptr[0] = PRIV_STACK_GUARD_VAL;
3386 stack_ptr[underflow_idx] = PRIV_STACK_GUARD_VAL;
3387 }
3388}
3389
3390static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size,
3391 struct bpf_prog *prog)
3392{
3393 int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3;
3394 u64 *stack_ptr;
3395
3396 for_each_possible_cpu(cpu) {
3397 stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu);
3398 if (stack_ptr[0] != PRIV_STACK_GUARD_VAL ||
3399 stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL) {
3400			pr_err("BPF private stack overflow/underflow detected for prog %s\n",
3401 bpf_get_prog_name(prog));
3402 break;
3403 }
3404 }
3405}
3406
3407struct x64_jit_data {
3408 struct bpf_binary_header *rw_header;
3409 struct bpf_binary_header *header;
3410 int *addrs;
3411 u8 *image;
3412 int proglen;
3413 struct jit_context ctx;
3414};
3415
3416#define MAX_PASSES 20
3417#define PADDING_PASSES (MAX_PASSES - 5)
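/*
 * do_jit() is re-run until the image size converges. If it has not converged
 * after PADDING_PASSES passes, padding nops are emitted (see the INSN_SZ_DIFF
 * handling in do_jit()) so that instructions can no longer shrink and the
 * image size is forced to stabilize within the remaining passes.
 */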
3418
3419struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
3420{
3421 struct bpf_binary_header *rw_header = NULL;
3422 struct bpf_binary_header *header = NULL;
3423 struct bpf_prog *tmp, *orig_prog = prog;
3424 void __percpu *priv_stack_ptr = NULL;
3425 struct x64_jit_data *jit_data;
3426 int priv_stack_alloc_sz;
3427 int proglen, oldproglen = 0;
3428 struct jit_context ctx = {};
3429 bool tmp_blinded = false;
3430 bool extra_pass = false;
3431 bool padding = false;
3432 u8 *rw_image = NULL;
3433 u8 *image = NULL;
3434 int *addrs;
3435 int pass;
3436 int i;
3437
3438 if (!prog->jit_requested)
3439 return orig_prog;
3440
3441 tmp = bpf_jit_blind_constants(prog);
3442 /*
3443 * If blinding was requested and we failed during blinding,
3444 * we must fall back to the interpreter.
3445 */
3446 if (IS_ERR(tmp))
3447 return orig_prog;
3448 if (tmp != prog) {
3449 tmp_blinded = true;
3450 prog = tmp;
3451 }
3452
3453 jit_data = prog->aux->jit_data;
3454 if (!jit_data) {
3455 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
3456 if (!jit_data) {
3457 prog = orig_prog;
3458 goto out;
3459 }
3460 prog->aux->jit_data = jit_data;
3461 }
3462 priv_stack_ptr = prog->aux->priv_stack_ptr;
3463 if (!priv_stack_ptr && prog->aux->jits_use_priv_stack) {
3464		/* Allocate the actual private stack: the verifier-calculated
3465		 * stack size plus two memory guards to detect overflow and
3466		 * underflow.
3467 */
3468 priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) +
3469 2 * PRIV_STACK_GUARD_SZ;
3470 priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 8, GFP_KERNEL);
3471 if (!priv_stack_ptr) {
3472 prog = orig_prog;
3473 goto out_priv_stack;
3474 }
3475
3476 priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz);
3477 prog->aux->priv_stack_ptr = priv_stack_ptr;
3478 }
3479 addrs = jit_data->addrs;
3480 if (addrs) {
3481 ctx = jit_data->ctx;
3482 oldproglen = jit_data->proglen;
3483 image = jit_data->image;
3484 header = jit_data->header;
3485 rw_header = jit_data->rw_header;
3486 rw_image = (void *)rw_header + ((void *)image - (void *)header);
3487 extra_pass = true;
3488 padding = true;
3489 goto skip_init_addrs;
3490 }
3491 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
3492 if (!addrs) {
3493 prog = orig_prog;
3494 goto out_addrs;
3495 }
3496
3497 /*
3498	 * Before the first pass, make a rough estimate of addrs[]:
3499	 * each BPF instruction is translated to less than 64 bytes.
3500 */
3501 for (proglen = 0, i = 0; i <= prog->len; i++) {
3502 proglen += 64;
3503 addrs[i] = proglen;
3504 }
3505 ctx.cleanup_addr = proglen;
3506skip_init_addrs:
3507
3508 /*
3509 * JITed image shrinks with every pass and the loop iterates
3510 * until the image stops shrinking. Very large BPF programs
3511	 * may converge on the last pass. In such a case, do one more
3512 * pass to emit the final image.
3513 */
3514 for (pass = 0; pass < MAX_PASSES || image; pass++) {
3515 if (!padding && pass >= PADDING_PASSES)
3516 padding = true;
3517 proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
3518 if (proglen <= 0) {
3519out_image:
3520 image = NULL;
3521 if (header) {
3522 bpf_arch_text_copy(&header->size, &rw_header->size,
3523 sizeof(rw_header->size));
3524 bpf_jit_binary_pack_free(header, rw_header);
3525 }
3526 /* Fall back to interpreter mode */
3527 prog = orig_prog;
3528 if (extra_pass) {
3529 prog->bpf_func = NULL;
3530 prog->jited = 0;
3531 prog->jited_len = 0;
3532 }
3533 goto out_addrs;
3534 }
3535 if (image) {
3536 if (proglen != oldproglen) {
3537 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
3538 proglen, oldproglen);
3539 goto out_image;
3540 }
3541 break;
3542 }
3543 if (proglen == oldproglen) {
3544 /*
3545 * The number of entries in extable is the number of BPF_LDX
3546 * insns that access kernel memory via "pointer to BTF type".
3547 * The verifier changed their opcode from LDX|MEM|size
3548 * to LDX|PROBE_MEM|size to make JITing easier.
3549 */
3550 u32 align = __alignof__(struct exception_table_entry);
3551 u32 extable_size = prog->aux->num_exentries *
3552 sizeof(struct exception_table_entry);
3553
3554 /* allocate module memory for x86 insns and extable */
3555 header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
3556 &image, align, &rw_header, &rw_image,
3557 jit_fill_hole);
3558 if (!header) {
3559 prog = orig_prog;
3560 goto out_addrs;
3561 }
3562 prog->aux->extable = (void *) image + roundup(proglen, align);
3563 }
3564 oldproglen = proglen;
3565 cond_resched();
3566 }
3567
3568 if (bpf_jit_enable > 1)
3569 bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
3570
3571 if (image) {
3572 if (!prog->is_func || extra_pass) {
3573 /*
3574 * bpf_jit_binary_pack_finalize fails in two scenarios:
3575 * 1) header is not pointing to proper module memory;
3576 * 2) the arch doesn't support bpf_arch_text_copy().
3577 *
3578 * Both cases are serious bugs and justify WARN_ON.
3579 */
3580 if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) {
3581 /* header has been freed */
3582 header = NULL;
3583 goto out_image;
3584 }
3585
3586 bpf_tail_call_direct_fixup(prog);
3587 } else {
3588 jit_data->addrs = addrs;
3589 jit_data->ctx = ctx;
3590 jit_data->proglen = proglen;
3591 jit_data->image = image;
3592 jit_data->header = header;
3593 jit_data->rw_header = rw_header;
3594 }
3595 /*
3596 * ctx.prog_offset is used when CFI preambles put code *before*
3597 * the function. See emit_cfi(). For FineIBT specifically this code
3598 * can also be executed and bpf_prog_kallsyms_add() will
3599 * generate an additional symbol to cover this, hence also
3600 * decrement proglen.
3601 */
3602 prog->bpf_func = (void *)image + cfi_get_offset();
3603 prog->jited = 1;
3604 prog->jited_len = proglen - cfi_get_offset();
3605 } else {
3606 prog = orig_prog;
3607 }
3608
3609 if (!image || !prog->is_func || extra_pass) {
3610 if (image)
3611 bpf_prog_fill_jited_linfo(prog, addrs + 1);
3612out_addrs:
3613 kvfree(addrs);
3614 if (!image && priv_stack_ptr) {
3615 free_percpu(priv_stack_ptr);
3616 prog->aux->priv_stack_ptr = NULL;
3617 }
3618out_priv_stack:
3619 kfree(jit_data);
3620 prog->aux->jit_data = NULL;
3621 }
3622out:
3623 if (tmp_blinded)
3624 bpf_jit_prog_release_other(prog, prog == orig_prog ?
3625 tmp : orig_prog);
3626 return prog;
3627}
3628
3629bool bpf_jit_supports_kfunc_call(void)
3630{
3631 return true;
3632}
3633
3634void *bpf_arch_text_copy(void *dst, void *src, size_t len)
3635{
3636 if (text_poke_copy(dst, src, len) == NULL)
3637 return ERR_PTR(-EINVAL);
3638 return dst;
3639}
3640
3641/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
3642bool bpf_jit_supports_subprog_tailcalls(void)
3643{
3644 return true;
3645}
3646
3647bool bpf_jit_supports_percpu_insn(void)
3648{
3649 return true;
3650}
3651
3652void bpf_jit_free(struct bpf_prog *prog)
3653{
3654 if (prog->jited) {
3655 struct x64_jit_data *jit_data = prog->aux->jit_data;
3656 struct bpf_binary_header *hdr;
3657 void __percpu *priv_stack_ptr;
3658 int priv_stack_alloc_sz;
3659
3660 /*
3661 * If we fail the final pass of JIT (from jit_subprogs),
3662 * the program may not be finalized yet. Call finalize here
3663 * before freeing it.
3664 */
3665 if (jit_data) {
3666 bpf_jit_binary_pack_finalize(jit_data->header,
3667 jit_data->rw_header);
3668 kvfree(jit_data->addrs);
3669 kfree(jit_data);
3670 }
3671 prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset();
3672 hdr = bpf_jit_binary_pack_hdr(prog);
3673 bpf_jit_binary_pack_free(hdr, NULL);
3674 priv_stack_ptr = prog->aux->priv_stack_ptr;
3675 if (priv_stack_ptr) {
3676 priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) +
3677 2 * PRIV_STACK_GUARD_SZ;
3678 priv_stack_check_guard(priv_stack_ptr, priv_stack_alloc_sz, prog);
3679 free_percpu(prog->aux->priv_stack_ptr);
3680 }
3681 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
3682 }
3683
3684 bpf_prog_unlock_free(prog);
3685}
3686
3687bool bpf_jit_supports_exceptions(void)
3688{
3689 /* We unwind through both kernel frames (starting from within bpf_throw
3690 * call) and BPF frames. Therefore we require ORC unwinder to be enabled
3691 * to walk kernel frames and reach BPF frames in the stack trace.
3692 */
3693 return IS_ENABLED(CONFIG_UNWINDER_ORC);
3694}
3695
3696bool bpf_jit_supports_private_stack(void)
3697{
3698 return true;
3699}
3700
3701void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3702{
3703#if defined(CONFIG_UNWINDER_ORC)
3704 struct unwind_state state;
3705 unsigned long addr;
3706
3707 for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state);
3708 unwind_next_frame(&state)) {
3709 addr = unwind_get_return_address(&state);
3710 if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp))
3711 break;
3712 }
3713 return;
3714#endif
3715 WARN(1, "verification of programs using bpf_throw should have failed\n");
3716}
3717
3718void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
3719 struct bpf_prog *new, struct bpf_prog *old)
3720{
3721 u8 *old_addr, *new_addr, *old_bypass_addr;
3722 int ret;
3723
3724 old_bypass_addr = old ? NULL : poke->bypass_addr;
3725 old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
3726 new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
3727
3728 /*
3729 * On program loading or teardown, the program's kallsym entry
3730 * might not be in place, so we use __bpf_arch_text_poke to skip
3731 * the kallsyms check.
3732 */
3733 if (new) {
3734 ret = __bpf_arch_text_poke(poke->tailcall_target,
3735 BPF_MOD_JUMP,
3736 old_addr, new_addr);
3737 BUG_ON(ret < 0);
3738 if (!old) {
3739 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
3740 BPF_MOD_JUMP,
3741 poke->bypass_addr,
3742 NULL);
3743 BUG_ON(ret < 0);
3744 }
3745 } else {
3746 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
3747 BPF_MOD_JUMP,
3748 old_bypass_addr,
3749 poke->bypass_addr);
3750 BUG_ON(ret < 0);
3751		/* let other CPUs finish the execution of the program
3752		 * so that they will not be exposed to an invalid
3753		 * nop, stack unwind, or nop state
3754 */
3755 if (!ret)
3756 synchronize_rcu();
3757 ret = __bpf_arch_text_poke(poke->tailcall_target,
3758 BPF_MOD_JUMP,
3759 old_addr, NULL);
3760 BUG_ON(ret < 0);
3761 }
3762}
3763
3764bool bpf_jit_supports_arena(void)
3765{
3766 return true;
3767}
3768
3769bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
3770{
3771 if (!in_arena)
3772 return true;
3773 switch (insn->code) {
3774 case BPF_STX | BPF_ATOMIC | BPF_W:
3775 case BPF_STX | BPF_ATOMIC | BPF_DW:
3776 if (insn->imm == (BPF_AND | BPF_FETCH) ||
3777 insn->imm == (BPF_OR | BPF_FETCH) ||
3778 insn->imm == (BPF_XOR | BPF_FETCH))
3779 return false;
3780 }
3781 return true;
3782}
3783
3784bool bpf_jit_supports_ptr_xchg(void)
3785{
3786 return true;
3787}
3788
3789/* x86-64 JIT emits its own code to filter user addresses so return 0 here */
3790u64 bpf_arch_uaddress_limit(void)
3791{
3792 return 0;
3793}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * bpf_jit_comp.c: BPF JIT compiler
4 *
5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7 */
8#include <linux/netdevice.h>
9#include <linux/filter.h>
10#include <linux/if_vlan.h>
11#include <linux/bpf.h>
12#include <linux/memory.h>
13#include <linux/sort.h>
14#include <asm/extable.h>
15#include <asm/set_memory.h>
16#include <asm/nospec-branch.h>
17#include <asm/text-patching.h>
18#include <asm/asm-prototypes.h>
19
20static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
21{
22 if (len == 1)
23 *ptr = bytes;
24 else if (len == 2)
25 *(u16 *)ptr = bytes;
26 else {
27 *(u32 *)ptr = bytes;
28 barrier();
29 }
30 return ptr + len;
31}
32
33#define EMIT(bytes, len) \
34 do { prog = emit_code(prog, bytes, len); } while (0)
35
36#define EMIT1(b1) EMIT(b1, 1)
37#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
38#define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
39#define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
40
41#define EMIT1_off32(b1, off) \
42 do { EMIT1(b1); EMIT(off, 4); } while (0)
43#define EMIT2_off32(b1, b2, off) \
44 do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
45#define EMIT3_off32(b1, b2, b3, off) \
46 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
47#define EMIT4_off32(b1, b2, b3, b4, off) \
48 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
49
50static bool is_imm8(int value)
51{
52 return value <= 127 && value >= -128;
53}
54
55static bool is_simm32(s64 value)
56{
57 return value == (s64)(s32)value;
58}
59
60static bool is_uimm32(u64 value)
61{
62 return value == (u64)(u32)value;
63}
64
65/* mov dst, src */
66#define EMIT_mov(DST, SRC) \
67 do { \
68 if (DST != SRC) \
69 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
70 } while (0)
71
72static int bpf_size_to_x86_bytes(int bpf_size)
73{
74 if (bpf_size == BPF_W)
75 return 4;
76 else if (bpf_size == BPF_H)
77 return 2;
78 else if (bpf_size == BPF_B)
79 return 1;
80 else if (bpf_size == BPF_DW)
81 return 4; /* imm32 */
82 else
83 return 0;
84}
85
86/*
87 * List of x86 cond jumps opcodes (. + s8)
88 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
89 */
90#define X86_JB 0x72
91#define X86_JAE 0x73
92#define X86_JE 0x74
93#define X86_JNE 0x75
94#define X86_JBE 0x76
95#define X86_JA 0x77
96#define X86_JL 0x7C
97#define X86_JGE 0x7D
98#define X86_JLE 0x7E
99#define X86_JG 0x7F
100
101/* Pick a register outside of BPF range for JIT internal work */
102#define AUX_REG (MAX_BPF_JIT_REG + 1)
103#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
104
105/*
106 * The following table maps BPF registers to x86-64 registers.
107 *
108 * x86-64 register R12 is unused, since if used as base address
109 * register in load/store instructions, it always needs an
110 * extra byte of encoding and is callee saved.
111 *
112 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
113 * trampoline. x86-64 register R10 is used for blinding (if enabled).
114 */
115static const int reg2hex[] = {
116 [BPF_REG_0] = 0, /* RAX */
117 [BPF_REG_1] = 7, /* RDI */
118 [BPF_REG_2] = 6, /* RSI */
119 [BPF_REG_3] = 2, /* RDX */
120 [BPF_REG_4] = 1, /* RCX */
121 [BPF_REG_5] = 0, /* R8 */
122 [BPF_REG_6] = 3, /* RBX callee saved */
123 [BPF_REG_7] = 5, /* R13 callee saved */
124 [BPF_REG_8] = 6, /* R14 callee saved */
125 [BPF_REG_9] = 7, /* R15 callee saved */
126 [BPF_REG_FP] = 5, /* RBP readonly */
127 [BPF_REG_AX] = 2, /* R10 temp register */
128 [AUX_REG] = 3, /* R11 temp register */
129 [X86_REG_R9] = 1, /* R9 register, 6th function argument */
130};
131
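/*
 * Offset of each BPF register's image within struct pt_regs. Used via the
 * upper bits of ex->fixup so that ex_handler_bpf() knows which saved
 * register to zero out when a BPF_PROBE_MEM load faults.
 */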
132static const int reg2pt_regs[] = {
133 [BPF_REG_0] = offsetof(struct pt_regs, ax),
134 [BPF_REG_1] = offsetof(struct pt_regs, di),
135 [BPF_REG_2] = offsetof(struct pt_regs, si),
136 [BPF_REG_3] = offsetof(struct pt_regs, dx),
137 [BPF_REG_4] = offsetof(struct pt_regs, cx),
138 [BPF_REG_5] = offsetof(struct pt_regs, r8),
139 [BPF_REG_6] = offsetof(struct pt_regs, bx),
140 [BPF_REG_7] = offsetof(struct pt_regs, r13),
141 [BPF_REG_8] = offsetof(struct pt_regs, r14),
142 [BPF_REG_9] = offsetof(struct pt_regs, r15),
143};
144
145/*
146 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
147 * which need extra byte of encoding.
148 * rax,rcx,...,rbp have simpler encoding
149 */
150static bool is_ereg(u32 reg)
151{
152 return (1 << reg) & (BIT(BPF_REG_5) |
153 BIT(AUX_REG) |
154 BIT(BPF_REG_7) |
155 BIT(BPF_REG_8) |
156 BIT(BPF_REG_9) |
157 BIT(X86_REG_R9) |
158 BIT(BPF_REG_AX));
159}
160
161/*
162 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
163 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
164 * of encoding. al,cl,dl,bl have simpler encoding.
165 */
166static bool is_ereg_8l(u32 reg)
167{
168 return is_ereg(reg) ||
169 (1 << reg) & (BIT(BPF_REG_1) |
170 BIT(BPF_REG_2) |
171 BIT(BPF_REG_FP));
172}
173
174static bool is_axreg(u32 reg)
175{
176 return reg == BPF_REG_0;
177}
178
179/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
180static u8 add_1mod(u8 byte, u32 reg)
181{
182 if (is_ereg(reg))
183 byte |= 1;
184 return byte;
185}
186
187static u8 add_2mod(u8 byte, u32 r1, u32 r2)
188{
189 if (is_ereg(r1))
190 byte |= 1;
191 if (is_ereg(r2))
192 byte |= 4;
193 return byte;
194}
195
196/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
197static u8 add_1reg(u8 byte, u32 dst_reg)
198{
199 return byte + reg2hex[dst_reg];
200}
201
202/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
203static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
204{
205 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
206}
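
/*
 * Example: EMIT_mov(BPF_REG_6, BPF_REG_7) produces 0x4c 0x89 0xeb,
 * i.e. "mov rbx, r13": add_2mod(0x48, ...) sets REX.W|REX.R because r13
 * is an extended register, and add_2reg(0xC0, ...) packs rbx (0x3) and
 * r13 (0x5 << 3) into the ModRM byte 0xc0 + 0x2b = 0xeb.
 */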
207
208/* Some 1-byte opcodes for binary ALU operations */
209static u8 simple_alu_opcodes[] = {
210 [BPF_ADD] = 0x01,
211 [BPF_SUB] = 0x29,
212 [BPF_AND] = 0x21,
213 [BPF_OR] = 0x09,
214 [BPF_XOR] = 0x31,
215 [BPF_LSH] = 0xE0,
216 [BPF_RSH] = 0xE8,
217 [BPF_ARSH] = 0xF8,
218};
219
220static void jit_fill_hole(void *area, unsigned int size)
221{
222 /* Fill whole space with INT3 instructions */
223 memset(area, 0xcc, size);
224}
225
226struct jit_context {
227 int cleanup_addr; /* Epilogue code offset */
228};
229
230/* Maximum number of bytes emitted while JITing one eBPF insn */
231#define BPF_MAX_INSN_SIZE 128
232#define BPF_INSN_SAFETY 64
233
234/* Number of bytes emit_patch() needs to generate instructions */
235#define X86_PATCH_SIZE 5
236/* Number of bytes that will be skipped on tailcall */
237#define X86_TAIL_CALL_OFFSET 11
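/*
 * 11 = 5-byte nop (the patchable call site) + 2 bytes (xor eax, eax or
 * nop2) + 1-byte push rbp + 3-byte mov rbp, rsp: a tail call enters the
 * target program right at its "sub rsp, ..." and reuses the current frame.
 */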
238
239static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
240{
241 u8 *prog = *pprog;
242
243 if (callee_regs_used[0])
244 EMIT1(0x53); /* push rbx */
245 if (callee_regs_used[1])
246 EMIT2(0x41, 0x55); /* push r13 */
247 if (callee_regs_used[2])
248 EMIT2(0x41, 0x56); /* push r14 */
249 if (callee_regs_used[3])
250 EMIT2(0x41, 0x57); /* push r15 */
251 *pprog = prog;
252}
253
254static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
255{
256 u8 *prog = *pprog;
257
258 if (callee_regs_used[3])
259 EMIT2(0x41, 0x5F); /* pop r15 */
260 if (callee_regs_used[2])
261 EMIT2(0x41, 0x5E); /* pop r14 */
262 if (callee_regs_used[1])
263 EMIT2(0x41, 0x5D); /* pop r13 */
264 if (callee_regs_used[0])
265 EMIT1(0x5B); /* pop rbx */
266 *pprog = prog;
267}
268
269/*
270 * Emit x86-64 prologue code for BPF program.
271 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
272 * while jumping to another program
273 */
274static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
275 bool tail_call_reachable, bool is_subprog)
276{
277 u8 *prog = *pprog;
278
279 /* BPF trampoline can be made to work without these nops,
280 * but let's waste 5 bytes for now and optimize later
281 */
282 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
283 prog += X86_PATCH_SIZE;
284 if (!ebpf_from_cbpf) {
285 if (tail_call_reachable && !is_subprog)
286 EMIT2(0x31, 0xC0); /* xor eax, eax */
287 else
288 EMIT2(0x66, 0x90); /* nop2 */
289 }
290 EMIT1(0x55); /* push rbp */
291 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
292 /* sub rsp, rounded_stack_depth */
293 if (stack_depth)
294 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
295 if (tail_call_reachable)
296 EMIT1(0x50); /* push rax */
297 *pprog = prog;
298}
299
300static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
301{
302 u8 *prog = *pprog;
303 s64 offset;
304
305 offset = func - (ip + X86_PATCH_SIZE);
306 if (!is_simm32(offset)) {
307 pr_err("Target call %p is out of range\n", func);
308 return -ERANGE;
309 }
310 EMIT1_off32(opcode, offset);
311 *pprog = prog;
312 return 0;
313}
314
315static int emit_call(u8 **pprog, void *func, void *ip)
316{
317 return emit_patch(pprog, func, ip, 0xE8);
318}
319
320static int emit_jump(u8 **pprog, void *func, void *ip)
321{
322 return emit_patch(pprog, func, ip, 0xE9);
323}
324
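/*
 * Replace the 5-byte call/jmp (or nop5) at @ip: verify that @ip currently
 * holds the instruction expected for @old_addr, then patch in the one for
 * @new_addr (NULL means nop5 on either side). @text_live selects
 * text_poke_bp() vs. a plain memcpy for images that are not live yet.
 * Returns 0 on success, 1 if @ip already holds the new instruction, and
 * -EBUSY if the current bytes do not match @old_addr.
 */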
325static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
326 void *old_addr, void *new_addr,
327 const bool text_live)
328{
329 const u8 *nop_insn = x86_nops[5];
330 u8 old_insn[X86_PATCH_SIZE];
331 u8 new_insn[X86_PATCH_SIZE];
332 u8 *prog;
333 int ret;
334
335 memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
336 if (old_addr) {
337 prog = old_insn;
338 ret = t == BPF_MOD_CALL ?
339 emit_call(&prog, old_addr, ip) :
340 emit_jump(&prog, old_addr, ip);
341 if (ret)
342 return ret;
343 }
344
345 memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
346 if (new_addr) {
347 prog = new_insn;
348 ret = t == BPF_MOD_CALL ?
349 emit_call(&prog, new_addr, ip) :
350 emit_jump(&prog, new_addr, ip);
351 if (ret)
352 return ret;
353 }
354
355 ret = -EBUSY;
356 mutex_lock(&text_mutex);
357 if (memcmp(ip, old_insn, X86_PATCH_SIZE))
358 goto out;
359 ret = 1;
360 if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
361 if (text_live)
362 text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
363 else
364 memcpy(ip, new_insn, X86_PATCH_SIZE);
365 ret = 0;
366 }
367out:
368 mutex_unlock(&text_mutex);
369 return ret;
370}
371
372int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
373 void *old_addr, void *new_addr)
374{
375 if (!is_kernel_text((long)ip) &&
376 !is_bpf_text_address((long)ip))
377 /* BPF poking in modules is not supported */
378 return -EINVAL;
379
380 return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
381}
382
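/* Size in bytes of the pop_callee_regs() sequence for this program */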
383static int get_pop_bytes(bool *callee_regs_used)
384{
385 int bytes = 0;
386
387 if (callee_regs_used[3])
388 bytes += 2;
389 if (callee_regs_used[2])
390 bytes += 2;
391 if (callee_regs_used[1])
392 bytes += 2;
393 if (callee_regs_used[0])
394 bytes += 1;
395
396 return bytes;
397}
398
399/*
400 * Generate the following code:
401 *
402 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
403 * if (index >= array->map.max_entries)
404 * goto out;
405 * if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
406 * goto out;
407 * prog = array->ptrs[index];
408 * if (prog == NULL)
409 * goto out;
410 * goto *(prog->bpf_func + prologue_size);
411 * out:
412 */
413static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
414 u32 stack_depth)
415{
416 int tcc_off = -4 - round_up(stack_depth, 8);
417 u8 *prog = *pprog;
418 int pop_bytes = 0;
419 int off1 = 42;
420 int off2 = 31;
421 int off3 = 9;
422
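 /*
 * off1/off2/off3 are the distances from the three conditional jumps
 * below to the out: label, counting only the fixed-size instructions;
 * the retpoline size, the callee-reg pops and the optional "add rsp"
 * are added on top.
 */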
423 /* count the additional bytes used for popping callee regs from stack
424 * that need to be taken into account for each of the offsets that
425 * are used for bailing out of the tail call
426 */
427 pop_bytes = get_pop_bytes(callee_regs_used);
428 off1 += pop_bytes;
429 off2 += pop_bytes;
430 off3 += pop_bytes;
431
432 if (stack_depth) {
433 off1 += 7;
434 off2 += 7;
435 off3 += 7;
436 }
437
438 /*
439 * rdi - pointer to ctx
440 * rsi - pointer to bpf_array
441 * rdx - index in bpf_array
442 */
443
444 /*
445 * if (index >= array->map.max_entries)
446 * goto out;
447 */
448 EMIT2(0x89, 0xD2); /* mov edx, edx */
449 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
450 offsetof(struct bpf_array, map.max_entries));
451#define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */
452 EMIT2(X86_JBE, OFFSET1); /* jbe out */
453
454 /*
455 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
456 * goto out;
457 */
458 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
459 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
460#define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE)
461 EMIT2(X86_JA, OFFSET2); /* ja out */
462 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
463 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
464
465 /* prog = array->ptrs[index]; */
466 EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
467 offsetof(struct bpf_array, ptrs));
468
469 /*
470 * if (prog == NULL)
471 * goto out;
472 */
473 EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */
474#define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE)
475 EMIT2(X86_JE, OFFSET3); /* je out */
476
477 *pprog = prog;
478 pop_callee_regs(pprog, callee_regs_used);
479 prog = *pprog;
480
481 EMIT1(0x58); /* pop rax */
482 if (stack_depth)
483 EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */
484 round_up(stack_depth, 8));
485
486 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
487 EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */
488 offsetof(struct bpf_prog, bpf_func));
489 EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */
490 X86_TAIL_CALL_OFFSET);
491 /*
492 * Now we're ready to jump into next BPF program
493 * rdi == ctx (1st arg)
494 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
495 */
496 RETPOLINE_RCX_BPF_JIT();
497
498 /* out: */
499 *pprog = prog;
500}
501
502static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
503 u8 **pprog, int addr, u8 *image,
504 bool *callee_regs_used, u32 stack_depth)
505{
506 int tcc_off = -4 - round_up(stack_depth, 8);
507 u8 *prog = *pprog;
508 int pop_bytes = 0;
509 int off1 = 20;
510 int poke_off;
511
512 /* count the additional bytes used for popping callee regs from the stack
513 * that need to be taken into account for the jump offset that is used for
514 * bailing out of the tail call when the limit is reached
515 */
516 pop_bytes = get_pop_bytes(callee_regs_used);
517 off1 += pop_bytes;
518
519 /*
520 * total bytes for:
521 * - nop5/ jmpq $off
522 * - pop callee regs
523 * - sub rsp, $val if depth > 0
524 * - pop rax
525 */
526 poke_off = X86_PATCH_SIZE + pop_bytes + 1;
527 if (stack_depth) {
528 poke_off += 7;
529 off1 += 7;
530 }
531
532 /*
533 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
534 * goto out;
535 */
536 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
537 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
538 EMIT2(X86_JA, off1); /* ja out */
539 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
540 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
541
542 poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
543 poke->adj_off = X86_TAIL_CALL_OFFSET;
544 poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
545 poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
546
547 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
548 poke->tailcall_bypass);
549
550 *pprog = prog;
551 pop_callee_regs(pprog, callee_regs_used);
552 prog = *pprog;
553 EMIT1(0x58); /* pop rax */
554 if (stack_depth)
555 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
556
557 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
558 prog += X86_PATCH_SIZE;
559 /* out: */
560
561 *pprog = prog;
562}
563
564static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
565{
566 struct bpf_jit_poke_descriptor *poke;
567 struct bpf_array *array;
568 struct bpf_prog *target;
569 int i, ret;
570
571 for (i = 0; i < prog->aux->size_poke_tab; i++) {
572 poke = &prog->aux->poke_tab[i];
573 if (poke->aux && poke->aux != prog->aux)
574 continue;
575
576 WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
577
578 if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
579 continue;
580
581 array = container_of(poke->tail_call.map, struct bpf_array, map);
582 mutex_lock(&array->aux->poke_mutex);
583 target = array->ptrs[poke->tail_call.key];
584 if (target) {
585 /* Plain memcpy is used when image is not live yet
586 * and still not locked as read-only. Once poke
587 * location is active (poke->tailcall_target_stable),
588 * any parallel bpf_arch_text_poke() might occur
589 * still on the read-write image until we finally
590 * locked it as read-only. Both modifications on
591 * the given image are under text_mutex to avoid
592 * interference.
593 */
594 ret = __bpf_arch_text_poke(poke->tailcall_target,
595 BPF_MOD_JUMP, NULL,
596 (u8 *)target->bpf_func +
597 poke->adj_off, false);
598 BUG_ON(ret < 0);
599 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
600 BPF_MOD_JUMP,
601 (u8 *)poke->tailcall_target +
602 X86_PATCH_SIZE, NULL, false);
603 BUG_ON(ret < 0);
604 }
605 WRITE_ONCE(poke->tailcall_target_stable, true);
606 mutex_unlock(&array->aux->poke_mutex);
607 }
608}
609
610static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
611 u32 dst_reg, const u32 imm32)
612{
613 u8 *prog = *pprog;
614 u8 b1, b2, b3;
615
616 /*
617 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
618 * (which zero-extends imm32) to save 2 bytes.
619 */
620 if (sign_propagate && (s32)imm32 < 0) {
621 /* 'mov %rax, imm32' sign extends imm32 */
622 b1 = add_1mod(0x48, dst_reg);
623 b2 = 0xC7;
624 b3 = 0xC0;
625 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
626 goto done;
627 }
628
629 /*
630 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
631 * to save 3 bytes.
632 */
633 if (imm32 == 0) {
634 if (is_ereg(dst_reg))
635 EMIT1(add_2mod(0x40, dst_reg, dst_reg));
636 b2 = 0x31; /* xor */
637 b3 = 0xC0;
638 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
639 goto done;
640 }
641
642 /* mov %eax, imm32 */
643 if (is_ereg(dst_reg))
644 EMIT1(add_1mod(0x40, dst_reg));
645 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
646done:
647 *pprog = prog;
648}
649
650static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
651 const u32 imm32_hi, const u32 imm32_lo)
652{
653 u8 *prog = *pprog;
654
655 if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
656 /*
657 * For emitting a plain u32, where the sign bit must not be
658 * propagated, LLVM tends to emit a 64-bit load-immediate rather
659 * than a mov32, so save a couple of bytes by just doing
660 * 'mov %eax, imm32' instead.
661 */
662 emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
663 } else {
664 /* movabsq %rax, imm64 */
665 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
666 EMIT(imm32_lo, 4);
667 EMIT(imm32_hi, 4);
668 }
669
670 *pprog = prog;
671}
672
673static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
674{
675 u8 *prog = *pprog;
676
677 if (is64) {
678 /* mov dst, src */
679 EMIT_mov(dst_reg, src_reg);
680 } else {
681 /* mov32 dst, src */
682 if (is_ereg(dst_reg) || is_ereg(src_reg))
683 EMIT1(add_2mod(0x40, dst_reg, src_reg));
684 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
685 }
686
687 *pprog = prog;
688}
689
690/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
691static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
692{
693 u8 *prog = *pprog;
694
695 if (is_imm8(off)) {
696 /* 1-byte signed displacement.
697 *
698 * If off == 0 we could skip this and save one extra byte, but
699 * the special case of x86 R13, which always needs an offset, is
700 * not worth the hassle.
701 */
702 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
703 } else {
704 /* 4-byte signed displacement */
705 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
706 }
707 *pprog = prog;
708}
709
710/*
711 * Emit a REX byte if it will be necessary to address these registers
712 */
713static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
714{
715 u8 *prog = *pprog;
716
717 if (is64)
718 EMIT1(add_2mod(0x48, dst_reg, src_reg));
719 else if (is_ereg(dst_reg) || is_ereg(src_reg))
720 EMIT1(add_2mod(0x40, dst_reg, src_reg));
721 *pprog = prog;
722}
723
724/* LDX: dst_reg = *(u8*)(src_reg + off) */
725static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
726{
727 u8 *prog = *pprog;
728
729 switch (size) {
730 case BPF_B:
731 /* Emit 'movzx rax, byte ptr [rax + off]' */
732 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
733 break;
734 case BPF_H:
735 /* Emit 'movzx rax, word ptr [rax + off]' */
736 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
737 break;
738 case BPF_W:
739 /* Emit 'mov eax, dword ptr [rax+0x14]' */
740 if (is_ereg(dst_reg) || is_ereg(src_reg))
741 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
742 else
743 EMIT1(0x8B);
744 break;
745 case BPF_DW:
746 /* Emit 'mov rax, qword ptr [rax+0x14]' */
747 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
748 break;
749 }
750 emit_insn_suffix(&prog, src_reg, dst_reg, off);
751 *pprog = prog;
752}
753
754/* STX: *(u8*)(dst_reg + off) = src_reg */
755static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
756{
757 u8 *prog = *pprog;
758
759 switch (size) {
760 case BPF_B:
761 /* Emit 'mov byte ptr [rax + off], al' */
762 if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
763 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
764 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
765 else
766 EMIT1(0x88);
767 break;
768 case BPF_H:
769 if (is_ereg(dst_reg) || is_ereg(src_reg))
770 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
771 else
772 EMIT2(0x66, 0x89);
773 break;
774 case BPF_W:
775 if (is_ereg(dst_reg) || is_ereg(src_reg))
776 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
777 else
778 EMIT1(0x89);
779 break;
780 case BPF_DW:
781 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
782 break;
783 }
784 emit_insn_suffix(&prog, dst_reg, src_reg, off);
785 *pprog = prog;
786}
787
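/*
 * Emit a lock-prefixed read-modify-write on *(u32/u64 *)(dst_reg + off).
 * The plain ADD/SUB/AND/OR/XOR forms return nothing; BPF_ADD | BPF_FETCH
 * and BPF_XCHG return the old value in src_reg, and BPF_CMPXCHG returns
 * it in R0 (rax).
 */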
788static int emit_atomic(u8 **pprog, u8 atomic_op,
789 u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
790{
791 u8 *prog = *pprog;
792
793 EMIT1(0xF0); /* lock prefix */
794
795 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
796
797 /* emit opcode */
798 switch (atomic_op) {
799 case BPF_ADD:
800 case BPF_SUB:
801 case BPF_AND:
802 case BPF_OR:
803 case BPF_XOR:
804 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
805 EMIT1(simple_alu_opcodes[atomic_op]);
806 break;
807 case BPF_ADD | BPF_FETCH:
808 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
809 EMIT2(0x0F, 0xC1);
810 break;
811 case BPF_XCHG:
812 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
813 EMIT1(0x87);
814 break;
815 case BPF_CMPXCHG:
816 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
817 EMIT2(0x0F, 0xB1);
818 break;
819 default:
820 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
821 return -EFAULT;
822 }
823
824 emit_insn_suffix(&prog, dst_reg, src_reg, off);
825
826 *pprog = prog;
827 return 0;
828}
829
830static bool ex_handler_bpf(const struct exception_table_entry *x,
831 struct pt_regs *regs, int trapnr,
832 unsigned long error_code, unsigned long fault_addr)
833{
834 u32 reg = x->fixup >> 8;
835
836 /* jump over faulting load and clear dest register */
837 *(unsigned long *)((void *)regs + reg) = 0;
838 regs->ip += x->fixup & 0xff;
839 return true;
840}
841
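/*
 * Scan the program once to record which of the callee-saved registers
 * R6-R9 it actually uses (so the prologue/epilogue only saves those) and
 * whether a tail call instruction appears.
 */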
842static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
843 bool *regs_used, bool *tail_call_seen)
844{
845 int i;
846
847 for (i = 1; i <= insn_cnt; i++, insn++) {
848 if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
849 *tail_call_seen = true;
850 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
851 regs_used[0] = true;
852 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
853 regs_used[1] = true;
854 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
855 regs_used[2] = true;
856 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
857 regs_used[3] = true;
858 }
859}
860
861static void emit_nops(u8 **pprog, int len)
862{
863 u8 *prog = *pprog;
864 int i, noplen;
865
866 while (len > 0) {
867 noplen = len;
868
869 if (noplen > ASM_NOP_MAX)
870 noplen = ASM_NOP_MAX;
871
872 for (i = 0; i < noplen; i++)
873 EMIT1(x86_nops[noplen][i]);
874 len -= noplen;
875 }
876
877 *pprog = prog;
878}
879
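/*
 * How many bytes smaller the current encoding of BPF insn i is compared
 * to the previous pass (addrs[] still holds the previous pass's offsets).
 * With jmp_padding enabled, nops are emitted to make up the difference so
 * the image never shrinks and offsets computed from addrs[] stay valid.
 */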
880#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
881
882static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
883 int oldproglen, struct jit_context *ctx, bool jmp_padding)
884{
885 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
886 struct bpf_insn *insn = bpf_prog->insnsi;
887 bool callee_regs_used[4] = {};
888 int insn_cnt = bpf_prog->len;
889 bool tail_call_seen = false;
890 bool seen_exit = false;
891 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
892 int i, excnt = 0;
893 int ilen, proglen = 0;
894 u8 *prog = temp;
895 int err;
896
897 detect_reg_usage(insn, insn_cnt, callee_regs_used,
898 &tail_call_seen);
899
900 /* tail call's presence in current prog implies it is reachable */
901 tail_call_reachable |= tail_call_seen;
902
903 emit_prologue(&prog, bpf_prog->aux->stack_depth,
904 bpf_prog_was_classic(bpf_prog), tail_call_reachable,
905 bpf_prog->aux->func_idx != 0);
906 push_callee_regs(&prog, callee_regs_used);
907
908 ilen = prog - temp;
909 if (image)
910 memcpy(image + proglen, temp, ilen);
911 proglen += ilen;
912 addrs[0] = proglen;
913 prog = temp;
914
915 for (i = 1; i <= insn_cnt; i++, insn++) {
916 const s32 imm32 = insn->imm;
917 u32 dst_reg = insn->dst_reg;
918 u32 src_reg = insn->src_reg;
919 u8 b2 = 0, b3 = 0;
920 u8 *start_of_ldx;
921 s64 jmp_offset;
922 u8 jmp_cond;
923 u8 *func;
924 int nops;
925
926 switch (insn->code) {
927 /* ALU */
928 case BPF_ALU | BPF_ADD | BPF_X:
929 case BPF_ALU | BPF_SUB | BPF_X:
930 case BPF_ALU | BPF_AND | BPF_X:
931 case BPF_ALU | BPF_OR | BPF_X:
932 case BPF_ALU | BPF_XOR | BPF_X:
933 case BPF_ALU64 | BPF_ADD | BPF_X:
934 case BPF_ALU64 | BPF_SUB | BPF_X:
935 case BPF_ALU64 | BPF_AND | BPF_X:
936 case BPF_ALU64 | BPF_OR | BPF_X:
937 case BPF_ALU64 | BPF_XOR | BPF_X:
938 maybe_emit_mod(&prog, dst_reg, src_reg,
939 BPF_CLASS(insn->code) == BPF_ALU64);
940 b2 = simple_alu_opcodes[BPF_OP(insn->code)];
941 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
942 break;
943
944 case BPF_ALU64 | BPF_MOV | BPF_X:
945 case BPF_ALU | BPF_MOV | BPF_X:
946 emit_mov_reg(&prog,
947 BPF_CLASS(insn->code) == BPF_ALU64,
948 dst_reg, src_reg);
949 break;
950
951 /* neg dst */
952 case BPF_ALU | BPF_NEG:
953 case BPF_ALU64 | BPF_NEG:
954 if (BPF_CLASS(insn->code) == BPF_ALU64)
955 EMIT1(add_1mod(0x48, dst_reg));
956 else if (is_ereg(dst_reg))
957 EMIT1(add_1mod(0x40, dst_reg));
958 EMIT2(0xF7, add_1reg(0xD8, dst_reg));
959 break;
960
961 case BPF_ALU | BPF_ADD | BPF_K:
962 case BPF_ALU | BPF_SUB | BPF_K:
963 case BPF_ALU | BPF_AND | BPF_K:
964 case BPF_ALU | BPF_OR | BPF_K:
965 case BPF_ALU | BPF_XOR | BPF_K:
966 case BPF_ALU64 | BPF_ADD | BPF_K:
967 case BPF_ALU64 | BPF_SUB | BPF_K:
968 case BPF_ALU64 | BPF_AND | BPF_K:
969 case BPF_ALU64 | BPF_OR | BPF_K:
970 case BPF_ALU64 | BPF_XOR | BPF_K:
971 if (BPF_CLASS(insn->code) == BPF_ALU64)
972 EMIT1(add_1mod(0x48, dst_reg));
973 else if (is_ereg(dst_reg))
974 EMIT1(add_1mod(0x40, dst_reg));
975
976 /*
977 * b3 holds the 'normal' opcode; b2 is the short form, which is
978 * only valid when dst is eax/rax.
979 */
980 switch (BPF_OP(insn->code)) {
981 case BPF_ADD:
982 b3 = 0xC0;
983 b2 = 0x05;
984 break;
985 case BPF_SUB:
986 b3 = 0xE8;
987 b2 = 0x2D;
988 break;
989 case BPF_AND:
990 b3 = 0xE0;
991 b2 = 0x25;
992 break;
993 case BPF_OR:
994 b3 = 0xC8;
995 b2 = 0x0D;
996 break;
997 case BPF_XOR:
998 b3 = 0xF0;
999 b2 = 0x35;
1000 break;
1001 }
1002
1003 if (is_imm8(imm32))
1004 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1005 else if (is_axreg(dst_reg))
1006 EMIT1_off32(b2, imm32);
1007 else
1008 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1009 break;
1010
1011 case BPF_ALU64 | BPF_MOV | BPF_K:
1012 case BPF_ALU | BPF_MOV | BPF_K:
1013 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1014 dst_reg, imm32);
1015 break;
1016
1017 case BPF_LD | BPF_IMM | BPF_DW:
1018 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1019 insn++;
1020 i++;
1021 break;
1022
1023 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1024 case BPF_ALU | BPF_MOD | BPF_X:
1025 case BPF_ALU | BPF_DIV | BPF_X:
1026 case BPF_ALU | BPF_MOD | BPF_K:
1027 case BPF_ALU | BPF_DIV | BPF_K:
1028 case BPF_ALU64 | BPF_MOD | BPF_X:
1029 case BPF_ALU64 | BPF_DIV | BPF_X:
1030 case BPF_ALU64 | BPF_MOD | BPF_K:
1031 case BPF_ALU64 | BPF_DIV | BPF_K:
1032 EMIT1(0x50); /* push rax */
1033 EMIT1(0x52); /* push rdx */
1034
1035 if (BPF_SRC(insn->code) == BPF_X)
1036 /* mov r11, src_reg */
1037 EMIT_mov(AUX_REG, src_reg);
1038 else
1039 /* mov r11, imm32 */
1040 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1041
1042 /* mov rax, dst_reg */
1043 EMIT_mov(BPF_REG_0, dst_reg);
1044
1045 /*
1046 * xor edx, edx
1047 * equivalent to 'xor rdx, rdx', but one byte less
1048 */
1049 EMIT2(0x31, 0xd2);
1050
1051 if (BPF_CLASS(insn->code) == BPF_ALU64)
1052 /* div r11 */
1053 EMIT3(0x49, 0xF7, 0xF3);
1054 else
1055 /* div r11d */
1056 EMIT3(0x41, 0xF7, 0xF3);
1057
1058 if (BPF_OP(insn->code) == BPF_MOD)
1059 /* mov r11, rdx */
1060 EMIT3(0x49, 0x89, 0xD3);
1061 else
1062 /* mov r11, rax */
1063 EMIT3(0x49, 0x89, 0xC3);
1064
1065 EMIT1(0x5A); /* pop rdx */
1066 EMIT1(0x58); /* pop rax */
1067
1068 /* mov dst_reg, r11 */
1069 EMIT_mov(dst_reg, AUX_REG);
1070 break;
1071
1072 case BPF_ALU | BPF_MUL | BPF_K:
1073 case BPF_ALU | BPF_MUL | BPF_X:
1074 case BPF_ALU64 | BPF_MUL | BPF_K:
1075 case BPF_ALU64 | BPF_MUL | BPF_X:
1076 {
1077 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1078
1079 if (dst_reg != BPF_REG_0)
1080 EMIT1(0x50); /* push rax */
1081 if (dst_reg != BPF_REG_3)
1082 EMIT1(0x52); /* push rdx */
1083
1084 /* mov r11, dst_reg */
1085 EMIT_mov(AUX_REG, dst_reg);
1086
1087 if (BPF_SRC(insn->code) == BPF_X)
1088 emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
1089 else
1090 emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);
1091
1092 if (is64)
1093 EMIT1(add_1mod(0x48, AUX_REG));
1094 else if (is_ereg(AUX_REG))
1095 EMIT1(add_1mod(0x40, AUX_REG));
1096 /* mul(q) r11 */
1097 EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
1098
1099 if (dst_reg != BPF_REG_3)
1100 EMIT1(0x5A); /* pop rdx */
1101 if (dst_reg != BPF_REG_0) {
1102 /* mov dst_reg, rax */
1103 EMIT_mov(dst_reg, BPF_REG_0);
1104 EMIT1(0x58); /* pop rax */
1105 }
1106 break;
1107 }
1108 /* Shifts */
1109 case BPF_ALU | BPF_LSH | BPF_K:
1110 case BPF_ALU | BPF_RSH | BPF_K:
1111 case BPF_ALU | BPF_ARSH | BPF_K:
1112 case BPF_ALU64 | BPF_LSH | BPF_K:
1113 case BPF_ALU64 | BPF_RSH | BPF_K:
1114 case BPF_ALU64 | BPF_ARSH | BPF_K:
1115 if (BPF_CLASS(insn->code) == BPF_ALU64)
1116 EMIT1(add_1mod(0x48, dst_reg));
1117 else if (is_ereg(dst_reg))
1118 EMIT1(add_1mod(0x40, dst_reg));
1119
1120 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1121 if (imm32 == 1)
1122 EMIT2(0xD1, add_1reg(b3, dst_reg));
1123 else
1124 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1125 break;
1126
1127 case BPF_ALU | BPF_LSH | BPF_X:
1128 case BPF_ALU | BPF_RSH | BPF_X:
1129 case BPF_ALU | BPF_ARSH | BPF_X:
1130 case BPF_ALU64 | BPF_LSH | BPF_X:
1131 case BPF_ALU64 | BPF_RSH | BPF_X:
1132 case BPF_ALU64 | BPF_ARSH | BPF_X:
1133
1134 /* Check for bad case when dst_reg == rcx */
1135 if (dst_reg == BPF_REG_4) {
1136 /* mov r11, dst_reg */
1137 EMIT_mov(AUX_REG, dst_reg);
1138 dst_reg = AUX_REG;
1139 }
1140
1141 if (src_reg != BPF_REG_4) { /* common case */
1142 EMIT1(0x51); /* push rcx */
1143
1144 /* mov rcx, src_reg */
1145 EMIT_mov(BPF_REG_4, src_reg);
1146 }
1147
1148 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1149 if (BPF_CLASS(insn->code) == BPF_ALU64)
1150 EMIT1(add_1mod(0x48, dst_reg));
1151 else if (is_ereg(dst_reg))
1152 EMIT1(add_1mod(0x40, dst_reg));
1153
1154 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1155 EMIT2(0xD3, add_1reg(b3, dst_reg));
1156
1157 if (src_reg != BPF_REG_4)
1158 EMIT1(0x59); /* pop rcx */
1159
1160 if (insn->dst_reg == BPF_REG_4)
1161 /* mov dst_reg, r11 */
1162 EMIT_mov(insn->dst_reg, AUX_REG);
1163 break;
1164
1165 case BPF_ALU | BPF_END | BPF_FROM_BE:
1166 switch (imm32) {
1167 case 16:
1168 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
1169 EMIT1(0x66);
1170 if (is_ereg(dst_reg))
1171 EMIT1(0x41);
1172 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1173
1174 /* Emit 'movzwl eax, ax' */
1175 if (is_ereg(dst_reg))
1176 EMIT3(0x45, 0x0F, 0xB7);
1177 else
1178 EMIT2(0x0F, 0xB7);
1179 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1180 break;
1181 case 32:
1182 /* Emit 'bswap eax' to swap lower 4 bytes */
1183 if (is_ereg(dst_reg))
1184 EMIT2(0x41, 0x0F);
1185 else
1186 EMIT1(0x0F);
1187 EMIT1(add_1reg(0xC8, dst_reg));
1188 break;
1189 case 64:
1190 /* Emit 'bswap rax' to swap 8 bytes */
1191 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1192 add_1reg(0xC8, dst_reg));
1193 break;
1194 }
1195 break;
1196
1197 case BPF_ALU | BPF_END | BPF_FROM_LE:
1198 switch (imm32) {
1199 case 16:
1200 /*
1201 * Emit 'movzwl eax, ax' to zero extend 16-bit
1202 * into 64 bit
1203 */
1204 if (is_ereg(dst_reg))
1205 EMIT3(0x45, 0x0F, 0xB7);
1206 else
1207 EMIT2(0x0F, 0xB7);
1208 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1209 break;
1210 case 32:
1211 /* Emit 'mov eax, eax' to clear upper 32-bits */
1212 if (is_ereg(dst_reg))
1213 EMIT1(0x45);
1214 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1215 break;
1216 case 64:
1217 /* nop */
1218 break;
1219 }
1220 break;
1221
1222 /* speculation barrier */
1223 case BPF_ST | BPF_NOSPEC:
1224 if (boot_cpu_has(X86_FEATURE_XMM2))
1225 /* Emit 'lfence' */
1226 EMIT3(0x0F, 0xAE, 0xE8);
1227 break;
1228
1229 /* ST: *(u8*)(dst_reg + off) = imm */
1230 case BPF_ST | BPF_MEM | BPF_B:
1231 if (is_ereg(dst_reg))
1232 EMIT2(0x41, 0xC6);
1233 else
1234 EMIT1(0xC6);
1235 goto st;
1236 case BPF_ST | BPF_MEM | BPF_H:
1237 if (is_ereg(dst_reg))
1238 EMIT3(0x66, 0x41, 0xC7);
1239 else
1240 EMIT2(0x66, 0xC7);
1241 goto st;
1242 case BPF_ST | BPF_MEM | BPF_W:
1243 if (is_ereg(dst_reg))
1244 EMIT2(0x41, 0xC7);
1245 else
1246 EMIT1(0xC7);
1247 goto st;
1248 case BPF_ST | BPF_MEM | BPF_DW:
1249 EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1250
1251st: if (is_imm8(insn->off))
1252 EMIT2(add_1reg(0x40, dst_reg), insn->off);
1253 else
1254 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1255
1256 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1257 break;
1258
1259 /* STX: *(u8*)(dst_reg + off) = src_reg */
1260 case BPF_STX | BPF_MEM | BPF_B:
1261 case BPF_STX | BPF_MEM | BPF_H:
1262 case BPF_STX | BPF_MEM | BPF_W:
1263 case BPF_STX | BPF_MEM | BPF_DW:
1264 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1265 break;
1266
1267 /* LDX: dst_reg = *(u8*)(src_reg + off) */
1268 case BPF_LDX | BPF_MEM | BPF_B:
1269 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1270 case BPF_LDX | BPF_MEM | BPF_H:
1271 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1272 case BPF_LDX | BPF_MEM | BPF_W:
1273 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1274 case BPF_LDX | BPF_MEM | BPF_DW:
1275 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1276 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1277 /* test src_reg, src_reg */
1278 maybe_emit_mod(&prog, src_reg, src_reg, true); /* always 1 byte */
1279 EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg));
1280 /* jne start_of_ldx */
1281 EMIT2(X86_JNE, 0);
1282 /* xor dst_reg, dst_reg */
1283 emit_mov_imm32(&prog, false, dst_reg, 0);
1284 /* jmp byte_after_ldx */
1285 EMIT2(0xEB, 0);
1286
1287 /* populate jmp_offset for JNE above */
1288 temp[4] = prog - temp - 5 /* sizeof(test + jne) */;
1289 start_of_ldx = prog;
1290 }
1291 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1292 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1293 struct exception_table_entry *ex;
1294 u8 *_insn = image + proglen + (start_of_ldx - temp);
1295 s64 delta;
1296
1297 /* populate jmp_offset for JMP above */
1298 start_of_ldx[-1] = prog - start_of_ldx;
1299
1300 if (!bpf_prog->aux->extable)
1301 break;
1302
1303 if (excnt >= bpf_prog->aux->num_exentries) {
1304 pr_err("ex gen bug\n");
1305 return -EFAULT;
1306 }
1307 ex = &bpf_prog->aux->extable[excnt++];
1308
1309 delta = _insn - (u8 *)&ex->insn;
1310 if (!is_simm32(delta)) {
1311 pr_err("extable->insn doesn't fit into 32-bit\n");
1312 return -EFAULT;
1313 }
1314 ex->insn = delta;
1315
1316 delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
1317 if (!is_simm32(delta)) {
1318 pr_err("extable->handler doesn't fit into 32-bit\n");
1319 return -EFAULT;
1320 }
1321 ex->handler = delta;
1322
1323 if (dst_reg > BPF_REG_9) {
1324 pr_err("verifier error\n");
1325 return -EFAULT;
1326 }
1327 /*
1328 * Compute size of x86 insn and its target dest x86 register.
1329 * ex_handler_bpf() will use lower 8 bits to adjust
1330 * pt_regs->ip to jump over this x86 instruction
1331 * and upper bits to figure out which pt_regs to zero out.
1332 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1333 * of 4 bytes will be ignored and rbx will be zero inited.
1334 */
1335 ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
1336 }
1337 break;
1338
1339 case BPF_STX | BPF_ATOMIC | BPF_W:
1340 case BPF_STX | BPF_ATOMIC | BPF_DW:
1341 if (insn->imm == (BPF_AND | BPF_FETCH) ||
1342 insn->imm == (BPF_OR | BPF_FETCH) ||
1343 insn->imm == (BPF_XOR | BPF_FETCH)) {
1344 bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1345 u32 real_src_reg = src_reg;
1346 u32 real_dst_reg = dst_reg;
1347 u8 *branch_target;
1348
1349 /*
1350 * Can't be implemented with a single x86 insn.
1351 * Need to do a CMPXCHG loop.
1352 */
1353
1354 /* Will need RAX as a CMPXCHG operand so save R0 */
1355 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1356 if (src_reg == BPF_REG_0)
1357 real_src_reg = BPF_REG_AX;
1358 if (dst_reg == BPF_REG_0)
1359 real_dst_reg = BPF_REG_AX;
1360
1361 branch_target = prog;
1362 /* Load old value */
1363 emit_ldx(&prog, BPF_SIZE(insn->code),
1364 BPF_REG_0, real_dst_reg, insn->off);
1365 /*
1366 * Perform the (commutative) operation locally,
1367 * put the result in the AUX_REG.
1368 */
1369 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1370 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1371 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1372 add_2reg(0xC0, AUX_REG, real_src_reg));
1373 /* Attempt to swap in new value */
1374 err = emit_atomic(&prog, BPF_CMPXCHG,
1375 real_dst_reg, AUX_REG,
1376 insn->off,
1377 BPF_SIZE(insn->code));
1378 if (WARN_ON(err))
1379 return err;
1380 /*
1381 * ZF tells us whether we won the race. If it's
1382 * cleared we need to try again.
1383 */
1384 EMIT2(X86_JNE, -(prog - branch_target) - 2);
1385 /* Return the pre-modification value */
1386 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
1387 /* Restore R0 after clobbering RAX */
1388 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
1389 break;
1390 }
1391
1392 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1393 insn->off, BPF_SIZE(insn->code));
1394 if (err)
1395 return err;
1396 break;
1397
1398 /* call */
1399 case BPF_JMP | BPF_CALL:
1400 func = (u8 *) __bpf_call_base + imm32;
1401 if (tail_call_reachable) {
1402 EMIT3_off32(0x48, 0x8B, 0x85,
1403 -(bpf_prog->aux->stack_depth + 8));
1404 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
1405 return -EINVAL;
1406 } else {
1407 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1408 return -EINVAL;
1409 }
1410 break;
1411
1412 case BPF_JMP | BPF_TAIL_CALL:
1413 if (imm32)
1414 emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1415 &prog, addrs[i], image,
1416 callee_regs_used,
1417 bpf_prog->aux->stack_depth);
1418 else
1419 emit_bpf_tail_call_indirect(&prog,
1420 callee_regs_used,
1421 bpf_prog->aux->stack_depth);
1422 break;
1423
1424 /* cond jump */
1425 case BPF_JMP | BPF_JEQ | BPF_X:
1426 case BPF_JMP | BPF_JNE | BPF_X:
1427 case BPF_JMP | BPF_JGT | BPF_X:
1428 case BPF_JMP | BPF_JLT | BPF_X:
1429 case BPF_JMP | BPF_JGE | BPF_X:
1430 case BPF_JMP | BPF_JLE | BPF_X:
1431 case BPF_JMP | BPF_JSGT | BPF_X:
1432 case BPF_JMP | BPF_JSLT | BPF_X:
1433 case BPF_JMP | BPF_JSGE | BPF_X:
1434 case BPF_JMP | BPF_JSLE | BPF_X:
1435 case BPF_JMP32 | BPF_JEQ | BPF_X:
1436 case BPF_JMP32 | BPF_JNE | BPF_X:
1437 case BPF_JMP32 | BPF_JGT | BPF_X:
1438 case BPF_JMP32 | BPF_JLT | BPF_X:
1439 case BPF_JMP32 | BPF_JGE | BPF_X:
1440 case BPF_JMP32 | BPF_JLE | BPF_X:
1441 case BPF_JMP32 | BPF_JSGT | BPF_X:
1442 case BPF_JMP32 | BPF_JSLT | BPF_X:
1443 case BPF_JMP32 | BPF_JSGE | BPF_X:
1444 case BPF_JMP32 | BPF_JSLE | BPF_X:
1445 /* cmp dst_reg, src_reg */
1446 maybe_emit_mod(&prog, dst_reg, src_reg,
1447 BPF_CLASS(insn->code) == BPF_JMP);
1448 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1449 goto emit_cond_jmp;
1450
1451 case BPF_JMP | BPF_JSET | BPF_X:
1452 case BPF_JMP32 | BPF_JSET | BPF_X:
1453 /* test dst_reg, src_reg */
1454 maybe_emit_mod(&prog, dst_reg, src_reg,
1455 BPF_CLASS(insn->code) == BPF_JMP);
1456 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1457 goto emit_cond_jmp;
1458
1459 case BPF_JMP | BPF_JSET | BPF_K:
1460 case BPF_JMP32 | BPF_JSET | BPF_K:
1461 /* test dst_reg, imm32 */
1462 if (BPF_CLASS(insn->code) == BPF_JMP)
1463 EMIT1(add_1mod(0x48, dst_reg));
1464 else if (is_ereg(dst_reg))
1465 EMIT1(add_1mod(0x40, dst_reg));
1466 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1467 goto emit_cond_jmp;
1468
1469 case BPF_JMP | BPF_JEQ | BPF_K:
1470 case BPF_JMP | BPF_JNE | BPF_K:
1471 case BPF_JMP | BPF_JGT | BPF_K:
1472 case BPF_JMP | BPF_JLT | BPF_K:
1473 case BPF_JMP | BPF_JGE | BPF_K:
1474 case BPF_JMP | BPF_JLE | BPF_K:
1475 case BPF_JMP | BPF_JSGT | BPF_K:
1476 case BPF_JMP | BPF_JSLT | BPF_K:
1477 case BPF_JMP | BPF_JSGE | BPF_K:
1478 case BPF_JMP | BPF_JSLE | BPF_K:
1479 case BPF_JMP32 | BPF_JEQ | BPF_K:
1480 case BPF_JMP32 | BPF_JNE | BPF_K:
1481 case BPF_JMP32 | BPF_JGT | BPF_K:
1482 case BPF_JMP32 | BPF_JLT | BPF_K:
1483 case BPF_JMP32 | BPF_JGE | BPF_K:
1484 case BPF_JMP32 | BPF_JLE | BPF_K:
1485 case BPF_JMP32 | BPF_JSGT | BPF_K:
1486 case BPF_JMP32 | BPF_JSLT | BPF_K:
1487 case BPF_JMP32 | BPF_JSGE | BPF_K:
1488 case BPF_JMP32 | BPF_JSLE | BPF_K:
1489 /* test dst_reg, dst_reg to save one extra byte */
1490 if (imm32 == 0) {
1491 maybe_emit_mod(&prog, dst_reg, dst_reg,
1492 BPF_CLASS(insn->code) == BPF_JMP);
1493 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1494 goto emit_cond_jmp;
1495 }
1496
1497 /* cmp dst_reg, imm8/32 */
1498 if (BPF_CLASS(insn->code) == BPF_JMP)
1499 EMIT1(add_1mod(0x48, dst_reg));
1500 else if (is_ereg(dst_reg))
1501 EMIT1(add_1mod(0x40, dst_reg));
1502
1503 if (is_imm8(imm32))
1504 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1505 else
1506 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1507
1508emit_cond_jmp: /* Convert BPF opcode to x86 */
1509 switch (BPF_OP(insn->code)) {
1510 case BPF_JEQ:
1511 jmp_cond = X86_JE;
1512 break;
1513 case BPF_JSET:
1514 case BPF_JNE:
1515 jmp_cond = X86_JNE;
1516 break;
1517 case BPF_JGT:
1518 /* GT is unsigned '>', JA in x86 */
1519 jmp_cond = X86_JA;
1520 break;
1521 case BPF_JLT:
1522 /* LT is unsigned '<', JB in x86 */
1523 jmp_cond = X86_JB;
1524 break;
1525 case BPF_JGE:
1526 /* GE is unsigned '>=', JAE in x86 */
1527 jmp_cond = X86_JAE;
1528 break;
1529 case BPF_JLE:
1530 /* LE is unsigned '<=', JBE in x86 */
1531 jmp_cond = X86_JBE;
1532 break;
1533 case BPF_JSGT:
1534 /* Signed '>', GT in x86 */
1535 jmp_cond = X86_JG;
1536 break;
1537 case BPF_JSLT:
1538 /* Signed '<', LT in x86 */
1539 jmp_cond = X86_JL;
1540 break;
1541 case BPF_JSGE:
1542 /* Signed '>=', GE in x86 */
1543 jmp_cond = X86_JGE;
1544 break;
1545 case BPF_JSLE:
1546 /* Signed '<=', LE in x86 */
1547 jmp_cond = X86_JLE;
1548 break;
1549 default: /* to silence GCC warning */
1550 return -EFAULT;
1551 }
1552 jmp_offset = addrs[i + insn->off] - addrs[i];
1553 if (is_imm8(jmp_offset)) {
1554 if (jmp_padding) {
1555 /* To keep the jmp_offset valid, the extra bytes are
1556 * padded before the jump insn, so we subtract the
1557 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
1558 *
1559 * If the previous pass already emits an imm8
1560 * jmp_cond, then this BPF insn won't shrink, so
1561 * "nops" is 0.
1562 *
1563 * On the other hand, if the previous pass emits an
1564 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
1565 * keep the image from shrinking further.
1566 *
1567 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
1568 * is 2 bytes, so the size difference is 4 bytes.
1569 */
1570 nops = INSN_SZ_DIFF - 2;
1571 if (nops != 0 && nops != 4) {
1572 pr_err("unexpected jmp_cond padding: %d bytes\n",
1573 nops);
1574 return -EFAULT;
1575 }
1576 emit_nops(&prog, nops);
1577 }
1578 EMIT2(jmp_cond, jmp_offset);
1579 } else if (is_simm32(jmp_offset)) {
1580 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1581 } else {
1582 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1583 return -EFAULT;
1584 }
1585
1586 break;
1587
1588 case BPF_JMP | BPF_JA:
1589 if (insn->off == -1)
1590 /* -1 jmp instructions will always jump
1591 * backwards two bytes. Explicitly handling
1592 * this case avoids wasting too many passes
1593 * when there are long sequences of replaced
1594 * dead code.
1595 */
1596 jmp_offset = -2;
1597 else
1598 jmp_offset = addrs[i + insn->off] - addrs[i];
1599
1600 if (!jmp_offset) {
1601 /*
1602 * If jmp_padding is enabled, the extra nops will
1603 * be inserted. Otherwise, optimize out nop jumps.
1604 */
1605 if (jmp_padding) {
1606 /* There are 3 possible conditions.
1607 * (1) This BPF_JA is already optimized out in
1608 * the previous run, so there is no need
1609 * to pad any extra byte (0 byte).
1610 * (2) The previous pass emits an imm8 jmp,
1611 * so we pad 2 bytes to match the previous
1612 * insn size.
1613 * (3) Similarly, the previous pass emits an
1614 * imm32 jmp, and 5 bytes is padded.
1615 */
1616 nops = INSN_SZ_DIFF;
1617 if (nops != 0 && nops != 2 && nops != 5) {
1618 pr_err("unexpected nop jump padding: %d bytes\n",
1619 nops);
1620 return -EFAULT;
1621 }
1622 emit_nops(&prog, nops);
1623 }
1624 break;
1625 }
1626emit_jmp:
1627 if (is_imm8(jmp_offset)) {
1628 if (jmp_padding) {
1629 /* To avoid breaking jmp_offset, the extra bytes
1630 * are padded before the actual jmp insn, so
1631 * 2 bytes is subtracted from INSN_SZ_DIFF.
1632 *
1633 * If the previous pass already emits an imm8
1634 * jmp, there is nothing to pad (0 byte).
1635 *
1636 * If it emits an imm32 jmp (5 bytes) previously
1637 * and now an imm8 jmp (2 bytes), then we pad
1638 * (5 - 2 = 3) bytes to stop the image from
1639 * shrinking further.
1640 */
1641 nops = INSN_SZ_DIFF - 2;
1642 if (nops != 0 && nops != 3) {
1643 pr_err("unexpected jump padding: %d bytes\n",
1644 nops);
1645 return -EFAULT;
1646 }
1647 emit_nops(&prog, INSN_SZ_DIFF - 2);
1648 }
1649 EMIT2(0xEB, jmp_offset);
1650 } else if (is_simm32(jmp_offset)) {
1651 EMIT1_off32(0xE9, jmp_offset);
1652 } else {
1653 pr_err("jmp gen bug %llx\n", jmp_offset);
1654 return -EFAULT;
1655 }
1656 break;
1657
1658 case BPF_JMP | BPF_EXIT:
1659 if (seen_exit) {
1660 jmp_offset = ctx->cleanup_addr - addrs[i];
1661 goto emit_jmp;
1662 }
1663 seen_exit = true;
1664 /* Update cleanup_addr */
1665 ctx->cleanup_addr = proglen;
1666 pop_callee_regs(&prog, callee_regs_used);
1667 EMIT1(0xC9); /* leave */
1668 EMIT1(0xC3); /* ret */
1669 break;
1670
1671 default:
1672 /*
1673 * By design the x86-64 JIT should support all BPF instructions.
1674 * This error will be seen if a new instruction was added
1675 * to the interpreter, but not to the JIT, or if there is
1676 * junk in bpf_prog.
1677 */
1678 pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1679 return -EINVAL;
1680 }
1681
1682 ilen = prog - temp;
1683 if (ilen > BPF_MAX_INSN_SIZE) {
1684 pr_err("bpf_jit: fatal insn size error\n");
1685 return -EFAULT;
1686 }
1687
1688 if (image) {
1689 /*
1690 * When populating the image, assert that:
1691 *
1692 * i) We do not write beyond the allocated space, and
1693 * ii) addrs[i] did not change from the prior run, in order
1694 * to validate assumptions made for computing branch
1695 * displacements.
1696 */
1697 if (unlikely(proglen + ilen > oldproglen ||
1698 proglen + ilen != addrs[i])) {
1699 pr_err("bpf_jit: fatal error\n");
1700 return -EFAULT;
1701 }
1702 memcpy(image + proglen, temp, ilen);
1703 }
1704 proglen += ilen;
1705 addrs[i] = proglen;
1706 prog = temp;
1707 }
1708
1709 if (image && excnt != bpf_prog->aux->num_exentries) {
1710 pr_err("extable is not populated\n");
1711 return -EFAULT;
1712 }
1713 return proglen;
1714}
1715
1716static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1717 int stack_size)
1718{
1719 int i;
1720 /* Store function arguments to stack.
1721 * For a function that accepts two pointers the sequence will be:
1722 * mov QWORD PTR [rbp-0x10],rdi
1723 * mov QWORD PTR [rbp-0x8],rsi
1724 */
1725 for (i = 0; i < min(nr_args, 6); i++)
1726 emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
1727 BPF_REG_FP,
1728 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1729 -(stack_size - i * 8));
1730}
1731
1732static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1733 int stack_size)
1734{
1735 int i;
1736
1737 /* Restore function arguments from stack.
1738 * For a function that accepts two pointers the sequence will be:
1739 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
1740 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
1741 */
1742 for (i = 0; i < min(nr_args, 6); i++)
1743 emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
1744 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1745 BPF_REG_FP,
1746 -(stack_size - i * 8));
1747}
1748
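/*
 * Emit the call sequence for one BPF program from the trampoline:
 * __bpf_prog_enter*(), then the JITed program (or the interpreter) with a
 * pointer to the saved arguments as its context, then __bpf_prog_exit*().
 * If __bpf_prog_enter*() returns 0, execution of the program is skipped.
 * When @save_ret is set, the program's R0 is stored at [rbp - 8].
 */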
1749static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
1750 struct bpf_prog *p, int stack_size, bool save_ret)
1751{
1752 u8 *prog = *pprog;
1753 u8 *jmp_insn;
1754
1755 /* arg1: mov rdi, progs[i] */
1756 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1757 if (emit_call(&prog,
1758 p->aux->sleepable ? __bpf_prog_enter_sleepable :
1759 __bpf_prog_enter, prog))
1760 return -EINVAL;
1761 /* remember prog start time returned by __bpf_prog_enter */
1762 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1763
1764 /* if (__bpf_prog_enter*(prog) == 0)
1765 * goto skip_exec_of_prog;
1766 */
1767 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
1768 /* emit 2 nops that will be replaced with JE insn */
1769 jmp_insn = prog;
1770 emit_nops(&prog, 2);
1771
1772 /* arg1: lea rdi, [rbp - stack_size] */
1773 EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1774 /* arg2: progs[i]->insnsi for interpreter */
1775 if (!p->jited)
1776 emit_mov_imm64(&prog, BPF_REG_2,
1777 (long) p->insnsi >> 32,
1778 (u32) (long) p->insnsi);
1779 /* call JITed bpf program or interpreter */
1780 if (emit_call(&prog, p->bpf_func, prog))
1781 return -EINVAL;
1782
1783 /*
1784 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
1785 * value of the previous call, which is then passed on the stack to
1786 * the next BPF program.
1787 *
1788 * BPF_TRAMP_FENTRY trampoline may need to return the return
1789 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
1790 */
1791 if (save_ret)
1792 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1793
1794 /* replace 2 nops with JE insn, since jmp target is known */
1795 jmp_insn[0] = X86_JE;
1796 jmp_insn[1] = prog - jmp_insn - 2;
1797
1798 /* arg1: mov rdi, progs[i] */
1799 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1800 /* arg2: mov rsi, rbx <- start time in nsec */
1801 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1802 if (emit_call(&prog,
1803 p->aux->sleepable ? __bpf_prog_exit_sleepable :
1804 __bpf_prog_exit, prog))
1805 return -EINVAL;
1806
1807 *pprog = prog;
1808 return 0;
1809}
1810
1811static void emit_align(u8 **pprog, u32 align)
1812{
1813 u8 *target, *prog = *pprog;
1814
1815 target = PTR_ALIGN(prog, align);
1816 if (target != prog)
1817 emit_nops(&prog, target - prog);
1818
1819 *pprog = prog;
1820}
1821
1822static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
1823{
1824 u8 *prog = *pprog;
1825 s64 offset;
1826
1827 offset = func - (ip + 2 + 4);
1828 if (!is_simm32(offset)) {
1829 pr_err("Target %p is out of range\n", func);
1830 return -EINVAL;
1831 }
1832 EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
1833 *pprog = prog;
1834 return 0;
1835}
1836
1837static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
1838 struct bpf_tramp_progs *tp, int stack_size,
1839 bool save_ret)
1840{
1841 int i;
1842 u8 *prog = *pprog;
1843
1844 for (i = 0; i < tp->nr_progs; i++) {
1845 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
1846 save_ret))
1847 return -EINVAL;
1848 }
1849 *pprog = prog;
1850 return 0;
1851}
1852
1853static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
1854 struct bpf_tramp_progs *tp, int stack_size,
1855 u8 **branches)
1856{
1857 u8 *prog = *pprog;
1858 int i;
1859
1860 /* The first fmod_ret program will receive a garbage return value.
1861 * Set this to 0 to avoid confusing the program.
1862 */
1863 emit_mov_imm32(&prog, false, BPF_REG_0, 0);
1864 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1865 for (i = 0; i < tp->nr_progs; i++) {
1866 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
1867 return -EINVAL;
1868
1869 /* mod_ret prog stored return value into [rbp - 8]. Emit:
1870 * if (*(u64 *)(rbp - 8) != 0)
1871 * goto do_fexit;
1872 */
1873 /* cmp QWORD PTR [rbp - 0x8], 0x0 */
1874 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
1875
1876 /* Save the location of the branch and generate 6 nops
1877 * (4 bytes for an offset and 2 bytes for the jump). These nops
1878 * are replaced with a conditional jump once do_fexit (i.e. the
1879 * start of the fexit invocation) is finalized.
1880 */
1881 branches[i] = prog;
1882 emit_nops(&prog, 4 + 2);
1883 }
1884
1885 *pprog = prog;
1886 return 0;
1887}
1888
1889static bool is_valid_bpf_tramp_flags(unsigned int flags)
1890{
1891 if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
1892 (flags & BPF_TRAMP_F_SKIP_FRAME))
1893 return false;
1894
1895 /*
1896 * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
1897 * and it must be used alone.
1898 */
1899 if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
1900 (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
1901 return false;
1902
1903 return true;
1904}
1905
1906/* Example:
1907 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
1908 * its 'struct btf_func_model' will have nr_args=2.
1909 * The assembly code when eth_type_trans is executing after the trampoline:
1910 *
1911 * push rbp
1912 * mov rbp, rsp
1913 * sub rsp, 16 // space for skb and dev
1914 * push rbx // temp regs to pass start time
1915 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
1916 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
1917 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1918 * mov rbx, rax // remember start time if bpf stats are enabled
1919 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
1920 * call addr_of_jited_FENTRY_prog
1921 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1922 * mov rsi, rbx // prog start time
1923 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1924 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
1925 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
1926 * pop rbx
1927 * leave
1928 * ret
1929 *
1930 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
1931 * replaced with 'call generated_bpf_trampoline'. When it returns
1932 * eth_type_trans will continue executing with original skb and dev pointers.
1933 *
1934 * The assembly code when eth_type_trans is called from trampoline:
1935 *
1936 * push rbp
1937 * mov rbp, rsp
1938 * sub rsp, 24 // space for skb, dev, return value
1939 * push rbx // temp regs to pass start time
1940 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
1941 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
1942 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1943 * mov rbx, rax // remember start time if bpf stats are enabled
1944 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
1945 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
1946 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1947 * mov rsi, rbx // prog start time
1948 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1949 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
1950 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
1951 * call eth_type_trans+5 // execute body of eth_type_trans
1952 * mov qword ptr [rbp - 8], rax // save return value
1953 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1954 * mov rbx, rax // remember start time if bpf stats are enabled
1955 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
1956 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
1957 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1958 * mov rsi, rbx // prog start time
1959 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1960 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
1961 * pop rbx
1962 * leave
1963 * add rsp, 8 // skip eth_type_trans's frame
1964 * ret // return to its caller
1965 */
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call)
{
	int ret, i, nr_args = m->nr_args;
	int stack_size = nr_args * 8;
	struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
	struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
	struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
	u8 **branches = NULL;
	u8 *prog;
	bool save_ret;

	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
	if (nr_args > 6)
		return -ENOTSUPP;

	if (!is_valid_bpf_tramp_flags(flags))
		return -EINVAL;

	/* room for return value of orig_call or fentry prog */
	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
	if (save_ret)
		stack_size += 8;

	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip patched call instruction and point orig_call to actual
		 * body of the kernel function.
		 */
		orig_call += X86_PATCH_SIZE;

	prog = image;

	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
	EMIT1(0x53);		 /* push rbx */

	save_regs(m, &prog, nr_args, stack_size);
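	/* For illustration, matching the eth_type_trans example in the
	 * comment above: with two arguments plus a saved return value,
	 * stack_size works out to 2 * 8 + 8 = 24, the arguments are spilled
	 * to [rbp - 24] and [rbp - 16], and [rbp - 8] is left for the saved
	 * return value.
	 */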

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_call(&prog, __bpf_tramp_enter, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (fentry->nr_progs)
		if (invoke_bpf(m, &prog, fentry, stack_size,
			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
			return -EINVAL;

	if (fmod_ret->nr_progs) {
		branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
				   GFP_KERNEL);
		if (!branches)
			return -ENOMEM;

		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
				       branches)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		restore_regs(m, &prog, nr_args, stack_size);

		/* call original function */
		if (emit_call(&prog, orig_call, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
		/* remember return value on the stack for bpf prog to access */
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
		im->ip_after_call = prog;
		memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
		prog += X86_PATCH_SIZE;
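		/* The 5-byte nop just emitted is a patch site: because
		 * im->ip_after_call points at it, the generic trampoline
		 * code (see bpf_tramp_image_put()) can later turn it into a
		 * jump to im->ip_epilogue.
		 */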
	}

	if (fmod_ret->nr_progs) {
		/* From Intel 64 and IA-32 Architectures Optimization
		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
		 * Coding Rule 11: All branch targets should be 16-byte
		 * aligned.
		 */
		emit_align(&prog, 16);
		/* Update the branches saved in invoke_bpf_mod_ret with the
		 * aligned address of do_fexit.
		 */
		for (i = 0; i < fmod_ret->nr_progs; i++)
			emit_cond_near_jump(&branches[i], prog, branches[i],
					    X86_JNE);
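		/* After this fixup, a nonzero return value from an fmod_ret
		 * program (tested by the compare emitted in
		 * invoke_bpf_mod_ret()) skips the call to the original
		 * function and lands directly at do_fexit.
		 */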
	}

	if (fexit->nr_progs)
		if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
			ret = -EINVAL;
			goto cleanup;
		}

	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		restore_regs(m, &prog, nr_args, stack_size);

	/* This needs to be done regardless. If there were fmod_ret programs,
	 * the return value is only updated on the stack and still needs to be
	 * restored to R0.
	 */
	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		im->ip_epilogue = prog;
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_call(&prog, __bpf_tramp_exit, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}
	/* restore return value of orig_call or fentry prog back into RAX */
	if (save_ret)
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);

	EMIT1(0x5B); /* pop rbx */
	EMIT1(0xC9); /* leave */
	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip our return address and return to parent */
		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
	EMIT1(0xC3); /* ret */
	/* Make sure the trampoline generation logic doesn't overflow */
	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
		ret = -EFAULT;
		goto cleanup;
	}
	ret = prog - (u8 *)image;

cleanup:
	kfree(branches);
	return ret;
}

static int emit_fallback_jump(u8 **pprog)
{
	u8 *prog = *pprog;
	int err = 0;

#ifdef CONFIG_RETPOLINE
	/* Note that this assumes the compiler uses external
	 * thunks for indirect calls. Both clang and GCC use the same
	 * naming convention for external thunks.
	 */
	err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog);
#else
	EMIT2(0xFF, 0xE2);	/* jmp rdx */
#endif
	*pprog = prog;
	return err;
}

static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
{
	u8 *jg_reloc, *prog = *pprog;
	int pivot, err, jg_bytes = 1;
	s64 jg_offset;

	if (a == b) {
		/* Leaf node of recursion, i.e. not a range of indices
		 * anymore.
		 */
		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
		if (!is_simm32(progs[a]))
			return -1;
		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
			    progs[a]);
		err = emit_cond_near_jump(&prog,	/* je func */
					  (void *)progs[a], prog,
					  X86_JE);
		if (err)
			return err;

		err = emit_fallback_jump(&prog);	/* jmp thunk/indirect */
		if (err)
			return err;

		*pprog = prog;
		return 0;
	}

	/* Not a leaf node, so we pivot, and recursively descend into
	 * the lower and upper ranges.
	 */
	pivot = (b - a) / 2;
	EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
	if (!is_simm32(progs[a + pivot]))
		return -1;
	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);

	if (pivot > 2) {			/* jg upper_part */
		/* Require near jump. */
		jg_bytes = 4;
		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
	} else {
		EMIT2(X86_JG, 0);
	}
	jg_reloc = prog;

	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
				  progs);
	if (err)
		return err;

	/* From Intel 64 and IA-32 Architectures Optimization
	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
	 * Coding Rule 11: All branch targets should be 16-byte
	 * aligned.
	 */
	emit_align(&prog, 16);
	jg_offset = prog - jg_reloc;
	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);

	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
				  b, progs);
	if (err)
		return err;

	*pprog = prog;
	return 0;
}
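
/* Rough shape of the code emitted above (for illustration): the sorted
 * addresses in progs[] form a balanced binary search over the target
 * address in rdx.  Each leaf compares rdx against one candidate and either
 * takes a direct "je <prog>" or falls through to emit_fallback_jump(),
 * i.e. an indirect jump through rdx (via the retpoline thunk when
 * CONFIG_RETPOLINE is set).  Inner nodes compare against the pivot and
 * "jg" to the 16-byte-aligned upper half.
 */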

static int cmp_ips(const void *a, const void *b)
{
	const s64 *ipa = a;
	const s64 *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

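/* The dispatcher slots are sorted by address (using cmp_ips() above) so
 * that the compare/jg tree emitted by emit_bpf_dispatcher() behaves as a
 * binary search.
 */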
int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
{
	u8 *prog = image;

	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
}

struct x64_jit_data {
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

#define MAX_PASSES 20
#define PADDING_PASSES (MAX_PASSES - 5)
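/* The compile loop below makes at most MAX_PASSES passes.  If the image
 * size has not settled after PADDING_PASSES passes, the remaining passes
 * run do_jit() with padding enabled, which is meant to stop instruction
 * sizes from oscillating between passes so the loop can converge.
 */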

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	bool padding = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		padding = true;
		goto skip_init_addrs;
	}
	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
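	/* For example, a three-instruction program starts out with
	 * addrs[] = { 64, 128, 192, 256 }; each do_jit() pass then replaces
	 * these estimates with the actual offsets of the emitted x86 code,
	 * shrinking them until the image size converges.
	 */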
skip_init_addrs:

	/*
	 * JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge only on the last pass. In such a case, do one
	 * more pass to emit the final image.
	 */
	for (pass = 0; pass < MAX_PASSES || image; pass++) {
		if (!padding && pass >= PADDING_PASSES)
			padding = true;
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			/*
			 * The number of entries in extable is the number of BPF_LDX
			 * insns that access kernel memory via "pointer to BTF type".
			 * The verifier changed their opcode from LDX|MEM|size
			 * to LDX|PROBE_MEM|size to make JITing easier.
			 */
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);

			/* allocate module memory for x86 insns and extable */
			header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
						      &image, align, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
			prog->aux->extable = (void *) image + roundup(proglen, align);
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			bpf_tail_call_direct_fixup(prog);
			bpf_jit_binary_lock_ro(header);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kvfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}