1// SPDX-License-Identifier: GPL-2.0
2/*
3 * BPF Jit compiler for s390.
4 *
5 * Minimum build requirements:
6 *
7 * - HAVE_MARCH_Z196_FEATURES: laal, laalg
8 * - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
9 * - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
10 * - 64BIT
11 *
12 * Copyright IBM Corp. 2012,2015
13 *
14 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
15 * Michael Holzheu <holzheu@linux.vnet.ibm.com>
16 */
17
18#define KMSG_COMPONENT "bpf_jit"
19#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
20
21#include <linux/netdevice.h>
22#include <linux/filter.h>
23#include <linux/init.h>
24#include <linux/bpf.h>
25#include <linux/mm.h>
26#include <linux/kernel.h>
27#include <asm/cacheflush.h>
28#include <asm/extable.h>
29#include <asm/dis.h>
30#include <asm/facility.h>
31#include <asm/nospec-branch.h>
32#include <asm/set_memory.h>
33#include "bpf_jit.h"
34
35struct bpf_jit {
36 u32 seen; /* Flags to remember seen eBPF instructions */
37 u32 seen_reg[16]; /* Array to remember which registers are used */
38 u32 *addrs; /* Array with relative instruction addresses */
39 u8 *prg_buf; /* Start of program */
40 int size; /* Size of program and literal pool */
41 int size_prg; /* Size of program */
42 int prg; /* Current position in program */
43 int lit32_start; /* Start of 32-bit literal pool */
44 int lit32; /* Current position in 32-bit literal pool */
45 int lit64_start; /* Start of 64-bit literal pool */
46 int lit64; /* Current position in 64-bit literal pool */
47 int base_ip; /* Base address for literal pool */
48 int exit_ip; /* Address of exit */
49 int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */
50 int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
51 int tail_call_start; /* Tail call start offset */
52 int excnt; /* Number of exception table entries */
53};
54
55#define SEEN_MEM BIT(0) /* use mem[] for temporary storage */
56#define SEEN_LITERAL BIT(1) /* code uses literals */
57#define SEEN_FUNC BIT(2) /* calls C functions */
58#define SEEN_TAIL_CALL BIT(3) /* code uses tail calls */
59#define SEEN_STACK (SEEN_FUNC | SEEN_MEM)
60
61/*
62 * s390 registers
63 */
64#define REG_W0 (MAX_BPF_JIT_REG + 0) /* Work register 1 (even) */
65#define REG_W1 (MAX_BPF_JIT_REG + 1) /* Work register 2 (odd) */
66#define REG_L (MAX_BPF_JIT_REG + 2) /* Literal pool register */
67#define REG_15 (MAX_BPF_JIT_REG + 3) /* Register 15 */
68#define REG_0 REG_W0 /* Register 0 */
69#define REG_1 REG_W1 /* Register 1 */
70#define REG_2 BPF_REG_1 /* Register 2 */
71#define REG_14 BPF_REG_0 /* Register 14 */
72
73/*
74 * Mapping of BPF registers to s390 registers
75 */
76static const int reg2hex[] = {
77 /* Return code */
78 [BPF_REG_0] = 14,
79 /* Function parameters */
80 [BPF_REG_1] = 2,
81 [BPF_REG_2] = 3,
82 [BPF_REG_3] = 4,
83 [BPF_REG_4] = 5,
84 [BPF_REG_5] = 6,
85 /* Call saved registers */
86 [BPF_REG_6] = 7,
87 [BPF_REG_7] = 8,
88 [BPF_REG_8] = 9,
89 [BPF_REG_9] = 10,
90 /* BPF stack pointer */
91 [BPF_REG_FP] = 13,
92 /* Register for blinding */
93 [BPF_REG_AX] = 12,
94 /* Work registers for s390x backend */
95 [REG_W0] = 0,
96 [REG_W1] = 1,
97 [REG_L] = 11,
98 [REG_15] = 15,
99};
100
101static inline u32 reg(u32 dst_reg, u32 src_reg)
102{
103 return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
104}
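/*
 * Informal example: per the reg2hex[] mapping above, BPF_REG_1 is %r2 and
 * BPF_REG_2 is %r3, so reg(BPF_REG_1, BPF_REG_2) evaluates to 0x23 and
 * EMIT4(0xb9040000, BPF_REG_1, BPF_REG_2) would encode 0xb9040023,
 * i.e. "lgr %r2,%r3".
 */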
105
106static inline u32 reg_high(u32 reg)
107{
108 return reg2hex[reg] << 4;
109}
110
111static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
112{
113 u32 r1 = reg2hex[b1];
114
115 if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
116 jit->seen_reg[r1] = 1;
117}
118
119#define REG_SET_SEEN(b1) \
120({ \
121 reg_set_seen(jit, b1); \
122})
123
124#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
125
126/*
127 * EMIT macros for code generation
128 */
129
130#define _EMIT2(op) \
131({ \
132 if (jit->prg_buf) \
133 *(u16 *) (jit->prg_buf + jit->prg) = (op); \
134 jit->prg += 2; \
135})
136
137#define EMIT2(op, b1, b2) \
138({ \
139 _EMIT2((op) | reg(b1, b2)); \
140 REG_SET_SEEN(b1); \
141 REG_SET_SEEN(b2); \
142})
143
144#define _EMIT4(op) \
145({ \
146 if (jit->prg_buf) \
147 *(u32 *) (jit->prg_buf + jit->prg) = (op); \
148 jit->prg += 4; \
149})
150
151#define EMIT4(op, b1, b2) \
152({ \
153 _EMIT4((op) | reg(b1, b2)); \
154 REG_SET_SEEN(b1); \
155 REG_SET_SEEN(b2); \
156})
157
158#define EMIT4_RRF(op, b1, b2, b3) \
159({ \
160 _EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2)); \
161 REG_SET_SEEN(b1); \
162 REG_SET_SEEN(b2); \
163 REG_SET_SEEN(b3); \
164})
165
166#define _EMIT4_DISP(op, disp) \
167({ \
168 unsigned int __disp = (disp) & 0xfff; \
169 _EMIT4((op) | __disp); \
170})
171
172#define EMIT4_DISP(op, b1, b2, disp) \
173({ \
174 _EMIT4_DISP((op) | reg_high(b1) << 16 | \
175 reg_high(b2) << 8, (disp)); \
176 REG_SET_SEEN(b1); \
177 REG_SET_SEEN(b2); \
178})
179
180#define EMIT4_IMM(op, b1, imm) \
181({ \
182 unsigned int __imm = (imm) & 0xffff; \
183 _EMIT4((op) | reg_high(b1) << 16 | __imm); \
184 REG_SET_SEEN(b1); \
185})
186
187#define EMIT4_PCREL(op, pcrel) \
188({ \
189 long __pcrel = ((pcrel) >> 1) & 0xffff; \
190 _EMIT4((op) | __pcrel); \
191})
192
193#define EMIT4_PCREL_RIC(op, mask, target) \
194({ \
195 int __rel = ((target) - jit->prg) / 2; \
196 _EMIT4((op) | (mask) << 20 | (__rel & 0xffff)); \
197})
198
199#define _EMIT6(op1, op2) \
200({ \
201 if (jit->prg_buf) { \
202 *(u32 *) (jit->prg_buf + jit->prg) = (op1); \
203 *(u16 *) (jit->prg_buf + jit->prg + 4) = (op2); \
204 } \
205 jit->prg += 6; \
206})
207
208#define _EMIT6_DISP(op1, op2, disp) \
209({ \
210 unsigned int __disp = (disp) & 0xfff; \
211 _EMIT6((op1) | __disp, op2); \
212})
213
214#define _EMIT6_DISP_LH(op1, op2, disp) \
215({ \
216 u32 _disp = (u32) (disp); \
217 unsigned int __disp_h = _disp & 0xff000; \
218 unsigned int __disp_l = _disp & 0x00fff; \
219 _EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4); \
220})
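/*
 * Note: the long-displacement (RXY/RSY) formats carry a 20-bit signed
 * displacement split into a low part DL (12 bits, merged into op1) and a
 * high part DH (8 bits, merged into op2). Informally, disp = 0x12345
 * yields __disp_l = 0x345 and __disp_h >> 4 = 0x1200.
 */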
221
222#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp) \
223({ \
224 _EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 | \
225 reg_high(b3) << 8, op2, disp); \
226 REG_SET_SEEN(b1); \
227 REG_SET_SEEN(b2); \
228 REG_SET_SEEN(b3); \
229})
230
231#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target) \
232({ \
233 unsigned int rel = (int)((target) - jit->prg) / 2; \
234 _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \
235 (op2) | (mask) << 12); \
236 REG_SET_SEEN(b1); \
237 REG_SET_SEEN(b2); \
238})
239
240#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target) \
241({ \
242 unsigned int rel = (int)((target) - jit->prg) / 2; \
243 _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \
244 (rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \
245 REG_SET_SEEN(b1); \
246 BUILD_BUG_ON(((unsigned long) (imm)) > 0xff); \
247})
248
249#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
250({ \
251 int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \
252 _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
253 REG_SET_SEEN(b1); \
254 REG_SET_SEEN(b2); \
255})
256
257#define EMIT6_PCREL_RILB(op, b, target) \
258({ \
259 unsigned int rel = (int)((target) - jit->prg) / 2; \
260 _EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\
261 REG_SET_SEEN(b); \
262})
263
264#define EMIT6_PCREL_RIL(op, target) \
265({ \
266 unsigned int rel = (int)((target) - jit->prg) / 2; \
267 _EMIT6((op) | rel >> 16, rel & 0xffff); \
268})
269
270#define EMIT6_PCREL_RILC(op, mask, target) \
271({ \
272 EMIT6_PCREL_RIL((op) | (mask) << 20, (target)); \
273})
274
275#define _EMIT6_IMM(op, imm) \
276({ \
277 unsigned int __imm = (imm); \
278 _EMIT6((op) | (__imm >> 16), __imm & 0xffff); \
279})
280
281#define EMIT6_IMM(op, b1, imm) \
282({ \
283 _EMIT6_IMM((op) | reg_high(b1) << 16, imm); \
284 REG_SET_SEEN(b1); \
285})
286
287#define _EMIT_CONST_U32(val) \
288({ \
289 unsigned int ret; \
290 ret = jit->lit32; \
291 if (jit->prg_buf) \
292 *(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\
293 jit->lit32 += 4; \
294 ret; \
295})
296
297#define EMIT_CONST_U32(val) \
298({ \
299 jit->seen |= SEEN_LITERAL; \
300 _EMIT_CONST_U32(val) - jit->base_ip; \
301})
302
303#define _EMIT_CONST_U64(val) \
304({ \
305 unsigned int ret; \
306 ret = jit->lit64; \
307 if (jit->prg_buf) \
308 *(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\
309 jit->lit64 += 8; \
310 ret; \
311})
312
313#define EMIT_CONST_U64(val) \
314({ \
315 jit->seen |= SEEN_LITERAL; \
316 _EMIT_CONST_U64(val) - jit->base_ip; \
317})
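/*
 * The two flavors above serve different addressing modes: the inner
 * _EMIT_CONST_U32/U64() return the literal's offset within the JITed
 * image (usable by PC-relative loads such as lgfrl/lgrl), while
 * EMIT_CONST_U32/U64() return the offset relative to base_ip, usable as
 * a displacement off the literal pool register %l.
 */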
318
319#define EMIT_ZERO(b1) \
320({ \
321 if (!fp->aux->verifier_zext) { \
322 /* llgfr %dst,%dst (zero extend to 64 bit) */ \
323 EMIT4(0xb9160000, b1, b1); \
324 REG_SET_SEEN(b1); \
325 } \
326})
327
328/*
329 * Return whether this is the first pass. The first pass is special, since we
330 * don't know any sizes yet, and thus must be conservative.
331 */
332static bool is_first_pass(struct bpf_jit *jit)
333{
334 return jit->size == 0;
335}
336
337/*
338 * Return whether this is the code generation pass. The code generation pass is
339 * special, since we should change as little as possible.
340 */
341static bool is_codegen_pass(struct bpf_jit *jit)
342{
343 return jit->prg_buf;
344}
345
346/*
347 * Return whether "rel" can be encoded as a short PC-relative offset
348 */
349static bool is_valid_rel(int rel)
350{
351 return rel >= -65536 && rel <= 65534;
352}
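/*
 * The limits follow from the 16-bit signed halfword offset of the short
 * relative-branch formats: -32768 * 2 = -65536 and 32767 * 2 = 65534.
 */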
353
354/*
355 * Return whether "off" can be reached using a short PC-relative offset
356 */
357static bool can_use_rel(struct bpf_jit *jit, int off)
358{
359 return is_valid_rel(off - jit->prg);
360}
361
362/*
363 * Return whether given displacement can be encoded using
364 * Long-Displacement Facility
365 */
366static bool is_valid_ldisp(int disp)
367{
368 return disp >= -524288 && disp <= 524287;
369}
370
371/*
372 * Return whether the next 32-bit literal pool entry can be referenced using
373 * Long-Displacement Facility
374 */
375static bool can_use_ldisp_for_lit32(struct bpf_jit *jit)
376{
377 return is_valid_ldisp(jit->lit32 - jit->base_ip);
378}
379
380/*
381 * Return whether the next 64-bit literal pool entry can be referenced using
382 * Long-Displacement Facility
383 */
384static bool can_use_ldisp_for_lit64(struct bpf_jit *jit)
385{
386 return is_valid_ldisp(jit->lit64 - jit->base_ip);
387}
388
389/*
390 * Fill whole space with illegal instructions
391 */
392static void jit_fill_hole(void *area, unsigned int size)
393{
394 memset(area, 0, size);
395}
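/*
 * Note: a zeroed halfword decodes as an illegal opcode on s390, so the
 * memset() above leaves the hole filled with trapping instructions.
 */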
396
397/*
398 * Save registers from "rs" (register start) to "re" (register end) on stack
399 */
400static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
401{
402 u32 off = STK_OFF_R6 + (rs - 6) * 8;
403
404 if (rs == re)
405 /* stg %rs,off(%r15) */
406 _EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
407 else
408 /* stmg %rs,%re,off(%r15) */
409 _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
410}
411
412/*
413 * Restore registers "rs" (register start) to "re" (register end) from the stack
414 */
415static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
416{
417 u32 off = STK_OFF_R6 + (rs - 6) * 8;
418
419 if (jit->seen & SEEN_STACK)
420 off += STK_OFF + stack_depth;
421
422 if (rs == re)
423 /* lg %rs,off(%r15) */
424 _EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
425 else
426 /* lmg %rs,%re,off(%r15) */
427 _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
428}
429
430/*
431 * Return first seen register (from start)
432 */
433static int get_start(struct bpf_jit *jit, int start)
434{
435 int i;
436
437 for (i = start; i <= 15; i++) {
438 if (jit->seen_reg[i])
439 return i;
440 }
441 return 0;
442}
443
444/*
445 * Return last seen register (from start), stopping at a gap of >= 2 unused registers
446 */
447static int get_end(struct bpf_jit *jit, int start)
448{
449 int i;
450
451 for (i = start; i < 15; i++) {
452 if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
453 return i - 1;
454 }
455 return jit->seen_reg[15] ? 15 : 14;
456}
457
458#define REGS_SAVE 1
459#define REGS_RESTORE 0
460/*
461 * Save and restore clobbered registers (6-15) on stack.
462 * We save/restore registers in chunks that are separated by gaps of >= 2 unused registers.
463 */
464static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
465{
466 const int last = 15, save_restore_size = 6;
467 int re = 6, rs;
468
469 if (is_first_pass(jit)) {
470 /*
471 * We don't know yet which registers are used. Reserve space
472 * conservatively.
473 */
474 jit->prg += (last - re + 1) * save_restore_size;
475 return;
476 }
477
478 do {
479 rs = get_start(jit, re);
480 if (!rs)
481 break;
482 re = get_end(jit, rs + 1);
483 if (op == REGS_SAVE)
484 save_regs(jit, rs, re);
485 else
486 restore_regs(jit, rs, re, stack_depth);
487 re++;
488 } while (re <= last);
489}
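/*
 * Informal example: if a program uses %r6-%r8 and %r14-%r15, the loop
 * above emits two chunks, "stmg %r6,%r8,..." and "stmg %r14,%r15,..."
 * (or the corresponding lmg pair on restore).
 */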
490
491static void bpf_skip(struct bpf_jit *jit, int size)
492{
493 if (size >= 6 && !is_valid_rel(size)) {
494 /* brcl 0xf,size */
495 EMIT6_PCREL_RIL(0xc0f4000000, size);
496 size -= 6;
497 } else if (size >= 4 && is_valid_rel(size)) {
498 /* brc 0xf,size */
499 EMIT4_PCREL(0xa7f40000, size);
500 size -= 4;
501 }
502 while (size >= 2) {
503 /* bcr 0,%0 */
504 _EMIT2(0x0700);
505 size -= 2;
506 }
507}
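/*
 * Informal example: the prologue below uses bpf_skip(jit, 6) to pad out
 * the missing tail-call initialization; with size = 6 this emits a
 * 4-byte "brc 0xf" over the padding followed by a single 2-byte
 * "bcr 0,%r0" nop.
 */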
508
509/*
510 * Emit function prologue
511 *
512 * Save registers and create stack frame if necessary.
513 * See stack frame layout description in "bpf_jit.h"!
514 */
515static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
516{
517 if (jit->seen & SEEN_TAIL_CALL) {
518 /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
519 _EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
520 } else {
521 /*
522 * There are no tail calls. Insert nops in order to have
523 * tail_call_start at a predictable offset.
524 */
525 bpf_skip(jit, 6);
526 }
527 /* Tail calls have to skip above initialization */
528 jit->tail_call_start = jit->prg;
529 /* Save registers */
530 save_restore_regs(jit, REGS_SAVE, stack_depth);
531 /* Setup literal pool */
532 if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
533 if (!is_first_pass(jit) &&
534 is_valid_ldisp(jit->size - (jit->prg + 2))) {
535 /* basr %l,0 */
536 EMIT2(0x0d00, REG_L, REG_0);
537 jit->base_ip = jit->prg;
538 } else {
539 /* larl %l,lit32_start */
540 EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start);
541 jit->base_ip = jit->lit32_start;
542 }
543 }
544 /* Setup stack and backchain */
545 if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
546 if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
547 /* lgr %w1,%r15 (backchain) */
548 EMIT4(0xb9040000, REG_W1, REG_15);
549 /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
550 EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
551 /* aghi %r15,-STK_OFF */
552 EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
553 if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
554 /* stg %w1,152(%r15) (backchain) */
555 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
556 REG_15, 152);
557 }
558}
559
560/*
561 * Function epilogue
562 */
563static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
564{
565 jit->exit_ip = jit->prg;
566 /* Load exit code: lgr %r2,%b0 */
567 EMIT4(0xb9040000, REG_2, BPF_REG_0);
568 /* Restore registers */
569 save_restore_regs(jit, REGS_RESTORE, stack_depth);
570 if (nospec_uses_trampoline()) {
571 jit->r14_thunk_ip = jit->prg;
572 /* Generate __s390_indirect_jump_r14 thunk */
573 /* exrl %r0,.+10 */
574 EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
575 /* j . */
576 EMIT4_PCREL(0xa7f40000, 0);
577 }
578 /* br %r14 */
579 _EMIT2(0x07fe);
580
581 if ((nospec_uses_trampoline()) &&
582 (is_first_pass(jit) || (jit->seen & SEEN_FUNC))) {
583 jit->r1_thunk_ip = jit->prg;
584 /* Generate __s390_indirect_jump_r1 thunk */
585 /* exrl %r0,.+10 */
586 EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
587 /* j . */
588 EMIT4_PCREL(0xa7f40000, 0);
589 /* br %r1 */
590 _EMIT2(0x07f1);
591 }
592}
593
594static int get_probe_mem_regno(const u8 *insn)
595{
596 /*
597 * insn must point to llgc, llgh, llgf or lg, all of which have the
598 * destination register at the same position.
599 */
600 if (insn[0] != 0xe3) /* common llgc, llgh, llgf and lg prefix */
601 return -1;
602 if (insn[5] != 0x90 && /* llgc */
603 insn[5] != 0x91 && /* llgh */
604 insn[5] != 0x16 && /* llgf */
605 insn[5] != 0x04) /* lg */
606 return -1;
607 return insn[1] >> 4;
608}
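/*
 * Informal example: in the RXY format used by llgc/llgh/llgf/lg, byte 0
 * is the 0xe3 opcode, byte 5 is the second opcode byte (e.g. 0x04 for
 * lg) and the destination register sits in the high nibble of byte 1,
 * which is what insn[1] >> 4 extracts.
 */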
609
610bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
611{
612 regs->psw.addr = extable_fixup(x);
613 regs->gprs[x->data] = 0;
614 return true;
615}
616
617static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
618 int probe_prg, int nop_prg)
619{
620 struct exception_table_entry *ex;
621 int reg, prg;
622 s64 delta;
623 u8 *insn;
624 int i;
625
626 if (!fp->aux->extable)
627 /* Do nothing during early JIT passes. */
628 return 0;
629 insn = jit->prg_buf + probe_prg;
630 reg = get_probe_mem_regno(insn);
631 if (WARN_ON_ONCE(reg < 0))
632 /* JIT bug - unexpected probe instruction. */
633 return -1;
634 if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg))
635 /* JIT bug - gap between probe and nop instructions. */
636 return -1;
637 for (i = 0; i < 2; i++) {
638 if (WARN_ON_ONCE(jit->excnt >= fp->aux->num_exentries))
639 /* Verifier bug - not enough entries. */
640 return -1;
641 ex = &fp->aux->extable[jit->excnt];
642 /* Add extable entries for probe and nop instructions. */
643 prg = i == 0 ? probe_prg : nop_prg;
644 delta = jit->prg_buf + prg - (u8 *)&ex->insn;
645 if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
646 /* JIT bug - code and extable must be close. */
647 return -1;
648 ex->insn = delta;
649 /*
650 * Always land on the nop. Note that extable infrastructure
651 * ignores fixup field, it is handled by ex_handler_bpf().
652 */
653 delta = jit->prg_buf + nop_prg - (u8 *)&ex->fixup;
654 if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
655 /* JIT bug - landing pad and extable must be close. */
656 return -1;
657 ex->fixup = delta;
658 ex->type = EX_TYPE_BPF;
659 ex->data = reg;
660 jit->excnt++;
661 }
662 return 0;
663}
664
665/*
666 * Compile one eBPF instruction into s390x code
667 *
668 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
669 * stack space for the large switch statement.
670 */
671static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
672 int i, bool extra_pass, u32 stack_depth)
673{
674 struct bpf_insn *insn = &fp->insnsi[i];
675 u32 dst_reg = insn->dst_reg;
676 u32 src_reg = insn->src_reg;
677 int last, insn_count = 1;
678 u32 *addrs = jit->addrs;
679 s32 imm = insn->imm;
680 s16 off = insn->off;
681 int probe_prg = -1;
682 unsigned int mask;
683 int nop_prg;
684 int err;
685
686 if (BPF_CLASS(insn->code) == BPF_LDX &&
687 BPF_MODE(insn->code) == BPF_PROBE_MEM)
688 probe_prg = jit->prg;
689
690 switch (insn->code) {
691 /*
692 * BPF_MOV
693 */
694 case BPF_ALU | BPF_MOV | BPF_X: /* dst = (u32) src */
695 /* llgfr %dst,%src */
696 EMIT4(0xb9160000, dst_reg, src_reg);
697 if (insn_is_zext(&insn[1]))
698 insn_count = 2;
699 break;
700 case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
701 /* lgr %dst,%src */
702 EMIT4(0xb9040000, dst_reg, src_reg);
703 break;
704 case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
705 /* llilf %dst,imm */
706 EMIT6_IMM(0xc00f0000, dst_reg, imm);
707 if (insn_is_zext(&insn[1]))
708 insn_count = 2;
709 break;
710 case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
711 /* lgfi %dst,imm */
712 EMIT6_IMM(0xc0010000, dst_reg, imm);
713 break;
714 /*
715 * BPF_LD 64
716 */
717 case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
718 {
719 /* 16 byte instruction that uses two 'struct bpf_insn' */
720 u64 imm64;
721
722 imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
723 /* lgrl %dst,imm */
724 EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64));
725 insn_count = 2;
726 break;
727 }
728 /*
729 * BPF_ADD
730 */
731 case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
732 /* ar %dst,%src */
733 EMIT2(0x1a00, dst_reg, src_reg);
734 EMIT_ZERO(dst_reg);
735 break;
736 case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
737 /* agr %dst,%src */
738 EMIT4(0xb9080000, dst_reg, src_reg);
739 break;
740 case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
741 if (imm != 0) {
742 /* alfi %dst,imm */
743 EMIT6_IMM(0xc20b0000, dst_reg, imm);
744 }
745 EMIT_ZERO(dst_reg);
746 break;
747 case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
748 if (!imm)
749 break;
750 /* agfi %dst,imm */
751 EMIT6_IMM(0xc2080000, dst_reg, imm);
752 break;
753 /*
754 * BPF_SUB
755 */
756 case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
757 /* sr %dst,%src */
758 EMIT2(0x1b00, dst_reg, src_reg);
759 EMIT_ZERO(dst_reg);
760 break;
761 case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
762 /* sgr %dst,%src */
763 EMIT4(0xb9090000, dst_reg, src_reg);
764 break;
765 case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
766 if (imm != 0) {
767 /* alfi %dst,-imm */
768 EMIT6_IMM(0xc20b0000, dst_reg, -imm);
769 }
770 EMIT_ZERO(dst_reg);
771 break;
772 case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
773 if (!imm)
774 break;
775 if (imm == -0x80000000) {
776 /* algfi %dst,0x80000000 */
777 EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
778 } else {
779 /* agfi %dst,-imm */
780 EMIT6_IMM(0xc2080000, dst_reg, -imm);
781 }
782 break;
783 /*
784 * BPF_MUL
785 */
786 case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
787 /* msr %dst,%src */
788 EMIT4(0xb2520000, dst_reg, src_reg);
789 EMIT_ZERO(dst_reg);
790 break;
791 case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
792 /* msgr %dst,%src */
793 EMIT4(0xb90c0000, dst_reg, src_reg);
794 break;
795 case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
796 if (imm != 1) {
797			/* msfi %dst,imm */
798 EMIT6_IMM(0xc2010000, dst_reg, imm);
799 }
800 EMIT_ZERO(dst_reg);
801 break;
802 case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
803 if (imm == 1)
804 break;
805 /* msgfi %dst,imm */
806 EMIT6_IMM(0xc2000000, dst_reg, imm);
807 break;
808 /*
809 * BPF_DIV / BPF_MOD
810 */
811 case BPF_ALU | BPF_DIV | BPF_X: /* dst = (u32) dst / (u32) src */
812 case BPF_ALU | BPF_MOD | BPF_X: /* dst = (u32) dst % (u32) src */
813 {
814 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
815
816 /* lhi %w0,0 */
817 EMIT4_IMM(0xa7080000, REG_W0, 0);
818 /* lr %w1,%dst */
819 EMIT2(0x1800, REG_W1, dst_reg);
820 /* dlr %w0,%src */
821 EMIT4(0xb9970000, REG_W0, src_reg);
822 /* llgfr %dst,%rc */
823 EMIT4(0xb9160000, dst_reg, rc_reg);
824 if (insn_is_zext(&insn[1]))
825 insn_count = 2;
826 break;
827 }
828 case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
829 case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
830 {
831 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
832
833 /* lghi %w0,0 */
834 EMIT4_IMM(0xa7090000, REG_W0, 0);
835 /* lgr %w1,%dst */
836 EMIT4(0xb9040000, REG_W1, dst_reg);
837		/* dlgr %w0,%src */
838 EMIT4(0xb9870000, REG_W0, src_reg);
839 /* lgr %dst,%rc */
840 EMIT4(0xb9040000, dst_reg, rc_reg);
841 break;
842 }
843 case BPF_ALU | BPF_DIV | BPF_K: /* dst = (u32) dst / (u32) imm */
844 case BPF_ALU | BPF_MOD | BPF_K: /* dst = (u32) dst % (u32) imm */
845 {
846 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
847
848 if (imm == 1) {
849 if (BPF_OP(insn->code) == BPF_MOD)
850				/* lghi %dst,0 */
851 EMIT4_IMM(0xa7090000, dst_reg, 0);
852 else
853 EMIT_ZERO(dst_reg);
854 break;
855 }
856 /* lhi %w0,0 */
857 EMIT4_IMM(0xa7080000, REG_W0, 0);
858 /* lr %w1,%dst */
859 EMIT2(0x1800, REG_W1, dst_reg);
860 if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) {
861 /* dl %w0,<d(imm)>(%l) */
862 EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
863 EMIT_CONST_U32(imm));
864 } else {
865 /* lgfrl %dst,imm */
866 EMIT6_PCREL_RILB(0xc40c0000, dst_reg,
867 _EMIT_CONST_U32(imm));
868 jit->seen |= SEEN_LITERAL;
869 /* dlr %w0,%dst */
870 EMIT4(0xb9970000, REG_W0, dst_reg);
871 }
872 /* llgfr %dst,%rc */
873 EMIT4(0xb9160000, dst_reg, rc_reg);
874 if (insn_is_zext(&insn[1]))
875 insn_count = 2;
876 break;
877 }
878 case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
879 case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
880 {
881 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
882
883 if (imm == 1) {
884 if (BPF_OP(insn->code) == BPF_MOD)
885				/* lghi %dst,0 */
886 EMIT4_IMM(0xa7090000, dst_reg, 0);
887 break;
888 }
889 /* lghi %w0,0 */
890 EMIT4_IMM(0xa7090000, REG_W0, 0);
891 /* lgr %w1,%dst */
892 EMIT4(0xb9040000, REG_W1, dst_reg);
893 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
894 /* dlg %w0,<d(imm)>(%l) */
895 EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
896 EMIT_CONST_U64(imm));
897 } else {
898 /* lgrl %dst,imm */
899 EMIT6_PCREL_RILB(0xc4080000, dst_reg,
900 _EMIT_CONST_U64(imm));
901 jit->seen |= SEEN_LITERAL;
902 /* dlgr %w0,%dst */
903 EMIT4(0xb9870000, REG_W0, dst_reg);
904 }
905 /* lgr %dst,%rc */
906 EMIT4(0xb9040000, dst_reg, rc_reg);
907 break;
908 }
909 /*
910 * BPF_AND
911 */
912 case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
913 /* nr %dst,%src */
914 EMIT2(0x1400, dst_reg, src_reg);
915 EMIT_ZERO(dst_reg);
916 break;
917 case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
918 /* ngr %dst,%src */
919 EMIT4(0xb9800000, dst_reg, src_reg);
920 break;
921 case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
922 /* nilf %dst,imm */
923 EMIT6_IMM(0xc00b0000, dst_reg, imm);
924 EMIT_ZERO(dst_reg);
925 break;
926 case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
927 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
928 /* ng %dst,<d(imm)>(%l) */
929 EMIT6_DISP_LH(0xe3000000, 0x0080,
930 dst_reg, REG_0, REG_L,
931 EMIT_CONST_U64(imm));
932 } else {
933 /* lgrl %w0,imm */
934 EMIT6_PCREL_RILB(0xc4080000, REG_W0,
935 _EMIT_CONST_U64(imm));
936 jit->seen |= SEEN_LITERAL;
937 /* ngr %dst,%w0 */
938 EMIT4(0xb9800000, dst_reg, REG_W0);
939 }
940 break;
941 /*
942 * BPF_OR
943 */
944 case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
945 /* or %dst,%src */
946 EMIT2(0x1600, dst_reg, src_reg);
947 EMIT_ZERO(dst_reg);
948 break;
949 case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
950 /* ogr %dst,%src */
951 EMIT4(0xb9810000, dst_reg, src_reg);
952 break;
953 case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
954 /* oilf %dst,imm */
955 EMIT6_IMM(0xc00d0000, dst_reg, imm);
956 EMIT_ZERO(dst_reg);
957 break;
958 case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
959 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
960 /* og %dst,<d(imm)>(%l) */
961 EMIT6_DISP_LH(0xe3000000, 0x0081,
962 dst_reg, REG_0, REG_L,
963 EMIT_CONST_U64(imm));
964 } else {
965 /* lgrl %w0,imm */
966 EMIT6_PCREL_RILB(0xc4080000, REG_W0,
967 _EMIT_CONST_U64(imm));
968 jit->seen |= SEEN_LITERAL;
969 /* ogr %dst,%w0 */
970 EMIT4(0xb9810000, dst_reg, REG_W0);
971 }
972 break;
973 /*
974 * BPF_XOR
975 */
976 case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
977 /* xr %dst,%src */
978 EMIT2(0x1700, dst_reg, src_reg);
979 EMIT_ZERO(dst_reg);
980 break;
981 case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
982 /* xgr %dst,%src */
983 EMIT4(0xb9820000, dst_reg, src_reg);
984 break;
985 case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
986 if (imm != 0) {
987 /* xilf %dst,imm */
988 EMIT6_IMM(0xc0070000, dst_reg, imm);
989 }
990 EMIT_ZERO(dst_reg);
991 break;
992 case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
993 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
994 /* xg %dst,<d(imm)>(%l) */
995 EMIT6_DISP_LH(0xe3000000, 0x0082,
996 dst_reg, REG_0, REG_L,
997 EMIT_CONST_U64(imm));
998 } else {
999 /* lgrl %w0,imm */
1000 EMIT6_PCREL_RILB(0xc4080000, REG_W0,
1001 _EMIT_CONST_U64(imm));
1002 jit->seen |= SEEN_LITERAL;
1003 /* xgr %dst,%w0 */
1004 EMIT4(0xb9820000, dst_reg, REG_W0);
1005 }
1006 break;
1007 /*
1008 * BPF_LSH
1009 */
1010 case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
1011 /* sll %dst,0(%src) */
1012 EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
1013 EMIT_ZERO(dst_reg);
1014 break;
1015 case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
1016 /* sllg %dst,%dst,0(%src) */
1017 EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
1018 break;
1019 case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
1020 if (imm != 0) {
1021 /* sll %dst,imm(%r0) */
1022 EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
1023 }
1024 EMIT_ZERO(dst_reg);
1025 break;
1026 case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
1027 if (imm == 0)
1028 break;
1029 /* sllg %dst,%dst,imm(%r0) */
1030 EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
1031 break;
1032 /*
1033 * BPF_RSH
1034 */
1035 case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
1036 /* srl %dst,0(%src) */
1037 EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
1038 EMIT_ZERO(dst_reg);
1039 break;
1040 case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
1041 /* srlg %dst,%dst,0(%src) */
1042 EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
1043 break;
1044 case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
1045 if (imm != 0) {
1046 /* srl %dst,imm(%r0) */
1047 EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
1048 }
1049 EMIT_ZERO(dst_reg);
1050 break;
1051 case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
1052 if (imm == 0)
1053 break;
1054 /* srlg %dst,%dst,imm(%r0) */
1055 EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
1056 break;
1057 /*
1058 * BPF_ARSH
1059 */
1060 case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */
1061		/* sra %dst,0(%src) */
1062 EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0);
1063 EMIT_ZERO(dst_reg);
1064 break;
1065 case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
1066 /* srag %dst,%dst,0(%src) */
1067 EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
1068 break;
1069	case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst) >>= imm */
1070 if (imm != 0) {
1071 /* sra %dst,imm(%r0) */
1072 EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
1073 }
1074 EMIT_ZERO(dst_reg);
1075 break;
1076 case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
1077 if (imm == 0)
1078 break;
1079 /* srag %dst,%dst,imm(%r0) */
1080 EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
1081 break;
1082 /*
1083 * BPF_NEG
1084 */
1085 case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
1086 /* lcr %dst,%dst */
1087 EMIT2(0x1300, dst_reg, dst_reg);
1088 EMIT_ZERO(dst_reg);
1089 break;
1090 case BPF_ALU64 | BPF_NEG: /* dst = -dst */
1091 /* lcgr %dst,%dst */
1092 EMIT4(0xb9030000, dst_reg, dst_reg);
1093 break;
1094 /*
1095 * BPF_FROM_BE/LE
1096 */
1097 case BPF_ALU | BPF_END | BPF_FROM_BE:
1098 /* s390 is big endian, therefore only clear high order bytes */
1099 switch (imm) {
1100 case 16: /* dst = (u16) cpu_to_be16(dst) */
1101 /* llghr %dst,%dst */
1102 EMIT4(0xb9850000, dst_reg, dst_reg);
1103 if (insn_is_zext(&insn[1]))
1104 insn_count = 2;
1105 break;
1106 case 32: /* dst = (u32) cpu_to_be32(dst) */
1107 if (!fp->aux->verifier_zext)
1108 /* llgfr %dst,%dst */
1109 EMIT4(0xb9160000, dst_reg, dst_reg);
1110 break;
1111 case 64: /* dst = (u64) cpu_to_be64(dst) */
1112 break;
1113 }
1114 break;
1115 case BPF_ALU | BPF_END | BPF_FROM_LE:
1116 switch (imm) {
1117 case 16: /* dst = (u16) cpu_to_le16(dst) */
1118 /* lrvr %dst,%dst */
1119 EMIT4(0xb91f0000, dst_reg, dst_reg);
1120 /* srl %dst,16(%r0) */
1121 EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
1122 /* llghr %dst,%dst */
1123 EMIT4(0xb9850000, dst_reg, dst_reg);
1124 if (insn_is_zext(&insn[1]))
1125 insn_count = 2;
1126 break;
1127 case 32: /* dst = (u32) cpu_to_le32(dst) */
1128 /* lrvr %dst,%dst */
1129 EMIT4(0xb91f0000, dst_reg, dst_reg);
1130 if (!fp->aux->verifier_zext)
1131 /* llgfr %dst,%dst */
1132 EMIT4(0xb9160000, dst_reg, dst_reg);
1133 break;
1134 case 64: /* dst = (u64) cpu_to_le64(dst) */
1135 /* lrvgr %dst,%dst */
1136 EMIT4(0xb90f0000, dst_reg, dst_reg);
1137 break;
1138 }
1139 break;
1140 /*
1141 * BPF_NOSPEC (speculation barrier)
1142 */
1143 case BPF_ST | BPF_NOSPEC:
1144 break;
1145 /*
1146 * BPF_ST(X)
1147 */
1148 case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
1149 /* stcy %src,off(%dst) */
1150 EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
1151 jit->seen |= SEEN_MEM;
1152 break;
1153	case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
1154 /* sthy %src,off(%dst) */
1155 EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
1156 jit->seen |= SEEN_MEM;
1157 break;
1158 case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
1159 /* sty %src,off(%dst) */
1160 EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
1161 jit->seen |= SEEN_MEM;
1162 break;
1163	case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
1164 /* stg %src,off(%dst) */
1165 EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
1166 jit->seen |= SEEN_MEM;
1167 break;
1168 case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
1169 /* lhi %w0,imm */
1170 EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
1171 /* stcy %w0,off(dst) */
1172 EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
1173 jit->seen |= SEEN_MEM;
1174 break;
1175	case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
1176 /* lhi %w0,imm */
1177 EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
1178 /* sthy %w0,off(dst) */
1179 EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
1180 jit->seen |= SEEN_MEM;
1181 break;
1182 case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
1183 /* llilf %w0,imm */
1184 EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
1185 /* sty %w0,off(%dst) */
1186 EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
1187 jit->seen |= SEEN_MEM;
1188 break;
1189 case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
1190 /* lgfi %w0,imm */
1191 EMIT6_IMM(0xc0010000, REG_W0, imm);
1192 /* stg %w0,off(%dst) */
1193 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
1194 jit->seen |= SEEN_MEM;
1195 break;
1196 /*
1197 * BPF_ATOMIC
1198 */
1199 case BPF_STX | BPF_ATOMIC | BPF_DW:
1200 case BPF_STX | BPF_ATOMIC | BPF_W:
1201 {
1202 bool is32 = BPF_SIZE(insn->code) == BPF_W;
1203
1204 switch (insn->imm) {
1205/* {op32|op64} {%w0|%src},%src,off(%dst) */
1206#define EMIT_ATOMIC(op32, op64) do { \
1207 EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \
1208 (insn->imm & BPF_FETCH) ? src_reg : REG_W0, \
1209 src_reg, dst_reg, off); \
1210 if (is32 && (insn->imm & BPF_FETCH)) \
1211 EMIT_ZERO(src_reg); \
1212} while (0)
1213 case BPF_ADD:
1214 case BPF_ADD | BPF_FETCH:
1215 /* {laal|laalg} */
1216 EMIT_ATOMIC(0x00fa, 0x00ea);
1217 break;
1218 case BPF_AND:
1219 case BPF_AND | BPF_FETCH:
1220 /* {lan|lang} */
1221 EMIT_ATOMIC(0x00f4, 0x00e4);
1222 break;
1223 case BPF_OR:
1224 case BPF_OR | BPF_FETCH:
1225 /* {lao|laog} */
1226 EMIT_ATOMIC(0x00f6, 0x00e6);
1227 break;
1228 case BPF_XOR:
1229 case BPF_XOR | BPF_FETCH:
1230 /* {lax|laxg} */
1231 EMIT_ATOMIC(0x00f7, 0x00e7);
1232 break;
1233#undef EMIT_ATOMIC
1234 case BPF_XCHG:
1235 /* {ly|lg} %w0,off(%dst) */
1236 EMIT6_DISP_LH(0xe3000000,
1237 is32 ? 0x0058 : 0x0004, REG_W0, REG_0,
1238 dst_reg, off);
1239 /* 0: {csy|csg} %w0,%src,off(%dst) */
1240 EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
1241 REG_W0, src_reg, dst_reg, off);
1242 /* brc 4,0b */
1243 EMIT4_PCREL_RIC(0xa7040000, 4, jit->prg - 6);
1244 /* {llgfr|lgr} %src,%w0 */
1245 EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0);
1246 if (is32 && insn_is_zext(&insn[1]))
1247 insn_count = 2;
1248 break;
1249 case BPF_CMPXCHG:
1250 /* 0: {csy|csg} %b0,%src,off(%dst) */
1251 EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
1252 BPF_REG_0, src_reg, dst_reg, off);
1253 break;
1254 default:
1255 pr_err("Unknown atomic operation %02x\n", insn->imm);
1256 return -1;
1257 }
1258
1259 jit->seen |= SEEN_MEM;
1260 break;
1261 }
1262 /*
1263 * BPF_LDX
1264 */
1265 case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
1266 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1267 /* llgc %dst,0(off,%src) */
1268 EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
1269 jit->seen |= SEEN_MEM;
1270 if (insn_is_zext(&insn[1]))
1271 insn_count = 2;
1272 break;
1273 case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
1274 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1275 /* llgh %dst,0(off,%src) */
1276 EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
1277 jit->seen |= SEEN_MEM;
1278 if (insn_is_zext(&insn[1]))
1279 insn_count = 2;
1280 break;
1281 case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
1282 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1283 /* llgf %dst,off(%src) */
1284 jit->seen |= SEEN_MEM;
1285 EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
1286 if (insn_is_zext(&insn[1]))
1287 insn_count = 2;
1288 break;
1289 case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
1290 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1291 /* lg %dst,0(off,%src) */
1292 jit->seen |= SEEN_MEM;
1293 EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
1294 break;
1295 /*
1296 * BPF_JMP / CALL
1297 */
1298 case BPF_JMP | BPF_CALL:
1299 {
1300 u64 func;
1301 bool func_addr_fixed;
1302 int ret;
1303
1304 ret = bpf_jit_get_func_addr(fp, insn, extra_pass,
1305 &func, &func_addr_fixed);
1306 if (ret < 0)
1307 return -1;
1308
1309 REG_SET_SEEN(BPF_REG_5);
1310 jit->seen |= SEEN_FUNC;
1311 /* lgrl %w1,func */
1312 EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
1313 if (nospec_uses_trampoline()) {
1314 /* brasl %r14,__s390_indirect_jump_r1 */
1315 EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
1316 } else {
1317 /* basr %r14,%w1 */
1318 EMIT2(0x0d00, REG_14, REG_W1);
1319 }
1320 /* lgr %b0,%r2: load return value into %b0 */
1321 EMIT4(0xb9040000, BPF_REG_0, REG_2);
1322 break;
1323 }
1324 case BPF_JMP | BPF_TAIL_CALL: {
1325 int patch_1_clrj, patch_2_clij, patch_3_brc;
1326
1327 /*
1328 * Implicit input:
1329 * B1: pointer to ctx
1330 * B2: pointer to bpf_array
1331 * B3: index in bpf_array
1332 */
1333 jit->seen |= SEEN_TAIL_CALL;
1334
1335 /*
1336 * if (index >= array->map.max_entries)
1337 * goto out;
1338 */
1339
1340 /* llgf %w1,map.max_entries(%b2) */
1341 EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
1342 offsetof(struct bpf_array, map.max_entries));
1343 /* if ((u32)%b3 >= (u32)%w1) goto out; */
1344 /* clrj %b3,%w1,0xa,out */
1345 patch_1_clrj = jit->prg;
1346 EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa,
1347 jit->prg);
1348
1349 /*
1350 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
1351 * goto out;
1352 */
1353
1354 if (jit->seen & SEEN_STACK)
1355 off = STK_OFF_TCCNT + STK_OFF + stack_depth;
1356 else
1357 off = STK_OFF_TCCNT;
1358 /* lhi %w0,1 */
1359 EMIT4_IMM(0xa7080000, REG_W0, 1);
1360 /* laal %w1,%w0,off(%r15) */
1361 EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
1362 /* clij %w1,MAX_TAIL_CALL_CNT-1,0x2,out */
1363 patch_2_clij = jit->prg;
1364 EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT - 1,
1365 2, jit->prg);
1366
1367 /*
1368 * prog = array->ptrs[index];
1369 * if (prog == NULL)
1370 * goto out;
1371 */
1372
1373 /* llgfr %r1,%b3: %r1 = (u32) index */
1374 EMIT4(0xb9160000, REG_1, BPF_REG_3);
1375 /* sllg %r1,%r1,3: %r1 *= 8 */
1376 EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
1377 /* ltg %r1,prog(%b2,%r1) */
1378 EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
1379 REG_1, offsetof(struct bpf_array, ptrs));
1380 /* brc 0x8,out */
1381 patch_3_brc = jit->prg;
1382 EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg);
1383
1384 /*
1385 * Restore registers before calling function
1386 */
1387 save_restore_regs(jit, REGS_RESTORE, stack_depth);
1388
1389 /*
1390 * goto *(prog->bpf_func + tail_call_start);
1391 */
1392
1393 /* lg %r1,bpf_func(%r1) */
1394 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
1395 offsetof(struct bpf_prog, bpf_func));
1396 /* bc 0xf,tail_call_start(%r1) */
1397 _EMIT4(0x47f01000 + jit->tail_call_start);
1398 /* out: */
1399 if (jit->prg_buf) {
1400 *(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
1401 (jit->prg - patch_1_clrj) >> 1;
1402 *(u16 *)(jit->prg_buf + patch_2_clij + 2) =
1403 (jit->prg - patch_2_clij) >> 1;
1404 *(u16 *)(jit->prg_buf + patch_3_brc + 2) =
1405 (jit->prg - patch_3_brc) >> 1;
1406 }
1407 break;
1408 }
1409 case BPF_JMP | BPF_EXIT: /* return b0 */
1410 last = (i == fp->len - 1) ? 1 : 0;
1411 if (last)
1412 break;
1413 if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip))
1414 /* brc 0xf, <exit> */
1415 EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip);
1416 else
1417 /* brcl 0xf, <exit> */
1418 EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip);
1419 break;
1420 /*
1421 * Branch relative (number of skipped instructions) to offset on
1422 * condition.
1423 *
1424 * Condition code to mask mapping:
1425 *
1426 * CC | Description | Mask
1427 * ------------------------------
1428 * 0 | Operands equal | 8
1429 * 1 | First operand low | 4
1430 * 2 | First operand high | 2
1431 * 3 | Unused | 1
1432 *
1433 * For s390x relative branches: ip = ip + off_bytes
1434 * For BPF relative branches: insn = insn + off_insns + 1
1435 *
1436 * For example for s390x with offset 0 we jump to the branch
1437 * instruction itself (loop) and for BPF with offset 0 we
1438 * branch to the instruction behind the branch.
1439 */
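	/*
	 * Informal example: the "jne" mask 0x7000 selects CC1|CC2|CC3
	 * (4 + 2 + 1), i.e. branch whenever the operands are not equal,
	 * and the "jhe" mask 0xa000 selects CC0|CC2 (8 + 2), i.e. branch
	 * on equal or first operand high.
	 */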
1440 case BPF_JMP | BPF_JA: /* if (true) */
1441 mask = 0xf000; /* j */
1442 goto branch_oc;
1443 case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
1444 case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */
1445 mask = 0x2000; /* jh */
1446 goto branch_ks;
1447 case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
1448 case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */
1449 mask = 0x4000; /* jl */
1450 goto branch_ks;
1451 case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
1452 case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */
1453 mask = 0xa000; /* jhe */
1454 goto branch_ks;
1455 case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
1456 case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */
1457 mask = 0xc000; /* jle */
1458 goto branch_ks;
1459 case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
1460 case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */
1461 mask = 0x2000; /* jh */
1462 goto branch_ku;
1463 case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
1464 case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */
1465 mask = 0x4000; /* jl */
1466 goto branch_ku;
1467 case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
1468 case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */
1469 mask = 0xa000; /* jhe */
1470 goto branch_ku;
1471 case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
1472 case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */
1473 mask = 0xc000; /* jle */
1474 goto branch_ku;
1475 case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
1476 case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */
1477 mask = 0x7000; /* jne */
1478 goto branch_ku;
1479 case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
1480 case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */
1481 mask = 0x8000; /* je */
1482 goto branch_ku;
1483 case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
1484 case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */
1485 mask = 0x7000; /* jnz */
1486 if (BPF_CLASS(insn->code) == BPF_JMP32) {
1487 /* llilf %w1,imm (load zero extend imm) */
1488 EMIT6_IMM(0xc00f0000, REG_W1, imm);
1489 /* nr %w1,%dst */
1490 EMIT2(0x1400, REG_W1, dst_reg);
1491 } else {
1492 /* lgfi %w1,imm (load sign extend imm) */
1493 EMIT6_IMM(0xc0010000, REG_W1, imm);
1494 /* ngr %w1,%dst */
1495 EMIT4(0xb9800000, REG_W1, dst_reg);
1496 }
1497 goto branch_oc;
1498
1499 case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
1500 case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */
1501 mask = 0x2000; /* jh */
1502 goto branch_xs;
1503 case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
1504 case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */
1505 mask = 0x4000; /* jl */
1506 goto branch_xs;
1507 case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
1508 case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */
1509 mask = 0xa000; /* jhe */
1510 goto branch_xs;
1511 case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
1512 case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */
1513 mask = 0xc000; /* jle */
1514 goto branch_xs;
1515 case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
1516 case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */
1517 mask = 0x2000; /* jh */
1518 goto branch_xu;
1519 case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
1520 case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */
1521 mask = 0x4000; /* jl */
1522 goto branch_xu;
1523 case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
1524 case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */
1525 mask = 0xa000; /* jhe */
1526 goto branch_xu;
1527 case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
1528 case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */
1529 mask = 0xc000; /* jle */
1530 goto branch_xu;
1531 case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
1532 case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */
1533 mask = 0x7000; /* jne */
1534 goto branch_xu;
1535 case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
1536 case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */
1537 mask = 0x8000; /* je */
1538 goto branch_xu;
1539 case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
1540 case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */
1541 {
1542 bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1543
1544 mask = 0x7000; /* jnz */
1545 /* nrk or ngrk %w1,%dst,%src */
1546 EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000),
1547 REG_W1, dst_reg, src_reg);
1548 goto branch_oc;
1549branch_ks:
1550 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1551 /* cfi or cgfi %dst,imm */
1552 EMIT6_IMM(is_jmp32 ? 0xc20d0000 : 0xc20c0000,
1553 dst_reg, imm);
1554 if (!is_first_pass(jit) &&
1555 can_use_rel(jit, addrs[i + off + 1])) {
1556 /* brc mask,off */
1557 EMIT4_PCREL_RIC(0xa7040000,
1558 mask >> 12, addrs[i + off + 1]);
1559 } else {
1560 /* brcl mask,off */
1561 EMIT6_PCREL_RILC(0xc0040000,
1562 mask >> 12, addrs[i + off + 1]);
1563 }
1564 break;
1565branch_ku:
1566 /* lgfi %w1,imm (load sign extend imm) */
1567 src_reg = REG_1;
1568 EMIT6_IMM(0xc0010000, src_reg, imm);
1569 goto branch_xu;
1570branch_xs:
1571 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1572 if (!is_first_pass(jit) &&
1573 can_use_rel(jit, addrs[i + off + 1])) {
1574 /* crj or cgrj %dst,%src,mask,off */
1575 EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
1576 dst_reg, src_reg, i, off, mask);
1577 } else {
1578 /* cr or cgr %dst,%src */
1579 if (is_jmp32)
1580 EMIT2(0x1900, dst_reg, src_reg);
1581 else
1582 EMIT4(0xb9200000, dst_reg, src_reg);
1583 /* brcl mask,off */
1584 EMIT6_PCREL_RILC(0xc0040000,
1585 mask >> 12, addrs[i + off + 1]);
1586 }
1587 break;
1588branch_xu:
1589 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1590 if (!is_first_pass(jit) &&
1591 can_use_rel(jit, addrs[i + off + 1])) {
1592 /* clrj or clgrj %dst,%src,mask,off */
1593 EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
1594 dst_reg, src_reg, i, off, mask);
1595 } else {
1596 /* clr or clgr %dst,%src */
1597 if (is_jmp32)
1598 EMIT2(0x1500, dst_reg, src_reg);
1599 else
1600 EMIT4(0xb9210000, dst_reg, src_reg);
1601 /* brcl mask,off */
1602 EMIT6_PCREL_RILC(0xc0040000,
1603 mask >> 12, addrs[i + off + 1]);
1604 }
1605 break;
1606branch_oc:
1607 if (!is_first_pass(jit) &&
1608 can_use_rel(jit, addrs[i + off + 1])) {
1609 /* brc mask,off */
1610 EMIT4_PCREL_RIC(0xa7040000,
1611 mask >> 12, addrs[i + off + 1]);
1612 } else {
1613 /* brcl mask,off */
1614 EMIT6_PCREL_RILC(0xc0040000,
1615 mask >> 12, addrs[i + off + 1]);
1616 }
1617 break;
1618 }
1619 default: /* too complex, give up */
1620 pr_err("Unknown opcode %02x\n", insn->code);
1621 return -1;
1622 }
1623
1624 if (probe_prg != -1) {
1625 /*
1626 * Handlers of certain exceptions leave psw.addr pointing to
1627 * the instruction directly after the failing one. Therefore,
1628 * create two exception table entries and also add a nop in
1629 * case two probing instructions come directly after each
1630 * other.
1631 */
1632 nop_prg = jit->prg;
1633 /* bcr 0,%0 */
1634 _EMIT2(0x0700);
1635 err = bpf_jit_probe_mem(jit, fp, probe_prg, nop_prg);
1636 if (err < 0)
1637 return err;
1638 }
1639
1640 return insn_count;
1641}
1642
1643/*
1644 * Return whether the new i-th instruction address does not violate any invariant
1645 */
1646static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i)
1647{
1648 /* On the first pass anything goes */
1649 if (is_first_pass(jit))
1650 return true;
1651
1652 /* The codegen pass must not change anything */
1653 if (is_codegen_pass(jit))
1654 return jit->addrs[i] == jit->prg;
1655
1656 /* Passes in between must not increase code size */
1657 return jit->addrs[i] >= jit->prg;
1658}
1659
1660/*
1661 * Update the address of i-th instruction
1662 */
1663static int bpf_set_addr(struct bpf_jit *jit, int i)
1664{
1665 int delta;
1666
1667 if (is_codegen_pass(jit)) {
1668 delta = jit->prg - jit->addrs[i];
1669 if (delta < 0)
1670 bpf_skip(jit, -delta);
1671 }
1672 if (WARN_ON_ONCE(!bpf_is_new_addr_sane(jit, i)))
1673 return -1;
1674 jit->addrs[i] = jit->prg;
1675 return 0;
1676}
1677
1678/*
1679 * Compile eBPF program into s390x code
1680 */
1681static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
1682 bool extra_pass, u32 stack_depth)
1683{
1684 int i, insn_count, lit32_size, lit64_size;
1685
1686 jit->lit32 = jit->lit32_start;
1687 jit->lit64 = jit->lit64_start;
1688 jit->prg = 0;
1689 jit->excnt = 0;
1690
1691 bpf_jit_prologue(jit, stack_depth);
1692 if (bpf_set_addr(jit, 0) < 0)
1693 return -1;
1694 for (i = 0; i < fp->len; i += insn_count) {
1695 insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth);
1696 if (insn_count < 0)
1697 return -1;
1698 /* Next instruction address */
1699 if (bpf_set_addr(jit, i + insn_count) < 0)
1700 return -1;
1701 }
1702 bpf_jit_epilogue(jit, stack_depth);
1703
1704 lit32_size = jit->lit32 - jit->lit32_start;
1705 lit64_size = jit->lit64 - jit->lit64_start;
1706 jit->lit32_start = jit->prg;
1707 if (lit32_size)
1708 jit->lit32_start = ALIGN(jit->lit32_start, 4);
1709 jit->lit64_start = jit->lit32_start + lit32_size;
1710 if (lit64_size)
1711 jit->lit64_start = ALIGN(jit->lit64_start, 8);
1712 jit->size = jit->lit64_start + lit64_size;
1713 jit->size_prg = jit->prg;
1714
1715 if (WARN_ON_ONCE(fp->aux->extable &&
1716 jit->excnt != fp->aux->num_exentries))
1717 /* Verifier bug - too many entries. */
1718 return -1;
1719
1720 return 0;
1721}
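/*
 * Informal example of the layout computed above: with jit->prg = 1000,
 * a 4-byte 32-bit pool and a 16-byte 64-bit pool, lit32_start = 1000,
 * lit64_start = ALIGN(1004, 8) = 1008 and jit->size = 1024.
 */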
1722
1723bool bpf_jit_needs_zext(void)
1724{
1725 return true;
1726}
1727
1728struct s390_jit_data {
1729 struct bpf_binary_header *header;
1730 struct bpf_jit ctx;
1731 int pass;
1732};
1733
1734static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
1735 struct bpf_prog *fp)
1736{
1737 struct bpf_binary_header *header;
1738 u32 extable_size;
1739 u32 code_size;
1740
1741 /* We need two entries per insn. */
1742 fp->aux->num_exentries *= 2;
1743
1744 code_size = roundup(jit->size,
1745 __alignof__(struct exception_table_entry));
1746 extable_size = fp->aux->num_exentries *
1747 sizeof(struct exception_table_entry);
1748 header = bpf_jit_binary_alloc(code_size + extable_size, &jit->prg_buf,
1749 8, jit_fill_hole);
1750 if (!header)
1751 return NULL;
1752 fp->aux->extable = (struct exception_table_entry *)
1753 (jit->prg_buf + code_size);
1754 return header;
1755}
1756
1757/*
1758 * Compile eBPF program "fp"
1759 */
1760struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
1761{
1762 u32 stack_depth = round_up(fp->aux->stack_depth, 8);
1763 struct bpf_prog *tmp, *orig_fp = fp;
1764 struct bpf_binary_header *header;
1765 struct s390_jit_data *jit_data;
1766 bool tmp_blinded = false;
1767 bool extra_pass = false;
1768 struct bpf_jit jit;
1769 int pass;
1770
1771 if (!fp->jit_requested)
1772 return orig_fp;
1773
1774 tmp = bpf_jit_blind_constants(fp);
1775 /*
1776 * If blinding was requested and we failed during blinding,
1777 * we must fall back to the interpreter.
1778 */
1779 if (IS_ERR(tmp))
1780 return orig_fp;
1781 if (tmp != fp) {
1782 tmp_blinded = true;
1783 fp = tmp;
1784 }
1785
1786 jit_data = fp->aux->jit_data;
1787 if (!jit_data) {
1788 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
1789 if (!jit_data) {
1790 fp = orig_fp;
1791 goto out;
1792 }
1793 fp->aux->jit_data = jit_data;
1794 }
1795 if (jit_data->ctx.addrs) {
1796 jit = jit_data->ctx;
1797 header = jit_data->header;
1798 extra_pass = true;
1799 pass = jit_data->pass + 1;
1800 goto skip_init_ctx;
1801 }
1802
1803 memset(&jit, 0, sizeof(jit));
1804 jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
1805 if (jit.addrs == NULL) {
1806 fp = orig_fp;
1807 goto free_addrs;
1808 }
1809 /*
1810 * Three initial passes:
1811 * - 1/2: Determine clobbered registers
1812 * - 3: Calculate program size and addrs array
1813 */
1814 for (pass = 1; pass <= 3; pass++) {
1815 if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
1816 fp = orig_fp;
1817 goto free_addrs;
1818 }
1819 }
1820 /*
1821 * Final pass: Allocate and generate program
1822 */
1823 header = bpf_jit_alloc(&jit, fp);
1824 if (!header) {
1825 fp = orig_fp;
1826 goto free_addrs;
1827 }
1828skip_init_ctx:
1829 if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
1830 bpf_jit_binary_free(header);
1831 fp = orig_fp;
1832 goto free_addrs;
1833 }
1834 if (bpf_jit_enable > 1) {
1835 bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
1836 print_fn_code(jit.prg_buf, jit.size_prg);
1837 }
1838 if (!fp->is_func || extra_pass) {
1839 bpf_jit_binary_lock_ro(header);
1840 } else {
1841 jit_data->header = header;
1842 jit_data->ctx = jit;
1843 jit_data->pass = pass;
1844 }
1845 fp->bpf_func = (void *) jit.prg_buf;
1846 fp->jited = 1;
1847 fp->jited_len = jit.size;
1848
1849 if (!fp->is_func || extra_pass) {
1850 bpf_prog_fill_jited_linfo(fp, jit.addrs + 1);
1851free_addrs:
1852 kvfree(jit.addrs);
1853 kfree(jit_data);
1854 fp->aux->jit_data = NULL;
1855 }
1856out:
1857 if (tmp_blinded)
1858 bpf_jit_prog_release_other(fp, fp == orig_fp ?
1859 tmp : orig_fp);
1860 return fp;
1861}
225 _EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4); \
226})
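/*
 * For illustration: _EMIT6_DISP_LH() splits a 20-bit long displacement
 * into the RXY/RSY fields DL (low 12 bits, in the first instruction
 * word) and DH (high 8 bits, in front of the second opcode byte).
 * E.g. disp = 0x12345 gives __disp_l = 0x345 and __disp_h = 0x12000,
 * so "__disp_h >> 4" places DH = 0x12 in the top byte of op2. The
 * usable range is the +-512 KiB checked by is_valid_ldisp() below.
 */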
227
228#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp) \
229({ \
230 _EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 | \
231 reg_high(b3) << 8, op2, disp); \
232 REG_SET_SEEN(b1); \
233 REG_SET_SEEN(b2); \
234 REG_SET_SEEN(b3); \
235})
236
237#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target) \
238({ \
239 unsigned int rel = (int)((target) - jit->prg) / 2; \
240 _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \
241 (op2) | (mask) << 12); \
242 REG_SET_SEEN(b1); \
243 REG_SET_SEEN(b2); \
244})
245
246#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target) \
247({ \
248 unsigned int rel = (int)((target) - jit->prg) / 2; \
249 _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \
250 (rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \
251 REG_SET_SEEN(b1); \
252 BUILD_BUG_ON(((unsigned long) (imm)) > 0xff); \
253})
254
255#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
256({ \
257 int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \
258 _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
259 REG_SET_SEEN(b1); \
260 REG_SET_SEEN(b2); \
261})
262
263#define EMIT6_PCREL_RILB(op, b, target) \
264({ \
265 unsigned int rel = (int)((target) - jit->prg) / 2; \
266 _EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\
267 REG_SET_SEEN(b); \
268})
269
270#define EMIT6_PCREL_RIL(op, target) \
271({ \
272 unsigned int rel = (int)((target) - jit->prg) / 2; \
273 _EMIT6((op) | rel >> 16, rel & 0xffff); \
274})
275
276#define EMIT6_PCREL_RILC(op, mask, target) \
277({ \
278 EMIT6_PCREL_RIL((op) | (mask) << 20, (target)); \
279})
280
281#define _EMIT6_IMM(op, imm) \
282({ \
283 unsigned int __imm = (imm); \
284 _EMIT6((op) | (__imm >> 16), __imm & 0xffff); \
285})
286
287#define EMIT6_IMM(op, b1, imm) \
288({ \
289 _EMIT6_IMM((op) | reg_high(b1) << 16, imm); \
290 REG_SET_SEEN(b1); \
291})
292
293#define _EMIT_CONST_U32(val) \
294({ \
295 unsigned int ret; \
296 ret = jit->lit32; \
297 if (jit->prg_buf) \
298 *(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\
299 jit->lit32 += 4; \
300 ret; \
301})
302
303#define EMIT_CONST_U32(val) \
304({ \
305 jit->seen |= SEEN_LITERAL; \
306 _EMIT_CONST_U32(val) - jit->base_ip; \
307})
308
309#define _EMIT_CONST_U64(val) \
310({ \
311 unsigned int ret; \
312 ret = jit->lit64; \
313 if (jit->prg_buf) \
314 *(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\
315 jit->lit64 += 8; \
316 ret; \
317})
318
319#define EMIT_CONST_U64(val) \
320({ \
321 jit->seen |= SEEN_LITERAL; \
322 _EMIT_CONST_U64(val) - jit->base_ip; \
323})
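/*
 * For illustration: EMIT_CONST_U32()/EMIT_CONST_U64() append a value to
 * the 32-bit or 64-bit literal pool that is placed behind the generated
 * code (see bpf_jit_prog()) and return its offset relative to base_ip,
 * so callers can address it off the literal pool register %l (r11),
 * e.g. "ng %dst,<d(imm)>(%l)". During the sizing passes prg_buf is NULL
 * and only the lit32/lit64 counters advance.
 */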
324
325#define EMIT_ZERO(b1) \
326({ \
327 if (!fp->aux->verifier_zext) { \
328 /* llgfr %dst,%dst (zero extend to 64 bit) */ \
329 EMIT4(0xb9160000, b1, b1); \
330 REG_SET_SEEN(b1); \
331 } \
332})
333
334/*
335 * Return whether this is the first pass. The first pass is special, since we
336 * don't know any sizes yet, and thus must be conservative.
337 */
338static bool is_first_pass(struct bpf_jit *jit)
339{
340 return jit->size == 0;
341}
342
343/*
344 * Return whether this is the code generation pass. The code generation pass is
345 * special, since we should change as little as possible.
346 */
347static bool is_codegen_pass(struct bpf_jit *jit)
348{
349 return jit->prg_buf;
350}
351
352/*
353 * Return whether "rel" can be encoded as a short PC-relative offset
354 */
355static bool is_valid_rel(int rel)
356{
357 return rel >= -65536 && rel <= 65534;
358}
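/*
 * For illustration: the short branch formats used here (brc, crj, clrj,
 * clgrj, clij, ...) encode a signed 16-bit halfword offset, i.e.
 * -32768..32767 halfwords = -65536..65534 bytes, which is exactly the
 * range accepted above. Instruction sizes are always even on s390, so
 * only the byte range needs checking.
 */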
359
360/*
361 * Return whether "off" can be reached using a short PC-relative offset
362 */
363static bool can_use_rel(struct bpf_jit *jit, int off)
364{
365 return is_valid_rel(off - jit->prg);
366}
367
368/*
369 * Return whether given displacement can be encoded using
370 * Long-Displacement Facility
371 */
372static bool is_valid_ldisp(int disp)
373{
374 return disp >= -524288 && disp <= 524287;
375}
376
377/*
378 * Return whether the next 32-bit literal pool entry can be referenced using
379 * Long-Displacement Facility
380 */
381static bool can_use_ldisp_for_lit32(struct bpf_jit *jit)
382{
383 return is_valid_ldisp(jit->lit32 - jit->base_ip);
384}
385
386/*
387 * Return whether the next 64-bit literal pool entry can be referenced using
388 * Long-Displacement Facility
389 */
390static bool can_use_ldisp_for_lit64(struct bpf_jit *jit)
391{
392 return is_valid_ldisp(jit->lit64 - jit->base_ip);
393}
394
395/*
396 * Fill whole space with illegal instructions
397 */
398static void jit_fill_hole(void *area, unsigned int size)
399{
400 memset(area, 0, size);
401}
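/*
 * Note: a zero-filled hole consists of 0x0000 halfwords; opcode 0x00 is
 * not a valid s390 operation code, so any stray execution of the hole
 * raises an operation exception instead of running random code.
 */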
402
403/*
404 * Save registers from "rs" (register start) to "re" (register end) on stack
405 */
406static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
407{
408 u32 off = STK_OFF_R6 + (rs - 6) * 8;
409
410 if (rs == re)
411 /* stg %rs,off(%r15) */
412 _EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
413 else
414 /* stmg %rs,%re,off(%r15) */
415 _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
416}
417
418/*
419 * Restore registers "rs" (register start) to "re" (register end) from the stack
420 */
421static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
422{
423 u32 off = STK_OFF_R6 + (rs - 6) * 8;
424
425 if (jit->seen & SEEN_STACK)
426 off += STK_OFF + stack_depth;
427
428 if (rs == re)
429 /* lg %rs,off(%r15) */
430 _EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
431 else
432 /* lmg %rs,%re,off(%r15) */
433 _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
434}
435
436/*
437 * Return first seen register (from start)
438 */
439static int get_start(struct bpf_jit *jit, int start)
440{
441 int i;
442
443 for (i = start; i <= 15; i++) {
444 if (jit->seen_reg[i])
445 return i;
446 }
447 return 0;
448}
449
450/*
451 * Return last seen register (from start) (gap >= 2)
452 */
453static int get_end(struct bpf_jit *jit, int start)
454{
455 int i;
456
457 for (i = start; i < 15; i++) {
458 if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
459 return i - 1;
460 }
461 return jit->seen_reg[15] ? 15 : 14;
462}
463
464#define REGS_SAVE 1
465#define REGS_RESTORE 0
466/*
467 * Save and restore clobbered registers (6-15) on stack.
468 * We save/restore registers in chunks; chunks are separated by gaps of at least 2 unused registers.
469 */
470static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
471{
472 const int last = 15, save_restore_size = 6;
473 int re = 6, rs;
474
475 if (is_first_pass(jit)) {
476 /*
477 * We don't know yet which registers are used. Reserve space
478 * conservatively.
479 */
480 jit->prg += (last - re + 1) * save_restore_size;
481 return;
482 }
483
484 do {
485 rs = get_start(jit, re);
486 if (!rs)
487 break;
488 re = get_end(jit, rs + 1);
489 if (op == REGS_SAVE)
490 save_regs(jit, rs, re);
491 else
492 restore_regs(jit, rs, re, stack_depth);
493 re++;
494 } while (re <= last);
495}
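/*
 * Worked example (illustrative): assume only %r6, %r8 and %r15 were
 * marked as seen. get_start(6) = 6 and get_end(7) = 8, so the first
 * chunk is %r6-%r8 ("stmg %r6,%r8,..."); the unused %r7 is included
 * because splitting around a single-register gap would cost more than
 * saving it. The next chunk starts at get_start(9) = 15 and is saved
 * with a single "stg %r15,...".
 */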
496
497static void bpf_skip(struct bpf_jit *jit, int size)
498{
499 if (size >= 6 && !is_valid_rel(size)) {
500 /* brcl 0xf,size */
501 EMIT6_PCREL_RIL(0xc0f4000000, size);
502 size -= 6;
503 } else if (size >= 4 && is_valid_rel(size)) {
504 /* brc 0xf,size */
505 EMIT4_PCREL(0xa7f40000, size);
506 size -= 4;
507 }
508 while (size >= 2) {
509 /* bcr 0,%0 */
510 _EMIT2(0x0700);
511 size -= 2;
512 }
513}
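/*
 * Worked example (illustrative): bpf_skip(jit, 10) within short branch
 * range emits "brc 0xf,.+10" (4 bytes), which jumps straight to the end
 * of the gap, followed by three 2-byte "bcr 0,%r0" no-ops to pad out
 * the remaining 6 bytes.
 */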
514
515/*
516 * PLT for hotpatchable calls. The calling convention is the same as for the
517 * ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
518 */
519struct bpf_plt {
520 char code[16];
521 void *ret;
522 void *target;
523} __packed;
524extern const struct bpf_plt bpf_plt;
525asm(
526 ".pushsection .rodata\n"
527 " .balign 8\n"
528 "bpf_plt:\n"
529 " lgrl %r0,bpf_plt_ret\n"
530 " lgrl %r1,bpf_plt_target\n"
531 " br %r1\n"
532 " .balign 8\n"
533 "bpf_plt_ret: .quad 0\n"
534 "bpf_plt_target: .quad 0\n"
535 " .popsection\n"
536);
537
538static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
539{
540 memcpy(plt, &bpf_plt, sizeof(*plt));
541 plt->ret = ret;
542 plt->target = target;
543}
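/*
 * Layout note (illustrative): the template above occupies
 * 6 (lgrl) + 6 (lgrl) + 2 (br) = 14 bytes, padded to 16 by ".balign 8",
 * which is what code[16] mirrors; ret and target overlay the
 * bpf_plt_ret and bpf_plt_target quadwords that the two lgrl
 * instructions load from.
 */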
544
545/*
546 * Emit function prologue
547 *
548 * Save registers and create stack frame if necessary.
549 * See stack frame layout description in "bpf_jit.h"!
550 */
551static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp,
552 u32 stack_depth)
553{
554 /* No-op for hotpatching */
555 /* brcl 0,prologue_plt */
556 EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt);
557 jit->prologue_plt_ret = jit->prg;
558
559 if (!bpf_is_subprog(fp)) {
560 /* Initialize the tail call counter in the main program. */
561 /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
562 _EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
563 } else {
564 /*
565 * Skip the tail call counter initialization in subprograms.
566 * Insert nops in order to have tail_call_start at a
567 * predictable offset.
568 */
569 bpf_skip(jit, 6);
570 }
571 /* Tail calls have to skip above initialization */
572 jit->tail_call_start = jit->prg;
573 /* Save registers */
574 save_restore_regs(jit, REGS_SAVE, stack_depth);
575 /* Setup literal pool */
576 if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
577 if (!is_first_pass(jit) &&
578 is_valid_ldisp(jit->size - (jit->prg + 2))) {
579 /* basr %l,0 */
580 EMIT2(0x0d00, REG_L, REG_0);
581 jit->base_ip = jit->prg;
582 } else {
583 /* larl %l,lit32_start */
584 EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start);
585 jit->base_ip = jit->lit32_start;
586 }
587 }
588 /* Setup stack and backchain */
589 if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
590 if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
591 /* lgr %w1,%r15 (backchain) */
592 EMIT4(0xb9040000, REG_W1, REG_15);
593 /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
594 EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
595		/* aghi %r15,-(STK_OFF + stack_depth) */
596 EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
597 if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
598 /* stg %w1,152(%r15) (backchain) */
599 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
600 REG_15, 152);
601 }
602}
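/*
 * Rough sketch of the emitted prologue for a typical main program that
 * uses the stack, literals and helper calls (illustrative only; the
 * exact registers and offsets depend on the seen flags collected in
 * earlier passes):
 *
 *	brcl	0,<prologue_plt>		# hotpatch no-op
 *	xc	STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15)
 *  tail_call_start:
 *	stmg	%r6,%r15,...(%r15)		# save used registers in chunks
 *	larl	%r11,<lit32_start>		# literal pool base
 *	lgr	%r1,%r15			# remember old stack pointer
 *	la	%r13,STK_160_UNUSED(%r15)	# BPF frame pointer
 *	aghi	%r15,-(STK_OFF+stack_depth)	# allocate stack frame
 *	stg	%r1,152(%r15)			# store backchain
 */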
603
604/*
605 * Emit an expoline for a jump that follows
606 */
607static void emit_expoline(struct bpf_jit *jit)
608{
609 /* exrl %r0,.+10 */
610 EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
611 /* j . */
612 EMIT4_PCREL(0xa7f40000, 0);
613}
614
615/*
616 * Emit __s390_indirect_jump_r1 thunk if necessary
617 */
618static void emit_r1_thunk(struct bpf_jit *jit)
619{
620 if (nospec_uses_trampoline()) {
621 jit->r1_thunk_ip = jit->prg;
622 emit_expoline(jit);
623 /* br %r1 */
624 _EMIT2(0x07f1);
625 }
626}
627
628/*
629 * Call r1 either directly or via __s390_indirect_jump_r1 thunk
630 */
631static void call_r1(struct bpf_jit *jit)
632{
633 if (nospec_uses_trampoline())
634 /* brasl %r14,__s390_indirect_jump_r1 */
635 EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
636 else
637 /* basr %r14,%r1 */
638 EMIT2(0x0d00, REG_14, REG_1);
639}
640
641/*
642 * Function epilogue
643 */
644static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
645{
646 jit->exit_ip = jit->prg;
647 /* Load exit code: lgr %r2,%b0 */
648 EMIT4(0xb9040000, REG_2, BPF_REG_0);
649 /* Restore registers */
650 save_restore_regs(jit, REGS_RESTORE, stack_depth);
651 if (nospec_uses_trampoline()) {
652 jit->r14_thunk_ip = jit->prg;
653 /* Generate __s390_indirect_jump_r14 thunk */
654 emit_expoline(jit);
655 }
656 /* br %r14 */
657 _EMIT2(0x07fe);
658
659 if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
660 emit_r1_thunk(jit);
661
662 jit->prg = ALIGN(jit->prg, 8);
663 jit->prologue_plt = jit->prg;
664 if (jit->prg_buf)
665 bpf_jit_plt((struct bpf_plt *)(jit->prg_buf + jit->prg),
666 jit->prg_buf + jit->prologue_plt_ret, NULL);
667 jit->prg += sizeof(struct bpf_plt);
668}
669
670static int get_probe_mem_regno(const u8 *insn)
671{
672 /*
673 * insn must point to llgc, llgh, llgf, lg, lgb, lgh or lgf, which have
674 * destination register at the same position.
675 */
676 if (insn[0] != 0xe3) /* common prefix */
677 return -1;
678 if (insn[5] != 0x90 && /* llgc */
679 insn[5] != 0x91 && /* llgh */
680 insn[5] != 0x16 && /* llgf */
681 insn[5] != 0x04 && /* lg */
682 insn[5] != 0x77 && /* lgb */
683 insn[5] != 0x15 && /* lgh */
684 insn[5] != 0x14) /* lgf */
685 return -1;
686 return insn[1] >> 4;
687}
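/*
 * Note: all of the instructions listed above are RXY-format loads with
 * opcode byte 0xe3, the first-operand register R1 in the high nibble of
 * byte 1 and the sub-opcode in byte 5 - hence the checks on insn[0] and
 * insn[5] and the "insn[1] >> 4" extraction of the destination register.
 */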
688
689bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
690{
691 regs->psw.addr = extable_fixup(x);
692 regs->gprs[x->data] = 0;
693 return true;
694}
695
696static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
697 int probe_prg, int nop_prg)
698{
699 struct exception_table_entry *ex;
700 int reg, prg;
701 s64 delta;
702 u8 *insn;
703 int i;
704
705 if (!fp->aux->extable)
706 /* Do nothing during early JIT passes. */
707 return 0;
708 insn = jit->prg_buf + probe_prg;
709 reg = get_probe_mem_regno(insn);
710 if (WARN_ON_ONCE(reg < 0))
711 /* JIT bug - unexpected probe instruction. */
712 return -1;
713 if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg))
714 /* JIT bug - gap between probe and nop instructions. */
715 return -1;
716 for (i = 0; i < 2; i++) {
717 if (WARN_ON_ONCE(jit->excnt >= fp->aux->num_exentries))
718 /* Verifier bug - not enough entries. */
719 return -1;
720 ex = &fp->aux->extable[jit->excnt];
721 /* Add extable entries for probe and nop instructions. */
722 prg = i == 0 ? probe_prg : nop_prg;
723 delta = jit->prg_buf + prg - (u8 *)&ex->insn;
724 if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
725 /* JIT bug - code and extable must be close. */
726 return -1;
727 ex->insn = delta;
728 /*
729 * Always land on the nop. Note that extable infrastructure
730 * ignores fixup field, it is handled by ex_handler_bpf().
731 */
732 delta = jit->prg_buf + nop_prg - (u8 *)&ex->fixup;
733 if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
734 /* JIT bug - landing pad and extable must be close. */
735 return -1;
736 ex->fixup = delta;
737 ex->type = EX_TYPE_BPF;
738 ex->data = reg;
739 jit->excnt++;
740 }
741 return 0;
742}
743
744/*
745 * Sign-extend the register if necessary
746 */
747static int sign_extend(struct bpf_jit *jit, int r, u8 size, u8 flags)
748{
749 if (!(flags & BTF_FMODEL_SIGNED_ARG))
750 return 0;
751
752 switch (size) {
753 case 1:
754 /* lgbr %r,%r */
755 EMIT4(0xb9060000, r, r);
756 return 0;
757 case 2:
758 /* lghr %r,%r */
759 EMIT4(0xb9070000, r, r);
760 return 0;
761 case 4:
762 /* lgfr %r,%r */
763 EMIT4(0xb9140000, r, r);
764 return 0;
765 case 8:
766 return 0;
767 default:
768 return -1;
769 }
770}
771
772/*
773 * Compile one eBPF instruction into s390x code
774 *
775 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
776 * stack space for the large switch statement.
777 */
778static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
779 int i, bool extra_pass, u32 stack_depth)
780{
781 struct bpf_insn *insn = &fp->insnsi[i];
782 s32 branch_oc_off = insn->off;
783 u32 dst_reg = insn->dst_reg;
784 u32 src_reg = insn->src_reg;
785 int last, insn_count = 1;
786 u32 *addrs = jit->addrs;
787 s32 imm = insn->imm;
788 s16 off = insn->off;
789 int probe_prg = -1;
790 unsigned int mask;
791 int nop_prg;
792 int err;
793
794 if (BPF_CLASS(insn->code) == BPF_LDX &&
795 (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
796 BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
797 probe_prg = jit->prg;
798
799 switch (insn->code) {
800 /*
801 * BPF_MOV
802 */
803 case BPF_ALU | BPF_MOV | BPF_X:
804 switch (insn->off) {
805 case 0: /* DST = (u32) SRC */
806 /* llgfr %dst,%src */
807 EMIT4(0xb9160000, dst_reg, src_reg);
808 if (insn_is_zext(&insn[1]))
809 insn_count = 2;
810 break;
811 case 8: /* DST = (u32)(s8) SRC */
812 /* lbr %dst,%src */
813 EMIT4(0xb9260000, dst_reg, src_reg);
814 /* llgfr %dst,%dst */
815 EMIT4(0xb9160000, dst_reg, dst_reg);
816 break;
817 case 16: /* DST = (u32)(s16) SRC */
818 /* lhr %dst,%src */
819 EMIT4(0xb9270000, dst_reg, src_reg);
820 /* llgfr %dst,%dst */
821 EMIT4(0xb9160000, dst_reg, dst_reg);
822 break;
823 }
824 break;
825 case BPF_ALU64 | BPF_MOV | BPF_X:
826 switch (insn->off) {
827 case 0: /* DST = SRC */
828 /* lgr %dst,%src */
829 EMIT4(0xb9040000, dst_reg, src_reg);
830 break;
831 case 8: /* DST = (s8) SRC */
832 /* lgbr %dst,%src */
833 EMIT4(0xb9060000, dst_reg, src_reg);
834 break;
835 case 16: /* DST = (s16) SRC */
836 /* lghr %dst,%src */
837 EMIT4(0xb9070000, dst_reg, src_reg);
838 break;
839 case 32: /* DST = (s32) SRC */
840 /* lgfr %dst,%src */
841 EMIT4(0xb9140000, dst_reg, src_reg);
842 break;
843 }
844 break;
845 case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
846 /* llilf %dst,imm */
847 EMIT6_IMM(0xc00f0000, dst_reg, imm);
848 if (insn_is_zext(&insn[1]))
849 insn_count = 2;
850 break;
851 case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
852 /* lgfi %dst,imm */
853 EMIT6_IMM(0xc0010000, dst_reg, imm);
854 break;
855 /*
856 * BPF_LD 64
857 */
858 case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
859 {
860 /* 16 byte instruction that uses two 'struct bpf_insn' */
861 u64 imm64;
862
863 imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
864 /* lgrl %dst,imm */
865 EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64));
866 insn_count = 2;
867 break;
868 }
869 /*
870 * BPF_ADD
871 */
872 case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
873 /* ar %dst,%src */
874 EMIT2(0x1a00, dst_reg, src_reg);
875 EMIT_ZERO(dst_reg);
876 break;
877 case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
878 /* agr %dst,%src */
879 EMIT4(0xb9080000, dst_reg, src_reg);
880 break;
881 case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
882 if (imm != 0) {
883 /* alfi %dst,imm */
884 EMIT6_IMM(0xc20b0000, dst_reg, imm);
885 }
886 EMIT_ZERO(dst_reg);
887 break;
888 case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
889 if (!imm)
890 break;
891 /* agfi %dst,imm */
892 EMIT6_IMM(0xc2080000, dst_reg, imm);
893 break;
894 /*
895 * BPF_SUB
896 */
897 case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
898 /* sr %dst,%src */
899 EMIT2(0x1b00, dst_reg, src_reg);
900 EMIT_ZERO(dst_reg);
901 break;
902 case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
903 /* sgr %dst,%src */
904 EMIT4(0xb9090000, dst_reg, src_reg);
905 break;
906 case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
907 if (imm != 0) {
908 /* alfi %dst,-imm */
909 EMIT6_IMM(0xc20b0000, dst_reg, -imm);
910 }
911 EMIT_ZERO(dst_reg);
912 break;
913 case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
914 if (!imm)
915 break;
916 if (imm == -0x80000000) {
917 /* algfi %dst,0x80000000 */
918 EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
919 } else {
920 /* agfi %dst,-imm */
921 EMIT6_IMM(0xc2080000, dst_reg, -imm);
922 }
923 break;
924 /*
925 * BPF_MUL
926 */
927 case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
928 /* msr %dst,%src */
929 EMIT4(0xb2520000, dst_reg, src_reg);
930 EMIT_ZERO(dst_reg);
931 break;
932 case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
933 /* msgr %dst,%src */
934 EMIT4(0xb90c0000, dst_reg, src_reg);
935 break;
936 case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
937 if (imm != 1) {
938			/* msfi %dst,imm */
939 EMIT6_IMM(0xc2010000, dst_reg, imm);
940 }
941 EMIT_ZERO(dst_reg);
942 break;
943 case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
944 if (imm == 1)
945 break;
946 /* msgfi %dst,imm */
947 EMIT6_IMM(0xc2000000, dst_reg, imm);
948 break;
949 /*
950 * BPF_DIV / BPF_MOD
951 */
952 case BPF_ALU | BPF_DIV | BPF_X:
953 case BPF_ALU | BPF_MOD | BPF_X:
954 {
955 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
956
957 switch (off) {
958 case 0: /* dst = (u32) dst {/,%} (u32) src */
959 /* xr %w0,%w0 */
960 EMIT2(0x1700, REG_W0, REG_W0);
961 /* lr %w1,%dst */
962 EMIT2(0x1800, REG_W1, dst_reg);
963 /* dlr %w0,%src */
964 EMIT4(0xb9970000, REG_W0, src_reg);
965 break;
966 case 1: /* dst = (u32) ((s32) dst {/,%} (s32) src) */
967 /* lgfr %r1,%dst */
968 EMIT4(0xb9140000, REG_W1, dst_reg);
969 /* dsgfr %r0,%src */
970 EMIT4(0xb91d0000, REG_W0, src_reg);
971 break;
972 }
973 /* llgfr %dst,%rc */
974 EMIT4(0xb9160000, dst_reg, rc_reg);
975 if (insn_is_zext(&insn[1]))
976 insn_count = 2;
977 break;
978 }
979 case BPF_ALU64 | BPF_DIV | BPF_X:
980 case BPF_ALU64 | BPF_MOD | BPF_X:
981 {
982 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
983
984 switch (off) {
985 case 0: /* dst = dst {/,%} src */
986 /* lghi %w0,0 */
987 EMIT4_IMM(0xa7090000, REG_W0, 0);
988 /* lgr %w1,%dst */
989 EMIT4(0xb9040000, REG_W1, dst_reg);
990 /* dlgr %w0,%src */
991 EMIT4(0xb9870000, REG_W0, src_reg);
992 break;
993 case 1: /* dst = (s64) dst {/,%} (s64) src */
994 /* lgr %w1,%dst */
995 EMIT4(0xb9040000, REG_W1, dst_reg);
996 /* dsgr %w0,%src */
997 EMIT4(0xb90d0000, REG_W0, src_reg);
998 break;
999 }
1000 /* lgr %dst,%rc */
1001 EMIT4(0xb9040000, dst_reg, rc_reg);
1002 break;
1003 }
1004 case BPF_ALU | BPF_DIV | BPF_K:
1005 case BPF_ALU | BPF_MOD | BPF_K:
1006 {
1007 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
1008
1009 if (imm == 1) {
1010 if (BPF_OP(insn->code) == BPF_MOD)
1011 /* lghi %dst,0 */
1012 EMIT4_IMM(0xa7090000, dst_reg, 0);
1013 else
1014 EMIT_ZERO(dst_reg);
1015 break;
1016 }
1017 if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) {
1018 switch (off) {
1019 case 0: /* dst = (u32) dst {/,%} (u32) imm */
1020 /* xr %w0,%w0 */
1021 EMIT2(0x1700, REG_W0, REG_W0);
1022 /* lr %w1,%dst */
1023 EMIT2(0x1800, REG_W1, dst_reg);
1024 /* dl %w0,<d(imm)>(%l) */
1025 EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0,
1026 REG_L, EMIT_CONST_U32(imm));
1027 break;
1028 case 1: /* dst = (s32) dst {/,%} (s32) imm */
1029 /* lgfr %r1,%dst */
1030 EMIT4(0xb9140000, REG_W1, dst_reg);
1031 /* dsgf %r0,<d(imm)>(%l) */
1032 EMIT6_DISP_LH(0xe3000000, 0x001d, REG_W0, REG_0,
1033 REG_L, EMIT_CONST_U32(imm));
1034 break;
1035 }
1036 } else {
1037 switch (off) {
1038 case 0: /* dst = (u32) dst {/,%} (u32) imm */
1039 /* xr %w0,%w0 */
1040 EMIT2(0x1700, REG_W0, REG_W0);
1041 /* lr %w1,%dst */
1042 EMIT2(0x1800, REG_W1, dst_reg);
1043 /* lrl %dst,imm */
1044 EMIT6_PCREL_RILB(0xc40d0000, dst_reg,
1045 _EMIT_CONST_U32(imm));
1046 jit->seen |= SEEN_LITERAL;
1047 /* dlr %w0,%dst */
1048 EMIT4(0xb9970000, REG_W0, dst_reg);
1049 break;
1050 case 1: /* dst = (s32) dst {/,%} (s32) imm */
1051 /* lgfr %w1,%dst */
1052 EMIT4(0xb9140000, REG_W1, dst_reg);
1053 /* lgfrl %dst,imm */
1054 EMIT6_PCREL_RILB(0xc40c0000, dst_reg,
1055 _EMIT_CONST_U32(imm));
1056 jit->seen |= SEEN_LITERAL;
1057 /* dsgr %w0,%dst */
1058 EMIT4(0xb90d0000, REG_W0, dst_reg);
1059 break;
1060 }
1061 }
1062 /* llgfr %dst,%rc */
1063 EMIT4(0xb9160000, dst_reg, rc_reg);
1064 if (insn_is_zext(&insn[1]))
1065 insn_count = 2;
1066 break;
1067 }
1068 case BPF_ALU64 | BPF_DIV | BPF_K:
1069 case BPF_ALU64 | BPF_MOD | BPF_K:
1070 {
1071 int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
1072
1073 if (imm == 1) {
1074 if (BPF_OP(insn->code) == BPF_MOD)
1075				/* lghi %dst,0 */
1076 EMIT4_IMM(0xa7090000, dst_reg, 0);
1077 break;
1078 }
1079 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
1080 switch (off) {
1081 case 0: /* dst = dst {/,%} imm */
1082 /* lghi %w0,0 */
1083 EMIT4_IMM(0xa7090000, REG_W0, 0);
1084 /* lgr %w1,%dst */
1085 EMIT4(0xb9040000, REG_W1, dst_reg);
1086 /* dlg %w0,<d(imm)>(%l) */
1087 EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0,
1088 REG_L, EMIT_CONST_U64(imm));
1089 break;
1090 case 1: /* dst = (s64) dst {/,%} (s64) imm */
1091 /* lgr %w1,%dst */
1092 EMIT4(0xb9040000, REG_W1, dst_reg);
1093 /* dsg %w0,<d(imm)>(%l) */
1094 EMIT6_DISP_LH(0xe3000000, 0x000d, REG_W0, REG_0,
1095 REG_L, EMIT_CONST_U64(imm));
1096 break;
1097 }
1098 } else {
1099 switch (off) {
1100 case 0: /* dst = dst {/,%} imm */
1101 /* lghi %w0,0 */
1102 EMIT4_IMM(0xa7090000, REG_W0, 0);
1103 /* lgr %w1,%dst */
1104 EMIT4(0xb9040000, REG_W1, dst_reg);
1105 /* lgrl %dst,imm */
1106 EMIT6_PCREL_RILB(0xc4080000, dst_reg,
1107 _EMIT_CONST_U64(imm));
1108 jit->seen |= SEEN_LITERAL;
1109 /* dlgr %w0,%dst */
1110 EMIT4(0xb9870000, REG_W0, dst_reg);
1111 break;
1112 case 1: /* dst = (s64) dst {/,%} (s64) imm */
1113 /* lgr %w1,%dst */
1114 EMIT4(0xb9040000, REG_W1, dst_reg);
1115 /* lgrl %dst,imm */
1116 EMIT6_PCREL_RILB(0xc4080000, dst_reg,
1117 _EMIT_CONST_U64(imm));
1118 jit->seen |= SEEN_LITERAL;
1119 /* dsgr %w0,%dst */
1120 EMIT4(0xb90d0000, REG_W0, dst_reg);
1121 break;
1122 }
1123 }
1124 /* lgr %dst,%rc */
1125 EMIT4(0xb9040000, dst_reg, rc_reg);
1126 break;
1127 }
1128 /*
1129 * BPF_AND
1130 */
1131 case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
1132 /* nr %dst,%src */
1133 EMIT2(0x1400, dst_reg, src_reg);
1134 EMIT_ZERO(dst_reg);
1135 break;
1136 case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
1137 /* ngr %dst,%src */
1138 EMIT4(0xb9800000, dst_reg, src_reg);
1139 break;
1140 case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
1141 /* nilf %dst,imm */
1142 EMIT6_IMM(0xc00b0000, dst_reg, imm);
1143 EMIT_ZERO(dst_reg);
1144 break;
1145 case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
1146 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
1147 /* ng %dst,<d(imm)>(%l) */
1148 EMIT6_DISP_LH(0xe3000000, 0x0080,
1149 dst_reg, REG_0, REG_L,
1150 EMIT_CONST_U64(imm));
1151 } else {
1152 /* lgrl %w0,imm */
1153 EMIT6_PCREL_RILB(0xc4080000, REG_W0,
1154 _EMIT_CONST_U64(imm));
1155 jit->seen |= SEEN_LITERAL;
1156 /* ngr %dst,%w0 */
1157 EMIT4(0xb9800000, dst_reg, REG_W0);
1158 }
1159 break;
1160 /*
1161 * BPF_OR
1162 */
1163 case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
1164 /* or %dst,%src */
1165 EMIT2(0x1600, dst_reg, src_reg);
1166 EMIT_ZERO(dst_reg);
1167 break;
1168 case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
1169 /* ogr %dst,%src */
1170 EMIT4(0xb9810000, dst_reg, src_reg);
1171 break;
1172 case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
1173 /* oilf %dst,imm */
1174 EMIT6_IMM(0xc00d0000, dst_reg, imm);
1175 EMIT_ZERO(dst_reg);
1176 break;
1177 case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
1178 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
1179 /* og %dst,<d(imm)>(%l) */
1180 EMIT6_DISP_LH(0xe3000000, 0x0081,
1181 dst_reg, REG_0, REG_L,
1182 EMIT_CONST_U64(imm));
1183 } else {
1184 /* lgrl %w0,imm */
1185 EMIT6_PCREL_RILB(0xc4080000, REG_W0,
1186 _EMIT_CONST_U64(imm));
1187 jit->seen |= SEEN_LITERAL;
1188 /* ogr %dst,%w0 */
1189 EMIT4(0xb9810000, dst_reg, REG_W0);
1190 }
1191 break;
1192 /*
1193 * BPF_XOR
1194 */
1195 case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
1196 /* xr %dst,%src */
1197 EMIT2(0x1700, dst_reg, src_reg);
1198 EMIT_ZERO(dst_reg);
1199 break;
1200 case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
1201 /* xgr %dst,%src */
1202 EMIT4(0xb9820000, dst_reg, src_reg);
1203 break;
1204 case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
1205 if (imm != 0) {
1206 /* xilf %dst,imm */
1207 EMIT6_IMM(0xc0070000, dst_reg, imm);
1208 }
1209 EMIT_ZERO(dst_reg);
1210 break;
1211 case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
1212 if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
1213 /* xg %dst,<d(imm)>(%l) */
1214 EMIT6_DISP_LH(0xe3000000, 0x0082,
1215 dst_reg, REG_0, REG_L,
1216 EMIT_CONST_U64(imm));
1217 } else {
1218 /* lgrl %w0,imm */
1219 EMIT6_PCREL_RILB(0xc4080000, REG_W0,
1220 _EMIT_CONST_U64(imm));
1221 jit->seen |= SEEN_LITERAL;
1222 /* xgr %dst,%w0 */
1223 EMIT4(0xb9820000, dst_reg, REG_W0);
1224 }
1225 break;
1226 /*
1227 * BPF_LSH
1228 */
1229 case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
1230 /* sll %dst,0(%src) */
1231 EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
1232 EMIT_ZERO(dst_reg);
1233 break;
1234 case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
1235 /* sllg %dst,%dst,0(%src) */
1236 EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
1237 break;
1238 case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
1239 if (imm != 0) {
1240 /* sll %dst,imm(%r0) */
1241 EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
1242 }
1243 EMIT_ZERO(dst_reg);
1244 break;
1245 case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
1246 if (imm == 0)
1247 break;
1248 /* sllg %dst,%dst,imm(%r0) */
1249 EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
1250 break;
1251 /*
1252 * BPF_RSH
1253 */
1254 case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
1255 /* srl %dst,0(%src) */
1256 EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
1257 EMIT_ZERO(dst_reg);
1258 break;
1259 case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
1260 /* srlg %dst,%dst,0(%src) */
1261 EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
1262 break;
1263 case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
1264 if (imm != 0) {
1265 /* srl %dst,imm(%r0) */
1266 EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
1267 }
1268 EMIT_ZERO(dst_reg);
1269 break;
1270 case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
1271 if (imm == 0)
1272 break;
1273 /* srlg %dst,%dst,imm(%r0) */
1274 EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
1275 break;
1276 /*
1277 * BPF_ARSH
1278 */
1279 case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */
1280 /* sra %dst,%dst,0(%src) */
1281 EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0);
1282 EMIT_ZERO(dst_reg);
1283 break;
1284 case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
1285 /* srag %dst,%dst,0(%src) */
1286 EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
1287 break;
1288	case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst) >>= imm */
1289 if (imm != 0) {
1290 /* sra %dst,imm(%r0) */
1291 EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
1292 }
1293 EMIT_ZERO(dst_reg);
1294 break;
1295 case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
1296 if (imm == 0)
1297 break;
1298 /* srag %dst,%dst,imm(%r0) */
1299 EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
1300 break;
1301 /*
1302 * BPF_NEG
1303 */
1304 case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
1305 /* lcr %dst,%dst */
1306 EMIT2(0x1300, dst_reg, dst_reg);
1307 EMIT_ZERO(dst_reg);
1308 break;
1309 case BPF_ALU64 | BPF_NEG: /* dst = -dst */
1310 /* lcgr %dst,%dst */
1311 EMIT4(0xb9030000, dst_reg, dst_reg);
1312 break;
1313 /*
1314 * BPF_FROM_BE/LE
1315 */
1316 case BPF_ALU | BPF_END | BPF_FROM_BE:
1317 /* s390 is big endian, therefore only clear high order bytes */
1318 switch (imm) {
1319 case 16: /* dst = (u16) cpu_to_be16(dst) */
1320 /* llghr %dst,%dst */
1321 EMIT4(0xb9850000, dst_reg, dst_reg);
1322 if (insn_is_zext(&insn[1]))
1323 insn_count = 2;
1324 break;
1325 case 32: /* dst = (u32) cpu_to_be32(dst) */
1326 if (!fp->aux->verifier_zext)
1327 /* llgfr %dst,%dst */
1328 EMIT4(0xb9160000, dst_reg, dst_reg);
1329 break;
1330 case 64: /* dst = (u64) cpu_to_be64(dst) */
1331 break;
1332 }
1333 break;
1334 case BPF_ALU | BPF_END | BPF_FROM_LE:
1335 case BPF_ALU64 | BPF_END | BPF_FROM_LE:
1336 switch (imm) {
1337 case 16: /* dst = (u16) cpu_to_le16(dst) */
1338 /* lrvr %dst,%dst */
1339 EMIT4(0xb91f0000, dst_reg, dst_reg);
1340 /* srl %dst,16(%r0) */
1341 EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
1342 /* llghr %dst,%dst */
1343 EMIT4(0xb9850000, dst_reg, dst_reg);
1344 if (insn_is_zext(&insn[1]))
1345 insn_count = 2;
1346 break;
1347 case 32: /* dst = (u32) cpu_to_le32(dst) */
1348 /* lrvr %dst,%dst */
1349 EMIT4(0xb91f0000, dst_reg, dst_reg);
1350 if (!fp->aux->verifier_zext)
1351 /* llgfr %dst,%dst */
1352 EMIT4(0xb9160000, dst_reg, dst_reg);
1353 break;
1354 case 64: /* dst = (u64) cpu_to_le64(dst) */
1355 /* lrvgr %dst,%dst */
1356 EMIT4(0xb90f0000, dst_reg, dst_reg);
1357 break;
1358 }
1359 break;
1360 /*
1361 * BPF_NOSPEC (speculation barrier)
1362 */
1363 case BPF_ST | BPF_NOSPEC:
1364 break;
1365 /*
1366 * BPF_ST(X)
1367 */
1368 case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
1369 /* stcy %src,off(%dst) */
1370 EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
1371 jit->seen |= SEEN_MEM;
1372 break;
1373	case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
1374 /* sthy %src,off(%dst) */
1375 EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
1376 jit->seen |= SEEN_MEM;
1377 break;
1378 case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
1379 /* sty %src,off(%dst) */
1380 EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
1381 jit->seen |= SEEN_MEM;
1382 break;
1383	case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
1384 /* stg %src,off(%dst) */
1385 EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
1386 jit->seen |= SEEN_MEM;
1387 break;
1388 case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
1389 /* lhi %w0,imm */
1390 EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
1391 /* stcy %w0,off(dst) */
1392 EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
1393 jit->seen |= SEEN_MEM;
1394 break;
1395	case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
1396 /* lhi %w0,imm */
1397 EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
1398 /* sthy %w0,off(dst) */
1399 EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
1400 jit->seen |= SEEN_MEM;
1401 break;
1402 case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
1403 /* llilf %w0,imm */
1404 EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
1405 /* sty %w0,off(%dst) */
1406 EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
1407 jit->seen |= SEEN_MEM;
1408 break;
1409 case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
1410 /* lgfi %w0,imm */
1411 EMIT6_IMM(0xc0010000, REG_W0, imm);
1412 /* stg %w0,off(%dst) */
1413 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
1414 jit->seen |= SEEN_MEM;
1415 break;
1416 /*
1417 * BPF_ATOMIC
1418 */
1419 case BPF_STX | BPF_ATOMIC | BPF_DW:
1420 case BPF_STX | BPF_ATOMIC | BPF_W:
1421 {
1422 bool is32 = BPF_SIZE(insn->code) == BPF_W;
1423
1424 switch (insn->imm) {
1425/* {op32|op64} {%w0|%src},%src,off(%dst) */
1426#define EMIT_ATOMIC(op32, op64) do { \
1427 EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \
1428 (insn->imm & BPF_FETCH) ? src_reg : REG_W0, \
1429 src_reg, dst_reg, off); \
1430 if (insn->imm & BPF_FETCH) { \
1431 /* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */ \
1432 _EMIT2(0x07e0); \
1433 if (is32) \
1434 EMIT_ZERO(src_reg); \
1435 } \
1436} while (0)
1437 case BPF_ADD:
1438 case BPF_ADD | BPF_FETCH:
1439 /* {laal|laalg} */
1440 EMIT_ATOMIC(0x00fa, 0x00ea);
1441 break;
1442 case BPF_AND:
1443 case BPF_AND | BPF_FETCH:
1444 /* {lan|lang} */
1445 EMIT_ATOMIC(0x00f4, 0x00e4);
1446 break;
1447 case BPF_OR:
1448 case BPF_OR | BPF_FETCH:
1449 /* {lao|laog} */
1450 EMIT_ATOMIC(0x00f6, 0x00e6);
1451 break;
1452 case BPF_XOR:
1453 case BPF_XOR | BPF_FETCH:
1454 /* {lax|laxg} */
1455 EMIT_ATOMIC(0x00f7, 0x00e7);
1456 break;
1457#undef EMIT_ATOMIC
1458 case BPF_XCHG:
1459 /* {ly|lg} %w0,off(%dst) */
1460 EMIT6_DISP_LH(0xe3000000,
1461 is32 ? 0x0058 : 0x0004, REG_W0, REG_0,
1462 dst_reg, off);
1463 /* 0: {csy|csg} %w0,%src,off(%dst) */
1464 EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
1465 REG_W0, src_reg, dst_reg, off);
1466 /* brc 4,0b */
1467 EMIT4_PCREL_RIC(0xa7040000, 4, jit->prg - 6);
1468 /* {llgfr|lgr} %src,%w0 */
1469 EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0);
1470 if (is32 && insn_is_zext(&insn[1]))
1471 insn_count = 2;
1472 break;
1473 case BPF_CMPXCHG:
1474 /* 0: {csy|csg} %b0,%src,off(%dst) */
1475 EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
1476 BPF_REG_0, src_reg, dst_reg, off);
1477 break;
1478 default:
1479 pr_err("Unknown atomic operation %02x\n", insn->imm);
1480 return -1;
1481 }
1482
1483 jit->seen |= SEEN_MEM;
1484 break;
1485 }
1486 /*
1487 * BPF_LDX
1488 */
1489 case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
1490 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1491 /* llgc %dst,0(off,%src) */
1492 EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
1493 jit->seen |= SEEN_MEM;
1494 if (insn_is_zext(&insn[1]))
1495 insn_count = 2;
1496 break;
1497 case BPF_LDX | BPF_MEMSX | BPF_B: /* dst = *(s8 *)(ul) (src + off) */
1498 case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
1499 /* lgb %dst,0(off,%src) */
1500 EMIT6_DISP_LH(0xe3000000, 0x0077, dst_reg, src_reg, REG_0, off);
1501 jit->seen |= SEEN_MEM;
1502 break;
1503 case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
1504 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1505 /* llgh %dst,0(off,%src) */
1506 EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
1507 jit->seen |= SEEN_MEM;
1508 if (insn_is_zext(&insn[1]))
1509 insn_count = 2;
1510 break;
1511 case BPF_LDX | BPF_MEMSX | BPF_H: /* dst = *(s16 *)(ul) (src + off) */
1512 case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
1513 /* lgh %dst,0(off,%src) */
1514 EMIT6_DISP_LH(0xe3000000, 0x0015, dst_reg, src_reg, REG_0, off);
1515 jit->seen |= SEEN_MEM;
1516 break;
1517 case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
1518 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1519 /* llgf %dst,off(%src) */
1520 jit->seen |= SEEN_MEM;
1521 EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
1522 if (insn_is_zext(&insn[1]))
1523 insn_count = 2;
1524 break;
1525 case BPF_LDX | BPF_MEMSX | BPF_W: /* dst = *(s32 *)(ul) (src + off) */
1526 case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
1527 /* lgf %dst,off(%src) */
1528 jit->seen |= SEEN_MEM;
1529 EMIT6_DISP_LH(0xe3000000, 0x0014, dst_reg, src_reg, REG_0, off);
1530 break;
1531 case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
1532 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1533 /* lg %dst,0(off,%src) */
1534 jit->seen |= SEEN_MEM;
1535 EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
1536 break;
1537 /*
1538 * BPF_JMP / CALL
1539 */
1540 case BPF_JMP | BPF_CALL:
1541 {
1542 const struct btf_func_model *m;
1543 bool func_addr_fixed;
1544 int j, ret;
1545 u64 func;
1546
1547 ret = bpf_jit_get_func_addr(fp, insn, extra_pass,
1548 &func, &func_addr_fixed);
1549 if (ret < 0)
1550 return -1;
1551
1552 REG_SET_SEEN(BPF_REG_5);
1553 jit->seen |= SEEN_FUNC;
1554 /*
1555 * Copy the tail call counter to where the callee expects it.
1556 *
1557 * Note 1: The callee can increment the tail call counter, but
1558 * we do not load it back, since the x86 JIT does not do this
1559 * either.
1560 *
1561 * Note 2: We assume that the verifier does not let us call the
1562 * main program, which clears the tail call counter on entry.
1563 */
1564		/* mvc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT+STK_OFF+stack_depth(%r15) */
1565 _EMIT6(0xd203f000 | STK_OFF_TCCNT,
1566 0xf000 | (STK_OFF_TCCNT + STK_OFF + stack_depth));
1567
1568 /* Sign-extend the kfunc arguments. */
1569 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
1570 m = bpf_jit_find_kfunc_model(fp, insn);
1571 if (!m)
1572 return -1;
1573
1574 for (j = 0; j < m->nr_args; j++) {
1575 if (sign_extend(jit, BPF_REG_1 + j,
1576 m->arg_size[j],
1577 m->arg_flags[j]))
1578 return -1;
1579 }
1580 }
1581
1582 /* lgrl %w1,func */
1583 EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
1584 /* %r1() */
1585 call_r1(jit);
1586 /* lgr %b0,%r2: load return value into %b0 */
1587 EMIT4(0xb9040000, BPF_REG_0, REG_2);
1588 break;
1589 }
1590 case BPF_JMP | BPF_TAIL_CALL: {
1591 int patch_1_clrj, patch_2_clij, patch_3_brc;
1592
1593 /*
1594 * Implicit input:
1595 * B1: pointer to ctx
1596 * B2: pointer to bpf_array
1597 * B3: index in bpf_array
1598 *
1599 * if (index >= array->map.max_entries)
1600 * goto out;
1601 */
1602
1603 /* llgf %w1,map.max_entries(%b2) */
1604 EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
1605 offsetof(struct bpf_array, map.max_entries));
1606 /* if ((u32)%b3 >= (u32)%w1) goto out; */
1607 /* clrj %b3,%w1,0xa,out */
1608 patch_1_clrj = jit->prg;
1609 EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa,
1610 jit->prg);
1611
1612 /*
1613 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
1614 * goto out;
1615 */
1616
1617 if (jit->seen & SEEN_STACK)
1618 off = STK_OFF_TCCNT + STK_OFF + stack_depth;
1619 else
1620 off = STK_OFF_TCCNT;
1621 /* lhi %w0,1 */
1622 EMIT4_IMM(0xa7080000, REG_W0, 1);
1623 /* laal %w1,%w0,off(%r15) */
1624 EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
1625 /* clij %w1,MAX_TAIL_CALL_CNT-1,0x2,out */
1626 patch_2_clij = jit->prg;
1627 EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT - 1,
1628 2, jit->prg);
1629
1630 /*
1631 * prog = array->ptrs[index];
1632 * if (prog == NULL)
1633 * goto out;
1634 */
1635
1636 /* llgfr %r1,%b3: %r1 = (u32) index */
1637 EMIT4(0xb9160000, REG_1, BPF_REG_3);
1638 /* sllg %r1,%r1,3: %r1 *= 8 */
1639 EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
1640 /* ltg %r1,prog(%b2,%r1) */
1641 EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
1642 REG_1, offsetof(struct bpf_array, ptrs));
1643 /* brc 0x8,out */
1644 patch_3_brc = jit->prg;
1645 EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg);
1646
1647 /*
1648 * Restore registers before calling function
1649 */
1650 save_restore_regs(jit, REGS_RESTORE, stack_depth);
1651
1652 /*
1653 * goto *(prog->bpf_func + tail_call_start);
1654 */
1655
1656 /* lg %r1,bpf_func(%r1) */
1657 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
1658 offsetof(struct bpf_prog, bpf_func));
1659 if (nospec_uses_trampoline()) {
1660 jit->seen |= SEEN_FUNC;
1661 /* aghi %r1,tail_call_start */
1662 EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start);
1663 /* brcl 0xf,__s390_indirect_jump_r1 */
1664 EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->r1_thunk_ip);
1665 } else {
1666 /* bc 0xf,tail_call_start(%r1) */
1667 _EMIT4(0x47f01000 + jit->tail_call_start);
1668 }
1669 /* out: */
1670 if (jit->prg_buf) {
1671 *(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
1672 (jit->prg - patch_1_clrj) >> 1;
1673 *(u16 *)(jit->prg_buf + patch_2_clij + 2) =
1674 (jit->prg - patch_2_clij) >> 1;
1675 *(u16 *)(jit->prg_buf + patch_3_brc + 2) =
1676 (jit->prg - patch_3_brc) >> 1;
1677 }
1678 break;
1679 }
1680 case BPF_JMP | BPF_EXIT: /* return b0 */
1681 last = (i == fp->len - 1) ? 1 : 0;
1682 if (last)
1683 break;
1684 if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip))
1685 /* brc 0xf, <exit> */
1686 EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip);
1687 else
1688 /* brcl 0xf, <exit> */
1689 EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip);
1690 break;
1691 /*
1692 * Branch relative (number of skipped instructions) to offset on
1693 * condition.
1694 *
1695 * Condition code to mask mapping:
1696 *
1697 * CC | Description | Mask
1698 * ------------------------------
1699 * 0 | Operands equal | 8
1700 * 1 | First operand low | 4
1701 * 2 | First operand high | 2
1702 * 3 | Unused | 1
1703 *
1704 * For s390x relative branches: ip = ip + off_bytes
1705 * For BPF relative branches: insn = insn + off_insns + 1
1706 *
1707 * For example for s390x with offset 0 we jump to the branch
1708 * instruction itself (loop) and for BPF with offset 0 we
1709	 * branch to the instruction that follows the branch.
1710 */
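	/*
	 * For illustration: the mask values used below are the 4-bit
	 * condition-code masks from the table above, pre-shifted left by
	 * 12 so that they drop directly into the M3 field of the
	 * compare-and-branch instructions (crj/cgrj/clrj/clgrj). E.g.
	 * unsigned ">" must branch on CC 2, so mask = 0x2000 ("jh");
	 * ">=" branches on CC 0 or 2, so mask = 0xa000 ("jhe"). Plain
	 * brc/brcl take the unshifted 4-bit mask, hence "mask >> 12".
	 */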
1711 case BPF_JMP32 | BPF_JA: /* if (true) */
1712 branch_oc_off = imm;
1713 fallthrough;
1714 case BPF_JMP | BPF_JA: /* if (true) */
1715 mask = 0xf000; /* j */
1716 goto branch_oc;
1717 case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
1718 case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */
1719 mask = 0x2000; /* jh */
1720 goto branch_ks;
1721 case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
1722 case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */
1723 mask = 0x4000; /* jl */
1724 goto branch_ks;
1725 case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
1726 case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */
1727 mask = 0xa000; /* jhe */
1728 goto branch_ks;
1729 case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
1730 case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */
1731 mask = 0xc000; /* jle */
1732 goto branch_ks;
1733 case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
1734 case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */
1735 mask = 0x2000; /* jh */
1736 goto branch_ku;
1737 case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
1738 case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */
1739 mask = 0x4000; /* jl */
1740 goto branch_ku;
1741 case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
1742 case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */
1743 mask = 0xa000; /* jhe */
1744 goto branch_ku;
1745 case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
1746 case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */
1747 mask = 0xc000; /* jle */
1748 goto branch_ku;
1749 case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
1750 case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */
1751 mask = 0x7000; /* jne */
1752 goto branch_ku;
1753 case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
1754 case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */
1755 mask = 0x8000; /* je */
1756 goto branch_ku;
1757 case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
1758 case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */
1759 mask = 0x7000; /* jnz */
1760 if (BPF_CLASS(insn->code) == BPF_JMP32) {
1761 /* llilf %w1,imm (load zero extend imm) */
1762 EMIT6_IMM(0xc00f0000, REG_W1, imm);
1763 /* nr %w1,%dst */
1764 EMIT2(0x1400, REG_W1, dst_reg);
1765 } else {
1766 /* lgfi %w1,imm (load sign extend imm) */
1767 EMIT6_IMM(0xc0010000, REG_W1, imm);
1768 /* ngr %w1,%dst */
1769 EMIT4(0xb9800000, REG_W1, dst_reg);
1770 }
1771 goto branch_oc;
1772
1773 case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
1774 case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */
1775 mask = 0x2000; /* jh */
1776 goto branch_xs;
1777 case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
1778 case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */
1779 mask = 0x4000; /* jl */
1780 goto branch_xs;
1781 case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
1782 case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */
1783 mask = 0xa000; /* jhe */
1784 goto branch_xs;
1785 case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
1786 case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */
1787 mask = 0xc000; /* jle */
1788 goto branch_xs;
1789 case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
1790 case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */
1791 mask = 0x2000; /* jh */
1792 goto branch_xu;
1793 case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
1794 case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */
1795 mask = 0x4000; /* jl */
1796 goto branch_xu;
1797 case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
1798 case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */
1799 mask = 0xa000; /* jhe */
1800 goto branch_xu;
1801 case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
1802 case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */
1803 mask = 0xc000; /* jle */
1804 goto branch_xu;
1805 case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
1806 case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */
1807 mask = 0x7000; /* jne */
1808 goto branch_xu;
1809 case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
1810 case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */
1811 mask = 0x8000; /* je */
1812 goto branch_xu;
1813 case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
1814 case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */
1815 {
1816 bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1817
1818 mask = 0x7000; /* jnz */
1819 /* nrk or ngrk %w1,%dst,%src */
1820 EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000),
1821 REG_W1, dst_reg, src_reg);
1822 goto branch_oc;
1823branch_ks:
1824 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1825 /* cfi or cgfi %dst,imm */
1826 EMIT6_IMM(is_jmp32 ? 0xc20d0000 : 0xc20c0000,
1827 dst_reg, imm);
1828 if (!is_first_pass(jit) &&
1829 can_use_rel(jit, addrs[i + off + 1])) {
1830 /* brc mask,off */
1831 EMIT4_PCREL_RIC(0xa7040000,
1832 mask >> 12, addrs[i + off + 1]);
1833 } else {
1834 /* brcl mask,off */
1835 EMIT6_PCREL_RILC(0xc0040000,
1836 mask >> 12, addrs[i + off + 1]);
1837 }
1838 break;
1839branch_ku:
1840 /* lgfi %w1,imm (load sign extend imm) */
1841 src_reg = REG_1;
1842 EMIT6_IMM(0xc0010000, src_reg, imm);
1843 goto branch_xu;
1844branch_xs:
1845 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1846 if (!is_first_pass(jit) &&
1847 can_use_rel(jit, addrs[i + off + 1])) {
1848 /* crj or cgrj %dst,%src,mask,off */
1849 EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
1850 dst_reg, src_reg, i, off, mask);
1851 } else {
1852 /* cr or cgr %dst,%src */
1853 if (is_jmp32)
1854 EMIT2(0x1900, dst_reg, src_reg);
1855 else
1856 EMIT4(0xb9200000, dst_reg, src_reg);
1857 /* brcl mask,off */
1858 EMIT6_PCREL_RILC(0xc0040000,
1859 mask >> 12, addrs[i + off + 1]);
1860 }
1861 break;
1862branch_xu:
1863 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1864 if (!is_first_pass(jit) &&
1865 can_use_rel(jit, addrs[i + off + 1])) {
1866 /* clrj or clgrj %dst,%src,mask,off */
1867 EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
1868 dst_reg, src_reg, i, off, mask);
1869 } else {
1870 /* clr or clgr %dst,%src */
1871 if (is_jmp32)
1872 EMIT2(0x1500, dst_reg, src_reg);
1873 else
1874 EMIT4(0xb9210000, dst_reg, src_reg);
1875 /* brcl mask,off */
1876 EMIT6_PCREL_RILC(0xc0040000,
1877 mask >> 12, addrs[i + off + 1]);
1878 }
1879 break;
1880branch_oc:
1881 if (!is_first_pass(jit) &&
1882 can_use_rel(jit, addrs[i + branch_oc_off + 1])) {
1883 /* brc mask,off */
1884 EMIT4_PCREL_RIC(0xa7040000,
1885 mask >> 12,
1886 addrs[i + branch_oc_off + 1]);
1887 } else {
1888 /* brcl mask,off */
1889 EMIT6_PCREL_RILC(0xc0040000,
1890 mask >> 12,
1891 addrs[i + branch_oc_off + 1]);
1892 }
1893 break;
1894 }
1895 default: /* too complex, give up */
1896 pr_err("Unknown opcode %02x\n", insn->code);
1897 return -1;
1898 }
1899
1900 if (probe_prg != -1) {
1901 /*
1902 * Handlers of certain exceptions leave psw.addr pointing to
1903 * the instruction directly after the failing one. Therefore,
1904 * create two exception table entries and also add a nop in
1905 * case two probing instructions come directly after each
1906 * other.
1907 */
1908 nop_prg = jit->prg;
1909 /* bcr 0,%0 */
1910 _EMIT2(0x0700);
1911 err = bpf_jit_probe_mem(jit, fp, probe_prg, nop_prg);
1912 if (err < 0)
1913 return err;
1914 }
1915
1916 return insn_count;
1917}
1918
1919/*
1920 * Return whether the new address of the i-th instruction satisfies all invariants
1921 */
1922static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i)
1923{
1924 /* On the first pass anything goes */
1925 if (is_first_pass(jit))
1926 return true;
1927
1928 /* The codegen pass must not change anything */
1929 if (is_codegen_pass(jit))
1930 return jit->addrs[i] == jit->prg;
1931
1932 /* Passes in between must not increase code size */
1933 return jit->addrs[i] >= jit->prg;
1934}
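/*
 * Note: passes between the first and the codegen pass may only shrink
 * the code, so the addrs[] recorded by the sizing passes are upper
 * bounds. During codegen, bpf_set_addr() pads any shrinkage back with
 * bpf_skip(), which keeps all previously materialized branch targets
 * valid.
 */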
1935
1936/*
1937 * Update the address of i-th instruction
1938 */
1939static int bpf_set_addr(struct bpf_jit *jit, int i)
1940{
1941 int delta;
1942
1943 if (is_codegen_pass(jit)) {
1944 delta = jit->prg - jit->addrs[i];
1945 if (delta < 0)
1946 bpf_skip(jit, -delta);
1947 }
1948 if (WARN_ON_ONCE(!bpf_is_new_addr_sane(jit, i)))
1949 return -1;
1950 jit->addrs[i] = jit->prg;
1951 return 0;
1952}
1953
1954/*
1955 * Compile eBPF program into s390x code
1956 */
1957static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
1958 bool extra_pass, u32 stack_depth)
1959{
1960 int i, insn_count, lit32_size, lit64_size;
1961
1962 jit->lit32 = jit->lit32_start;
1963 jit->lit64 = jit->lit64_start;
1964 jit->prg = 0;
1965 jit->excnt = 0;
1966
1967 bpf_jit_prologue(jit, fp, stack_depth);
1968 if (bpf_set_addr(jit, 0) < 0)
1969 return -1;
1970 for (i = 0; i < fp->len; i += insn_count) {
1971 insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth);
1972 if (insn_count < 0)
1973 return -1;
1974 /* Next instruction address */
1975 if (bpf_set_addr(jit, i + insn_count) < 0)
1976 return -1;
1977 }
1978 bpf_jit_epilogue(jit, stack_depth);
1979
1980 lit32_size = jit->lit32 - jit->lit32_start;
1981 lit64_size = jit->lit64 - jit->lit64_start;
1982 jit->lit32_start = jit->prg;
1983 if (lit32_size)
1984 jit->lit32_start = ALIGN(jit->lit32_start, 4);
1985 jit->lit64_start = jit->lit32_start + lit32_size;
1986 if (lit64_size)
1987 jit->lit64_start = ALIGN(jit->lit64_start, 8);
1988 jit->size = jit->lit64_start + lit64_size;
1989 jit->size_prg = jit->prg;
1990
1991 if (WARN_ON_ONCE(fp->aux->extable &&
1992 jit->excnt != fp->aux->num_exentries))
1993		/* Verifier bug - wrong number of extable entries. */
1994 return -1;
1995
1996 return 0;
1997}
1998
1999bool bpf_jit_needs_zext(void)
2000{
2001 return true;
2002}
2003
2004struct s390_jit_data {
2005 struct bpf_binary_header *header;
2006 struct bpf_jit ctx;
2007 int pass;
2008};
2009
2010static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
2011 struct bpf_prog *fp)
2012{
2013 struct bpf_binary_header *header;
2014 u32 extable_size;
2015 u32 code_size;
2016
2017	/* We need two extable entries per probe instruction. */
2018 fp->aux->num_exentries *= 2;
2019
2020 code_size = roundup(jit->size,
2021 __alignof__(struct exception_table_entry));
2022 extable_size = fp->aux->num_exentries *
2023 sizeof(struct exception_table_entry);
2024 header = bpf_jit_binary_alloc(code_size + extable_size, &jit->prg_buf,
2025 8, jit_fill_hole);
2026 if (!header)
2027 return NULL;
2028 fp->aux->extable = (struct exception_table_entry *)
2029 (jit->prg_buf + code_size);
2030 return header;
2031}
2032
2033/*
2034 * Compile eBPF program "fp"
2035 */
2036struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
2037{
2038 u32 stack_depth = round_up(fp->aux->stack_depth, 8);
2039 struct bpf_prog *tmp, *orig_fp = fp;
2040 struct bpf_binary_header *header;
2041 struct s390_jit_data *jit_data;
2042 bool tmp_blinded = false;
2043 bool extra_pass = false;
2044 struct bpf_jit jit;
2045 int pass;
2046
2047 if (!fp->jit_requested)
2048 return orig_fp;
2049
2050 tmp = bpf_jit_blind_constants(fp);
2051 /*
2052 * If blinding was requested and we failed during blinding,
2053 * we must fall back to the interpreter.
2054 */
2055 if (IS_ERR(tmp))
2056 return orig_fp;
2057 if (tmp != fp) {
2058 tmp_blinded = true;
2059 fp = tmp;
2060 }
2061
2062 jit_data = fp->aux->jit_data;
2063 if (!jit_data) {
2064 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2065 if (!jit_data) {
2066 fp = orig_fp;
2067 goto out;
2068 }
2069 fp->aux->jit_data = jit_data;
2070 }
2071 if (jit_data->ctx.addrs) {
2072 jit = jit_data->ctx;
2073 header = jit_data->header;
2074 extra_pass = true;
2075 pass = jit_data->pass + 1;
2076 goto skip_init_ctx;
2077 }
2078
2079 memset(&jit, 0, sizeof(jit));
2080 jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
2081 if (jit.addrs == NULL) {
2082 fp = orig_fp;
2083 goto free_addrs;
2084 }
2085 /*
2086 * Three initial passes:
2087 * - 1/2: Determine clobbered registers
2088 * - 3: Calculate program size and addrs array
2089 */
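 /*
 * A sketch of why several passes are needed: the prologue and epilogue
 * depend on which registers the program body turns out to use (seen_reg),
 * and branch encodings depend on the final instruction addresses, so sizes
 * and addresses only stabilize after repeated passes (see
 * bpf_is_new_addr_sane() for the invariants).
 */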
2090 for (pass = 1; pass <= 3; pass++) {
2091 if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
2092 fp = orig_fp;
2093 goto free_addrs;
2094 }
2095 }
2096 /*
2097 * Final pass: Allocate and generate program
2098 */
2099 header = bpf_jit_alloc(&jit, fp);
2100 if (!header) {
2101 fp = orig_fp;
2102 goto free_addrs;
2103 }
2104skip_init_ctx:
2105 if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
2106 bpf_jit_binary_free(header);
2107 fp = orig_fp;
2108 goto free_addrs;
2109 }
2110 if (bpf_jit_enable > 1) {
2111 bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
2112 print_fn_code(jit.prg_buf, jit.size_prg);
2113 }
2114 if (!fp->is_func || extra_pass) {
2115 bpf_jit_binary_lock_ro(header);
2116 } else {
2117 jit_data->header = header;
2118 jit_data->ctx = jit;
2119 jit_data->pass = pass;
2120 }
2121 fp->bpf_func = (void *) jit.prg_buf;
2122 fp->jited = 1;
2123 fp->jited_len = jit.size;
2124
2125 if (!fp->is_func || extra_pass) {
2126 bpf_prog_fill_jited_linfo(fp, jit.addrs + 1);
2127free_addrs:
2128 kvfree(jit.addrs);
2129 kfree(jit_data);
2130 fp->aux->jit_data = NULL;
2131 }
2132out:
2133 if (tmp_blinded)
2134 bpf_jit_prog_release_other(fp, fp == orig_fp ?
2135 tmp : orig_fp);
2136 return fp;
2137}
2138
2139bool bpf_jit_supports_kfunc_call(void)
2140{
2141 return true;
2142}
2143
2144bool bpf_jit_supports_far_kfunc_call(void)
2145{
2146 return true;
2147}
2148
2149int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2150 void *old_addr, void *new_addr)
2151{
2152 struct bpf_plt expected_plt, current_plt, new_plt, *plt;
2153 struct {
2154 u16 opc;
2155 s32 disp;
2156 } __packed insn;
2157 char *ret;
2158 int err;
2159
2160 /* Verify the branch to be patched. */
2161 err = copy_from_kernel_nofault(&insn, ip, sizeof(insn));
2162 if (err < 0)
2163 return err;
2164 if (insn.opc != (0xc004 | (old_addr ? 0xf0 : 0)))
2165 return -EINVAL;
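 /*
 * Illustrative encoding of the 6-byte "brcl" being patched (its 32-bit
 * displacement counts halfwords):
 *   0xc0f4 <disp32> - brcl 15,<target> (always taken, branch enabled)
 *   0xc004 <disp32> - brcl 0,<target>  (never taken, acts as a nop)
 * Only the mask nibble and, when a PLT is used, the PLT target are
 * rewritten below.
 */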
2166
2167 if (t == BPF_MOD_JUMP &&
2168 insn.disp == ((char *)new_addr - (char *)ip) >> 1) {
2169 /*
2170 * The branch already points to the destination,
2171 * there is no PLT.
2172 */
2173 } else {
2174 /* Verify the PLT. */
2175 plt = ip + (insn.disp << 1);
2176 err = copy_from_kernel_nofault(&current_plt, plt,
2177 sizeof(current_plt));
2178 if (err < 0)
2179 return err;
2180 ret = (char *)ip + 6;
2181 bpf_jit_plt(&expected_plt, ret, old_addr);
2182 if (memcmp(&current_plt, &expected_plt, sizeof(current_plt)))
2183 return -EINVAL;
2184 /* Adjust the call address. */
2185 bpf_jit_plt(&new_plt, ret, new_addr);
2186 s390_kernel_write(&plt->target, &new_plt.target,
2187 sizeof(void *));
2188 }
2189
2190 /* Adjust the mask of the branch. */
2191 insn.opc = 0xc004 | (new_addr ? 0xf0 : 0);
2192 s390_kernel_write((char *)ip + 1, (char *)&insn.opc + 1, 1);
2193
2194 /* Make the new code visible to the other CPUs. */
2195 text_poke_sync_lock();
2196
2197 return 0;
2198}
2199
2200struct bpf_tramp_jit {
2201 struct bpf_jit common;
2202 int orig_stack_args_off;/* Offset of arguments placed on stack by the
2203 * func_addr's original caller
2204 */
2205 int stack_size; /* Trampoline stack size */
2206 int backchain_off; /* Offset of backchain */
2207 int stack_args_off; /* Offset of stack arguments for calling
2208 * func_addr, has to be at the top
2209 */
2210 int reg_args_off; /* Offset of register arguments for calling
2211 * func_addr
2212 */
2213 int ip_off; /* For bpf_get_func_ip(), has to be at
2214 * (ctx - 16)
2215 */
2216 int arg_cnt_off; /* For bpf_get_func_arg_cnt(), has to be at
2217 * (ctx - 8)
2218 */
2219 int bpf_args_off; /* Offset of BPF_PROG context, which consists
2220 * of BPF arguments followed by return value
2221 */
2222 int retval_off; /* Offset of return value (see above) */
2223 int r7_r8_off; /* Offset of saved %r7 and %r8, which are used
2224 * for __bpf_prog_enter() return value and
2225 * func_addr respectively
2226 */
2227 int run_ctx_off; /* Offset of struct bpf_tramp_run_ctx */
2228 int tccnt_off; /* Offset of saved tailcall counter */
2229 int r14_off; /* Offset of saved %r14, has to be at the
2230 * bottom */
2231 int do_fexit; /* do_fexit: label */
2232};
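/*
 * Illustrative order of the stack regions carved out by alloc_stack() in
 * __arch_prepare_bpf_trampoline(), from low to high offsets relative to the
 * trampoline's %r15:
 *
 *   STACK_FRAME_OVERHEAD (backchain at its end), stack_args, reg_args, ip,
 *   arg_cnt, bpf_args, retval, r7_r8, run_ctx, tccnt, r14, followed by the
 *   caller-provided area and orig_stack_args.
 */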
2233
2234static void load_imm64(struct bpf_jit *jit, int dst_reg, u64 val)
2235{
2236 /* llihf %dst_reg,val_hi */
2237 EMIT6_IMM(0xc00e0000, dst_reg, (val >> 32));
2238 /* oilf %dst_reg,val_lo */
2239 EMIT6_IMM(0xc00d0000, dst_reg, val);
2240}
2241
2242static int invoke_bpf_prog(struct bpf_tramp_jit *tjit,
2243 const struct btf_func_model *m,
2244 struct bpf_tramp_link *tlink, bool save_ret)
2245{
2246 struct bpf_jit *jit = &tjit->common;
2247 int cookie_off = tjit->run_ctx_off +
2248 offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
2249 struct bpf_prog *p = tlink->link.prog;
2250 int patch;
2251
2252 /*
2253 * run_ctx.cookie = tlink->cookie;
2254 */
2255
2256 /* %r0 = tlink->cookie */
2257 load_imm64(jit, REG_W0, tlink->cookie);
2258 /* stg %r0,cookie_off(%r15) */
2259 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, REG_0, REG_15, cookie_off);
2260
2261 /*
2262 * if ((start = __bpf_prog_enter(p, &run_ctx)) == 0)
2263 * goto skip;
2264 */
2265
2266 /* %r1 = __bpf_prog_enter */
2267 load_imm64(jit, REG_1, (u64)bpf_trampoline_enter(p));
2268 /* %r2 = p */
2269 load_imm64(jit, REG_2, (u64)p);
2270 /* la %r3,run_ctx_off(%r15) */
2271 EMIT4_DISP(0x41000000, REG_3, REG_15, tjit->run_ctx_off);
2272 /* %r1() */
2273 call_r1(jit);
2274 /* ltgr %r7,%r2 */
2275 EMIT4(0xb9020000, REG_7, REG_2);
2276 /* brcl 8,skip */
2277 patch = jit->prg;
2278 EMIT6_PCREL_RILC(0xc0040000, 8, 0);
2279
2280 /*
2281 * retval = bpf_func(args, p->insnsi);
2282 */
2283
2284 /* %r1 = p->bpf_func */
2285 load_imm64(jit, REG_1, (u64)p->bpf_func);
2286 /* la %r2,bpf_args_off(%r15) */
2287 EMIT4_DISP(0x41000000, REG_2, REG_15, tjit->bpf_args_off);
2288 /* %r3 = p->insnsi */
2289 if (!p->jited)
2290 load_imm64(jit, REG_3, (u64)p->insnsi);
2291 /* %r1() */
2292 call_r1(jit);
2293 /* stg %r2,retval_off(%r15) */
2294 if (save_ret) {
2295 if (sign_extend(jit, REG_2, m->ret_size, m->ret_flags))
2296 return -1;
2297 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
2298 tjit->retval_off);
2299 }
2300
2301 /* skip: */
2302 if (jit->prg_buf)
2303 *(u32 *)&jit->prg_buf[patch + 2] = (jit->prg - patch) >> 1;
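 /*
 * The store above backpatches the 32-bit displacement of the "brcl 8,skip"
 * emitted earlier; relative displacements are counted in halfwords, hence
 * the shift by 1.
 */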
2304
2305 /*
2306 * __bpf_prog_exit(p, start, &run_ctx);
2307 */
2308
2309 /* %r1 = __bpf_prog_exit */
2310 load_imm64(jit, REG_1, (u64)bpf_trampoline_exit(p));
2311 /* %r2 = p */
2312 load_imm64(jit, REG_2, (u64)p);
2313 /* lgr %r3,%r7 */
2314 EMIT4(0xb9040000, REG_3, REG_7);
2315 /* la %r4,run_ctx_off(%r15) */
2316 EMIT4_DISP(0x41000000, REG_4, REG_15, tjit->run_ctx_off);
2317 /* %r1() */
2318 call_r1(jit);
2319
2320 return 0;
2321}
2322
2323static int alloc_stack(struct bpf_tramp_jit *tjit, size_t size)
2324{
2325 int stack_offset = tjit->stack_size;
2326
2327 tjit->stack_size += size;
2328 return stack_offset;
2329}
2330
2331/* ABI uses %r2 - %r6 for parameter passing. */
2332#define MAX_NR_REG_ARGS 5
2333
2334/* The "L" field of the "mvc" instruction is 8 bits. */
2335#define MAX_MVC_SIZE 256
2336#define MAX_NR_STACK_ARGS (MAX_MVC_SIZE / sizeof(u64))
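/*
 * With MAX_MVC_SIZE of 256 bytes this allows up to 256 / 8 = 32 8-byte stack
 * arguments to be copied with a single "mvc".
 */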
2337
2338/* -mfentry generates a 6-byte nop on s390x. */
2339#define S390X_PATCH_SIZE 6
2340
2341static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
2342 struct bpf_tramp_jit *tjit,
2343 const struct btf_func_model *m,
2344 u32 flags,
2345 struct bpf_tramp_links *tlinks,
2346 void *func_addr)
2347{
2348 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
2349 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
2350 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
2351 int nr_bpf_args, nr_reg_args, nr_stack_args;
2352 struct bpf_jit *jit = &tjit->common;
2353 int arg, bpf_arg_off;
2354 int i, j;
2355
2356 /* Support as many stack arguments as "mvc" instruction can handle. */
2357 nr_reg_args = min_t(int, m->nr_args, MAX_NR_REG_ARGS);
2358 nr_stack_args = m->nr_args - nr_reg_args;
2359 if (nr_stack_args > MAX_NR_STACK_ARGS)
2360 return -ENOTSUPP;
2361
2362 /* Return to %r14, since func_addr and %r0 are not available. */
2363 if ((!func_addr && !(flags & BPF_TRAMP_F_ORIG_STACK)) ||
2364 (flags & BPF_TRAMP_F_INDIRECT))
2365 flags |= BPF_TRAMP_F_SKIP_FRAME;
2366
2367 /*
2368 * Compute how many arguments we need to pass to BPF programs.
2369 * BPF ABI mirrors that of x86_64: arguments that are 16 bytes or
2370 * smaller are packed into 1 or 2 registers; larger arguments are
2371 * passed via pointers.
2372 * In the s390x ABI, arguments that are 8 bytes or smaller are packed into
2373 * a register; larger arguments are passed via pointers.
2374 * We need to deal with this difference.
2375 */
2376 nr_bpf_args = 0;
2377 for (i = 0; i < m->nr_args; i++) {
2378 if (m->arg_size[i] <= 8)
2379 nr_bpf_args += 1;
2380 else if (m->arg_size[i] <= 16)
2381 nr_bpf_args += 2;
2382 else
2383 return -ENOTSUPP;
2384 }
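 /*
 * For example, a 16-byte struct argument is passed by pointer in a single
 * register under the s390x ABI, but occupies two 8-byte slots in the BPF
 * context, so it contributes 2 to nr_bpf_args.
 */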
2385
2386 /*
2387 * Calculate the stack layout.
2388 */
2389
2390 /*
2391 * Allocate STACK_FRAME_OVERHEAD bytes for the callees. As the s390x
2392 * ABI requires, put our backchain at the end of the allocated memory.
2393 */
2394 tjit->stack_size = STACK_FRAME_OVERHEAD;
2395 tjit->backchain_off = tjit->stack_size - sizeof(u64);
2396 tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64));
2397 tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64));
2398 tjit->ip_off = alloc_stack(tjit, sizeof(u64));
2399 tjit->arg_cnt_off = alloc_stack(tjit, sizeof(u64));
2400 tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64));
2401 tjit->retval_off = alloc_stack(tjit, sizeof(u64));
2402 tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64));
2403 tjit->run_ctx_off = alloc_stack(tjit,
2404 sizeof(struct bpf_tramp_run_ctx));
2405 tjit->tccnt_off = alloc_stack(tjit, sizeof(u64));
2406 tjit->r14_off = alloc_stack(tjit, sizeof(u64) * 2);
2407 /*
2408 * In accordance with the s390x ABI, the caller has allocated
2409 * STACK_FRAME_OVERHEAD bytes for us. 8 of them contain the caller's
2410 * backchain, and the rest we can use.
2411 */
2412 tjit->stack_size -= STACK_FRAME_OVERHEAD - sizeof(u64);
2413 tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;
2414
2415 /* lgr %r1,%r15 */
2416 EMIT4(0xb9040000, REG_1, REG_15);
2417 /* aghi %r15,-stack_size */
2418 EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
2419 /* stg %r1,backchain_off(%r15) */
2420 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15,
2421 tjit->backchain_off);
2422 /* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
2423 _EMIT6(0xd203f000 | tjit->tccnt_off,
2424 0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
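 /*
 * Preserve the tail call counter that the calling BPF program keeps at
 * STK_OFF_TCCNT of its frame; it is restored before calling the original
 * function and again in the epilogue below.
 */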
2425 /* stmg %r2,%rN,reg_args_off(%r15) */
2426 if (nr_reg_args)
2427 EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2,
2428 REG_2 + (nr_reg_args - 1), REG_15,
2429 tjit->reg_args_off);
2430 for (i = 0, j = 0; i < m->nr_args; i++) {
2431 if (i < MAX_NR_REG_ARGS)
2432 arg = REG_2 + i;
2433 else
2434 arg = tjit->orig_stack_args_off +
2435 (i - MAX_NR_REG_ARGS) * sizeof(u64);
2436 bpf_arg_off = tjit->bpf_args_off + j * sizeof(u64);
2437 if (m->arg_size[i] <= 8) {
2438 if (i < MAX_NR_REG_ARGS)
2439 /* stg %arg,bpf_arg_off(%r15) */
2440 EMIT6_DISP_LH(0xe3000000, 0x0024, arg,
2441 REG_0, REG_15, bpf_arg_off);
2442 else
2443 /* mvc bpf_arg_off(8,%r15),arg(%r15) */
2444 _EMIT6(0xd207f000 | bpf_arg_off,
2445 0xf000 | arg);
2446 j += 1;
2447 } else {
2448 if (i < MAX_NR_REG_ARGS) {
2449 /* mvc bpf_arg_off(16,%r15),0(%arg) */
2450 _EMIT6(0xd20ff000 | bpf_arg_off,
2451 reg2hex[arg] << 12);
2452 } else {
2453 /* lg %r1,arg(%r15) */
2454 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_0,
2455 REG_15, arg);
2456 /* mvc bpf_arg_off(16,%r15),0(%r1) */
2457 _EMIT6(0xd20ff000 | bpf_arg_off, 0x1000);
2458 }
2459 j += 2;
2460 }
2461 }
2462 /* stmg %r7,%r8,r7_r8_off(%r15) */
2463 EMIT6_DISP_LH(0xeb000000, 0x0024, REG_7, REG_8, REG_15,
2464 tjit->r7_r8_off);
2465 /* stg %r14,r14_off(%r15) */
2466 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_14, REG_0, REG_15, tjit->r14_off);
2467
2468 if (flags & BPF_TRAMP_F_ORIG_STACK) {
2469 /*
2470 * The ftrace trampoline puts the return address (which is the
2471 * address of the original function + S390X_PATCH_SIZE) into
2472 * %r0; see ftrace_shared_hotpatch_trampoline_br and
2473 * ftrace_init_nop() for details.
2474 */
2475
2476 /* lgr %r8,%r0 */
2477 EMIT4(0xb9040000, REG_8, REG_0);
2478 } else {
2479 /* %r8 = func_addr + S390X_PATCH_SIZE */
2480 load_imm64(jit, REG_8, (u64)func_addr + S390X_PATCH_SIZE);
2481 }
2482
2483 /*
2484 * ip = func_addr;
2485 * arg_cnt = m->nr_args;
2486 */
2487
2488 if (flags & BPF_TRAMP_F_IP_ARG) {
2489 /* %r0 = func_addr */
2490 load_imm64(jit, REG_0, (u64)func_addr);
2491 /* stg %r0,ip_off(%r15) */
2492 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15,
2493 tjit->ip_off);
2494 }
2495 /* lghi %r0,nr_bpf_args */
2496 EMIT4_IMM(0xa7090000, REG_0, nr_bpf_args);
2497 /* stg %r0,arg_cnt_off(%r15) */
2498 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_0, REG_0, REG_15,
2499 tjit->arg_cnt_off);
2500
2501 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2502 /*
2503 * __bpf_tramp_enter(im);
2504 */
2505
2506 /* %r1 = __bpf_tramp_enter */
2507 load_imm64(jit, REG_1, (u64)__bpf_tramp_enter);
2508 /* %r2 = im */
2509 load_imm64(jit, REG_2, (u64)im);
2510 /* %r1() */
2511 call_r1(jit);
2512 }
2513
2514 for (i = 0; i < fentry->nr_links; i++)
2515 if (invoke_bpf_prog(tjit, m, fentry->links[i],
2516 flags & BPF_TRAMP_F_RET_FENTRY_RET))
2517 return -EINVAL;
2518
2519 if (fmod_ret->nr_links) {
2520 /*
2521 * retval = 0;
2522 */
2523
2524 /* xc retval_off(8,%r15),retval_off(%r15) */
2525 _EMIT6(0xd707f000 | tjit->retval_off,
2526 0xf000 | tjit->retval_off);
2527
2528 for (i = 0; i < fmod_ret->nr_links; i++) {
2529 if (invoke_bpf_prog(tjit, m, fmod_ret->links[i], true))
2530 return -EINVAL;
2531
2532 /*
2533 * if (retval)
2534 * goto do_fexit;
2535 */
2536
2537 /* ltg %r0,retval_off(%r15) */
2538 EMIT6_DISP_LH(0xe3000000, 0x0002, REG_0, REG_0, REG_15,
2539 tjit->retval_off);
2540 /* brcl 7,do_fexit */
2541 EMIT6_PCREL_RILC(0xc0040000, 7, tjit->do_fexit);
2542 }
2543 }
2544
2545 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2546 /*
2547 * retval = func_addr(args);
2548 */
2549
2550 /* lmg %r2,%rN,reg_args_off(%r15) */
2551 if (nr_reg_args)
2552 EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2,
2553 REG_2 + (nr_reg_args - 1), REG_15,
2554 tjit->reg_args_off);
2555 /* mvc stack_args_off(N,%r15),orig_stack_args_off(%r15) */
2556 if (nr_stack_args)
2557 _EMIT6(0xd200f000 |
2558 (nr_stack_args * sizeof(u64) - 1) << 16 |
2559 tjit->stack_args_off,
2560 0xf000 | tjit->orig_stack_args_off);
2561 /* mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
2562 _EMIT6(0xd203f000 | STK_OFF_TCCNT, 0xf000 | tjit->tccnt_off);
2563 /* lgr %r1,%r8 */
2564 EMIT4(0xb9040000, REG_1, REG_8);
2565 /* %r1() */
2566 call_r1(jit);
2567 /* stg %r2,retval_off(%r15) */
2568 EMIT6_DISP_LH(0xe3000000, 0x0024, REG_2, REG_0, REG_15,
2569 tjit->retval_off);
2570
2571 im->ip_after_call = jit->prg_buf + jit->prg;
2572
2573 /*
2574 * The following nop will be patched by bpf_tramp_image_put().
2575 */
2576
2577 /* brcl 0,im->ip_epilogue */
2578 EMIT6_PCREL_RILC(0xc0040000, 0, (u64)im->ip_epilogue);
2579 }
2580
2581 /* do_fexit: */
2582 tjit->do_fexit = jit->prg;
2583 for (i = 0; i < fexit->nr_links; i++)
2584 if (invoke_bpf_prog(tjit, m, fexit->links[i], false))
2585 return -EINVAL;
2586
2587 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2588 im->ip_epilogue = jit->prg_buf + jit->prg;
2589
2590 /*
2591 * __bpf_tramp_exit(im);
2592 */
2593
2594 /* %r1 = __bpf_tramp_exit */
2595 load_imm64(jit, REG_1, (u64)__bpf_tramp_exit);
2596 /* %r2 = im */
2597 load_imm64(jit, REG_2, (u64)im);
2598 /* %r1() */
2599 call_r1(jit);
2600 }
2601
2602 /* lmg %r2,%rN,reg_args_off(%r15) */
2603 if ((flags & BPF_TRAMP_F_RESTORE_REGS) && nr_reg_args)
2604 EMIT6_DISP_LH(0xeb000000, 0x0004, REG_2,
2605 REG_2 + (nr_reg_args - 1), REG_15,
2606 tjit->reg_args_off);
2607 /* lgr %r1,%r8 */
2608 if (!(flags & BPF_TRAMP_F_SKIP_FRAME))
2609 EMIT4(0xb9040000, REG_1, REG_8);
2610 /* lmg %r7,%r8,r7_r8_off(%r15) */
2611 EMIT6_DISP_LH(0xeb000000, 0x0004, REG_7, REG_8, REG_15,
2612 tjit->r7_r8_off);
2613 /* lg %r14,r14_off(%r15) */
2614 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_14, REG_0, REG_15, tjit->r14_off);
2615 /* lg %r2,retval_off(%r15) */
2616 if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET))
2617 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15,
2618 tjit->retval_off);
2619 /* mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
2620 _EMIT6(0xd203f000 | (tjit->stack_size + STK_OFF_TCCNT),
2621 0xf000 | tjit->tccnt_off);
2622 /* aghi %r15,stack_size */
2623 EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
2624 /* Emit an expoline for the following indirect jump. */
2625 if (nospec_uses_trampoline())
2626 emit_expoline(jit);
2627 if (flags & BPF_TRAMP_F_SKIP_FRAME)
2628 /* br %r14 */
2629 _EMIT2(0x07fe);
2630 else
2631 /* br %r1 */
2632 _EMIT2(0x07f1);
2633
2634 emit_r1_thunk(jit);
2635
2636 return 0;
2637}
2638
2639int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
2640 struct bpf_tramp_links *tlinks, void *orig_call)
2641{
2642 struct bpf_tramp_image im;
2643 struct bpf_tramp_jit tjit;
2644 int ret;
2645
2646 memset(&tjit, 0, sizeof(tjit));
2647
2648 ret = __arch_prepare_bpf_trampoline(&im, &tjit, m, flags,
2649 tlinks, orig_call);
2650
2651 return ret < 0 ? ret : tjit.common.prg;
2652}
2653
2654int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
2655 void *image_end, const struct btf_func_model *m,
2656 u32 flags, struct bpf_tramp_links *tlinks,
2657 void *func_addr)
2658{
2659 struct bpf_tramp_jit tjit;
2660 int ret;
2661
2662 /* Compute offsets, check whether the code fits. */
2663 memset(&tjit, 0, sizeof(tjit));
2664 ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
2665 tlinks, func_addr);
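 /*
 * With tjit.common.prg_buf still NULL the emit macros only advance
 * tjit.common.prg, so this first invocation measures the trampoline without
 * writing any code.
 */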
2666
2667 if (ret < 0)
2668 return ret;
2669 if (tjit.common.prg > (char *)image_end - (char *)image)
2670 /*
2671 * Use the same error code as for exceeding
2672 * BPF_MAX_TRAMP_LINKS.
2673 */
2674 return -E2BIG;
2675
2676 tjit.common.prg = 0;
2677 tjit.common.prg_buf = image;
2678 ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
2679 tlinks, func_addr);
2680
2681 return ret < 0 ? ret : tjit.common.prg;
2682}
2683
2684bool bpf_jit_supports_subprog_tailcalls(void)
2685{
2686 return true;
2687}