/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * bpf_jit.h: BPF JIT compiler for PPC
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *	     2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 */
#ifndef _BPF_JIT_H
#define _BPF_JIT_H

#ifndef __ASSEMBLY__

#include <asm/types.h>
#include <asm/ppc-opcode.h>

#ifdef PPC64_ELF_ABI_v1
#define FUNCTION_DESCR_SIZE	24
#else
#define FUNCTION_DESCR_SIZE	0
#endif
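/*
 * Under the ELFv1 ABI a function symbol points at a 24-byte function
 * descriptor (entry address, TOC pointer, environment pointer) rather than
 * at the code itself, so the generated program starts FUNCTION_DESCR_SIZE
 * bytes into the JIT image.
 */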

#define PLANT_INSTR(d, idx, instr)					      \
	do { if (d) { (d)[idx] = instr; } idx++; } while (0)
#define EMIT(instr)		PLANT_INSTR(image, ctx->idx, instr)

/* Long jump; (unconditional 'branch') */
#define PPC_JMP(dest)							      \
	do {								      \
		long offset = (long)(dest) - (ctx->idx * 4);		      \
		if (!is_offset_in_branch_range(offset)) {		      \
			pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);	\
			return -ERANGE;					      \
		}							      \
		EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc));		      \
	} while (0)

/* bl (unconditional 'branch' with link) to absolute address */
#define PPC_BL_ABS(dest)	EMIT(PPC_INST_BL |			      \
				     (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
/* "cond" here covers BO:BI fields. */
#define PPC_BCC_SHORT(cond, dest)					      \
	do {								      \
		long offset = (long)(dest) - (ctx->idx * 4);		      \
		if (!is_offset_in_cond_branch_range(offset)) {		      \
			pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);	\
			return -ERANGE;					      \
		}							      \
		EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc));	\
	} while (0)

/* Sign-extended 32-bit immediate load */
#define PPC_LI32(d, i)		do {					      \
		if ((int)(uintptr_t)(i) >= -32768 &&			      \
				(int)(uintptr_t)(i) < 32768)		      \
			EMIT(PPC_RAW_LI(d, i));				      \
		else {							      \
			EMIT(PPC_RAW_LIS(d, IMM_H(i)));			      \
			if (IMM_L(i))					      \
				EMIT(PPC_RAW_ORI(d, d, IMM_L(i)));	      \
		} } while(0)
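/*
 * e.g. PPC_LI32(3, 0x12345678) emits 'lis r3,0x1234; ori r3,r3,0x5678',
 * while a small immediate such as 0x1234 becomes a single 'li r3,0x1234'.
 * The plain high half (IMM_H, no '+1' adjustment) is correct here because
 * ori zero-extends its immediate instead of sign-extending it.
 */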

#ifdef CONFIG_PPC32
#define PPC_EX32(r, i)		EMIT(PPC_RAW_LI((r), (i) < 0 ? -1 : 0))
#endif

#define PPC_LI64(d, i)		do {					      \
		if ((long)(i) >= -2147483648 &&				      \
				(long)(i) < 2147483648)			      \
			PPC_LI32(d, i);					      \
		else {							      \
			if (!((uintptr_t)(i) & 0xffff800000000000ULL))	      \
				EMIT(PPC_RAW_LI(d, ((uintptr_t)(i) >> 32) &   \
						0xffff));		      \
			else {						      \
				EMIT(PPC_RAW_LIS(d, ((uintptr_t)(i) >> 48))); \
				if ((uintptr_t)(i) & 0x0000ffff00000000ULL)   \
					EMIT(PPC_RAW_ORI(d, d,		      \
					  ((uintptr_t)(i) >> 32) & 0xffff));  \
			}						      \
			EMIT(PPC_RAW_SLDI(d, d, 32));			      \
			if ((uintptr_t)(i) & 0x00000000ffff0000ULL)	      \
				EMIT(PPC_RAW_ORIS(d, d,			      \
					((uintptr_t)(i) >> 16) & 0xffff));    \
			if ((uintptr_t)(i) & 0x000000000000ffffULL)	      \
				EMIT(PPC_RAW_ORI(d, d, (uintptr_t)(i) &       \
						0xffff));		      \
		} } while (0)
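/*
 * e.g. PPC_LI64(3, 0x1234567890abcdefULL) expands to the five instructions
 * 'lis r3,0x1234; ori r3,r3,0x5678; sldi r3,r3,32; oris r3,r3,0x90ab;
 * ori r3,r3,0xcdef', while anything representable as a sign-extended
 * 32-bit value falls back to PPC_LI32().
 */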

#ifdef CONFIG_PPC64
#define PPC_FUNC_ADDR(d,i) do { PPC_LI64(d, i); } while(0)
#else
#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
#endif

/*
 * The fly in the ointment of code size changing from pass to pass is
 * avoided by padding the short branch case with a NOP. If code size differs
 * with different branch reaches we will have the issue of code moving from
 * one pass to the next and will need a few passes to converge on a stable
 * state.
 */
#define PPC_BCC(cond, dest)	do {					      \
		if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) {	\
			PPC_BCC_SHORT(cond, dest);			      \
			EMIT(PPC_RAW_NOP());				      \
		} else {						      \
			/* Flip the 'T or F' bit to invert comparison */      \
			PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4);  \
			PPC_JMP(dest);					      \
		} } while(0)
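/*
 * Both forms occupy exactly two instructions, e.g. for COND_EQ:
 *
 *	near target:	beq	<dest>		far target:	bne	$+8
 *			nop					b	<dest>
 */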

/* To create a branch condition, select a bit of cr0... */
#define CR0_LT		0
#define CR0_GT		1
#define CR0_EQ		2
/* ...and modify BO[3] */
#define COND_CMP_TRUE	0x100
#define COND_CMP_FALSE	0x000
/* Together, they make all required comparisons: */
#define COND_GT		(CR0_GT | COND_CMP_TRUE)
#define COND_GE		(CR0_LT | COND_CMP_FALSE)
#define COND_EQ		(CR0_EQ | COND_CMP_TRUE)
#define COND_NE		(CR0_EQ | COND_CMP_FALSE)
#define COND_LT		(CR0_LT | COND_CMP_TRUE)
#define COND_LE		(CR0_GT | COND_CMP_FALSE)

#define SEEN_FUNC	0x20000000 /* might call external helpers */
#define SEEN_STACK	0x40000000 /* uses BPF stack */
#define SEEN_TAILCALL	0x80000000 /* uses tail calls */

#define SEEN_VREG_MASK	0x1ff80000 /* Volatile registers r3-r12 */
#define SEEN_NVREG_MASK	0x0003ffff /* Non volatile registers r14-r31 */

#ifdef CONFIG_PPC64
extern const int b2p[MAX_BPF_JIT_REG + 2];
#else
extern const int b2p[MAX_BPF_JIT_REG + 1];
#endif

struct codegen_context {
	/*
	 * This is used to track register usage as well
	 * as calls to external helpers.
	 * - register usage is tracked with corresponding
	 *   bits (r3-r31)
	 * - the remaining bits can be used to track other
	 *   things -- for now, the top three bits are used,
	 *   encoded in the SEEN_* macros above
	 */
	unsigned int seen;
	unsigned int idx;
	unsigned int stack_size;
	int b2p[ARRAY_SIZE(b2p)];
};

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();	/* smp write barrier */
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
	return ctx->seen & (1 << (31 - i));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen |= 1 << (31 - i);
}

static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen &= ~(1 << (31 - i));
}
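/*
 * Register r<i> maps to bit (31 - i) of ctx->seen, e.g. r3 is bit 28
 * (0x10000000) and r31 is bit 0, which is how SEEN_VREG_MASK (r3-r12)
 * and SEEN_NVREG_MASK (r14-r31) above are derived.
 */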

void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, bool extra_pass);
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx);
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx);
void bpf_jit_realloc_regs(struct codegen_context *ctx);

#endif

#endif
/* bpf_jit.h: BPF JIT compiler for PPC64
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#ifndef _BPF_JIT_H
#define _BPF_JIT_H

#define BPF_PPC_STACK_LOCALS	32
#define BPF_PPC_STACK_BASIC	(48+64)
#define BPF_PPC_STACK_SAVE	(18*8)
#define BPF_PPC_STACKFRAME	(BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
				 BPF_PPC_STACK_SAVE)
#define BPF_PPC_SLOWPATH_FRAME	(48+64)

/*
 * Generated code register usage:
 *
 * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with:
 *
 * skb			r3	(Entry parameter)
 * A register		r4
 * X register		r5
 * addr param		r6
 * r7-r10		scratch
 * skb->data		r14
 * skb headlen		r15	(skb->len - skb->data_len)
 * m[0]			r16
 * m[...]		...
 * m[15]		r31
 */
#define r_skb		3
#define r_ret		3
#define r_A		4
#define r_X		5
#define r_addr		6
#define r_scratch1	7
#define r_D		14
#define r_HL		15
#define r_M		16

#ifndef __ASSEMBLY__

/*
 * Assembly helpers from arch/powerpc/net/bpf_jit.S:
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];

#define FUNCTION_DESCR_SIZE	24

/*
 * 16-bit immediate helper macros: HA() is for use with sign-extending instrs
 * (e.g. LD, ADDI). If the bottom 16 bits are "-ve", add another bit into the
 * top half to negate the effect (i.e. 0xffff + 1 = 0x(1)0000).
 */
#define IMM_H(i)	((uintptr_t)(i)>>16)
#define IMM_HA(i)	(((uintptr_t)(i)>>16) +				      \
			 (((uintptr_t)(i) & 0x8000) >> 15))
#define IMM_L(i)	((uintptr_t)(i) & 0xffff)
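/*
 * e.g. for i = 0x00018000: IMM_L(i) = 0x8000 sign-extends to -0x8000 in a
 * D-form load, so IMM_HA(i) = 0x0001 + 1 = 0x0002, and
 * 'addis r,base,2; ld r,-32768(r)' still adds up to base + 0x18000.
 */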

#define PLANT_INSTR(d, idx, instr)					      \
	do { if (d) { (d)[idx] = instr; } idx++; } while (0)
#define EMIT(instr)		PLANT_INSTR(image, ctx->idx, instr)

#define PPC_NOP()		EMIT(PPC_INST_NOP)
#define PPC_BLR()		EMIT(PPC_INST_BLR)
#define PPC_BLRL()		EMIT(PPC_INST_BLRL)
#define PPC_MTLR(r)		EMIT(PPC_INST_MTLR | __PPC_RT(r))
#define PPC_ADDI(d, a, i)	EMIT(PPC_INST_ADDI | __PPC_RT(d) |	      \
				     __PPC_RA(a) | IMM_L(i))
#define PPC_MR(d, a)		PPC_OR(d, a, a)
#define PPC_LI(r, i)		PPC_ADDI(r, 0, i)
#define PPC_ADDIS(d, a, i)	EMIT(PPC_INST_ADDIS |			      \
				     __PPC_RS(d) | __PPC_RA(a) | IMM_L(i))
#define PPC_LIS(r, i)		PPC_ADDIS(r, 0, i)
#define PPC_STD(r, base, i)	EMIT(PPC_INST_STD | __PPC_RS(r) |	      \
				     __PPC_RA(base) | ((i) & 0xfffc))

#define PPC_LD(r, base, i)	EMIT(PPC_INST_LD | __PPC_RT(r) |	      \
				     __PPC_RA(base) | IMM_L(i))
#define PPC_LWZ(r, base, i)	EMIT(PPC_INST_LWZ | __PPC_RT(r) |	      \
				     __PPC_RA(base) | IMM_L(i))
#define PPC_LHZ(r, base, i)	EMIT(PPC_INST_LHZ | __PPC_RT(r) |	      \
				     __PPC_RA(base) | IMM_L(i))
/* Convenience helpers for the above with 'far' offsets: */
#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i);     \
		else {	PPC_ADDIS(r, base, IMM_HA(i));			      \
			PPC_LD(r, r, IMM_L(i)); } } while(0)

#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i);   \
		else {	PPC_ADDIS(r, base, IMM_HA(i));			      \
			PPC_LWZ(r, r, IMM_L(i)); } } while(0)

#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i);   \
		else {	PPC_ADDIS(r, base, IMM_HA(i));			      \
			PPC_LHZ(r, r, IMM_L(i)); } } while(0)
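/*
 * e.g. PPC_LWZ_OFFS(r_A, r_skb, 0x12345) emits
 * 'addis r4,r3,1; lwz r4,0x2345(r4)' instead of a single lwz.
 */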

#define PPC_CMPWI(a, i)		EMIT(PPC_INST_CMPWI | __PPC_RA(a) | IMM_L(i))
#define PPC_CMPDI(a, i)		EMIT(PPC_INST_CMPDI | __PPC_RA(a) | IMM_L(i))
#define PPC_CMPLWI(a, i)	EMIT(PPC_INST_CMPLWI | __PPC_RA(a) | IMM_L(i))
#define PPC_CMPLW(a, b)		EMIT(PPC_INST_CMPLW | __PPC_RA(a) | __PPC_RB(b))

#define PPC_SUB(d, a, b)	EMIT(PPC_INST_SUB | __PPC_RT(d) |	      \
				     __PPC_RB(a) | __PPC_RA(b))
#define PPC_ADD(d, a, b)	EMIT(PPC_INST_ADD | __PPC_RT(d) |	      \
				     __PPC_RA(a) | __PPC_RB(b))
#define PPC_MUL(d, a, b)	EMIT(PPC_INST_MULLW | __PPC_RT(d) |	      \
				     __PPC_RA(a) | __PPC_RB(b))
#define PPC_MULHWU(d, a, b)	EMIT(PPC_INST_MULHWU | __PPC_RT(d) |	      \
				     __PPC_RA(a) | __PPC_RB(b))
#define PPC_MULI(d, a, i)	EMIT(PPC_INST_MULLI | __PPC_RT(d) |	      \
				     __PPC_RA(a) | IMM_L(i))
#define PPC_DIVWU(d, a, b)	EMIT(PPC_INST_DIVWU | __PPC_RT(d) |	      \
				     __PPC_RA(a) | __PPC_RB(b))
#define PPC_AND(d, a, b)	EMIT(PPC_INST_AND | __PPC_RA(d) |	      \
				     __PPC_RS(a) | __PPC_RB(b))
#define PPC_ANDI(d, a, i)	EMIT(PPC_INST_ANDI | __PPC_RA(d) |	      \
				     __PPC_RS(a) | IMM_L(i))
#define PPC_AND_DOT(d, a, b)	EMIT(PPC_INST_ANDDOT | __PPC_RA(d) |	      \
				     __PPC_RS(a) | __PPC_RB(b))
#define PPC_OR(d, a, b)		EMIT(PPC_INST_OR | __PPC_RA(d) |	      \
				     __PPC_RS(a) | __PPC_RB(b))
#define PPC_ORI(d, a, i)	EMIT(PPC_INST_ORI | __PPC_RA(d) |	      \
				     __PPC_RS(a) | IMM_L(i))
#define PPC_ORIS(d, a, i)	EMIT(PPC_INST_ORIS | __PPC_RA(d) |	      \
				     __PPC_RS(a) | IMM_L(i))
#define PPC_SLW(d, a, s)	EMIT(PPC_INST_SLW | __PPC_RA(d) |	      \
				     __PPC_RS(a) | __PPC_RB(s))
#define PPC_SRW(d, a, s)	EMIT(PPC_INST_SRW | __PPC_RA(d) |	      \
				     __PPC_RS(a) | __PPC_RB(s))
/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
#define PPC_SLWI(d, a, i)	EMIT(PPC_INST_RLWINM | __PPC_RA(d) |	      \
				     __PPC_RS(a) | __PPC_SH(i) |	      \
				     __PPC_MB(0) | __PPC_ME(31-(i)))
/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */
#define PPC_SRWI(d, a, i)	EMIT(PPC_INST_RLWINM | __PPC_RA(d) |	      \
				     __PPC_RS(a) | __PPC_SH(32-(i)) |	      \
				     __PPC_MB(i) | __PPC_ME(31))
/* sldi = rldicr Rx, Ry, n, 63-n */
#define PPC_SLDI(d, a, i)	EMIT(PPC_INST_RLDICR | __PPC_RA(d) |	      \
				     __PPC_RS(a) | __PPC_SH(i) |	      \
				     __PPC_MB(63-(i)) | (((i) & 0x20) >> 4))
#define PPC_NEG(d, a)		EMIT(PPC_INST_NEG | __PPC_RT(d) | __PPC_RA(a))

/* Long jump; (unconditional 'branch') */
#define PPC_JMP(dest)		EMIT(PPC_INST_BRANCH |			      \
				     (((dest) - (ctx->idx * 4)) & 0x03fffffc))
/* "cond" here covers BO:BI fields. */
#define PPC_BCC_SHORT(cond, dest)	EMIT(PPC_INST_BRANCH_COND |	      \
					     (((cond) & 0x3ff) << 16) |	      \
					     (((dest) - (ctx->idx * 4)) &     \
					      0xfffc))
#define PPC_LI32(d, i)		do { PPC_LI(d, IMM_L(i));		      \
		if ((u32)(uintptr_t)(i) >= 32768) {			      \
			PPC_ADDIS(d, d, IMM_HA(i));			      \
		} } while(0)
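/*
 * e.g. PPC_LI32(r_A, 0xdeadbeef) emits 'li r4,-16657; addis r4,r4,0xdeae':
 * li sign-extends 0xbeef, and the IMM_HA() '+1' in the addis compensates,
 * leaving 0xdeadbeef in the low 32 bits of r4.
 */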
#define PPC_LI64(d, i)		do {					      \
		if (!((uintptr_t)(i) & 0xffffffff00000000ULL))		      \
			PPC_LI32(d, i);					      \
		else {							      \
			PPC_LIS(d, ((uintptr_t)(i) >> 48));		      \
			if ((uintptr_t)(i) & 0x0000ffff00000000ULL)	      \
				PPC_ORI(d, d,				      \
					((uintptr_t)(i) >> 32) & 0xffff);     \
			PPC_SLDI(d, d, 32);				      \
			if ((uintptr_t)(i) & 0x00000000ffff0000ULL)	      \
				PPC_ORIS(d, d,				      \
					((uintptr_t)(i) >> 16) & 0xffff);     \
			if ((uintptr_t)(i) & 0x000000000000ffffULL)	      \
				PPC_ORI(d, d, (uintptr_t)(i) & 0xffff);	      \
		} } while (0)

static inline bool is_nearbranch(int offset)
{
	return (offset < 32768) && (offset >= -32768);
}

/*
 * The fly in the ointment of code size changing from pass to pass is
 * avoided by padding the short branch case with a NOP. If code size differs
 * with different branch reaches we will have the issue of code moving from
 * one pass to the next and will need a few passes to converge on a stable
 * state.
 */
#define PPC_BCC(cond, dest)	do {					      \
		if (is_nearbranch((dest) - (ctx->idx * 4))) {		      \
			PPC_BCC_SHORT(cond, dest);			      \
			PPC_NOP();					      \
		} else {						      \
			/* Flip the 'T or F' bit to invert comparison */      \
			PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4);  \
			PPC_JMP(dest);					      \
		} } while(0)

/* To create a branch condition, select a bit of cr0... */
#define CR0_LT		0
#define CR0_GT		1
#define CR0_EQ		2
/* ...and modify BO[3] */
#define COND_CMP_TRUE	0x100
#define COND_CMP_FALSE	0x000
/* Together, they make all required comparisons: */
#define COND_GT		(CR0_GT | COND_CMP_TRUE)
#define COND_GE		(CR0_LT | COND_CMP_FALSE)
#define COND_EQ		(CR0_EQ | COND_CMP_TRUE)
#define COND_NE		(CR0_EQ | COND_CMP_FALSE)
#define COND_LT		(CR0_LT | COND_CMP_TRUE)

#define SEEN_DATAREF	0x10000 /* might call external helpers */
#define SEEN_XREG	0x20000 /* X reg is used */
#define SEEN_MEM	0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
				 * storage */
#define SEEN_MEM_MSK	0x0ffff
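/* e.g. ctx->seen |= SEEN_MEM | (1 << 3) marks scratch slot m[3] (r19) as used */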

struct codegen_context {
	unsigned int seen;
	unsigned int idx;
	int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
};

#endif

#endif