// SPDX-License-Identifier: GPL-2.0-only
/* bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit32.h"

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		/* Make stackframe */
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);

			/* Back up non-volatile regs. */
			PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
			}
		}
		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
	}

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *   r_HL = skb->len - skb->data_len
		 *   r_D  = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	/* Make sure we don't leak kernel information to the user. */
	if (bpf_needs_clear_a(&filter[0]))
		PPC_LI(r_A, 0);
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
			PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-volatile registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	PPC_BLR();
}

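/*
 * Pick the skb load helper variant from the sign of the constant K: a
 * negative K at or above SKF_LL_OFF selects the _negative_offset helper,
 * a non-negative K selects the _positive_offset helper, and any other
 * negative K falls back to the generic helper, which validates the
 * offset at run time. For example, K = SKF_NET_OFF + 12 selects
 * sk_load_word_negative_offset for a word load.
 */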
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;
		u16 code = bpf_anc_helper(&filter[i]);

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (code) {
		/*** ALU ops ***/
		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
			if (!K)
				break;
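			/*
			 * addi sign-extends its 16-bit immediate, so for
			 * K >= 32768 a second addis instruction adds
			 * IMM_HA(K), the high half adjusted by 0x8000 to
			 * compensate. E.g. K = 0x18000 emits
			 * addi r_A,r_A,-0x8000 then addis r_A,r_A,2 for a
			 * net 0x18000.
			 */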
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MULW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MULW(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
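			/*
			 * There is no remainder instruction at this ISA
			 * level, so A % X is computed as A - (A / X) * X;
			 * the BPF_MOD | BPF_K case below uses the same
			 * trick.
			 */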
			if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
				PPC_DIVWU(r_scratch1, r_A, r_X);
				PPC_MULW(r_scratch1, r_X, r_scratch1);
				PPC_SUB(r_A, r_A, r_scratch1);
			} else {
				PPC_DIVWU(r_A, r_A, r_X);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
			PPC_MULW(r_scratch1, r_scratch2, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
			if (K == 1)
				break;
			PPC_LI32(r_scratch1, K);
			PPC_DIVWU(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
			if (IMM_L(K))
				PPC_XORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_RET | BPF_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we have stuff to clean up.
			 * Otherwise, if there's nothing to tidy, just return.
			 * If we /are/ the last instruction, we're about to
			 * fall through to the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2. Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_RET | BPF_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_MISC | BPF_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_MISC | BPF_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

		/*** Constant loads/M[] access ***/
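		/*
		 * M[] slots map directly onto non-volatile GPRs starting at
		 * r_M; the low bits of ctx->seen record which slots are
		 * used, so the prologue/epilogue save and restore only
		 * those registers.
		 */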
		case BPF_LD | BPF_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_LDX | BPF_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_LD | BPF_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LDX | BPF_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
			PPC_LWZ_OFFS(r_A, r_skb, K);
			break;
		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

		/*** Ancillary info loads ***/
		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  type) != 2);
			PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				PPC_LWZ_OFFS(r_A, r_scratch1,
					     offsetof(struct net_device, ifindex));
			} else {
				PPC_LHZ_OFFS(r_A, r_scratch1,
					     offsetof(struct net_device, type));
			}

			break;
		case BPF_ANC | SKF_AD_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  hash));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);

			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
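			/*
			 * vlan_present is a single bit within a flags byte:
			 * load the byte, shift the bit down to bit 0 and, if
			 * it wasn't already the top bit of the byte, mask
			 * off the rest.
			 */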
			PPC_LBZ_OFFS(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET());
			if (PKT_VLAN_PRESENT_BIT)
				PPC_SRWI(r_A, r_A, PKT_VLAN_PRESENT_BIT);
			if (PKT_VLAN_PRESENT_BIT < 7)
				PPC_ANDI(r_A, r_A, 1);
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
			PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
			PPC_SRWI(r_A, r_A, 5);
			break;
		case BPF_ANC | SKF_AD_CPU:
			PPC_BPF_LOAD_CPU(r_A);
			break;

		/*** Absolute loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_LD | BPF_H | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_B | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

		/*** Indirect loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K]. Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_LDX | BPF_B | BPF_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;

		/*** Jump and branches ***/
		case BPF_JMP | BPF_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/*
			 * Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes. Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction. Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in). Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used. Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter. On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code. On subsequent passes, branches will be
	 * generated short or long and code size will reduce. With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real. Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long. The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(alloclen);
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/*
		 * Note that we output the base address of code_base rather
		 * than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

	bpf_flush_icache(code_base, code_base + (proglen/4));

#ifdef CONFIG_PPC64
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;

out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}
// SPDX-License-Identifier: GPL-2.0-only
/*
 * eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/bpf.h>

#include <asm/kprobes.h>
#include <asm/text-patching.h>

#include "bpf_jit.h"

/* These offsets are from bpf prog end and stay the same across progs */
static int bpf_jit_ool_stub, bpf_jit_long_branch_stub;

static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
}

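/*
 * A safe branch target for the long branch stub while no trampoline is
 * attached, so that other CPUs always land somewhere valid while the stub
 * is being patched; see the synchronization notes above
 * bpf_arch_text_poke() below.
 */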
void dummy_tramp(void);

asm (
"	.pushsection .text, \"ax\", @progbits	;"
"	.global dummy_tramp			;"
"	.type dummy_tramp, @function		;"
"dummy_tramp:					;"
#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
"	blr					;"
#else
/* LR is always in r11, so we don't need a 'mflr r11' here */
"	mtctr	11				;"
"	mtlr	0				;"
"	bctr					;"
#endif
"	.size dummy_tramp, .-dummy_tramp	;"
"	.popsection				;"
);

void bpf_jit_build_fentry_stubs(u32 *image, struct codegen_context *ctx)
{
	int ool_stub_idx, long_branch_stub_idx;

	/*
	 * Out-of-line stub:
	 *	mflr	r0
	 *	[b|bl]	tramp
	 *	mtlr	r0	// only with CONFIG_PPC_FTRACE_OUT_OF_LINE
	 *	b	bpf_func + 4
	 */
	ool_stub_idx = ctx->idx;
	EMIT(PPC_RAW_MFLR(_R0));
	EMIT(PPC_RAW_NOP());
	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
		EMIT(PPC_RAW_MTLR(_R0));
	WARN_ON_ONCE(!is_offset_in_branch_range(4 - (long)ctx->idx * 4));
	EMIT(PPC_RAW_BRANCH(4 - (long)ctx->idx * 4));

	/*
	 * Long branch stub:
	 *	.long	<dummy_tramp_addr>
	 *	mflr	r11
	 *	bcl	20,31,$+4
	 *	mflr	r12
	 *	ld	r12, -8-SZL(r12)
	 *	mtctr	r12
	 *	mtlr	r11	// needed to retain ftrace ABI
	 *	bctr
	 */
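	/*
	 * bcl 20,31,$+4 followed by mflr r12 is the usual PC-discovery
	 * idiom: bcl sets LR to the next instruction, so r12 holds the
	 * address just past the bcl and the trampoline address planted at
	 * the top of the stub is found at r12 - 8 - SZL.
	 */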
	if (image)
		*((unsigned long *)&image[ctx->idx]) = (unsigned long)dummy_tramp;
	ctx->idx += SZL / 4;
	long_branch_stub_idx = ctx->idx;
	EMIT(PPC_RAW_MFLR(_R11));
	EMIT(PPC_RAW_BCL4());
	EMIT(PPC_RAW_MFLR(_R12));
	EMIT(PPC_RAW_LL(_R12, _R12, -8-SZL));
	EMIT(PPC_RAW_MTCTR(_R12));
	EMIT(PPC_RAW_MTLR(_R11));
	EMIT(PPC_RAW_BCTR());

	if (!bpf_jit_ool_stub) {
		bpf_jit_ool_stub = (ctx->idx - ool_stub_idx) * 4;
		bpf_jit_long_branch_stub = (ctx->idx - long_branch_stub_idx) * 4;
	}
}

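/*
 * Emit a branch to the epilogue. If the epilogue is out of direct branch
 * range, emit an alternate copy of the epilogue inline (only once) and
 * branch to that instead.
 */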
int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
{
	if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
		PPC_JMP(exit_addr);
	} else if (ctx->alt_exit_addr) {
		if (WARN_ON(!is_offset_in_branch_range((long)ctx->alt_exit_addr - (ctx->idx * 4))))
			return -1;
		PPC_JMP(ctx->alt_exit_addr);
	} else {
		ctx->alt_exit_addr = ctx->idx * 4;
		bpf_jit_build_epilogue(image, ctx);
	}

	return 0;
}

struct powerpc_jit_data {
	/* address of rw header */
	struct bpf_binary_header *hdr;
	/* address of ro final header */
	struct bpf_binary_header *fhdr;
	u32 *addrs;
	u8 *fimage;
	u32 proglen;
	struct codegen_context ctx;
};

bool bpf_jit_needs_zext(void)
{
	return true;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct powerpc_jit_data *jit_data;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *fhdr = NULL;
	struct bpf_binary_header *hdr = NULL;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;
	bool extra_pass = false;
	u8 *fimage = NULL;
	u32 *fcode_base;
	u32 extable_len;
	u32 fixup_len;

	if (!fp->jit_requested)
		return org_fp;

	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;

	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = org_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}

	flen = fp->len;
	addrs = jit_data->addrs;
	if (addrs) {
		cgctx = jit_data->ctx;
		/*
		 * JIT compiled to a writable location (image/code_base) first.
		 * It is then moved to the readonly final location (fimage/fcode_base)
		 * using instruction patching.
		 */
		fimage = jit_data->fimage;
		fhdr = jit_data->fhdr;
		proglen = jit_data->proglen;
		hdr = jit_data->hdr;
		image = (void *)hdr + ((void *)fimage - (void *)fhdr);
		extra_pass = true;
		/* During extra pass, ensure index is reset before repopulating extable entries */
		cgctx.exentry_idx = 0;
		goto skip_init_ctx;
	}

	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out_addrs;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));
	bpf_jit_init_reg_mapping(&cgctx);

	/* Make sure that the stack is quadword aligned. */
	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out_addrs;
	}

	/*
	 * If we have seen a tail call, we need a second pass.
	 * This is because bpf_jit_emit_common_epilogue() is called
	 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
	 * We also need a second pass if we ended up with too large
	 * a program so as to ensure BPF_EXIT branches are in range.
	 */
	if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
		cgctx.idx = 0;
		if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
			fp = org_fp;
			goto out_addrs;
		}
	}

	bpf_jit_realloc_regs(&cgctx);
	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(NULL, &cgctx);
	addrs[fp->len] = cgctx.idx * 4;
	bpf_jit_build_epilogue(NULL, &cgctx);

	fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
	extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;

	fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image,
					 bpf_jit_fill_ill_insns);
	if (!fhdr) {
		fp = org_fp;
		goto out_addrs;
	}

	if (extable_len)
		fp->aux->extable = (void *)fimage + FUNCTION_DESCR_SIZE + proglen + fixup_len;

skip_init_ctx:
	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
	fcode_base = (u32 *)(fimage + FUNCTION_DESCR_SIZE);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		cgctx.alt_exit_addr = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		if (bpf_jit_build_body(fp, code_base, fcode_base, &cgctx, addrs, pass,
				       extra_pass)) {
			bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size));
			bpf_jit_binary_pack_free(fhdr, hdr);
			fp = org_fp;
			goto out_addrs;
		}
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/*
		 * Note that we output the base address of code_base rather
		 * than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef CONFIG_PPC64_ELF_ABI_V1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)fcode_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)fimage;
	fp->jited = 1;
	fp->jited_len = cgctx.idx * 4 + FUNCTION_DESCR_SIZE;

	if (!fp->is_func || extra_pass) {
		if (bpf_jit_binary_pack_finalize(fhdr, hdr)) {
			fp = org_fp;
			goto out_addrs;
		}
		bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	} else {
		jit_data->addrs = addrs;
		jit_data->ctx = cgctx;
		jit_data->proglen = proglen;
		jit_data->fimage = fimage;
		jit_data->fhdr = fhdr;
		jit_data->hdr = hdr;
	}

out:
	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}

/*
 * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
 * this function, as this only applies to BPF_PROBE_MEM, for now.
 */
int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
			  struct codegen_context *ctx, int insn_idx, int jmp_off,
			  int dst_reg)
{
	off_t offset;
	unsigned long pc;
	struct exception_table_entry *ex, *ex_entry;
	u32 *fixup;

	/* Populate extable entries only in the last pass */
	if (pass != 2)
		return 0;

	if (!fp->aux->extable ||
	    WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
		return -EINVAL;

	/*
	 * Program is first written to image before copying to the
	 * final location (fimage). Accordingly, update in the image first.
	 * As all offsets used are relative, copying as is to the
	 * final location should be alright.
	 */
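	/*
	 * Layout at the end of the image (see the alloclen computation in
	 * bpf_int_jit_compile()):
	 *   [ prog ][ fixups: num_exentries * BPF_FIXUP_LEN words ][ extable ]
	 * so this entry's fixup slot sits below the extable base.
	 */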
	pc = (unsigned long)&image[insn_idx];
	ex = (void *)fp->aux->extable - (void *)fimage + (void *)image;

	fixup = (void *)ex -
		(fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
		(ctx->exentry_idx * BPF_FIXUP_LEN * 4);

	fixup[0] = PPC_RAW_LI(dst_reg, 0);
	if (IS_ENABLED(CONFIG_PPC32))
		fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */

	fixup[BPF_FIXUP_LEN - 1] =
		PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);

	ex_entry = &ex[ctx->exentry_idx];

	offset = pc - (long)&ex_entry->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex_entry->insn = offset;

	offset = (long)fixup - (long)&ex_entry->fixup;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex_entry->fixup = offset;

	ctx->exentry_idx++;
	return 0;
}

void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	int err;

	if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
		return ERR_PTR(-EINVAL);

	mutex_lock(&text_mutex);
	err = patch_instructions(dst, src, len, false);
	mutex_unlock(&text_mutex);

	return err ? ERR_PTR(err) : dst;
}

int bpf_arch_text_invalidate(void *dst, size_t len)
{
	u32 insn = BREAKPOINT_INSTRUCTION;
	int ret;

	if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
		return -EINVAL;

	mutex_lock(&text_mutex);
	ret = patch_instructions(dst, &insn, len, true);
	mutex_unlock(&text_mutex);

	return ret;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited) {
		struct powerpc_jit_data *jit_data = fp->aux->jit_data;
		struct bpf_binary_header *hdr;

		/*
		 * If we fail the final pass of JIT (from jit_subprogs),
		 * the program may not be finalized yet. Call finalize here
		 * before freeing it.
		 */
		if (jit_data) {
			bpf_jit_binary_pack_finalize(jit_data->fhdr, jit_data->hdr);
			kvfree(jit_data->addrs);
			kfree(jit_data);
		}
		hdr = bpf_jit_binary_pack_hdr(fp);
		bpf_jit_binary_pack_free(hdr, NULL);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
	}

	bpf_prog_unlock_free(fp);
}

bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}

bool bpf_jit_supports_far_kfunc_call(void)
{
	return IS_ENABLED(CONFIG_PPC64);
}

void *arch_alloc_bpf_trampoline(unsigned int size)
{
	return bpf_prog_pack_alloc(size, bpf_jit_fill_ill_insns);
}

void arch_free_bpf_trampoline(void *image, unsigned int size)
{
	bpf_prog_pack_free(image, size);
}

int arch_protect_bpf_trampoline(void *image, unsigned int size)
{
	return 0;
}

static int invoke_bpf_prog(u32 *image, u32 *ro_image, struct codegen_context *ctx,
			   struct bpf_tramp_link *l, int regs_off, int retval_off,
			   int run_ctx_off, bool save_ret)
{
	struct bpf_prog *p = l->link.prog;
	ppc_inst_t branch_insn;
	u32 jmp_idx;
	int ret = 0;

	/* Save cookie */
	if (IS_ENABLED(CONFIG_PPC64)) {
		PPC_LI64(_R3, l->cookie);
		EMIT(PPC_RAW_STD(_R3, _R1, run_ctx_off + offsetof(struct bpf_tramp_run_ctx,
								  bpf_cookie)));
	} else {
		PPC_LI32(_R3, l->cookie >> 32);
		PPC_LI32(_R4, l->cookie);
		EMIT(PPC_RAW_STW(_R3, _R1,
				 run_ctx_off + offsetof(struct bpf_tramp_run_ctx, bpf_cookie)));
		EMIT(PPC_RAW_STW(_R4, _R1,
				 run_ctx_off + offsetof(struct bpf_tramp_run_ctx, bpf_cookie) + 4));
	}

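	/*
	 * r25/r26 are non-volatile and are saved/restored by the trampoline
	 * (see nvr_off in __arch_prepare_bpf_trampoline()), so the prog
	 * pointer and start time survive the calls made below.
	 */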
	/* __bpf_prog_enter(p, &bpf_tramp_run_ctx) */
	PPC_LI_ADDR(_R3, p);
	EMIT(PPC_RAW_MR(_R25, _R3));
	EMIT(PPC_RAW_ADDI(_R4, _R1, run_ctx_off));
	ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
					 (unsigned long)bpf_trampoline_enter(p));
	if (ret)
		return ret;

	/* Remember prog start time returned by __bpf_prog_enter */
	EMIT(PPC_RAW_MR(_R26, _R3));

	/*
	 * if (__bpf_prog_enter(p) == 0)
	 *	goto skip_exec_of_prog;
	 *
	 * Emit a nop to be later patched with conditional branch, once offset is known
	 */
	EMIT(PPC_RAW_CMPLI(_R3, 0));
	jmp_idx = ctx->idx;
	EMIT(PPC_RAW_NOP());

	/* p->bpf_func(ctx) */
	EMIT(PPC_RAW_ADDI(_R3, _R1, regs_off));
	if (!p->jited)
		PPC_LI_ADDR(_R4, (unsigned long)p->insnsi);
	if (!create_branch(&branch_insn, (u32 *)&ro_image[ctx->idx], (unsigned long)p->bpf_func,
			   BRANCH_SET_LINK)) {
		if (image)
			image[ctx->idx] = ppc_inst_val(branch_insn);
		ctx->idx++;
	} else {
		EMIT(PPC_RAW_LL(_R12, _R25, offsetof(struct bpf_prog, bpf_func)));
		EMIT(PPC_RAW_MTCTR(_R12));
		EMIT(PPC_RAW_BCTRL());
	}

	if (save_ret)
		EMIT(PPC_RAW_STL(_R3, _R1, retval_off));

	/* Fix up branch */
	if (image) {
		if (create_cond_branch(&branch_insn, &image[jmp_idx],
				       (unsigned long)&image[ctx->idx], COND_EQ << 16))
			return -EINVAL;
		image[jmp_idx] = ppc_inst_val(branch_insn);
	}

	/* __bpf_prog_exit(p, start_time, &bpf_tramp_run_ctx) */
	EMIT(PPC_RAW_MR(_R3, _R25));
	EMIT(PPC_RAW_MR(_R4, _R26));
	EMIT(PPC_RAW_ADDI(_R5, _R1, run_ctx_off));
	ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
					 (unsigned long)bpf_trampoline_exit(p));

	return ret;
}

static int invoke_bpf_mod_ret(u32 *image, u32 *ro_image, struct codegen_context *ctx,
			      struct bpf_tramp_links *tl, int regs_off, int retval_off,
			      int run_ctx_off, u32 *branches)
{
	int i;

	/*
	 * The first fmod_ret program will receive a garbage return value.
	 * Set this to 0 to avoid confusing the program.
	 */
	EMIT(PPC_RAW_LI(_R3, 0));
	EMIT(PPC_RAW_STL(_R3, _R1, retval_off));
	for (i = 0; i < tl->nr_links; i++) {
		if (invoke_bpf_prog(image, ro_image, ctx, tl->links[i], regs_off, retval_off,
				    run_ctx_off, true))
			return -EINVAL;

		/*
		 * mod_ret prog stored return value after prog ctx. Emit:
		 *	if (*(u64 *)(ret_val) != 0)
		 *		goto do_fexit;
		 */
		EMIT(PPC_RAW_LL(_R3, _R1, retval_off));
		EMIT(PPC_RAW_CMPLI(_R3, 0));

		/*
		 * Save the location of the branch and generate a nop, which is
		 * replaced with a conditional jump once do_fexit (i.e. the
		 * start of the fexit invocation) is finalized.
		 */
		branches[i] = ctx->idx;
		EMIT(PPC_RAW_NOP());
	}

	return 0;
}

static void bpf_trampoline_setup_tail_call_cnt(u32 *image, struct codegen_context *ctx,
					       int func_frame_offset, int r4_off)
{
	if (IS_ENABLED(CONFIG_PPC64)) {
		/* See bpf_jit_stack_tailcallcnt() */
		int tailcallcnt_offset = 6 * 8;

		EMIT(PPC_RAW_LL(_R3, _R1, func_frame_offset - tailcallcnt_offset));
		EMIT(PPC_RAW_STL(_R3, _R1, -tailcallcnt_offset));
	} else {
		/* See bpf_jit_stack_offsetof() and BPF_PPC_TC */
		EMIT(PPC_RAW_LL(_R4, _R1, r4_off));
	}
}

static void bpf_trampoline_restore_tail_call_cnt(u32 *image, struct codegen_context *ctx,
						 int func_frame_offset, int r4_off)
{
	if (IS_ENABLED(CONFIG_PPC64)) {
		/* See bpf_jit_stack_tailcallcnt() */
		int tailcallcnt_offset = 6 * 8;

		EMIT(PPC_RAW_LL(_R3, _R1, -tailcallcnt_offset));
		EMIT(PPC_RAW_STL(_R3, _R1, func_frame_offset - tailcallcnt_offset));
	} else {
		/* See bpf_jit_stack_offsetof() and BPF_PPC_TC */
		EMIT(PPC_RAW_STL(_R4, _R1, r4_off));
	}
}

static void bpf_trampoline_save_args(u32 *image, struct codegen_context *ctx, int func_frame_offset,
				     int nr_regs, int regs_off)
{
	int param_save_area_offset;

	param_save_area_offset = func_frame_offset; /* the two frames we allotted */
	param_save_area_offset += STACK_FRAME_MIN_SIZE; /* param save area is past frame header */

	for (int i = 0; i < nr_regs; i++) {
		if (i < 8) {
			EMIT(PPC_RAW_STL(_R3 + i, _R1, regs_off + i * SZL));
		} else {
			EMIT(PPC_RAW_LL(_R3, _R1, param_save_area_offset + i * SZL));
			EMIT(PPC_RAW_STL(_R3, _R1, regs_off + i * SZL));
		}
	}
}

/* Used when restoring just the register parameters when returning back */
static void bpf_trampoline_restore_args_regs(u32 *image, struct codegen_context *ctx,
					     int nr_regs, int regs_off)
{
	for (int i = 0; i < nr_regs && i < 8; i++)
		EMIT(PPC_RAW_LL(_R3 + i, _R1, regs_off + i * SZL));
}

/* Used when we call into the traced function. Replicate parameter save area */
static void bpf_trampoline_restore_args_stack(u32 *image, struct codegen_context *ctx,
					      int func_frame_offset, int nr_regs, int regs_off)
{
	int param_save_area_offset;

	param_save_area_offset = func_frame_offset; /* the two frames we allotted */
	param_save_area_offset += STACK_FRAME_MIN_SIZE; /* param save area is past frame header */

	for (int i = 8; i < nr_regs; i++) {
		EMIT(PPC_RAW_LL(_R3, _R1, param_save_area_offset + i * SZL));
		EMIT(PPC_RAW_STL(_R3, _R1, STACK_FRAME_MIN_SIZE + i * SZL));
	}
	bpf_trampoline_restore_args_regs(image, ctx, nr_regs, regs_off);
}

static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
					 void *rw_image_end, void *ro_image,
					 const struct btf_func_model *m, u32 flags,
					 struct bpf_tramp_links *tlinks,
					 void *func_addr)
{
	int regs_off, nregs_off, ip_off, run_ctx_off, retval_off, nvr_off, alt_lr_off, r4_off = 0;
	int i, ret, nr_regs, bpf_frame_size = 0, bpf_dummy_frame_size = 0, func_frame_offset;
	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
	struct codegen_context codegen_ctx, *ctx;
	u32 *image = (u32 *)rw_image;
	ppc_inst_t branch_insn;
	u32 *branches = NULL;
	bool save_ret;

	if (IS_ENABLED(CONFIG_PPC32))
		return -EOPNOTSUPP;

	nr_regs = m->nr_args;
	/* Extra registers for struct arguments */
	for (i = 0; i < m->nr_args; i++)
		if (m->arg_size[i] > SZL)
			nr_regs += round_up(m->arg_size[i], SZL) / SZL - 1;

	if (nr_regs > MAX_BPF_FUNC_ARGS)
		return -EOPNOTSUPP;

	ctx = &codegen_ctx;
	memset(ctx, 0, sizeof(*ctx));

	/*
	 * Generated stack layout:
	 *
	 * func prev back chain         [ back chain        ]
	 *                              [                   ]
	 * bpf prog redzone/tailcallcnt [ ...               ] 64 bytes (64-bit powerpc)
	 *                              [                   ] --
	 * LR save area                 [ r0 save (64-bit)  ]   | header
	 *                              [ r0 save (32-bit)  ]   |
	 * dummy frame for unwind       [ back chain 1      ] --
	 *                              [ padding           ] align stack frame
	 *       r4_off                 [ r4 (tailcallcnt)  ] optional - 32-bit powerpc
	 *       alt_lr_off             [ real lr (ool stub)] optional - actual lr
	 *                              [ r26               ]
	 *       nvr_off                [ r25               ] nvr save area
	 *       retval_off             [ return value      ]
	 *                              [ reg argN          ]
	 *                              [ ...               ]
	 *       regs_off               [ reg_arg1          ] prog ctx context
	 *       nregs_off              [ args count        ]
	 *       ip_off                 [ traced function   ]
	 *                              [ ...               ]
	 *       run_ctx_off            [ bpf_tramp_run_ctx ]
	 *                              [ reg argN          ]
	 *                              [ ...               ]
	 *       param_save_area        [ reg_arg1          ] min 8 doublewords, per ABI
	 *                              [ TOC save (64-bit) ] --
	 *                              [ LR save (64-bit)  ]   | header
	 *                              [ LR save (32-bit)  ]   |
	 * bpf trampoline frame         [ back chain 2      ] --
	 *
	 */

	/* Minimum stack frame header */
	bpf_frame_size = STACK_FRAME_MIN_SIZE;

	/*
	 * Room for parameter save area.
	 *
	 * As per the ABI, this is required if we call into the traced
	 * function (BPF_TRAMP_F_CALL_ORIG):
	 * - if the function takes more than 8 arguments for the rest to spill onto the stack
	 * - or, if the function has variadic arguments
	 * - or, if this function's prototype was not available to the caller
	 *
	 * Reserve space for at least 8 registers for now. This can be optimized later.
	 */
	bpf_frame_size += (nr_regs > 8 ? nr_regs : 8) * SZL;

	/* Room for struct bpf_tramp_run_ctx */
	run_ctx_off = bpf_frame_size;
	bpf_frame_size += round_up(sizeof(struct bpf_tramp_run_ctx), SZL);

	/* Room for IP address argument */
	ip_off = bpf_frame_size;
	if (flags & BPF_TRAMP_F_IP_ARG)
		bpf_frame_size += SZL;

	/* Room for args count */
	nregs_off = bpf_frame_size;
	bpf_frame_size += SZL;

	/* Room for args */
	regs_off = bpf_frame_size;
	bpf_frame_size += nr_regs * SZL;

	/* Room for return value of func_addr or fentry prog */
	retval_off = bpf_frame_size;
	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
	if (save_ret)
		bpf_frame_size += SZL;

	/* Room for nvr save area */
	nvr_off = bpf_frame_size;
	bpf_frame_size += 2 * SZL;

	/* Optional save area for actual LR in case of ool ftrace */
	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
		alt_lr_off = bpf_frame_size;
		bpf_frame_size += SZL;
	}

	if (IS_ENABLED(CONFIG_PPC32)) {
		if (nr_regs < 2) {
			r4_off = bpf_frame_size;
			bpf_frame_size += SZL;
		} else {
			r4_off = regs_off + SZL;
		}
	}

	/* Padding to align stack frame, if any */
	bpf_frame_size = round_up(bpf_frame_size, SZL * 2);

	/* Dummy frame size for proper unwind - includes the 64-byte red zone for 64-bit powerpc */
	bpf_dummy_frame_size = STACK_FRAME_MIN_SIZE + 64;

	/* Offset to the traced function's stack frame */
	func_frame_offset = bpf_dummy_frame_size + bpf_frame_size;

	/* Create dummy frame for unwind, store original return value */
	EMIT(PPC_RAW_STL(_R0, _R1, PPC_LR_STKOFF));
	/* Protect red zone where tail call count goes */
	EMIT(PPC_RAW_STLU(_R1, _R1, -bpf_dummy_frame_size));

	/* Create our stack frame */
	EMIT(PPC_RAW_STLU(_R1, _R1, -bpf_frame_size));

	/* 64-bit: Save TOC and load kernel TOC */
	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
		EMIT(PPC_RAW_STD(_R2, _R1, 24));
		PPC64_LOAD_PACA();
	}

	/* 32-bit: save tail call count in r4 */
	if (IS_ENABLED(CONFIG_PPC32) && nr_regs < 2)
		EMIT(PPC_RAW_STL(_R4, _R1, r4_off));

	bpf_trampoline_save_args(image, ctx, func_frame_offset, nr_regs, regs_off);

	/* Save our return address */
	EMIT(PPC_RAW_MFLR(_R3));
	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
		EMIT(PPC_RAW_STL(_R3, _R1, alt_lr_off));
	else
		EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF));

	/*
	 * Save ip address of the traced function.
	 * We could recover this from LR, but we would need to adjust for the
	 * OOL trampoline and the optional GEP area.
	 */
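	/*
	 * The slwi/srawi pair below sign-extends the low 26 bits of the
	 * loaded instruction word, i.e. the displacement of an I-form
	 * branch with AA/LK clear, recovering the target of the
	 * 'b bpf_func + 4' branch in the ool stub.
	 */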
	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) || flags & BPF_TRAMP_F_IP_ARG) {
		EMIT(PPC_RAW_LWZ(_R4, _R3, 4));
		EMIT(PPC_RAW_SLWI(_R4, _R4, 6));
		EMIT(PPC_RAW_SRAWI(_R4, _R4, 6));
		EMIT(PPC_RAW_ADD(_R3, _R3, _R4));
		EMIT(PPC_RAW_ADDI(_R3, _R3, 4));
	}

	if (flags & BPF_TRAMP_F_IP_ARG)
		EMIT(PPC_RAW_STL(_R3, _R1, ip_off));

	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
		/* Fake our LR for unwind */
		EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF));

	/* Save function arg count -- see bpf_get_func_arg_cnt() */
	EMIT(PPC_RAW_LI(_R3, nr_regs));
	EMIT(PPC_RAW_STL(_R3, _R1, nregs_off));

	/* Save nv regs */
	EMIT(PPC_RAW_STL(_R25, _R1, nvr_off));
	EMIT(PPC_RAW_STL(_R26, _R1, nvr_off + SZL));

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		PPC_LI_ADDR(_R3, (unsigned long)im);
		ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
						 (unsigned long)__bpf_tramp_enter);
		if (ret)
			return ret;
	}

	for (i = 0; i < fentry->nr_links; i++)
		if (invoke_bpf_prog(image, ro_image, ctx, fentry->links[i], regs_off, retval_off,
				    run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET))
			return -EINVAL;

	if (fmod_ret->nr_links) {
		branches = kcalloc(fmod_ret->nr_links, sizeof(u32), GFP_KERNEL);
		if (!branches)
			return -ENOMEM;

		if (invoke_bpf_mod_ret(image, ro_image, ctx, fmod_ret, regs_off, retval_off,
				       run_ctx_off, branches)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* Call the traced function */
	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		/*
		 * The address in the LR save area points to the correct point in the
		 * original function with both PPC_FTRACE_OUT_OF_LINE and the
		 * traditional ftrace instruction sequence.
		 */
		EMIT(PPC_RAW_LL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF));
		EMIT(PPC_RAW_MTCTR(_R3));

		/* Replicate tail_call_cnt before calling the original BPF prog */
		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
			bpf_trampoline_setup_tail_call_cnt(image, ctx, func_frame_offset, r4_off);

		/* Restore args */
		bpf_trampoline_restore_args_stack(image, ctx, func_frame_offset, nr_regs, regs_off);

		/* Restore TOC for 64-bit */
		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
			EMIT(PPC_RAW_LD(_R2, _R1, 24));
		EMIT(PPC_RAW_BCTRL());
		if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
			PPC64_LOAD_PACA();

		/* Store return value for bpf prog to access */
		EMIT(PPC_RAW_STL(_R3, _R1, retval_off));

		/* Restore updated tail_call_cnt */
		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
			bpf_trampoline_restore_tail_call_cnt(image, ctx, func_frame_offset, r4_off);

		/* Reserve space to patch branch instruction to skip fexit progs */
		im->ip_after_call = &((u32 *)ro_image)[ctx->idx];
		EMIT(PPC_RAW_NOP());
	}

	/* Update branches saved in invoke_bpf_mod_ret with address of do_fexit */
	for (i = 0; i < fmod_ret->nr_links && image; i++) {
		if (create_cond_branch(&branch_insn, &image[branches[i]],
				       (unsigned long)&image[ctx->idx], COND_NE << 16)) {
			ret = -EINVAL;
			goto cleanup;
		}

		image[branches[i]] = ppc_inst_val(branch_insn);
	}

	for (i = 0; i < fexit->nr_links; i++)
		if (invoke_bpf_prog(image, ro_image, ctx, fexit->links[i], regs_off, retval_off,
				    run_ctx_off, false)) {
			ret = -EINVAL;
			goto cleanup;
		}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		im->ip_epilogue = &((u32 *)ro_image)[ctx->idx];
		PPC_LI_ADDR(_R3, im);
		ret = bpf_jit_emit_func_call_rel(image, ro_image, ctx,
						 (unsigned long)__bpf_tramp_exit);
		if (ret)
			goto cleanup;
	}

	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		bpf_trampoline_restore_args_regs(image, ctx, nr_regs, regs_off);

	/* Restore return value of func_addr or fentry prog */
	if (save_ret)
		EMIT(PPC_RAW_LL(_R3, _R1, retval_off));

	/* Restore nv regs */
	EMIT(PPC_RAW_LL(_R26, _R1, nvr_off + SZL));
	EMIT(PPC_RAW_LL(_R25, _R1, nvr_off));

	/* Epilogue */
	if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL))
		EMIT(PPC_RAW_LD(_R2, _R1, 24));
	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
		/* Skip the traced function and return to parent */
		EMIT(PPC_RAW_ADDI(_R1, _R1, func_frame_offset));
		EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF));
		EMIT(PPC_RAW_MTLR(_R0));
		EMIT(PPC_RAW_BLR());
	} else {
		if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
			EMIT(PPC_RAW_LL(_R0, _R1, alt_lr_off));
			EMIT(PPC_RAW_MTLR(_R0));
			EMIT(PPC_RAW_ADDI(_R1, _R1, func_frame_offset));
			EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF));
			EMIT(PPC_RAW_BLR());
		} else {
			EMIT(PPC_RAW_LL(_R0, _R1, bpf_frame_size + PPC_LR_STKOFF));
			EMIT(PPC_RAW_MTCTR(_R0));
			EMIT(PPC_RAW_ADDI(_R1, _R1, func_frame_offset));
			EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF));
			EMIT(PPC_RAW_MTLR(_R0));
			EMIT(PPC_RAW_BCTR());
		}
	}

	/* Make sure the trampoline generation logic doesn't overflow */
	if (image && WARN_ON_ONCE(&image[ctx->idx] > (u32 *)rw_image_end - BPF_INSN_SAFETY)) {
		ret = -EFAULT;
		goto cleanup;
	}
	ret = ctx->idx * 4 + BPF_INSN_SAFETY * 4;

cleanup:
	kfree(branches);
	return ret;
}

int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
			     struct bpf_tramp_links *tlinks, void *func_addr)
{
	struct bpf_tramp_image im;
	void *image;
	int ret;

	/*
	 * Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
	 * This will NOT cause fragmentation in direct map, as we do not
	 * call set_memory_*() on this buffer.
	 *
	 * We cannot use kvmalloc here, because we need image to be in
	 * module memory range.
	 */
	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return -ENOMEM;

	ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
					    m, flags, tlinks, func_addr);
	bpf_jit_free_exec(image);

	return ret;
}

int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *func_addr)
{
	u32 size = image_end - image;
	void *rw_image, *tmp;
	int ret;

	/*
	 * rw_image doesn't need to be in module memory range, so we can
	 * use kvmalloc.
	 */
	rw_image = kvmalloc(size, GFP_KERNEL);
	if (!rw_image)
		return -ENOMEM;

	ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
					    flags, tlinks, func_addr);
	if (ret < 0)
		goto out;

	if (bpf_jit_enable > 1)
		bpf_jit_dump(1, ret - BPF_INSN_SAFETY * 4, 1, rw_image);

	tmp = bpf_arch_text_copy(image, rw_image, size);
	if (IS_ERR(tmp))
		ret = PTR_ERR(tmp);

out:
	kvfree(rw_image);
	return ret;
}

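/*
 * Patch a single instruction, but only after verifying that the
 * instruction currently at 'ip' matches what the caller expects to find
 * there.
 */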
static int bpf_modify_inst(void *ip, ppc_inst_t old_inst, ppc_inst_t new_inst)
{
	ppc_inst_t org_inst;

	if (copy_inst_from_kernel_nofault(&org_inst, ip)) {
		pr_err("0x%lx: fetching instruction failed\n", (unsigned long)ip);
		return -EFAULT;
	}

	if (!ppc_inst_equal(org_inst, old_inst)) {
		pr_err("0x%lx: expected (%08lx) != found (%08lx)\n",
		       (unsigned long)ip, ppc_inst_as_ulong(old_inst), ppc_inst_as_ulong(org_inst));
		return -EINVAL;
	}

	if (ppc_inst_equal(old_inst, new_inst))
		return 0;

	return patch_instruction(ip, new_inst);
}

static void do_isync(void *info __maybe_unused)
{
	isync();
}

/*
 * A 3-step process for bpf prog entry:
 * 1. At bpf prog entry, a single nop/b:
 * bpf_func:
 *	[nop|b]	ool_stub
 * 2. Out-of-line stub:
 * ool_stub:
 *	mflr	r0
 *	[b|bl]	<bpf_prog>/<long_branch_stub>
 *	mtlr	r0 // CONFIG_PPC_FTRACE_OUT_OF_LINE only
 *	b	bpf_func + 4
 * 3. Long branch stub:
 * long_branch_stub:
 *	.long	<branch_addr>/<dummy_tramp>
 *	mflr	r11
 *	bcl	20,31,$+4
 *	mflr	r12
 *	ld	r12, -16(r12)
 *	mtctr	r12
 *	mtlr	r11 // needed to retain ftrace ABI
 *	bctr
 *
 * dummy_tramp is used to reduce synchronization requirements.
 *
 * When attaching a bpf trampoline to a bpf prog, we do not need any
 * synchronization here since we always have a valid branch target regardless
 * of the order in which the above stores are seen. dummy_tramp ensures that
 * the long_branch stub goes to a valid destination on other cpus, even when
 * the branch to the long_branch stub is seen before the updated trampoline
 * address.
 *
 * However, when detaching a bpf trampoline from a bpf prog, or if changing
 * the bpf trampoline address, we need synchronization to ensure that other
 * cpus can no longer branch into the older trampoline so that it can be
 * safely freed. bpf_tramp_image_put() uses rcu_tasks to ensure all cpus
 * make forward progress, but we still need to ensure that other cpus
 * execute isync (or some CSI) so that they don't go back into the
 * trampoline again.
 */
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
		       void *old_addr, void *new_addr)
{
	unsigned long bpf_func, bpf_func_end, size, offset;
	ppc_inst_t old_inst, new_inst;
	int ret = 0, branch_flags;
	char name[KSYM_NAME_LEN];

	if (IS_ENABLED(CONFIG_PPC32))
		return -EOPNOTSUPP;

	bpf_func = (unsigned long)ip;
	branch_flags = poke_type == BPF_MOD_CALL ? BRANCH_SET_LINK : 0;

	/* We currently only support poking bpf programs */
	if (!__bpf_address_lookup(bpf_func, &size, &offset, name)) {
		pr_err("%s (0x%lx): kernel/modules are not supported\n", __func__, bpf_func);
		return -EOPNOTSUPP;
	}

	/*
	 * If we are not poking at bpf prog entry, then we are simply patching in/out
	 * an unconditional branch instruction at im->ip_after_call
	 */
	if (offset) {
		if (poke_type != BPF_MOD_JUMP) {
			pr_err("%s (0x%lx): calls are not supported in bpf prog body\n", __func__,
			       bpf_func);
			return -EOPNOTSUPP;
		}
		old_inst = ppc_inst(PPC_RAW_NOP());
		if (old_addr)
			if (create_branch(&old_inst, ip, (unsigned long)old_addr, 0))
				return -ERANGE;
		new_inst = ppc_inst(PPC_RAW_NOP());
		if (new_addr)
			if (create_branch(&new_inst, ip, (unsigned long)new_addr, 0))
				return -ERANGE;
		mutex_lock(&text_mutex);
		ret = bpf_modify_inst(ip, old_inst, new_inst);
		mutex_unlock(&text_mutex);

		/* Make sure all cpus see the new instruction */
		smp_call_function(do_isync, NULL, 1);
		return ret;
	}

	bpf_func_end = bpf_func + size;

	/* Address of the jmp/call instruction in the out-of-line stub */
	ip = (void *)(bpf_func_end - bpf_jit_ool_stub + 4);

	if (!is_offset_in_branch_range((long)ip - 4 - bpf_func)) {
		pr_err("%s (0x%lx): bpf prog too large, ool stub out of branch range\n", __func__,
		       bpf_func);
		return -ERANGE;
	}

	old_inst = ppc_inst(PPC_RAW_NOP());
	if (old_addr) {
		if (is_offset_in_branch_range(ip - old_addr))
			create_branch(&old_inst, ip, (unsigned long)old_addr, branch_flags);
		else
			create_branch(&old_inst, ip, bpf_func_end - bpf_jit_long_branch_stub,
				      branch_flags);
	}
	new_inst = ppc_inst(PPC_RAW_NOP());
	if (new_addr) {
		if (is_offset_in_branch_range(ip - new_addr))
			create_branch(&new_inst, ip, (unsigned long)new_addr, branch_flags);
		else
			create_branch(&new_inst, ip, bpf_func_end - bpf_jit_long_branch_stub,
				      branch_flags);
	}

	mutex_lock(&text_mutex);

	/*
	 * 1. Update the address in the long branch stub:
	 * If new_addr is out of range, we will have to use the long branch stub, so patch new_addr
	 * here. Otherwise, revert to dummy_tramp, but only if we had patched old_addr here.
	 */
	if ((new_addr && !is_offset_in_branch_range(new_addr - ip)) ||
	    (old_addr && !is_offset_in_branch_range(old_addr - ip)))
		ret = patch_ulong((void *)(bpf_func_end - bpf_jit_long_branch_stub - SZL),
				  (new_addr && !is_offset_in_branch_range(new_addr - ip)) ?
				  (unsigned long)new_addr : (unsigned long)dummy_tramp);
	if (ret)
		goto out;

	/* 2. Update the branch/call in the out-of-line stub */
	ret = bpf_modify_inst(ip, old_inst, new_inst);
	if (ret)
		goto out;

	/* 3. Update instruction at bpf prog entry */
	ip = (void *)bpf_func;
	if (!old_addr || !new_addr) {
		if (!old_addr) {
			old_inst = ppc_inst(PPC_RAW_NOP());
			create_branch(&new_inst, ip, bpf_func_end - bpf_jit_ool_stub, 0);
		} else {
			new_inst = ppc_inst(PPC_RAW_NOP());
			create_branch(&old_inst, ip, bpf_func_end - bpf_jit_ool_stub, 0);
		}
		ret = bpf_modify_inst(ip, old_inst, new_inst);
	}

out:
	mutex_unlock(&text_mutex);

	/*
	 * No sync is needed if we are only attaching a trampoline to the bpf
	 * prog; otherwise, sync so that the older trampoline can be freed
	 * safely.
	 */
	if (old_addr)
		smp_call_function(do_isync, NULL, 1);

	return ret;
}