// SPDX-License-Identifier: GPL-2.0-only
/* bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit32.h"

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

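/*
 * ctx->seen is filled in during the scouting pass: SEEN_XREG, SEEN_MEM and
 * SEEN_DATAREF record which features the filter uses, and the low bits mark
 * which individual M[] scratch words are touched, so the prologue and
 * epilogue only save and restore what is actually needed.
 */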
static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		/* Make stackframe */
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);

			/* Back up non-volatile regs. */
			PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
			}
		}
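		/*
		 * Create the stack frame: store-with-update pushes r1 down by
		 * BPF_PPC_STACKFRAME and stores the old r1 as the back chain
		 * at the new stack pointer.
		 */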
		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
	}

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D  = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	/* make sure we don't leak kernel information to user */
	if (bpf_needs_clear_a(&filter[0]))
		PPC_LI(r_A, 0);
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
			PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	PPC_BLR();
}

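/*
 * Select the skb-load helper for a constant offset K: non-negative offsets
 * use the _positive_offset variant, negative offsets at or above SKF_LL_OFF
 * use the _negative_offset variant, and anything lower falls back to the
 * generic helper.
 */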
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;
		u16 code = bpf_anc_helper(&filter[i]);

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (code) {
		/*** ALU ops ***/
		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MULW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MULW(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
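			/*
			 * Division or modulus by zero makes the filter return
			 * 0: branch to an existing "return 0" if we emitted
			 * one, otherwise load 0 and jump to the epilogue.
			 */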
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
				PPC_DIVWU(r_scratch1, r_A, r_X);
				PPC_MULW(r_scratch1, r_X, r_scratch1);
				PPC_SUB(r_A, r_A, r_scratch1);
			} else {
				PPC_DIVWU(r_A, r_A, r_X);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
			PPC_MULW(r_scratch1, r_scratch2, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
			if (K == 1)
				break;
			PPC_LI32(r_scratch1, K);
			PPC_DIVWU(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
			if (IMM_L(K))
				PPC_XORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_RET | BPF_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up. Otherwise,
			 * if there's nothing to tidy, just return. If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2. Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_RET | BPF_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_MISC | BPF_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_MISC | BPF_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

		/*** Constant loads/M[] access ***/
		case BPF_LD | BPF_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_LDX | BPF_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_LD | BPF_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LDX | BPF_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
			PPC_LWZ_OFFS(r_A, r_skb, K);
			break;
		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

		/*** Ancillary info loads ***/
		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  type) != 2);
			PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				PPC_LWZ_OFFS(r_A, r_scratch1,
					     offsetof(struct net_device, ifindex));
			} else {
				PPC_LHZ_OFFS(r_A, r_scratch1,
					     offsetof(struct net_device, type));
			}

			break;
		case BPF_ANC | SKF_AD_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  hash));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);

			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			PPC_LBZ_OFFS(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET());
			if (PKT_VLAN_PRESENT_BIT)
				PPC_SRWI(r_A, r_A, PKT_VLAN_PRESENT_BIT);
			if (PKT_VLAN_PRESENT_BIT < 7)
				PPC_ANDI(r_A, r_A, 1);
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
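			/*
			 * pkt_type is a bitfield: load the byte that holds it,
			 * mask out the field and shift it down to bit 0.
			 */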
			PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
			PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
			PPC_SRWI(r_A, r_A, 5);
			break;
		case BPF_ANC | SKF_AD_CPU:
			PPC_BPF_LOAD_CPU(r_A);
			break;
		/*** Absolute loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_LD | BPF_H | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_B | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

		/*** Indirect loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K]. Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_LDX | BPF_B | BPF_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
			break;

		/*** Jump and branches ***/
		case BPF_JMP | BPF_JA:
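			/* A jump of 0 just falls through; emit nothing. */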
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}

	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

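	/* One extra slot: addrs[flen] holds the epilogue (exit) offset. */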
	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes. Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction. Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in). Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used. Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter. On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code. On subsequent passes, branches will be
	 * generated short or long and code size will reduce. With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real. Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long. The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(alloclen);
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/* Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

	bpf_flush_icache(code_base, code_base + (proglen/4));

#ifdef CONFIG_PPC64
	/* Function descriptor nastiness: Address + TOC */
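	/*
	 * With function descriptors, a function pointer refers to the
	 * descriptor rather than the code: word 0 is the entry address and
	 * word 1 the TOC pointer, so fill both in before handing the
	 * descriptor out as fp->bpf_func.
	 */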
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;

out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}