v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * eBPF JIT compiler
  4 *
  5 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
  6 *		  IBM Corporation
  7 *
  8 * Based on the powerpc classic BPF JIT compiler by Matt Evans
  9 */
 10#include <linux/moduleloader.h>
 11#include <asm/cacheflush.h>
 12#include <asm/asm-compat.h>
 13#include <linux/netdevice.h>
 14#include <linux/filter.h>
 15#include <linux/if_vlan.h>
 16#include <asm/kprobes.h>
 17#include <linux/bpf.h>
 18
 19#include "bpf_jit.h"
 20
 21static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
 22{
 23	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
 24}
 25
 26/* Fix updated addresses (for subprog calls, ldimm64, et al) during extra pass */
 27static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image,
 28				   struct codegen_context *ctx, u32 *addrs)
 29{
 30	const struct bpf_insn *insn = fp->insnsi;
 31	bool func_addr_fixed;
 32	u64 func_addr;
 33	u32 tmp_idx;
 34	int i, j, ret;
 35
 36	for (i = 0; i < fp->len; i++) {
 37		/*
 38		 * During the extra pass, only the branch target addresses for
 39		 * the subprog calls need to be fixed. All other instructions
 40		 * can be left untouched.
 41		 *
 42		 * The JITed image length does not change because we already
 43		 * ensure that the JITed instruction sequences for these calls
 44		 * are of fixed length by padding them with NOPs.
 45		 */
 46		if (insn[i].code == (BPF_JMP | BPF_CALL) &&
 47		    insn[i].src_reg == BPF_PSEUDO_CALL) {
 48			ret = bpf_jit_get_func_addr(fp, &insn[i], true,
 49						    &func_addr,
 50						    &func_addr_fixed);
 51			if (ret < 0)
 52				return ret;
 53
 54			/*
 55			 * Save ctx->idx as this would currently point to the
 56			 * end of the JITed image and set it to the offset of
 57			 * the instruction sequence corresponding to the
 58			 * subprog call temporarily.
 59			 */
 60			tmp_idx = ctx->idx;
 61			ctx->idx = addrs[i] / 4;
 62			ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
 63			if (ret)
 64				return ret;
 65
 65
 66			/*
 67			 * Restore ctx->idx here. This is safe as the length
 68			 * of the JITed sequence remains unchanged.
 69			 */
 70			ctx->idx = tmp_idx;
 71		} else if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW)) {
 72			tmp_idx = ctx->idx;
 73			ctx->idx = addrs[i] / 4;
 74#ifdef CONFIG_PPC32
 75			PPC_LI32(bpf_to_ppc(insn[i].dst_reg) - 1, (u32)insn[i + 1].imm);
 76			PPC_LI32(bpf_to_ppc(insn[i].dst_reg), (u32)insn[i].imm);
 77			for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
 78				EMIT(PPC_RAW_NOP());
 79#else
 80			func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32);
 81			PPC_LI64(bpf_to_ppc(insn[i].dst_reg), func_addr);
 82			/* overwrite rest with nops */
 83			for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
 84				EMIT(PPC_RAW_NOP());
 85#endif
 86			ctx->idx = tmp_idx;
 87			i++;
 88		}
 89	}
 90
 91	return 0;
 92}
 93
 94int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
 95{
 96	if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
 97		PPC_JMP(exit_addr);
 98	} else if (ctx->alt_exit_addr) {
 99		if (WARN_ON(!is_offset_in_branch_range((long)ctx->alt_exit_addr - (ctx->idx * 4))))
100			return -1;
101		PPC_JMP(ctx->alt_exit_addr);
102	} else {
103		ctx->alt_exit_addr = ctx->idx * 4;
104		bpf_jit_build_epilogue(image, ctx);
105	}
106
107	return 0;
108}
109
110struct powerpc64_jit_data {
111	struct bpf_binary_header *header;
112	u32 *addrs;
113	u8 *image;
114	u32 proglen;
115	struct codegen_context ctx;
116};
117
118bool bpf_jit_needs_zext(void)
119{
120	return true;
121}
122
123struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
124{
125	u32 proglen;
126	u32 alloclen;
127	u8 *image = NULL;
128	u32 *code_base;
129	u32 *addrs;
130	struct powerpc64_jit_data *jit_data;
131	struct codegen_context cgctx;
132	int pass;
133	int flen;
134	struct bpf_binary_header *bpf_hdr;
135	struct bpf_prog *org_fp = fp;
136	struct bpf_prog *tmp_fp;
137	bool bpf_blinded = false;
138	bool extra_pass = false;
139	u32 extable_len;
140	u32 fixup_len;
141
142	if (!fp->jit_requested)
143		return org_fp;
144
145	tmp_fp = bpf_jit_blind_constants(org_fp);
146	if (IS_ERR(tmp_fp))
147		return org_fp;
148
149	if (tmp_fp != org_fp) {
150		bpf_blinded = true;
151		fp = tmp_fp;
152	}
153
154	jit_data = fp->aux->jit_data;
155	if (!jit_data) {
156		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
157		if (!jit_data) {
158			fp = org_fp;
159			goto out;
160		}
161		fp->aux->jit_data = jit_data;
162	}
163
164	flen = fp->len;
165	addrs = jit_data->addrs;
166	if (addrs) {
167		cgctx = jit_data->ctx;
168		image = jit_data->image;
169		bpf_hdr = jit_data->header;
170		proglen = jit_data->proglen;
171		extra_pass = true;
172		goto skip_init_ctx;
173	}
174
175	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
176	if (addrs == NULL) {
177		fp = org_fp;
178		goto out_addrs;
179	}
180
181	memset(&cgctx, 0, sizeof(struct codegen_context));
182	bpf_jit_init_reg_mapping(&cgctx);
183
184	/* Make sure that the stack is quadword aligned. */
185	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
186
187	/* Scouting faux-generate pass 0 */
188	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
189		/* We hit something illegal or unsupported. */
190		fp = org_fp;
191		goto out_addrs;
192	}
193
194	/*
195	 * If we have seen a tail call, we need a second pass.
196	 * This is because bpf_jit_emit_common_epilogue() is called
197	 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
198	 * We also need a second pass if we ended up with too large
199	 * a program so as to ensure BPF_EXIT branches are in range.
200	 */
201	if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
202		cgctx.idx = 0;
203		if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
204			fp = org_fp;
205			goto out_addrs;
206		}
207	}
208
209	bpf_jit_realloc_regs(&cgctx);
210	/*
211	 * Pretend to build prologue, given the features we've seen.  This will
212	 * update ctgtx.idx as it pretends to output instructions, then we can
213	 * calculate total size from idx.
214	 */
215	bpf_jit_build_prologue(0, &cgctx);
216	addrs[fp->len] = cgctx.idx * 4;
217	bpf_jit_build_epilogue(0, &cgctx);
218
219	fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
220	extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);
221
222	proglen = cgctx.idx * 4;
223	alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;
224
225	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
226	if (!bpf_hdr) {
227		fp = org_fp;
228		goto out_addrs;
229	}
230
231	if (extable_len)
232		fp->aux->extable = (void *)image + FUNCTION_DESCR_SIZE + proglen + fixup_len;
233
234skip_init_ctx:
235	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
236
237	if (extra_pass) {
238		/*
239		 * Do not touch the prologue and epilogue as they will remain
240		 * unchanged. Only fix the branch target address for subprog
241		 * calls in the body, and ldimm64 instructions.
242		 *
243		 * This does not change the offsets and lengths of the subprog
244		 * call instruction sequences and hence, the size of the JITed
245		 * image as well.
246		 */
247		bpf_jit_fixup_addresses(fp, code_base, &cgctx, addrs);
248
249		/* There is no need to perform the usual passes. */
250		goto skip_codegen_passes;
251	}
252
253	/* Code generation passes 1-2 */
254	for (pass = 1; pass < 3; pass++) {
255		/* Now build the prologue, body code & epilogue for real. */
256		cgctx.idx = 0;
257		cgctx.alt_exit_addr = 0;
258		bpf_jit_build_prologue(code_base, &cgctx);
259		if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass)) {
260			bpf_jit_binary_free(bpf_hdr);
261			fp = org_fp;
262			goto out_addrs;
263		}
264		bpf_jit_build_epilogue(code_base, &cgctx);
265
266		if (bpf_jit_enable > 1)
267			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
268				proglen - (cgctx.idx * 4), cgctx.seen);
269	}
270
271skip_codegen_passes:
272	if (bpf_jit_enable > 1)
273		/*
274		 * Note that we output the base address of the code_base
275		 * rather than image, since opcodes are in code_base.
276		 */
277		bpf_jit_dump(flen, proglen, pass, code_base);
278
279#ifdef CONFIG_PPC64_ELF_ABI_V1
280	/* Function descriptor nastiness: Address + TOC */
281	((u64 *)image)[0] = (u64)code_base;
282	((u64 *)image)[1] = local_paca->kernel_toc;
283#endif
284
285	fp->bpf_func = (void *)image;
286	fp->jited = 1;
287	fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
288
289	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + bpf_hdr->size);
290	if (!fp->is_func || extra_pass) {
291		bpf_jit_binary_lock_ro(bpf_hdr);
292		bpf_prog_fill_jited_linfo(fp, addrs);
293out_addrs:
294		kfree(addrs);
295		kfree(jit_data);
296		fp->aux->jit_data = NULL;
297	} else {
298		jit_data->addrs = addrs;
299		jit_data->ctx = cgctx;
300		jit_data->proglen = proglen;
301		jit_data->image = image;
302		jit_data->header = bpf_hdr;
303	}
304
305out:
306	if (bpf_blinded)
307		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
308
309	return fp;
310}
311
312/*
313 * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
314 * this function, as this only applies to BPF_PROBE_MEM, for now.
315 */
316int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
317			  int insn_idx, int jmp_off, int dst_reg)
318{
319	off_t offset;
320	unsigned long pc;
321	struct exception_table_entry *ex;
322	u32 *fixup;
323
324	/* Populate extable entries only in the last pass */
325	if (pass != 2)
326		return 0;
327
328	if (!fp->aux->extable ||
329	    WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
330		return -EINVAL;
331
332	pc = (unsigned long)&image[insn_idx];
333
334	fixup = (void *)fp->aux->extable -
335		(fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
336		(ctx->exentry_idx * BPF_FIXUP_LEN * 4);
337
338	fixup[0] = PPC_RAW_LI(dst_reg, 0);
339	if (IS_ENABLED(CONFIG_PPC32))
340		fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */
341
342	fixup[BPF_FIXUP_LEN - 1] =
343		PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);
344
345	ex = &fp->aux->extable[ctx->exentry_idx];
346
347	offset = pc - (long)&ex->insn;
348	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
349		return -ERANGE;
350	ex->insn = offset;
351
352	offset = (long)fixup - (long)&ex->fixup;
353	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
354		return -ERANGE;
355	ex->fixup = offset;
356
357	ctx->exentry_idx++;
358	return 0;
359}
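
The exit handling in bpf_jit_emit_exit_insn() and the retry pass above both hinge on whether a direct powerpc branch can reach its target. As a rough standalone sketch of that constraint (assuming the usual powerpc branch encodings, a signed 26-bit word-aligned displacement for an unconditional b and a 16-bit one for a conditional bc; this does not reproduce the kernel's is_offset_in_branch_range() internals):

#include <stdbool.h>

/* Illustrative only, not kernel code: +/- 32 MB for 'b', +/- 32 KB for 'bc'. */
static bool offset_fits_ppc_branch(long offset, bool conditional)
{
	long limit = conditional ? 0x8000L : 0x2000000L;

	/* Displacement must be word aligned and within the signed range. */
	return !(offset & 0x3) && offset >= -limit && offset < limit;
}

When a BPF_EXIT branch would fall outside that range, the code above instead records ctx->alt_exit_addr and emits a nearer copy of the epilogue for later exits to reach.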
v3.15
  1/* bpf_jit_comp.c: BPF JIT compiler for PPC64
  2 *
  3 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
  4 *
  5 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
  6 *
  7 * This program is free software; you can redistribute it and/or
  8 * modify it under the terms of the GNU General Public License
  9 * as published by the Free Software Foundation; version 2
 10 * of the License.
 11 */
 12#include <linux/moduleloader.h>
 13#include <asm/cacheflush.h>
 14#include <linux/netdevice.h>
 15#include <linux/filter.h>
 16#include <linux/if_vlan.h>
 17
 18#include "bpf_jit.h"
 19
 20int bpf_jit_enable __read_mostly;
 21
 22static inline void bpf_flush_icache(void *start, void *end)
 23{
 24	smp_wmb();
 25	flush_icache_range((unsigned long)start, (unsigned long)end);
 26}
 27
 28static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
 29				   struct codegen_context *ctx)
 30{
 31	int i;
 32	const struct sock_filter *filter = fp->insns;
 33
 34	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
 35		/* Make stackframe */
 36		if (ctx->seen & SEEN_DATAREF) {
 37			/* If we call any helpers (for loads), save LR */
 38			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
 39			PPC_STD(0, 1, 16);
 40
 41			/* Back up non-volatile regs. */
 42			PPC_STD(r_D, 1, -(8*(32-r_D)));
 43			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
 44		}
 45		if (ctx->seen & SEEN_MEM) {
 46			/*
 47			 * Conditionally save regs r15-r31 as some will be used
 48			 * for M[] data.
 49			 */
 50			for (i = r_M; i < (r_M+16); i++) {
 51				if (ctx->seen & (1 << (i-r_M)))
 52					PPC_STD(i, 1, -(8*(32-i)));
 53			}
 54		}
 55		EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
 56		     (-BPF_PPC_STACKFRAME & 0xfffc));
 57	}
 58
 59	if (ctx->seen & SEEN_DATAREF) {
 60		/*
 61		 * If this filter needs to access skb data,
 62		 * prepare r_D and r_HL:
 63		 *  r_HL = skb->len - skb->data_len
 64		 *  r_D	 = skb->data
 65		 */
 66		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
 67							 data_len));
 68		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
 69		PPC_SUB(r_HL, r_HL, r_scratch1);
 70		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
 71	}
 72
 73	if (ctx->seen & SEEN_XREG) {
 74		/*
 75		 * TODO: Could also detect whether first instr. sets X and
 76		 * avoid this (as below, with A).
 77		 */
 78		PPC_LI(r_X, 0);
 79	}
 80
 81	switch (filter[0].code) {
 82	case BPF_S_RET_K:
 83	case BPF_S_LD_W_LEN:
 84	case BPF_S_ANC_PROTOCOL:
 85	case BPF_S_ANC_IFINDEX:
 86	case BPF_S_ANC_MARK:
 87	case BPF_S_ANC_RXHASH:
 88	case BPF_S_ANC_VLAN_TAG:
 89	case BPF_S_ANC_VLAN_TAG_PRESENT:
 90	case BPF_S_ANC_CPU:
 91	case BPF_S_ANC_QUEUE:
 92	case BPF_S_LD_W_ABS:
 93	case BPF_S_LD_H_ABS:
 94	case BPF_S_LD_B_ABS:
 95		/* first instruction sets A register (or is RET 'constant') */
 96		break;
 97	default:
 98		/* make sure we don't leak kernel information to user */
 99		PPC_LI(r_A, 0);
100	}
101}
102
103static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
104{
105	int i;
106
107	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
108		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
109		if (ctx->seen & SEEN_DATAREF) {
110			PPC_LD(0, 1, 16);
111			PPC_MTLR(0);
112			PPC_LD(r_D, 1, -(8*(32-r_D)));
113			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
114		}
115		if (ctx->seen & SEEN_MEM) {
116			/* Restore any saved non-vol registers */
117			for (i = r_M; i < (r_M+16); i++) {
118				if (ctx->seen & (1 << (i-r_M)))
119					PPC_LD(i, 1, -(8*(32-i)));
120			}
121		}
122	}
123	/* The RETs have left a return value in R3. */
124
125	PPC_BLR();
126}
127
128#define CHOOSE_LOAD_FUNC(K, func) \
129	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
130
131/* Assemble the body code between the prologue & epilogue. */
132static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
133			      struct codegen_context *ctx,
134			      unsigned int *addrs)
135{
136	const struct sock_filter *filter = fp->insns;
137	int flen = fp->len;
138	u8 *func;
139	unsigned int true_cond;
140	int i;
141
142	/* Start of epilogue code */
143	unsigned int exit_addr = addrs[flen];
144
145	for (i = 0; i < flen; i++) {
146		unsigned int K = filter[i].k;
147
148		/*
149		 * addrs[] maps a BPF bytecode address into a real offset from
150		 * the start of the body code.
151		 */
152		addrs[i] = ctx->idx * 4;
153
154		switch (filter[i].code) {
155			/*** ALU ops ***/
156		case BPF_S_ALU_ADD_X: /* A += X; */
157			ctx->seen |= SEEN_XREG;
158			PPC_ADD(r_A, r_A, r_X);
159			break;
160		case BPF_S_ALU_ADD_K: /* A += K; */
161			if (!K)
162				break;
163			PPC_ADDI(r_A, r_A, IMM_L(K));
164			if (K >= 32768)
165				PPC_ADDIS(r_A, r_A, IMM_HA(K));
166			break;
167		case BPF_S_ALU_SUB_X: /* A -= X; */
168			ctx->seen |= SEEN_XREG;
169			PPC_SUB(r_A, r_A, r_X);
170			break;
171		case BPF_S_ALU_SUB_K: /* A -= K */
172			if (!K)
173				break;
174			PPC_ADDI(r_A, r_A, IMM_L(-K));
175			if (K >= 32768)
176				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
177			break;
178		case BPF_S_ALU_MUL_X: /* A *= X; */
179			ctx->seen |= SEEN_XREG;
180			PPC_MUL(r_A, r_A, r_X);
181			break;
182		case BPF_S_ALU_MUL_K: /* A *= K */
183			if (K < 32768)
184				PPC_MULI(r_A, r_A, K);
185			else {
186				PPC_LI32(r_scratch1, K);
187				PPC_MUL(r_A, r_A, r_scratch1);
188			}
189			break;
190		case BPF_S_ALU_MOD_X: /* A %= X; */
191			ctx->seen |= SEEN_XREG;
192			PPC_CMPWI(r_X, 0);
193			if (ctx->pc_ret0 != -1) {
194				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
195			} else {
196				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
197				PPC_LI(r_ret, 0);
198				PPC_JMP(exit_addr);
199			}
200			PPC_DIVWU(r_scratch1, r_A, r_X);
201			PPC_MUL(r_scratch1, r_X, r_scratch1);
202			PPC_SUB(r_A, r_A, r_scratch1);
203			break;
204		case BPF_S_ALU_MOD_K: /* A %= K; */
205			PPC_LI32(r_scratch2, K);
206			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
207			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
208			PPC_SUB(r_A, r_A, r_scratch1);
209			break;
210		case BPF_S_ALU_DIV_X: /* A /= X; */
211			ctx->seen |= SEEN_XREG;
212			PPC_CMPWI(r_X, 0);
213			if (ctx->pc_ret0 != -1) {
214				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
215			} else {
216				/*
217				 * Exit, returning 0; first pass hits here
218				 * (longer worst-case code size).
219				 */
220				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
221				PPC_LI(r_ret, 0);
222				PPC_JMP(exit_addr);
223			}
224			PPC_DIVWU(r_A, r_A, r_X);
225			break;
226		case BPF_S_ALU_DIV_K: /* A /= K */
227			if (K == 1)
228				break;
229			PPC_LI32(r_scratch1, K);
230			PPC_DIVWU(r_A, r_A, r_scratch1);
231			break;
232		case BPF_S_ALU_AND_X:
233			ctx->seen |= SEEN_XREG;
234			PPC_AND(r_A, r_A, r_X);
235			break;
236		case BPF_S_ALU_AND_K:
237			if (!IMM_H(K))
238				PPC_ANDI(r_A, r_A, K);
239			else {
240				PPC_LI32(r_scratch1, K);
241				PPC_AND(r_A, r_A, r_scratch1);
242			}
243			break;
244		case BPF_S_ALU_OR_X:
245			ctx->seen |= SEEN_XREG;
246			PPC_OR(r_A, r_A, r_X);
247			break;
248		case BPF_S_ALU_OR_K:
249			if (IMM_L(K))
250				PPC_ORI(r_A, r_A, IMM_L(K));
251			if (K >= 65536)
252				PPC_ORIS(r_A, r_A, IMM_H(K));
253			break;
254		case BPF_S_ANC_ALU_XOR_X:
255		case BPF_S_ALU_XOR_X: /* A ^= X */
256			ctx->seen |= SEEN_XREG;
257			PPC_XOR(r_A, r_A, r_X);
258			break;
259		case BPF_S_ALU_XOR_K: /* A ^= K */
260			if (IMM_L(K))
261				PPC_XORI(r_A, r_A, IMM_L(K));
262			if (K >= 65536)
263				PPC_XORIS(r_A, r_A, IMM_H(K));
264			break;
265		case BPF_S_ALU_LSH_X: /* A <<= X; */
266			ctx->seen |= SEEN_XREG;
267			PPC_SLW(r_A, r_A, r_X);
268			break;
269		case BPF_S_ALU_LSH_K:
270			if (K == 0)
271				break;
272			else
273				PPC_SLWI(r_A, r_A, K);
274			break;
275		case BPF_S_ALU_RSH_X: /* A >>= X; */
276			ctx->seen |= SEEN_XREG;
277			PPC_SRW(r_A, r_A, r_X);
278			break;
279		case BPF_S_ALU_RSH_K: /* A >>= K; */
280			if (K == 0)
281				break;
282			else
283				PPC_SRWI(r_A, r_A, K);
284			break;
285		case BPF_S_ALU_NEG:
286			PPC_NEG(r_A, r_A);
287			break;
288		case BPF_S_RET_K:
289			PPC_LI32(r_ret, K);
290			if (!K) {
291				if (ctx->pc_ret0 == -1)
292					ctx->pc_ret0 = i;
293			}
294			/*
295			 * If this isn't the very last instruction, branch to
296			 * the epilogue if we've stuff to clean up.  Otherwise,
297			 * if there's nothing to tidy, just return.  If we /are/
298			 * the last instruction, we're about to fall through to
299			 * the epilogue to return.
300			 */
301			if (i != flen - 1) {
302				/*
303				 * Note: 'seen' is properly valid only on pass
304				 * #2.	Both parts of this conditional are the
305				 * same instruction size though, meaning the
306				 * first pass will still correctly determine the
307				 * code size/addresses.
308				 */
309				if (ctx->seen)
310					PPC_JMP(exit_addr);
311				else
312					PPC_BLR();
313			}
314			break;
315		case BPF_S_RET_A:
316			PPC_MR(r_ret, r_A);
317			if (i != flen - 1) {
318				if (ctx->seen)
319					PPC_JMP(exit_addr);
320				else
321					PPC_BLR();
322			}
323			break;
324		case BPF_S_MISC_TAX: /* X = A */
325			PPC_MR(r_X, r_A);
326			break;
327		case BPF_S_MISC_TXA: /* A = X */
328			ctx->seen |= SEEN_XREG;
329			PPC_MR(r_A, r_X);
330			break;
331
332			/*** Constant loads/M[] access ***/
333		case BPF_S_LD_IMM: /* A = K */
334			PPC_LI32(r_A, K);
335			break;
336		case BPF_S_LDX_IMM: /* X = K */
337			PPC_LI32(r_X, K);
338			break;
339		case BPF_S_LD_MEM: /* A = mem[K] */
340			PPC_MR(r_A, r_M + (K & 0xf));
341			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
342			break;
343		case BPF_S_LDX_MEM: /* X = mem[K] */
344			PPC_MR(r_X, r_M + (K & 0xf));
345			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
346			break;
347		case BPF_S_ST: /* mem[K] = A */
348			PPC_MR(r_M + (K & 0xf), r_A);
349			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
350			break;
351		case BPF_S_STX: /* mem[K] = X */
352			PPC_MR(r_M + (K & 0xf), r_X);
353			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
354			break;
355		case BPF_S_LD_W_LEN: /*	A = skb->len; */
356			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
357			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
358			break;
359		case BPF_S_LDX_W_LEN: /* X = skb->len; */
360			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
361			break;
362
363			/*** Ancillary info loads ***/
364		case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
365			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
366						  protocol) != 2);
367			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
368							    protocol));
369			break;
370		case BPF_S_ANC_IFINDEX:
371			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
372								dev));
373			PPC_CMPDI(r_scratch1, 0);
374			if (ctx->pc_ret0 != -1) {
375				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
376			} else {
377				/* Exit, returning 0; first pass hits here. */
378				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
379				PPC_LI(r_ret, 0);
380				PPC_JMP(exit_addr);
381			}
382			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
383						  ifindex) != 4);
384			PPC_LWZ_OFFS(r_A, r_scratch1,
385				     offsetof(struct net_device, ifindex));
386			break;
387		case BPF_S_ANC_MARK:
388			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
389			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
390							  mark));
391			break;
392		case BPF_S_ANC_RXHASH:
393			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
394			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
395							  hash));
396			break;
397		case BPF_S_ANC_VLAN_TAG:
398		case BPF_S_ANC_VLAN_TAG_PRESENT:
399			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
400			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
401							  vlan_tci));
402			if (filter[i].code == BPF_S_ANC_VLAN_TAG)
403				PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
404			else
405				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
406			break;
407		case BPF_S_ANC_QUEUE:
408			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
409						  queue_mapping) != 2);
410			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
411							  queue_mapping));
412			break;
413		case BPF_S_ANC_CPU:
414#ifdef CONFIG_SMP
415			/*
416			 * PACA ptr is r13:
417			 * raw_smp_processor_id() = local_paca->paca_index
418			 */
419			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
420						  paca_index) != 2);
421			PPC_LHZ_OFFS(r_A, 13,
422				     offsetof(struct paca_struct, paca_index));
423#else
424			PPC_LI(r_A, 0);
425#endif
426			break;
427
428			/*** Absolute loads from packet header/data ***/
429		case BPF_S_LD_W_ABS:
430			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
431			goto common_load;
432		case BPF_S_LD_H_ABS:
433			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
434			goto common_load;
435		case BPF_S_LD_B_ABS:
436			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
437		common_load:
438			/* Load from [K]. */
439			ctx->seen |= SEEN_DATAREF;
440			PPC_LI64(r_scratch1, func);
441			PPC_MTLR(r_scratch1);
442			PPC_LI32(r_addr, K);
443			PPC_BLRL();
444			/*
445			 * Helper returns 'lt' condition on error, and an
446			 * appropriate return value in r3
447			 */
448			PPC_BCC(COND_LT, exit_addr);
449			break;
450
451			/*** Indirect loads from packet header/data ***/
452		case BPF_S_LD_W_IND:
453			func = sk_load_word;
454			goto common_load_ind;
455		case BPF_S_LD_H_IND:
456			func = sk_load_half;
457			goto common_load_ind;
458		case BPF_S_LD_B_IND:
459			func = sk_load_byte;
460		common_load_ind:
461			/*
462			 * Load from [X + K].  Negative offsets are tested for
463			 * in the helper functions.
464			 */
465			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
466			PPC_LI64(r_scratch1, func);
467			PPC_MTLR(r_scratch1);
468			PPC_ADDI(r_addr, r_X, IMM_L(K));
469			if (K >= 32768)
470				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
471			PPC_BLRL();
472			/* If error, cr0.LT set */
473			PPC_BCC(COND_LT, exit_addr);
474			break;
475
476		case BPF_S_LDX_B_MSH:
477			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
478			goto common_load;
479			break;
480
481			/*** Jump and branches ***/
482		case BPF_S_JMP_JA:
483			if (K != 0)
484				PPC_JMP(addrs[i + 1 + K]);
485			break;
486
487		case BPF_S_JMP_JGT_K:
488		case BPF_S_JMP_JGT_X:
489			true_cond = COND_GT;
490			goto cond_branch;
491		case BPF_S_JMP_JGE_K:
492		case BPF_S_JMP_JGE_X:
493			true_cond = COND_GE;
494			goto cond_branch;
495		case BPF_S_JMP_JEQ_K:
496		case BPF_S_JMP_JEQ_X:
497			true_cond = COND_EQ;
498			goto cond_branch;
499		case BPF_S_JMP_JSET_K:
500		case BPF_S_JMP_JSET_X:
501			true_cond = COND_NE;
502			/* Fall through */
503		cond_branch:
504			/* same targets, can avoid doing the test :) */
505			if (filter[i].jt == filter[i].jf) {
506				if (filter[i].jt > 0)
507					PPC_JMP(addrs[i + 1 + filter[i].jt]);
508				break;
509			}
510
511			switch (filter[i].code) {
512			case BPF_S_JMP_JGT_X:
513			case BPF_S_JMP_JGE_X:
514			case BPF_S_JMP_JEQ_X:
515				ctx->seen |= SEEN_XREG;
516				PPC_CMPLW(r_A, r_X);
517				break;
518			case BPF_S_JMP_JSET_X:
519				ctx->seen |= SEEN_XREG;
520				PPC_AND_DOT(r_scratch1, r_A, r_X);
521				break;
522			case BPF_S_JMP_JEQ_K:
523			case BPF_S_JMP_JGT_K:
524			case BPF_S_JMP_JGE_K:
525				if (K < 32768)
526					PPC_CMPLWI(r_A, K);
527				else {
528					PPC_LI32(r_scratch1, K);
529					PPC_CMPLW(r_A, r_scratch1);
530				}
531				break;
532			case BPF_S_JMP_JSET_K:
533				if (K < 32768)
534					/* PPC_ANDI is /only/ dot-form */
535					PPC_ANDI(r_scratch1, r_A, K);
536				else {
537					PPC_LI32(r_scratch1, K);
538					PPC_AND_DOT(r_scratch1, r_A,
539						    r_scratch1);
540				}
541				break;
542			}
543			/* Sometimes branches are constructed "backward", with
544			 * the false path being the branch and true path being
545			 * a fallthrough to the next instruction.
546			 */
547			if (filter[i].jt == 0)
548				/* Swap the sense of the branch */
549				PPC_BCC(true_cond ^ COND_CMP_TRUE,
550					addrs[i + 1 + filter[i].jf]);
551			else {
552				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
553				if (filter[i].jf != 0)
554					PPC_JMP(addrs[i + 1 + filter[i].jf]);
555			}
556			break;
557		default:
558			/* The filter contains something cruel & unusual.
559			 * We don't handle it, but also there shouldn't be
560			 * anything missing from our list.
561			 */
562			if (printk_ratelimit())
563				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
564				       filter[i].code, i);
565			return -ENOTSUPP;
566		}
567
568	}
569	/* Set end-of-body-code address for exit. */
570	addrs[i] = ctx->idx * 4;
571
572	return 0;
573}
574
575void bpf_jit_compile(struct sk_filter *fp)
576{
577	unsigned int proglen;
578	unsigned int alloclen;
579	u32 *image = NULL;
580	u32 *code_base;
581	unsigned int *addrs;
582	struct codegen_context cgctx;
583	int pass;
584	int flen = fp->len;
585
586	if (!bpf_jit_enable)
587		return;
588
589	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
590	if (addrs == NULL)
591		return;
592
593	/*
594	 * There are multiple assembly passes as the generated code will change
595	 * size as it settles down, figuring out the max branch offsets/exit
596	 * paths required.
597	 *
598	 * The range of standard conditional branches is +/- 32Kbytes.	Since
599	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
600	 * finish with 8 bytes/instruction.  Not feasible, so long jumps are
601	 * used, distinct from short branches.
602	 *
603	 * Current:
604	 *
605	 * For now, both branch types assemble to 2 words (short branches padded
606	 * with a NOP); this is less efficient, but assembly will always complete
607	 * after exactly 3 passes:
608	 *
609	 * First pass: No code buffer; Program is "faux-generated" -- no code
610	 * emitted but maximum size of output determined (and addrs[] filled
611	 * in).	 Also, we note whether we use M[], whether we use skb data, etc.
612	 * All generation choices assumed to be 'worst-case', e.g. branches all
613	 * far (2 instructions), return path code reduction not available, etc.
614	 *
615	 * Second pass: Code buffer allocated with size determined previously.
616	 * Prologue generated to support features we have seen used.  Exit paths
617	 * determined and addrs[] is filled in again, as code may be slightly
618	 * smaller as a result.
619	 *
620	 * Third pass: Code generated 'for real', and branch destinations
621	 * determined from now-accurate addrs[] map.
622	 *
623	 * Ideal:
624	 *
625	 * If we optimise this, near branches will be shorter.	On the
626	 * first assembly pass, we should err on the side of caution and
627	 * generate the biggest code.  On subsequent passes, branches will be
628	 * generated short or long and code size will reduce.  With smaller
629	 * code, more branches may fall into the short category, and code will
630	 * reduce more.
631	 *
632	 * Finally, if we see one pass generate code the same size as the
633	 * previous pass we have converged and should now generate code for
634	 * real.  Allocating at the end will also save the memory that would
635	 * otherwise be wasted by the (small) current code shrinkage.
636	 * Preferably, we should do a small number of passes (e.g. 5) and if we
637	 * haven't converged by then, get impatient and force code to generate
638	 * as-is, even if the odd branch would be left long.  The chances of a
639	 * long jump are tiny with all but the most enormous of BPF filter
640	 * inputs, so we should usually converge on the third pass.
641	 */
642
643	cgctx.idx = 0;
644	cgctx.seen = 0;
645	cgctx.pc_ret0 = -1;
646	/* Scouting faux-generate pass 0 */
647	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
648		/* We hit something illegal or unsupported. */
649		goto out;
650
651	/*
652	 * Pretend to build prologue, given the features we've seen.  This will
653	 * update cgctx.idx as it pretends to output instructions, then we can
654	 * calculate total size from idx.
655	 */
656	bpf_jit_build_prologue(fp, 0, &cgctx);
657	bpf_jit_build_epilogue(0, &cgctx);
658
659	proglen = cgctx.idx * 4;
660	alloclen = proglen + FUNCTION_DESCR_SIZE;
661	image = module_alloc(alloclen);
662	if (!image)
663		goto out;
664
665	code_base = image + (FUNCTION_DESCR_SIZE/4);
666
667	/* Code generation passes 1-2 */
668	for (pass = 1; pass < 3; pass++) {
669		/* Now build the prologue, body code & epilogue for real. */
670		cgctx.idx = 0;
671		bpf_jit_build_prologue(fp, code_base, &cgctx);
672		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
673		bpf_jit_build_epilogue(code_base, &cgctx);
674
675		if (bpf_jit_enable > 1)
676			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
677				proglen - (cgctx.idx * 4), cgctx.seen);
678	}
679
680	if (bpf_jit_enable > 1)
681		/* Note that we output the base address of the code_base
682		 * rather than image, since opcodes are in code_base.
683		 */
684		bpf_jit_dump(flen, proglen, pass, code_base);
685
686	if (image) {
687		bpf_flush_icache(code_base, code_base + (proglen/4));
688		/* Function descriptor nastiness: Address + TOC */
689		((u64 *)image)[0] = (u64)code_base;
690		((u64 *)image)[1] = local_paca->kernel_toc;
691		fp->bpf_func = (void *)image;
692		fp->jited = 1;
693	}
694out:
695	kfree(addrs);
696	return;
697}
698
699void bpf_jit_free(struct sk_filter *fp)
700{
701	if (fp->jited)
702		module_free(NULL, fp->bpf_func);
703	kfree(fp);
704}
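
For context on the input this older classic-BPF JIT walks (filter[i].code, .k, .jt and .jf above), here is a hypothetical filter in struct sock_filter form; the program, offsets and constants are illustrative and are not taken from this file:

#include <linux/filter.h>

/* Accept only IPv4 frames: load the ethertype halfword, compare, return. */
static const struct sock_filter sample_filter[] = {
	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),            /* A = ethertype */
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1), /* IPv4? */
	BPF_STMT(BPF_RET | BPF_K, 0xffff),                 /* accept frame */
	BPF_STMT(BPF_RET | BPF_K, 0),                      /* drop frame */
};

After the kernel classifies these opcodes, the first two statements exercise the BPF_S_LD_H_ABS and conditional-branch cases above, and the two returns hit BPF_S_RET_K, including the pc_ret0 shortcut that remembers the first 'return 0' for reuse by failing checks.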