v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit.h"

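/*
 * Fill the JIT buffer with trap instructions so that any stray branch
 * into not-yet-populated space traps rather than executing stale data;
 * memset32() stores one 32-bit opcode per word, hence the size / 4
 * element count.
 */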
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
}

/* Fix updated addresses (for subprog calls, ldimm64, et al) during extra pass */
static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image,
				   struct codegen_context *ctx, u32 *addrs)
{
	const struct bpf_insn *insn = fp->insnsi;
	bool func_addr_fixed;
	u64 func_addr;
	u32 tmp_idx;
	int i, j, ret;

	for (i = 0; i < fp->len; i++) {
		/*
		 * During the extra pass, only the branch target addresses for
		 * the subprog calls need to be fixed. All other instructions
		 * can be left untouched.
		 *
		 * The JITed image length does not change because we already
		 * ensure that the JITed instruction sequences for these calls
		 * are of fixed length by padding them with NOPs.
		 */
		if (insn[i].code == (BPF_JMP | BPF_CALL) &&
		    insn[i].src_reg == BPF_PSEUDO_CALL) {
			ret = bpf_jit_get_func_addr(fp, &insn[i], true,
						    &func_addr,
						    &func_addr_fixed);
			if (ret < 0)
				return ret;

			/*
			 * Save ctx->idx as this would currently point to the
			 * end of the JITed image and set it to the offset of
			 * the instruction sequence corresponding to the
			 * subprog call temporarily.
			 */
			tmp_idx = ctx->idx;
			ctx->idx = addrs[i] / 4;
			ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
			if (ret)
				return ret;

			/*
			 * Restore ctx->idx here. This is safe as the length
			 * of the JITed sequence remains unchanged.
			 */
			ctx->idx = tmp_idx;
		} else if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW)) {
			tmp_idx = ctx->idx;
			ctx->idx = addrs[i] / 4;
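			/*
			 * ldimm64 occupies a fixed-size slot in the image
			 * (four words on ppc32, five on ppc64); any words the
			 * immediate load does not need are padded with NOPs
			 * below, so offsets stay stable across passes.
			 */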
#ifdef CONFIG_PPC32
			PPC_LI32(bpf_to_ppc(insn[i].dst_reg) - 1, (u32)insn[i + 1].imm);
			PPC_LI32(bpf_to_ppc(insn[i].dst_reg), (u32)insn[i].imm);
			for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
				EMIT(PPC_RAW_NOP());
#else
			func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32);
			PPC_LI64(bpf_to_ppc(insn[i].dst_reg), func_addr);
			/* overwrite rest with nops */
			for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
				EMIT(PPC_RAW_NOP());
#endif
			ctx->idx = tmp_idx;
			i++;
		}
	}

	return 0;
}

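/*
 * Emit a branch to the common exit path. If the epilogue is out of
 * direct branch range, fall back to (or lay down) an alternate copy of
 * the epilogue within range of this instruction.
 */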
int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
{
	if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
		PPC_JMP(exit_addr);
	} else if (ctx->alt_exit_addr) {
		if (WARN_ON(!is_offset_in_branch_range((long)ctx->alt_exit_addr - (ctx->idx * 4))))
			return -1;
		PPC_JMP(ctx->alt_exit_addr);
	} else {
		ctx->alt_exit_addr = ctx->idx * 4;
		bpf_jit_build_epilogue(image, ctx);
	}

	return 0;
}

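/*
 * State cached in fp->aux->jit_data between the initial JIT passes and
 * the extra pass that fixes up subprog call and ldimm64 addresses.
 */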
struct powerpc64_jit_data {
	struct bpf_binary_header *header;
	u32 *addrs;
	u8 *image;
	u32 proglen;
	struct codegen_context ctx;
};

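/*
 * Returning true asks the BPF core to insert explicit zero-extension
 * instructions after 32-bit subregister writes, so the JIT does not
 * have to zero the upper half of the destination register itself.
 */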
bool bpf_jit_needs_zext(void)
{
	return true;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct powerpc64_jit_data *jit_data;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;
	bool extra_pass = false;
	u32 extable_len;
	u32 fixup_len;

	if (!fp->jit_requested)
		return org_fp;

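	/*
	 * Constant blinding may hand back a clone with randomized
	 * immediates as a hardening measure (e.g. against JIT spraying);
	 * on failure, stick with the original program.
	 */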
	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;

	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = org_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}

	flen = fp->len;
	addrs = jit_data->addrs;
	if (addrs) {
		cgctx = jit_data->ctx;
		image = jit_data->image;
		bpf_hdr = jit_data->header;
		proglen = jit_data->proglen;
		extra_pass = true;
		goto skip_init_ctx;
	}

	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out_addrs;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));
	bpf_jit_init_reg_mapping(&cgctx);

	/* Make sure that the stack is quadword aligned. */
	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out_addrs;
	}

	/*
	 * If we have seen a tail call, we need a second pass.
	 * This is because bpf_jit_emit_common_epilogue() is called
	 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
	 * We also need a second pass if the program ended up too large,
	 * to ensure that BPF_EXIT branches are in range.
	 */
	if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
		cgctx.idx = 0;
		if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
			fp = org_fp;
			goto out_addrs;
		}
	}

	bpf_jit_realloc_regs(&cgctx);
	/*
	 * Pretend to build prologue, given the features we've seen.  This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(0, &cgctx);
	addrs[fp->len] = cgctx.idx * 4;
	bpf_jit_build_epilogue(0, &cgctx);

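	/*
	 * Final image layout: [function descriptor][JITed program]
	 * [extable fixup stubs][exception table]. The fixup stubs and
	 * the table only exist when the program has BPF_PROBE_MEM loads.
	 */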
	fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
	extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
	if (!bpf_hdr) {
		fp = org_fp;
		goto out_addrs;
	}

	if (extable_len)
		fp->aux->extable = (void *)image + FUNCTION_DESCR_SIZE + proglen + fixup_len;

skip_init_ctx:
	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

	if (extra_pass) {
		/*
		 * Do not touch the prologue and epilogue as they will remain
		 * unchanged. Only fix the branch target address for subprog
		 * calls in the body, and ldimm64 instructions.
		 *
		 * This does not change the offsets and lengths of the subprog
		 * call instruction sequences and hence, the size of the JITed
		 * image as well.
		 */
		bpf_jit_fixup_addresses(fp, code_base, &cgctx, addrs);

		/* There is no need to perform the usual passes. */
		goto skip_codegen_passes;
	}

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		cgctx.alt_exit_addr = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass)) {
			bpf_jit_binary_free(bpf_hdr);
			fp = org_fp;
			goto out_addrs;
		}
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

skip_codegen_passes:
	if (bpf_jit_enable > 1)
		/*
		 * Note that we dump the address of code_base rather than
		 * image, since the opcodes live at code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef CONFIG_PPC64_ELF_ABI_V1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

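	/*
	 * Publish the image. jited_len includes the function descriptor
	 * words (FUNCTION_DESCR_SIZE is zero on ABIs without descriptors).
	 */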
	fp->bpf_func = (void *)image;
	fp->jited = 1;
	fp->jited_len = proglen + FUNCTION_DESCR_SIZE;

	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + bpf_hdr->size);
	if (!fp->is_func || extra_pass) {
		bpf_jit_binary_lock_ro(bpf_hdr);
		bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	} else {
		jit_data->addrs = addrs;
		jit_data->ctx = cgctx;
		jit_data->proglen = proglen;
		jit_data->image = image;
		jit_data->header = bpf_hdr;
	}

out:
	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}

/*
 * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
 * this function, as this only applies to BPF_PROBE_MEM, for now.
 */
int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
			  int insn_idx, int jmp_off, int dst_reg)
{
	off_t offset;
	unsigned long pc;
	struct exception_table_entry *ex;
	u32 *fixup;

	/* Populate extable entries only in the last pass */
	if (pass != 2)
		return 0;

	if (!fp->aux->extable ||
	    WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
		return -EINVAL;

	pc = (unsigned long)&image[insn_idx];

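	/*
	 * Fixup stubs sit immediately before the exception table, one
	 * BPF_FIXUP_LEN-word slot per entry: zero the destination
	 * register (both halves on ppc32) and branch back past the
	 * faulting load.
	 */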
	fixup = (void *)fp->aux->extable -
		(fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
		(ctx->exentry_idx * BPF_FIXUP_LEN * 4);

	fixup[0] = PPC_RAW_LI(dst_reg, 0);
	if (IS_ENABLED(CONFIG_PPC32))
		fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */

	fixup[BPF_FIXUP_LEN - 1] =
		PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);

	ex = &fp->aux->extable[ctx->exentry_idx];

	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex->insn = offset;

	offset = (long)fixup - (long)&ex->fixup;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex->fixup = offset;

	ctx->exentry_idx++;
	return 0;
}
v3.5.6
/* bpf_jit_comp.c: BPF JIT compiler for PPC64
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include "bpf_jit.h"

#ifndef __BIG_ENDIAN
/* There are endianness assumptions herein. */
#error "Little-endian PPC not supported in BPF compiler"
#endif

int bpf_jit_enable __read_mostly;

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		/* Make stackframe */
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(0));
			PPC_STD(0, 1, 16);

			/* Back up non-volatile regs. */
			PPC_STD(r_D, 1, -(8*(32-r_D)));
			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_STD(i, 1, -(8*(32-i)));
			}
		}
		EMIT(PPC_INST_STDU | __PPC_RS(1) | __PPC_RA(1) |
		     (-BPF_PPC_STACKFRAME & 0xfffc));
	}

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D	 = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	switch (filter[0].code) {
	case BPF_S_RET_K:
	case BPF_S_LD_W_LEN:
	case BPF_S_ANC_PROTOCOL:
	case BPF_S_ANC_IFINDEX:
	case BPF_S_ANC_MARK:
	case BPF_S_ANC_RXHASH:
	case BPF_S_ANC_CPU:
	case BPF_S_ANC_QUEUE:
	case BPF_S_LD_W_ABS:
	case BPF_S_LD_H_ABS:
	case BPF_S_LD_B_ABS:
		/* first instruction sets A register (or is RET 'constant') */
		break;
	default:
		/* make sure we don't leak kernel information to user */
		PPC_LI(r_A, 0);
	}
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_LD(0, 1, 16);
			PPC_MTLR(0);
			PPC_LD(r_D, 1, -(8*(32-r_D)));
			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_LD(i, 1, -(8*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	PPC_BLR();
}

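/*
 * Pick the skb load helper from the sign of the constant offset K:
 * non-negative K takes the positive-offset fast path, negative K at or
 * above SKF_LL_OFF takes the negative-offset (ancillary) helper, and
 * anything below that range falls back to the generic helper.
 */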
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

134static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
135			      struct codegen_context *ctx,
136			      unsigned int *addrs)
137{
138	const struct sock_filter *filter = fp->insns;
139	int flen = fp->len;
140	u8 *func;
141	unsigned int true_cond;
142	int i;
143
144	/* Start of epilogue code */
145	unsigned int exit_addr = addrs[flen];
146
147	for (i = 0; i < flen; i++) {
148		unsigned int K = filter[i].k;
149
150		/*
151		 * addrs[] maps a BPF bytecode address into a real offset from
152		 * the start of the body code.
153		 */
154		addrs[i] = ctx->idx * 4;
155
156		switch (filter[i].code) {
157			/*** ALU ops ***/
158		case BPF_S_ALU_ADD_X: /* A += X; */
159			ctx->seen |= SEEN_XREG;
160			PPC_ADD(r_A, r_A, r_X);
161			break;
162		case BPF_S_ALU_ADD_K: /* A += K; */
163			if (!K)
164				break;
165			PPC_ADDI(r_A, r_A, IMM_L(K));
166			if (K >= 32768)
167				PPC_ADDIS(r_A, r_A, IMM_HA(K));
168			break;
169		case BPF_S_ALU_SUB_X: /* A -= X; */
170			ctx->seen |= SEEN_XREG;
171			PPC_SUB(r_A, r_A, r_X);
172			break;
173		case BPF_S_ALU_SUB_K: /* A -= K */
174			if (!K)
175				break;
176			PPC_ADDI(r_A, r_A, IMM_L(-K));
177			if (K >= 32768)
178				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
179			break;
180		case BPF_S_ALU_MUL_X: /* A *= X; */
181			ctx->seen |= SEEN_XREG;
182			PPC_MUL(r_A, r_A, r_X);
183			break;
184		case BPF_S_ALU_MUL_K: /* A *= K */
185			if (K < 32768)
186				PPC_MULI(r_A, r_A, K);
187			else {
188				PPC_LI32(r_scratch1, K);
189				PPC_MUL(r_A, r_A, r_scratch1);
190			}
191			break;
192		case BPF_S_ALU_DIV_X: /* A /= X; */
193			ctx->seen |= SEEN_XREG;
194			PPC_CMPWI(r_X, 0);
195			if (ctx->pc_ret0 != -1) {
196				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
197			} else {
198				/*
199				 * Exit, returning 0; first pass hits here
200				 * (longer worst-case code size).
201				 */
202				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
203				PPC_LI(r_ret, 0);
204				PPC_JMP(exit_addr);
205			}
206			PPC_DIVWU(r_A, r_A, r_X);
207			break;
208		case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
209			PPC_LI32(r_scratch1, K);
210			/* Top 32 bits of 64bit result -> A */
211			PPC_MULHWU(r_A, r_A, r_scratch1);
212			break;
213		case BPF_S_ALU_AND_X:
214			ctx->seen |= SEEN_XREG;
215			PPC_AND(r_A, r_A, r_X);
216			break;
217		case BPF_S_ALU_AND_K:
218			if (!IMM_H(K))
219				PPC_ANDI(r_A, r_A, K);
220			else {
221				PPC_LI32(r_scratch1, K);
222				PPC_AND(r_A, r_A, r_scratch1);
223			}
224			break;
225		case BPF_S_ALU_OR_X:
226			ctx->seen |= SEEN_XREG;
227			PPC_OR(r_A, r_A, r_X);
228			break;
229		case BPF_S_ALU_OR_K:
230			if (IMM_L(K))
231				PPC_ORI(r_A, r_A, IMM_L(K));
232			if (K >= 65536)
233				PPC_ORIS(r_A, r_A, IMM_H(K));
234			break;
235		case BPF_S_ALU_LSH_X: /* A <<= X; */
236			ctx->seen |= SEEN_XREG;
237			PPC_SLW(r_A, r_A, r_X);
238			break;
239		case BPF_S_ALU_LSH_K:
240			if (K == 0)
241				break;
242			else
243				PPC_SLWI(r_A, r_A, K);
244			break;
245		case BPF_S_ALU_RSH_X: /* A >>= X; */
246			ctx->seen |= SEEN_XREG;
247			PPC_SRW(r_A, r_A, r_X);
248			break;
249		case BPF_S_ALU_RSH_K: /* A >>= K; */
250			if (K == 0)
251				break;
252			else
253				PPC_SRWI(r_A, r_A, K);
254			break;
255		case BPF_S_ALU_NEG:
256			PPC_NEG(r_A, r_A);
257			break;
258		case BPF_S_RET_K:
259			PPC_LI32(r_ret, K);
260			if (!K) {
261				if (ctx->pc_ret0 == -1)
262					ctx->pc_ret0 = i;
263			}
264			/*
265			 * If this isn't the very last instruction, branch to
266			 * the epilogue if we've stuff to clean up.  Otherwise,
267			 * if there's nothing to tidy, just return.  If we /are/
268			 * the last instruction, we're about to fall through to
269			 * the epilogue to return.
270			 */
271			if (i != flen - 1) {
272				/*
273				 * Note: 'seen' is properly valid only on pass
274				 * #2.	Both parts of this conditional are the
275				 * same instruction size though, meaning the
276				 * first pass will still correctly determine the
277				 * code size/addresses.
278				 */
279				if (ctx->seen)
280					PPC_JMP(exit_addr);
281				else
282					PPC_BLR();
283			}
284			break;
285		case BPF_S_RET_A:
286			PPC_MR(r_ret, r_A);
287			if (i != flen - 1) {
288				if (ctx->seen)
289					PPC_JMP(exit_addr);
290				else
291					PPC_BLR();
292			}
293			break;
294		case BPF_S_MISC_TAX: /* X = A */
295			PPC_MR(r_X, r_A);
296			break;
297		case BPF_S_MISC_TXA: /* A = X */
298			ctx->seen |= SEEN_XREG;
299			PPC_MR(r_A, r_X);
300			break;
301
302			/*** Constant loads/M[] access ***/
303		case BPF_S_LD_IMM: /* A = K */
304			PPC_LI32(r_A, K);
305			break;
306		case BPF_S_LDX_IMM: /* X = K */
307			PPC_LI32(r_X, K);
308			break;
309		case BPF_S_LD_MEM: /* A = mem[K] */
310			PPC_MR(r_A, r_M + (K & 0xf));
311			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
312			break;
313		case BPF_S_LDX_MEM: /* X = mem[K] */
314			PPC_MR(r_X, r_M + (K & 0xf));
315			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
316			break;
317		case BPF_S_ST: /* mem[K] = A */
318			PPC_MR(r_M + (K & 0xf), r_A);
319			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
320			break;
321		case BPF_S_STX: /* mem[K] = X */
322			PPC_MR(r_M + (K & 0xf), r_X);
323			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
324			break;
325		case BPF_S_LD_W_LEN: /*	A = skb->len; */
326			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
327			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
328			break;
329		case BPF_S_LDX_W_LEN: /* X = skb->len; */
330			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
331			break;
332
333			/*** Ancillary info loads ***/
334
335			/* None of the BPF_S_ANC* codes appear to be passed by
336			 * sk_chk_filter().  The interpreter and the x86 BPF
337			 * compiler implement them so we do too -- they may be
338			 * planted in future.
339			 */
340		case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
341			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
342						  protocol) != 2);
343			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
344							  protocol));
345			/* ntohs is a NOP with BE loads. */
346			break;
347		case BPF_S_ANC_IFINDEX:
348			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
349								dev));
350			PPC_CMPDI(r_scratch1, 0);
351			if (ctx->pc_ret0 != -1) {
352				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
353			} else {
354				/* Exit, returning 0; first pass hits here. */
355				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
356				PPC_LI(r_ret, 0);
357				PPC_JMP(exit_addr);
358			}
359			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
360						  ifindex) != 4);
361			PPC_LWZ_OFFS(r_A, r_scratch1,
362				     offsetof(struct net_device, ifindex));
363			break;
364		case BPF_S_ANC_MARK:
365			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
366			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
367							  mark));
368			break;
369		case BPF_S_ANC_RXHASH:
370			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
371			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
372							  rxhash));
373			break;
374		case BPF_S_ANC_QUEUE:
375			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
376						  queue_mapping) != 2);
377			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
378							  queue_mapping));
379			break;
380		case BPF_S_ANC_CPU:
381#ifdef CONFIG_SMP
382			/*
383			 * PACA ptr is r13:
384			 * raw_smp_processor_id() = local_paca->paca_index
385			 */
386			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
387						  paca_index) != 2);
388			PPC_LHZ_OFFS(r_A, 13,
389				     offsetof(struct paca_struct, paca_index));
390#else
391			PPC_LI(r_A, 0);
392#endif
393			break;
394
395			/*** Absolute loads from packet header/data ***/
396		case BPF_S_LD_W_ABS:
397			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
398			goto common_load;
399		case BPF_S_LD_H_ABS:
400			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
401			goto common_load;
402		case BPF_S_LD_B_ABS:
403			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
404		common_load:
405			/* Load from [K]. */
406			ctx->seen |= SEEN_DATAREF;
407			PPC_LI64(r_scratch1, func);
408			PPC_MTLR(r_scratch1);
409			PPC_LI32(r_addr, K);
410			PPC_BLRL();
411			/*
412			 * Helper returns 'lt' condition on error, and an
413			 * appropriate return value in r3
414			 */
415			PPC_BCC(COND_LT, exit_addr);
416			break;
417
418			/*** Indirect loads from packet header/data ***/
419		case BPF_S_LD_W_IND:
420			func = sk_load_word;
421			goto common_load_ind;
422		case BPF_S_LD_H_IND:
423			func = sk_load_half;
424			goto common_load_ind;
425		case BPF_S_LD_B_IND:
426			func = sk_load_byte;
427		common_load_ind:
428			/*
429			 * Load from [X + K].  Negative offsets are tested for
430			 * in the helper functions.
431			 */
432			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
433			PPC_LI64(r_scratch1, func);
434			PPC_MTLR(r_scratch1);
435			PPC_ADDI(r_addr, r_X, IMM_L(K));
436			if (K >= 32768)
437				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
438			PPC_BLRL();
439			/* If error, cr0.LT set */
440			PPC_BCC(COND_LT, exit_addr);
441			break;
442
443		case BPF_S_LDX_B_MSH:
444			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
445			goto common_load;
446			break;
447
448			/*** Jump and branches ***/
449		case BPF_S_JMP_JA:
450			if (K != 0)
451				PPC_JMP(addrs[i + 1 + K]);
452			break;
453
454		case BPF_S_JMP_JGT_K:
455		case BPF_S_JMP_JGT_X:
456			true_cond = COND_GT;
457			goto cond_branch;
458		case BPF_S_JMP_JGE_K:
459		case BPF_S_JMP_JGE_X:
460			true_cond = COND_GE;
461			goto cond_branch;
462		case BPF_S_JMP_JEQ_K:
463		case BPF_S_JMP_JEQ_X:
464			true_cond = COND_EQ;
465			goto cond_branch;
466		case BPF_S_JMP_JSET_K:
467		case BPF_S_JMP_JSET_X:
468			true_cond = COND_NE;
469			/* Fall through */
470		cond_branch:
471			/* same targets, can avoid doing the test :) */
472			if (filter[i].jt == filter[i].jf) {
473				if (filter[i].jt > 0)
474					PPC_JMP(addrs[i + 1 + filter[i].jt]);
475				break;
476			}
477
478			switch (filter[i].code) {
479			case BPF_S_JMP_JGT_X:
480			case BPF_S_JMP_JGE_X:
481			case BPF_S_JMP_JEQ_X:
482				ctx->seen |= SEEN_XREG;
483				PPC_CMPLW(r_A, r_X);
484				break;
485			case BPF_S_JMP_JSET_X:
486				ctx->seen |= SEEN_XREG;
487				PPC_AND_DOT(r_scratch1, r_A, r_X);
488				break;
489			case BPF_S_JMP_JEQ_K:
490			case BPF_S_JMP_JGT_K:
491			case BPF_S_JMP_JGE_K:
492				if (K < 32768)
493					PPC_CMPLWI(r_A, K);
494				else {
495					PPC_LI32(r_scratch1, K);
496					PPC_CMPLW(r_A, r_scratch1);
497				}
498				break;
499			case BPF_S_JMP_JSET_K:
500				if (K < 32768)
501					/* PPC_ANDI is /only/ dot-form */
502					PPC_ANDI(r_scratch1, r_A, K);
503				else {
504					PPC_LI32(r_scratch1, K);
505					PPC_AND_DOT(r_scratch1, r_A,
506						    r_scratch1);
507				}
508				break;
509			}
510			/* Sometimes branches are constructed "backward", with
511			 * the false path being the branch and true path being
512			 * a fallthrough to the next instruction.
513			 */
514			if (filter[i].jt == 0)
515				/* Swap the sense of the branch */
516				PPC_BCC(true_cond ^ COND_CMP_TRUE,
517					addrs[i + 1 + filter[i].jf]);
518			else {
519				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
520				if (filter[i].jf != 0)
521					PPC_JMP(addrs[i + 1 + filter[i].jf]);
522			}
523			break;
524		default:
525			/* The filter contains something cruel & unusual.
526			 * We don't handle it, but also there shouldn't be
527			 * anything missing from our list.
528			 */
529			if (printk_ratelimit())
530				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
531				       filter[i].code, i);
532			return -ENOTSUPP;
533		}
 
 
534
 
 
 
 
 
 
 
 
 
535	}
536	/* Set end-of-body-code address for exit. */
537	addrs[i] = ctx->idx * 4;
538
539	return 0;
540}
 
 
 
541
void bpf_jit_compile(struct sk_filter *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes.	Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction.  Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in).	 Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used.  Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter.	On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code.  On subsequent passes, branches will be
	 * generated short or long and code size will reduce.  With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real.  Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long.  The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen.  This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
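	/*
	 * Allocate at least sizeof(struct work_struct): bpf_jit_free()
	 * below reuses the image memory as a work_struct to defer
	 * module_free() to process context.
	 */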
	image = module_alloc(max_t(unsigned int, alloclen,
				   sizeof(struct work_struct)));
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		pr_info("flen=%d proglen=%u pass=%d image=%p\n",
		       flen, proglen, pass, image);

	if (image) {
		if (bpf_jit_enable > 1)
			print_hex_dump(KERN_ERR, "JIT code: ",
				       DUMP_PREFIX_ADDRESS,
				       16, 1, code_base,
				       proglen, false);

		bpf_flush_icache(code_base, code_base + (proglen/4));
		/* Function descriptor nastiness: Address + TOC */
		((u64 *)image)[0] = (u64)code_base;
		((u64 *)image)[1] = local_paca->kernel_toc;
		fp->bpf_func = (void *)image;
	}

out:
	kfree(addrs);
	return;
}

static void jit_free_defer(struct work_struct *arg)
{
	module_free(NULL, arg);
}

/* Run from softirq; we must use a work_struct to call
 * module_free() from process context.
 */
void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->bpf_func != sk_run_filter) {
		struct work_struct *work = (struct work_struct *)fp->bpf_func;

		INIT_WORK(work, jit_free_defer);
		schedule_work(work);
	}
}