v6.8: arch/powerpc/net/bpf_jit_comp.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * eBPF JIT compiler
  4 *
  5 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
  6 *		  IBM Corporation
  7 *
  8 * Based on the powerpc classic BPF JIT compiler by Matt Evans
  9 */
 10#include <linux/moduleloader.h>
 11#include <asm/cacheflush.h>
 12#include <asm/asm-compat.h>
 13#include <linux/netdevice.h>
 14#include <linux/filter.h>
 15#include <linux/if_vlan.h>
 16#include <linux/kernel.h>
 17#include <linux/memory.h>
 18#include <linux/bpf.h>
 19
 20#include <asm/kprobes.h>
 21#include <asm/code-patching.h>
 22
 23#include "bpf_jit.h"
 24
 25static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
 26{
 27	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
 28}
 29
 30int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
 31{
 32	if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
 33		PPC_JMP(exit_addr);
 34	} else if (ctx->alt_exit_addr) {
 35		if (WARN_ON(!is_offset_in_branch_range((long)ctx->alt_exit_addr - (ctx->idx * 4))))
 36			return -1;
 37		PPC_JMP(ctx->alt_exit_addr);
 38	} else {
 39		ctx->alt_exit_addr = ctx->idx * 4;
 40		bpf_jit_build_epilogue(image, ctx);
 41	}
 42
 43	return 0;
 44}
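
Whether a BPF_EXIT can use the direct PPC_JMP() above comes down to is_offset_in_branch_range(): a powerpc I-form branch encodes a 26-bit, word-aligned signed displacement, so a direct jump only reaches about +/- 32 MB. A minimal user-space sketch of that range test (the helper name below is ours, not the kernel's):

#include <stdbool.h>

/* Sketch of the +/- 32 MB, word-aligned displacement check that decides
 * between a direct branch and the alternate exit path built above. */
static bool offset_in_branch_range(long offset)
{
	return offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3);
}
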
 45
 46struct powerpc_jit_data {
 47	/* address of rw header */
 48	struct bpf_binary_header *hdr;
 49	/* address of ro final header */
 50	struct bpf_binary_header *fhdr;
 51	u32 *addrs;
 52	u8 *fimage;
 53	u32 proglen;
 54	struct codegen_context ctx;
 55};
 56
 57bool bpf_jit_needs_zext(void)
 58{
 59	return true;
 60}
 61
 62struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 63{
 64	u32 proglen;
 65	u32 alloclen;
 66	u8 *image = NULL;
 67	u32 *code_base;
 68	u32 *addrs;
 69	struct powerpc_jit_data *jit_data;
 70	struct codegen_context cgctx;
 71	int pass;
 72	int flen;
 73	struct bpf_binary_header *fhdr = NULL;
 74	struct bpf_binary_header *hdr = NULL;
 75	struct bpf_prog *org_fp = fp;
 76	struct bpf_prog *tmp_fp;
 77	bool bpf_blinded = false;
 78	bool extra_pass = false;
 79	u8 *fimage = NULL;
 80	u32 *fcode_base;
 81	u32 extable_len;
 82	u32 fixup_len;
 83
 84	if (!fp->jit_requested)
 85		return org_fp;
 86
 87	tmp_fp = bpf_jit_blind_constants(org_fp);
 88	if (IS_ERR(tmp_fp))
 89		return org_fp;
 90
 91	if (tmp_fp != org_fp) {
 92		bpf_blinded = true;
 93		fp = tmp_fp;
 94	}
 95
 96	jit_data = fp->aux->jit_data;
 97	if (!jit_data) {
 98		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
 99		if (!jit_data) {
100			fp = org_fp;
101			goto out;
102		}
103		fp->aux->jit_data = jit_data;
104	}
105
106	flen = fp->len;
107	addrs = jit_data->addrs;
108	if (addrs) {
109		cgctx = jit_data->ctx;
110		/*
111		 * JIT compiled to a writable location (image/code_base) first.
112		 * It is then moved to the readonly final location (fimage/fcode_base)
113		 * using instruction patching.
114		 */
115		fimage = jit_data->fimage;
116		fhdr = jit_data->fhdr;
117		proglen = jit_data->proglen;
118		hdr = jit_data->hdr;
119		image = (void *)hdr + ((void *)fimage - (void *)fhdr);
120		extra_pass = true;
121		/* During extra pass, ensure index is reset before repopulating extable entries */
122		cgctx.exentry_idx = 0;
123		goto skip_init_ctx;
124	}
125
126	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
127	if (addrs == NULL) {
128		fp = org_fp;
129		goto out_addrs;
130	}
131
132	memset(&cgctx, 0, sizeof(struct codegen_context));
133	bpf_jit_init_reg_mapping(&cgctx);
134
135	/* Make sure that the stack is quadword aligned. */
136	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
137
138	/* Scouting faux-generate pass 0 */
139	if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
140		/* We hit something illegal or unsupported. */
141		fp = org_fp;
142		goto out_addrs;
143	}
144
145	/*
146	 * If we have seen a tail call, we need a second pass.
147	 * This is because bpf_jit_emit_common_epilogue() is called
148	 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
149	 * We also need a second pass if we ended up with too large
150	 * a program so as to ensure BPF_EXIT branches are in range.
151	 */
152	if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
153		cgctx.idx = 0;
154		if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
155			fp = org_fp;
156			goto out_addrs;
157		}
158	}
159
160	bpf_jit_realloc_regs(&cgctx);
161	/*
162	 * Pretend to build prologue, given the features we've seen.  This will
163	 * update cgctx.idx as it pretends to output instructions, then we can
164	 * calculate total size from idx.
165	 */
166	bpf_jit_build_prologue(NULL, &cgctx);
167	addrs[fp->len] = cgctx.idx * 4;
168	bpf_jit_build_epilogue(NULL, &cgctx);
169
170	fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
171	extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);
172
173	proglen = cgctx.idx * 4;
174	alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;
175
176	fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image,
177					      bpf_jit_fill_ill_insns);
178	if (!fhdr) {
179		fp = org_fp;
180		goto out_addrs;
181	}
182
183	if (extable_len)
184		fp->aux->extable = (void *)fimage + FUNCTION_DESCR_SIZE + proglen + fixup_len;
185
186skip_init_ctx:
187	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
188	fcode_base = (u32 *)(fimage + FUNCTION_DESCR_SIZE);
189
190	/* Code generation passes 1-2 */
191	for (pass = 1; pass < 3; pass++) {
192		/* Now build the prologue, body code & epilogue for real. */
193		cgctx.idx = 0;
194		cgctx.alt_exit_addr = 0;
195		bpf_jit_build_prologue(code_base, &cgctx);
196		if (bpf_jit_build_body(fp, code_base, fcode_base, &cgctx, addrs, pass,
197				       extra_pass)) {
198			bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size));
199			bpf_jit_binary_pack_free(fhdr, hdr);
200			fp = org_fp;
201			goto out_addrs;
202		}
203		bpf_jit_build_epilogue(code_base, &cgctx);
204
205		if (bpf_jit_enable > 1)
206			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
207				proglen - (cgctx.idx * 4), cgctx.seen);
208	}
209
210	if (bpf_jit_enable > 1)
211		/*
212		 * Note that we output the base address of the code_base
213		 * rather than image, since opcodes are in code_base.
214		 */
215		bpf_jit_dump(flen, proglen, pass, code_base);
216
217#ifdef CONFIG_PPC64_ELF_ABI_V1
218	/* Function descriptor nastiness: Address + TOC */
219	((u64 *)image)[0] = (u64)fcode_base;
220	((u64 *)image)[1] = local_paca->kernel_toc;
221#endif
222
223	fp->bpf_func = (void *)fimage;
224	fp->jited = 1;
225	fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
226
227	if (!fp->is_func || extra_pass) {
228		if (bpf_jit_binary_pack_finalize(fp, fhdr, hdr)) {
229			fp = org_fp;
230			goto out_addrs;
231		}
232		bpf_prog_fill_jited_linfo(fp, addrs);
233out_addrs:
234		kfree(addrs);
235		kfree(jit_data);
236		fp->aux->jit_data = NULL;
237	} else {
238		jit_data->addrs = addrs;
239		jit_data->ctx = cgctx;
240		jit_data->proglen = proglen;
241		jit_data->fimage = fimage;
242		jit_data->fhdr = fhdr;
243		jit_data->hdr = hdr;
244	}
245
246out:
247	if (bpf_blinded)
248		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
249
250	return fp;
251}
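
On the extra pass above, the writable image is recovered from the read-only fimage by reusing the code's offset inside its header (image = hdr + (fimage - fhdr)). A small stand-alone illustration of that pointer arithmetic, with plain buffers standing in for the RW and RO allocations:

#include <stdio.h>

int main(void)
{
	/* Stand-ins for the RO header (fhdr/fimage) and the RW shadow copy. */
	char fhdr[128], hdr[128];
	char *fimage = fhdr + 32;		/* code starts at some offset in the RO header */
	char *image  = hdr + (fimage - fhdr);	/* same offset inside the RW header */

	printf("code offset in both headers: %td bytes\n", fimage - fhdr);
	printf("image points at hdr+32: %s\n", image == hdr + 32 ? "yes" : "no");
	return 0;
}
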
252
253/*
254 * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
255 * this function, as this only applies to BPF_PROBE_MEM, for now.
256 */
257int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
258			  struct codegen_context *ctx, int insn_idx, int jmp_off,
259			  int dst_reg)
260{
261	off_t offset;
262	unsigned long pc;
263	struct exception_table_entry *ex, *ex_entry;
264	u32 *fixup;
265
266	/* Populate extable entries only in the last pass */
267	if (pass != 2)
268		return 0;
269
270	if (!fp->aux->extable ||
271	    WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
272		return -EINVAL;
273
274	/*
275	 * Program is first written to image before copying to the
276	 * final location (fimage). Accordingly, update in the image first.
277	 * As all offsets used are relative, copying as is to the
278	 * final location should be alright.
279	 */
280	pc = (unsigned long)&image[insn_idx];
281	ex = (void *)fp->aux->extable - (void *)fimage + (void *)image;
282
283	fixup = (void *)ex -
284		(fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
285		(ctx->exentry_idx * BPF_FIXUP_LEN * 4);
286
287	fixup[0] = PPC_RAW_LI(dst_reg, 0);
288	if (IS_ENABLED(CONFIG_PPC32))
289		fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */
290
291	fixup[BPF_FIXUP_LEN - 1] =
292		PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);
293
294	ex_entry = &ex[ctx->exentry_idx];
295
296	offset = pc - (long)&ex_entry->insn;
297	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
298		return -ERANGE;
299	ex_entry->insn = offset;
300
301	offset = (long)fixup - (long)&ex_entry->fixup;
302	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
303		return -ERANGE;
304	ex_entry->fixup = offset;
305
306	ctx->exentry_idx++;
307	return 0;
308}
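
Because bpf_add_extable_entry() stores self-relative offsets, the entries stay valid when the image is copied verbatim to its final read-only location. A sketch of how such relative entries are resolved back to absolute addresses at fault time (assuming the usual relative-extable layout; the helper names here are ours):

static inline unsigned long sketch_ex_insn_addr(const struct exception_table_entry *x)
{
	/* insn holds "faulting pc - &x->insn", so adding the field's own
	 * address recovers the absolute program counter. */
	return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long sketch_ex_fixup_addr(const struct exception_table_entry *x)
{
	/* likewise, fixup holds "fixup code - &x->fixup". */
	return (unsigned long)&x->fixup + x->fixup;
}
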
309
310void *bpf_arch_text_copy(void *dst, void *src, size_t len)
311{
312	int err;
313
314	if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
315		return ERR_PTR(-EINVAL);
316
317	mutex_lock(&text_mutex);
318	err = patch_instructions(dst, src, len, false);
319	mutex_unlock(&text_mutex);
320
321	return err ? ERR_PTR(err) : dst;
322}
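
bpf_arch_text_copy() reports failure through the usual ERR_PTR convention rather than NULL. A minimal sketch of how a caller consumes that return value (the wrapper is hypothetical, shown only to make the convention explicit):

/* Hypothetical wrapper (ours): a negative errno comes back encoded in the
 * pointer, dst comes back unchanged on success. */
static int copy_jit_text(void *dst, void *src, size_t len)
{
	void *ret = bpf_arch_text_copy(dst, src, len);

	return IS_ERR(ret) ? PTR_ERR(ret) : 0;
}
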
323
324int bpf_arch_text_invalidate(void *dst, size_t len)
325{
326	u32 insn = BREAKPOINT_INSTRUCTION;
327	int ret;
328
329	if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
330		return -EINVAL;
331
332	mutex_lock(&text_mutex);
333	ret = patch_instructions(dst, &insn, len, true);
334	mutex_unlock(&text_mutex);
335
336	return ret;
337}
338
339void bpf_jit_free(struct bpf_prog *fp)
340{
341	if (fp->jited) {
342		struct powerpc_jit_data *jit_data = fp->aux->jit_data;
343		struct bpf_binary_header *hdr;
344
345		/*
346		 * If we fail the final pass of JIT (from jit_subprogs),
347		 * the program may not be finalized yet. Call finalize here
348		 * before freeing it.
349		 */
350		if (jit_data) {
351			bpf_jit_binary_pack_finalize(fp, jit_data->fhdr, jit_data->hdr);
352			kvfree(jit_data->addrs);
353			kfree(jit_data);
354		}
355		hdr = bpf_jit_binary_pack_hdr(fp);
356		bpf_jit_binary_pack_free(hdr, NULL);
357		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
358	}
359
360	bpf_prog_unlock_free(fp);
361}
v3.15: arch/powerpc/net/bpf_jit_comp.c
  1/* bpf_jit_comp.c: BPF JIT compiler for PPC64
  2 *
  3 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
  4 *
  5 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
  6 *
  7 * This program is free software; you can redistribute it and/or
  8 * modify it under the terms of the GNU General Public License
  9 * as published by the Free Software Foundation; version 2
 10 * of the License.
 11 */
 12#include <linux/moduleloader.h>
 13#include <asm/cacheflush.h>
 14#include <linux/netdevice.h>
 15#include <linux/filter.h>
 16#include <linux/if_vlan.h>
 17
 18#include "bpf_jit.h"
 19
 20int bpf_jit_enable __read_mostly;
 21
 22static inline void bpf_flush_icache(void *start, void *end)
 23{
 24	smp_wmb();
 25	flush_icache_range((unsigned long)start, (unsigned long)end);
 26}
 27
 28static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
 29				   struct codegen_context *ctx)
 30{
 31	int i;
 32	const struct sock_filter *filter = fp->insns;
 33
 34	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
 35		/* Make stackframe */
 36		if (ctx->seen & SEEN_DATAREF) {
 37			/* If we call any helpers (for loads), save LR */
 38			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
 39			PPC_STD(0, 1, 16);
 40
 41			/* Back up non-volatile regs. */
 42			PPC_STD(r_D, 1, -(8*(32-r_D)));
 43			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
 44		}
 45		if (ctx->seen & SEEN_MEM) {
 46			/*
 47			 * Conditionally save regs r15-r31 as some will be used
 48			 * for M[] data.
 49			 */
 50			for (i = r_M; i < (r_M+16); i++) {
 51				if (ctx->seen & (1 << (i-r_M)))
 52					PPC_STD(i, 1, -(8*(32-i)));
 53			}
 54		}
 55		EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
 56		     (-BPF_PPC_STACKFRAME & 0xfffc));
 57	}
 58
 59	if (ctx->seen & SEEN_DATAREF) {
 60		/*
 61		 * If this filter needs to access skb data,
 62		 * prepare r_D and r_HL:
 63		 *  r_HL = skb->len - skb->data_len
 64		 *  r_D	 = skb->data
 65		 */
 66		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
 67							 data_len));
 68		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
 69		PPC_SUB(r_HL, r_HL, r_scratch1);
 70		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
 71	}
 72
 73	if (ctx->seen & SEEN_XREG) {
 74		/*
 75		 * TODO: Could also detect whether first instr. sets X and
 76		 * avoid this (as below, with A).
 77		 */
 78		PPC_LI(r_X, 0);
 79	}
 80
 81	switch (filter[0].code) {
 82	case BPF_S_RET_K:
 83	case BPF_S_LD_W_LEN:
 84	case BPF_S_ANC_PROTOCOL:
 85	case BPF_S_ANC_IFINDEX:
 86	case BPF_S_ANC_MARK:
 87	case BPF_S_ANC_RXHASH:
 88	case BPF_S_ANC_VLAN_TAG:
 89	case BPF_S_ANC_VLAN_TAG_PRESENT:
 90	case BPF_S_ANC_CPU:
 91	case BPF_S_ANC_QUEUE:
 92	case BPF_S_LD_W_ABS:
 93	case BPF_S_LD_H_ABS:
 94	case BPF_S_LD_B_ABS:
 95		/* first instruction sets A register (or is RET 'constant') */
 96		break;
 97	default:
 98		/* make sure we don't leak kernel information to user */
 99		PPC_LI(r_A, 0);
100	}
101}
102
103static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
104{
105	int i;
106
107	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
108		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
109		if (ctx->seen & SEEN_DATAREF) {
110			PPC_LD(0, 1, 16);
111			PPC_MTLR(0);
112			PPC_LD(r_D, 1, -(8*(32-r_D)));
113			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
114		}
115		if (ctx->seen & SEEN_MEM) {
116			/* Restore any saved non-vol registers */
117			for (i = r_M; i < (r_M+16); i++) {
118				if (ctx->seen & (1 << (i-r_M)))
119					PPC_LD(i, 1, -(8*(32-i)));
120			}
121		}
122	}
123	/* The RETs have left a return value in R3. */
124
125	PPC_BLR();
126}
127
128#define CHOOSE_LOAD_FUNC(K, func) \
129	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
130
131/* Assemble the body code between the prologue & epilogue. */
132static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
133			      struct codegen_context *ctx,
134			      unsigned int *addrs)
135{
136	const struct sock_filter *filter = fp->insns;
137	int flen = fp->len;
138	u8 *func;
139	unsigned int true_cond;
140	int i;
141
142	/* Start of epilogue code */
143	unsigned int exit_addr = addrs[flen];
144
145	for (i = 0; i < flen; i++) {
146		unsigned int K = filter[i].k;
147
148		/*
149		 * addrs[] maps a BPF bytecode address into a real offset from
150		 * the start of the body code.
151		 */
152		addrs[i] = ctx->idx * 4;
153
154		switch (filter[i].code) {
155			/*** ALU ops ***/
156		case BPF_S_ALU_ADD_X: /* A += X; */
157			ctx->seen |= SEEN_XREG;
158			PPC_ADD(r_A, r_A, r_X);
159			break;
160		case BPF_S_ALU_ADD_K: /* A += K; */
161			if (!K)
162				break;
163			PPC_ADDI(r_A, r_A, IMM_L(K));
164			if (K >= 32768)
165				PPC_ADDIS(r_A, r_A, IMM_HA(K));
166			break;
167		case BPF_S_ALU_SUB_X: /* A -= X; */
168			ctx->seen |= SEEN_XREG;
169			PPC_SUB(r_A, r_A, r_X);
170			break;
171		case BPF_S_ALU_SUB_K: /* A -= K */
172			if (!K)
173				break;
174			PPC_ADDI(r_A, r_A, IMM_L(-K));
175			if (K >= 32768)
176				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
177			break;
178		case BPF_S_ALU_MUL_X: /* A *= X; */
179			ctx->seen |= SEEN_XREG;
180			PPC_MUL(r_A, r_A, r_X);
181			break;
182		case BPF_S_ALU_MUL_K: /* A *= K */
183			if (K < 32768)
184				PPC_MULI(r_A, r_A, K);
185			else {
186				PPC_LI32(r_scratch1, K);
187				PPC_MUL(r_A, r_A, r_scratch1);
188			}
189			break;
190		case BPF_S_ALU_MOD_X: /* A %= X; */
191			ctx->seen |= SEEN_XREG;
192			PPC_CMPWI(r_X, 0);
193			if (ctx->pc_ret0 != -1) {
194				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
195			} else {
196				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
197				PPC_LI(r_ret, 0);
198				PPC_JMP(exit_addr);
199			}
200			PPC_DIVWU(r_scratch1, r_A, r_X);
201			PPC_MUL(r_scratch1, r_X, r_scratch1);
202			PPC_SUB(r_A, r_A, r_scratch1);
203			break;
204		case BPF_S_ALU_MOD_K: /* A %= K; */
205			PPC_LI32(r_scratch2, K);
206			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
207			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
208			PPC_SUB(r_A, r_A, r_scratch1);
209			break;
210		case BPF_S_ALU_DIV_X: /* A /= X; */
211			ctx->seen |= SEEN_XREG;
212			PPC_CMPWI(r_X, 0);
213			if (ctx->pc_ret0 != -1) {
214				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
215			} else {
216				/*
217				 * Exit, returning 0; first pass hits here
218				 * (longer worst-case code size).
219				 */
220				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
221				PPC_LI(r_ret, 0);
222				PPC_JMP(exit_addr);
223			}
224			PPC_DIVWU(r_A, r_A, r_X);
225			break;
226		case BPF_S_ALU_DIV_K: /* A /= K */
227			if (K == 1)
228				break;
229			PPC_LI32(r_scratch1, K);
230			PPC_DIVWU(r_A, r_A, r_scratch1);
231			break;
232		case BPF_S_ALU_AND_X:
233			ctx->seen |= SEEN_XREG;
234			PPC_AND(r_A, r_A, r_X);
235			break;
236		case BPF_S_ALU_AND_K:
237			if (!IMM_H(K))
238				PPC_ANDI(r_A, r_A, K);
239			else {
240				PPC_LI32(r_scratch1, K);
241				PPC_AND(r_A, r_A, r_scratch1);
242			}
243			break;
244		case BPF_S_ALU_OR_X:
245			ctx->seen |= SEEN_XREG;
246			PPC_OR(r_A, r_A, r_X);
247			break;
248		case BPF_S_ALU_OR_K:
249			if (IMM_L(K))
250				PPC_ORI(r_A, r_A, IMM_L(K));
251			if (K >= 65536)
252				PPC_ORIS(r_A, r_A, IMM_H(K));
253			break;
254		case BPF_S_ANC_ALU_XOR_X:
255		case BPF_S_ALU_XOR_X: /* A ^= X */
256			ctx->seen |= SEEN_XREG;
257			PPC_XOR(r_A, r_A, r_X);
258			break;
259		case BPF_S_ALU_XOR_K: /* A ^= K */
260			if (IMM_L(K))
261				PPC_XORI(r_A, r_A, IMM_L(K));
262			if (K >= 65536)
263				PPC_XORIS(r_A, r_A, IMM_H(K));
264			break;
265		case BPF_S_ALU_LSH_X: /* A <<= X; */
266			ctx->seen |= SEEN_XREG;
267			PPC_SLW(r_A, r_A, r_X);
268			break;
269		case BPF_S_ALU_LSH_K:
270			if (K == 0)
271				break;
272			else
273				PPC_SLWI(r_A, r_A, K);
274			break;
275		case BPF_S_ALU_RSH_X: /* A >>= X; */
276			ctx->seen |= SEEN_XREG;
277			PPC_SRW(r_A, r_A, r_X);
278			break;
279		case BPF_S_ALU_RSH_K: /* A >>= K; */
280			if (K == 0)
281				break;
282			else
283				PPC_SRWI(r_A, r_A, K);
284			break;
285		case BPF_S_ALU_NEG:
286			PPC_NEG(r_A, r_A);
287			break;
288		case BPF_S_RET_K:
289			PPC_LI32(r_ret, K);
290			if (!K) {
291				if (ctx->pc_ret0 == -1)
292					ctx->pc_ret0 = i;
293			}
294			/*
295			 * If this isn't the very last instruction, branch to
296			 * the epilogue if we've stuff to clean up.  Otherwise,
297			 * if there's nothing to tidy, just return.  If we /are/
298			 * the last instruction, we're about to fall through to
299			 * the epilogue to return.
300			 */
301			if (i != flen - 1) {
302				/*
303				 * Note: 'seen' is properly valid only on pass
304				 * #2.	Both parts of this conditional are the
305				 * same instruction size though, meaning the
306				 * first pass will still correctly determine the
307				 * code size/addresses.
308				 */
309				if (ctx->seen)
310					PPC_JMP(exit_addr);
311				else
312					PPC_BLR();
313			}
314			break;
315		case BPF_S_RET_A:
316			PPC_MR(r_ret, r_A);
317			if (i != flen - 1) {
318				if (ctx->seen)
319					PPC_JMP(exit_addr);
320				else
321					PPC_BLR();
322			}
323			break;
324		case BPF_S_MISC_TAX: /* X = A */
325			PPC_MR(r_X, r_A);
326			break;
327		case BPF_S_MISC_TXA: /* A = X */
328			ctx->seen |= SEEN_XREG;
329			PPC_MR(r_A, r_X);
330			break;
331
332			/*** Constant loads/M[] access ***/
333		case BPF_S_LD_IMM: /* A = K */
334			PPC_LI32(r_A, K);
335			break;
336		case BPF_S_LDX_IMM: /* X = K */
337			PPC_LI32(r_X, K);
338			break;
339		case BPF_S_LD_MEM: /* A = mem[K] */
340			PPC_MR(r_A, r_M + (K & 0xf));
341			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
342			break;
343		case BPF_S_LDX_MEM: /* X = mem[K] */
344			PPC_MR(r_X, r_M + (K & 0xf));
345			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
346			break;
347		case BPF_S_ST: /* mem[K] = A */
348			PPC_MR(r_M + (K & 0xf), r_A);
349			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
350			break;
351		case BPF_S_STX: /* mem[K] = X */
352			PPC_MR(r_M + (K & 0xf), r_X);
353			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
354			break;
355		case BPF_S_LD_W_LEN: /*	A = skb->len; */
356			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
357			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
358			break;
359		case BPF_S_LDX_W_LEN: /* X = skb->len; */
360			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
361			break;
362
363			/*** Ancillary info loads ***/
364		case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
365			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
366						  protocol) != 2);
367			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
368							    protocol));
369			break;
370		case BPF_S_ANC_IFINDEX:
371			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
372								dev));
373			PPC_CMPDI(r_scratch1, 0);
374			if (ctx->pc_ret0 != -1) {
375				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
376			} else {
377				/* Exit, returning 0; first pass hits here. */
378				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
379				PPC_LI(r_ret, 0);
380				PPC_JMP(exit_addr);
381			}
382			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
383						  ifindex) != 4);
384			PPC_LWZ_OFFS(r_A, r_scratch1,
385				     offsetof(struct net_device, ifindex));
386			break;
387		case BPF_S_ANC_MARK:
388			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
389			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
390							  mark));
391			break;
392		case BPF_S_ANC_RXHASH:
393			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
394			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
395							  hash));
396			break;
397		case BPF_S_ANC_VLAN_TAG:
398		case BPF_S_ANC_VLAN_TAG_PRESENT:
399			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
400			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
401							  vlan_tci));
402			if (filter[i].code == BPF_S_ANC_VLAN_TAG)
403				PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
404			else
405				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
406			break;
407		case BPF_S_ANC_QUEUE:
408			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
409						  queue_mapping) != 2);
410			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
411							  queue_mapping));
412			break;
413		case BPF_S_ANC_CPU:
414#ifdef CONFIG_SMP
415			/*
416			 * PACA ptr is r13:
417			 * raw_smp_processor_id() = local_paca->paca_index
418			 */
419			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
420						  paca_index) != 2);
421			PPC_LHZ_OFFS(r_A, 13,
422				     offsetof(struct paca_struct, paca_index));
423#else
424			PPC_LI(r_A, 0);
425#endif
426			break;
427
428			/*** Absolute loads from packet header/data ***/
429		case BPF_S_LD_W_ABS:
430			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
431			goto common_load;
432		case BPF_S_LD_H_ABS:
433			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
434			goto common_load;
435		case BPF_S_LD_B_ABS:
436			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
437		common_load:
438			/* Load from [K]. */
439			ctx->seen |= SEEN_DATAREF;
440			PPC_LI64(r_scratch1, func);
441			PPC_MTLR(r_scratch1);
442			PPC_LI32(r_addr, K);
443			PPC_BLRL();
444			/*
445			 * Helper returns 'lt' condition on error, and an
446			 * appropriate return value in r3
447			 */
448			PPC_BCC(COND_LT, exit_addr);
449			break;
450
451			/*** Indirect loads from packet header/data ***/
452		case BPF_S_LD_W_IND:
453			func = sk_load_word;
454			goto common_load_ind;
455		case BPF_S_LD_H_IND:
456			func = sk_load_half;
457			goto common_load_ind;
458		case BPF_S_LD_B_IND:
459			func = sk_load_byte;
460		common_load_ind:
461			/*
462			 * Load from [X + K].  Negative offsets are tested for
463			 * in the helper functions.
464			 */
465			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
466			PPC_LI64(r_scratch1, func);
467			PPC_MTLR(r_scratch1);
468			PPC_ADDI(r_addr, r_X, IMM_L(K));
469			if (K >= 32768)
470				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
471			PPC_BLRL();
472			/* If error, cr0.LT set */
473			PPC_BCC(COND_LT, exit_addr);
474			break;
475
476		case BPF_S_LDX_B_MSH:
477			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
478			goto common_load;
479			break;
480
481			/*** Jump and branches ***/
482		case BPF_S_JMP_JA:
483			if (K != 0)
484				PPC_JMP(addrs[i + 1 + K]);
485			break;
486
487		case BPF_S_JMP_JGT_K:
488		case BPF_S_JMP_JGT_X:
489			true_cond = COND_GT;
490			goto cond_branch;
491		case BPF_S_JMP_JGE_K:
492		case BPF_S_JMP_JGE_X:
493			true_cond = COND_GE;
494			goto cond_branch;
495		case BPF_S_JMP_JEQ_K:
496		case BPF_S_JMP_JEQ_X:
497			true_cond = COND_EQ;
498			goto cond_branch;
499		case BPF_S_JMP_JSET_K:
500		case BPF_S_JMP_JSET_X:
501			true_cond = COND_NE;
502			/* Fall through */
503		cond_branch:
504			/* same targets, can avoid doing the test :) */
505			if (filter[i].jt == filter[i].jf) {
506				if (filter[i].jt > 0)
507					PPC_JMP(addrs[i + 1 + filter[i].jt]);
508				break;
509			}
510
511			switch (filter[i].code) {
512			case BPF_S_JMP_JGT_X:
513			case BPF_S_JMP_JGE_X:
514			case BPF_S_JMP_JEQ_X:
515				ctx->seen |= SEEN_XREG;
516				PPC_CMPLW(r_A, r_X);
517				break;
518			case BPF_S_JMP_JSET_X:
519				ctx->seen |= SEEN_XREG;
520				PPC_AND_DOT(r_scratch1, r_A, r_X);
521				break;
522			case BPF_S_JMP_JEQ_K:
523			case BPF_S_JMP_JGT_K:
524			case BPF_S_JMP_JGE_K:
525				if (K < 32768)
526					PPC_CMPLWI(r_A, K);
527				else {
528					PPC_LI32(r_scratch1, K);
529					PPC_CMPLW(r_A, r_scratch1);
530				}
531				break;
532			case BPF_S_JMP_JSET_K:
533				if (K < 32768)
534					/* PPC_ANDI is /only/ dot-form */
535					PPC_ANDI(r_scratch1, r_A, K);
536				else {
537					PPC_LI32(r_scratch1, K);
538					PPC_AND_DOT(r_scratch1, r_A,
539						    r_scratch1);
540				}
541				break;
542			}
543			/* Sometimes branches are constructed "backward", with
544			 * the false path being the branch and true path being
545			 * a fallthrough to the next instruction.
546			 */
547			if (filter[i].jt == 0)
548				/* Swap the sense of the branch */
549				PPC_BCC(true_cond ^ COND_CMP_TRUE,
550					addrs[i + 1 + filter[i].jf]);
551			else {
552				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
553				if (filter[i].jf != 0)
554					PPC_JMP(addrs[i + 1 + filter[i].jf]);
555			}
556			break;
557		default:
558			/* The filter contains something cruel & unusual.
559			 * We don't handle it, but also there shouldn't be
560			 * anything missing from our list.
561			 */
562			if (printk_ratelimit())
563				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
564				       filter[i].code, i);
565			return -ENOTSUPP;
566		}
567
568	}
569	/* Set end-of-body-code address for exit. */
570	addrs[i] = ctx->idx * 4;
571
572	return 0;
573}
574
575void bpf_jit_compile(struct sk_filter *fp)
576{
577	unsigned int proglen;
578	unsigned int alloclen;
579	u32 *image = NULL;
580	u32 *code_base;
581	unsigned int *addrs;
582	struct codegen_context cgctx;
583	int pass;
584	int flen = fp->len;
585
586	if (!bpf_jit_enable)
587		return;
588
589	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
590	if (addrs == NULL)
591		return;
592
593	/*
594	 * There are multiple assembly passes as the generated code will change
595	 * size as it settles down, figuring out the max branch offsets/exit
596	 * paths required.
597	 *
598	 * The range of standard conditional branches is +/- 32Kbytes.	Since
599	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
600	 * finish with 8 bytes/instruction.  Not feasible, so long jumps are
601	 * used, distinct from short branches.
602	 *
603	 * Current:
604	 *
605	 * For now, both branch types assemble to 2 words (short branches padded
606	 * with a NOP); this is less efficient, but assembly will always complete
607	 * after exactly 3 passes:
608	 *
609	 * First pass: No code buffer; Program is "faux-generated" -- no code
610	 * emitted but maximum size of output determined (and addrs[] filled
611	 * in).	 Also, we note whether we use M[], whether we use skb data, etc.
612	 * All generation choices assumed to be 'worst-case', e.g. branches all
613	 * far (2 instructions), return path code reduction not available, etc.
614	 *
615	 * Second pass: Code buffer allocated with size determined previously.
616	 * Prologue generated to support features we have seen used.  Exit paths
617	 * determined and addrs[] is filled in again, as code may be slightly
618	 * smaller as a result.
619	 *
620	 * Third pass: Code generated 'for real', and branch destinations
621	 * determined from now-accurate addrs[] map.
622	 *
623	 * Ideal:
624	 *
625	 * If we optimise this, near branches will be shorter.	On the
626	 * first assembly pass, we should err on the side of caution and
627	 * generate the biggest code.  On subsequent passes, branches will be
628	 * generated short or long and code size will reduce.  With smaller
629	 * code, more branches may fall into the short category, and code will
630	 * reduce more.
631	 *
632	 * Finally, if we see one pass generate code the same size as the
633	 * previous pass we have converged and should now generate code for
634	 * real.  Allocating at the end will also save the memory that would
635	 * otherwise be wasted by the (small) current code shrinkage.
636	 * Preferably, we should do a small number of passes (e.g. 5) and if we
637	 * haven't converged by then, get impatient and force code to generate
638	 * as-is, even if the odd branch would be left long.  The chances of a
639	 * long jump are tiny with all but the most enormous of BPF filter
640	 * inputs, so we should usually converge on the third pass.
641	 */
642
643	cgctx.idx = 0;
644	cgctx.seen = 0;
645	cgctx.pc_ret0 = -1;
646	/* Scouting faux-generate pass 0 */
647	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
648		/* We hit something illegal or unsupported. */
649		goto out;
650
651	/*
652	 * Pretend to build prologue, given the features we've seen.  This will
653	 * update cgctx.idx as it pretends to output instructions, then we can
654	 * calculate total size from idx.
655	 */
656	bpf_jit_build_prologue(fp, 0, &cgctx);
657	bpf_jit_build_epilogue(0, &cgctx);
658
659	proglen = cgctx.idx * 4;
660	alloclen = proglen + FUNCTION_DESCR_SIZE;
661	image = module_alloc(alloclen);
662	if (!image)
663		goto out;
664
665	code_base = image + (FUNCTION_DESCR_SIZE/4);
666
667	/* Code generation passes 1-2 */
668	for (pass = 1; pass < 3; pass++) {
669		/* Now build the prologue, body code & epilogue for real. */
670		cgctx.idx = 0;
671		bpf_jit_build_prologue(fp, code_base, &cgctx);
672		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
673		bpf_jit_build_epilogue(code_base, &cgctx);
674
675		if (bpf_jit_enable > 1)
676			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
677				proglen - (cgctx.idx * 4), cgctx.seen);
678	}
679
680	if (bpf_jit_enable > 1)
681		/* Note that we output the base address of the code_base
682		 * rather than image, since opcodes are in code_base.
683		 */
684		bpf_jit_dump(flen, proglen, pass, code_base);
685
686	if (image) {
687		bpf_flush_icache(code_base, code_base + (proglen/4));
688		/* Function descriptor nastiness: Address + TOC */
689		((u64 *)image)[0] = (u64)code_base;
690		((u64 *)image)[1] = local_paca->kernel_toc;
691		fp->bpf_func = (void *)image;
692		fp->jited = 1;
693	}
694out:
695	kfree(addrs);
696	return;
697}
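
As the long comment in bpf_jit_compile() explains, this version always runs a measuring pass 0 followed by two real code-generation passes. A toy, user-space rendering of that structure, with an invented two-words-per-BPF-insn worst case used purely for illustration:

#include <stdio.h>

/* Pretend every BPF insn assembles to at most two PPC words and record
 * the offset of each insn in addrs[], as the scouting pass does. */
static unsigned int faux_pass(unsigned int *addrs, unsigned int flen)
{
	unsigned int idx = 0, i;

	for (i = 0; i < flen; i++) {
		addrs[i] = idx * 4;	/* offset of each BPF insn */
		idx += 2;
	}
	addrs[flen] = idx * 4;		/* end of body = exit address */
	return idx;
}

int main(void)
{
	unsigned int addrs[16 + 1];
	unsigned int proglen = faux_pass(addrs, 16) * 4;	/* pass 0: measure only */
	int pass;

	for (pass = 1; pass < 3; pass++)	/* passes 1-2 would emit for real */
		faux_pass(addrs, 16);
	printf("worst-case body size: %u bytes, exit at +%u\n", proglen, addrs[16]);
	return 0;
}
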
698
699void bpf_jit_free(struct sk_filter *fp)
700{
701	if (fp->jited)
702		module_free(NULL, fp->bpf_func);
703	kfree(fp);
704}