v6.8 (arch/powerpc/net/bpf_jit_comp.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * eBPF JIT compiler
  4 *
  5 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
  6 *		  IBM Corporation
  7 *
  8 * Based on the powerpc classic BPF JIT compiler by Matt Evans
  9 */
 10#include <linux/moduleloader.h>
 11#include <asm/cacheflush.h>
 12#include <asm/asm-compat.h>
 13#include <linux/netdevice.h>
 14#include <linux/filter.h>
 15#include <linux/if_vlan.h>
 16#include <linux/kernel.h>
 17#include <linux/memory.h>
 18#include <linux/bpf.h>
 19
 20#include <asm/kprobes.h>
 21#include <asm/code-patching.h>
 22
 23#include "bpf_jit.h"
 24
 25static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
 26{
 27	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
 28}
 29
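/*
 * Emit the branch used for BPF_EXIT: branch straight to the epilogue when it
 * is in range (or when exit_addr is still 0 during sizing), otherwise branch
 * to an alternate in-body epilogue, emitting and recording one the first time
 * it is needed.
 */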
 30int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
 31{
 32	if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
 33		PPC_JMP(exit_addr);
 34	} else if (ctx->alt_exit_addr) {
 35		if (WARN_ON(!is_offset_in_branch_range((long)ctx->alt_exit_addr - (ctx->idx * 4))))
 36			return -1;
 37		PPC_JMP(ctx->alt_exit_addr);
 38	} else {
 39		ctx->alt_exit_addr = ctx->idx * 4;
 40		bpf_jit_build_epilogue(image, ctx);
 41	}
 42
 43	return 0;
 44}
 45
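/*
 * JIT state stashed in fp->aux->jit_data between the initial compile and the
 * extra pass used for subprograms, so the extra pass can patch the same
 * images with the final call addresses.
 */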
 46struct powerpc_jit_data {
 47	/* address of rw header */
 48	struct bpf_binary_header *hdr;
 49	/* address of ro final header */
 50	struct bpf_binary_header *fhdr;
 51	u32 *addrs;
 52	u8 *fimage;
 53	u32 proglen;
 54	struct codegen_context ctx;
 55};
 56
 57bool bpf_jit_needs_zext(void)
 58{
 59	return true;
 60}
 61
 62struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 63{
 64	u32 proglen;
 65	u32 alloclen;
 66	u8 *image = NULL;
 67	u32 *code_base;
 68	u32 *addrs;
 69	struct powerpc_jit_data *jit_data;
 70	struct codegen_context cgctx;
 71	int pass;
 72	int flen;
 73	struct bpf_binary_header *fhdr = NULL;
 74	struct bpf_binary_header *hdr = NULL;
 75	struct bpf_prog *org_fp = fp;
 76	struct bpf_prog *tmp_fp;
 77	bool bpf_blinded = false;
 78	bool extra_pass = false;
 79	u8 *fimage = NULL;
 80	u32 *fcode_base;
 81	u32 extable_len;
 82	u32 fixup_len;
 83
 84	if (!fp->jit_requested)
 85		return org_fp;
 86
 87	tmp_fp = bpf_jit_blind_constants(org_fp);
 88	if (IS_ERR(tmp_fp))
 89		return org_fp;
 90
 91	if (tmp_fp != org_fp) {
 92		bpf_blinded = true;
 93		fp = tmp_fp;
 94	}
 95
 96	jit_data = fp->aux->jit_data;
 97	if (!jit_data) {
 98		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
 99		if (!jit_data) {
100			fp = org_fp;
101			goto out;
102		}
103		fp->aux->jit_data = jit_data;
104	}
105
106	flen = fp->len;
107	addrs = jit_data->addrs;
108	if (addrs) {
109		cgctx = jit_data->ctx;
110		/*
111		 * JIT compiled to a writable location (image/code_base) first.
112		 * It is then moved to the readonly final location (fimage/fcode_base)
113		 * using instruction patching.
114		 */
115		fimage = jit_data->fimage;
116		fhdr = jit_data->fhdr;
117		proglen = jit_data->proglen;
118		hdr = jit_data->hdr;
119		image = (void *)hdr + ((void *)fimage - (void *)fhdr);
120		extra_pass = true;
121		/* During extra pass, ensure index is reset before repopulating extable entries */
122		cgctx.exentry_idx = 0;
123		goto skip_init_ctx;
124	}
125
126	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
127	if (addrs == NULL) {
128		fp = org_fp;
129		goto out_addrs;
130	}
131
132	memset(&cgctx, 0, sizeof(struct codegen_context));
133	bpf_jit_init_reg_mapping(&cgctx);
134
135	/* Make sure that the stack is quadword aligned. */
136	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
137
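	/*
	 * addrs[i] tracks the byte offset of the JITed code for BPF
	 * instruction i; the sizing pass below populates it so branch targets
	 * can be resolved in the real code generation passes.
	 */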
138	/* Scouting faux-generate pass 0 */
139	if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
140		/* We hit something illegal or unsupported. */
141		fp = org_fp;
142		goto out_addrs;
143	}
144
145	/*
146	 * If we have seen a tail call, we need a second pass.
147	 * This is because bpf_jit_emit_common_epilogue() is called
148	 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
149	 * We also need a second pass if we ended up with too large
150	 * a program so as to ensure BPF_EXIT branches are in range.
151	 */
152	if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
153		cgctx.idx = 0;
154		if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
155			fp = org_fp;
156			goto out_addrs;
157		}
158	}
159
160	bpf_jit_realloc_regs(&cgctx);
161	/*
162	 * Pretend to build prologue, given the features we've seen.  This will
 163	 * update cgctx.idx as it pretends to output instructions, then we can
164	 * calculate total size from idx.
165	 */
166	bpf_jit_build_prologue(NULL, &cgctx);
167	addrs[fp->len] = cgctx.idx * 4;
168	bpf_jit_build_epilogue(NULL, &cgctx);
169
170	fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
171	extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);
172
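	/*
	 * Final image layout:
	 *   [function descriptor][JITed program][extable fixup stubs][extable]
	 */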
173	proglen = cgctx.idx * 4;
174	alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;
175
176	fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image,
177					      bpf_jit_fill_ill_insns);
178	if (!fhdr) {
179		fp = org_fp;
180		goto out_addrs;
181	}
182
183	if (extable_len)
184		fp->aux->extable = (void *)fimage + FUNCTION_DESCR_SIZE + proglen + fixup_len;
185
186skip_init_ctx:
187	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
188	fcode_base = (u32 *)(fimage + FUNCTION_DESCR_SIZE);
189
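	/*
	 * Two real passes: the first emits code using the offsets recorded in
	 * addrs[] by the sizing pass, the second re-emits with the now-stable
	 * offsets so that all branch targets are final.
	 */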
190	/* Code generation passes 1-2 */
191	for (pass = 1; pass < 3; pass++) {
192		/* Now build the prologue, body code & epilogue for real. */
193		cgctx.idx = 0;
194		cgctx.alt_exit_addr = 0;
195		bpf_jit_build_prologue(code_base, &cgctx);
196		if (bpf_jit_build_body(fp, code_base, fcode_base, &cgctx, addrs, pass,
197				       extra_pass)) {
198			bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size));
199			bpf_jit_binary_pack_free(fhdr, hdr);
200			fp = org_fp;
201			goto out_addrs;
202		}
203		bpf_jit_build_epilogue(code_base, &cgctx);
204
205		if (bpf_jit_enable > 1)
206			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
207				proglen - (cgctx.idx * 4), cgctx.seen);
208	}
209
210	if (bpf_jit_enable > 1)
211		/*
212		 * Note that we output the base address of the code_base
213		 * rather than image, since opcodes are in code_base.
214		 */
215		bpf_jit_dump(flen, proglen, pass, code_base);
216
217#ifdef CONFIG_PPC64_ELF_ABI_V1
218	/* Function descriptor nastiness: Address + TOC */
219	((u64 *)image)[0] = (u64)fcode_base;
220	((u64 *)image)[1] = local_paca->kernel_toc;
221#endif
222
223	fp->bpf_func = (void *)fimage;
224	fp->jited = 1;
225	fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
226
227	if (!fp->is_func || extra_pass) {
228		if (bpf_jit_binary_pack_finalize(fp, fhdr, hdr)) {
229			fp = org_fp;
230			goto out_addrs;
231		}
232		bpf_prog_fill_jited_linfo(fp, addrs);
233out_addrs:
234		kfree(addrs);
235		kfree(jit_data);
236		fp->aux->jit_data = NULL;
237	} else {
238		jit_data->addrs = addrs;
239		jit_data->ctx = cgctx;
240		jit_data->proglen = proglen;
241		jit_data->fimage = fimage;
242		jit_data->fhdr = fhdr;
243		jit_data->hdr = hdr;
244	}
245
246out:
247	if (bpf_blinded)
248		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
249
250	return fp;
251}
252
253/*
254 * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
255 * this function, as this only applies to BPF_PROBE_MEM, for now.
256 */
257int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
258			  struct codegen_context *ctx, int insn_idx, int jmp_off,
259			  int dst_reg)
260{
261	off_t offset;
262	unsigned long pc;
263	struct exception_table_entry *ex, *ex_entry;
264	u32 *fixup;
265
266	/* Populate extable entries only in the last pass */
267	if (pass != 2)
268		return 0;
269
270	if (!fp->aux->extable ||
271	    WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
272		return -EINVAL;
273
274	/*
275	 * Program is first written to image before copying to the
276	 * final location (fimage). Accordingly, update in the image first.
277	 * As all offsets used are relative, copying as is to the
278	 * final location should be alright.
279	 */
280	pc = (unsigned long)&image[insn_idx];
281	ex = (void *)fp->aux->extable - (void *)fimage + (void *)image;
282
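	/*
	 * Each fixup stub is BPF_FIXUP_LEN words: load 0 into the destination
	 * register(s), then branch back into the program at pc + jmp_off.
	 */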
283	fixup = (void *)ex -
284		(fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
285		(ctx->exentry_idx * BPF_FIXUP_LEN * 4);
286
287	fixup[0] = PPC_RAW_LI(dst_reg, 0);
288	if (IS_ENABLED(CONFIG_PPC32))
289		fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */
290
291	fixup[BPF_FIXUP_LEN - 1] =
292		PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);
293
294	ex_entry = &ex[ctx->exentry_idx];
295
296	offset = pc - (long)&ex_entry->insn;
297	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
298		return -ERANGE;
299	ex_entry->insn = offset;
300
301	offset = (long)fixup - (long)&ex_entry->fixup;
302	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
303		return -ERANGE;
304	ex_entry->fixup = offset;
305
306	ctx->exentry_idx++;
307	return 0;
308}
309
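/*
 * Copy instructions into the read-only final image. The destination is not
 * directly writable, so go through patch_instructions() under text_mutex.
 */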
310void *bpf_arch_text_copy(void *dst, void *src, size_t len)
311{
312	int err;
313
314	if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
315		return ERR_PTR(-EINVAL);
316
317	mutex_lock(&text_mutex);
318	err = patch_instructions(dst, src, len, false);
319	mutex_unlock(&text_mutex);
320
321	return err ? ERR_PTR(err) : dst;
322}
323
324int bpf_arch_text_invalidate(void *dst, size_t len)
325{
326	u32 insn = BREAKPOINT_INSTRUCTION;
327	int ret;
328
329	if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
330		return -EINVAL;
331
332	mutex_lock(&text_mutex);
333	ret = patch_instructions(dst, &insn, len, true);
334	mutex_unlock(&text_mutex);
335
336	return ret;
337}
338
339void bpf_jit_free(struct bpf_prog *fp)
340{
341	if (fp->jited) {
342		struct powerpc_jit_data *jit_data = fp->aux->jit_data;
343		struct bpf_binary_header *hdr;
344
345		/*
346		 * If we fail the final pass of JIT (from jit_subprogs),
347		 * the program may not be finalized yet. Call finalize here
348		 * before freeing it.
349		 */
350		if (jit_data) {
351			bpf_jit_binary_pack_finalize(fp, jit_data->fhdr, jit_data->hdr);
352			kvfree(jit_data->addrs);
353			kfree(jit_data);
354		}
355		hdr = bpf_jit_binary_pack_hdr(fp);
356		bpf_jit_binary_pack_free(hdr, NULL);
357		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
358	}
359
360	bpf_prog_unlock_free(fp);
361}
v6.2 (arch/powerpc/net/bpf_jit_comp.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * eBPF JIT compiler
  4 *
  5 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
  6 *		  IBM Corporation
  7 *
  8 * Based on the powerpc classic BPF JIT compiler by Matt Evans
  9 */
 10#include <linux/moduleloader.h>
 11#include <asm/cacheflush.h>
 12#include <asm/asm-compat.h>
 13#include <linux/netdevice.h>
 14#include <linux/filter.h>
 15#include <linux/if_vlan.h>
 16#include <asm/kprobes.h>
 17#include <linux/bpf.h>
 18
 19#include "bpf_jit.h"
 20
 21static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
 22{
 23	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
 24}
 25
 26/* Fix updated addresses (for subprog calls, ldimm64, et al) during extra pass */
 27static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image,
 28				   struct codegen_context *ctx, u32 *addrs)
 29{
 30	const struct bpf_insn *insn = fp->insnsi;
 31	bool func_addr_fixed;
 32	u64 func_addr;
 33	u32 tmp_idx;
 34	int i, j, ret;
 35
 36	for (i = 0; i < fp->len; i++) {
 37		/*
 38		 * During the extra pass, only the branch target addresses for
 39		 * the subprog calls need to be fixed. All other instructions
  40		 * can be left untouched.
 41		 *
 42		 * The JITed image length does not change because we already
 43		 * ensure that the JITed instruction sequence for these calls
 44		 * are of fixed length by padding them with NOPs.
 45		 */
 46		if (insn[i].code == (BPF_JMP | BPF_CALL) &&
 47		    insn[i].src_reg == BPF_PSEUDO_CALL) {
 48			ret = bpf_jit_get_func_addr(fp, &insn[i], true,
 49						    &func_addr,
 50						    &func_addr_fixed);
 51			if (ret < 0)
 52				return ret;
 53
 54			/*
 55			 * Save ctx->idx as this would currently point to the
 56			 * end of the JITed image and set it to the offset of
 57			 * the instruction sequence corresponding to the
 58			 * subprog call temporarily.
 59			 */
 60			tmp_idx = ctx->idx;
 61			ctx->idx = addrs[i] / 4;
 62			ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
 63			if (ret)
 64				return ret;
 65
 66			/*
 67			 * Restore ctx->idx here. This is safe as the length
 68			 * of the JITed sequence remains unchanged.
 69			 */
 70			ctx->idx = tmp_idx;
 71		} else if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW)) {
 72			tmp_idx = ctx->idx;
 73			ctx->idx = addrs[i] / 4;
 74#ifdef CONFIG_PPC32
 75			PPC_LI32(bpf_to_ppc(insn[i].dst_reg) - 1, (u32)insn[i + 1].imm);
 76			PPC_LI32(bpf_to_ppc(insn[i].dst_reg), (u32)insn[i].imm);
 77			for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
 78				EMIT(PPC_RAW_NOP());
 79#else
 80			func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32);
 81			PPC_LI64(bpf_to_ppc(insn[i].dst_reg), func_addr);
 82			/* overwrite rest with nops */
 83			for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
 84				EMIT(PPC_RAW_NOP());
 85#endif
 86			ctx->idx = tmp_idx;
 87			i++;
 88		}
 89	}
 90
 91	return 0;
 92}
 93
 94int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
 95{
 96	if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
 97		PPC_JMP(exit_addr);
 98	} else if (ctx->alt_exit_addr) {
 99		if (WARN_ON(!is_offset_in_branch_range((long)ctx->alt_exit_addr - (ctx->idx * 4))))
100			return -1;
101		PPC_JMP(ctx->alt_exit_addr);
102	} else {
103		ctx->alt_exit_addr = ctx->idx * 4;
104		bpf_jit_build_epilogue(image, ctx);
105	}
106
107	return 0;
108}
109
110struct powerpc64_jit_data {
111	struct bpf_binary_header *header;
112	u32 *addrs;
113	u8 *image;
114	u32 proglen;
115	struct codegen_context ctx;
116};
117
118bool bpf_jit_needs_zext(void)
119{
120	return true;
121}
122
123struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
124{
125	u32 proglen;
126	u32 alloclen;
127	u8 *image = NULL;
128	u32 *code_base;
129	u32 *addrs;
130	struct powerpc64_jit_data *jit_data;
131	struct codegen_context cgctx;
132	int pass;
133	int flen;
134	struct bpf_binary_header *bpf_hdr;
135	struct bpf_prog *org_fp = fp;
136	struct bpf_prog *tmp_fp;
137	bool bpf_blinded = false;
138	bool extra_pass = false;
139	u32 extable_len;
140	u32 fixup_len;
141
142	if (!fp->jit_requested)
143		return org_fp;
144
145	tmp_fp = bpf_jit_blind_constants(org_fp);
146	if (IS_ERR(tmp_fp))
147		return org_fp;
148
149	if (tmp_fp != org_fp) {
150		bpf_blinded = true;
151		fp = tmp_fp;
152	}
153
154	jit_data = fp->aux->jit_data;
155	if (!jit_data) {
156		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
157		if (!jit_data) {
158			fp = org_fp;
159			goto out;
160		}
161		fp->aux->jit_data = jit_data;
162	}
163
164	flen = fp->len;
165	addrs = jit_data->addrs;
166	if (addrs) {
167		cgctx = jit_data->ctx;
168		image = jit_data->image;
169		bpf_hdr = jit_data->header;
170		proglen = jit_data->proglen;
171		extra_pass = true;
172		goto skip_init_ctx;
173	}
174
175	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
176	if (addrs == NULL) {
177		fp = org_fp;
178		goto out_addrs;
179	}
180
181	memset(&cgctx, 0, sizeof(struct codegen_context));
182	bpf_jit_init_reg_mapping(&cgctx);
183
184	/* Make sure that the stack is quadword aligned. */
185	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
186
187	/* Scouting faux-generate pass 0 */
188	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
189		/* We hit something illegal or unsupported. */
190		fp = org_fp;
191		goto out_addrs;
192	}
193
194	/*
195	 * If we have seen a tail call, we need a second pass.
196	 * This is because bpf_jit_emit_common_epilogue() is called
197	 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
198	 * We also need a second pass if we ended up with too large
199	 * a program so as to ensure BPF_EXIT branches are in range.
200	 */
201	if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
202		cgctx.idx = 0;
203		if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
204			fp = org_fp;
205			goto out_addrs;
206		}
207	}
208
209	bpf_jit_realloc_regs(&cgctx);
210	/*
211	 * Pretend to build prologue, given the features we've seen.  This will
 212	 * update cgctx.idx as it pretends to output instructions, then we can
213	 * calculate total size from idx.
214	 */
215	bpf_jit_build_prologue(0, &cgctx);
216	addrs[fp->len] = cgctx.idx * 4;
217	bpf_jit_build_epilogue(0, &cgctx);
218
219	fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
220	extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);
221
222	proglen = cgctx.idx * 4;
223	alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;
224
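	/*
	 * In this version the JIT writes into a single allocation that is
	 * later marked read-only via bpf_jit_binary_lock_ro(); the v6.8 code
	 * above instead uses bpf_jit_binary_pack_alloc() with separate
	 * writable and read-only images.
	 */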
225	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
226	if (!bpf_hdr) {
227		fp = org_fp;
228		goto out_addrs;
229	}
230
231	if (extable_len)
232		fp->aux->extable = (void *)image + FUNCTION_DESCR_SIZE + proglen + fixup_len;
233
234skip_init_ctx:
235	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
236
237	if (extra_pass) {
238		/*
239		 * Do not touch the prologue and epilogue as they will remain
240		 * unchanged. Only fix the branch target address for subprog
241		 * calls in the body, and ldimm64 instructions.
242		 *
243		 * This does not change the offsets and lengths of the subprog
244		 * call instruction sequences and hence, the size of the JITed
245		 * image as well.
246		 */
247		bpf_jit_fixup_addresses(fp, code_base, &cgctx, addrs);
248
249		/* There is no need to perform the usual passes. */
250		goto skip_codegen_passes;
251	}
252
253	/* Code generation passes 1-2 */
254	for (pass = 1; pass < 3; pass++) {
255		/* Now build the prologue, body code & epilogue for real. */
256		cgctx.idx = 0;
257		cgctx.alt_exit_addr = 0;
258		bpf_jit_build_prologue(code_base, &cgctx);
259		if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass)) {
260			bpf_jit_binary_free(bpf_hdr);
261			fp = org_fp;
262			goto out_addrs;
263		}
264		bpf_jit_build_epilogue(code_base, &cgctx);
265
266		if (bpf_jit_enable > 1)
267			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
268				proglen - (cgctx.idx * 4), cgctx.seen);
269	}
270
271skip_codegen_passes:
272	if (bpf_jit_enable > 1)
273		/*
274		 * Note that we output the base address of the code_base
275		 * rather than image, since opcodes are in code_base.
276		 */
277		bpf_jit_dump(flen, proglen, pass, code_base);
278
279#ifdef CONFIG_PPC64_ELF_ABI_V1
280	/* Function descriptor nastiness: Address + TOC */
281	((u64 *)image)[0] = (u64)code_base;
282	((u64 *)image)[1] = local_paca->kernel_toc;
283#endif
284
285	fp->bpf_func = (void *)image;
286	fp->jited = 1;
287	fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
288
289	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + bpf_hdr->size);
290	if (!fp->is_func || extra_pass) {
291		bpf_jit_binary_lock_ro(bpf_hdr);
292		bpf_prog_fill_jited_linfo(fp, addrs);
293out_addrs:
294		kfree(addrs);
295		kfree(jit_data);
296		fp->aux->jit_data = NULL;
297	} else {
298		jit_data->addrs = addrs;
299		jit_data->ctx = cgctx;
300		jit_data->proglen = proglen;
301		jit_data->image = image;
302		jit_data->header = bpf_hdr;
303	}
304
305out:
306	if (bpf_blinded)
307		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
308
309	return fp;
310}
311
312/*
313 * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
314 * this function, as this only applies to BPF_PROBE_MEM, for now.
315 */
316int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
317			  int insn_idx, int jmp_off, int dst_reg)
318{
319	off_t offset;
320	unsigned long pc;
321	struct exception_table_entry *ex;
322	u32 *fixup;
323
324	/* Populate extable entries only in the last pass */
325	if (pass != 2)
326		return 0;
327
328	if (!fp->aux->extable ||
329	    WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
330		return -EINVAL;
331
332	pc = (unsigned long)&image[insn_idx];
333
334	fixup = (void *)fp->aux->extable -
335		(fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
336		(ctx->exentry_idx * BPF_FIXUP_LEN * 4);
337
338	fixup[0] = PPC_RAW_LI(dst_reg, 0);
339	if (IS_ENABLED(CONFIG_PPC32))
340		fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */
341
342	fixup[BPF_FIXUP_LEN - 1] =
343		PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);
344
345	ex = &fp->aux->extable[ctx->exentry_idx];
346
347	offset = pc - (long)&ex->insn;
348	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
349		return -ERANGE;
350	ex->insn = offset;
351
352	offset = (long)fixup - (long)&ex->fixup;
353	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
354		return -ERANGE;
355	ex->fixup = offset;
356
357	ctx->exentry_idx++;
358	return 0;
359}