v5.9 — arch/powerpc/kernel/optprobes.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#include <asm/inst.h>

#define TMPL_CALL_HDLR_IDX	\
	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	\
	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX		\
	(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX		\
	(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX		\
	(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX		\
	(optprobe_template_end - optprobe_template_entry)

DEFINE_INSN_CACHE_OPS(ppc_optinsn);

static bool insn_page_in_use;

static void *__ppc_alloc_insn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
	insn_page_in_use = false;
}

struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
	/* insn_size initialized later */
	.alloc = __ppc_alloc_insn_page,
	.free = __ppc_free_insn_page,
	.nr_garbage = 0,
};
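
/*
 * Illustration (not part of the original file): DEFINE_INSN_CACHE_OPS(ppc_optinsn)
 * above generates the get_ppc_optinsn_slot()/free_ppc_optinsn_slot() helpers
 * used later in this file. Simplified sketch of the expansion, per the macro
 * in include/linux/kprobes.h:
 *
 *	static inline kprobe_opcode_t *get_ppc_optinsn_slot(void)
 *	{
 *		return __get_insn_slot(&kprobe_ppc_optinsn_slots);
 *	}
 *
 *	static inline void free_ppc_optinsn_slot(kprobe_opcode_t *slot, int dirty)
 *	{
 *		__free_insn_slot(&kprobe_ppc_optinsn_slots, slot, dirty);
 *	}
 */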

/*
 * Check if we can optimize this probe. Returns NIP post-emulation if this can
 * be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;

	/*
	 * The kprobe placed on the kretprobe trampoline at boot time is a
	 * 'nop' instruction, which can always be emulated, so further
	 * checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, not module
	 * addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr((unsigned long)p->addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = (unsigned long)p->addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we cannot predict the nip ahead of time with a
	 * dummy pt_regs and so cannot ensure that the branch back from the
	 * detour buffer falls within branch range (i.e. +/- 32MB). The
	 * detour buffer ends with a branch back from the trampoline to the
	 * nip returned by analyse_instr() here.
	 *
	 * Ensure that the instruction is not a conditional branch, and
	 * that it can be emulated.
	 */
	if (!is_conditional_branch(ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) &&
	    analyse_instr(&op, &regs,
			  ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}
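
/*
 * Illustration (not part of the original file): a probe on a simple
 * arithmetic or load instruction, or on a plain 'nop', typically passes the
 * checks above (analyse_instr() emulates it and yields the next nip), while
 * a probe on a conditional branch such as 'bne' always returns 0 here and
 * the kprobe stays unoptimized.
 */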

static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs->nip = (unsigned long)op->kp.addr;
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_ppc_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

/*
 * emulate_step() expects the instruction to be emulated in its second
 * parameter. Load register 'r4' with the instruction.
 */
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
	/* addis r4,0,(insn)@h */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) |
				   ((val >> 16) & 0xffff)));
	addr++;

	/* ori r4,r4,(insn)@l */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ORI | ___PPC_RA(4) |
				   ___PPC_RS(4) | (val & 0xffff)));
}
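
/*
 * Worked example (not part of the original file): for val == 0x60000000,
 * the powerpc 'nop' encoding, the two patched instructions are:
 *
 *	addis	r4,0,0x6000	# r4 = 0x60000000 (val >> 16 == 0x6000)
 *	ori	r4,r4,0x0000	# r4 |= 0x0000    (val & 0xffff)
 */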

/*
 * Generate instructions to load the provided immediate 64-bit value
 * into register 'reg' and patch these instructions at 'addr'.
 */
void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
	/* lis reg,(op)@highest */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ADDIS | ___PPC_RT(reg) |
				   ((val >> 48) & 0xffff)));
	addr++;

	/* ori reg,reg,(op)@higher */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) |
				   ___PPC_RS(reg) | ((val >> 32) & 0xffff)));
	addr++;

	/* rldicr reg,reg,32,31 */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_RLDICR | ___PPC_RA(reg) |
				   ___PPC_RS(reg) | __PPC_SH64(32) | __PPC_ME64(31)));
	addr++;

	/* oris reg,reg,(op)@h */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ORIS | ___PPC_RA(reg) |
				   ___PPC_RS(reg) | ((val >> 16) & 0xffff)));
	addr++;

	/* ori reg,reg,(op)@l */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) |
				   ___PPC_RS(reg) | (val & 0xffff)));
}
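
/*
 * Worked example (not part of the original file): for val ==
 * 0xc000000001234567 and reg == 3, the five patched instructions build
 * the value step by step:
 *
 *	lis	r3,0xc000	# r3 = 0xffffffffc0000000 (sign-extended)
 *	ori	r3,r3,0x0000	# bits 32-47 of val ((val >> 32) & 0xffff)
 *	rldicr	r3,r3,32,31	# r3 = 0xc000000000000000 (rotate, clear low half)
 *	oris	r3,r3,0x0123	# r3 = 0xc000000001230000
 *	ori	r3,r3,0x4567	# r3 = 0xc000000001234567
 */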

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	struct ppc_inst branch_op_callback, branch_emulate_step, temp;
	kprobe_opcode_t *op_callback_addr, *emulate_step_addr, *buff;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_ppc_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses a 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively nearby, because the
	 * offset is encoded in an immediate field of the branch opcode
	 * itself: the 24-bit LI field holds a signed word offset, giving
	 * a reach of +/- 2^25 bytes. Therefore the address has to be
	 * within 32MB on either side of the current instruction.
	 */
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within the 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
			(unsigned long)nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Set up the template */
	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction((struct ppc_inst *)(buff + i),
				       ppc_inst(*(optprobe_template_entry + i)));
		if (rc < 0)
			goto error;
	}

	/*
	 * Fixup the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm64_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

	rc = create_branch(&branch_op_callback,
			   (struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
			   (unsigned long)op_callback_addr,
			   BRANCH_SET_LINK);

	rc |= create_branch(&branch_emulate_step,
			    (struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
			    (unsigned long)emulate_step_addr,
			    BRANCH_SET_LINK);

	if (rc)
		goto error;

	patch_instruction((struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
			  branch_op_callback);
	patch_instruction((struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
			  branch_emulate_step);

	/*
	 * 3. load the instruction to be emulated into the relevant
	 * register, and
	 */
	temp = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);
	patch_imm64_load_insns(ppc_inst_as_u64(temp), 4, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from trampoline
	 */
	patch_branch((struct ppc_inst *)(buff + TMPL_RET_IDX), (unsigned long)nip, 0);

	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_ppc_optinsn_slot(buff, 0);
	return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, optprobes always replace a single instruction (4 bytes
 * long and 4-byte aligned), so it is impossible for another kprobe to
 * lie within this address range. Always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
	struct ppc_inst instr;
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Back up the instruction that will be replaced
		 * by the branch to the detour buffer.
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
					       RELATIVEJUMP_SIZE);
		create_branch(&instr,
			      (struct ppc_inst *)op->kp.addr,
			      (unsigned long)op->optinsn.insn, 0);
		patch_instruction((struct ppc_inst *)op->kp.addr, instr);
		list_del_init(&op->list);
	}
}

void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}
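
Usage sketch (an illustration, not part of the file above): no arch-specific
calls are needed to use optprobes. Registering an ordinary kprobe with only a
pre-handler lets the kprobes core invoke arch_prepare_optimized_kprobe() and
arch_optimize_kprobes() above, assuming CONFIG_OPTPROBES=y and the
debug.kprobes-optimization sysctl is enabled; the symbol name below is a
placeholder for any optimizable kernel function. Whether a probe was actually
optimized shows up as [OPTIMIZED] in /sys/kernel/debug/kprobes/list.

#include <linux/module.h>
#include <linux/kprobes.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("optprobe demo: hit %pS\n", (void *)regs->nip);
	return 0;
}

static struct kprobe demo_kp = {
	.symbol_name	= "do_sys_open",	/* placeholder: any optimizable symbol */
	.pre_handler	= demo_pre,		/* no post_handler, so optimization is possible */
};

static int __init demo_init(void)
{
	return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");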
v5.4 — arch/powerpc/kernel/optprobes.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>

#define TMPL_CALL_HDLR_IDX	\
	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	\
	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX		\
	(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX		\
	(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX		\
	(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX		\
	(optprobe_template_end - optprobe_template_entry)

DEFINE_INSN_CACHE_OPS(ppc_optinsn);

static bool insn_page_in_use;

static void *__ppc_alloc_insn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
	insn_page_in_use = false;
}

struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
	/* insn_size initialized later */
	.alloc = __ppc_alloc_insn_page,
	.free = __ppc_free_insn_page,
	.nr_garbage = 0,
};

/*
 * Check if we can optimize this probe. Returns NIP post-emulation if this can
 * be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;

	/*
	 * The kprobe placed on the kretprobe trampoline at boot time is a
	 * 'nop' instruction, which can always be emulated, so further
	 * checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, not module
	 * addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr((unsigned long)p->addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = (unsigned long)p->addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we cannot predict the nip ahead of time with a
	 * dummy pt_regs and so cannot ensure that the branch back from the
	 * detour buffer falls within branch range (i.e. +/- 32MB). The
	 * detour buffer ends with a branch back from the trampoline to the
	 * nip returned by analyse_instr() here.
	 *
	 * Ensure that the instruction is not a conditional branch, and
	 * that it can be emulated.
	 */
	if (!is_conditional_branch(*p->ainsn.insn) &&
	    analyse_instr(&op, &regs, *p->ainsn.insn) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}

static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs->nip = (unsigned long)op->kp.addr;
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_ppc_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

/*
 * emulate_step() expects the instruction to be emulated in its second
 * parameter. Load register 'r4' with the instruction.
 */
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
	/* addis r4,0,(insn)@h */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
			  ((val >> 16) & 0xffff));
	addr++;

	/* ori r4,r4,(insn)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
			  ___PPC_RS(4) | (val & 0xffff));
}

/*
 * Generate instructions to load the provided immediate 64-bit value
 * into register 'r3' and patch these instructions at 'addr'.
 */
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
	/* lis r3,(op)@highest */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
			  ((val >> 48) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@higher */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 32) & 0xffff));
	addr++;

	/* rldicr r3,r3,32,31 */
	patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
			  ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
	addr++;

	/* oris r3,r3,(op)@h */
	patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 16) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | (val & 0xffff));
}

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
	kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_ppc_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses a 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively nearby, because the
	 * offset is encoded in an immediate field of the branch opcode
	 * itself: the 24-bit LI field holds a signed word offset, giving
	 * a reach of +/- 2^25 bytes. Therefore the address has to be
	 * within 32MB on either side of the current instruction.
	 */
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within the 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
			(unsigned long)nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Set up the template */
	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
		if (rc < 0)
			goto error;
	}

	/*
	 * Fixup the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

	branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
				(unsigned long)op_callback_addr,
				BRANCH_SET_LINK);

	branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
				(unsigned long)emulate_step_addr,
				BRANCH_SET_LINK);

	if (!branch_op_callback || !branch_emulate_step)
		goto error;

	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

	/*
	 * 3. load the instruction to be emulated into the relevant
	 * register, and
	 */
	patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from trampoline
	 */
	patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);

	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_ppc_optinsn_slot(buff, 0);
	return -ERANGE;
}
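
/*
 * Note (added for this comparison, not in the original file): in this v5.4
 * version, create_branch() returns the branch instruction word directly,
 * with 0 on failure, so arch_prepare_optimized_kprobe() above tests the
 * returned instructions themselves. In v5.9, create_branch() instead writes
 * the instruction through a struct ppc_inst pointer and returns an error
 * code, which is why the two versions differ here.
 */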

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, optprobes always replace a single instruction (4 bytes
 * long and 4-byte aligned), so it is impossible for another kprobe to
 * lie within this address range. Always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Back up the instruction that will be replaced
		 * by the branch to the detour buffer.
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
					       RELATIVEJUMP_SIZE);
		patch_instruction(op->kp.addr,
			create_branch((unsigned int *)op->kp.addr,
				      (unsigned long)op->optinsn.insn, 0));
		list_del_init(&op->list);
	}
}

void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}