v6.8
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#include <asm/inst.h>

#define TMPL_CALL_HDLR_IDX	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX		(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX		(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX		(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX		(optprobe_template_end - optprobe_template_entry)
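
/*
 * Note: the optprobe_template_* symbols are labels in the hand-written
 * assembly detour template (see optprobes_head.S); the TMPL_*_IDX values
 * above are word offsets of the patch sites within that template. For
 * example, buff + TMPL_RET_IDX below is where the final branch back to
 * the probed code's nip gets patched.
 */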

static bool insn_page_in_use;

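/*
 * A single static page (optinsn_slot) backs the detour-buffer allocator:
 * alloc_optinsn_page() hands it out at most once at a time, and
 * free_optinsn_page() makes it available again.
 */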
void *alloc_optinsn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

void free_optinsn_page(void *page)
{
	insn_page_in_use = false;
}

/*
 * Check if we can optimize this probe. Returns the NIP post-emulation if
 * this probe can be optimized, and 0 otherwise.
 */
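/*
 * Illustrative example: for an ordinary (non-branch) instruction at the
 * probe address that analyse_instr() can emulate, the returned nip is
 * simply the next instruction (addr + 4); for an unconditional branch it
 * is the branch target. Either way, this is the address the detour
 * buffer must branch back to.
 */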
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;
	unsigned long addr = (unsigned long)p->addr;

	/*
	 * The kprobe placed on the kretprobe trampoline at boot time is a
	 * 'nop' instruction, which can always be emulated, so further
	 * checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
		return addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, not module
	 * addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr(addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we can't predict the resulting nip using a dummy
	 * pt_regs and so cannot ensure that the branch back from the
	 * detour buffer falls within the branch range (i.e. +/- 32MB).
	 * The branch back from the trampoline is set up in the detour
	 * buffer to target the nip returned by analyse_instr() here.
	 *
	 * Ensure that the instruction is not a conditional branch, and
	 * that it can be emulated.
	 */
	if (!is_conditional_branch(ppc_inst_read(p->ainsn.insn)) &&
	    analyse_instr(&op, &regs, ppc_inst_read(p->ainsn.insn)) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}

static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

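	/*
	 * If another kprobe handler is already active on this CPU, don't
	 * nest handlers: just record a missed hit for this probe.
	 */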
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs_set_return_ip(regs, (unsigned long)op->kp.addr);
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

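/*
 * Generate instructions to load the provided 32-bit immediate into
 * register 'reg' and patch them at 'addr'. Illustrative expansion for
 * val = 0x12345678, reg = 4:
 *
 *	lis	r4,0x1234	# r4 = 0x12340000
 *	ori	r4,r4,0x5678	# r4 = 0x12345678
 */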
static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
	patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HI(val))));
	patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}

/*
 * Generate instructions to load the provided 64-bit immediate value into
 * register 'reg' and patch these instructions at 'addr'.
 */
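/*
 * Illustrative expansion for val = 0xc0de123456789abc, reg = 3:
 *
 *	lis	r3,0xc0de	# highest 16 bits
 *	ori	r3,r3,0x1234	# higher 16 bits
 *	sldi	r3,r3,32	# shift them into the upper word
 *	oris	r3,r3,0x5678	# bits 31..16
 *	ori	r3,r3,0x9abc	# bits 15..0
 */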
static void patch_imm64_load_insns(unsigned long long val, int reg, kprobe_opcode_t *addr)
{
	patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HIGHEST(val))));
	patch_instruction(addr++, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_HIGHER(val))));
	patch_instruction(addr++, ppc_inst(PPC_RAW_SLDI(reg, reg, 32)));
	patch_instruction(addr++, ppc_inst(PPC_RAW_ORIS(reg, reg, PPC_HI(val))));
	patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}

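/*
 * Select the pointer-sized load sequence at compile time. The detour
 * template presumably reserves enough instruction slots for the longer
 * five-instruction 64-bit sequence.
 */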
static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
	if (IS_ENABLED(CONFIG_PPC64))
		patch_imm64_load_insns(val, reg, addr);
	else
		patch_imm32_load_insns(val, reg, addr);
}

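/*
 * Build the per-probe detour buffer from the template. A rough sketch of
 * the resulting trampoline (not verbatim assembly):
 *
 *	<save register state>
 *	load r3 with the address of this optimized_kprobe	(TMPL_OP_IDX)
 *	bl   optimized_callback					(TMPL_CALL_HDLR_IDX)
 *	load r4 with the original probed instruction		(TMPL_INSN_IDX)
 *	bl   emulate_step					(TMPL_EMULATE_IDX)
 *	<restore register state>
 *	b    <nip returned by can_optimize()>			(TMPL_RET_IDX)
 */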
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	ppc_inst_t branch_op_callback, branch_emulate_step, temp;
	unsigned long op_callback_addr, emulate_step_addr;
	kprobe_opcode_t *buff;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses a 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively nearby, to permit use
	 * of the branch instruction on powerpc, because the address is
	 * specified in a 24-bit immediate field of the instruction opcode
	 * itself, i.e. the target must be within 32MB on either side of
	 * the current instruction.
	 */
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within the 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) - nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Set up the template */
	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction(buff + i, ppc_inst(*(optprobe_template_entry + i)));
		if (rc < 0)
			goto error;
	}

	/*
	 * Fix up the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

	rc = create_branch(&branch_op_callback, buff + TMPL_CALL_HDLR_IDX,
			   op_callback_addr, BRANCH_SET_LINK);

	rc |= create_branch(&branch_emulate_step, buff + TMPL_EMULATE_IDX,
			    emulate_step_addr, BRANCH_SET_LINK);

	if (rc)
		goto error;

	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

	/*
	 * 3. load the instruction to be emulated into the relevant register, and
	 */
	temp = ppc_inst_read(p->ainsn.insn);
	patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from the trampoline
	 */
	patch_branch(buff + TMPL_RET_IDX, nip, 0);

	flush_icache_range((unsigned long)buff, (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_optinsn_slot(buff, 0);
	return -ERANGE;

}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, an optprobe always replaces exactly one instruction
 * (4 bytes aligned and 4 bytes long). It is impossible to encounter
 * another kprobe in this address range, so always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
	ppc_inst_t instr;
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Back up the instruction which will be replaced
		 * by the branch to the detour buffer.
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE);
		create_branch(&instr, op->kp.addr, (unsigned long)op->optinsn.insn, 0);
		patch_instruction(op->kp.addr, instr);
		list_del_init(&op->list);
	}
}

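/*
 * Unoptimizing re-arms the regular kprobe: arch_arm_kprobe() patches the
 * trap instruction back over our branch.
 */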
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

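/*
 * Report whether 'addr' falls within the instruction range this optprobe
 * replaced with its branch.
 */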
int arch_within_optimized_kprobe(struct optimized_kprobe *op, kprobe_opcode_t *addr)
{
	return (op->kp.addr <= addr &&
		op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
}
v5.9
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#include <asm/inst.h>

#define TMPL_CALL_HDLR_IDX	\
	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	\
	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX		\
	(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX		\
	(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX		\
	(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX		\
	(optprobe_template_end - optprobe_template_entry)

DEFINE_INSN_CACHE_OPS(ppc_optinsn);

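/*
 * DEFINE_INSN_CACHE_OPS(ppc_optinsn) above generates the
 * get_ppc_optinsn_slot()/free_ppc_optinsn_slot() helpers used below,
 * which operate on the kprobe_ppc_optinsn_slots cache defined here.
 */
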
static bool insn_page_in_use;

static void *__ppc_alloc_insn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
	insn_page_in_use = false;
}

struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
	/* insn_size initialized later */
	.alloc = __ppc_alloc_insn_page,
	.free = __ppc_free_insn_page,
	.nr_garbage = 0,
};

/*
 * Check if we can optimize this probe. Returns the NIP post-emulation if
 * this probe can be optimized, and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;

	/*
	 * The kprobe placed on the kretprobe trampoline at boot time is a
	 * 'nop' instruction, which can always be emulated, so further
	 * checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, not module
	 * addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr((unsigned long)p->addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = (unsigned long)p->addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we can't predict the resulting nip using a dummy
	 * pt_regs and so cannot ensure that the branch back from the
	 * detour buffer falls within the branch range (i.e. +/- 32MB).
	 * The branch back from the trampoline is set up in the detour
	 * buffer to target the nip returned by analyse_instr() here.
	 *
	 * Ensure that the instruction is not a conditional branch, and
	 * that it can be emulated.
	 */
	if (!is_conditional_branch(ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) &&
	    analyse_instr(&op, &regs,
			  ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}

static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs->nip = (unsigned long)op->kp.addr;
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_ppc_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

/*
 * emulate_step() requires the instruction to be emulated as its
 * second parameter. Load register 'r4' with the instruction.
 */
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
	/* addis r4,0,(insn)@h */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) |
				   ((val >> 16) & 0xffff)));
	addr++;

	/* ori r4,r4,(insn)@l */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ORI | ___PPC_RA(4) |
				   ___PPC_RS(4) | (val & 0xffff)));
}

/*
 * Generate instructions to load the provided 64-bit immediate value into
 * register 'reg' and patch these instructions at 'addr'.
 */
void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
	/* lis reg,(op)@highest */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ADDIS | ___PPC_RT(reg) |
				   ((val >> 48) & 0xffff)));
	addr++;

	/* ori reg,reg,(op)@higher */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) |
				   ___PPC_RS(reg) | ((val >> 32) & 0xffff)));
	addr++;

	/* rldicr reg,reg,32,31 */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_RLDICR | ___PPC_RA(reg) |
				   ___PPC_RS(reg) | __PPC_SH64(32) | __PPC_ME64(31)));
	addr++;

	/* oris reg,reg,(op)@h */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ORIS | ___PPC_RA(reg) |
				   ___PPC_RS(reg) | ((val >> 16) & 0xffff)));
	addr++;

	/* ori reg,reg,(op)@l */
	patch_instruction((struct ppc_inst *)addr,
			  ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) |
				   ___PPC_RS(reg) | (val & 0xffff)));
}

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	struct ppc_inst branch_op_callback, branch_emulate_step, temp;
	kprobe_opcode_t *op_callback_addr, *emulate_step_addr, *buff;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_ppc_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses a 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively nearby, to permit use
	 * of the branch instruction on powerpc, because the address is
	 * specified in a 24-bit immediate field of the instruction opcode
	 * itself, i.e. the target must be within 32MB on either side of
	 * the current instruction.
	 */
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within the 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
			(unsigned long)nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Set up the template */
	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction((struct ppc_inst *)(buff + i),
				       ppc_inst(*(optprobe_template_entry + i)));
		if (rc < 0)
			goto error;
	}

	/*
	 * Fix up the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm64_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

	rc = create_branch(&branch_op_callback,
			   (struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
			   (unsigned long)op_callback_addr,
			   BRANCH_SET_LINK);

	rc |= create_branch(&branch_emulate_step,
			    (struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
			    (unsigned long)emulate_step_addr,
			    BRANCH_SET_LINK);

	if (rc)
		goto error;

	patch_instruction((struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
			  branch_op_callback);
	patch_instruction((struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
			  branch_emulate_step);

	/*
	 * 3. load the instruction to be emulated into the relevant register, and
	 */
	temp = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);
	patch_imm64_load_insns(ppc_inst_as_u64(temp), 4, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from the trampoline
	 */
	patch_branch((struct ppc_inst *)(buff + TMPL_RET_IDX), (unsigned long)nip, 0);

	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_ppc_optinsn_slot(buff, 0);
	return -ERANGE;

}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, an optprobe always replaces exactly one instruction
 * (4 bytes aligned and 4 bytes long). It is impossible to encounter
 * another kprobe in this address range, so always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
	struct ppc_inst instr;
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Back up the instruction which will be replaced
		 * by the branch to the detour buffer.
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE);
		create_branch(&instr,
			      (struct ppc_inst *)op->kp.addr,
			      (unsigned long)op->optinsn.insn, 0);
		patch_instruction((struct ppc_inst *)op->kp.addr, instr);
		list_del_init(&op->list);
	}
}

void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}