arch/powerpc/kernel/optprobes.c (v4.17)
 
/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>

#define TMPL_CALL_HDLR_IDX	\
	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	\
	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX		\
	(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX		\
	(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX		\
	(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX		\
	(optprobe_template_end - optprobe_template_entry)
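
/*
 * Rough sketch (illustrative) of the detour buffer built from the
 * template; the exact layout lives in the optprobe template assembly,
 * and the regions below are the ones patched in by
 * arch_prepare_optimized_kprobe():
 *
 *   [entry]               save registers, set up a pt_regs frame
 *   [TMPL_OP_IDX]         5 insns: load &optimized_kprobe into r3
 *   [TMPL_CALL_HDLR_IDX]  bl optimized_callback
 *   [TMPL_INSN_IDX]       2 insns: load the probed instruction into r4
 *   [TMPL_EMULATE_IDX]    bl emulate_step
 *   [TMPL_RET_IDX]        b <nip>, branch back past the probepoint
 *   [TMPL_END_IDX]        end of template
 */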

DEFINE_INSN_CACHE_OPS(ppc_optinsn);

static bool insn_page_in_use;

static void *__ppc_alloc_insn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
	insn_page_in_use = false;
}

struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
	/* insn_size initialized later */
	.alloc = __ppc_alloc_insn_page,
	.free = __ppc_free_insn_page,
	.nr_garbage = 0,
};
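
/*
 * Illustrative note: the allocator above hands out a single static
 * area, &optinsn_slot, reserved in the kernel image so that detour
 * buffers stay within branch range of kernel text. Only one such page
 * can be in use at a time; a second allocation fails until the first
 * is freed.
 */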

/*
 * Check if we can optimize this probe. Returns the post-emulation NIP
 * if this probe can be optimized, and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;

	/*
	 * The kprobe placed on the kretprobe trampoline at boot time
	 * is a 'nop' instruction, which can always be emulated,
	 * so further checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, not module
	 * addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr((unsigned long)p->addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = (unsigned long)p->addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we can't predict the nip ahead of time with a
	 * dummy pt_regs, and so can't ensure that the return branch
	 * from the detour buffer falls within the 32MB branch range.
	 * A branch back from the trampoline to the nip returned by
	 * analyse_instr() here is set up in the detour buffer.
	 *
	 * Ensure that the instruction is not a conditional branch,
	 * and that it can be emulated.
	 */
	if (!is_conditional_branch(*p->ainsn.insn) &&
			analyse_instr(&op, &regs, *p->ainsn.insn) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}
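
/*
 * Worked example (illustrative, made-up addresses): for a probe on an
 * 'addi' instruction at 0xc000000000abc000, emulation against the
 * dummy pt_regs leaves regs.nip at 0xc000000000abc004, so the detour
 * buffer branches back to the next instruction. For an unconditional
 * 'b +0x100' at the same address, regs.nip would instead be
 * 0xc000000000abc100, the branch target.
 */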

static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs->nip = (unsigned long)op->kp.addr;
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_ppc_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

/*
 * emulate_step() takes the instruction to be emulated as its second
 * parameter, i.e. in register 'r4'. Generate a two-instruction
 * sequence that loads 'val' into r4, and patch it at 'addr'.
 */
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
	/* addis r4,0,(insn)@h */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
			  ((val >> 16) & 0xffff));
	addr++;

	/* ori r4,r4,(insn)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
			  ___PPC_RS(4) | (val & 0xffff));
}
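
/*
 * Worked example (illustrative): for val = 0x4e800020 (a 'blr'
 * instruction), the patched sequence is:
 *
 *   lis  r4,0x4e80     ; r4 = 0x4e800000
 *   ori  r4,r4,0x0020  ; r4 = 0x4e800020
 *
 * For values with bit 31 set, lis sign-extends into the upper 32 bits;
 * emulate_step() only consumes the low 32 bits of the instruction, so
 * that is harmless here.
 */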

/*
 * Generate instructions to load the provided 64-bit immediate 'val'
 * into register 'r3', and patch them at 'addr'.
 */
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
	/* lis r3,(op)@highest */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
			  ((val >> 48) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@higher */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 32) & 0xffff));
	addr++;

	/* rldicr r3,r3,32,31 */
	patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
			  ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
	addr++;

	/* oris r3,r3,(op)@h */
	patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 16) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | (val & 0xffff));
}
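
/*
 * Worked example (illustrative, made-up value 0xc0de123456789abc):
 *
 *   lis    r3,0xc0de    ; r3 = 0xffffffffc0de0000 (sign-extended)
 *   ori    r3,r3,0x1234 ; r3 = 0xffffffffc0de1234
 *   rldicr r3,r3,32,31  ; r3 = 0xc0de123400000000 (rotate left 32,
 *                       ;  clear the low 32 bits, discarding the
 *                       ;  sign extension)
 *   oris   r3,r3,0x5678 ; r3 = 0xc0de123456780000
 *   ori    r3,r3,0x9abc ; r3 = 0xc0de123456789abc
 */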

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
	kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate an instruction slot for the detour buffer */
	buff = get_ppc_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses a 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively close by, to permit
	 * use of the branch instruction on powerpc, because the offset
	 * is encoded in a 24-bit immediate field in the instruction
	 * itself. Therefore the target must be within 32MB on either
	 * side of the current instruction.
	 */
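	/*
	 * Worked arithmetic (illustrative): the I-form 'b' instruction
	 * encodes a signed 24-bit field LI; the byte offset is LI << 2,
	 * sign extended, i.e. a 26-bit signed byte offset, giving a
	 * reach of +/- 2^25 bytes = +/- 32MB.
	 */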
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within the 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
			(unsigned long)nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Set up the template */
	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
		if (rc < 0)
			goto error;
	}

	/*
	 * Fix up the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

	branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
				(unsigned long)op_callback_addr,
				BRANCH_SET_LINK);

	branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
				(unsigned long)emulate_step_addr,
				BRANCH_SET_LINK);

	if (!branch_op_callback || !branch_emulate_step)
		goto error;

	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

	/*
	 * 3. load the instruction to be emulated into the relevant
	 *    register, and
	 */
	patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from the trampoline
	 */
	patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);

	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_ppc_optinsn_slot(buff, 0);
	return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, an optprobe always replaces exactly one instruction
 * (4 bytes long and 4-byte aligned), so it is impossible for another
 * kprobe to lie within that address range. Always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Back up the instruction that will be replaced
		 * by the branch to the detour buffer.
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
					       RELATIVEJUMP_SIZE);
		patch_instruction(op->kp.addr,
			create_branch((unsigned int *)op->kp.addr,
				      (unsigned long)op->optinsn.insn, 0));
		list_del_init(&op->list);
	}
}
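
/*
 * Illustrative before/after at a probe site (made-up address):
 *
 *   before:  c000000000abc000:  <probed instruction>
 *   after:   c000000000abc000:  b <detour buffer>
 *
 * The displaced instruction is kept in op->optinsn.copied_insn. Note
 * that unoptimizing (below) re-arms the regular kprobe trap at the
 * site via arch_arm_kprobe() rather than restoring the original
 * instruction directly.
 */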

void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}