arch/x86/kernel/kprobes/opt.c (v4.10.11)
  1/*
  2 *  Kernel Probes Jump Optimization (Optprobes)
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of the GNU General Public License as published by
  6 * the Free Software Foundation; either version 2 of the License, or
  7 * (at your option) any later version.
  8 *
  9 * This program is distributed in the hope that it will be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write to the Free Software
 16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 17 *
 18 * Copyright (C) IBM Corporation, 2002, 2004
 19 * Copyright (C) Hitachi Ltd., 2012
 20 */
 21#include <linux/kprobes.h>
 22#include <linux/ptrace.h>
 23#include <linux/string.h>
 24#include <linux/slab.h>
 25#include <linux/hardirq.h>
 26#include <linux/preempt.h>
 27#include <linux/extable.h>
 28#include <linux/kdebug.h>
 29#include <linux/kallsyms.h>
 30#include <linux/ftrace.h>
 31
 32#include <asm/text-patching.h>
 33#include <asm/cacheflush.h>
 34#include <asm/desc.h>
 35#include <asm/pgtable.h>
 36#include <linux/uaccess.h>
 37#include <asm/alternative.h>
 38#include <asm/insn.h>
 39#include <asm/debugreg.h>
 40
 41#include "common.h"
 42
 43unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
 44{
 45	struct optimized_kprobe *op;
 46	struct kprobe *kp;
 47	long offs;
 48	int i;
 49
 50	for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
 51		kp = get_kprobe((void *)addr - i);
 52		/* This function only handles jump-optimized kprobe */
 53		if (kp && kprobe_optimized(kp)) {
 54			op = container_of(kp, struct optimized_kprobe, kp);
 55			/* If op->list is not empty, op is under optimizing */
 56			if (list_empty(&op->list))
 57				goto found;
 58		}
 59	}
 60
 61	return addr;
 62found:
 63	/*
 64	 * If the kprobe can be optimized, the original bytes may already have
 65	 * been overwritten by the jump destination address. In that case the
 66	 * original bytes must be recovered from op->optinsn.copied_insn.
 67	 */
 68	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 69	if (addr == (unsigned long)kp->addr) {
 70		buf[0] = kp->opcode;
 71		memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
 72	} else {
 73		offs = addr - (unsigned long)kp->addr - 1;
 74		memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
 75	}
 76
 77	return (unsigned long)buf;
 78}
 79
 80/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
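/*
 * On x86-64 this emits 0x48 0xbf <imm64> (REX.W + MOV of a 64-bit immediate
 * into %rdi); on 32-bit it emits 0xb8 <imm32> (MOV of a 32-bit immediate
 * into %eax).
 */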
 81static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
 82{
 83#ifdef CONFIG_X86_64
 84	*addr++ = 0x48;
 85	*addr++ = 0xbf;
 86#else
 87	*addr++ = 0xb8;
 88#endif
 89	*(unsigned long *)addr = val;
 90}
 91
 92asm (
 93			".global optprobe_template_entry\n"
 94			"optprobe_template_entry:\n"
 95#ifdef CONFIG_X86_64
 96			/* We don't bother saving the ss register */
 97			"	pushq %rsp\n"
 98			"	pushfq\n"
 99			SAVE_REGS_STRING
100			"	movq %rsp, %rsi\n"
101			".global optprobe_template_val\n"
102			"optprobe_template_val:\n"
103			ASM_NOP5
104			ASM_NOP5
105			".global optprobe_template_call\n"
106			"optprobe_template_call:\n"
107			ASM_NOP5
108			/* Copy regs->flags into the regs->sp slot */
109			"	movq 144(%rsp), %rdx\n"
110			"	movq %rdx, 152(%rsp)\n"
111			RESTORE_REGS_STRING
112			/* Skip flags entry */
113			"	addq $8, %rsp\n"
114			"	popfq\n"
115#else /* CONFIG_X86_32 */
116			"	pushf\n"
117			SAVE_REGS_STRING
118			"	movl %esp, %edx\n"
119			".global optprobe_template_val\n"
120			"optprobe_template_val:\n"
121			ASM_NOP5
122			".global optprobe_template_call\n"
123			"optprobe_template_call:\n"
124			ASM_NOP5
125			RESTORE_REGS_STRING
126			"	addl $4, %esp\n"	/* skip cs */
127			"	popf\n"
128#endif
129			".global optprobe_template_end\n"
130			"optprobe_template_end:\n");
131
132#define TMPL_MOVE_IDX \
133	((long)&optprobe_template_val - (long)&optprobe_template_entry)
134#define TMPL_CALL_IDX \
135	((long)&optprobe_template_call - (long)&optprobe_template_entry)
136#define TMPL_END_IDX \
137	((long)&optprobe_template_end - (long)&optprobe_template_entry)
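/*
 * These offsets locate the NOP placeholders in the template above;
 * arch_prepare_optimized_kprobe() overwrites them with the synthesized
 * mov that loads @op into the first-argument register and the relative
 * call to optimized_callback().
 */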
138
139#define INT3_SIZE sizeof(kprobe_opcode_t)
140
141/* Optimized kprobe callback function: called from optinsn */
142static void
143optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
144{
145	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
146	unsigned long flags;
147
148	/* This is possible if op is under delayed unoptimizing */
149	if (kprobe_disabled(&op->kp))
150		return;
151
152	local_irq_save(flags);
153	if (kprobe_running()) {
154		kprobes_inc_nmissed_count(&op->kp);
155	} else {
156		/* Save skipped registers */
157#ifdef CONFIG_X86_64
158		regs->cs = __KERNEL_CS;
159#else
160		regs->cs = __KERNEL_CS | get_kernel_rpl();
161		regs->gs = 0;
162#endif
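		/* Mimic an int3 trap on the probe: ip points just past the breakpoint byte */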
163		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
164		regs->orig_ax = ~0UL;
165
166		__this_cpu_write(current_kprobe, &op->kp);
167		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
168		opt_pre_handler(&op->kp, regs);
169		__this_cpu_write(current_kprobe, NULL);
170	}
171	local_irq_restore(flags);
172}
173NOKPROBE_SYMBOL(optimized_callback);
174
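/*
 * Copy at least RELATIVEJUMP_SIZE bytes of whole instructions from @src,
 * but only if every copied instruction can also run from the out-of-line
 * buffer (can_boost()).
 */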
175static int copy_optimized_instructions(u8 *dest, u8 *src)
176{
177	int len = 0, ret;
178
179	while (len < RELATIVEJUMP_SIZE) {
180		ret = __copy_instruction(dest + len, src + len);
181		if (!ret || !can_boost(dest + len))
182			return -EINVAL;
183		len += ret;
184	}
185	/* Check whether the address range is reserved */
186	if (ftrace_text_reserved(src, src + len - 1) ||
187	    alternatives_text_reserved(src, src + len - 1) ||
188	    jump_label_text_reserved(src, src + len - 1))
189		return -EBUSY;
190
191	return len;
192}
193
194/* Check whether insn is indirect jump */
195static int insn_is_indirect_jump(struct insn *insn)
196{
197	return ((insn->opcode.bytes[0] == 0xff &&
198		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
199		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
200}
201
202/* Check whether insn jumps into specified address range */
203static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
204{
205	unsigned long target = 0;
206
207	switch (insn->opcode.bytes[0]) {
208	case 0xe0:	/* loopne */
209	case 0xe1:	/* loope */
210	case 0xe2:	/* loop */
211	case 0xe3:	/* jcxz */
212	case 0xe9:	/* near relative jump */
213	case 0xeb:	/* short relative jump */
214		break;
215	case 0x0f:
216		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
217			break;
218		return 0;
219	default:
220		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
221			break;
222		return 0;
223	}
224	target = (unsigned long)insn->next_byte + insn->immediate.value;
225
226	return (start <= target && target <= start + len);
227}
228
229/* Decode the whole function to ensure no instruction jumps into the target */
230static int can_optimize(unsigned long paddr)
231{
232	unsigned long addr, size = 0, offset = 0;
233	struct insn insn;
234	kprobe_opcode_t buf[MAX_INSN_SIZE];
235
236	/* Lookup symbol including addr */
237	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
238		return 0;
239
240	/*
241	 * Do not optimize in the entry code due to the unstable
242	 * stack handling.
243	 */
244	if ((paddr >= (unsigned long)__entry_text_start) &&
245	    (paddr <  (unsigned long)__entry_text_end))
246		return 0;
247
248	/* Check there is enough space for a relative jump. */
249	if (size - offset < RELATIVEJUMP_SIZE)
250		return 0;
251
252	/* Decode instructions */
253	addr = paddr - offset;
254	while (addr < paddr - offset + size) { /* Decode until function end */
255		unsigned long recovered_insn;
256		if (search_exception_tables(addr))
257			/*
258			 * Since some fixup code will jump into this function,
259			 * we can't optimize kprobe in this function.
260			 */
261			return 0;
262		recovered_insn = recover_probed_instruction(buf, addr);
263		if (!recovered_insn)
264			return 0;
265		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
266		insn_get_length(&insn);
267		/* Another subsystem puts a breakpoint */
268		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
269			return 0;
270		/* Recover address */
271		insn.kaddr = (void *)addr;
272		insn.next_byte = (void *)(addr + insn.length);
273		/* Check that this instruction doesn't jump into the target */
274		if (insn_is_indirect_jump(&insn) ||
275		    insn_jump_into_range(&insn, paddr + INT3_SIZE,
276					 RELATIVE_ADDR_SIZE))
277			return 0;
278		addr += insn.length;
279	}
280
281	return 1;
282}
283
284/* Check optimized_kprobe can actually be optimized. */
285int arch_check_optimized_kprobe(struct optimized_kprobe *op)
286{
287	int i;
288	struct kprobe *p;
289
290	for (i = 1; i < op->optinsn.size; i++) {
291		p = get_kprobe(op->kp.addr + i);
292		if (p && !kprobe_disabled(p))
293			return -EEXIST;
294	}
295
296	return 0;
297}
298
299/* Check the addr is within the optimized instructions. */
300int arch_within_optimized_kprobe(struct optimized_kprobe *op,
301				 unsigned long addr)
302{
303	return ((unsigned long)op->kp.addr <= addr &&
304		(unsigned long)op->kp.addr + op->optinsn.size > addr);
305}
306
307/* Free optimized instruction slot */
308static
309void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
310{
311	if (op->optinsn.insn) {
312		free_optinsn_slot(op->optinsn.insn, dirty);
313		op->optinsn.insn = NULL;
314		op->optinsn.size = 0;
315	}
316}
317
318void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
319{
320	__arch_remove_optimized_kprobe(op, 1);
321}
322
323/*
324 * Copy replacing target instructions
325 * Target instructions MUST be relocatable (checked inside)
326 * This is called when new aggr(opt)probe is allocated or reused.
327 */
328int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
329				  struct kprobe *__unused)
330{
331	u8 *buf;
332	int ret;
333	long rel;
334
335	if (!can_optimize((unsigned long)op->kp.addr))
336		return -EILSEQ;
337
338	op->optinsn.insn = get_optinsn_slot();
339	if (!op->optinsn.insn)
340		return -ENOMEM;
341
342	/*
343	 * Verify if the address gap is in 2GB range, because this uses
344	 * a relative jump.
345	 */
346	rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
347	if (abs(rel) > 0x7fffffff) {
348		__arch_remove_optimized_kprobe(op, 0);
349		return -ERANGE;
350	}
351
352	buf = (u8 *)op->optinsn.insn;
353
354	/* Copy instructions into the out-of-line buffer */
355	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
356	if (ret < 0) {
357		__arch_remove_optimized_kprobe(op, 0);
358		return ret;
359	}
360	op->optinsn.size = ret;
361
362	/* Copy arch-dep-instance from template */
363	memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
364
365	/* Set probe information */
366	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
367
368	/* Set probe function call */
369	synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
370
371	/* Set returning jmp instruction at the tail of out-of-line buffer */
372	synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
373			   (u8 *)op->kp.addr + op->optinsn.size);
374
375	flush_icache_range((unsigned long) buf,
376			   (unsigned long) buf + TMPL_END_IDX +
377			   op->optinsn.size + RELATIVEJUMP_SIZE);
378	return 0;
379}
380
381/*
382 * Replace breakpoints (int3) with relative jumps.
383 * The caller must hold kprobe_mutex and text_mutex.
384 */
385void arch_optimize_kprobes(struct list_head *oplist)
386{
387	struct optimized_kprobe *op, *tmp;
388	u8 insn_buf[RELATIVEJUMP_SIZE];
389
390	list_for_each_entry_safe(op, tmp, oplist, list) {
391		s32 rel = (s32)((long)op->optinsn.insn -
392			((long)op->kp.addr + RELATIVEJUMP_SIZE));
393
394		WARN_ON(kprobe_disabled(&op->kp));
395
396		/* Backup instructions which will be replaced by jump address */
397		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
398		       RELATIVE_ADDR_SIZE);
399
400		insn_buf[0] = RELATIVEJUMP_OPCODE;
401		*(s32 *)(&insn_buf[1]) = rel;
402
403		text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
404			     op->optinsn.insn);
405
406		list_del_init(&op->list);
407	}
408}
409
410/* Replace a relative jump with a breakpoint (int3).  */
411void arch_unoptimize_kprobe(struct optimized_kprobe *op)
412{
413	u8 insn_buf[RELATIVEJUMP_SIZE];
414
415	/* Set int3 to first byte for kprobes */
416	insn_buf[0] = BREAKPOINT_INSTRUCTION;
417	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
418	text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
419		     op->optinsn.insn);
420}
421
422/*
423 * Recover original instructions and breakpoints from relative jumps.
424 * The caller must hold kprobe_mutex.
425 */
426extern void arch_unoptimize_kprobes(struct list_head *oplist,
427				    struct list_head *done_list)
428{
429	struct optimized_kprobe *op, *tmp;
430
431	list_for_each_entry_safe(op, tmp, oplist, list) {
432		arch_unoptimize_kprobe(op);
433		list_move(&op->list, done_list);
434	}
435}
436
437int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
438{
439	struct optimized_kprobe *op;
440
441	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
442		/* This kprobe is really able to run optimized path. */
443		op = container_of(p, struct optimized_kprobe, kp);
444		/* Detour through copied instructions */
445		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
446		if (!reenter)
447			reset_current_kprobe();
448		preempt_enable_no_resched();
449		return 1;
450	}
451	return 0;
452}
453NOKPROBE_SYMBOL(setup_detour_execution);
arch/x86/kernel/kprobes/opt.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 *  Kernel Probes Jump Optimization (Optprobes)
  4 *
  5 * Copyright (C) IBM Corporation, 2002, 2004
  6 * Copyright (C) Hitachi Ltd., 2012
  7 */
  8#include <linux/kprobes.h>
  9#include <linux/perf_event.h>
 10#include <linux/ptrace.h>
 11#include <linux/string.h>
 12#include <linux/slab.h>
 13#include <linux/hardirq.h>
 14#include <linux/preempt.h>
 15#include <linux/extable.h>
 16#include <linux/kdebug.h>
 17#include <linux/kallsyms.h>
 18#include <linux/kgdb.h>
 19#include <linux/ftrace.h>
 20#include <linux/objtool.h>
 21#include <linux/pgtable.h>
 22#include <linux/static_call.h>
 23
 24#include <asm/text-patching.h>
 25#include <asm/cacheflush.h>
 26#include <asm/desc.h>
 27#include <linux/uaccess.h>
 28#include <asm/alternative.h>
 29#include <asm/insn.h>
 30#include <asm/debugreg.h>
 31#include <asm/set_memory.h>
 32#include <asm/sections.h>
 33#include <asm/nospec-branch.h>
 34
 35#include "common.h"
 36
 37unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
 38{
 39	struct optimized_kprobe *op;
 40	struct kprobe *kp;
 41	long offs;
 42	int i;
 43
 44	for (i = 0; i < JMP32_INSN_SIZE; i++) {
 45		kp = get_kprobe((void *)addr - i);
 46		/* This function only handles jump-optimized kprobe */
 47		if (kp && kprobe_optimized(kp)) {
 48			op = container_of(kp, struct optimized_kprobe, kp);
 49			/* If op is optimized or under unoptimizing */
 50			if (list_empty(&op->list) || optprobe_queued_unopt(op))
 51				goto found;
 52		}
 53	}
 54
 55	return addr;
 56found:
 57	/*
 58	 * If the kprobe can be optimized, the original bytes may already have
 59	 * been overwritten by the jump destination address. In that case the
 60	 * original bytes must be recovered from op->optinsn.copied_insn.
 61	 */
 62	if (copy_from_kernel_nofault(buf, (void *)addr,
 63		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
 64		return 0UL;
 65
 66	if (addr == (unsigned long)kp->addr) {
 67		buf[0] = kp->opcode;
 68		memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
 69	} else {
 70		offs = addr - (unsigned long)kp->addr - 1;
 71		memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
 72	}
 73
 74	return (unsigned long)buf;
 75}
 76
 77static void synthesize_clac(kprobe_opcode_t *addr)
 78{
 79	/*
 80	 * Can't be static_cpu_has() due to how objtool treats this feature bit.
 81	 * This isn't a fast path anyway.
 82	 */
 83	if (!boot_cpu_has(X86_FEATURE_SMAP))
 84		return;
 85
 86	/* Replace the NOP3 with CLAC */
 87	addr[0] = 0x0f;
 88	addr[1] = 0x01;
 89	addr[2] = 0xca;
 90}
 91
 92/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
 93static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
 94{
 95#ifdef CONFIG_X86_64
 96	*addr++ = 0x48;
 97	*addr++ = 0xbf;
 98#else
 99	*addr++ = 0xb8;
100#endif
101	*(unsigned long *)addr = val;
102}
103
104asm (
105			".pushsection .rodata\n"
106			"optprobe_template_func:\n"
107			".global optprobe_template_entry\n"
108			"optprobe_template_entry:\n"
109#ifdef CONFIG_X86_64
110			"       pushq $" __stringify(__KERNEL_DS) "\n"
111			/* Save the 'sp - 8', this will be fixed later. */
112			"	pushq %rsp\n"
113			"	pushfq\n"
114			".global optprobe_template_clac\n"
115			"optprobe_template_clac:\n"
116			ASM_NOP3
117			SAVE_REGS_STRING
118			"	movq %rsp, %rsi\n"
119			".global optprobe_template_val\n"
120			"optprobe_template_val:\n"
121			ASM_NOP5
122			ASM_NOP5
123			".global optprobe_template_call\n"
124			"optprobe_template_call:\n"
125			ASM_NOP5
126			/* Copy 'regs->flags' into 'regs->ss'. */
127			"	movq 18*8(%rsp), %rdx\n"
128			"	movq %rdx, 20*8(%rsp)\n"
129			RESTORE_REGS_STRING
130			/* Skip 'regs->flags' and 'regs->sp'. */
131			"	addq $16, %rsp\n"
132			/* And pop flags register from 'regs->ss'. */
133			"	popfq\n"
134#else /* CONFIG_X86_32 */
135			"	pushl %ss\n"
136			/* Save the 'sp - 4', this will be fixed later. */
137			"	pushl %esp\n"
138			"	pushfl\n"
139			".global optprobe_template_clac\n"
140			"optprobe_template_clac:\n"
141			ASM_NOP3
142			SAVE_REGS_STRING
143			"	movl %esp, %edx\n"
144			".global optprobe_template_val\n"
145			"optprobe_template_val:\n"
146			ASM_NOP5
147			".global optprobe_template_call\n"
148			"optprobe_template_call:\n"
149			ASM_NOP5
150			/* Copy 'regs->flags' into 'regs->ss'. */
151			"	movl 14*4(%esp), %edx\n"
152			"	movl %edx, 16*4(%esp)\n"
153			RESTORE_REGS_STRING
154			/* Skip 'regs->flags' and 'regs->sp'. */
155			"	addl $8, %esp\n"
156			/* And pop flags register from 'regs->ss'. */
157			"	popfl\n"
158#endif
159			".global optprobe_template_end\n"
160			"optprobe_template_end:\n"
161			".popsection\n");
162
163void optprobe_template_func(void);
164STACK_FRAME_NON_STANDARD(optprobe_template_func);
165
166#define TMPL_CLAC_IDX \
167	((long)optprobe_template_clac - (long)optprobe_template_entry)
168#define TMPL_MOVE_IDX \
169	((long)optprobe_template_val - (long)optprobe_template_entry)
170#define TMPL_CALL_IDX \
171	((long)optprobe_template_call - (long)optprobe_template_entry)
172#define TMPL_END_IDX \
173	((long)optprobe_template_end - (long)optprobe_template_entry)
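/*
 * These offsets locate the placeholder NOPs in the template above; they are
 * patched by synthesize_clac(), synthesize_set_arg1() and synthesize_relcall()
 * when the out-of-line slot is prepared.
 */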
174
175/* Optimized kprobe callback function: called from optinsn */
176static void
177optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
178{
179	/* This is possible if op is under delayed unoptimizing */
180	if (kprobe_disabled(&op->kp))
181		return;
182
183	preempt_disable();
184	if (kprobe_running()) {
185		kprobes_inc_nmissed_count(&op->kp);
186	} else {
187		struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
188		/* Adjust stack pointer */
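		/* The template saved 'sp - sizeof(long)' (see the 'pushq %rsp' / 'pushl %esp' above); restore the original value. */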
189		regs->sp += sizeof(long);
190		/* Save skipped registers */
191		regs->cs = __KERNEL_CS;
192#ifdef CONFIG_X86_32
193		regs->gs = 0;
194#endif
195		regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
196		regs->orig_ax = ~0UL;
197
198		__this_cpu_write(current_kprobe, &op->kp);
199		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
200		opt_pre_handler(&op->kp, regs);
201		__this_cpu_write(current_kprobe, NULL);
202	}
203	preempt_enable();
204}
205NOKPROBE_SYMBOL(optimized_callback);
206
207static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
208{
209	struct insn insn;
210	int len = 0, ret;
211
212	while (len < JMP32_INSN_SIZE) {
213		ret = __copy_instruction(dest + len, src + len, real + len, &insn);
214		if (!ret || !can_boost(&insn, src + len))
215			return -EINVAL;
216		len += ret;
217	}
218	/* Check whether the address range is reserved */
219	if (ftrace_text_reserved(src, src + len - 1) ||
220	    alternatives_text_reserved(src, src + len - 1) ||
221	    jump_label_text_reserved(src, src + len - 1) ||
222	    static_call_text_reserved(src, src + len - 1))
223		return -EBUSY;
224
225	return len;
226}
227
228/* Check whether insn is indirect jump */
229static int insn_is_indirect_jump(struct insn *insn)
230{
231	return ((insn->opcode.bytes[0] == 0xff &&
232		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
233		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
234}
235
236/* Check whether insn jumps into specified address range */
237static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
238{
239	unsigned long target = 0;
240
241	switch (insn->opcode.bytes[0]) {
242	case 0xe0:	/* loopne */
243	case 0xe1:	/* loope */
244	case 0xe2:	/* loop */
245	case 0xe3:	/* jcxz */
246	case 0xe9:	/* near relative jump */
247	case 0xeb:	/* short relative jump */
248		break;
249	case 0x0f:
250		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
251			break;
252		return 0;
253	default:
254		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
255			break;
256		return 0;
257	}
258	target = (unsigned long)insn->next_byte + insn->immediate.value;
259
260	return (start <= target && target <= start + len);
261}
262
263/* Decode the whole function to ensure no instruction jumps into the target */
264static int can_optimize(unsigned long paddr)
265{
266	unsigned long addr, size = 0, offset = 0;
267	struct insn insn;
268	kprobe_opcode_t buf[MAX_INSN_SIZE];
269
270	/* Lookup symbol including addr */
271	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
272		return 0;
273
274	/*
275	 * Do not optimize in the entry code due to the unstable
276	 * stack handling and registers setup.
277	 */
278	if (((paddr >= (unsigned long)__entry_text_start) &&
279	     (paddr <  (unsigned long)__entry_text_end)))
280		return 0;
281
282	/* Check there is enough space for a relative jump. */
283	if (size - offset < JMP32_INSN_SIZE)
284		return 0;
285
286	/* Decode instructions */
287	addr = paddr - offset;
288	while (addr < paddr - offset + size) { /* Decode until function end */
289		unsigned long recovered_insn;
290		int ret;
291
292		if (search_exception_tables(addr))
293			/*
294			 * Since some fixup code will jump into this function,
295			 * we can't optimize kprobe in this function.
296			 */
297			return 0;
298		recovered_insn = recover_probed_instruction(buf, addr);
299		if (!recovered_insn)
300			return 0;
301
302		ret = insn_decode_kernel(&insn, (void *)recovered_insn);
303		if (ret < 0)
304			return 0;
305#ifdef CONFIG_KGDB
306		/*
307		 * If there is a dynamically installed kgdb sw breakpoint,
308		 * this function should not be probed.
309		 */
310		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
311		    kgdb_has_hit_break(addr))
312			return 0;
313#endif
314		/* Recover address */
315		insn.kaddr = (void *)addr;
316		insn.next_byte = (void *)(addr + insn.length);
317		/*
318		 * Check that no instruction jumps into the target, indirectly or
319		 * directly.
320		 *
321		 * The indirect case is present to handle code with jump
322		 * tables. When the kernel uses retpolines, the check should in
323		 * theory additionally look for jumps to indirect thunks.
324		 * However, the kernel built with retpolines or IBT has jump
325		 * tables disabled so the check can be skipped altogether.
326		 */
327		if (!IS_ENABLED(CONFIG_RETPOLINE) &&
328		    !IS_ENABLED(CONFIG_X86_KERNEL_IBT) &&
329		    insn_is_indirect_jump(&insn))
330			return 0;
331		if (insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
332					 DISP32_SIZE))
333			return 0;
334		addr += insn.length;
335	}
336
337	return 1;
338}
339
340/* Check optimized_kprobe can actually be optimized. */
341int arch_check_optimized_kprobe(struct optimized_kprobe *op)
342{
343	int i;
344	struct kprobe *p;
345
346	for (i = 1; i < op->optinsn.size; i++) {
347		p = get_kprobe(op->kp.addr + i);
348		if (p && !kprobe_disarmed(p))
349			return -EEXIST;
350	}
351
352	return 0;
353}
354
355/* Check the addr is within the optimized instructions. */
356int arch_within_optimized_kprobe(struct optimized_kprobe *op,
357				 kprobe_opcode_t *addr)
358{
359	return (op->kp.addr <= addr &&
360		op->kp.addr + op->optinsn.size > addr);
361}
362
363/* Free optimized instruction slot */
364static
365void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
366{
367	u8 *slot = op->optinsn.insn;
368	if (slot) {
369		int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE;
370
371		/* Record the perf event before freeing the slot */
372		if (dirty)
373			perf_event_text_poke(slot, slot, len, NULL, 0);
374
375		free_optinsn_slot(slot, dirty);
376		op->optinsn.insn = NULL;
377		op->optinsn.size = 0;
378	}
379}
380
381void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
382{
383	__arch_remove_optimized_kprobe(op, 1);
384}
385
386/*
387 * Copy replacing target instructions
388 * Target instructions MUST be relocatable (checked inside)
389 * This is called when new aggr(opt)probe is allocated or reused.
390 */
391int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
392				  struct kprobe *__unused)
393{
394	u8 *buf = NULL, *slot;
395	int ret, len;
396	long rel;
397
398	if (!can_optimize((unsigned long)op->kp.addr))
399		return -EILSEQ;
400
401	buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
402	if (!buf)
403		return -ENOMEM;
404
405	op->optinsn.insn = slot = get_optinsn_slot();
406	if (!slot) {
407		ret = -ENOMEM;
408		goto out;
409	}
410
411	/*
412	 * Verify if the address gap is in 2GB range, because this uses
413	 * a relative jump.
414	 */
415	rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
416	if (abs(rel) > 0x7fffffff) {
417		ret = -ERANGE;
418		goto err;
419	}
420
421	/* Copy arch-dep-instance from template */
422	memcpy(buf, optprobe_template_entry, TMPL_END_IDX);
423
424	/* Copy instructions into the out-of-line buffer */
425	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
426					  slot + TMPL_END_IDX);
427	if (ret < 0)
428		goto err;
429	op->optinsn.size = ret;
430	len = TMPL_END_IDX + op->optinsn.size;
431
432	synthesize_clac(buf + TMPL_CLAC_IDX);
433
434	/* Set probe information */
435	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
436
437	/* Set probe function call */
438	synthesize_relcall(buf + TMPL_CALL_IDX,
439			   slot + TMPL_CALL_IDX, optimized_callback);
440
441	/* Set returning jmp instruction at the tail of out-of-line buffer */
442	synthesize_reljump(buf + len, slot + len,
443			   (u8 *)op->kp.addr + op->optinsn.size);
444	len += JMP32_INSN_SIZE;
445
446	/*
447	 * Note	len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE is also
448	 * used in __arch_remove_optimized_kprobe().
449	 */
450
451	/* We have to use text_poke() for instruction buffer because it is RO */
452	perf_event_text_poke(slot, NULL, 0, buf, len);
453	text_poke(slot, buf, len);
454
455	ret = 0;
456out:
457	kfree(buf);
458	return ret;
459
460err:
461	__arch_remove_optimized_kprobe(op, 0);
462	goto out;
463}
464
465/*
466 * Replace breakpoints (INT3) with relative jumps (JMP.d32).
467 * The caller must hold kprobe_mutex and text_mutex.
468 *
469 * The caller will have installed a regular kprobe and after that issued
470 * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
471 * the 4 bytes after the INT3 are unused and can now be overwritten.
472 */
473void arch_optimize_kprobes(struct list_head *oplist)
474{
475	struct optimized_kprobe *op, *tmp;
476	u8 insn_buff[JMP32_INSN_SIZE];
477
478	list_for_each_entry_safe(op, tmp, oplist, list) {
479		s32 rel = (s32)((long)op->optinsn.insn -
480			((long)op->kp.addr + JMP32_INSN_SIZE));
481
482		WARN_ON(kprobe_disabled(&op->kp));
483
484		/* Backup instructions which will be replaced by jump address */
485		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
486		       DISP32_SIZE);
487
488		insn_buff[0] = JMP32_INSN_OPCODE;
489		*(s32 *)(&insn_buff[1]) = rel;
490
491		text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);
492
493		list_del_init(&op->list);
494	}
495}
496
497/*
498 * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
499 *
500 * After that, we can restore the 4 bytes after the INT3 to undo what
501 * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
502 * unused once the INT3 lands.
503 */
504void arch_unoptimize_kprobe(struct optimized_kprobe *op)
505{
506	u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
507	u8 old[JMP32_INSN_SIZE];
508	u8 *addr = op->kp.addr;
509
510	memcpy(old, op->kp.addr, JMP32_INSN_SIZE);
511	memcpy(new + INT3_INSN_SIZE,
512	       op->optinsn.copied_insn,
513	       JMP32_INSN_SIZE - INT3_INSN_SIZE);
514
515	text_poke(addr, new, INT3_INSN_SIZE);
516	text_poke_sync();
517	text_poke(addr + INT3_INSN_SIZE,
518		  new + INT3_INSN_SIZE,
519		  JMP32_INSN_SIZE - INT3_INSN_SIZE);
520	text_poke_sync();
521
522	perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
523}
524
525/*
526 * Recover original instructions and breakpoints from relative jumps.
527 * The caller must hold kprobe_mutex.
528 */
529extern void arch_unoptimize_kprobes(struct list_head *oplist,
530				    struct list_head *done_list)
531{
532	struct optimized_kprobe *op, *tmp;
533
534	list_for_each_entry_safe(op, tmp, oplist, list) {
535		arch_unoptimize_kprobe(op);
536		list_move(&op->list, done_list);
537	}
538}
539
540int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
541{
542	struct optimized_kprobe *op;
543
544	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
545		/* This kprobe is really able to run optimized path. */
546		op = container_of(p, struct optimized_kprobe, kp);
547		/* Detour through copied instructions */
548		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
549		if (!reenter)
550			reset_current_kprobe();
551		return 1;
552	}
553	return 0;
554}
555NOKPROBE_SYMBOL(setup_detour_execution);