v3.1
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_64BIT
#define MCOUNT_OFFSET_RET 12	/* return point (offset 18) - patch site (offset 6) */
#else
#define MCOUNT_OFFSET_RET 22	/* return point (offset 26) - patch site (offset 4) */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_disable_code(void);
void ftrace_enable_insn(void);

#ifdef CONFIG_64BIT
/*
 * The 64-bit mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	larl	%r1,<&counter>		# offset 6
 * >	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. The middle two instructions of the mcount
 * block get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 64-bit enabled ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%r15)		# offset 18
 * The return points of the mcount/ftrace function have the same offset 18.
 * The 64-bit disabled ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	jg	.+18			# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%r15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	jg	0f\n"
	"	lgr	%r0,%r0\n"
	"	basr	%r14,%r1\n"
	"0:\n"
	"	.align	4\n"
	"ftrace_enable_insn:\n"
	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n");

#define FTRACE_INSN_SIZE	6

#else /* CONFIG_64BIT */
/*
 * The 31-bit mcount code looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	bras	%r1,0f			# offset 4
 * >	.long	_mcount			# offset 8
 * >	.long	<&counter>		# offset 12
 * > 0:	l	%r14,0(%r1)		# offset 16
 * >	l	%r1,4(%r1)		# offset 20
 *	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * Total length is 30 bytes. The twenty bytes starting from offset 4
 * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 31-bit enabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	l	%r14,__LC_FTRACE_FUNC	# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 *   0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * The return points of the mcount/ftrace function have the same offset 26.
 * The 31-bit disabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	j	.+26			# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 *   0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * The j instruction branches to offset 30 to skip as many instructions
 * as possible.
 */
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	1f\n"
	"	j	0f\n"
	"	.fill	12,1,0x07\n"
	"0:	basr	%r14,%r14\n"
	"1:\n"
	"	.align	4\n"
	"ftrace_enable_insn:\n"
	"	l	%r14,"__stringify(__LC_FTRACE_FUNC)"\n");

#define FTRACE_INSN_SIZE	4

#endif /* CONFIG_64BIT */

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
			       MCOUNT_INSN_SIZE))
		return -EPERM;
	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
			       FTRACE_INSN_SIZE))
		return -EPERM;
	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *) data = 0;
	return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
					      unsigned long ip)
{
	struct ftrace_graph_ent trace;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		goto out;
	}
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the kernel code at the ftrace_graph_caller location. The instruction
 * there is a branch relative and save (bras) to prepare_ftrace_return. To
 * disable the call to prepare_ftrace_return we patch the bras offset to
 * point directly after the instruction. To enable the call we calculate
 * the original offset to prepare_ftrace_return and put it back.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned short offset;

	/* bras encodes its target as a signed halfword offset */
	offset = ((void *) prepare_ftrace_return -
		  (void *) ftrace_graph_caller) / 2;
	return probe_kernel_write(ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}

int ftrace_disable_ftrace_graph_caller(void)
{
	/* an offset of 0x0002 branches directly behind the bras */
	static unsigned short offset = 0x0002;

	return probe_kernel_write(ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}
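
A note on the arithmetic above: bras is a 4-byte RI-format instruction whose
16-bit immediate sits in bytes 2-3 and counts halfwords, which is why both
functions write a short at ftrace_graph_caller + 2 and why the enable path
divides the byte distance by two. A sketch of the two states (the register
used by the bras lives in mcount.S and is assumed here):

/*
 * bras	%rX,<target>		# a7 X5 iiii, iiii = (target - insn) / 2
 * enabled:  iiii = (prepare_ftrace_return - ftrace_graph_caller) / 2
 * disabled: iiii = 0x0002	# branch to insn + 4, the next instruction,
 *				# so prepare_ftrace_return is never called
 */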

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
v4.6
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include "entry.h"

/*
 * The mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. Only the first instruction will be patched
 * by ftrace_make_call / ftrace_make_nop.
 * The enabled ftrace code block looks like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The ftrace function gets called with a non-standard C function call
 * ABI where r0 contains the return address. It is also expected that the
 * called function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't directly jump to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 * The return point of the ftrace function has offset 24, so execution
 * continues after the mcount block.
 * The disabled ftrace code block looks like this:
 * >	jg	.+24			# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 * If gcc's hotpatch feature is used, the original and also the disabled
 * function prologue contain only a single six-byte instruction and look
 * like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the code gets patched like above and afterwards looks
 * like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 */

unsigned long ftrace_plt;

static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
{
#ifdef CC_USING_HOTPATCH
	/* brcl 0,0 */
	insn->opc = 0xc004;
	insn->disp = 0;
#else
	/* stg r14,8(r15) */
	insn->opc = 0xe3e0;
	insn->disp = 0xf0080024;
#endif
}

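ftrace_generate_nop_insn() and ftrace_generate_call_insn(), used by
ftrace_make_nop() and ftrace_make_call() below, are defined in
arch/s390/include/asm/ftrace.h rather than in this file. A minimal sketch of
what they encode, assuming the struct ftrace_insn layout from that header
(a 16-bit opcode followed by a 32-bit signed displacement); the in-tree
definitions may differ in detail:

/* Sketch only; the real helpers live in arch/s390/include/asm/ftrace.h. */
static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
	/* jg .+24, i.e. brcl 15,... with the displacement in halfwords */
	insn->opc = 0xc0f4;
	insn->disp = MCOUNT_INSN_SIZE / 2;
}

static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
					     unsigned long ip)
{
	unsigned long target;

	/* brasl %r0,ftrace_caller; module code branches via ftrace_plt */
	target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR;
	insn->opc = 0xc005;
	insn->disp = (target - ip) / 2;
}
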
static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	if (insn->opc == BREAKPOINT_INSTRUCTION)
		return 1;
#endif
	return 0;
}

static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	insn->opc = BREAKPOINT_INSTRUCTION;
	insn->disp = KPROBE_ON_FTRACE_NOP;
#endif
}

static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	insn->opc = BREAKPOINT_INSTRUCTION;
	insn->disp = KPROBE_ON_FTRACE_CALL;
#endif
}
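
Together with the sketch above, these helpers imply four possible states for
the six patched bytes; a summary (BREAKPOINT_INSTRUCTION and the
KPROBE_ON_FTRACE_* constants come from the s390 kprobes headers):

/*
 * opc				disp			state
 * 0xc0f4			MCOUNT_INSN_SIZE / 2	jg .+24, tracing off
 * 0xc005			to ftrace_caller/plt	brasl, tracing on
 * BREAKPOINT_INSTRUCTION	KPROBE_ON_FTRACE_NOP	kprobe over a nop
 * BREAKPOINT_INSTRUCTION	KPROBE_ON_FTRACE_CALL	kprobe over a call
 */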

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	if (addr == MCOUNT_ADDR) {
		/* Initial code replacement */
		ftrace_generate_orig_insn(&orig);
		ftrace_generate_nop_insn(&new);
	} else if (is_kprobe_on_ftrace(&old)) {
		/*
		 * If we find a breakpoint instruction, a kprobe has been
		 * placed at the beginning of the function. We write the
		 * constant KPROBE_ON_FTRACE_NOP into the remaining four
		 * bytes of the original instruction so that the kprobes
		 * handler can execute a nop if it reaches this breakpoint.
		 */
		ftrace_generate_kprobe_call_insn(&orig);
		ftrace_generate_kprobe_nop_insn(&new);
	} else {
		/* Replace ftrace call with a nop. */
		ftrace_generate_call_insn(&orig, rec->ip);
		ftrace_generate_nop_insn(&new);
	}
	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	if (is_kprobe_on_ftrace(&old)) {
		/*
		 * If we find a breakpoint instruction, a kprobe has been
		 * placed at the beginning of the function. We write the
		 * constant KPROBE_ON_FTRACE_CALL into the remaining four
		 * bytes of the original instruction so that the kprobes
		 * handler can execute a brasl if it reaches this breakpoint.
		 */
		ftrace_generate_kprobe_nop_insn(&orig);
		ftrace_generate_kprobe_call_insn(&new);
	} else {
		/* Replace nop with an ftrace call. */
		ftrace_generate_nop_insn(&orig);
		ftrace_generate_call_insn(&new, rec->ip);
	}
	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

static int __init ftrace_plt_init(void)
{
	unsigned int *ip;

	ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");
	ip = (unsigned int *) ftrace_plt;
	ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
	ip[1] = 0x100a0004;
	ip[2] = 0x07f10000;
	ip[3] = FTRACE_ADDR >> 32;
	ip[4] = FTRACE_ADDR & 0xffffffff;
	set_memory_ro(ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);
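
Decoded, the five words written above form a 12-byte code stub followed by an
8-byte address literal; the byte layout, for reference (offsets within the
allocated page):

/*
 * offset  bytes		  instruction
 *  0	   0d 10		  basr	%r1,0		# r1 = ftrace_plt + 2
 *  2	   e3 10 10 0a 00 04	  lg	%r1,10(%r1)	# literal at offset 12
 *  8	   07 f1		  br	%r1		# jump to FTRACE_ADDR
 * 10	   00 00		  (padding)
 * 12	   FTRACE_ADDR		  8-byte target read by the lg above
 */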

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
	struct ftrace_graph_ent trace;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The instruction
 * there is a branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero and
 * turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	u8 op = 0x04; /* set mask field to zero */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	u8 op = 0xf4; /* set mask field to all ones */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	return 0;
}
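
Patching a single byte is sufficient because in the branch-relative formats
the condition mask occupies the high nibble of the instruction's second byte,
so the two states differ only in that byte. A sketch, assuming the 6-byte
brcl form:

/*
 * enabled:  c0 04 xx xx xx xx	brcl	0,<offset>	# never taken: a nop
 * disabled: c0 f4 xx xx xx xx	brcl	15,<offset>	# always taken: jg
 * The <offset> bytes stay untouched; only byte 1 is rewritten.
 */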

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */