v4.10.11
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include "entry.h"

/*
 * The mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. Only the first instruction will be patched
 * by ftrace_make_call / ftrace_make_nop.
 * The enabled ftrace code block looks like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't directly jump to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 * The return point of the ftrace function has offset 24, so execution
 * continues behind the mcount block.
 * The disabled ftrace code block looks like this:
 * >	jg	.+24			# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 * In case we use gcc's hotpatch feature the original and also the disabled
 * function prologue contain only a single six byte instruction and look
 * like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the code gets patched like above and afterwards looks
 * like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 */
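
/*
 * Worked example (illustrative): brasl %r0,ftrace_caller assembles to the
 * six bytes c0 05 <disp32>, where <disp32> is the signed displacement to
 * the target counted in halfwords. For a hypothetical function at 0x1000
 * and ftrace_caller at 0x3000 the displacement would be
 * (0x3000 - 0x1000) / 2 = 0x1000, giving c0 05 00 00 10 00. The disabled
 * jg .+24 is brcl 15,.+24, i.e. c0 f4 00 00 00 0c (24 / 2 = 12 halfwords).
 */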

unsigned long ftrace_plt;

static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
{
#ifdef CC_USING_HOTPATCH
	/* brcl 0,0 */
	insn->opc = 0xc004;
	insn->disp = 0;
#else
	/* stg r14,8(r15) */
	insn->opc = 0xe3e0;
	insn->disp = 0xf0080024;
#endif
}
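
/*
 * A minimal sketch of the matching generators from asm/ftrace.h (field
 * values inferred from the block comment above, not copied verbatim; with
 * CC_USING_HOTPATCH the nop is brcl 0,0 instead of jg .+24, and module
 * addresses branch to ftrace_plt rather than FTRACE_ADDR):
 *
 *	static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
 *	{
 *		insn->opc = 0xc0f4;			// jg .+24
 *		insn->disp = MCOUNT_INSN_SIZE / 2;	// halfword displacement
 *	}
 *
 *	static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
 *						     unsigned long ip)
 *	{
 *		insn->opc = 0xc005;			// brasl %r0,ftrace_caller
 *		insn->disp = (FTRACE_ADDR - ip) / 2;
 *	}
 */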

static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	if (insn->opc == BREAKPOINT_INSTRUCTION)
		return 1;
#endif
	return 0;
}

static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	insn->opc = BREAKPOINT_INSTRUCTION;
	insn->disp = KPROBE_ON_FTRACE_NOP;
#endif
}

static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	insn->opc = BREAKPOINT_INSTRUCTION;
	insn->disp = KPROBE_ON_FTRACE_CALL;
#endif
}
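
/*
 * Worked example (illustrative; the marker values are assumptions, see
 * BREAKPOINT_INSTRUCTION in asm/kprobes.h and the KPROBE_ON_FTRACE_*
 * constants in asm/ftrace.h): with a kprobe armed on an enabled ftrace
 * site the six bytes at the function entry might read
 *	00 02 00 00 00 01	# breakpoint + KPROBE_ON_FTRACE_CALL marker
 * The breakpoint opcode triggers the kprobe trap; the marker left in the
 * displacement field tells the handler whether to emulate the brasl or
 * fall through as a nop afterwards.
 */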

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	if (addr == MCOUNT_ADDR) {
		/* Initial code replacement */
		ftrace_generate_orig_insn(&orig);
		ftrace_generate_nop_insn(&new);
	} else if (is_kprobe_on_ftrace(&old)) {
		/*
		 * If we find a breakpoint instruction, a kprobe has been
		 * placed at the beginning of the function. We write the
		 * constant KPROBE_ON_FTRACE_NOP into the remaining four
		 * bytes of the original instruction so that the kprobes
		 * handler can execute a nop if it reaches this breakpoint.
		 */
		ftrace_generate_kprobe_call_insn(&orig);
		ftrace_generate_kprobe_nop_insn(&new);
	} else {
		/* Replace ftrace call with a nop. */
		ftrace_generate_call_insn(&orig, rec->ip);
		ftrace_generate_nop_insn(&new);
	}
	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	if (is_kprobe_on_ftrace(&old)) {
		/*
		 * If we find a breakpoint instruction, a kprobe has been
		 * placed at the beginning of the function. We write the
		 * constant KPROBE_ON_FTRACE_CALL into the remaining four
		 * bytes of the original instruction so that the kprobes
		 * handler can execute a brasl if it reaches this breakpoint.
		 */
		ftrace_generate_kprobe_nop_insn(&orig);
		ftrace_generate_kprobe_call_insn(&new);
	} else {
		/* Replace nop with an ftrace call. */
		ftrace_generate_nop_insn(&orig);
		ftrace_generate_call_insn(&new, rec->ip);
	}
	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

static int __init ftrace_plt_init(void)
{
	unsigned int *ip;

	ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");
	ip = (unsigned int *) ftrace_plt;
	ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
	ip[1] = 0x100a0004;
	ip[2] = 0x07f10000;
	ip[3] = FTRACE_ADDR >> 32;
	ip[4] = FTRACE_ADDR & 0xffffffff;
	set_memory_ro(ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);
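
/*
 * Decoding the PLT words above (illustrative): laid out in memory, the
 * five 32-bit words read
 *	0d 10			basr	%r1,0		# %r1 = address of the lg
 *	e3 10 10 0a 00 04	lg	%r1,10(%r1)	# load the literal below
 *	07 f1			br	%r1		# branch to ftrace_caller
 *	00 00						# padding
 *	xx xx xx xx xx xx xx xx				# FTRACE_ADDR, 64 bit
 * The literal sits 10 bytes behind the lg's own address, matching the
 * displacement of 10 encoded in the lg instruction.
 */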

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
	struct ftrace_graph_ent trace;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
				     NULL) == -EBUSY)
		goto out;
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The instruction
 * there is a branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero and
 * turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
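
/*
 * Worked example (illustrative): brc mask,target encodes as a7 m4 <disp16>,
 * with the condition mask in the high nibble of the second byte. Patching
 * that nibble to 0 (a7 04 ...) yields "branch never", i.e. a nop; patching
 * it to 0xf (a7 f4 ...) yields "branch always". The same trick works for
 * the six byte brcl (c0 04 ... vs. c0 f4 ...).
 */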

int ftrace_enable_ftrace_graph_caller(void)
{
	u8 op = 0x04; /* set mask field to zero */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	u8 op = 0xf4; /* set mask field to all ones */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
v6.2

// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"

/*
 * To generate the function prologue, either gcc's hotpatch feature
 * (since gcc 4.8) or a combination of the -pg -mrecord-mcount -mnop-mcount
 * -mfentry flags (since gcc 9 / clang 10) is used.
 * In both cases the original and also the disabled function prologue contain
 * only a single six byte instruction and look like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the code gets patched like above and afterwards looks
 * like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *
 * The instruction will be patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't directly jump to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 */
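
/*
 * Worked example (illustrative): brcl 0,0 encodes as c0 04 00 00 00 00 --
 * exactly the six bytes checked against the orig[] array in
 * ftrace_init_nop() below. With mask 0 the branch is never taken, so the
 * instruction is a free six byte slot that can later be rewritten into
 * brasl %r0,... (c0 05 <disp32>) or an always-taken brcl (c0 f4 <disp32>).
 */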

void *ftrace_func __read_mostly = ftrace_stub;

struct ftrace_insn {
	u16 opc;
	s32 disp;
} __packed;

asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_br:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_br_end:\n"
);
 59
 60#ifdef CONFIG_EXPOLINE
 61asm(
 62	"	.align 16\n"
 63	"ftrace_shared_hotpatch_trampoline_exrl:\n"
 64	"	lmg	%r0,%r1,2(%r1)\n"
 65	"	exrl	%r0,0f\n"
 66	"	j	.\n"
 67	"0:	br	%r1\n"
 68	"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
 69);
 70#endif /* CONFIG_EXPOLINE */
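
/*
 * Walk-through of the shared trampoline (illustrative; the exact field
 * offsets are defined by struct ftrace_hotpatch_trampoline in ftrace.h):
 * the patched function entry jumps to a per-function trampoline whose only
 * instruction is brasl %r1,<shared code above>. brasl deposits the address
 * following itself in %r1, so lmg %r0,%r1,2(%r1) loads the two 64-bit
 * literals stored behind the brasl: the return point inside the traced
 * function into %r0 (the non-standard ABI slot for the return address) and
 * the interceptor (FTRACE_ADDR) into %r1, which br %r1 then branches to.
 * The exrl variant executes the br out of line, the expoline pattern used
 * for indirect branches when CONFIG_EXPOLINE mitigations are active.
 */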

#ifdef CONFIG_MODULES
static char *ftrace_plt;
#endif /* CONFIG_MODULES */

static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
	const char *tstart, *tend;

	tstart = ftrace_shared_hotpatch_trampoline_br;
	tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
	if (!nospec_disable) {
		tstart = ftrace_shared_hotpatch_trampoline_exrl;
		tend = ftrace_shared_hotpatch_trampoline_exrl_end;
	}
#endif /* CONFIG_EXPOLINE */
	if (end)
		*end = tend;
	return tstart;
}

bool ftrace_need_init_nop(void)
{
	return true;
}

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
		__ftrace_hotpatch_trampolines_start;
	static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
	static struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_hotpatch_trampoline **next_trampoline;
	struct ftrace_hotpatch_trampoline *trampolines_end;
	struct ftrace_hotpatch_trampoline tmp;
	struct ftrace_insn *insn;
	const char *shared;
	s32 disp;

	BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
		     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

	next_trampoline = &next_vmlinux_trampoline;
	trampolines_end = __ftrace_hotpatch_trampolines_end;
	shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
	if (mod) {
		next_trampoline = &mod->arch.next_trampoline;
		trampolines_end = mod->arch.trampolines_end;
		shared = ftrace_plt;
	}
#endif

	if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
		return -ENOMEM;
	trampoline = (*next_trampoline)++;

	/* Check for the compiler-generated fentry nop (brcl 0, .). */
	if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
		return -EINVAL;

	/* Generate the trampoline. */
	tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
	tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
	tmp.interceptor = FTRACE_ADDR;
	tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
	s390_kernel_write(trampoline, &tmp, sizeof(tmp));

	/* Generate a jump to the trampoline. */
	disp = ((char *)trampoline - (char *)rec->ip) / 2;
	insn = (struct ftrace_insn *)rec->ip;
	s390_kernel_write(&insn->disp, &disp, sizeof(disp));

	return 0;
}
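
/*
 * Worked example (illustrative): for a traced function at 0x1000 whose
 * trampoline slot sits at 0x5000, the init step writes the halfword
 * displacement (0x5000 - 0x1000) / 2 = 0x2000 into the brcl at the
 * function entry, turning it into brcl 0,<trampoline> -- still a nop, but
 * one that already aims at the trampoline. Enabling tracing then only has
 * to flip the mask nibble (0 -> 0xf) to make the branch live.
 */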

static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_insn insn;
	s64 disp;
	u16 opc;

	if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
		return ERR_PTR(-EFAULT);
	disp = (s64)insn.disp * 2;
	trampoline = (void *)(rec->ip + disp);
	if (get_kernel_nofault(opc, &trampoline->brasl_opc))
		return ERR_PTR(-EFAULT);
	if (opc != 0xc015)
		return ERR_PTR(-EINVAL);
	return trampoline;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	u64 old;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	if (get_kernel_nofault(old, &trampoline->interceptor))
		return -EFAULT;
	if (old != old_addr)
		return -EINVAL;
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	return 0;
}

static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
	u16 old;
	u8 op;

	if (get_kernel_nofault(old, addr))
		return -EFAULT;
	if (old != expected)
		return -EINVAL;
	/* set mask field to all ones or zeroes */
	op = enable ? 0xf4 : 0x04;
	s390_kernel_write((char *)addr + 1, &op, sizeof(op));
	return 0;
}
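
/*
 * Worked example (illustrative): with expected == 0xc004 and enable set,
 * the brcl at addr changes from c0 04 xx xx xx xx (branch never = nop) to
 * c0 f4 xx xx xx xx (branch always), leaving the displacement untouched.
 * Only the single mask byte at addr + 1 is rewritten, so other CPUs
 * observe either the old or the new instruction, never a torn one.
 */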

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	/* Expect brcl 0xf,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	/* Expect brcl 0x0,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

void ftrace_arch_code_modify_post_process(void)
{
	/*
	 * Flush any pre-fetched instructions on all
	 * CPUs to make the new code visible.
	 */
	text_poke_sync_lock();
}

#ifdef CONFIG_MODULES

static int __init ftrace_plt_init(void)
{
	const char *start, *end;

	ftrace_plt = module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");

	start = ftrace_shared_hotpatch_trampoline(&end);
	memcpy(ftrace_plt, start, end - start);
	set_memory_ro((unsigned long)ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
				    unsigned long ip)
{
	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	if (!function_graph_enter(ra, ip, 0, (void *) sp))
		ra = (unsigned long) return_to_handler;
out:
	return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The instruction
 * there is a branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero and
 * turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	int rc;

	/* Expect brc 0xf,... */
	rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
	if (rc)
		return rc;
	text_poke_sync_lock();
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	int rc;

	/* Expect brc 0x0,... */
	rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
	if (rc)
		return rc;
	text_poke_sync_lock();
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
		struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (!regs || unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	instruction_pointer_set(regs, ip);

	if (!p->pre_handler || !p->pre_handler(p, regs)) {

		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
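
/*
 * Usage sketch (illustrative; the probe target is hypothetical): with
 * CONFIG_KPROBES_ON_FTRACE, a kprobe whose address falls on a function's
 * ftrace location is dispatched through kprobe_ftrace_handler() above
 * instead of taking a breakpoint trap, e.g.:
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "do_sys_open",	// hypothetical target
 *	};
 *	...
 *	ret = register_kprobe(&kp);
 */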

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif