v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"

/*
 * To generate the function prologue either gcc's hotpatch feature (since
 * gcc 4.8) or a combination of the -pg -mrecord-mcount -mnop-mcount
 * -mfentry flags (since gcc 9 / clang 10) is used.
 * In both cases the original and the disabled function prologue contain
 * only a single six-byte instruction and look like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the marked (>) instruction gets patched and afterwards
 * looks like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *
 * The instruction is patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't jump directly to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 */

void *ftrace_func __read_mostly = ftrace_stub;
struct ftrace_insn {
	u16 opc;
	s32 disp;
} __packed;
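
struct ftrace_insn mirrors the six-byte brcl/brasl encoding: a 16-bit opcode/mask field followed by a signed 32-bit displacement counted in halfwords. As a minimal userspace sketch (not part of the kernel source; the fields are assembled by hand to stay endian-independent), here is how the compiler-generated nop maps onto these fields:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* brcl 0,0 -- the compiler-generated fentry nop: 0xc0 0x04 + 4 zero bytes */
	const unsigned char nop[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
	/* s390 is big-endian, so assemble the fields byte by byte */
	uint16_t opc = (nop[0] << 8) | nop[1];
	int32_t disp = (int32_t)(((uint32_t)nop[2] << 24) |
				 ((uint32_t)nop[3] << 16) |
				 ((uint32_t)nop[4] << 8) | nop[5]);

	printf("opc=0x%04x disp=%d\n", opc, disp);	/* opc=0xc004 disp=0 */
	return 0;
}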

#ifdef CONFIG_MODULES
static char *ftrace_plt;
#endif /* CONFIG_MODULES */

static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
	const char *tstart, *tend;

	tstart = ftrace_shared_hotpatch_trampoline_br;
	tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
	if (!nospec_disable) {
		tstart = ftrace_shared_hotpatch_trampoline_exrl;
		tend = ftrace_shared_hotpatch_trampoline_exrl_end;
	}
#endif /* CONFIG_EXPOLINE */
	if (end)
		*end = tend;
	return tstart;
}

bool ftrace_need_init_nop(void)
{
	return true;
}

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
		__ftrace_hotpatch_trampolines_start;
	static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
	static struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_hotpatch_trampoline **next_trampoline;
	struct ftrace_hotpatch_trampoline *trampolines_end;
	struct ftrace_hotpatch_trampoline tmp;
	struct ftrace_insn *insn;
	const char *shared;
	s32 disp;

	BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
		     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

	next_trampoline = &next_vmlinux_trampoline;
	trampolines_end = __ftrace_hotpatch_trampolines_end;
	shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
	if (mod) {
		next_trampoline = &mod->arch.next_trampoline;
		trampolines_end = mod->arch.trampolines_end;
		shared = ftrace_plt;
	}
#endif

	if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
		return -ENOMEM;
	trampoline = (*next_trampoline)++;

	/* Check for the compiler-generated fentry nop (brcl 0, .). */
	if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
		return -EINVAL;

	/* Generate the trampoline. */
	tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
	tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
	tmp.interceptor = FTRACE_ADDR;
	tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
	s390_kernel_write(trampoline, &tmp, sizeof(tmp));

	/* Generate a jump to the trampoline. */
	disp = ((char *)trampoline - (char *)rec->ip) / 2;
	insn = (struct ftrace_insn *)rec->ip;
	s390_kernel_write(&insn->disp, &disp, sizeof(disp));

	return 0;
}
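
The displacement arithmetic above relies on s390 relative branches encoding their target as a signed offset in halfwords (units of two bytes) from the start of the instruction. A self-contained sketch of the round trip, with made-up addresses; ftrace_init_nop() does the encode step, ftrace_get_trampoline() below does the decode:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ip = 0x10000;		/* hypothetical brcl address */
	uint64_t trampoline = 0x10800;	/* hypothetical trampoline address */

	/* encode: byte distance divided by 2, as in ftrace_init_nop() */
	int32_t disp = (int32_t)((int64_t)(trampoline - ip) / 2);

	/* decode: scale back up, as in ftrace_get_trampoline() */
	uint64_t target = ip + (int64_t)disp * 2;

	printf("disp=%d halfwords, target=0x%llx\n",
	       disp, (unsigned long long)target);	/* 1024, 0x10800 */
	return 0;
}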

static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_insn insn;
	s64 disp;
	u16 opc;

	if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
		return ERR_PTR(-EFAULT);
	disp = (s64)insn.disp * 2;
	trampoline = (void *)(rec->ip + disp);
	if (get_kernel_nofault(opc, &trampoline->brasl_opc))
		return ERR_PTR(-EFAULT);
	if (opc != 0xc015)
		return ERR_PTR(-EINVAL);
	return trampoline;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	u64 old;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	if (get_kernel_nofault(old, &trampoline->interceptor))
		return -EFAULT;
	if (old != old_addr)
		return -EINVAL;
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	return 0;
}

static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
	u16 old;
	u8 op;

	if (get_kernel_nofault(old, addr))
		return -EFAULT;
	if (old != expected)
		return -EINVAL;
	/* set mask field to all ones or zeroes */
	op = enable ? 0xf4 : 0x04;
	s390_kernel_write((char *)addr + 1, &op, sizeof(op));
	return 0;
}
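
The trick above exploits the brcl/brc encoding: the condition mask sits in the high nibble of byte 1, so mask 0 (never taken) is a nop and mask 15 (always taken) is an unconditional branch, and flipping a single byte toggles the call site. A userspace sketch of the same one-byte patch on a buffer, assuming the 0xc004/0xc0f4 opcode values used by the callers below:

#include <stdint.h>
#include <stdio.h>

static int patch_branch_mask(unsigned char *insn, uint16_t expected, int enable)
{
	uint16_t old = (insn[0] << 8) | insn[1];

	if (old != expected)
		return -1;
	/* set the mask field to all ones (branch) or all zeroes (nop) */
	insn[1] = enable ? 0xf4 : 0x04;
	return 0;
}

int main(void)
{
	/* brcl 0,... -- a disabled call site */
	unsigned char insn[6] = { 0xc0, 0x04, 0x00, 0x00, 0x04, 0x00 };

	if (!patch_branch_mask(insn, 0xc004, 1))
		printf("now brcl 15: %02x %02x\n", insn[0], insn[1]);
	return 0;
}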

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	/* Expect brcl 0xf,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	/* Expect brcl 0x0,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

void ftrace_arch_code_modify_post_process(void)
{
	/*
	 * Flush any pre-fetched instructions on all
	 * CPUs to make the new code visible.
	 */
	text_poke_sync_lock();
}

#ifdef CONFIG_MODULES

static int __init ftrace_plt_init(void)
{
	const char *start, *end;

	ftrace_plt = module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");

	start = ftrace_shared_hotpatch_trampoline(&end);
	memcpy(ftrace_plt, start, end - start);
	set_memory_rox((unsigned long)ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#endif /* CONFIG_MODULES */
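
ftrace_plt_init() carves out one executable page holding a copy of the shared trampoline, so module code, which may sit out of direct branch range of the kernel image, still has a reachable branch target. A rough userspace analogue (not kernel code), with mmap() standing in for module_alloc(), mprotect() for set_memory_rox(), and a dummy byte pattern instead of the real trampoline:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const unsigned char payload[] = { 0x07, 0x07, 0x07, 0x07 }; /* dummy */
	long pagesz = sysconf(_SC_PAGESIZE);
	void *plt = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (plt == MAP_FAILED)
		return 1;
	memcpy(plt, payload, sizeof(payload));
	/* read-only + executable, like set_memory_rox() */
	if (mprotect(plt, pagesz, PROT_READ | PROT_EXEC))
		return 1;
	printf("plt at %p\n", plt);
	return 0;
}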

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it on the stack of return addresses
 * in the current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
				    unsigned long ip)
{
	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	if (!function_graph_enter(ra, ip, 0, (void *) sp))
		ra = (unsigned long) return_to_handler;
out:
	return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The instruction
 * there is a branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero and
 * turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	int rc;

	/* Expect brc 0xf,... */
	rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
	if (rc)
		return rc;
	text_poke_sync_lock();
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	int rc;

	/* Expect brc 0x0,... */
	rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
	if (rc)
		return rc;
	text_poke_sync_lock();
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
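
Note the inversion relative to ftrace_make_call/ftrace_make_nop: for the graph caller, patching the mask to zero (a nop) enables the feature, because execution then falls through into the graph code block instead of branching over it. A tiny sketch with brc, the 4-byte sibling of brcl (opcode byte 0xa7, mask again in the high nibble of byte 1; the displacement is made up):

#include <stdio.h>

int main(void)
{
	/* brc 15,.+32 -- graph code block is skipped (disabled) */
	unsigned char brc[4] = { 0xa7, 0xf4, 0x00, 0x10 };

	brc[1] = 0x04;	/* enable: mask 0 turns the branch into a nop */
	printf("opcode now 0x%02x%02x -> falls through into graph code\n",
	       brc[0], brc[1]);
	return 0;
}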

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
		struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (!regs || unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	instruction_pointer_set(regs, ip);

	if (!p->pre_handler || !p->pre_handler(p, regs)) {

		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif

v3.1

/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_64BIT
#define MCOUNT_OFFSET_RET 12
#else
#define MCOUNT_OFFSET_RET 22
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_disable_code(void);
void ftrace_enable_insn(void);

#ifdef CONFIG_64BIT
/*
 * The 64-bit mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	larl	%r1,<&counter>		# offset 6
 * >	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. The middle two instructions of the mcount
 * block get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 64-bit enabled ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%r15)		# offset 18
 * The return points of the mcount/ftrace function have the same offset 18.
 * The 64-bit disabled ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	jg	.+18			# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%r15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	jg	0f\n"
	"	lgr	%r0,%r0\n"
	"	basr	%r14,%r1\n"
	"0:\n"
	"	.align	4\n"
	"ftrace_enable_insn:\n"
	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n");

#define FTRACE_INSN_SIZE	6

#else /* CONFIG_64BIT */
/*
 * The 31-bit mcount code looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	bras	%r1,0f			# offset 4
 * >	.long	_mcount			# offset 8
 * >	.long	<&counter>		# offset 12
 * > 0:	l	%r14,0(%r1)		# offset 16
 * >	l	%r1,4(%r1)		# offset 20
 *	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * Total length is 30 bytes. The twenty bytes starting from offset 4
 * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 31-bit enabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	l	%r14,__LC_FTRACE_FUNC	# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 *   0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * The return points of the mcount/ftrace function have the same offset 26.
 * The 31-bit disabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	j	.+26			# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 *   0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * The j instruction branches to offset 30 to skip as many instructions
 * as possible.
 */
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	1f\n"
	"	j	0f\n"
	"	.fill	12,1,0x07\n"
	"0:	basr	%r14,%r14\n"
	"1:\n"
	"	.align	4\n"
	"ftrace_enable_insn:\n"
	"	l	%r14,"__stringify(__LC_FTRACE_FUNC)"\n");

#define FTRACE_INSN_SIZE	4

#endif /* CONFIG_64BIT */

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
			       MCOUNT_INSN_SIZE))
		return -EPERM;
	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
			       FTRACE_INSN_SIZE))
		return -EPERM;
	return 0;
}
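
The asymmetry between the two writes follows from the layout described in the comment blocks above: disabling rewrites the whole middle of the mcount block (MCOUNT_INSN_SIZE bytes) with ftrace_disable_code, after which the lgr/basr tail is already in place, so enabling only has to swap in the leading load, i.e. FTRACE_INSN_SIZE bytes. A userspace simulation of the 64-bit case; the jg/lgr/basr bytes follow the standard s390 encodings, while the lg displacement is left zero as a placeholder:

#include <stdio.h>
#include <string.h>

/* jg .+18 (9 halfwords), lgr %r0,%r0, basr %r14,%r1 */
static const unsigned char disable_code[12] = {
	0xc0, 0xf4, 0x00, 0x00, 0x00, 0x09,
	0xb9, 0x04, 0x00, 0x00,
	0x0d, 0xe1,
};
/* lg %r1,__LC_FTRACE_FUNC -- displacement zeroed as a placeholder */
static const unsigned char enable_insn[6] = {
	0xe3, 0x10, 0x00, 0x00, 0x00, 0x04,
};

int main(void)
{
	unsigned char site[12];

	/* ftrace_make_nop(): overwrite the whole middle of the block */
	memcpy(site, disable_code, sizeof(disable_code));

	/* ftrace_make_call(): replace only the jg; lgr/basr stay put */
	memcpy(site, enable_insn, sizeof(enable_insn));

	printf("leading opcode byte: 0x%02x (lg)\n", site[0]);
	return 0;
}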

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *) data = 0;
	return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it on the stack of return addresses
 * in the current thread info.
 */
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
					      unsigned long ip)
{
	struct ftrace_graph_ent trace;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		goto out;
	}
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the kernel code at the ftrace_graph_caller location. The instruction
 * there is a branch relative and save (bras) to prepare_ftrace_return. To
 * disable the call to prepare_ftrace_return we patch the bras offset to point
 * directly after the instruction. To enable the call we calculate
 * the original offset to prepare_ftrace_return and put it back.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned short offset;

	offset = ((void *) prepare_ftrace_return -
		  (void *) ftrace_graph_caller) / 2;
	return probe_kernel_write(ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}

int ftrace_disable_ftrace_graph_caller(void)
{
	static unsigned short offset = 0x0002;

	return probe_kernel_write(ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
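
For reference, the bras instruction patched above is 4 bytes long with a signed 16-bit halfword offset in bytes 2-3, which is why both writes target ftrace_graph_caller + 2 and why 0x0002 (two halfwords, i.e. the next instruction) disables the call. A userspace sketch of both directions, with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* bras %r14,... -- offset bytes still zero */
	unsigned char bras[4] = { 0xa7, 0xe5, 0x00, 0x00 };
	uint64_t caller = 0x20000;	/* hypothetical ftrace_graph_caller */
	uint64_t target = 0x20400;	/* hypothetical prepare_ftrace_return */
	uint16_t offset;

	/* enable: restore the original offset to prepare_ftrace_return */
	offset = (uint16_t)((target - caller) / 2);
	bras[2] = offset >> 8;
	bras[3] = offset & 0xff;
	printf("enabled:  offset 0x%04x halfwords\n", offset);

	/* disable: two halfwords forward = directly after the bras */
	offset = 0x0002;
	bras[2] = offset >> 8;
	bras[3] = offset & 0xff;
	printf("disabled: offset 0x%04x\n", offset);
	return 0;
}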