Linux kernel source: arch/parisc/kernel/ftrace.c

Two versions of this file are reproduced below for comparison:
first as of kernel v5.9, then as of kernel v6.9.4.

v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Code for tracing calls in Linux kernel.
  4 * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
  5 *
  6 * based on code for x86 which is:
  7 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  8 *
  9 * future possible enhancements:
 10 *	- add CONFIG_STACK_TRACER
 11 */
 12
 13#include <linux/init.h>
 14#include <linux/ftrace.h>
 15#include <linux/uaccess.h>
 16#include <linux/kprobes.h>
 17#include <linux/ptrace.h>
 
 18
 19#include <asm/assembly.h>
 20#include <asm/sections.h>
 21#include <asm/ftrace.h>
 22#include <asm/patch.h>
 23
 24#define __hot __attribute__ ((__section__ (".text.hot")))
 25
 26#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 
 27/*
 28 * Hook the return address and push it in the stack of return addrs
 29 * in current thread info.
 30 */
 31static void __hot prepare_ftrace_return(unsigned long *parent,
 32					unsigned long self_addr)
 33{
 34	unsigned long old;
 35	extern int parisc_return_to_handler;
 36
 37	if (unlikely(ftrace_graph_is_dead()))
 38		return;
 39
 40	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 41		return;
 42
 43	old = *parent;
 44
 45	if (!function_graph_enter(old, self_addr, 0, NULL))
 46		/* activate parisc_return_to_handler() as return point */
 47		*parent = (unsigned long) &parisc_return_to_handler;
 48}
 49#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 50
/*
 * Common C-level ftrace entry point — presumably reached from the arch's
 * assembly ftrace stub (TODO confirm against arch/parisc assembly).
 * @parent is the traced function's return address, @self_addr the traced
 * function itself, and @org_sp_gr3 the original stack pointer, used below
 * to locate the saved %rp slot on the stack.
 */
void notrace __hot ftrace_function_trampoline(unsigned long parent,
				unsigned long self_addr,
				unsigned long org_sp_gr3,
				struct pt_regs *regs)
{
#ifndef CONFIG_DYNAMIC_FTRACE
	/* without DYNAMIC_FTRACE the tracer hook needs a local declaration */
	extern ftrace_func_t ftrace_trace_function;
#endif
	extern struct ftrace_ops *function_trace_op;

	/* Call the registered function tracer, unless it is only the stub. */
	if (function_trace_op->flags & FTRACE_OPS_FL_ENABLED &&
	    ftrace_trace_function != ftrace_stub)
		ftrace_trace_function(self_addr, parent,
				function_trace_op, regs);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/*
	 * Graph tracing is considered active when either the return hook or
	 * the entry hook differs from its stub.
	 */
	if (dereference_function_descriptor(ftrace_graph_return) !=
	    dereference_function_descriptor(ftrace_stub) ||
	    ftrace_graph_entry != ftrace_graph_entry_stub) {
		unsigned long *parent_rp;

		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}
 83
 84#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Nothing to patch: ftrace_function_trampoline() above checks the graph
 * hooks at runtime on every call, so enabling needs no code modification.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	return 0;
}
 89
/*
 * Counterpart of ftrace_enable_ftrace_graph_caller(); likewise a no-op
 * because the trampoline tests the graph hooks dynamically.
 */
int ftrace_disable_ftrace_graph_caller(void)
{
	return 0;
}
 94#endif
 95
 96#ifdef CONFIG_DYNAMIC_FTRACE
 97
/* Arch hook for dynamic-ftrace initialization; nothing to set up here. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
/*
 * No-op in this version: ftrace_function_trampoline() reads the global
 * ftrace_trace_function pointer directly, so there is no arch call site
 * to re-patch when the active tracer changes.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}
106
/*
 * Retarget a patched call site from @old_addr to @addr.
 * NOTE(review): returns success without touching the site; presumably
 * fine because every site funnels into the common trampoline — confirm
 * against how ftrace_make_call() embeds the target address.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	return 0;
}
112
113unsigned long ftrace_call_adjust(unsigned long addr)
114{
115	return addr+(FTRACE_PATCHABLE_FUNCTION_SIZE-1)*4;
116}
117
/*
 * Patch the NOP pad ending at @rec->ip into a small in-line trampoline
 * that saves %r1 and branches to @addr (the target address is embedded
 * in the instruction stream and loaded indirectly — see the encodings).
 * Returns 0 on success, -EINVAL if the site no longer contains NOPs,
 * or the error from reading the site.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	u32 *tramp;
	int size, ret, i;
	void *ip;

#ifdef CONFIG_64BIT
	/* strip the function descriptor to get the real code address */
	unsigned long addr2 =
		(unsigned long)dereference_function_descriptor((void *)addr);

	u32 ftrace_trampoline[] = {
		0x73c10208, /* std,ma r1,100(sp) */
		0x0c2110c1, /* ldd -10(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		addr2 >> 32,
		addr2 & 0xffffffff,
		0xe83f1fd7, /* b,l,n .-14,r1 */
	};

	/* variant used when the embedded 64-bit target would be misaligned */
	u32 ftrace_trampoline_unaligned[] = {
		addr2 >> 32,
		addr2 & 0xffffffff,
		0x37de0200, /* ldo 100(sp),sp */
		0x73c13e01, /* std r1,-100(sp) */
		0x34213ff9, /* ldo -4(r1),r1 */
		0x50213fc1, /* ldd -20(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		0xe83f1fcf, /* b,l,n .-20,r1 */
	};

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);
#else
	u32 ftrace_trampoline[] = {
		(u32)addr,
		0x6fc10080, /* stw,ma r1,40(sp) */
		0x48213fd1, /* ldw -18(r1),r1 */
		0xe820c002, /* bv,n r0(r1) */
		0xe83f1fdf, /* b,l,n .-c,r1 */
	};
#endif

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);

	size = sizeof(ftrace_trampoline);
	tramp = ftrace_trampoline;

#ifdef CONFIG_64BIT
	/* pick the unaligned variant when rec->ip is not 8-byte aligned */
	if (rec->ip & 0x4) {
		size = sizeof(ftrace_trampoline_unaligned);
		tramp = ftrace_trampoline_unaligned;
	}
#endif

	/* the patch area ends one word past rec->ip (see ftrace_call_adjust) */
	ip = (void *)(rec->ip + 4 - size);

	ret = copy_from_kernel_nofault(insn, ip, size);
	if (ret)
		return ret;

	/* refuse to patch unless the whole area still holds NOPs */
	for (i = 0; i < size / 4; i++) {
		if (insn[i] != INSN_NOP)
			return -EINVAL;
	}

	__patch_text_multiple(ip, tramp, size);
	return 0;
}
188
/*
 * Undo ftrace_make_call(): restore NOPs over the in-line trampoline
 * that ends at @rec->ip.
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	int i;

	/* build a buffer full of NOP instructions */
	for (i = 0; i < ARRAY_SIZE(insn); i++)
		insn[i] = INSN_NOP;

	/* NOP the live entry word first, then the preceding pad words */
	__patch_text((void *)rec->ip, INSN_NOP);
	__patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
			      insn, sizeof(insn)-4);
	return 0;
}
203#endif
204
205#ifdef CONFIG_KPROBES_ON_FTRACE
/*
 * ftrace callback servicing kprobes placed on ftrace locations.
 * Simulates a breakpoint at @ip by steering the parisc instruction
 * address queue (iaoq) in @regs around the probed word.
 */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);

	/* nothing registered (or disabled) at this address */
	if (unlikely(!p) || kprobe_disabled(p))
		return;

	/* another kprobe is already being handled; count this one as missed */
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		return;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	/* present the probed address as the current PC to the handlers */
	regs->iaoq[0] = ip;
	regs->iaoq[1] = ip + 4;

	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		/* pre_handler did not divert control: step past the probe */
		regs->iaoq[0] = ip + 4;
		regs->iaoq[1] = ip + 8;

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
}
239NOKPROBE_SYMBOL(kprobe_ftrace_handler);
240
/* No single-step slot is needed for ftrace-based kprobes on parisc. */
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
246#endif
The same file, arch/parisc/kernel/ftrace.c, as of kernel v6.9.4:
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Code for tracing calls in Linux kernel.
  4 * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
  5 *
  6 * based on code for x86 which is:
  7 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  8 *
  9 * future possible enhancements:
 10 *	- add CONFIG_STACK_TRACER
 11 */
 12
 13#include <linux/init.h>
 14#include <linux/ftrace.h>
 15#include <linux/uaccess.h>
 16#include <linux/kprobes.h>
 17#include <linux/ptrace.h>
 18#include <linux/jump_label.h>
 19
 20#include <asm/assembly.h>
 21#include <asm/sections.h>
 22#include <asm/ftrace.h>
 23#include <asm/patch.h>
 24
 25#define __hot __section(".text.hot")
 26
 27#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 28static DEFINE_STATIC_KEY_FALSE(ftrace_graph_enable);
 29
 30/*
 31 * Hook the return address and push it in the stack of return addrs
 32 * in current thread info.
 33 */
 34static void __hot prepare_ftrace_return(unsigned long *parent,
 35					unsigned long self_addr)
 36{
 37	unsigned long old;
 38	extern int parisc_return_to_handler;
 39
 40	if (unlikely(ftrace_graph_is_dead()))
 41		return;
 42
 43	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 44		return;
 45
 46	old = *parent;
 47
 48	if (!function_graph_enter(old, self_addr, 0, NULL))
 49		/* activate parisc_return_to_handler() as return point */
 50		*parent = (unsigned long) &parisc_return_to_handler;
 51}
 52#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 53
 54static ftrace_func_t ftrace_func;
 55
/*
 * Common C-level ftrace entry point — presumably reached from the arch's
 * assembly ftrace stub (TODO confirm against arch/parisc assembly).
 * @parent is the traced function's return address, @self_addr the traced
 * function itself, and @org_sp_gr3 the original stack pointer, used below
 * to locate the saved %rp slot on the stack.
 */
asmlinkage void notrace __hot ftrace_function_trampoline(unsigned long parent,
				unsigned long self_addr,
				unsigned long org_sp_gr3,
				struct ftrace_regs *fregs)
{
	extern struct ftrace_ops *function_trace_op;

	/* dispatch to whatever tracer ftrace_update_ftrace_func() installed */
	ftrace_func(self_addr, parent, function_trace_op, fregs);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* key toggled by ftrace_{enable,disable}_ftrace_graph_caller() */
	if (static_branch_unlikely(&ftrace_graph_enable)) {
		unsigned long *parent_rp;

		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}
 80
 81#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER)
/* Turn on the graph-tracing branch in ftrace_function_trampoline(). */
int ftrace_enable_ftrace_graph_caller(void)
{
	static_key_enable(&ftrace_graph_enable.key);
	return 0;
}
 87
 88int ftrace_disable_ftrace_graph_caller(void)
 89{
 90	static_key_enable(&ftrace_graph_enable.key);
 91	return 0;
 92}
 93#endif
 94
 95#ifdef CONFIG_DYNAMIC_FTRACE
 
 
 
 
 
/* Install the tracer callback invoked by ftrace_function_trampoline(). */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}
101
/*
 * Retarget a patched call site from @old_addr to @addr.
 * NOTE(review): returns success without touching the site; presumably
 * fine because every site funnels into the common trampoline — confirm
 * against how ftrace_make_call() embeds the target address.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	return 0;
}
107
108unsigned long ftrace_call_adjust(unsigned long addr)
109{
110	return addr+(FTRACE_PATCHABLE_FUNCTION_SIZE-1)*4;
111}
112
/*
 * Patch the NOP pad ending at @rec->ip into a small in-line trampoline
 * that saves %r1 and branches to @addr (the target address is embedded
 * in the instruction stream and loaded indirectly — see the encodings).
 * Returns 0 on success, -EINVAL if the site no longer contains NOPs,
 * or the error from reading the site.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	u32 *tramp;
	int size, ret, i;
	void *ip;

#ifdef CONFIG_64BIT
	/* strip the function descriptor to get the real code address */
	unsigned long addr2 =
		(unsigned long)dereference_function_descriptor((void *)addr);

	u32 ftrace_trampoline[] = {
		0x73c10208, /* std,ma r1,100(sp) */
		0x0c2110c1, /* ldd -10(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		addr2 >> 32,
		addr2 & 0xffffffff,
		0xe83f1fd7, /* b,l,n .-14,r1 */
	};

	/* variant used when the embedded 64-bit target would be misaligned */
	u32 ftrace_trampoline_unaligned[] = {
		addr2 >> 32,
		addr2 & 0xffffffff,
		0x37de0200, /* ldo 100(sp),sp */
		0x73c13e01, /* std r1,-100(sp) */
		0x34213ff9, /* ldo -4(r1),r1 */
		0x50213fc1, /* ldd -20(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		0xe83f1fcf, /* b,l,n .-20,r1 */
	};

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);
#else
	u32 ftrace_trampoline[] = {
		(u32)addr,
		0x6fc10080, /* stw,ma r1,40(sp) */
		0x48213fd1, /* ldw -18(r1),r1 */
		0xe820c002, /* bv,n r0(r1) */
		0xe83f1fdf, /* b,l,n .-c,r1 */
	};
#endif

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);

	size = sizeof(ftrace_trampoline);
	tramp = ftrace_trampoline;

#ifdef CONFIG_64BIT
	/* pick the unaligned variant when rec->ip is not 8-byte aligned */
	if (rec->ip & 0x4) {
		size = sizeof(ftrace_trampoline_unaligned);
		tramp = ftrace_trampoline_unaligned;
	}
#endif

	/* the patch area ends one word past rec->ip (see ftrace_call_adjust) */
	ip = (void *)(rec->ip + 4 - size);

	ret = copy_from_kernel_nofault(insn, ip, size);
	if (ret)
		return ret;

	/* refuse to patch unless the whole area still holds NOPs */
	for (i = 0; i < size / 4; i++) {
		if (insn[i] != INSN_NOP)
			return -EINVAL;
	}

	__patch_text_multiple(ip, tramp, size);
	return 0;
}
183
/*
 * Undo ftrace_make_call(): restore NOPs over the in-line trampoline
 * that ends at @rec->ip.
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	int i;

	/* build a buffer full of NOP instructions */
	for (i = 0; i < ARRAY_SIZE(insn); i++)
		insn[i] = INSN_NOP;

	/* NOP the live entry word first, then the preceding pad words */
	__patch_text((void *)rec->ip, INSN_NOP);
	__patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
			      insn, sizeof(insn)-4);
	return 0;
}
198#endif
199
200#ifdef CONFIG_KPROBES_ON_FTRACE
/*
 * ftrace callback servicing kprobes placed on ftrace locations.
 * Simulates a breakpoint at @ip by steering the parisc instruction
 * address queue (iaoq) in *regs around the probed word.
 */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	/* guard against recursive ftrace callbacks on this CPU */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	/* NOTE(review): regs is used unchecked below — presumably fregs
	 * always carries full pt_regs here; confirm for this arch. */
	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	/* nothing registered (or disabled) at this address */
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	/* another kprobe is already being handled; count this one as missed */
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	/* present the probed address as the current PC to the handlers */
	regs->iaoq[0] = ip;
	regs->iaoq[1] = ip + 4;

	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		/* pre_handler did not divert control: step past the probe */
		regs->iaoq[0] = ip + 4;
		regs->iaoq[1] = ip + 8;

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
244NOKPROBE_SYMBOL(kprobe_ftrace_handler);
245
/* No single-step slot is needed for ftrace-based kprobes on parisc. */
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
251#endif