v5.9
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched with a NOP until they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/stop_machine.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/set_memory.h>
#include <asm/patch.h>

#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xf85deb04	/* pop.w {lr} */
#else
#define	NOP		0xe8bd4000	/* pop {lr} */
#endif
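
/*
 * Note that this "NOP" is not a real no-op: with -pg the compiler emits a
 * "push {lr}; bl __gnu_mcount_nc" pair at each function entry, so when the
 * call is patched out, the replacement instruction must still pop the lr
 * that was just pushed to keep the stack balanced.
 */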

#ifdef CONFIG_DYNAMIC_FTRACE

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

void arch_ftrace_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
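
/*
 * With the old mcount ABI gone (compare the v4.17 listing below), every
 * call site uses the same NOP and no per-record address fixup is needed,
 * so the next two helpers are trivial.
 */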
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	return addr;
}

int ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	/* Make sure any TLB misses during machine stop are cleared. */
	flush_tlb_all();
	return 0;
}

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return arm_gen_branch_link(pc, addr);
}

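/*
 * Patch a single instruction. "old" is converted from the canonical
 * encoding to memory order so it can be compared with what
 * copy_from_kernel_nofault() reads back; "new" stays canonical because
 * __patch_text() does its own conversion and cache maintenance. Callers
 * pass validate=false only for known patch sites inside the trampolines.
 */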
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
		old = __opcode_to_mem_thumb32(old);
	else
		old = __opcode_to_mem_arm(old);

	if (validate) {
		if (copy_from_kernel_nofault(&replaced, (void *)pc,
				MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	__patch_text((void *)pc, new);

	return 0;
}

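/*
 * ftrace_call (and ftrace_regs_call) label the call instruction inside the
 * ftrace_caller/ftrace_regs_caller trampolines in entry-ftrace.S. Switching
 * tracer callbacks means rewriting that one branch-and-link; validation is
 * skipped because the old contents depend on whichever callback was
 * installed before.
 */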
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret) {
		pc = (unsigned long)&ftrace_regs_call;
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(rec);

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, adjust_address(rec, old_addr));

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

#endif

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

	old = ftrace_call_replace(ip, adjust_address(rec, addr));
	new = ftrace_nop_replace(rec);
	ret = ftrace_modify_code(ip, old, new, true);

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
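/*
 * On function entry (via the mcount hook) the graph tracer replaces the
 * saved return address (*parent) with return_to_handler so that the traced
 * function "returns" into the tracer; the real return address is recorded
 * by function_graph_enter(), and restored here if that call refuses the
 * trace.
 */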
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = return_hooker;

	if (function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = old;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
extern unsigned long ftrace_graph_regs_call;
extern void ftrace_graph_regs_caller(void);

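/*
 * Turning the graph tracer on or off flips the ftrace_graph_call site (and,
 * with CONFIG_DYNAMIC_FTRACE_WITH_REGS, ftrace_graph_regs_call) inside the
 * trampoline between a "mov r0, r0" NOP and a branch to the graph caller.
 */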
static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}

static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
				     ftrace_graph_regs_caller,
				     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
v4.17
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched with a NOP until they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/stop_machine.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/set_memory.h>

#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xf85deb04	/* pop.w {lr} */
#else
#define	NOP		0xe8bd4000	/* pop {lr} */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	set_kernel_text_rw();
	ftrace_modify_all_code(*command);
	set_kernel_text_ro();

	return 0;
}

void arch_ftrace_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

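/*
 * Kernels built with older toolchains use the original mcount ABI, whose
 * call sites are a plain "bl mcount" with no preceding push of lr; they
 * need the "mov r0, r0" NOP and must be redirected to the old-ABI entry
 * points. rec->arch.old_mcount records which flavour a call site uses; it
 * is set in ftrace_make_nop() below when patching with the new-style NOP
 * fails validation.
 */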
#ifdef CONFIG_OLD_MCOUNT
#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)

#define	OLD_NOP		0xe1a00000	/* mov r0, r0 */

static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return rec->arch.old_mcount ? OLD_NOP : NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	if (!rec->arch.old_mcount)
		return addr;

	if (addr == MCOUNT_ADDR)
		addr = OLD_MCOUNT_ADDR;
	else if (addr == FTRACE_ADDR)
		addr = OLD_FTRACE_ADDR;

	return addr;
}
#else
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	return addr;
}
#endif

int ftrace_arch_code_modify_prepare(void)
{
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	/* Make sure any TLB misses during machine stop are cleared. */
	flush_tlb_all();
	return 0;
}

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return arm_gen_branch_link(pc, addr);
}

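/*
 * This older version writes the instruction with probe_kernel_write() and
 * flushes the icache by hand, relying on the prepare/post_process hooks and
 * __ftrace_modify_code() above to make kernel and module text writable for
 * the duration of the update; the v5.9 version instead goes through
 * __patch_text(), which handles both.
 */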
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		old = __opcode_to_mem_thumb32(old);
		new = __opcode_to_mem_thumb32(new);
	} else {
		old = __opcode_to_mem_arm(old);
		new = __opcode_to_mem_arm(new);
	}

	if (validate) {
		if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret) {
		pc = (unsigned long)&ftrace_regs_call;
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

#ifdef CONFIG_OLD_MCOUNT
	if (!ret) {
		pc = (unsigned long)&ftrace_call_old;
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(rec);

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, adjust_address(rec, old_addr));

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

#endif

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

	old = ftrace_call_replace(ip, adjust_address(rec, addr));
	new = ftrace_nop_replace(rec);
	ret = ftrace_modify_code(ip, old, new, true);

#ifdef CONFIG_OLD_MCOUNT
	if (ret == -EINVAL && addr == MCOUNT_ADDR) {
		rec->arch.old_mcount = true;

		old = ftrace_call_replace(ip, adjust_address(rec, addr));
		new = ftrace_nop_replace(rec);
		ret = ftrace_modify_code(ip, old, new, true);
	}
#endif

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
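/*
 * The graph-entry path is still open-coded here: it fills in a struct
 * ftrace_graph_ent, asks ftrace_graph_entry() whether this function should
 * be traced, and pushes the real return address with
 * ftrace_push_return_trace(), undoing the *parent redirection if either
 * step declines. In v5.9 this sequence is consolidated into
 * function_graph_enter().
 */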
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;
	unsigned long old;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = return_hooker;

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer, NULL);
	if (err == -EBUSY) {
		*parent = old;
		return;
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
extern unsigned long ftrace_graph_regs_call;
extern void ftrace_graph_regs_caller(void);

static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}

static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
				     ftrace_graph_regs_caller,
				     enable);
#endif

#ifdef CONFIG_OLD_MCOUNT
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
					     ftrace_graph_caller_old,
					     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */