v4.6
 
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

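/*
 * MCOUNT_OFFSET_INSNS is the number of instructions the "b 1f"
 * placeholder at a module call site has to skip: 5 when the compiler
 * also emits "move $12, ra_address", 4 otherwise. The calling-site
 * layout comment further down shows the matching 0x10000005/0x10000004
 * branch encodings.
 */
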
#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#endif

/*
 * Check if the address is in kernel space
 *
 * A clone of core_kernel_text() from kernel/extable.c, minus the
 * init_kernel_text() check, since ftrace does not trace functions in
 * init sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
	if (ip >= (unsigned long)_stext &&
	    ip <= (unsigned long)_etext)
		return 1;
	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/*  op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

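/*
 * Worked example of the encoding (illustrative, not from the source):
 * jal keeps bits 27..2 of the target and takes the top four bits from
 * the PC region it executes in, hence the JUMP_RANGE_MASK above.
 *
 *   INSN_JAL(0x80123456)
 *     == 0x0c000000 | ((0x80123456 >> 2) & 0x03ffffff)
 *     == 0x0c000000 | 0x00048d15
 *     == 0x0c048d15
 */
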
static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* la v1, _mcount */
	v1 = 3;
	buf = (u32 *)&insn_la_mcount[0];
	UASM_i_LA(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;
	mm_segment_t old_fs;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	old_fs = get_fs();
	set_fs(get_ds());
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}
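/*
 * Why the get_fs()/set_fs(get_ds()) dance around the flush: this is an
 * inference, not something the source states, but flush_icache_range()
 * can be backed by protected cache ops that respect the current
 * address-space limit, so KERNEL_DS must be in force while flushing a
 * kernel-text address.
 */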

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;
	mm_segment_t old_fs;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	old_fs = get_fs();
	set_fs(get_ds());
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}

static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
				 unsigned int new_code2)
{
	int faulted;
	mm_segment_t old_fs;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	old_fs = get_fs();
	set_fs(get_ds());
	flush_icache_range(ip, ip + 8);
	set_fs(old_fs);

	return 0;
}
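/*
 * A note on the write order (inferred from the call sites below, not
 * stated in the source): ftrace_modify_code_2() is used when NOPping a
 * site out and writes the first word before the second, while the "2r"
 * variant is used when re-activating a site and writes in reverse.
 * Storing the second half of the two-word "la v1, _mcount" sequence
 * before its lui means a CPU racing through the call site sees either
 * the old "b 1f" or the fully formed pair, never a half-patched la.
 */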
#endif

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 *  sub sp, sp, 8	--> nop  (CONFIG_32BIT)
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *				    1: offset = 5 instructions
 * 2.2 For other situations
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *				    1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)

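/*
 * Sanity check on the encoding (reasoning added here, not in the source):
 * "b" is "beq $0, $0, offset", opcode 0x04 in bits 31..26, so the word is
 * 0x10000000 with the signed instruction offset in the low 16 bits. With
 * MCOUNT_OFFSET_INSNS == 4 this yields 0x10000004, i.e. branch to
 * PC + 4 + (4 << 2), matching the "b 1f (0x10000004)" in the layout above.
 */
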
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise a
	 * long call is.
	 */
	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32 bit MIPS platforms, gcc adds a stack adjust
	 * instruction in the delay slot after the branch to
	 * mcount and expects mcount to restore the sp on return.
	 * This is based on a legacy API and does nothing but
	 * waste instructions so it's being removed at runtime.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];

#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
						INSN_NOP : insn_la_mcount[1]);
#endif
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}
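/*
 * ftrace_call is the patchable slot inside ftrace_caller in the arch
 * mcount.S; rewriting it to "jal func" points every traced function at
 * the currently registered tracer callback.
 */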

int __init ftrace_dyn_arch_init(void)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP	((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP (0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
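/*
 * Decoding example (added for illustration): on 32-bit, "sw ra, 20(sp)"
 * assembles to 0xafbf0014 -- opcode 0x2b (sw) with rs = $29 (sp) and
 * rt = $31 (ra) in the top half, byte offset 20 in the low half. It
 * matches S_RA_SP in its top 16 bits, and (code & OFFSET_MASK) recovers
 * the stack offset the scan below is looking for.
 */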

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * For a module, move ip back past the instruction
	 * "lui v1, hi_16bit_of_mcount" (offset is 24); for the kernel, move
	 * back past the instruction "move at, ra" (offset is 16).
	 */
	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

	/*
	 * Search the text backwards until we find a non-store instruction
	 * or the "s{d,w} ra, offset(sp)" instruction
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where the
		 * ra is stored, then this is a leaf function and it does not
		 * store the ra on the stack
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address where the return address of
	 * the caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * in the stack address, so we "emulate" one in _mcount's stack space,
	 * and hijack it directly.
	 * For a non-leaf function, it does save the return address to its own
	 * stack space, so we can not hijack it directly, but need to find the
	 * real stack address, which is done by ftrace_get_parent_addr().
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, for a
	 * non-leaf function, the location of the return address will be saved
	 * to $12 for us.
	 * For a leaf function, it just puts a zero into $12, so we handle
	 * it in ftrace_graph_caller() of mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If we fail to get the stack address of the non-leaf function's
	 * ra, stop the function graph tracer and return
	 */
	if (parent_ra_addr == NULL)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
	    == -EBUSY) {
		*parent_ra_addr = old_parent_ra;
		return;
	}

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */

	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
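	/*
	 * Worked through (added reasoning, not in the source): with
	 * MCOUNT_INSN_SIZE == 4 and self_ra pointing past the jal/jalr delay
	 * slot, backing up 2 instructions in-kernel lands on the patched
	 * "jal _mcount" word, and backing up MCOUNT_OFFSET_INSNS + 1 in a
	 * module lands on the patched "lui v1, ..." -- in both cases the
	 * address recorded in the __mcount_loc section.
	 */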

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif

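/*
 * For reference (MIPS ABI constants, not spelled out here): the three
 * syscall ranges start at __NR_O32_Linux = 4000, __NR_64_Linux = 5000 and
 * __NR_N32_Linux = 6000, which is why each lookup below subtracts its
 * ABI's base before indexing the per-ABI table.
 */
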
#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
	if (nr >= __NR_64_Linux  && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long) &sys_ni_syscall;
}
#endif

#endif /* CONFIG_FTRACE_SYSCALLS */

v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/*  op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* la v1, _mcount */
	v1 = 3;
	buf = (u32 *)&insn_la_mcount[0];
	UASM_i_LA(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}
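/*
 * Note the contrast with the v4.6 body above: flush_icache_range() now
 * runs without the old address-limit (set_fs) juggling, and the flush
 * still covers the full 8-byte, two-instruction patch window.
 */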

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	flush_icache_range(ip, ip + 8);

	return 0;
}

static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
				 unsigned int new_code2)
{
	int faulted;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}
#endif

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 *  sub sp, sp, 8	--> nop  (CONFIG_32BIT)
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *				    1: offset = 5 instructions
 * 2.2 For other situations
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *				    1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise a
	 * long call is.
	 */
	new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32 bit MIPS platforms, gcc adds a stack adjust
	 * instruction in the delay slot after the branch to
	 * mcount and expects mcount to restore the sp on return.
	 * This is based on a legacy API and does nothing but
	 * waste instructions so it's being removed at runtime.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];

#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
						INSN_NOP : insn_la_mcount[1]);
#endif
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP	((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP (0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * For a module, move ip back past the instruction
	 * "lui v1, hi_16bit_of_mcount" (offset is 24); for the kernel, move
	 * back past the instruction "move at, ra" (offset is 16).
	 */
	ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);

	/*
	 * Search the text backwards until we find a non-store instruction
	 * or the "s{d,w} ra, offset(sp)" instruction
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where the
		 * ra is stored, then this is a leaf function and it does not
		 * store the ra on the stack
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address where the return address of
	 * the caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * in the stack address, so we "emulate" one in _mcount's stack space,
	 * and hijack it directly.
	 * For a non-leaf function, it does save the return address to its own
	 * stack space, so we can not hijack it directly, but need to find the
	 * real stack address, which is done by ftrace_get_parent_addr().
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, for a
	 * non-leaf function, the location of the return address will be saved
	 * to $12 for us.
	 * For a leaf function, it just puts a zero into $12, so we handle
	 * it in ftrace_graph_caller() of mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If we fail to get the stack address of the non-leaf function's
	 * ra, stop the function graph tracer and return
	 */
	if (parent_ra_addr == NULL)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */

	insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	self_ra -= (MCOUNT_INSN_SIZE * insns);

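	/*
	 * function_graph_enter() (which replaced the open-coded
	 * ftrace_push_return_trace()/ftrace_graph_entry() sequence seen in
	 * v4.6 above) pushes the shadow-stack entry and runs the entry
	 * filters; a non-zero return means the hook must be undone, so the
	 * original parent ra is written back.
	 */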
	if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
		*parent_ra_addr = old_parent_ra;

	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif

#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr < __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
	if (nr >= __NR_64_Linux  && nr < __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr < __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long) &sys_ni_syscall;
}
#endif

#endif /* CONFIG_FTRACE_SYSCALLS */