arch/sh/lib/mcount.S, the SuperH mcount/ftrace entry code, as of v3.5.6:
/*
 * arch/sh/lib/mcount.S
 *
 *  Copyright (C) 2008, 2009  Paul Mundt
 *  Copyright (C) 2008, 2009  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <asm/ftrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

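/*
 * Editor's note (not in the upstream file): MCOUNT_ENTER()/MCOUNT_LEAVE()
 * bracket every call out to the C trace code.  They save the SH argument
 * registers r4-r7 plus the procedure return register pr (five longwords,
 * 20 bytes), then set up the callback's arguments: r4 gets the word that
 * was on top of the stack on entry (the instrumented function's saved
 * return address) and r5 gets pr, the call site inside the instrumented
 * function.  MCOUNT_LEAVE() pops everything back and returns, restoring
 * r4 in the rts delay slot.
 */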
#define MCOUNT_ENTER()		\
	mov.l	r4, @-r15;	\
	mov.l	r5, @-r15;	\
	mov.l	r6, @-r15;	\
	mov.l	r7, @-r15;	\
	sts.l	pr, @-r15;	\
				\
	mov.l	@(20,r15),r4;	\
	sts	pr, r5

#define MCOUNT_LEAVE()		\
	lds.l	@r15+, pr;	\
	mov.l	@r15+, r7;	\
	mov.l	@r15+, r6;	\
	mov.l	@r15+, r5;	\
	rts;			\
	 mov.l	@r15+, r4

#ifdef CONFIG_STACK_DEBUG
/*
 * Perform diagnostic checks on the state of the kernel stack.
 *
 * Check for stack overflow. If there is less than 1KB free
 * then it has overflowed.
 *
 * Make sure the stack pointer contains a valid address. Valid
 * addresses for kernel stacks are anywhere after the bss
 * (after _ebss) and anywhere in init_thread_union (init_stack).
 */
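/*
 * Editor's note (not in the upstream file): SH "mov #imm, Rn" only accepts
 * an 8-bit signed immediate, which is why the macro below materialises
 * THREAD_SIZE and the STACK_WARN threshold from small constants with
 * shll8/shll2 shifts rather than loading them directly.
 */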
#define STACK_CHECK()					\
	mov	#(THREAD_SIZE >> 10), r0;		\
	shll8	r0;					\
	shll2	r0;					\
							\
	/* r1 = sp & (THREAD_SIZE - 1) */		\
	mov	#-1, r1;				\
	add	r0, r1;					\
	and	r15, r1;				\
							\
	mov	#TI_SIZE, r3;				\
	mov	#(STACK_WARN >> 8), r2;			\
	shll8	r2;					\
	add	r3, r2;					\
							\
	/* Is the stack overflowing? */			\
	cmp/hi	r2, r1;					\
	bf	stack_panic;				\
							\
	/* If sp > _ebss then we're OK. */		\
	mov.l	.L_ebss, r1;				\
	cmp/hi	r1, r15;				\
	bt	1f;					\
							\
	/* If sp < init_stack, we're not OK. */		\
	mov.l	.L_init_thread_union, r1;		\
	cmp/hs	r1, r15;				\
	bf	stack_panic;				\
							\
	/* If sp > init_stack && sp < _ebss, not OK. */	\
	add	r0, r1;					\
	cmp/hs	r1, r15;				\
	bt	stack_panic;				\
1:
#else
#define STACK_CHECK()
#endif /* CONFIG_STACK_DEBUG */

	.align 2
	.globl	_mcount
	.type	_mcount,@function
	.globl	mcount
	.type	mcount,@function
_mcount:
mcount:
	STACK_CHECK()

#ifndef CONFIG_FUNCTION_TRACER
	rts
	 nop
#else
#ifndef CONFIG_DYNAMIC_FTRACE
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub
#endif

	MCOUNT_ENTER()

#ifdef CONFIG_DYNAMIC_FTRACE
	.globl	mcount_call
mcount_call:
	mov.l	.Lftrace_stub, r6
#else
	mov.l	.Lftrace_trace_function, r6
	mov.l	ftrace_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace
	mov.l	@r6, r6
#endif

	jsr	@r6
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	mov.l   .Lftrace_graph_return, r6
	mov.l   .Lftrace_stub, r7
	cmp/eq  r6, r7
	bt      1f

	mov.l   .Lftrace_graph_caller, r0
	jmp     @r0
	 nop

1:
	mov.l	.Lftrace_graph_entry, r6
	mov.l	.Lftrace_graph_entry_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace

	mov.l   .Lftrace_graph_caller, r0
	jmp	@r0
	 nop

	.align 2
.Lftrace_graph_return:
	.long   ftrace_graph_return
.Lftrace_graph_entry:
	.long   ftrace_graph_entry
.Lftrace_graph_entry_stub:
	.long   ftrace_graph_entry_stub
.Lftrace_graph_caller:
	.long   ftrace_graph_caller
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl skip_trace
skip_trace:
	MCOUNT_LEAVE()

	.align 2
.Lftrace_trace_function:
	.long   ftrace_trace_function

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * NOTE: Do not move either ftrace_graph_call or ftrace_caller
 * as this will affect the calculation of GRAPH_INSN_OFFSET.
 */
	.globl ftrace_graph_call
ftrace_graph_call:
	mov.l	.Lskip_trace, r0
	jmp	@r0
	 nop

	.align 2
.Lskip_trace:
	.long	skip_trace
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl ftrace_caller
ftrace_caller:
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub

	MCOUNT_ENTER()

	.globl ftrace_call
ftrace_call:
	mov.l	.Lftrace_stub, r6
	jsr	@r6
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	bra	ftrace_graph_call
	 nop
#else
	MCOUNT_LEAVE()
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */

	.align 2
.Lfunction_trace_stop:
	.long	function_trace_stop

/*
 * NOTE: From here on the locations of the .Lftrace_stub label and
 * ftrace_stub itself are fixed. Adding additional data here will skew
 * the displacement for the memory table and break the block replacement.
 * Place new labels either after the ftrace_stub body, or before
 * ftrace_caller. You have been warned.
 */
.Lftrace_stub:
	.long	ftrace_stub

	.globl	ftrace_stub
ftrace_stub:
	rts
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl	ftrace_graph_caller
ftrace_graph_caller:
	mov.l	2f, r0
	mov.l	@r0, r0
	tst	r0, r0
	bt	1f

	mov.l	3f, r1
	jmp	@r1
	 nop
1:
	/*
	 * MCOUNT_ENTER() pushed 5 registers onto the stack, so
	 * the stack address containing our return address is
	 * r15 + 20.
	 */
	mov	#20, r0
	add	r15, r0
	mov	r0, r4

	mov.l	.Lprepare_ftrace_return, r0
	jsr	@r0
	 nop

	MCOUNT_LEAVE()

	.align 2
2:	.long	function_trace_stop
3:	.long	skip_trace
.Lprepare_ftrace_return:
	.long	prepare_ftrace_return

	.globl	return_to_handler
return_to_handler:
	/*
	 * Save the return values.
	 */
	mov.l	r0, @-r15
	mov.l	r1, @-r15

	mov	#0, r4

	mov.l	.Lftrace_return_to_handler, r0
	jsr	@r0
	 nop

	/*
	 * The return value from ftrace_return_to_handler is the real
	 * address that we should return to.
	 */
	lds	r0, pr
	mov.l	@r15+, r1
	rts
	 mov.l	@r15+, r0


	.align 2
.Lftrace_return_to_handler:
	.long	ftrace_return_to_handler
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_DEBUG
	.globl	stack_panic
stack_panic:
	mov.l	.Ldump_stack, r0
	jsr	@r0
	 nop

	mov.l	.Lpanic, r0
	jsr	@r0
	 mov.l	.Lpanic_s, r4

	rts
	 nop

	.align 2
.L_ebss:
	.long	_ebss
.L_init_thread_union:
	.long	init_thread_union
.Lpanic:
	.long	panic
.Lpanic_s:
	.long	.Lpanic_str
.Ldump_stack:
	.long	dump_stack

	.section	.rodata
	.align 2
.Lpanic_str:
	.string "Stack error"
#endif /* CONFIG_STACK_DEBUG */
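
For orientation before the v4.6 version below, here is a rough C-level sketch of what the v3.5.6 trampoline above is meant to do in the !CONFIG_DYNAMIC_FTRACE case. It is exposition only, not kernel code: the extern declarations stand in for the real kernel symbols, mcount_in_c is a hypothetical name, and the two arguments are whatever MCOUNT_ENTER() leaves in r4 and r5 for the callback. The real entry point has to be assembly because it is called from every traced function's prologue and must preserve that function's registers.

typedef void (*trace_fn_t)(unsigned long, unsigned long);

extern int function_trace_stop;			/* v3.5.x only; gone by v4.6  */
extern trace_fn_t ftrace_trace_function;	/* currently installed tracer */
extern void ftrace_stub(unsigned long, unsigned long);	/* "no tracer" stub   */

/* arg0/arg1 are the two values MCOUNT_ENTER() places in r4/r5. */
void mcount_in_c(unsigned long arg0, unsigned long arg1)
{
	/* STACK_CHECK() runs first when CONFIG_STACK_DEBUG is enabled. */

	if (function_trace_stop)		/* tracing globally stopped?  */
		return;

	if (ftrace_trace_function != (trace_fn_t)ftrace_stub)
		ftrace_trace_function(arg0, arg1);

	/*
	 * With CONFIG_FUNCTION_GRAPH_TRACER the trampoline then compares
	 * ftrace_graph_return and ftrace_graph_entry against their stub
	 * values and jumps to ftrace_graph_caller if either hook is set.
	 */
}
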
The same file as of v4.6, where the function_trace_stop checks have been dropped and the end of BSS is referenced through __bss_stop rather than _ebss:
/*
 * arch/sh/lib/mcount.S
 *
 *  Copyright (C) 2008, 2009  Paul Mundt
 *  Copyright (C) 2008, 2009  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <asm/ftrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#define MCOUNT_ENTER()		\
	mov.l	r4, @-r15;	\
	mov.l	r5, @-r15;	\
	mov.l	r6, @-r15;	\
	mov.l	r7, @-r15;	\
	sts.l	pr, @-r15;	\
				\
	mov.l	@(20,r15),r4;	\
	sts	pr, r5

#define MCOUNT_LEAVE()		\
	lds.l	@r15+, pr;	\
	mov.l	@r15+, r7;	\
	mov.l	@r15+, r6;	\
	mov.l	@r15+, r5;	\
	rts;			\
	 mov.l	@r15+, r4

#ifdef CONFIG_STACK_DEBUG
/*
 * Perform diagnostic checks on the state of the kernel stack.
 *
 * Check for stack overflow. If there is less than 1KB free
 * then it has overflowed.
 *
 * Make sure the stack pointer contains a valid address. Valid
 * addresses for kernel stacks are anywhere after the bss
 * (after __bss_stop) and anywhere in init_thread_union (init_stack).
 */
#define STACK_CHECK()					\
	mov	#(THREAD_SIZE >> 10), r0;		\
	shll8	r0;					\
	shll2	r0;					\
							\
	/* r1 = sp & (THREAD_SIZE - 1) */		\
	mov	#-1, r1;				\
	add	r0, r1;					\
	and	r15, r1;				\
							\
	mov	#TI_SIZE, r3;				\
	mov	#(STACK_WARN >> 8), r2;			\
	shll8	r2;					\
	add	r3, r2;					\
							\
	/* Is the stack overflowing? */			\
	cmp/hi	r2, r1;					\
	bf	stack_panic;				\
							\
	/* If sp > __bss_stop then we're OK. */		\
	mov.l	.L_ebss, r1;				\
	cmp/hi	r1, r15;				\
	bt	1f;					\
							\
	/* If sp < init_stack, we're not OK. */		\
	mov.l	.L_init_thread_union, r1;		\
	cmp/hs	r1, r15;				\
	bf	stack_panic;				\
							\
	/* If sp > init_stack && sp < __bss_stop, not OK. */	\
	add	r0, r1;					\
	cmp/hs	r1, r15;				\
	bt	stack_panic;				\
1:
#else
#define STACK_CHECK()
#endif /* CONFIG_STACK_DEBUG */

	.align 2
	.globl	_mcount
	.type	_mcount,@function
	.globl	mcount
	.type	mcount,@function
_mcount:
mcount:
	STACK_CHECK()

#ifndef CONFIG_FUNCTION_TRACER
	rts
	 nop
#else
	MCOUNT_ENTER()

#ifdef CONFIG_DYNAMIC_FTRACE
	.globl	mcount_call
mcount_call:
	mov.l	.Lftrace_stub, r6
#else
	mov.l	.Lftrace_trace_function, r6
	mov.l	ftrace_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace
	mov.l	@r6, r6
#endif

	jsr	@r6
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	mov.l   .Lftrace_graph_return, r6
	mov.l   .Lftrace_stub, r7
	cmp/eq  r6, r7
	bt      1f

	mov.l   .Lftrace_graph_caller, r0
	jmp     @r0
	 nop

1:
	mov.l	.Lftrace_graph_entry, r6
	mov.l	.Lftrace_graph_entry_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace

	mov.l   .Lftrace_graph_caller, r0
	jmp	@r0
	 nop

	.align 2
.Lftrace_graph_return:
	.long   ftrace_graph_return
.Lftrace_graph_entry:
	.long   ftrace_graph_entry
.Lftrace_graph_entry_stub:
	.long   ftrace_graph_entry_stub
.Lftrace_graph_caller:
	.long   ftrace_graph_caller
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl skip_trace
skip_trace:
	MCOUNT_LEAVE()

	.align 2
.Lftrace_trace_function:
	.long   ftrace_trace_function

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * NOTE: Do not move either ftrace_graph_call or ftrace_caller
 * as this will affect the calculation of GRAPH_INSN_OFFSET.
 */
	.globl ftrace_graph_call
ftrace_graph_call:
	mov.l	.Lskip_trace, r0
	jmp	@r0
	 nop

	.align 2
.Lskip_trace:
	.long	skip_trace
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl ftrace_caller
ftrace_caller:
	MCOUNT_ENTER()

	.globl ftrace_call
ftrace_call:
	mov.l	.Lftrace_stub, r6
	jsr	@r6
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	bra	ftrace_graph_call
	 nop
#else
	MCOUNT_LEAVE()
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */

	.align 2

/*
 * NOTE: From here on the locations of the .Lftrace_stub label and
 * ftrace_stub itself are fixed. Adding additional data here will skew
 * the displacement for the memory table and break the block replacement.
 * Place new labels either after the ftrace_stub body, or before
 * ftrace_caller. You have been warned.
 */
.Lftrace_stub:
	.long	ftrace_stub

	.globl	ftrace_stub
ftrace_stub:
	rts
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl	ftrace_graph_caller
ftrace_graph_caller:
	mov.l	2f, r1
	jmp	@r1
	 nop
1:
	/*
	 * MCOUNT_ENTER() pushed 5 registers onto the stack, so
	 * the stack address containing our return address is
	 * r15 + 20.
	 */
	mov	#20, r0
	add	r15, r0
	mov	r0, r4

	mov.l	.Lprepare_ftrace_return, r0
	jsr	@r0
	 nop

	MCOUNT_LEAVE()

	.align 2
2:	.long	skip_trace
.Lprepare_ftrace_return:
	.long	prepare_ftrace_return

	.globl	return_to_handler
return_to_handler:
	/*
	 * Save the return values.
	 */
	mov.l	r0, @-r15
	mov.l	r1, @-r15

	mov	#0, r4

	mov.l	.Lftrace_return_to_handler, r0
	jsr	@r0
	 nop

	/*
	 * The return value from ftrace_return_to_handler is the real
	 * address that we should return to.
	 */
	lds	r0, pr
	mov.l	@r15+, r1
	rts
	 mov.l	@r15+, r0


	.align 2
.Lftrace_return_to_handler:
	.long	ftrace_return_to_handler
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_DEBUG
	.globl	stack_panic
stack_panic:
	mov.l	.Ldump_stack, r0
	jsr	@r0
	 nop

	mov.l	.Lpanic, r0
	jsr	@r0
	 mov.l	.Lpanic_s, r4

	rts
	 nop

	.align 2
.L_init_thread_union:
	.long	init_thread_union
.L_ebss:
	.long	__bss_stop
.Lpanic:
	.long	panic
.Lpanic_s:
	.long	.Lpanic_str
.Ldump_stack:
	.long	dump_stack

	.section	.rodata
	.align 2
.Lpanic_str:
	.string "Stack error"
#endif /* CONFIG_STACK_DEBUG */
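
The function-graph half of the file works by rewriting the traced function's saved return address: ftrace_graph_caller passes the address of that stack slot (r15 + 20, as the comment in the listing notes) to prepare_ftrace_return, which redirects it at return_to_handler; when the traced function returns, return_to_handler saves r0/r1, asks ftrace_return_to_handler for the real return address, and jumps there. The sketch below compresses that idea into a single saved slot; the real kernel keeps a per-task stack of return addresses, and the _sketch names are illustrative, not kernel APIs.

extern void return_to_handler(void);	/* the asm trampoline defined above */

static unsigned long saved_ret;		/* real code: per-task return stack */

/* Stand-in for prepare_ftrace_return(): divert the traced function's return. */
void prepare_return_sketch(unsigned long *parent, unsigned long self)
{
	(void)self;			/* the real code also records the callee */
	saved_ret = *parent;		/* remember the real caller address      */
	*parent = (unsigned long)return_to_handler;	/* hijack the return    */
}

/* Stand-in for ftrace_return_to_handler(): return_to_handler loads this
 * result into pr and executes rts, sending control back to the real caller. */
unsigned long return_to_handler_sketch(void)
{
	return saved_ret;
}
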