v3.5.6
/*
 * arch/sh/lib/mcount.S
 *
 *  Copyright (C) 2008, 2009  Paul Mundt
 *  Copyright (C) 2008, 2009  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <asm/ftrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

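/*
 * MCOUNT_ENTER() saves the four argument registers and pr (five
 * longwords, 20 bytes), then sets up the tracer's arguments: r4 is
 * loaded with the return address that sat on top of the stack at
 * entry (now at r15 + 20) and r5 with pr, the mcount call site.
 */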
#define MCOUNT_ENTER()		\
	mov.l	r4, @-r15;	\
	mov.l	r5, @-r15;	\
	mov.l	r6, @-r15;	\
	mov.l	r7, @-r15;	\
	sts.l	pr, @-r15;	\
				\
	mov.l	@(20,r15),r4;	\
	sts	pr, r5

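/*
 * MCOUNT_LEAVE() undoes MCOUNT_ENTER(); the final pop of r4 sits
 * in the delay slot of the rts.
 */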
#define MCOUNT_LEAVE()		\
	lds.l	@r15+, pr;	\
	mov.l	@r15+, r7;	\
	mov.l	@r15+, r6;	\
	mov.l	@r15+, r5;	\
	rts;			\
	 mov.l	@r15+, r4

#ifdef CONFIG_STACK_DEBUG
/*
 * Perform diagnostic checks on the state of the kernel stack.
 *
 * Check for stack overflow. If there is less than 1KB free
 * then it has overflowed.
 *
 * Make sure the stack pointer contains a valid address. Valid
 * addresses for kernel stacks are anywhere after the bss
 * (after _ebss) and anywhere in init_thread_union (init_stack).
 */
#define STACK_CHECK()					\
	mov	#(THREAD_SIZE >> 10), r0;		\
	shll8	r0;					\
	shll2	r0;					\
							\
	/* r1 = sp & (THREAD_SIZE - 1) */		\
	mov	#-1, r1;				\
	add	r0, r1;					\
	and	r15, r1;				\
							\
	mov	#TI_SIZE, r3;				\
	mov	#(STACK_WARN >> 8), r2;			\
	shll8	r2;					\
	add	r3, r2;					\
							\
	/* Is the stack overflowing? */			\
	cmp/hi	r2, r1;					\
	bf	stack_panic;				\
							\
	/* If sp > _ebss then we're OK. */		\
	mov.l	.L_ebss, r1;				\
	cmp/hi	r1, r15;				\
	bt	1f;					\
							\
	/* If sp < init_stack, we're not OK. */		\
	mov.l	.L_init_thread_union, r1;		\
	cmp/hs	r1, r15;				\
	bf	stack_panic;				\
							\
	/* If sp > init_stack && sp < _ebss, not OK. */	\
	add	r0, r1;					\
	cmp/hs	r1, r15;				\
	bt	stack_panic;				\
1:
#else
#define STACK_CHECK()
#endif /* CONFIG_STACK_DEBUG */

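/*
 * Roughly, STACK_CHECK() implements the following C logic (a
 * sketch for reference, not generated code; sp stands for r15):
 *
 *	if ((sp & (THREAD_SIZE - 1)) <= STACK_WARN + TI_SIZE)
 *		stack_panic();
 *	if (sp > _ebss)
 *		return;
 *	if (sp < init_stack || sp >= init_stack + THREAD_SIZE)
 *		stack_panic();
 */

/*
 * Both the _mcount and mcount entry points are provided, as
 * toolchains differ over which symbol profiling calls are emitted
 * against; the two share one body.  Note that the stack check runs
 * even when the function tracer itself is compiled out.
 */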
	.align 2
	.globl	_mcount
	.type	_mcount,@function
	.globl	mcount
	.type	mcount,@function
_mcount:
mcount:
	STACK_CHECK()

#ifndef CONFIG_FUNCTION_TRACER
	rts
	 nop
#else
#ifndef CONFIG_DYNAMIC_FTRACE
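	/*
	 * With static ftrace, bail out to ftrace_stub early when
	 * tracing has been stopped via function_trace_stop.
	 */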
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub
#endif

	MCOUNT_ENTER()

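	/*
	 * Select the tracer to call through r6.  With dynamic ftrace
	 * the plain mcount entry only ever reaches ftrace_stub (live
	 * call sites are patched to enter ftrace_caller below); with
	 * static ftrace, ftrace_trace_function is checked against the
	 * stub so the jsr stays cheap until a real tracer is
	 * registered.
	 */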
#ifdef CONFIG_DYNAMIC_FTRACE
	.globl	mcount_call
mcount_call:
	mov.l	.Lftrace_stub, r6
#else
	mov.l	.Lftrace_trace_function, r6
	mov.l	ftrace_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace
	mov.l	@r6, r6
#endif

	jsr	@r6
	 nop

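	/*
	 * Decide whether the function graph tracer needs to run by
	 * checking the graph return and entry hooks against their
	 * stubs; ftrace_graph_caller is entered when a real hook
	 * appears to be installed.
	 */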
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	mov.l   .Lftrace_graph_return, r6
	mov.l   .Lftrace_stub, r7
	cmp/eq  r6, r7
	bt      1f

	mov.l   .Lftrace_graph_caller, r0
	jmp     @r0
	 nop

1:
	mov.l	.Lftrace_graph_entry, r6
	mov.l	.Lftrace_graph_entry_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace

	mov.l   .Lftrace_graph_caller, r0
	jmp	@r0
	 nop

	.align 2
.Lftrace_graph_return:
	.long   ftrace_graph_return
.Lftrace_graph_entry:
	.long   ftrace_graph_entry
.Lftrace_graph_entry_stub:
	.long   ftrace_graph_entry_stub
.Lftrace_graph_caller:
	.long   ftrace_graph_caller
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl skip_trace
skip_trace:
	MCOUNT_LEAVE()

	.align 2
.Lftrace_trace_function:
	.long   ftrace_trace_function

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * NOTE: Do not move either ftrace_graph_call or ftrace_caller
 * as this will affect the calculation of GRAPH_INSN_OFFSET.
 */
	.globl ftrace_graph_call
ftrace_graph_call:
	mov.l	.Lskip_trace, r0
	jmp	@r0
	 nop

	.align 2
.Lskip_trace:
	.long	skip_trace
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

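/*
 * With dynamic ftrace, call sites are patched to enter
 * ftrace_caller rather than mcount once tracing is enabled, and
 * the call at ftrace_call below is itself rewritten to invoke the
 * registered tracer instead of ftrace_stub.
 */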
	.globl ftrace_caller
ftrace_caller:
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub

	MCOUNT_ENTER()

	.globl ftrace_call
ftrace_call:
	mov.l	.Lftrace_stub, r6
	jsr	@r6
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	bra	ftrace_graph_call
	 nop
#else
	MCOUNT_LEAVE()
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */

	.align 2
.Lfunction_trace_stop:
	.long	function_trace_stop

/*
 * NOTE: From here on the locations of the .Lftrace_stub label and
 * ftrace_stub itself are fixed. Adding additional data here will skew
 * the displacement for the memory table and break the block replacement.
 * Place new labels either after the ftrace_stub body, or before
 * ftrace_caller. You have been warned.
 */
.Lftrace_stub:
	.long	ftrace_stub

	.globl	ftrace_stub
ftrace_stub:
	rts
	 nop

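/*
 * ftrace_graph_caller lets the graph tracer see function exits:
 * unless tracing has been stopped via function_trace_stop,
 * prepare_ftrace_return() is handed the stack slot holding the
 * instrumented function's return address so that it can be
 * redirected to return_to_handler.
 */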
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl	ftrace_graph_caller
ftrace_graph_caller:
	mov.l	2f, r0
	mov.l	@r0, r0
	tst	r0, r0
	bt	1f

	mov.l	3f, r1
	jmp	@r1
	 nop
1:
	/*
	 * MCOUNT_ENTER() pushed 5 registers onto the stack, so
	 * the stack address containing our return address is
	 * r15 + 20.
	 */
	mov	#20, r0
	add	r15, r0
	mov	r0, r4

	mov.l	.Lprepare_ftrace_return, r0
	jsr	@r0
	 nop

	MCOUNT_LEAVE()

	.align 2
2:	.long	function_trace_stop
3:	.long	skip_trace
.Lprepare_ftrace_return:
	.long	prepare_ftrace_return

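/*
 * Counterpart to ftrace_graph_caller: functions whose return
 * address was redirected come back here rather than to their real
 * caller.  ftrace_return_to_handler() is asked for the original
 * return address while r0/r1, the function's return values, are
 * kept safe on the stack.
 */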
	.globl	return_to_handler
return_to_handler:
	/*
	 * Save the return values.
	 */
	mov.l	r0, @-r15
	mov.l	r1, @-r15

	mov	#0, r4

	mov.l	.Lftrace_return_to_handler, r0
	jsr	@r0
	 nop

	/*
	 * The return value from ftrace_return_to_handler has the real
	 * address that we should return to.
	 */
	lds	r0, pr
	mov.l	@r15+, r1
	rts
	 mov.l	@r15+, r0


	.align 2
.Lftrace_return_to_handler:
	.long	ftrace_return_to_handler
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */

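/*
 * STACK_CHECK() branches here on failure: dump the stack, then
 * panic("Stack error").  panic()'s argument is loaded in the delay
 * slot of the jsr.
 */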
#ifdef CONFIG_STACK_DEBUG
	.globl	stack_panic
stack_panic:
	mov.l	.Ldump_stack, r0
	jsr	@r0
	 nop

	mov.l	.Lpanic, r0
	jsr	@r0
	 mov.l	.Lpanic_s, r4

	rts
	 nop

	.align 2
.L_ebss:
	.long	_ebss
.L_init_thread_union:
	.long	init_thread_union
.Lpanic:
	.long	panic
.Lpanic_s:
	.long	.Lpanic_str
.Ldump_stack:
	.long	dump_stack

	.section	.rodata
	.align 2
.Lpanic_str:
	.string "Stack error"
#endif /* CONFIG_STACK_DEBUG */
v6.13.7
/* SPDX-License-Identifier: GPL-2.0
 *
 * arch/sh/lib/mcount.S
 *
 *  Copyright (C) 2008, 2009  Paul Mundt
 *  Copyright (C) 2008, 2009  Matt Fleming
 */
#include <asm/ftrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#define MCOUNT_ENTER()		\
	mov.l	r4, @-r15;	\
	mov.l	r5, @-r15;	\
	mov.l	r6, @-r15;	\
	mov.l	r7, @-r15;	\
	sts.l	pr, @-r15;	\
				\
	mov.l	@(20,r15),r4;	\
	sts	pr, r5

#define MCOUNT_LEAVE()		\
	lds.l	@r15+, pr;	\
	mov.l	@r15+, r7;	\
	mov.l	@r15+, r6;	\
	mov.l	@r15+, r5;	\
	rts;			\
	 mov.l	@r15+, r4

#ifdef CONFIG_STACK_DEBUG
/*
 * Perform diagnostic checks on the state of the kernel stack.
 *
 * Check for stack overflow. If there is less than 1KB free
 * then it has overflowed.
 *
 * Make sure the stack pointer contains a valid address. Valid
 * addresses for kernel stacks are anywhere after the bss
 * (after __bss_stop) and anywhere in init_thread_union (init_stack).
 */
#define STACK_CHECK()					\
	mov	#(THREAD_SIZE >> 10), r0;		\
	shll8	r0;					\
	shll2	r0;					\
							\
	/* r1 = sp & (THREAD_SIZE - 1) */		\
	mov	#-1, r1;				\
	add	r0, r1;					\
	and	r15, r1;				\
							\
	mov	#TI_SIZE, r3;				\
	mov	#(STACK_WARN >> 8), r2;			\
	shll8	r2;					\
	add	r3, r2;					\
							\
	/* Is the stack overflowing? */			\
	cmp/hi	r2, r1;					\
	bf	stack_panic;				\
							\
	/* If sp > __bss_stop then we're OK. */		\
	mov.l	.L_ebss, r1;				\
	cmp/hi	r1, r15;				\
	bt	1f;					\
							\
	/* If sp < init_stack, we're not OK. */		\
	mov.l	.L_init_thread_union, r1;		\
	cmp/hs	r1, r15;				\
	bf	stack_panic;				\
							\
	/* If sp > init_stack && sp < __bss_stop, not OK. */	\
	add	r0, r1;					\
	cmp/hs	r1, r15;				\
	bt	stack_panic;				\
1:
#else
#define STACK_CHECK()
#endif /* CONFIG_STACK_DEBUG */

	.align 2
	.globl	_mcount
	.type	_mcount,@function
	.globl	mcount
	.type	mcount,@function
_mcount:
mcount:
	STACK_CHECK()

#ifndef CONFIG_FUNCTION_TRACER
	rts
	 nop
#else
	MCOUNT_ENTER()

#ifdef CONFIG_DYNAMIC_FTRACE
	.globl	mcount_call
mcount_call:
	mov.l	.Lftrace_stub, r6
#else
	mov.l	.Lftrace_trace_function, r6
	mov.l	ftrace_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace
	mov.l	@r6, r6
#endif

	jsr	@r6
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	mov.l   .Lftrace_graph_return, r6
	mov.l   .Lftrace_stub, r7
	cmp/eq  r6, r7
	bt      1f

	mov.l   .Lftrace_graph_caller, r0
	jmp     @r0
	 nop

1:
	mov.l	.Lftrace_graph_entry, r6
	mov.l	.Lftrace_graph_entry_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace

	mov.l   .Lftrace_graph_caller, r0
	jmp	@r0
	 nop

	.align 2
.Lftrace_graph_return:
	.long   ftrace_graph_return
.Lftrace_graph_entry:
	.long   ftrace_graph_entry
.Lftrace_graph_entry_stub:
	.long   ftrace_graph_entry_stub
.Lftrace_graph_caller:
	.long   ftrace_graph_caller
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl skip_trace
skip_trace:
	MCOUNT_LEAVE()

	.align 2
.Lftrace_trace_function:
	.long   ftrace_trace_function

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * NOTE: Do not move either ftrace_graph_call or ftrace_caller
 * as this will affect the calculation of GRAPH_INSN_OFFSET.
 */
	.globl ftrace_graph_call
ftrace_graph_call:
	mov.l	.Lskip_trace, r0
	jmp	@r0
	 nop

	.align 2
.Lskip_trace:
	.long	skip_trace
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl ftrace_caller
ftrace_caller:
	MCOUNT_ENTER()

	.globl ftrace_call
ftrace_call:
	mov.l	.Lftrace_stub, r6
	jsr	@r6
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	bra	ftrace_graph_call
	 nop
#else
	MCOUNT_LEAVE()
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */

	.align 2

/*
 * NOTE: From here on the locations of the .Lftrace_stub label and
 * ftrace_stub itself are fixed. Adding additional data here will skew
 * the displacement for the memory table and break the block replacement.
 * Place new labels either after the ftrace_stub body, or before
 * ftrace_caller. You have been warned.
 */
.Lftrace_stub:
	.long	ftrace_stub

	.globl	ftrace_stub
ftrace_stub:
	rts
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl	ftrace_graph_caller
ftrace_graph_caller:
	mov.l	2f, r1
	jmp	@r1
	 nop
1:
	/*
	 * MCOUNT_ENTER() pushed 5 registers onto the stack, so
	 * the stack address containing our return address is
	 * r15 + 20.
	 */
	mov	#20, r0
	add	r15, r0
	mov	r0, r4

	mov.l	.Lprepare_ftrace_return, r0
	jsr	@r0
	 nop

	MCOUNT_LEAVE()

	.align 2
2:	.long	skip_trace
.Lprepare_ftrace_return:
	.long	prepare_ftrace_return

	.globl	return_to_handler
return_to_handler:
	/*
	 * Save the return values.
	 */
	mov.l	r0, @-r15
	mov.l	r1, @-r15

	mov	#0, r4

	mov.l	.Lftrace_return_to_handler, r0
	jsr	@r0
	 nop

	/*
	 * The return value from ftrace_return_to_handler has the real
	 * address that we should return to.
	 */
	lds	r0, pr
	mov.l	@r15+, r1
	rts
	 mov.l	@r15+, r0


	.align 2
.Lftrace_return_to_handler:
	.long	ftrace_return_to_handler
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_DEBUG
	.globl	stack_panic
stack_panic:
	mov.l	.Ldump_stack, r0
	jsr	@r0
	 nop

	mov.l	.Lpanic, r0
	jsr	@r0
	 mov.l	.Lpanic_s, r4

	rts
	 nop

	.align 2
.L_init_thread_union:
	.long	init_thread_union
.L_ebss:
	.long	__bss_stop
.Lpanic:
	.long	panic
.Lpanic_s:
	.long	.Lpanic_str
.Ldump_stack:
	.long	dump_stack

	.section	.rodata
	.align 2
.Lpanic_str:
	.string "Stack error"
#endif /* CONFIG_STACK_DEBUG */