/*
 * arch/sh/lib/mcount.S
 *
 * Copyright (C) 2008, 2009 Paul Mundt
 * Copyright (C) 2008, 2009 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <asm/ftrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

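/*
 * MCOUNT_ENTER() saves the argument registers r4-r7 and pr, then sets
 * up the two addresses handed to the tracer: r4 is loaded from the
 * word that was on top of the stack on entry (the instrumented
 * function's saved return address, at @(20,r15) after the five
 * pushes) and r5 is loaded with pr, the address to resume at inside
 * the instrumented function.
 *
 * MCOUNT_LEAVE() restores the saved registers and returns, reloading
 * r4 in the rts delay slot.
 */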
#define MCOUNT_ENTER()		\
	mov.l	r4, @-r15;	\
	mov.l	r5, @-r15;	\
	mov.l	r6, @-r15;	\
	mov.l	r7, @-r15;	\
	sts.l	pr, @-r15;	\
				\
	mov.l	@(20,r15),r4;	\
	sts	pr, r5

#define MCOUNT_LEAVE()		\
	lds.l	@r15+, pr;	\
	mov.l	@r15+, r7;	\
	mov.l	@r15+, r6;	\
	mov.l	@r15+, r5;	\
	rts;			\
	mov.l	@r15+, r4

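/*
 * Note that STACK_CHECK() runs before MCOUNT_ENTER() and confines
 * itself to r0-r3, which hold nothing live at function entry, so no
 * registers need to be saved around it.
 */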
#ifdef CONFIG_STACK_DEBUG
/*
 * Perform diagnostic checks on the state of the kernel stack.
 *
 * Check for stack overflow. If there is less than 1KB free
 * then it has overflowed.
 *
 * Make sure the stack pointer contains a valid address. Valid
 * addresses for kernel stacks are anywhere after the bss
 * (after __bss_stop) and anywhere in init_thread_union (init_stack).
 */
#define STACK_CHECK()					\
	mov	#(THREAD_SIZE >> 10), r0;		\
	shll8	r0;					\
	shll2	r0;					\
							\
	/* r1 = sp & (THREAD_SIZE - 1) */		\
	mov	#-1, r1;				\
	add	r0, r1;					\
	and	r15, r1;				\
							\
	mov	#TI_SIZE, r3;				\
	mov	#(STACK_WARN >> 8), r2;			\
	shll8	r2;					\
	add	r3, r2;					\
							\
	/* Is the stack overflowing? */			\
	cmp/hi	r2, r1;					\
	bf	stack_panic;				\
							\
	/* If sp > __bss_stop then we're OK. */		\
	mov.l	.L_ebss, r1;				\
	cmp/hi	r1, r15;				\
	bt	1f;					\
							\
	/* If sp < init_stack, we're not OK. */		\
	mov.l	.L_init_thread_union, r1;		\
	cmp/hs	r1, r15;				\
	bf	stack_panic;				\
							\
	/* If sp > init_stack && sp < __bss_stop, not OK. */	\
	add	r0, r1;					\
	cmp/hs	r1, r15;				\
	bt	stack_panic;				\
1:
#else
#define STACK_CHECK()
#endif /* CONFIG_STACK_DEBUG */

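/*
 * Both the mcount and _mcount spellings are provided since toolchains
 * differ in which symbol they emit -pg calls to; the two labels share
 * the same code.
 */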
	.align 2
	.globl	_mcount
	.type	_mcount,@function
	.globl	mcount
	.type	mcount,@function
_mcount:
mcount:
	STACK_CHECK()

#ifndef CONFIG_FUNCTION_TRACER
	rts
	nop
#else
	MCOUNT_ENTER()

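/*
 * With CONFIG_DYNAMIC_FTRACE the tracer proper is reached through
 * ftrace_caller below and this path just calls ftrace_stub.
 * Otherwise the ftrace_trace_function hook is looked up here so that
 * the common "no tracer installed" case can bail out via skip_trace
 * without making a call.
 */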
#ifdef CONFIG_DYNAMIC_FTRACE
	.globl	mcount_call
mcount_call:
	mov.l	.Lftrace_stub, r6
#else
	mov.l	.Lftrace_trace_function, r6
	mov.l	ftrace_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace
	mov.l	@r6, r6
#endif

	jsr	@r6
	nop

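/*
 * If the function graph tracer has installed either of its hooks
 * (ftrace_graph_return or ftrace_graph_entry no longer matches its
 * stub), divert to ftrace_graph_caller; otherwise fall through to
 * skip_trace and return to the instrumented function.
 */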
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	mov.l	.Lftrace_graph_return, r6
	mov.l	.Lftrace_stub, r7
	cmp/eq	r6, r7
	bt	1f

	mov.l	.Lftrace_graph_caller, r0
	jmp	@r0
	nop

1:
	mov.l	.Lftrace_graph_entry, r6
	mov.l	.Lftrace_graph_entry_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace

	mov.l	.Lftrace_graph_caller, r0
	jmp	@r0
	nop

	.align 2
.Lftrace_graph_return:
	.long	ftrace_graph_return
.Lftrace_graph_entry:
	.long	ftrace_graph_entry
.Lftrace_graph_entry_stub:
	.long	ftrace_graph_entry_stub
.Lftrace_graph_caller:
	.long	ftrace_graph_caller
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl	skip_trace
skip_trace:
	MCOUNT_LEAVE()

	.align 2
.Lftrace_trace_function:
	.long	ftrace_trace_function

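/*
 * Dynamic ftrace entry point.  Patched call sites branch to
 * ftrace_caller rather than mcount; ftrace_call is the slot that is
 * rewritten at runtime to point at the active tracer, and
 * ftrace_graph_call is the corresponding slot used to switch the
 * graph tracer on and off.
 */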
#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * NOTE: Do not move either ftrace_graph_call or ftrace_caller
 * as this will affect the calculation of GRAPH_INSN_OFFSET.
 */
	.globl	ftrace_graph_call
ftrace_graph_call:
	mov.l	.Lskip_trace, r0
	jmp	@r0
	nop

	.align 2
.Lskip_trace:
	.long	skip_trace
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl	ftrace_caller
ftrace_caller:
	MCOUNT_ENTER()

	.globl	ftrace_call
ftrace_call:
	mov.l	.Lftrace_stub, r6
	jsr	@r6
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	bra	ftrace_graph_call
	nop
#else
	MCOUNT_LEAVE()
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */

	.align 2

/*
 * NOTE: From here on the locations of the .Lftrace_stub label and
 * ftrace_stub itself are fixed. Adding additional data here will skew
 * the displacement for the memory table and break the block replacement.
 * Place new labels either after the ftrace_stub body, or before
 * ftrace_caller. You have been warned.
 */
.Lftrace_stub:
	.long	ftrace_stub

	.globl	ftrace_stub
ftrace_stub:
	rts
	nop

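/*
 * Function graph tracing entry.  prepare_ftrace_return() is handed
 * the address of the return address saved on the stack (r15 + 20
 * after MCOUNT_ENTER()) and hooks it so that the instrumented
 * function returns through return_to_handler below rather than
 * straight to its caller.
 */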
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl	ftrace_graph_caller
ftrace_graph_caller:
	mov.l	2f, r1
	jmp	@r1
	nop
1:
	/*
	 * MCOUNT_ENTER() pushed 5 registers onto the stack, so
	 * the stack address containing our return address is
	 * r15 + 20.
	 */
	mov	#20, r0
	add	r15, r0
	mov	r0, r4

	mov.l	.Lprepare_ftrace_return, r0
	jsr	@r0
	nop

	MCOUNT_LEAVE()

	.align 2
2:	.long	skip_trace
.Lprepare_ftrace_return:
	.long	prepare_ftrace_return

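/*
 * Hooked functions return here instead of to their original caller.
 * ftrace_return_to_handler() pops the real return address off the
 * ftrace return stack and hands it back in r0, which is then used as
 * the return target.  r0 and r1 are preserved around the call since
 * they carry the traced function's return value.
 */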
	.globl	return_to_handler
return_to_handler:
	/*
	 * Save the return values.
	 */
	mov.l	r0, @-r15
	mov.l	r1, @-r15

	mov	#0, r4

	mov.l	.Lftrace_return_to_handler, r0
	jsr	@r0
	nop

	/*
	 * The return value from ftrace_return_to_handler has the real
	 * address that we should return to.
	 */
	lds	r0, pr
	mov.l	@r15+, r1
	rts
	mov.l	@r15+, r0

	.align 2
.Lftrace_return_to_handler:
	.long	ftrace_return_to_handler
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_DEBUG
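/*
 * Reached from STACK_CHECK() when the kernel stack is overflowing or
 * r15 does not point at a valid kernel stack.  Dump the stack for
 * diagnosis and then panic; panic() does not return.
 */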
	.globl	stack_panic
stack_panic:
	mov.l	.Ldump_stack, r0
	jsr	@r0
	nop

	mov.l	.Lpanic, r0
	jsr	@r0
	mov.l	.Lpanic_s, r4

	rts
	nop

	.align 2
.L_init_thread_union:
	.long	init_thread_union
.L_ebss:
	.long	__bss_stop
.Lpanic:
	.long	panic
.Lpanic_s:
	.long	.Lpanic_str
.Ldump_stack:
	.long	dump_stack

	.section .rodata
	.align 2
.Lpanic_str:
	.string "Stack error"
#endif /* CONFIG_STACK_DEBUG */