/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Split from ftrace_64.S
 */

#include <linux/export.h>
#include <linux/magic.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
#include <asm/ppc-opcode.h>
#include <asm/thread_info.h>
#include <asm/bug.h>
#include <asm/ptrace.h>

/*
 * ftrace_caller()/ftrace_regs_caller() are the functions that replace
 * _mcount() when ftrace is active.
 *
 * We arrive here after a function A calls function B, and we are the trace
 * function for B. When we enter, r1 points to A's stack frame and B has not
 * yet had a chance to allocate one.
 *
 * Additionally r2 may point to the TOC for either A or B, depending on
 * whether B did a TOC setup sequence before calling us.
 *
 * On entry the LR points back to the _mcount() call site, and r0 holds the
 * saved LR as it was on entry to B, i.e. the original return address at the
 * call site in A.
 *
 * Our job is to save the register state into a struct pt_regs (on the stack)
 * and then arrange for the ftrace function to be called.
 */
.macro ftrace_regs_entry allregs
	/* Create a minimal stack frame for representing B */
	PPC_STLU	r1, -STACK_FRAME_MIN_SIZE(r1)

	/* Create our stack frame + pt_regs */
	PPC_STLU	r1, -SWITCH_FRAME_SIZE(r1)

	.if \allregs == 1
	SAVE_GPRS(11, 12, r1)
	.endif

	/* Get the _mcount() call site out of LR */
	mflr	r11

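	/*
	 * With CALL_OPS, the ftrace_ops pointer for this call site is stored
	 * SZL bytes below the two-instruction profiling sequence that
	 * precedes the return address in r11, hence the fixed negative
	 * offset in the loads below.
	 */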
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	/* Load the ftrace_op */
	PPC_LL	r12, -(MCOUNT_INSN_SIZE*2 + SZL)(r11)

	/* Load direct_call from the ftrace_op */
	PPC_LL	r12, FTRACE_OPS_DIRECT_CALL(r12)
	PPC_LCMPI	r12, 0
	.if \allregs == 1
	bne	.Lftrace_direct_call_regs
	.else
	bne	.Lftrace_direct_call
	.endif
#endif

	/* Save the previous LR in pt_regs->link */
	PPC_STL	r0, _LINK(r1)
	/* Also save it in A's stack frame */
	PPC_STL	r0, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE+LRSAVE(r1)

	/* Save all gprs to pt_regs */
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 10, r1)

#ifdef CONFIG_PPC64
	/* Ok to continue? */
	lbz	r3, PACA_FTRACE_ENABLED(r13)
	cmpdi	r3, 0
	beq	ftrace_no_trace
#endif

	.if \allregs == 1
	SAVE_GPR(2, r1)
	SAVE_GPRS(13, 31, r1)
	.else
#if defined(CONFIG_LIVEPATCH_64) || defined(CONFIG_PPC_FTRACE_OUT_OF_LINE)
	SAVE_GPR(14, r1)
#endif
	.endif

	/* Save previous stack pointer (r1) */
	addi	r8, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
	PPC_STL	r8, GPR1(r1)

	.if \allregs == 1
	/* Load special regs for save below */
	mfcr	r7
	mfmsr	r8
	mfctr	r9
	mfxer	r10
	.else
	/* Clear MSR to flag as ftrace_caller versus ftrace_regs_caller */
	li	r8, 0
	.endif

#ifdef CONFIG_PPC64
	/* Save callee's TOC in the ABI compliant location */
	std	r2, STK_GOT(r1)
	LOAD_PACA_TOC()		/* get kernel TOC in r2 */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
	/* r11 points to the instruction following the call to ftrace */
	PPC_LL	r5, -(MCOUNT_INSN_SIZE*2 + SZL)(r11)
	PPC_LL	r12, FTRACE_OPS_FUNC(r5)
	mtctr	r12
#else /* !CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS */
#ifdef CONFIG_PPC64
	LOAD_REG_ADDR(r3, function_trace_op)
	ld	r5, 0(r3)
#else
	lis	r3, function_trace_op@ha
	lwz	r5, function_trace_op@l(r3)
#endif
#endif

	/* Save special regs */
	PPC_STL	r8, _MSR(r1)
	.if \allregs == 1
	PPC_STL	r7, _CCR(r1)
	PPC_STL	r9, _CTR(r1)
	PPC_STL	r10, _XER(r1)
	.endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	/* Clear orig_gpr3 to later detect ftrace_direct call */
	li	r7, 0
	PPC_STL	r7, ORIG_GPR3(r1)
#endif

#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
	/* Save our real return address in a non-volatile register (r15) for the return path */
	.if \allregs == 0
	SAVE_GPR(15, r1)
	.endif
	mr	r15, r11
	/*
	 * We want the ftrace location in the function, but our LR (in r11)
	 * points at the 'mtlr r0' instruction in the out of line stub. To
	 * recover the ftrace location, we read the branch instruction in the
	 * stub, and adjust our LR by the branch offset.
	 *
	 * See ftrace_init_ool_stub() for the profile sequence.
	 */
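	/*
	 * The slwi/srawi pair below sign-extends the low 26 bits of the
	 * branch instruction (the LI offset field plus the AA/LK bits, which
	 * are zero for a plain 'b'), yielding the signed byte offset.
	 */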
	lwz	r8, MCOUNT_INSN_SIZE(r11)
	slwi	r8, r8, 6
	srawi	r8, r8, 6
	add	r3, r11, r8
	/*
	 * Override our nip to point past the branch in the original function.
	 * This allows reliable stack trace and the ftrace stack tracer to work as-is.
	 */
	addi	r11, r3, MCOUNT_INSN_SIZE
#else
	/* Calculate ip from nip-4 into r3 for call below */
	subi	r3, r11, MCOUNT_INSN_SIZE
#endif

	/* Save NIP as pt_regs->nip */
	PPC_STL	r11, _NIP(r1)
	/* Also save it in B's stackframe header for proper unwind */
	PPC_STL	r11, LRSAVE+SWITCH_FRAME_SIZE(r1)
#if defined(CONFIG_LIVEPATCH_64) || defined(CONFIG_PPC_FTRACE_OUT_OF_LINE)
	mr	r14, r11	/* remember old NIP */
#endif

	/* Put the original return address in r4 as parent_ip */
	mr	r4, r0

	/* Load &pt_regs in r6 for call below */
	addi	r6, r1, STACK_INT_FRAME_REGS
.endm

.macro ftrace_regs_exit allregs
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	/* Check orig_gpr3 to detect ftrace_direct call */
	PPC_LL	r3, ORIG_GPR3(r1)
	PPC_LCMPI	cr1, r3, 0
	mtctr	r3
#endif
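	/*
	 * With DIRECT_CALLS, cr1 (set above) survives the register restores
	 * below and selects the direct-call return path at the end of this
	 * macro.
	 */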

	/* Restore possibly modified LR */
	PPC_LL	r0, _LINK(r1)

#ifndef CONFIG_PPC_FTRACE_OUT_OF_LINE
	/* Load ctr with the possibly modified NIP */
	PPC_LL	r3, _NIP(r1)
#ifdef CONFIG_LIVEPATCH_64
	cmpd	r14, r3		/* has NIP been altered? */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	beq	cr1, 2f
	mtlr	r3
	b	3f
#endif
2:	mtctr	r3
	mtlr	r0
3:

#else /* CONFIG_PPC_FTRACE_OUT_OF_LINE */
	/* Load LR with the possibly modified NIP */
	PPC_LL	r3, _NIP(r1)
	cmpd	r14, r3		/* has NIP been altered? */
	bne-	1f

	mr	r3, r15
	.if \allregs == 0
	REST_GPR(15, r1)
	.endif
1:	mtlr	r3
#endif

	/* Restore gprs */
	.if \allregs == 1
	REST_GPRS(2, 31, r1)
	.else
	REST_GPRS(3, 10, r1)
#if defined(CONFIG_LIVEPATCH_64) || defined(CONFIG_PPC_FTRACE_OUT_OF_LINE)
	REST_GPR(14, r1)
#endif
	.endif

#ifdef CONFIG_PPC64
	/* Restore callee's TOC */
	ld	r2, STK_GOT(r1)
#endif

	/* Pop our stack frame */
	addi	r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE

#ifdef CONFIG_LIVEPATCH_64
	/* Based on the cmpd above, if the NIP was altered handle livepatch */
	bne-	livepatch_handler
#endif

	/* jump after _mcount site */
#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	bnectr	cr1
#endif
	/*
	 * Return with blr to keep the link stack balanced. The function profiling sequence
	 * uses 'mtlr r0' to restore LR.
	 */
	blr
#else
	bctr
#endif
.endm

.macro ftrace_regs_func allregs
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
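	/* ctr was loaded with ftrace_ops::func in ftrace_regs_entry above */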
	bctrl
#else
	.if \allregs == 1
.globl ftrace_regs_call
ftrace_regs_call:
	.else
.globl ftrace_call
ftrace_call:
	.endif
	/* ftrace_call(r3, r4, r5, r6) */
	bl	ftrace_stub
#endif
.endm

_GLOBAL(ftrace_regs_caller)
	ftrace_regs_entry 1
	ftrace_regs_func 1
	ftrace_regs_exit 1

_GLOBAL(ftrace_caller)
	ftrace_regs_entry 0
	ftrace_regs_func 0
	ftrace_regs_exit 0

_GLOBAL(ftrace_stub)
	blr

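/*
 * ftrace_no_trace: tracing is disabled for this CPU (PACA_FTRACE_ENABLED is
 * zero), so unwind the two frames created in ftrace_regs_entry and return as
 * if we were never called. Only r3 was clobbered before the check, so only
 * r3 needs restoring.
 */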
#ifdef CONFIG_PPC64
ftrace_no_trace:
#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
	REST_GPR(3, r1)
	addi	r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
	blr
#else
	mflr	r3
	mtctr	r3
	REST_GPR(3, r1)
	addi	r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
	mtlr	r0
	bctr
#endif
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
.Lftrace_direct_call_regs:
	mtctr	r12
	REST_GPRS(11, 12, r1)
	addi	r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
	bctr
.Lftrace_direct_call:
	mtctr	r12
	addi	r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
	bctr
SYM_FUNC_START(ftrace_stub_direct_tramp)
	blr
SYM_FUNC_END(ftrace_stub_direct_tramp)
#endif

#ifdef CONFIG_LIVEPATCH_64
	/*
	 * This function runs in the mcount context, between two functions. As
	 * such it can only clobber registers which are volatile and used in
	 * function linkage.
	 *
	 * We get here when a function A calls another function B, but B has
	 * been live patched with a new function C.
	 *
	 * On entry, we have no stack frame and can not allocate one.
	 *
	 * With PPC_FTRACE_OUT_OF_LINE=n, on entry:
	 * - LR points back to the original caller (in A)
	 * - CTR holds the new NIP in C
	 * - r0, r11 & r12 are free
	 *
	 * With PPC_FTRACE_OUT_OF_LINE=y, on entry:
	 * - r0 points back to the original caller (in A)
	 * - LR holds the new NIP in C
	 * - r11 & r12 are free
	 */
livepatch_handler:
	ld	r12, PACA_THREAD_INFO(r13)

	/* Allocate 3 x 8 bytes */
	ld	r11, TI_livepatch_sp(r12)
	addi	r11, r11, 24
	std	r11, TI_livepatch_sp(r12)
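	/*
	 * The livepatch stack grows upwards; the 24-byte record just
	 * allocated is addressed back from the new top in r11:
	 *   -24: saved TOC, -16: real LR, -8: STACK_END_MAGIC marker.
	 */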

	/* Store stack end marker */
	lis	r12, STACK_END_MAGIC@h
	ori	r12, r12, STACK_END_MAGIC@l
	std	r12, -8(r11)

	/* Save toc & real LR on livepatch stack */
	std	r2, -24(r11)
#ifndef CONFIG_PPC_FTRACE_OUT_OF_LINE
	mflr	r12
	std	r12, -16(r11)
	/* Put ctr in r12 for the global entry point and branch there */
	mfctr	r12
#else
	std	r0, -16(r11)
	/* Put the new NIP (currently in LR) in r12 for the global entry point */
	mflr	r12
	mtctr	r12
#endif
	bctrl

	/*
	 * Now we are returning from the patched function to the original
	 * caller A. We are free to use r11, r12 and we can use r2 until we
	 * restore it.
	 */

	ld	r12, PACA_THREAD_INFO(r13)

	ld	r11, TI_livepatch_sp(r12)

	/* Check stack marker hasn't been trashed */
	lis	r2, STACK_END_MAGIC@h
	ori	r2, r2, STACK_END_MAGIC@l
	ld	r12, -8(r11)
1:	tdne	r12, r2
	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0

	/* Restore LR & toc from livepatch stack */
	ld	r12, -16(r11)
	mtlr	r12
	ld	r2, -24(r11)

	/* Pop livepatch stack frame */
	ld	r12, PACA_THREAD_INFO(r13)
	subi	r11, r11, 24
	std	r11, TI_livepatch_sp(r12)

	/* Return to original caller of live patched function */
	blr
#endif /* CONFIG_LIVEPATCH_64 */

#ifndef CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY
_GLOBAL(mcount)
_GLOBAL(_mcount)
EXPORT_SYMBOL(_mcount)
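	/*
	 * Default stub while no tracer is attached: restore the caller's LR
	 * from r0 and branch back to the _mcount() call site via ctr.
	 */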
	mflr	r12
	mtctr	r12
	mtlr	r0
	bctr
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(return_to_handler)
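	/*
	 * ftrace_return_to_handler() gives us the original return address
	 * for this frame in r3; meanwhile the live r3/r4 return values (and
	 * TOC) are stashed in the frame created below.
	 */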
	/* need to save return values */
#ifdef CONFIG_PPC64
	std	r4, -32(r1)
	std	r3, -24(r1)
	/* save TOC */
	std	r2, -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We might be called from a module.
	 * Switch to our TOC to run inside the core kernel.
	 */
	LOAD_PACA_TOC()
#else
	stwu	r1, -16(r1)
	stw	r3, 8(r1)
	stw	r4, 12(r1)
#endif

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

#ifdef CONFIG_PPC64
	ld	r1, 0(r1)
	ld	r4, -32(r1)
	ld	r3, -24(r1)
	ld	r2, -16(r1)
	ld	r31, -8(r1)
#else
	lwz	r3, 8(r1)
	lwz	r4, 12(r1)
	addi	r1, r1, 16
#endif

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

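/*
 * Pool of out-of-line ftrace stubs for kernel text;
 * ftrace_ool_stub_text_count records the number of reserved slots.
 */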
#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
SYM_DATA(ftrace_ool_stub_text_count, .long CONFIG_PPC_FTRACE_OUT_OF_LINE_NUM_RESERVE)

SYM_START(ftrace_ool_stub_text, SYM_L_GLOBAL, .balign SZL)
	.space	CONFIG_PPC_FTRACE_OUT_OF_LINE_NUM_RESERVE * FTRACE_OOL_STUB_SIZE
SYM_CODE_END(ftrace_ool_stub_text)
#endif

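/*
 * Space for ftrace trampolines, used to reach ftrace_caller from call sites
 * that are out of direct branch range (ftrace_tramp_init serves init text).
 */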
.pushsection ".tramp.ftrace.text","aw",@progbits;
.globl ftrace_tramp_text
ftrace_tramp_text:
	.space	32
.popsection

.pushsection ".tramp.ftrace.init","aw",@progbits;
.globl ftrace_tramp_init
ftrace_tramp_init:
	.space	32
.popsection