/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2014 Steven Rostedt, Red Hat Inc
 */

#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>

	.code64
	.section .entry.text, "ax"

#ifdef CC_USING_FENTRY
# define function_hook __fentry__
EXPORT_SYMBOL(__fentry__)
#else
# define function_hook mcount
EXPORT_SYMBOL(mcount)
#endif

#ifdef CONFIG_FRAME_POINTER
# ifdef CC_USING_FENTRY
/* Save parent and function stack frames (rip and rbp) */
# define MCOUNT_FRAME_SIZE (8+16*2)
# else
/* Save just function stack frame (rip and rbp) */
# define MCOUNT_FRAME_SIZE (8+16)
# endif
#else
/* No need to save a stack frame */
# define MCOUNT_FRAME_SIZE 0
#endif /* CONFIG_FRAME_POINTER */

/* Size of stack used to save mcount regs in save_mcount_regs */
#define MCOUNT_REG_SIZE (SS+8 + MCOUNT_FRAME_SIZE)
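/*
 * Note: SS is the offset of the last pt_regs field, so SS+8 is the full
 * size of struct pt_regs.
 */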

/*
 * The gcc -pg option adds a call to 'mcount' in most functions.
 * When -mfentry is used, the call is to '__fentry__' instead and is
 * made before the function's stack frame is set up.
 * Both require a set of regs to be saved before calling
 * any C code and restored before returning back to the function.
 *
 * On boot up, all these calls are converted into nops. When tracing
 * is enabled, the call can jump to either ftrace_caller or
 * ftrace_regs_caller. Callbacks (tracing functions) that require
 * ftrace_regs_caller (like kprobes) need to have pt_regs passed to
 * them. For this reason, a pt_regs-sized area is allocated on the
 * stack and the required mcount registers are saved in the locations
 * that pt_regs keeps them in.
 */
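
/*
 * For illustration only (assumed compiler output, not part of this file):
 * with -mfentry, a traced function starts with
 *
 *	func:
 *		call __fentry__		# patched to a 5-byte nop at boot
 *		push %rbp
 *		...
 *
 * while plain -pg emits the 'call mcount' after the function's frame
 * has been set up.
 */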

/*
 * @added: the amount of stack added before calling this
 *
 * After this is called, the following registers contain:
 *
 * %rdi - holds the address that called the trampoline
 * %rsi - holds the parent function (traced function's return address)
 * %rdx - holds the original %rbp
 */
.macro save_mcount_regs added=0

#ifdef CONFIG_FRAME_POINTER
	/* Save the original rbp */
	pushq %rbp

	/*
	 * Stack traces will stop at the ftrace trampoline if the frame pointer
	 * is not set up properly. If fentry is used, we need to save a frame
	 * pointer for the parent as well as the traced function, because
	 * fentry is called before the stack frame is set up, whereas mcount
	 * is called afterward.
	 */
#ifdef CC_USING_FENTRY
	/* Save the parent pointer (skip orig rbp and our return address) */
	pushq \added+8*2(%rsp)
	pushq %rbp
	movq %rsp, %rbp
	/* Save the return address (now skip orig rbp, rbp and parent) */
	pushq \added+8*3(%rsp)
#else
	/* Can't assume that rip is before this (unless added was zero) */
	pushq \added+8(%rsp)
#endif
	pushq %rbp
	movq %rsp, %rbp
#endif /* CONFIG_FRAME_POINTER */

	/*
	 * We add enough stack to save all regs.
	 */
	subq $(MCOUNT_REG_SIZE - MCOUNT_FRAME_SIZE), %rsp
	movq %rax, RAX(%rsp)
	movq %rcx, RCX(%rsp)
	movq %rdx, RDX(%rsp)
	movq %rsi, RSI(%rsp)
	movq %rdi, RDI(%rsp)
	movq %r8, R8(%rsp)
	movq %r9, R9(%rsp)
	/*
	 * Save the original RBP. Even though the mcount ABI does not
	 * require this, it helps out callers.
	 */
#ifdef CONFIG_FRAME_POINTER
	movq MCOUNT_REG_SIZE-8(%rsp), %rdx
#else
	movq %rbp, %rdx
#endif
	movq %rdx, RBP(%rsp)

	/* Copy the parent address into %rsi (second parameter) */
#ifdef CC_USING_FENTRY
	movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
#else
	/* %rdx contains original %rbp */
	movq 8(%rdx), %rsi
#endif

	/* Move RIP to its proper location */
	movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
	movq %rdi, RIP(%rsp)

	/*
	 * Now %rdi (the first parameter) holds the address the traced
	 * function will return to after the mcount/fentry call. But the
	 * callbacks expect the address of the call itself, so back it up
	 * by one instruction.
	 */
	subq $MCOUNT_INSN_SIZE, %rdi
	.endm
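
/*
 * Sketch of the resulting stack layout for the CONFIG_FRAME_POINTER +
 * fentry case, for illustration only (offsets are from the new %rsp):
 *
 *	MCOUNT_REG_SIZE+8+\added	parent return address
 *	MCOUNT_REG_SIZE+\added		return address into the traced function
 *	MCOUNT_REG_SIZE-8		original %rbp
 *	...				fake frames for parent and traced function
 *	0 ... SS			pt_regs-sized save area (RIP, RBP, arg regs, ...)
 */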

.macro restore_mcount_regs
	movq R9(%rsp), %r9
	movq R8(%rsp), %r8
	movq RDI(%rsp), %rdi
	movq RSI(%rsp), %rsi
	movq RDX(%rsp), %rdx
	movq RCX(%rsp), %rcx
	movq RAX(%rsp), %rax

	/* ftrace_regs_caller can modify %rbp */
	movq RBP(%rsp), %rbp

	addq $MCOUNT_REG_SIZE, %rsp

	.endm

#ifdef CONFIG_DYNAMIC_FTRACE

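/*
 * With DYNAMIC_FTRACE, the compiler-emitted mcount/fentry calls are turned
 * into nops at boot and, when tracing is enabled, patched to call
 * ftrace_caller or ftrace_regs_caller directly, so the hook itself only
 * ever needs to return.
 */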
ENTRY(function_hook)
	retq
ENDPROC(function_hook)

ENTRY(ftrace_caller)
	/* save_mcount_regs fills in first two parameters */
	save_mcount_regs

GLOBAL(ftrace_caller_op_ptr)
	/* Load the ftrace_ops into the 3rd parameter */
	movq function_trace_op(%rip), %rdx

	/* regs go into 4th parameter (but make it NULL) */
	movq $0, %rcx

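	/*
	 * The callback reached through ftrace_call below has the C prototype
	 * ftrace_func_t (see include/linux/ftrace.h), assumed here to be:
	 *
	 *	void func(unsigned long ip, unsigned long parent_ip,
	 *		  struct ftrace_ops *op, struct pt_regs *regs);
	 *
	 * matching the %rdi, %rsi, %rdx and %rcx set up above.
	 */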
GLOBAL(ftrace_call)
	call ftrace_stub

	restore_mcount_regs

	/*
	 * The copied trampoline must call ftrace_epilogue as it
	 * may still need to call the function graph tracer.
	 *
	 * The code up to this label is copied into trampolines, so
	 * think twice before adding any new code or changing the
	 * layout here.
	 */
GLOBAL(ftrace_epilogue)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
	jmp ftrace_stub
#endif

/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
	retq
ENDPROC(ftrace_caller)

ENTRY(ftrace_regs_caller)
	/* Save the current flags before any operations that can change them */
	pushfq

	/* added 8 bytes to save flags */
	save_mcount_regs 8
	/* save_mcount_regs fills in first two parameters */

GLOBAL(ftrace_regs_caller_op_ptr)
	/* Load the ftrace_ops into the 3rd parameter */
	movq function_trace_op(%rip), %rdx

	/* Save the rest of pt_regs */
	movq %r15, R15(%rsp)
	movq %r14, R14(%rsp)
	movq %r13, R13(%rsp)
	movq %r12, R12(%rsp)
	movq %r11, R11(%rsp)
	movq %r10, R10(%rsp)
	movq %rbx, RBX(%rsp)
	/* Copy saved flags */
	movq MCOUNT_REG_SIZE(%rsp), %rcx
	movq %rcx, EFLAGS(%rsp)
	/* Kernel segments */
	movq $__KERNEL_DS, %rcx
	movq %rcx, SS(%rsp)
	movq $__KERNEL_CS, %rcx
	movq %rcx, CS(%rsp)
	/* Stack - skipping return address and flags */
	leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx
	movq %rcx, RSP(%rsp)

	/* regs go into 4th parameter */
	leaq (%rsp), %rcx

GLOBAL(ftrace_regs_call)
	call ftrace_stub

	/* Copy flags back to SS, to restore them */
	movq EFLAGS(%rsp), %rax
	movq %rax, MCOUNT_REG_SIZE(%rsp)

	/* Handlers can change the RIP */
	movq RIP(%rsp), %rax
	movq %rax, MCOUNT_REG_SIZE+8(%rsp)

	/* restore the rest of pt_regs */
	movq R15(%rsp), %r15
	movq R14(%rsp), %r14
	movq R13(%rsp), %r13
	movq R12(%rsp), %r12
	movq R10(%rsp), %r10
	movq RBX(%rsp), %rbx

	restore_mcount_regs

	/* Restore flags */
	popfq

	/*
	 * Because this jmp to ftrace_epilogue can be a short jump,
	 * it must not be copied into the trampoline.
	 * The trampoline will add its own code to jump
	 * back to the return address.
	 */
GLOBAL(ftrace_regs_caller_end)

	jmp ftrace_epilogue

ENDPROC(ftrace_regs_caller)


#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(function_hook)
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace

fgraph_trace:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpq $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif

GLOBAL(ftrace_stub)
	retq

trace:
	/* save_mcount_regs fills in first two parameters */
	save_mcount_regs

	/*
	 * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
	 * set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the
	 * ip and parent ip are used and the list function is called when
	 * function tracing is enabled.
	 */
	movq ftrace_trace_function, %r8
	CALL_NOSPEC %r8
	restore_mcount_regs

	jmp fgraph_trace
ENDPROC(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	/* Saves rbp into %rdx and fills first parameter */
	save_mcount_regs

#ifdef CC_USING_FENTRY
	leaq MCOUNT_REG_SIZE+8(%rsp), %rsi
	movq $0, %rdx /* No framepointers needed */
#else
	/* Save the address of the traced function's return address */
	leaq 8(%rdx), %rsi
	/* ftrace does sanity checks against frame pointers */
	movq (%rdx), %rdx
#endif
	call prepare_ftrace_return

	restore_mcount_regs

	retq
ENDPROC(ftrace_graph_caller)

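/*
 * When graph tracing is enabled, prepare_ftrace_return() replaces the traced
 * function's return address with return_to_handler.  When the function
 * returns, it lands here; ftrace_return_to_handler() gives back the original
 * return address, which we then jump to.
 */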
ENTRY(return_to_handler)
	UNWIND_HINT_EMPTY
	subq $24, %rsp

	/* Save the return values */
	movq %rax, (%rsp)
	movq %rdx, 8(%rsp)
	movq %rbp, %rdi

	call ftrace_return_to_handler

	movq %rax, %rdi
	movq 8(%rsp), %rdx
	movq (%rsp), %rax
	addq $24, %rsp
	JMP_NOSPEC %rdi
END(return_to_handler)
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2014 Steven Rostedt, Red Hat Inc
 */

#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
#include <asm/frame.h>

	.code64
	.section .text, "ax"

#ifdef CONFIG_FRAME_POINTER
/* Save parent and function stack frames (rip and rbp) */
# define MCOUNT_FRAME_SIZE (8+16*2)
#else
/* No need to save a stack frame */
# define MCOUNT_FRAME_SIZE 0
#endif /* CONFIG_FRAME_POINTER */

/* Size of stack used to save mcount regs in save_mcount_regs */
#define MCOUNT_REG_SIZE (FRAME_SIZE + MCOUNT_FRAME_SIZE)

/*
 * The gcc -pg option adds a call to 'mcount' in most functions.
 * When -mfentry is used, the call is to '__fentry__' instead and is
 * made before the function's stack frame is set up.
 * Both require a set of regs to be saved before calling
 * any C code and restored before returning back to the function.
 *
 * On boot up, all these calls are converted into nops. When tracing
 * is enabled, the call can jump to either ftrace_caller or
 * ftrace_regs_caller. Callbacks (tracing functions) that require
 * ftrace_regs_caller (like kprobes) need to have pt_regs passed to
 * them. For this reason, a pt_regs-sized area is allocated on the
 * stack and the required mcount registers are saved in the locations
 * that pt_regs keeps them in.
 */

/*
 * @added: the amount of stack added before calling this
 *
 * After this is called, the following registers contain:
 *
 * %rdi - holds the address that called the trampoline
 * %rsi - holds the parent function (traced function's return address)
 * %rdx - holds the original %rbp
 */
.macro save_mcount_regs added=0

#ifdef CONFIG_FRAME_POINTER
	/* Save the original rbp */
	pushq %rbp

	/*
	 * Stack traces will stop at the ftrace trampoline if the frame pointer
	 * is not set up properly. If fentry is used, we need to save a frame
	 * pointer for the parent as well as the traced function, because
	 * fentry is called before the stack frame is set up, whereas mcount
	 * is called afterward.
	 */

	/* Save the parent pointer (skip orig rbp and our return address) */
	pushq \added+8*2(%rsp)
	pushq %rbp
	movq %rsp, %rbp
	/* Save the return address (now skip orig rbp, rbp and parent) */
	pushq \added+8*3(%rsp)
	pushq %rbp
	movq %rsp, %rbp
#endif /* CONFIG_FRAME_POINTER */

	/*
	 * We add enough stack to save all regs.
	 */
	subq $(FRAME_SIZE), %rsp
	movq %rax, RAX(%rsp)
	movq %rcx, RCX(%rsp)
	movq %rdx, RDX(%rsp)
	movq %rsi, RSI(%rsp)
	movq %rdi, RDI(%rsp)
	movq %r8, R8(%rsp)
	movq %r9, R9(%rsp)
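	/*
	 * ORIG_RAX is cleared here; ftrace_regs_caller checks it after the
	 * callback to see whether a direct call was requested (see
	 * arch_ftrace_set_direct_caller()).
	 */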
	movq $0, ORIG_RAX(%rsp)
	/*
	 * Save the original RBP. Even though the mcount ABI does not
	 * require this, it helps out callers.
	 */
#ifdef CONFIG_FRAME_POINTER
	movq MCOUNT_REG_SIZE-8(%rsp), %rdx
#else
	movq %rbp, %rdx
#endif
	movq %rdx, RBP(%rsp)

	/* Copy the parent address into %rsi (second parameter) */
	movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi

	/* Move RIP to its proper location */
	movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
	movq %rdi, RIP(%rsp)

	/*
	 * Now %rdi (the first parameter) holds the address the traced
	 * function will return to after the fentry call. But the
	 * callbacks expect the address of the call itself, so back it up
	 * by one instruction.
	 */
	subq $MCOUNT_INSN_SIZE, %rdi
	.endm

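/*
 * @save: number of bytes of the register save area to leave on the stack
 *	  (the direct-call path below uses this to keep the swapped flags
 *	  in place)
 */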
.macro restore_mcount_regs save=0

	/* ftrace_regs_caller or frame pointers require this */
	movq RBP(%rsp), %rbp

	movq R9(%rsp), %r9
	movq R8(%rsp), %r8
	movq RDI(%rsp), %rdi
	movq RSI(%rsp), %rsi
	movq RDX(%rsp), %rdx
	movq RCX(%rsp), %rcx
	movq RAX(%rsp), %rax

	addq $MCOUNT_REG_SIZE-\save, %rsp

	.endm

#ifdef CONFIG_DYNAMIC_FTRACE

SYM_FUNC_START(__fentry__)
	retq
SYM_FUNC_END(__fentry__)
EXPORT_SYMBOL(__fentry__)

SYM_FUNC_START(ftrace_caller)
	/* save_mcount_regs fills in first two parameters */
	save_mcount_regs

SYM_INNER_LABEL(ftrace_caller_op_ptr, SYM_L_GLOBAL)
	/* Load the ftrace_ops into the 3rd parameter */
	movq function_trace_op(%rip), %rdx

	/* regs go into 4th parameter (but make it NULL) */
	movq $0, %rcx

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
	call ftrace_stub

	restore_mcount_regs

	/*
	 * The code up to this label is copied into trampolines so
	 * think twice before adding any new code or changing the
	 * layout here.
	 */
SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL)

	jmp ftrace_epilogue
SYM_FUNC_END(ftrace_caller)

SYM_FUNC_START(ftrace_epilogue)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
	jmp ftrace_stub
#endif

/*
 * This is weak to keep gas from relaxing the jumps.
 * It is also used to copy the retq for trampolines.
 */
SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
	retq
SYM_FUNC_END(ftrace_epilogue)

SYM_FUNC_START(ftrace_regs_caller)
	/* Save the current flags before any operations that can change them */
	pushfq

	/* added 8 bytes to save flags */
	save_mcount_regs 8
	/* save_mcount_regs fills in first two parameters */

SYM_INNER_LABEL(ftrace_regs_caller_op_ptr, SYM_L_GLOBAL)
	/* Load the ftrace_ops into the 3rd parameter */
	movq function_trace_op(%rip), %rdx

	/* Save the rest of pt_regs */
	movq %r15, R15(%rsp)
	movq %r14, R14(%rsp)
	movq %r13, R13(%rsp)
	movq %r12, R12(%rsp)
	movq %r11, R11(%rsp)
	movq %r10, R10(%rsp)
	movq %rbx, RBX(%rsp)
	/* Copy saved flags */
	movq MCOUNT_REG_SIZE(%rsp), %rcx
	movq %rcx, EFLAGS(%rsp)
	/* Kernel segments */
	movq $__KERNEL_DS, %rcx
	movq %rcx, SS(%rsp)
	movq $__KERNEL_CS, %rcx
	movq %rcx, CS(%rsp)
	/* Stack - skipping return address and flags */
	leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx
	movq %rcx, RSP(%rsp)

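	/*
	 * ENCODE_FRAME_POINTER (asm/frame.h) makes %rbp point at the pt_regs
	 * just built, with the low bit set, so the frame-pointer unwinder can
	 * recognize it as a register frame (no-op without CONFIG_FRAME_POINTER).
	 */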
	ENCODE_FRAME_POINTER

	/* regs go into 4th parameter */
	leaq (%rsp), %rcx

SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
	call ftrace_stub

	/* Copy flags back to SS, to restore them */
	movq EFLAGS(%rsp), %rax
	movq %rax, MCOUNT_REG_SIZE(%rsp)

	/* Handlers can change the RIP */
	movq RIP(%rsp), %rax
	movq %rax, MCOUNT_REG_SIZE+8(%rsp)

	/* restore the rest of pt_regs */
	movq R15(%rsp), %r15
	movq R14(%rsp), %r14
	movq R13(%rsp), %r13
	movq R12(%rsp), %r12
	movq R10(%rsp), %r10
	movq RBX(%rsp), %rbx

	movq ORIG_RAX(%rsp), %rax
	movq %rax, MCOUNT_REG_SIZE-8(%rsp)

	/*
	 * If ORIG_RAX is anything but zero, make this a call to that.
	 * See arch_ftrace_set_direct_caller().
	 */
	movq ORIG_RAX(%rsp), %rax
	testq %rax, %rax
SYM_INNER_LABEL(ftrace_regs_caller_jmp, SYM_L_GLOBAL)
	jnz 1f

	restore_mcount_regs
	/* Restore flags */
	popfq

	/*
	 * Because this jmp to ftrace_epilogue can be a short jump,
	 * it must not be copied into the trampoline.
	 * The trampoline will add its own code to jump
	 * back to the return address.
	 */
SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
	jmp ftrace_epilogue

	/* Swap the flags with orig_rax */
1:	movq MCOUNT_REG_SIZE(%rsp), %rdi
	movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
	movq %rax, MCOUNT_REG_SIZE(%rsp)
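	/*
	 * The direct-caller address now sits where the flags were saved, with
	 * the flags one slot below it.  After restore_mcount_regs 8 and popfq,
	 * the retq reached through ftrace_epilogue pops that address and hands
	 * control to the direct trampoline, which later returns into the
	 * traced function.
	 */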

	restore_mcount_regs 8
	/* Restore flags */
	popfq
	UNWIND_HINT_RET_OFFSET
	jmp ftrace_epilogue

SYM_FUNC_END(ftrace_regs_caller)


#else /* ! CONFIG_DYNAMIC_FTRACE */

SYM_FUNC_START(__fentry__)
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace

fgraph_trace:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpq $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif

SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
	retq

trace:
	/* save_mcount_regs fills in first two parameters */
	save_mcount_regs

	/*
	 * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
	 * set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the
	 * ip and parent ip are used and the list function is called when
	 * function tracing is enabled.
	 */
	movq ftrace_trace_function, %r8
	CALL_NOSPEC r8
	restore_mcount_regs

	jmp fgraph_trace
SYM_FUNC_END(__fentry__)
EXPORT_SYMBOL(__fentry__)
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_FUNC_START(ftrace_graph_caller)
	/* Saves rbp into %rdx and fills first parameter */
	save_mcount_regs

	leaq MCOUNT_REG_SIZE+8(%rsp), %rsi
	movq $0, %rdx /* No framepointers needed */
	call prepare_ftrace_return

	restore_mcount_regs

	retq
SYM_FUNC_END(ftrace_graph_caller)

SYM_CODE_START(return_to_handler)
	UNWIND_HINT_EMPTY
	subq $24, %rsp

	/* Save the return values */
	movq %rax, (%rsp)
	movq %rdx, 8(%rsp)
	movq %rbp, %rdi

	call ftrace_return_to_handler

	movq %rax, %rdi
	movq 8(%rsp), %rdx
	movq (%rsp), %rax
	addq $24, %rsp
	JMP_NOSPEC rdi
SYM_CODE_END(return_to_handler)
#endif