/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2017 Steven Rostedt, VMware Inc.
 */

#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/segment.h>
#include <asm/export.h>
#include <asm/ftrace.h>
#include <asm/nospec-branch.h>

/*
 * With -mfentry the compiler emits "call __fentry__" as the very first
 * instruction of each traced function (before any frame setup); otherwise
 * it emits the classic "call mcount" after the prologue.  function_hook
 * names whichever entry point this build uses; the symbol is exported so
 * modules built with the same compiler option can link against it.
 */
#ifdef CC_USING_FENTRY
# define function_hook	__fentry__
EXPORT_SYMBOL(__fentry__)
#else
# define function_hook	mcount
EXPORT_SYMBOL(mcount)
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */
#if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER)
# define USING_FRAME_POINTER
#endif

/*
 * Number of extra 4-byte stack slots the synthesized frame adds below
 * the saved ip; used when computing offsets into the stack in
 * ftrace_caller below.
 */
#ifdef USING_FRAME_POINTER
# define MCOUNT_FRAME			1	/* using frame = true */
#else
# define MCOUNT_FRAME			0	/* using frame = false */
#endif
33
/*
 * With CONFIG_DYNAMIC_FTRACE the compiler-inserted call sites are patched
 * at runtime (to nops, or to ftrace_caller/ftrace_regs_caller), so the
 * default hook does nothing but return.
 */
ENTRY(function_hook)
	ret
END(function_hook)
37
/*
 * ftrace_caller - dynamic ftrace trampoline (no pt_regs)
 *
 * Patched in at each traced function's call site.  Sets up the arguments
 * for the C tracer called through the patchable ftrace_call site:
 *   %eax = traced function ip (call-site return address - MCOUNT_INSN_SIZE)
 *   %edx = parent (caller's) return ip
 *   %ecx = function_trace_op
 *   pushed $0 = NULL pt_regs pointer (this variant does not save regs)
 */
ENTRY(ftrace_caller)

#ifdef USING_FRAME_POINTER
# ifdef CC_USING_FENTRY
	/*
	 * Frame pointers are of ip followed by bp.
	 * Since fentry is an immediate jump, we are left with
	 * parent-ip, function-ip. We need to add a frame with
	 * parent-ip followed by ebp.
	 */
	pushl	4(%esp)				/* parent ip */
	pushl	%ebp
	movl	%esp, %ebp
	pushl	2*4(%esp)			/* function ip */
# endif
	/* For mcount, the function ip is directly above */
	pushl	%ebp
	movl	%esp, %ebp
#endif
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	$0				/* Pass NULL as regs pointer */

#ifdef USING_FRAME_POINTER
	/* Load parent ebp into edx */
	movl	4*4(%esp), %edx
#else
	/* There's no frame pointer, load the appropriate stack addr instead */
	lea	4*4(%esp), %edx
#endif

	movl	(MCOUNT_FRAME+4)*4(%esp), %eax	/* load the rip */
	/* Get the parent ip */
	movl	4(%edx), %edx			/* edx has ebp */

	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call	ftrace_stub

	addl	$4, %esp			/* skip NULL pointer */
	popl	%edx
	popl	%ecx
	popl	%eax
#ifdef USING_FRAME_POINTER
	popl	%ebp
# ifdef CC_USING_FENTRY
	addl	$4, %esp			/* skip function ip */
	popl	%ebp				/* this is the orig bp */
	addl	$4, %esp			/* skip parent ip */
# endif
#endif
.Lftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp	ftrace_stub
#endif

/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
	ret
END(ftrace_caller)
104
/*
 * ftrace_regs_caller - dynamic ftrace trampoline with full pt_regs
 *
 * Like ftrace_caller, but builds a pt_regs frame on the stack and passes
 * its address as the 4th argument so the tracer (e.g. kprobes) may inspect
 * and modify register state.
 */
ENTRY(ftrace_regs_caller)
	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is. We move the return ip into the
	 * regs->ip location, and move flags into the return ip location.
	 */
	pushl	$__KERNEL_CS
	pushl	4(%esp)				/* Save the return ip */
	pushl	$0				/* Load 0 into orig_ax */
	pushl	%gs
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax

	/* Get flags and place them into the return ip slot */
	pushf
	popl	%eax
	movl	%eax, 8*4(%esp)

	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx

	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
#ifdef CC_USING_FENTRY
	movl	15*4(%esp), %edx		/* Load parent ip (2nd parameter) */
#else
	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
#endif
	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
	pushl	%esp				/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call	ftrace_stub

	addl	$4, %esp			/* Skip pt_regs */

	/* restore flags */
	push	14*4(%esp)
	popf

	/* Move return ip back to its original location */
	movl	12*4(%esp), %eax
	movl	%eax, 14*4(%esp)

	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
	popl	%ds
	popl	%es
	popl	%fs
	popl	%gs

	/* use lea to not affect flags */
	lea	3*4(%esp), %esp			/* Skip orig_ax, ip and cs */

	jmp	.Lftrace_ret
#else /* ! CONFIG_DYNAMIC_FTRACE */
174
/*
 * Static (non-dynamic) ftrace hook: every traced function calls here
 * unconditionally, so we must dispatch by hand.  If a function tracer is
 * registered (ftrace_trace_function != ftrace_stub) call it; otherwise,
 * if graph tracing is active, fall through to ftrace_graph_caller; else
 * just return via ftrace_stub.
 */
ENTRY(function_hook)
	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
	jnz	.Ltrace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
.Ltrace:
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax			/* traced function's return ip */
	movl	0x4(%ebp), %edx			/* parent ip from caller's frame */
	subl	$MCOUNT_INSN_SIZE, %eax

	/* Indirect call through a retpoline-safe thunk */
	movl	ftrace_trace_function, %ecx
	CALL_NOSPEC %ecx

	popl	%edx
	popl	%ecx
	popl	%eax
	jmp	ftrace_stub
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
210
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * ftrace_graph_caller - hook function entry for the graph tracer
 *
 * Calls prepare_ftrace_return(ip (%eax), &parent-ret-addr (%edx),
 * frame pointer (%ecx)) so the return address can be hijacked to
 * return_to_handler below.
 */
ENTRY(ftrace_graph_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	3*4(%esp), %eax
	/* Even with frame pointers, fentry doesn't have one here */
#ifdef CC_USING_FENTRY
	lea	4*4(%esp), %edx
	movl	$0, %ecx
#else
	lea	0x4(%ebp), %edx
	movl	(%ebp), %ecx
#endif
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
	popl	%edx
	popl	%ecx
	popl	%eax
	ret
END(ftrace_graph_caller)
232
/*
 * return_to_handler - substituted return address for graph-traced calls
 *
 * ftrace_return_to_handler() records the function exit and returns the
 * original return address, which we jump to through a retpoline-safe
 * thunk.  %eax/%edx are preserved because they may hold the traced
 * function's return value.
 */
.globl return_to_handler
return_to_handler:
	pushl	%eax
	pushl	%edx
#ifdef CC_USING_FENTRY
	movl	$0, %eax
#else
	movl	%ebp, %eax
#endif
	call	ftrace_return_to_handler
	movl	%eax, %ecx
	popl	%edx
	popl	%eax
	JMP_NOSPEC %ecx
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2017 Steven Rostedt, VMware Inc.
 */

#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/segment.h>
#include <asm/export.h>
#include <asm/ftrace.h>
#include <asm/nospec-branch.h>
#include <asm/frame.h>
#include <asm/asm-offsets.h>

/*
 * Number of extra 4-byte stack slots the synthesized frame adds below
 * the saved ip; used when computing offsets into the stack in
 * ftrace_caller below.
 */
#ifdef CONFIG_FRAME_POINTER
# define MCOUNT_FRAME			1	/* using frame = true */
#else
# define MCOUNT_FRAME			0	/* using frame = false */
#endif
20
/*
 * Default __fentry__: the compiler-inserted call sites are live-patched
 * at runtime, so the unpatched hook just returns.  Exported so modules
 * compiled with -mfentry can link.
 */
SYM_FUNC_START(__fentry__)
	ret
SYM_FUNC_END(__fentry__)
EXPORT_SYMBOL(__fentry__)
25
/*
 * ftrace_caller - dynamic ftrace trampoline (no pt_regs)
 *
 * Patched in at each traced function's fentry site.  Sets up the
 * arguments for the C tracer called through the patchable ftrace_call
 * site:
 *   %eax = traced function ip (call-site return address - MCOUNT_INSN_SIZE)
 *   %edx = parent (caller's) return ip
 *   %ecx = function_trace_op
 *   pushed $0 = NULL pt_regs pointer (this variant does not save regs)
 */
SYM_CODE_START(ftrace_caller)

#ifdef CONFIG_FRAME_POINTER
	/*
	 * Frame pointers are of ip followed by bp.
	 * Since fentry is an immediate jump, we are left with
	 * parent-ip, function-ip. We need to add a frame with
	 * parent-ip followed by ebp.
	 */
	pushl	4(%esp)				/* parent ip */
	pushl	%ebp
	movl	%esp, %ebp
	pushl	2*4(%esp)			/* function ip */

	/* For mcount, the function ip is directly above */
	pushl	%ebp
	movl	%esp, %ebp
#endif
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	$0				/* Pass NULL as regs pointer */

#ifdef CONFIG_FRAME_POINTER
	/* Load parent ebp into edx */
	movl	4*4(%esp), %edx
#else
	/* There's no frame pointer, load the appropriate stack addr instead */
	lea	4*4(%esp), %edx
#endif

	movl	(MCOUNT_FRAME+4)*4(%esp), %eax	/* load the rip */
	/* Get the parent ip */
	movl	4(%edx), %edx			/* edx has ebp */

	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call	ftrace_stub

	addl	$4, %esp			/* skip NULL pointer */
	popl	%edx
	popl	%ecx
	popl	%eax
#ifdef CONFIG_FRAME_POINTER
	popl	%ebp
	addl	$4, %esp			/* skip function ip */
	popl	%ebp				/* this is the orig bp */
	addl	$4, %esp			/* skip parent ip */
#endif
.Lftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp	ftrace_stub
#endif

/* This is weak to keep gas from relaxing the jumps */
SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
	ret
SYM_CODE_END(ftrace_caller)
89
SYM_CODE_START(ftrace_regs_caller)
	/*
	 * We're here from an mcount/fentry CALL, and the stack frame looks like:
	 *
	 *  <previous context>
	 *  RET-IP
	 *
	 * The purpose of this function is to call out in an emulated INT3
	 * environment with a stack frame like:
	 *
	 *  <previous context>
	 *  gap / RET-IP
	 *  gap
	 *  gap
	 *  gap
	 *  pt_regs
	 *
	 * We do _NOT_ restore: ss, flags, cs, gs, fs, es, ds
	 */
	subl	$3*4, %esp			# RET-IP + 3 gaps
	pushl	%ss				# ss
	pushl	%esp				# points at ss
	addl	$5*4, (%esp)			# make it point at <previous context>
	pushfl					# flags
	pushl	$__KERNEL_CS			# cs
	pushl	7*4(%esp)			# ip <- RET-IP
	pushl	$0				# orig_eax

	pushl	%gs
	pushl	%fs
	pushl	%es
	pushl	%ds

	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx

	ENCODE_FRAME_POINTER

	movl	PT_EIP(%esp), %eax		# 1st argument: IP
	subl	$MCOUNT_INSN_SIZE, %eax
	movl	21*4(%esp), %edx		# 2nd argument: parent ip
	movl	function_trace_op, %ecx		# 3rd argument: ftrace_pos
	pushl	%esp				# 4th argument: pt_regs

SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
	call	ftrace_stub

	addl	$4, %esp			# skip 4th argument

	/* place IP below the new SP */
	movl	PT_OLDESP(%esp), %eax
	movl	PT_EIP(%esp), %ecx
	movl	%ecx, -4(%eax)

	/* place EAX below that */
	movl	PT_EAX(%esp), %ecx
	movl	%ecx, -8(%eax)

	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp

	/*
	 * Switch to the (possibly tracer-modified) stack; IP and EAX were
	 * staged just below it above, so popping EAX and returning resumes
	 * at the (possibly updated) regs->ip.
	 */
	lea	-8(%eax), %esp
	popl	%eax

	jmp	.Lftrace_ret
SYM_CODE_END(ftrace_regs_caller)
165
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * ftrace_graph_caller - hook function entry for the graph tracer
 *
 * Calls prepare_ftrace_return(ip (%eax), &parent-ret-addr (%edx),
 * frame pointer (%ecx, 0 for fentry)) so the return address can be
 * hijacked to return_to_handler below.
 */
SYM_CODE_START(ftrace_graph_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	3*4(%esp), %eax
	/* Even with frame pointers, fentry doesn't have one here */
	lea	4*4(%esp), %edx
	movl	$0, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
	popl	%edx
	popl	%ecx
	popl	%eax
	ret
SYM_CODE_END(ftrace_graph_caller)
182
/*
 * return_to_handler - substituted return address for graph-traced calls
 *
 * ftrace_return_to_handler() records the function exit and returns the
 * original return address, which we jump to through a retpoline-safe
 * thunk.  %eax/%edx are preserved because they may hold the traced
 * function's return value.
 */
.globl return_to_handler
return_to_handler:
	pushl	%eax
	pushl	%edx
	movl	$0, %eax
	call	ftrace_return_to_handler
	movl	%eax, %ecx
	popl	%edx
	popl	%eax
	JMP_NOSPEC ecx
#endif