// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
struct fregs_offset {
        const char *name;
        int offset;
};

#define FREGS_OFFSET(n, field)                          \
{                                                       \
        .name = n,                                      \
        .offset = offsetof(struct ftrace_regs, field),  \
}

static const struct fregs_offset fregs_offsets[] = {
        FREGS_OFFSET("x0", regs[0]),
        FREGS_OFFSET("x1", regs[1]),
        FREGS_OFFSET("x2", regs[2]),
        FREGS_OFFSET("x3", regs[3]),
        FREGS_OFFSET("x4", regs[4]),
        FREGS_OFFSET("x5", regs[5]),
        FREGS_OFFSET("x6", regs[6]),
        FREGS_OFFSET("x7", regs[7]),
        FREGS_OFFSET("x8", regs[8]),

        FREGS_OFFSET("x29", fp),
        FREGS_OFFSET("x30", lr),
        FREGS_OFFSET("lr", lr),

        FREGS_OFFSET("sp", sp),
        FREGS_OFFSET("pc", pc),
};

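/*
 * Translate a register name (e.g. "x0", "lr", "sp") into its byte offset
 * within struct ftrace_regs, for callers that look registers up by name.
 * Returns -EINVAL when the name is not recognised.
 */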
int ftrace_regs_query_register_offset(const char *name)
{
        for (int i = 0; i < ARRAY_SIZE(fregs_offsets); i++) {
                const struct fregs_offset *roff = &fregs_offsets[i];
                if (!strcmp(roff->name, name))
                        return roff->offset;
        }

        return -EINVAL;
}
#endif

/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, the instruction being replaced is first checked
 * against 'old'.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
                              bool validate)
{
        u32 replaced;

        /*
         * Note:
         * We are paranoid about modifying text, as if a bug were to happen, it
         * could cause us to read or write to someplace that could cause harm.
         * Carefully read and modify the code with aarch64_insn_*() which uses
         * probe_kernel_*(), and make sure what we read is what we expected it
         * to be before modifying it.
         */
        if (validate) {
                if (aarch64_insn_read((void *)pc, &replaced))
                        return -EFAULT;

                if (replaced != old)
                        return -EINVAL;
        }
        if (aarch64_insn_patch_text_nosync((void *)pc, new))
                return -EPERM;

        return 0;
}

/*
 * Replace tracer function in ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long pc;
        u32 new;

        pc = (unsigned long)ftrace_call;
        new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
                                          AARCH64_INSN_BRANCH_LINK);

        return ftrace_modify_code(pc, 0, new, false);
}

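/*
 * Return the module's PLT entry for the ftrace trampoline when 'addr' is
 * FTRACE_ADDR, or NULL when no such PLT entry is available (e.g. when
 * module PLT support is not built in).
 */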
static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
#ifdef CONFIG_ARM64_MODULE_PLTS
        struct plt_entry *plt = mod->arch.ftrace_trampolines;

        if (addr == FTRACE_ADDR)
                return &plt[FTRACE_PLT_IDX];
#endif
        return NULL;
}

/*
 * Find the address the callsite must branch to in order to reach '*addr'.
 *
 * Due to the limited range of 'BL' instructions, modules may be placed too far
 * away to branch directly and must use a PLT.
 *
 * Returns true when '*addr' contains a reachable target address, or has been
 * modified to contain a PLT address. Returns false otherwise.
 */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
                                      struct module *mod,
                                      unsigned long *addr)
{
        unsigned long pc = rec->ip;
        long offset = (long)*addr - (long)pc;
        struct plt_entry *plt;

        /*
         * When the target is within range of the 'BL' instruction, use 'addr'
         * as-is and branch to that directly.
         */
        if (offset >= -SZ_128M && offset < SZ_128M)
                return true;

        /*
         * When the target is outside of the range of a 'BL' instruction, we
         * must use a PLT to reach it. We can only place PLTs for modules, and
         * only when module PLT support is built-in.
         */
        if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
                return false;

        /*
         * 'mod' is only set at module load time, but if we end up
         * dealing with an out-of-range condition, we can assume it
         * is due to a module being loaded far away from the kernel.
         *
         * NOTE: __module_text_address() must be called with preemption
         * disabled, but we can rely on ftrace_lock to ensure that 'mod'
         * retains its validity throughout the remainder of this code.
         */
        if (!mod) {
                preempt_disable();
                mod = __module_text_address(pc);
                preempt_enable();
        }

        if (WARN_ON(!mod))
                return false;

        plt = get_ftrace_plt(mod, *addr);
        if (!plt) {
                pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
                return false;
        }

        *addr = (unsigned long)plt;
        return true;
}

/*
 * Turn on the call to ftrace_caller() in instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long pc = rec->ip;
        u32 old, new;

        if (!ftrace_find_callable_addr(rec, NULL, &addr))
                return -EINVAL;

        old = aarch64_insn_gen_nop();
        new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

        return ftrace_modify_code(pc, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
 * The compiler has inserted two NOPs before the regular function prologue.
 * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
 * and x9-x18 are free for our use.
 *
 * At runtime we want to be able to swing a single NOP <-> BL to enable or
 * disable the ftrace call. The BL requires us to save the original LR value,
 * so here we insert a <MOV X9, LR> over the first NOP so the instructions
 * before the regular prologue are:
 *
 * | Compiled | Disabled   | Enabled    |
 * +----------+------------+------------+
 * | NOP      | MOV X9, LR | MOV X9, LR |
 * | NOP      | NOP        | BL <entry> |
 *
 * The LR value will be recovered by ftrace_regs_entry, and restored into LR
 * before returning to the regular function prologue. When a function is not
 * being traced, the MOV is not harmful given x9 is not live per the AAPCS.
 *
 * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of
 * the BL.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
        unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
        u32 old, new;

        old = aarch64_insn_gen_nop();
        new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
                                        AARCH64_INSN_REG_LR,
                                        AARCH64_INSN_VARIANT_64BIT);
        return ftrace_modify_code(pc, old, new, true);
}
#endif

/*
 * Turn off the call to ftrace_caller() in instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
{
        unsigned long pc = rec->ip;
        u32 old = 0, new;

        new = aarch64_insn_gen_nop();

        /*
         * When using mcount, callsites in modules may have been initialized to
         * call an arbitrary module PLT (which redirects to the _mcount stub)
         * rather than the ftrace PLT we'll use at runtime (which redirects to
         * the ftrace trampoline). We can ignore the old PLT when initializing
         * the callsite.
         *
         * Note: 'mod' is only set at module load time.
         */
        if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) &&
            IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
                return aarch64_insn_patch_text_nosync((void *)pc, new);
        }

        if (!ftrace_find_callable_addr(rec, mod, &addr))
                return -EINVAL;

        old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

        return ftrace_modify_code(pc, old, new, true);
}

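/*
 * FTRACE_MAY_SLEEP lets the core ftrace code cond_resched() between
 * callsite updates when patching large numbers of callsites.
 */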
void arch_ftrace_update_code(int command)
{
        command |= FTRACE_MAY_SLEEP;
        ftrace_modify_all_code(command);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * The function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to the parent. For this purpose, this function is called
 * in _mcount() or ftrace_caller() to replace the return address (*parent) on
 * the call stack with return_to_handler.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
                           unsigned long frame_pointer)
{
        unsigned long return_hooker = (unsigned long)&return_to_handler;
        unsigned long old;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /*
         * Note:
         * No protection against faulting at *parent, which may be seen
         * on other archs. It's unlikely on AArch64.
         */
        old = *parent;

        if (!function_graph_enter(old, self_addr, frame_pointer,
                                  (void *)frame_pointer)) {
                *parent = return_hooker;
        }
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
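/*
 * With DYNAMIC_FTRACE_WITH_ARGS, the graph tracer is entered through a
 * regular ftrace_ops callback, so the return address can be hooked via the
 * LR and FP values saved in ftrace_regs rather than by patching a separate
 * ftrace_graph_caller site (see the #else branch below).
 */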
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        prepare_ftrace_return(ip, &fregs->lr, fregs->fp);
}
#else
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
        unsigned long pc = (unsigned long)&ftrace_graph_call;
        u32 branch, nop;

        branch = aarch64_insn_gen_branch_imm(pc,
                                             (unsigned long)ftrace_graph_caller,
                                             AARCH64_INSN_BRANCH_NOLINK);
        nop = aarch64_insn_gen_nop();

        if (enable)
                return ftrace_modify_code(pc, nop, branch, true);
        else
                return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */