/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/ftrace.h
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */
#ifndef __ASM_FTRACE_H
#define __ASM_FTRACE_H

#include <asm/insn.h>

#define HAVE_FUNCTION_GRAPH_FP_TEST

/*
 * HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a
 * "return address pointer" which can be used to uniquely identify a return
 * address which has been overwritten.
 *
 * On arm64 we use the address of the caller's frame record, which remains the
 * same for the lifetime of the instrumented function, unlike the return
 * address in the LR.
 */
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#else
#define MCOUNT_ADDR ((unsigned long)_mcount)
#endif

/* The BL at the callsite's adjusted rec->ip */
#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE

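/*
 * One module PLT slot is reserved for ftrace so that a callsite can still
 * reach ftrace_caller when it is out of direct BL branch range.
 */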
#define FTRACE_PLT_IDX 0
#define NR_FTRACE_PLTS 1

/*
 * Currently, gcc tends to save the link register after the local variables
 * on the stack. This causes the max stack tracer to report the function
 * frame sizes for the wrong functions. Defining
 * ARCH_FTRACE_SHIFT_STACK_TRACER tells the stack tracer to expect to find
 * the return address on the stack after the local variables have been set
 * up.
 *
 * Note that this may change in the future, in which case we will need to
 * deal with it.
 */
#define ARCH_FTRACE_SHIFT_STACK_TRACER 1

#ifndef __ASSEMBLY__
#include <linux/compat.h>

extern void _mcount(unsigned long);
extern void *return_address(unsigned int);

struct dyn_arch_ftrace {
	/* No extra data needed for arm64 */
};

extern unsigned long ftrace_graph_call;

extern void return_to_handler(void);

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/*
	 * Adjust addr to point at the BL in the callsite.
	 * See ftrace_init_nop() for the callsite sequence.
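	 *
	 * With -fpatchable-function-entry=2 the compiler emits two NOPs at
	 * function entry and records the address of the first one, so the
	 * callsite looks like:
	 *
	 *	func:
	 *		NOP	// patched to MOV X9, LR
	 *		NOP	// patched to BL ftrace_caller; adjusted addr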
	 */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
		return addr + AARCH64_INSN_SIZE;
	/*
	 * addr is the address of the mcount call instruction.
	 * recordmcount does the necessary offset calculation.
	 */
	return addr;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
struct dyn_ftrace;
struct ftrace_ops;

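/*
 * Only a partial register set is saved on entry, so there is no full
 * struct pt_regs to hand back to callers.
 */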
#define arch_ftrace_get_regs(regs) NULL

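/*
 * This matches the register save area built by the ftrace_caller
 * trampoline; __unused pads x0-x8 so that the following fields remain
 * 16-byte aligned.
 */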
struct ftrace_regs {
	/* x0 - x8 */
	unsigned long regs[9];
	unsigned long __unused;

	unsigned long fp;
	unsigned long lr;

	unsigned long sp;
	unsigned long pc;
};

static __always_inline unsigned long
ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
{
	return fregs->pc;
}

static __always_inline void
ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
				    unsigned long pc)
{
	fregs->pc = pc;
}

static __always_inline unsigned long
ftrace_regs_get_stack_pointer(const struct ftrace_regs *fregs)
{
	return fregs->sp;
}

static __always_inline unsigned long
ftrace_regs_get_argument(struct ftrace_regs *fregs, unsigned int n)
{
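	/* Per AAPCS64, the first eight arguments are passed in x0-x7 */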
	if (n < 8)
		return fregs->regs[n];
	return 0;
}

static __always_inline unsigned long
ftrace_regs_get_return_value(const struct ftrace_regs *fregs)
{
	return fregs->regs[0];
}

static __always_inline void
ftrace_regs_set_return_value(struct ftrace_regs *fregs,
			     unsigned long ret)
{
	fregs->regs[0] = ret;
}

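/*
 * Returning to the saved LR skips the body of the traced function, so the
 * call effectively returns straight to the caller.
 */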
static __always_inline void
ftrace_override_function_with_return(struct ftrace_regs *fregs)
{
	fregs->pc = fregs->lr;
}

int ftrace_regs_query_register_offset(const char *name);

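/*
 * The two-instruction callsite needs arch-specific initialisation, so the
 * generic NOP-patching path is overridden (see arch/arm64/kernel/ftrace.c).
 */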
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop

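/*
 * With FTRACE_WITH_ARGS the function graph tracer is entered through this
 * ftrace_ops callback rather than by patching ftrace_graph_call.
 */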
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs);
#define ftrace_graph_func ftrace_graph_func
#endif

#define ftrace_return_address(n) return_address(n)

/*
 * Because AArch32 mode does not share the same syscall table with AArch64,
 * tracing compat syscalls may result in bogus syscalls being reported, or
 * even a hang, so just do not trace them.
 * See kernel/trace/trace_syscalls.c
 *
 * x86 code says:
 * If the user really wants these, then they should use the
 * raw syscall tracepoints with filtering.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return is_compat_task();
}

#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME

static inline bool arch_syscall_match_sym_name(const char *sym,
					       const char *name)
{
	/*
	 * Since all syscall functions have the __arm64_ prefix, we must skip
	 * its eight characters. However, as described above, compat syscalls
	 * are ignored, so we don't care about the __arm64_compat_ prefix
	 * here.
	 */
	return !strcmp(sym + 8, name);
}
#endif /* ifndef __ASSEMBLY__ */

#endif /* __ASM_FTRACE_H */