/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_DYNAMIC_FTRACE
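/*
 * Check that the instructions at @hook_pos match @expected, or two NOP4s
 * when @expected is NULL, so we never patch over text that is not in the
 * state we think it is in.
 */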
static int ftrace_check_current_call(unsigned long hook_pos,
				     unsigned int *expected)
{
	unsigned int replaced[2];
	unsigned int nops[2] = {NOP4, NOP4};

	/* we expect nops at the hook position */
	if (!expected)
		expected = nops;

	/*
	 * Read the text we want to modify;
	 * return must be -EFAULT on read error
	 */
	if (probe_kernel_read(replaced, (void *)hook_pos, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/*
	 * Make sure it is what we expect it to be;
	 * return must be -EINVAL on failed comparison
	 */
	if (memcmp(expected, replaced, sizeof(replaced))) {
		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
		       (void *)hook_pos, expected[0], expected[1], replaced[0],
		       replaced[1]);
		return -EINVAL;
	}

	return 0;
}

static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};
	int ret = 0;

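	/*
	 * make_call() builds an auipc+jalr pair that reaches @target from
	 * @hook_pos; together the two instructions give the patched site
	 * a full 32-bit PC-relative range (roughly +-2GB).
	 */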
	make_call(hook_pos, target, call);

	/* replace the auipc-jalr pair at once */
	ret = probe_kernel_write((void *)hook_pos, enable ? call : nops,
				 MCOUNT_INSN_SIZE);
	/* return must be -EPERM on write error */
	if (ret)
		return -EPERM;

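	/*
	 * Make sure the new instructions have hit memory before the icache
	 * flush below, so no hart keeps fetching the stale copy.
	 */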
	smp_mb();
	flush_icache_range(hook_pos, hook_pos + MCOUNT_INSN_SIZE);

	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int ret = ftrace_check_current_call(rec->ip, NULL);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, true);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned int call[2];
	int ret;

	make_call(rec->ip, addr, call);
	ret = ftrace_check_current_call(rec->ip, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, false);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				       (unsigned long)func, true);
	if (!ret) {
		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
					   (unsigned long)func, true);
	}

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned int call[2];
	int ret;

	make_call(rec->ip, old_addr, call);
	ret = ftrace_check_current_call(rec->ip, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, true);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.
 */
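/*
 * Called from the mcount trampoline: swap the caller's saved return
 * address for return_to_handler so the traced function "returns" into
 * the graph tracer, which records the exit and then jumps to the real
 * caller recorded on the ret_stack.
 */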
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;
	struct ftrace_graph_ent trace;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	if (!ftrace_graph_entry(&trace))
		return;

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer, parent);
	if (err == -EBUSY)
		return;
	*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned int call[2];
	static int init_graph = 1;
	int ret;

	make_call(&ftrace_graph_call, &ftrace_stub, call);

	/*
	 * When enabling the graph tracer for the first time, ftrace_graph_call
	 * should contain a call to ftrace_stub.  Once it has been disabled,
	 * the 8 bytes at that position become NOPs.
	 */
	if (init_graph) {
		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
						call);
		init_graph = 0;
	} else {
		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
						NULL);
	}

	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned int call[2];
	int ret;

	make_call(&ftrace_graph_call, &prepare_ftrace_return, call);

	/*
	 * This is to make sure that ftrace_enable_ftrace_graph_caller
	 * did the right thing.
	 */
	ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
					call);

	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE
void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);

	/*
	 * The code sequences we use for ftrace can't be patched while the
	 * kernel is running, so we need to use stop_machine() to modify them
	 * for now.  This doesn't play nice with text_mutex, so we use this
	 * flag to elide the check.
	 */
	riscv_patch_in_stop_machine = true;
}

void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	riscv_patch_in_stop_machine = false;
	mutex_unlock(&text_mutex);
}

static int ftrace_check_current_call(unsigned long hook_pos,
				     unsigned int *expected)
{
	unsigned int replaced[2];
	unsigned int nops[2] = {NOP4, NOP4};

	/* we expect nops at the hook position */
	if (!expected)
		expected = nops;

	/*
	 * Read the text we want to modify;
	 * return must be -EFAULT on read error
	 */
	if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
			MCOUNT_INSN_SIZE))
		return -EFAULT;

	/*
	 * Make sure it is what we expect it to be;
	 * return must be -EINVAL on failed comparison
	 */
	if (memcmp(expected, replaced, sizeof(replaced))) {
		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
		       (void *)hook_pos, expected[0], expected[1], replaced[0],
		       replaced[1]);
		return -EINVAL;
	}

	return 0;
}

static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable, bool ra)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};

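	/*
	 * Patched sites at function entry link through t0 so the caller's
	 * ra is preserved, while call sites inside the ftrace trampoline
	 * itself (ftrace_call, ftrace_graph_call) link through ra as usual;
	 * @ra selects the matching auipc+jalr encoding.
	 */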
	if (ra)
		make_call_ra(hook_pos, target, call);
	else
		make_call_t0(hook_pos, target, call);

	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
	if (patch_insn_write((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int call[2];

	make_call_t0(rec->ip, addr, call);

	if (patch_insn_write((void *)rec->ip, call, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned int nops[2] = {NOP4, NOP4};

	if (patch_insn_write((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

/*
 * This is called early on, and isn't wrapped by
 * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
 * text_mutex, which triggers a lockdep failure.  SMP isn't running so we could
 * just directly poke the text, but it's simpler to just take the lock
 * ourselves.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	int out;

	mutex_lock(&text_mutex);
	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	mutex_unlock(&text_mutex);

	return out;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				       (unsigned long)func, true, true);

	return ret;
}

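/*
 * Argument block for stop_machine(): @command is handed to
 * ftrace_modify_all_code(), @cpu_count synchronizes the patching CPU
 * with the CPUs spinning in __ftrace_modify_code().
 */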
struct ftrace_modify_param {
	int command;
	atomic_t cpu_count;
};

static int __ftrace_modify_code(void *data)
{
	struct ftrace_modify_param *param = data;

	if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
		ftrace_modify_all_code(param->command);
		/*
		 * Make sure the patching store is effective *before* we
		 * increment the counter which releases all waiting CPUs
		 * by using the release variant of atomic increment.  The
		 * release pairs with the call to local_flush_icache_all()
		 * on the waiting CPU.
		 */
		atomic_inc_return_release(&param->cpu_count);
	} else {
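		/*
		 * Wait for the patching CPU's release increment: the count
		 * only exceeds num_online_cpus() once the new text is in
		 * place.
		 */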
		while (atomic_read(&param->cpu_count) <= num_online_cpus())
			cpu_relax();

		local_flush_icache_all();
	}

	return 0;
}

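/*
 * Run __ftrace_modify_code() on all online CPUs; only the last CPU to
 * arrive at the rendezvous actually patches the text, the rest just
 * flush their local icache afterwards.
 */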
void arch_ftrace_update_code(int command)
{
	struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };

	stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned int call[2];
	unsigned long caller = rec->ip;
	int ret;

	make_call_t0(caller, old_addr, call);
	ret = ftrace_check_current_call(caller, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(caller, addr, true, false);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.
 */
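/*
 * Swap the caller's saved return address for return_to_handler so the
 * traced function returns into the graph tracer, which records the exit
 * before jumping back to the real caller.
 */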
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
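/*
 * With DYNAMIC_FTRACE_WITH_ARGS the graph tracer enters through the
 * ordinary ftrace_ops path, so the return-address slot and frame pointer
 * come straight from the saved register state instead of a dedicated,
 * separately patched ftrace_graph_call site.
 */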
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	prepare_ftrace_return(&arch_ftrace_regs(fregs)->ra, ip, arch_ftrace_regs(fregs)->s0);
}
#else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
extern void ftrace_graph_call(void);
int ftrace_enable_ftrace_graph_caller(void)
{
	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, true, true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, false, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */