/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licencing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched with NOP till they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/stop_machine.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/set_memory.h>
#include <asm/patch.h>

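/*
 * Note that the "NOP" used to disable a call site is not a literal no-op.
 * With -pg the compiler emits (roughly, assuming the __gnu_mcount_nc call
 * sequence used on ARM):
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * so the instruction that replaces the branch must pop lr again to keep
 * the stack balanced while tracing is disabled.
 */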
#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xf85deb04	/* pop.w {lr} */
#else
#define	NOP		0xe8bd4000	/* pop {lr} */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

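/*
 * ftrace_modify_all_code() is run via stop_machine() so that no CPU can
 * execute an instruction while it is being rewritten.
 */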
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

void arch_ftrace_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

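/*
 * The instruction that stands in for a disabled call site (see NOP above),
 * and the hook for adjusting the branch target; no adjustment is needed
 * here, so adjust_address() returns addr unchanged.
 */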
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	return addr;
}

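/*
 * No preparation is needed before patching; __patch_text() below is
 * expected to handle writing to read-only kernel text on its own. After
 * patching, any stale TLB entries picked up while the machine was stopped
 * are flushed.
 */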
int ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	/* Make sure any TLB misses during machine stop are cleared. */
	flush_tlb_all();
	return 0;
}

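/*
 * Build the branch-and-link instruction that redirects the call site at
 * pc to the trampoline at addr.
 */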
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return arm_gen_branch_link(pc, addr);
}

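/*
 * Replace the instruction at pc with new. When validate is set, the
 * instruction currently in memory must match old (converted to its
 * in-memory representation first), otherwise the patch is refused.
 */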
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
		old = __opcode_to_mem_thumb32(old);
	else
		old = __opcode_to_mem_arm(old);

	if (validate) {
		if (copy_from_kernel_nofault(&replaced, (void *)pc,
					     MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	__patch_text((void *)pc, new);

	return 0;
}

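/*
 * Retarget the call instruction at the ftrace_call site (and, with
 * CONFIG_DYNAMIC_FTRACE_WITH_REGS, the ftrace_regs_call site) in the
 * trampoline so that it invokes the newly selected tracer function.
 */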
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret) {
		pc = (unsigned long)&ftrace_regs_call;
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}

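/*
 * Enable tracing of one call site: replace the NOP at rec->ip with a
 * branch-and-link to addr, after validating that the NOP is really there.
 */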
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(rec);

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

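/*
 * Switch an already-enabled call site from old_addr to addr, e.g. when
 * moving between the plain and the register-saving trampolines.
 */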
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, adjust_address(rec, old_addr));

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

#endif

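/*
 * Disable tracing of one call site: replace the branch-and-link to addr
 * with the NOP, after validating that the branch is what is currently
 * there.
 */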
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

	old = ftrace_call_replace(ip, adjust_address(rec, addr));
	new = ftrace_nop_replace(rec);
	ret = ftrace_modify_code(ip, old, new, true);

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
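/*
 * Hijack the return address of the instrumented function: hand the real
 * return address to the function-graph core and make the function return
 * to return_to_handler instead, so its exit can be traced as well.
 */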
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = return_hooker;

	if (function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = old;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
extern unsigned long ftrace_graph_regs_call;
extern void ftrace_graph_regs_caller(void);

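/*
 * Patch one call site inside the trampoline: a branch to func enables it,
 * restoring the "mov r0, r0" NOP disables it.
 */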
static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}

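/*
 * Enable or disable the branch to ftrace_graph_caller (and, with
 * CONFIG_DYNAMIC_FTRACE_WITH_REGS, to ftrace_graph_regs_caller) inside
 * the trampoline.
 */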
static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
					     ftrace_graph_regs_caller,
					     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */