1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Code for tracing calls in Linux kernel.
4 * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
5 *
6 * based on code for x86 which is:
7 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
8 *
9 * future possible enhancements:
10 * - add CONFIG_STACK_TRACER
11 */
12
13#include <linux/init.h>
14#include <linux/ftrace.h>
15#include <linux/uaccess.h>
16#include <linux/kprobes.h>
17#include <linux/ptrace.h>
18#include <linux/jump_label.h>
19
20#include <asm/assembly.h>
21#include <asm/sections.h>
22#include <asm/ftrace.h>
23#include <asm/text-patching.h>
24
25#define __hot __section(".text.hot")
26
27#ifdef CONFIG_FUNCTION_GRAPH_TRACER
28static DEFINE_STATIC_KEY_FALSE(ftrace_graph_enable);
29
30/*
31 * Hook the return address and push it in the stack of return addrs
32 * in current thread info.
33 */
34static void __hot prepare_ftrace_return(unsigned long *parent,
35 unsigned long self_addr)
36{
37 unsigned long old;
38 extern int parisc_return_to_handler;
39
40 if (unlikely(ftrace_graph_is_dead()))
41 return;
42
43 if (unlikely(atomic_read(¤t->tracing_graph_pause)))
44 return;
45
46 old = *parent;
47
48 if (!function_graph_enter(old, self_addr, 0, NULL))
49 /* activate parisc_return_to_handler() as return point */
50 *parent = (unsigned long) &parisc_return_to_handler;
51}
52#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
53
54static ftrace_func_t ftrace_func;
55
/*
 * C entry point reached from the assembly ftrace trampoline.
 *
 * @parent:     return address of the traced function's caller
 * @self_addr:  address of the traced function
 * @org_sp_gr3: caller's stack pointer as captured by the asm stub
 *              (used to locate the saved %rp slot)
 * @fregs:      register snapshot handed to the tracer callback
 */
asmlinkage void notrace __hot ftrace_function_trampoline(unsigned long parent,
					unsigned long self_addr,
					unsigned long org_sp_gr3,
					struct ftrace_regs *fregs)
{
	extern struct ftrace_ops *function_trace_op;

	/* Dispatch to whatever callback ftrace_update_ftrace_func() set. */
	ftrace_func(self_addr, parent, function_trace_op, fregs);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (static_branch_unlikely(&ftrace_graph_enable)) {
		unsigned long *parent_rp;

		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		/* May rewrite *parent_rp to divert the return path. */
		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}
80
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER)
/*
 * Toggle the static key that gates the graph-tracer path in
 * ftrace_function_trampoline(). Always succeed (return 0).
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	static_key_enable(&ftrace_graph_enable.key);
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	static_key_disable(&ftrace_graph_enable.key);
	return 0;
}
#endif
94
95#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Install @func as the callback invoked from
 * ftrace_function_trampoline(). Always succeeds.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}
101
/*
 * Nothing to do when a call site is redirected: all call sites
 * dispatch through the single ftrace_func pointer, so no
 * per-site code patching is needed here.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}
107
108unsigned long ftrace_call_adjust(unsigned long addr)
109{
110 return addr+(FTRACE_PATCHABLE_FUNCTION_SIZE-1)*4;
111}
112
/*
 * Activate tracing for one call site: overwrite the NOP area that
 * precedes rec->ip with a small trampoline that saves %r1 and
 * branches to @addr.
 *
 * Returns 0 on success, a copy_from_kernel_nofault() error, or
 * -EINVAL if the target area does not consist solely of NOPs
 * (i.e. the site is not in the expected "disabled" state).
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	u32 *tramp;
	int size, ret, i;
	void *ip;

#ifdef CONFIG_64BIT
	/* On 64-bit, @addr is a function descriptor: get the real entry. */
	unsigned long addr2 =
		(unsigned long)dereference_function_descriptor((void *)addr);

	/* Trampoline with the 64-bit target embedded as two data words. */
	u32 ftrace_trampoline[] = {
		0x73c10208, /* std,ma r1,100(sp) */
		0x0c2110c1, /* ldd -10(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		addr2 >> 32,
		addr2 & 0xffffffff,
		0xe83f1fd7, /* b,l,n .-14,r1 */
	};

	/* Variant for patch sites whose address is not 8-byte aligned:
	 * the 64-bit target word must stay doubleword-aligned, so it is
	 * placed first and the code sequence differs accordingly. */
	u32 ftrace_trampoline_unaligned[] = {
		addr2 >> 32,
		addr2 & 0xffffffff,
		0x37de0200, /* ldo 100(sp),sp */
		0x73c13e01, /* std r1,-100(sp) */
		0x34213ff9, /* ldo -4(r1),r1 */
		0x50213fc1, /* ldd -20(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		0xe83f1fcf, /* b,l,n .-20,r1 */
	};

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);
#else
	/* 32-bit trampoline: target address stored in the first word. */
	u32 ftrace_trampoline[] = {
		(u32)addr,
		0x6fc10080, /* stw,ma r1,40(sp) */
		0x48213fd1, /* ldw -18(r1),r1 */
		0xe820c002, /* bv,n r0(r1) */
		0xe83f1fdf, /* b,l,n .-c,r1 */
	};
#endif

	/* The trampoline must fit inside the patchable NOP area. */
	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);

	size = sizeof(ftrace_trampoline);
	tramp = ftrace_trampoline;

#ifdef CONFIG_64BIT
	/* Word- but not doubleword-aligned site: use the other layout. */
	if (rec->ip & 0x4) {
		size = sizeof(ftrace_trampoline_unaligned);
		tramp = ftrace_trampoline_unaligned;
	}
#endif

	/* Trampoline ends 4 bytes past rec->ip, so it starts size-4 before. */
	ip = (void *)(rec->ip + 4 - size);

	ret = copy_from_kernel_nofault(insn, ip, size);
	if (ret)
		return ret;

	/* Refuse to patch unless the whole area is still NOPs. */
	for (i = 0; i < size / 4; i++) {
		if (insn[i] != INSN_NOP)
			return -EINVAL;
	}

	__patch_text_multiple(ip, tramp, size);
	return 0;
}
183
/*
 * Deactivate tracing for one call site by restoring NOPs over the
 * trampoline written by ftrace_make_call().
 *
 * The word at rec->ip is patched first so a CPU racing through the
 * site falls through harmlessly before the rest of the area
 * (everything from 4-sizeof(insn) up to rec->ip) is rewritten.
 * Always returns 0.
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	int i;

	for (i = 0; i < ARRAY_SIZE(insn); i++)
		insn[i] = INSN_NOP;

	__patch_text((void *)rec->ip, INSN_NOP);
	__patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
			      insn, sizeof(insn)-4);
	return 0;
}
198#endif
199
200#ifdef CONFIG_KPROBES_ON_FTRACE
/*
 * ftrace callback that fires kprobes planted on function entries.
 *
 * Runs with a recursion guard (ftrace_test_recursion_trylock), looks
 * up a kprobe registered at @ip, and emulates the probe hit: the
 * pt_regs instruction queue (iaoq) is pointed at @ip for the
 * pre-handler and advanced past it afterwards, so handlers see a
 * consistent "probed instruction" view without a real trap.
 */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	/* Globally disabled (e.g. during kprobe teardown). */
	if (unlikely(kprobe_ftrace_disabled))
		return;

	/* Guard against recursing into ourselves via traced callees. */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	/* Another kprobe is already being handled on this CPU. */
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	/* Present the probe address as the current instruction pair. */
	regs->iaoq[0] = ip;
	regs->iaoq[1] = ip + 4;

	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		/* Step past the probed instruction for the post-handler. */
		regs->iaoq[0] = ip + 4;
		regs->iaoq[1] = ip + 8;

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
248
/*
 * No single-step slot is needed for ftrace-based kprobes:
 * mark the probe as having no copied instruction.
 */
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
254#endif
/*
 * NOTE(review): everything from this point down appears to be a second,
 * older (pre-CONFIG_DYNAMIC_FTRACE) copy of this file that was
 * accidentally appended; it redefines prepare_ftrace_return() and
 * ftrace_function_trampoline() already defined above and cannot compile
 * alongside them. Confirm and remove the duplicate.
 *
 * Code for tracing calls in Linux kernel.
 * Copyright (C) 2009 Helge Deller <deller@gmx.de>
 *
 * based on code for x86 which is:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * future possible enhancements:
 * - add CONFIG_DYNAMIC_FTRACE
 * - add CONFIG_STACK_TRACER
 */
12
13#include <linux/init.h>
14#include <linux/ftrace.h>
15
16#include <asm/sections.h>
17#include <asm/ftrace.h>
18
19
20
21#ifdef CONFIG_FUNCTION_GRAPH_TRACER
22
23/* Add a function return address to the trace stack on thread info.*/
24static int push_return_trace(unsigned long ret, unsigned long long time,
25 unsigned long func, int *depth)
26{
27 int index;
28
29 if (!current->ret_stack)
30 return -EBUSY;
31
32 /* The return trace stack is full */
33 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
34 atomic_inc(¤t->trace_overrun);
35 return -EBUSY;
36 }
37
38 index = ++current->curr_ret_stack;
39 barrier();
40 current->ret_stack[index].ret = ret;
41 current->ret_stack[index].func = func;
42 current->ret_stack[index].calltime = time;
43 *depth = index;
44
45 return 0;
46}
47
48/* Retrieve a function return address to the trace stack on thread info.*/
49static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
50{
51 int index;
52
53 index = current->curr_ret_stack;
54
55 if (unlikely(index < 0)) {
56 ftrace_graph_stop();
57 WARN_ON(1);
58 /* Might as well panic, otherwise we have no where to go */
59 *ret = (unsigned long)
60 dereference_function_descriptor(&panic);
61 return;
62 }
63
64 *ret = current->ret_stack[index].ret;
65 trace->func = current->ret_stack[index].func;
66 trace->calltime = current->ret_stack[index].calltime;
67 trace->overrun = atomic_read(¤t->trace_overrun);
68 trace->depth = index;
69 barrier();
70 current->curr_ret_stack--;
71
72}
73
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long retval0,
				       unsigned long retval1)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	pop_return_trace(&trace, &ret);
	trace.rettime = local_clock();
	ftrace_graph_return(&trace);

	/* A zero return address means the shadow stack was corrupt. */
	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)
			dereference_function_descriptor(&panic);
	}

	/* HACK: we hand over the old functions' return values
	   in %r23 and %r24. Assembly in entry.S will take care
	   and move those to their final registers %ret0 and %ret1 */
	asm( "copy %0, %%r23 \n\t"
	     "copy %1, %%r24 \n" : : "r" (retval0), "r" (retval1) );

	return ret;
}
104
105/*
106 * Hook the return address and push it in the stack of return addrs
107 * in current thread info.
108 */
109void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
110{
111 unsigned long old;
112 unsigned long long calltime;
113 struct ftrace_graph_ent trace;
114
115 if (unlikely(atomic_read(¤t->tracing_graph_pause)))
116 return;
117
118 old = *parent;
119 *parent = (unsigned long)
120 dereference_function_descriptor(&return_to_handler);
121
122 if (unlikely(!__kernel_text_address(old))) {
123 ftrace_graph_stop();
124 *parent = old;
125 WARN_ON(1);
126 return;
127 }
128
129 calltime = local_clock();
130
131 if (push_return_trace(old, calltime,
132 self_addr, &trace.depth) == -EBUSY) {
133 *parent = old;
134 return;
135 }
136
137 trace.func = self_addr;
138
139 /* Only trace if the calling function expects to */
140 if (!ftrace_graph_entry(&trace)) {
141 current->curr_ret_stack--;
142 *parent = old;
143 }
144}
145
146#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
147
148
/*
 * C entry point reached from the assembly ftrace stub (older,
 * non-dynamic variant: dispatches through ftrace_trace_function).
 *
 * @parent:     return address of the traced function's caller
 * @self_addr:  address of the traced function
 * @org_sp_gr3: stack pointer captured by the asm stub in entry.S
 */
void ftrace_function_trampoline(unsigned long parent,
				unsigned long self_addr,
				unsigned long org_sp_gr3)
{
	extern ftrace_func_t ftrace_trace_function;

	/* Tracing globally stopped. */
	if (function_trace_stop)
		return;

	/* A real callback is installed: call it and skip graph tracing. */
	if (ftrace_trace_function != ftrace_stub) {
		ftrace_trace_function(parent, self_addr);
		return;
	}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_entry && ftrace_graph_return) {
		unsigned long sp;
		unsigned long *parent_rp;

		/* Read the current stack pointer (%r30 on parisc). */
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		/* sanity check: is stack pointer which we got from
		   assembler function in entry.S in a reasonable
		   range compared to current stack pointer? */
		if ((sp - org_sp_gr3) > 0x400)
			return;

		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) org_sp_gr3 - 0x10;
		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		/* May rewrite *parent_rp to divert the return path. */
		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}
185