// SPDX-License-Identifier: GPL-2.0
/*
 * Code for tracing calls in the Linux kernel.
 * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
 *
 * based on code for x86 which is:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * future possible enhancements:
 * - add CONFIG_STACK_TRACER
 */

#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

#include <asm/assembly.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
#include <asm/patch.h>

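/* presumably grouped into .text.hot to keep the tracer's hot paths together */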
#define __hot __attribute__ ((__section__ (".text.hot")))

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread info.
 */
static void __hot prepare_ftrace_return(unsigned long *parent,
					unsigned long self_addr)
{
	unsigned long old;
	extern int parisc_return_to_handler;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr, 0, NULL))
		/* activate parisc_return_to_handler() as return point */
		*parent = (unsigned long) &parisc_return_to_handler;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

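/*
 * Common entry point, reached from the assembly ftrace stubs (see
 * entry.S) with the parent's return address, the traced function's
 * own address and the caller's original stack pointer.  Dispatches
 * to the function tracer and, when enabled, to the graph tracer.
 */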
void notrace __hot ftrace_function_trampoline(unsigned long parent,
				unsigned long self_addr,
				unsigned long org_sp_gr3,
				struct pt_regs *regs)
{
#ifndef CONFIG_DYNAMIC_FTRACE
	extern ftrace_func_t ftrace_trace_function;
#endif
	extern struct ftrace_ops *function_trace_op;

	if (function_trace_op->flags & FTRACE_OPS_FL_ENABLED &&
	    ftrace_trace_function != ftrace_stub)
		ftrace_trace_function(self_addr, parent,
				function_trace_op, regs);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_return != (trace_func_graph_ret_t) ftrace_stub ||
	    ftrace_graph_entry != ftrace_graph_entry_stub) {
		unsigned long *parent_rp;

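		/*
		 * RP_OFFSET (from asm/assembly.h) is the ABI-defined
		 * offset below the frame's stack pointer at which the
		 * return pointer %rp is spilled.
		 */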
		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}

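/*
 * The graph caller is dispatched at run time from
 * ftrace_function_trampoline(), so enabling and disabling it requires
 * no code patching; both hooks are no-ops.
 */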
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int ftrace_enable_ftrace_graph_caller(void)
{
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return 0;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

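/*
 * Nothing needs to be patched when the global trace function changes:
 * the assembly stub always enters ftrace_function_trampoline(), which
 * looks up the current tracer through function_trace_op at run time,
 * so the following hooks only have to exist.
 */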
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr + (FTRACE_PATCHABLE_FUNCTION_SIZE - 1) * 4;
}
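
/*
 * The recorded address points at the first of the
 * FTRACE_PATCHABLE_FUNCTION_SIZE padding NOPs emitted ahead of each
 * traced function; the adjustment above moves rec->ip to the last NOP,
 * the one actually executed at function entry.  ftrace_make_call()
 * below builds a trampoline in the preceding NOPs and turns that last
 * NOP into a backward branch into it.
 */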

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	u32 *tramp;
	int size, ret, i;
	void *ip;

#ifdef CONFIG_64BIT
	unsigned long addr2 =
		(unsigned long)dereference_function_descriptor((void *)addr);

	u32 ftrace_trampoline[] = {
		0x73c10208, /* std,ma r1,100(sp) */
		0x0c2110c1, /* ldd -10(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		addr2 >> 32,
		addr2 & 0xffffffff,
		0xe83f1fd7, /* b,l,n .-14,r1 */
	};

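	/*
	 * Variant for a call site whose last NOP is only word aligned
	 * (rec->ip & 0x4): the 64-bit address literal is placed first,
	 * presumably so it stays doubleword aligned for the ldd that
	 * fetches it.
	 */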
	u32 ftrace_trampoline_unaligned[] = {
		addr2 >> 32,
		addr2 & 0xffffffff,
		0x37de0200, /* ldo 100(sp),sp */
		0x73c13e01, /* std r1,-100(sp) */
		0x34213ff9, /* ldo -4(r1),r1 */
		0x50213fc1, /* ldd -20(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		0xe83f1fcf, /* b,l,n .-20,r1 */
	};

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);
#else
	u32 ftrace_trampoline[] = {
		(u32)addr,
		0x6fc10080, /* stw,ma r1,40(sp) */
		0x48213fd1, /* ldw -18(r1),r1 */
		0xe820c002, /* bv,n r0(r1) */
		0xe83f1fdf, /* b,l,n .-c,r1 */
	};
#endif

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);

	size = sizeof(ftrace_trampoline);
	tramp = ftrace_trampoline;

#ifdef CONFIG_64BIT
	if (rec->ip & 0x4) {
		size = sizeof(ftrace_trampoline_unaligned);
		tramp = ftrace_trampoline_unaligned;
	}
#endif

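	/* the patch region is "size" bytes ending with the word at rec->ip */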
	ip = (void *)(rec->ip + 4 - size);

	ret = probe_kernel_read(insn, ip, size);
	if (ret)
		return ret;

	/* only proceed if the site still contains the original NOP padding */
	for (i = 0; i < size / 4; i++) {
		if (insn[i] != INSN_NOP)
			return -EINVAL;
	}

	__patch_text_multiple(ip, tramp, size);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	int i;

	for (i = 0; i < ARRAY_SIZE(insn); i++)
		insn[i] = INSN_NOP;

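	/*
	 * NOP out the live branch at rec->ip first, so the call is
	 * disabled before the trampoline words preceding it are cleared.
	 */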
	__patch_text((void *)rec->ip, INSN_NOP);
	__patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
			      insn, sizeof(insn) - 4);
	return 0;
}
#endif

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);

	if (unlikely(!p) || kprobe_disabled(p))
		return;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		return;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

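	/*
	 * Point the instruction address queue at the probed address so
	 * the pre_handler sees the program counter it expects.
	 */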
	regs->iaoq[0] = ip;
	regs->iaoq[1] = ip + 4;

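	/*
	 * If the pre_handler did not divert execution, step over the
	 * probe point and let the post_handler run.
	 */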
	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		regs->iaoq[0] = ip + 4;
		regs->iaoq[1] = ip + 8;

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

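/*
 * No out-of-line single-step slot is needed: the handler above adjusts
 * the instruction address queue directly instead of single-stepping a
 * copied instruction.
 */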
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif