// SPDX-License-Identifier: GPL-2.0
/*
 * Code for tracing calls in Linux kernel.
 * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
 *
 * based on code for x86 which is:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * future possible enhancements:
 *	- add CONFIG_STACK_TRACER
 */

#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

#include <asm/assembly.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
#include <asm/patch.h>

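/* place the hot tracing paths into the .text.hot section for better locality */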
#define __hot __section(".text.hot")

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
static void __hot prepare_ftrace_return(unsigned long *parent,
					unsigned long self_addr)
{
	unsigned long old;
	extern int parisc_return_to_handler;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

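	/*
	 * function_graph_enter() records the real return address on the
	 * return stack and returns 0 on success; only then may the
	 * on-stack return pointer be hijacked.
	 */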
	if (!function_graph_enter(old, self_addr, 0, NULL))
		/* activate parisc_return_to_handler() as return point */
		*parent = (unsigned long) &parisc_return_to_handler;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

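/*
 * Entered from the assembly ftrace stub with the traced function's return
 * address, its own address and its stack pointer (passed via %gr3), which
 * is used below to locate the %rp save slot.
 */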
void notrace __hot ftrace_function_trampoline(unsigned long parent,
				unsigned long self_addr,
				unsigned long org_sp_gr3,
				struct ftrace_regs *fregs)
{
#ifndef CONFIG_DYNAMIC_FTRACE
	extern ftrace_func_t ftrace_trace_function;
#endif
	extern struct ftrace_ops *function_trace_op;

	if (function_trace_op->flags & FTRACE_OPS_FL_ENABLED &&
	    ftrace_trace_function != ftrace_stub)
		ftrace_trace_function(self_addr, parent,
				function_trace_op, fregs);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
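	/*
	 * On 64-bit parisc function pointers point to function
	 * descriptors, so compare the dereferenced code addresses
	 * rather than the descriptor addresses.
	 */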
	if (dereference_function_descriptor(ftrace_graph_return) !=
	    dereference_function_descriptor(ftrace_stub) ||
	    ftrace_graph_entry != ftrace_graph_entry_stub) {
		unsigned long *parent_rp;

		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
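/*
 * The graph tracer is switched on and off at run time by the checks in
 * ftrace_function_trampoline() above, so there is no call site to patch.
 */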
int ftrace_enable_ftrace_graph_caller(void)
{
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return 0;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
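
/*
 * The trampoline looks up ftrace_trace_function/function_trace_op at
 * run time, so changing the global trace function needs no patching.
 */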
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

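/*
 * The target address words patched by ftrace_make_call() stay valid,
 * so report success without rewriting the call site.
 */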
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

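/*
 * The compiler emits FTRACE_PATCHABLE_FUNCTION_SIZE NOPs in front of each
 * function entry; move rec->ip from the first of them to the last one.
 */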
unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr + (FTRACE_PATCHABLE_FUNCTION_SIZE - 1) * 4;
}

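/*
 * Enable tracing for a function: the patchable NOP area is overwritten
 * with a mini-trampoline that saves %r1, loads the target address
 * embedded in the patched words and branches to it; the last patched
 * word is a b,l that jumps backwards into this trampoline.
 */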
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	u32 *tramp;
	int size, ret, i;
	void *ip;

#ifdef CONFIG_64BIT
	unsigned long addr2 =
		(unsigned long)dereference_function_descriptor((void *)addr);

	u32 ftrace_trampoline[] = {
		0x73c10208, /* std,ma r1,100(sp) */
		0x0c2110c1, /* ldd -10(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		addr2 >> 32,
		addr2 & 0xffffffff,
		0xe83f1fd7, /* b,l,n .-14,r1 */
	};

	u32 ftrace_trampoline_unaligned[] = {
		addr2 >> 32,
		addr2 & 0xffffffff,
		0x37de0200, /* ldo 100(sp),sp */
		0x73c13e01, /* std r1,-100(sp) */
		0x34213ff9, /* ldo -4(r1),r1 */
		0x50213fc1, /* ldd -20(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		0xe83f1fcf, /* b,l,n .-20,r1 */
	};

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);
#else
	u32 ftrace_trampoline[] = {
		(u32)addr,
		0x6fc10080, /* stw,ma r1,40(sp) */
		0x48213fd1, /* ldw -18(r1),r1 */
		0xe820c002, /* bv,n r0(r1) */
		0xe83f1fdf, /* b,l,n .-c,r1 */
	};
#endif

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);

	size = sizeof(ftrace_trampoline);
	tramp = ftrace_trampoline;

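	/*
	 * ldd requires the embedded 64-bit target address to be 8-byte
	 * aligned; switch to the alternate trampoline layout if the
	 * patch area is only word aligned.
	 */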
#ifdef CONFIG_64BIT
	if (rec->ip & 0x4) {
		size = sizeof(ftrace_trampoline_unaligned);
		tramp = ftrace_trampoline_unaligned;
	}
#endif

	ip = (void *)(rec->ip + 4 - size);

	ret = copy_from_kernel_nofault(insn, ip, size);
	if (ret)
		return ret;

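	/* only patch if the whole area still contains the original NOPs */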
	for (i = 0; i < size / 4; i++) {
		if (insn[i] != INSN_NOP)
			return -EINVAL;
	}

	__patch_text_multiple(ip, tramp, size);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	int i;

	for (i = 0; i < ARRAY_SIZE(insn); i++)
		insn[i] = INSN_NOP;

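	/*
	 * First turn the patched word at rec->ip back into a NOP so that
	 * callers no longer branch into the trampoline, then clear the
	 * remaining trampoline words.
	 */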
	__patch_text((void *)rec->ip, INSN_NOP);
	__patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
			      insn, sizeof(insn) - 4);
	return 0;
}
#endif

#ifdef CONFIG_KPROBES_ON_FTRACE
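/*
 * ftrace callback for kprobes that have been placed on a function's
 * patchable entry instead of on a breakpoint instruction.
 */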
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

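	/* bail out if the handler recurses into itself */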
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	preempt_disable_notrace();
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

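	/*
	 * Pretend the probe hit at the traced instruction: iaoq[0]/[1]
	 * hold the addresses of the current and the next instruction.
	 */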
	regs->iaoq[0] = ip;
	regs->iaoq[1] = ip + 4;

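	/*
	 * Unless the pre_handler changed the flow, resume behind the
	 * probed instruction; no single-stepping is needed on ftrace
	 * sites.
	 */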
	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		regs->iaoq[0] = ip + 4;
		regs->iaoq[1] = ip + 8;

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	preempt_enable_notrace();
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

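/*
 * No out-of-line single-step slot is needed: the probe fires from the
 * ftrace handler above and execution simply continues in the function.
 */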
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif