Loading...
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/spinlock.h>
3#include <linux/hardirq.h>
4#include <linux/ftrace.h>
5#include <linux/percpu.h>
6#include <linux/init.h>
7#include <linux/list.h>
8#include <trace/syscall.h>
9
10#include <asm/ftrace.h>
11
12#ifdef CONFIG_DYNAMIC_FTRACE
13static const u32 ftrace_nop = 0x01000000;
14
/*
 * Encode a SPARC "call" instruction located at @ip whose target is @addr.
 * The call opcode lives in the top two bits (01); the low 30 bits carry
 * the signed PC-relative displacement in words.
 */
static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	s32 disp = (s32)addr - (s32)ip;

	return 0x40000000 | ((u32)disp >> 2);
}
25
/*
 * ftrace_modify_code - atomically patch one instruction word at @ip.
 *
 * Uses the SPARC "cas" (compare-and-swap) instruction so the new word is
 * only stored if the site still holds @old, then flushes the I-cache for
 * @ip so the CPU fetches the updated instruction.  A faulting access is
 * recovered through the __ex_table fixup entry (label 3 jumps back to 2).
 *
 * Return: 0 on success; 1 if the access faulted; 2 if the word read back
 * matched neither @old nor @new (unexpected text at the site, nothing
 * was patched).
 */
static int ftrace_modify_code(unsigned long ip, u32 old, u32 new)
{
	u32 replaced;	/* value cas read back from *ip */
	int faulted;

	__asm__ __volatile__(
	"1: cas [%[ip]], %[old], %[new]\n"
	" flush %[ip]\n"
	" mov 0, %[faulted]\n"
	"2:\n"
	" .section .fixup,#alloc,#execinstr\n"
	" .align 4\n"
	"3: sethi %%hi(2b), %[faulted]\n"
	" jmpl %[faulted] + %%lo(2b), %%g0\n"
	" mov 1, %[faulted]\n"
	" .previous\n"
	" .section __ex_table,\"a\"\n"
	" .align 4\n"
	" .word 1b, 3b\n"
	" .previous\n"
	: "=r" (replaced), [faulted] "=r" (faulted)
	: [new] "0" (new), [old] "r" (old), [ip] "r" (ip)
	: "memory");

	/* cas returned neither value: the site held unexpected code. */
	if (replaced != old && replaced != new)
		faulted = 2;

	return faulted;
}
55
56int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
57{
58 unsigned long ip = rec->ip;
59 u32 old, new;
60
61 old = ftrace_call_replace(ip, addr);
62 new = ftrace_nop;
63 return ftrace_modify_code(ip, old, new);
64}
65
66int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
67{
68 unsigned long ip = rec->ip;
69 u32 old, new;
70
71 old = ftrace_nop;
72 new = ftrace_call_replace(ip, addr);
73 return ftrace_modify_code(ip, old, new);
74}
75
76int ftrace_update_ftrace_func(ftrace_func_t func)
77{
78 unsigned long ip = (unsigned long)(&ftrace_call);
79 u32 old, new;
80
81 old = *(u32 *) &ftrace_call;
82 new = ftrace_call_replace(ip, (unsigned long)func);
83 return ftrace_modify_code(ip, old, new);
84}
85
/* Arch hook for dynamic ftrace initialization: nothing to do on sparc. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
90#endif
91
92#ifdef CONFIG_FUNCTION_GRAPH_TRACER
93
94#ifdef CONFIG_DYNAMIC_FTRACE
95extern void ftrace_graph_call(void);
96
97int ftrace_enable_ftrace_graph_caller(void)
98{
99 unsigned long ip = (unsigned long)(&ftrace_graph_call);
100 u32 old, new;
101
102 old = *(u32 *) &ftrace_graph_call;
103 new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
104 return ftrace_modify_code(ip, old, new);
105}
106
107int ftrace_disable_ftrace_graph_caller(void)
108{
109 unsigned long ip = (unsigned long)(&ftrace_graph_call);
110 u32 old, new;
111
112 old = *(u32 *) &ftrace_graph_call;
113 new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
114
115 return ftrace_modify_code(ip, old, new);
116}
117
118#endif /* !CONFIG_DYNAMIC_FTRACE */
119
120/*
121 * Hook the return address and push it in the stack of return addrs
122 * in current thread info.
123 */
124unsigned long prepare_ftrace_return(unsigned long parent,
125 unsigned long self_addr,
126 unsigned long frame_pointer)
127{
128 unsigned long return_hooker = (unsigned long) &return_to_handler;
129 struct ftrace_graph_ent trace;
130
131 if (unlikely(atomic_read(¤t->tracing_graph_pause)))
132 return parent + 8UL;
133
134 trace.func = self_addr;
135 trace.depth = current->curr_ret_stack + 1;
136
137 /* Only trace if the calling function expects to */
138 if (!ftrace_graph_entry(&trace))
139 return parent + 8UL;
140
141 if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
142 frame_pointer, NULL) == -EBUSY)
143 return parent + 8UL;
144
145 return return_hooker;
146}
147#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1#include <linux/spinlock.h>
2#include <linux/hardirq.h>
3#include <linux/ftrace.h>
4#include <linux/percpu.h>
5#include <linux/init.h>
6#include <linux/list.h>
7#include <trace/syscall.h>
8
9#include <asm/ftrace.h>
10
11#ifdef CONFIG_DYNAMIC_FTRACE
12static const u32 ftrace_nop = 0x01000000;
13
/*
 * Construct the 32-bit SPARC "call" instruction that, placed at @ip,
 * transfers control to @addr.
 */
static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	u32 displacement = (u32)((s32)addr - (s32)ip);

	/* "call": opcode 01 in the top two bits, word offset below. */
	return 0x40000000 | (displacement >> 2);
}
24
/*
 * ftrace_modify_code - atomically patch one instruction word at @ip.
 *
 * Stores @new at @ip via the SPARC "cas" (compare-and-swap) instruction,
 * which only succeeds if the site still holds @old; the I-cache is then
 * flushed for @ip.  Faults are recovered through the __ex_table fixup.
 *
 * Return: 0 on success; 1 if the access faulted; 2 if the word read back
 * matched neither @old nor @new (unexpected code at the patch site).
 */
static int ftrace_modify_code(unsigned long ip, u32 old, u32 new)
{
	u32 replaced;	/* value cas read back from *ip */
	int faulted;

	__asm__ __volatile__(
	"1: cas [%[ip]], %[old], %[new]\n"
	" flush %[ip]\n"
	" mov 0, %[faulted]\n"
	"2:\n"
	" .section .fixup,#alloc,#execinstr\n"
	" .align 4\n"
	"3: sethi %%hi(2b), %[faulted]\n"
	" jmpl %[faulted] + %%lo(2b), %%g0\n"
	" mov 1, %[faulted]\n"
	" .previous\n"
	" .section __ex_table,\"a\"\n"
	" .align 4\n"
	" .word 1b, 3b\n"
	" .previous\n"
	: "=r" (replaced), [faulted] "=r" (faulted)
	: [new] "0" (new), [old] "r" (old), [ip] "r" (ip)
	: "memory");

	/* Neither value seen: someone else's code was at the site. */
	if (replaced != old && replaced != new)
		faulted = 2;

	return faulted;
}
54
55int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
56{
57 unsigned long ip = rec->ip;
58 u32 old, new;
59
60 old = ftrace_call_replace(ip, addr);
61 new = ftrace_nop;
62 return ftrace_modify_code(ip, old, new);
63}
64
65int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
66{
67 unsigned long ip = rec->ip;
68 u32 old, new;
69
70 old = ftrace_nop;
71 new = ftrace_call_replace(ip, addr);
72 return ftrace_modify_code(ip, old, new);
73}
74
75int ftrace_update_ftrace_func(ftrace_func_t func)
76{
77 unsigned long ip = (unsigned long)(&ftrace_call);
78 u32 old, new;
79
80 old = *(u32 *) &ftrace_call;
81 new = ftrace_call_replace(ip, (unsigned long)func);
82 return ftrace_modify_code(ip, old, new);
83}
84
85int __init ftrace_dyn_arch_init(void *data)
86{
87 unsigned long *p = data;
88
89 *p = 0;
90
91 return 0;
92}
93#endif
94
95#ifdef CONFIG_FUNCTION_GRAPH_TRACER
96
97#ifdef CONFIG_DYNAMIC_FTRACE
98extern void ftrace_graph_call(void);
99
100int ftrace_enable_ftrace_graph_caller(void)
101{
102 unsigned long ip = (unsigned long)(&ftrace_graph_call);
103 u32 old, new;
104
105 old = *(u32 *) &ftrace_graph_call;
106 new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
107 return ftrace_modify_code(ip, old, new);
108}
109
110int ftrace_disable_ftrace_graph_caller(void)
111{
112 unsigned long ip = (unsigned long)(&ftrace_graph_call);
113 u32 old, new;
114
115 old = *(u32 *) &ftrace_graph_call;
116 new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
117
118 return ftrace_modify_code(ip, old, new);
119}
120
121#endif /* !CONFIG_DYNAMIC_FTRACE */
122
123/*
124 * Hook the return address and push it in the stack of return addrs
125 * in current thread info.
126 */
127unsigned long prepare_ftrace_return(unsigned long parent,
128 unsigned long self_addr,
129 unsigned long frame_pointer)
130{
131 unsigned long return_hooker = (unsigned long) &return_to_handler;
132 struct ftrace_graph_ent trace;
133
134 if (unlikely(atomic_read(¤t->tracing_graph_pause)))
135 return parent + 8UL;
136
137 if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
138 frame_pointer) == -EBUSY)
139 return parent + 8UL;
140
141 trace.func = self_addr;
142
143 /* Only trace if the calling function expects to */
144 if (!ftrace_graph_entry(&trace)) {
145 current->curr_ret_stack--;
146 return parent + 8UL;
147 }
148
149 return return_hooker;
150}
151#endif /* CONFIG_FUNCTION_GRAPH_TRACER */