// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"

/*
 * To generate the function prologue either gcc's hotpatch feature (since
 * gcc 4.8) or a combination of the -pg -mrecord-mcount -mnop-mcount -mfentry
 * flags (since gcc 9 / clang 10) is used.
 * In both cases the original and the disabled function prologue contain
 * only a single six byte instruction and look like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the code gets patched and afterwards looks
 * like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *
 * The instruction will be patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't jump to the ftrace caller directly, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 */
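
/*
 * For reference, the per-function trampolines generated by
 * ftrace_init_nop() below have the following layout (struct
 * ftrace_hotpatch_trampoline itself is declared in ftrace.h; offsets as
 * implied by the lmg in the shared trampolines, which loads %r0/%r1
 * starting at offset 8):
 * >	brasl	%r1,<shared trampoline>		# offset 0
 * >	<pad>					# offset 6
 * >	.quad	<rest of intercepted function>	# offset 8
 * >	.quad	<interceptor, i.e. FTRACE_ADDR>	# offset 16
 */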

void *ftrace_func __read_mostly = ftrace_stub;

struct ftrace_insn {
	u16 opc;
	s32 disp;
} __packed;

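/*
 * The shared trampoline is entered via the brasl at the start of a
 * per-function trampoline, so %r1 holds the address right behind that
 * brasl. The lmg loads %r0 with the resume address within the traced
 * function and %r1 with the interceptor, then the code branches to the
 * interceptor with the return address in %r0, as the ABI comment above
 * describes.
 */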
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_br:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_br_end:\n"
);

#ifdef CONFIG_EXPOLINE
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_exrl:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	exrl	%r0,0f\n"
	"	j	.\n"
	"0:	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
);
#endif /* CONFIG_EXPOLINE */

#ifdef CONFIG_MODULES
static char *ftrace_plt;
#endif /* CONFIG_MODULES */

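/*
 * Return the start (and optionally the end) of the shared trampoline
 * variant to use: the expoline-safe exrl version if spectre branch
 * mitigation is enabled, the plain br version otherwise.
 */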
static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
	const char *tstart, *tend;

	tstart = ftrace_shared_hotpatch_trampoline_br;
	tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
	if (!nospec_disable) {
		tstart = ftrace_shared_hotpatch_trampoline_exrl;
		tend = ftrace_shared_hotpatch_trampoline_exrl_end;
	}
#endif /* CONFIG_EXPOLINE */
	if (end)
		*end = tend;
	return tstart;
}

bool ftrace_need_init_nop(void)
{
	return true;
}

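/*
 * Convert the compiler-generated nop at rec->ip into a call via a
 * per-function trampoline: take the next free slot from the vmlinux
 * (or module) trampoline area, fill in the brasl to the shared
 * trampoline code plus the interceptor and resume addresses, and patch
 * the nop's displacement to point at the new trampoline. Only the
 * displacement is written here; the instruction keeps its zero mask and
 * therefore stays a nop until ftrace_make_call() flips the mask field.
 */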
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
		__ftrace_hotpatch_trampolines_start;
	static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
	static struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_hotpatch_trampoline **next_trampoline;
	struct ftrace_hotpatch_trampoline *trampolines_end;
	struct ftrace_hotpatch_trampoline tmp;
	struct ftrace_insn *insn;
	const char *shared;
	s32 disp;

	BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
		     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

	next_trampoline = &next_vmlinux_trampoline;
	trampolines_end = __ftrace_hotpatch_trampolines_end;
	shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
	if (mod) {
		next_trampoline = &mod->arch.next_trampoline;
		trampolines_end = mod->arch.trampolines_end;
		shared = ftrace_plt;
	}
#endif

	if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
		return -ENOMEM;
	trampoline = (*next_trampoline)++;

	/* Check for the compiler-generated fentry nop (brcl 0, .). */
	if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
		return -EINVAL;

	/* Generate the trampoline. */
	tmp.brasl_opc = 0xc015;	/* brasl %r1, shared */
	tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
	tmp.interceptor = FTRACE_ADDR;
	tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
	s390_kernel_write(trampoline, &tmp, sizeof(tmp));

	/* Generate a jump to the trampoline. */
	disp = ((char *)trampoline - (char *)rec->ip) / 2;
	insn = (struct ftrace_insn *)rec->ip;
	s390_kernel_write(&insn->disp, &disp, sizeof(disp));

	return 0;
}

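/*
 * Find the trampoline belonging to rec: read the brcl at rec->ip,
 * follow its displacement (counted in halfwords) to the trampoline and
 * verify that the target really starts with the expected brasl opcode
 * (0xc015) before handing it out.
 */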
static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_insn insn;
	s64 disp;
	u16 opc;

	if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
		return ERR_PTR(-EFAULT);
	disp = (s64)insn.disp * 2;
	trampoline = (void *)(rec->ip + disp);
	if (get_kernel_nofault(opc, &trampoline->brasl_opc))
		return ERR_PTR(-EFAULT);
	if (opc != 0xc015)
		return ERR_PTR(-EINVAL);
	return trampoline;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	u64 old;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	if (get_kernel_nofault(old, &trampoline->interceptor))
		return -EFAULT;
	if (old != old_addr)
		return -EINVAL;
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	return 0;
}

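/*
 * brcl and brc encode the condition mask in the high nibble of their
 * second byte: mask 0 never branches (a nop), mask 0xf always branches.
 * Rewriting this single byte toggles the branch without touching the
 * displacement, and is the mechanism behind ftrace_make_call /
 * ftrace_make_nop and the graph caller switches below.
 */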
static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
	u16 old;
	u8 op;

	if (get_kernel_nofault(old, addr))
		return -EFAULT;
	if (old != expected)
		return -EINVAL;
	/* set mask field to all ones or zeroes */
	op = enable ? 0xf4 : 0x04;
	s390_kernel_write((char *)addr + 1, &op, sizeof(op));
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	/* Expect brcl 0xf,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	/* Expect brcl 0x0,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

void ftrace_arch_code_modify_post_process(void)
{
	/*
	 * Flush any pre-fetched instructions on all
	 * CPUs to make the new code visible.
	 */
	text_poke_sync_lock();
}

#ifdef CONFIG_MODULES

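/*
 * Modules get their own copy of the shared trampoline (ftrace_plt),
 * placed in the module area so the brasl in their per-function
 * trampolines can reach it; the vmlinux copy may be out of the
 * +-4 GiB brasl range for module code.
 */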
static int __init ftrace_plt_init(void)
{
	const char *start, *end;

	ftrace_plt = module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");

	start = ftrace_shared_hotpatch_trampoline(&end);
	memcpy(ftrace_plt, start, end - start);
	set_memory_ro((unsigned long)ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread info.
 */
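/*
 * Called with the traced function's return address (ra), stack pointer
 * (sp) and the address behind the patched instruction (ip). If the
 * graph tracer accepts the function, return_to_handler is returned
 * instead of ra, so the traced function returns into the graph exit
 * handler.
 */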
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
				    unsigned long ip)
{
	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	if (!function_graph_enter(ra, ip, 0, (void *)sp))
		ra = (unsigned long)return_to_handler;
out:
	return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The
 * instruction there is a branch relative on condition. To enable the
 * ftrace graph code block, we simply patch the mask field of the
 * instruction to zero, which turns the instruction into a nop.
 * To disable the ftrace graph code the mask field is patched to all
 * ones, which turns the instruction into an unconditional branch.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	int rc;

	/* Expect brc 0xf,... */
	rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
	if (rc)
		return rc;
	text_poke_sync_lock();
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	int rc;

	/* Expect brc 0x0,... */
	rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
	if (rc)
		return rc;
	text_poke_sync_lock();
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
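/*
 * ftrace_ops callback used when a kprobe is placed on an ftrace
 * location: after guarding against recursion and reentrancy it mimics a
 * breakpoint hit, i.e. it points the saved psw at the probed address,
 * runs the pre-handler and, unless the pre-handler claimed the event,
 * continues behind the patched instruction and runs the post-handler.
 */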
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (!regs || unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	instruction_pointer_set(regs, ip);

	if (!p->pre_handler || !p->pre_handler(p, regs)) {

		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif

/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_64BIT
#define MCOUNT_OFFSET_RET 12
#else
#define MCOUNT_OFFSET_RET 22
#endif

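/*
 * Offset subtracted from the mcount return address to form the value
 * recorded as trace.func in prepare_ftrace_return() below; the values
 * correspond to the mcount code block layouts described later in this
 * file.
 */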
#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_disable_code(void);
void ftrace_enable_insn(void);

#ifdef CONFIG_64BIT
/*
 * The 64-bit mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	larl	%r1,<&counter>		# offset 6
 * >	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. The middle two instructions of the mcount
 * block get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 64-bit enabled ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%r15)		# offset 18
 * The return points of the mcount/ftrace function have the same offset 18.
 * The 64-bit disabled ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	jg	.+18			# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%r15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	jg	0f\n"
	"	lgr	%r0,%r0\n"
	"	basr	%r14,%r1\n"
	"0:\n"
	"	.align	4\n"
	"ftrace_enable_insn:\n"
	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n");

#define FTRACE_INSN_SIZE	6

#else /* CONFIG_64BIT */
/*
 * The 31-bit mcount code looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	bras	%r1,0f			# offset 4
 * >	.long	_mcount			# offset 8
 * >	.long	<&counter>		# offset 12
 * > 0:	l	%r14,0(%r1)		# offset 16
 * >	l	%r1,4(%r1)		# offset 20
 *	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * Total length is 30 bytes. The twenty bytes starting from offset 4
 * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 31-bit enabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	l	%r14,__LC_FTRACE_FUNC	# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 * 0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * The return points of the mcount/ftrace function have the same offset 26.
 * The 31-bit disabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	j	.+26			# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 * 0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * The j instruction branches to offset 30 to skip as many instructions
 * as possible.
 */
asm(
	"	.align	4\n"
	"ftrace_disable_code:\n"
	"	j	1f\n"
	"	j	0f\n"
	"	.fill	12,1,0x07\n"
	"0:	basr	%r14,%r14\n"
	"1:\n"
	"	.align	4\n"
	"ftrace_enable_insn:\n"
	"	l	%r14,"__stringify(__LC_FTRACE_FUNC)"\n");

#define FTRACE_INSN_SIZE	4

#endif /* CONFIG_64BIT */

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	if (probe_kernel_write((void *)rec->ip, ftrace_disable_code,
			       MCOUNT_INSN_SIZE))
		return -EPERM;
	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	if (probe_kernel_write((void *)rec->ip, ftrace_enable_insn,
			       FTRACE_INSN_SIZE))
		return -EPERM;
	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *)data = 0;
	return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread info.
 */
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
					      unsigned long ip)
{
	struct ftrace_graph_ent trace;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		goto out;
	}
	parent = (unsigned long)return_to_handler;
out:
	return parent;
}

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the kernel code at the ftrace_graph_caller location. The
 * instruction there is a branch relative and save to
 * prepare_ftrace_return. To disable the call to prepare_ftrace_return
 * we patch the bras offset to point directly after the instruction. To
 * enable the call we calculate the original offset to
 * prepare_ftrace_return and put it back.
 */
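/*
 * bras encodes its target as a signed halfword offset relative to the
 * instruction address, stored in bytes 2-3 of the instruction (hence
 * ftrace_graph_caller + 2 below). An offset of 0x0002, i.e. 4 bytes,
 * branches directly behind the 4-byte bras and thus skips the call;
 * re-enabling recomputes the real halfword distance to
 * prepare_ftrace_return, which is what the division by 2 does.
 */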
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned short offset;

	offset = ((void *)prepare_ftrace_return -
		  (void *)ftrace_graph_caller) / 2;
	return probe_kernel_write(ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}

int ftrace_disable_ftrace_graph_caller(void)
{
	static unsigned short offset = 0x0002;

	return probe_kernel_write(ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */