1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Code for replacing ftrace calls with jumps.
4 *
5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6 *
7 * Thanks go out to P.A. Semi, Inc for supplying me with a PPC64 box.
8 *
9 * Added function graph tracer code, taken from x86 that was written
10 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
11 *
12 */
13
14#define pr_fmt(fmt) "ftrace-powerpc: " fmt
15
16#include <linux/spinlock.h>
17#include <linux/hardirq.h>
18#include <linux/uaccess.h>
19#include <linux/module.h>
20#include <linux/ftrace.h>
21#include <linux/percpu.h>
22#include <linux/init.h>
23#include <linux/list.h>
24
25#include <asm/asm-prototypes.h>
26#include <asm/cacheflush.h>
27#include <asm/code-patching.h>
28#include <asm/ftrace.h>
29#include <asm/syscall.h>
30
31
32#ifdef CONFIG_DYNAMIC_FTRACE
33static unsigned int
34ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
35{
36 unsigned int op;
37
38 addr = ppc_function_entry((void *)addr);
39
40 /* if (link) set op to 'bl' else 'b' */
41 op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);
42
43 return op;
44}
45
46static int
47ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
48{
49 unsigned int replaced;
50
51 /*
52 * Note:
53 * We are paranoid about modifying text, as if a bug was to happen, it
54 * could cause us to read or write to someplace that could cause harm.
55 * Carefully read and modify the code with probe_kernel_*(), and make
56 * sure what we read is what we expected it to be before modifying it.
57 */
58
59 /* read the text we want to modify */
60 if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
61 return -EFAULT;
62
63 /* Make sure it is what we expect it to be */
64 if (replaced != old) {
65 pr_err("%p: replaced (%#x) != old (%#x)",
66 (void *)ip, replaced, old);
67 return -EINVAL;
68 }
69
70 /* replace the text with the new text */
71 if (patch_instruction((unsigned int *)ip, new))
72 return -EPERM;
73
74 return 0;
75}
76
77/*
78 * Helper functions that are the same for both PPC64 and PPC32.
79 */
80static int test_24bit_addr(unsigned long ip, unsigned long addr)
81{
82 addr = ppc_function_entry((void *)addr);
83
84 /* use the create_branch to verify that this offset can be branched */
85 return create_branch((unsigned int *)ip, addr, 0);
86}
87
88#ifdef CONFIG_MODULES
89
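/*
 * A relative 'bl' has primary opcode 18 (0x48000000) with the LK bit set and
 * the AA bit clear, which is what the 0xfc000003 mask below checks for.
 */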
90static int is_bl_op(unsigned int op)
91{
92 return (op & 0xfc000003) == 0x48000001;
93}
94
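/*
 * Recover the target of a 'bl'/'b': extract the word-aligned 24-bit LI field
 * (26 bits of byte offset), sign-extend it and add it to the call site.
 */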
95static unsigned long find_bl_target(unsigned long ip, unsigned int op)
96{
97	int offset;
98
99 offset = (op & 0x03fffffc);
100 /* make it signed */
101 if (offset & 0x02000000)
102 offset |= 0xfe000000;
103
104 return ip + (long)offset;
105}
106
107#ifdef CONFIG_PPC64
108static int
109__ftrace_make_nop(struct module *mod,
110 struct dyn_ftrace *rec, unsigned long addr)
111{
112 unsigned long entry, ptr, tramp;
113 unsigned long ip = rec->ip;
114 unsigned int op, pop;
115
116 /* read where this goes */
117 if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
118 pr_err("Fetching opcode failed.\n");
119 return -EFAULT;
120 }
121
122	/* Make sure that this is still a 24-bit jump */
123 if (!is_bl_op(op)) {
124 pr_err("Not expected bl: opcode is %x\n", op);
125 return -EINVAL;
126 }
127
128	/* let's find where the pointer goes */
129 tramp = find_bl_target(ip, op);
130
131 pr_devel("ip:%lx jumps to %lx", ip, tramp);
132
133 if (module_trampoline_target(mod, tramp, &ptr)) {
134 pr_err("Failed to get trampoline target\n");
135 return -EFAULT;
136 }
137
138 pr_devel("trampoline target %lx", ptr);
139
140 entry = ppc_global_function_entry((void *)addr);
141 /* This should match what was called */
142 if (ptr != entry) {
143 pr_err("addr %lx does not match expected %lx\n", ptr, entry);
144 return -EINVAL;
145 }
146
147#ifdef CC_USING_MPROFILE_KERNEL
148	/* When using -mprofile-kernel there is no load to jump over */
149 pop = PPC_INST_NOP;
150
151 if (probe_kernel_read(&op, (void *)(ip - 4), 4)) {
152 pr_err("Fetching instruction at %lx failed.\n", ip - 4);
153 return -EFAULT;
154 }
155
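	/*
	 * With -mprofile-kernel the compiler emits "mflr r0" (optionally
	 * followed by "std r0, LRSAVE(r1)") immediately before the
	 * "bl _mcount", so the instruction preceding the call site should be
	 * one of those two.
	 */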
156 /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
157 if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) {
158 pr_err("Unexpected instruction %08x around bl _mcount\n", op);
159 return -EINVAL;
160 }
161#else
162 /*
163 * Our original call site looks like:
164 *
165 * bl <tramp>
166 * ld r2,XX(r1)
167 *
168 * Milton Miller pointed out that we can not simply nop the branch.
169 * If a task was preempted when calling a trace function, the nops
170 * will remove the way to restore the TOC in r2 and the r2 TOC will
171 * get corrupted.
172 *
173 * Use a b +8 to jump over the load.
174 */
175
176 pop = PPC_INST_BRANCH | 8; /* b +8 */
177
178 /*
179 * Check what is in the next instruction. We can see ld r2,40(r1), but
180 * on first pass after boot we will see mflr r0.
181 */
182 if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) {
183 pr_err("Fetching op failed.\n");
184 return -EFAULT;
185 }
186
187 if (op != PPC_INST_LD_TOC) {
188 pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op);
189 return -EINVAL;
190 }
191#endif /* CC_USING_MPROFILE_KERNEL */
192
193 if (patch_instruction((unsigned int *)ip, pop)) {
194 pr_err("Patching NOP failed.\n");
195 return -EPERM;
196 }
197
198 return 0;
199}
200
201#else /* !PPC64 */
202static int
203__ftrace_make_nop(struct module *mod,
204 struct dyn_ftrace *rec, unsigned long addr)
205{
206 unsigned int op;
207 unsigned int jmp[4];
208 unsigned long ip = rec->ip;
209 unsigned long tramp;
210
211 if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
212 return -EFAULT;
213
214	/* Make sure that this is still a 24-bit jump */
215 if (!is_bl_op(op)) {
216 pr_err("Not expected bl: opcode is %x\n", op);
217 return -EINVAL;
218 }
219
220	/* let's find where the pointer goes */
221 tramp = find_bl_target(ip, op);
222
223 /*
224 * On PPC32 the trampoline looks like:
225 * 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha
226 * 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l
227 * 0x7d, 0x89, 0x03, 0xa6 mtctr r12
228 * 0x4e, 0x80, 0x04, 0x20 bctr
229 */
230
231 pr_devel("ip:%lx jumps to %lx", ip, tramp);
232
233 /* Find where the trampoline jumps to */
234 if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
235 pr_err("Failed to read %lx\n", tramp);
236 return -EFAULT;
237 }
238
239 pr_devel(" %08x %08x ", jmp[0], jmp[1]);
240
241 /* verify that this is what we expect it to be */
242 if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
243 ((jmp[1] & 0xffff0000) != 0x398c0000) ||
244 (jmp[2] != 0x7d8903a6) ||
245 (jmp[3] != 0x4e800420)) {
246 pr_err("Not a trampoline\n");
247 return -EINVAL;
248 }
249
250 tramp = (jmp[1] & 0xffff) |
251 ((jmp[0] & 0xffff) << 16);
252 if (tramp & 0x8000)
253 tramp -= 0x10000;
254
255 pr_devel(" %lx ", tramp);
256
257 if (tramp != addr) {
258 pr_err("Trampoline location %08lx does not match addr\n",
259 tramp);
260 return -EINVAL;
261 }
262
263 op = PPC_INST_NOP;
264
265 if (patch_instruction((unsigned int *)ip, op))
266 return -EPERM;
267
268 return 0;
269}
270#endif /* PPC64 */
271#endif /* CONFIG_MODULES */
272
273int ftrace_make_nop(struct module *mod,
274 struct dyn_ftrace *rec, unsigned long addr)
275{
276 unsigned long ip = rec->ip;
277 unsigned int old, new;
278
279 /*
280	 * If the calling address is more than 24 bits away,
281 * then we had to use a trampoline to make the call.
282 * Otherwise just update the call site.
283 */
284 if (test_24bit_addr(ip, addr)) {
285 /* within range */
286 old = ftrace_call_replace(ip, addr, 1);
287 new = PPC_INST_NOP;
288 return ftrace_modify_code(ip, old, new);
289 }
290
291#ifdef CONFIG_MODULES
292 /*
293 * Out of range jumps are called from modules.
294 * We should either already have a pointer to the module
295 * or it has been passed in.
296 */
297 if (!rec->arch.mod) {
298 if (!mod) {
299 pr_err("No module loaded addr=%lx\n", addr);
300 return -EFAULT;
301 }
302 rec->arch.mod = mod;
303 } else if (mod) {
304 if (mod != rec->arch.mod) {
305 pr_err("Record mod %p not equal to passed in mod %p\n",
306 rec->arch.mod, mod);
307 return -EINVAL;
308 }
309 /* nothing to do if mod == rec->arch.mod */
310 } else
311 mod = rec->arch.mod;
312
313 return __ftrace_make_nop(mod, rec, addr);
314#else
315 /* We should not get here without modules */
316 return -EINVAL;
317#endif /* CONFIG_MODULES */
318}
319
320#ifdef CONFIG_MODULES
321#ifdef CONFIG_PPC64
322/*
323 * Examine the existing instructions for __ftrace_make_call.
324 * They should effectively be a NOP, and follow formal constraints,
325 * depending on the ABI. Return false if they don't.
326 */
327#ifndef CC_USING_MPROFILE_KERNEL
328static int
329expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
330{
331 /*
332 * We expect to see:
333 *
334 * b +8
335 * ld r2,XX(r1)
336 *
337 * The load offset is different depending on the ABI. For simplicity
338 * just mask it out when doing the compare.
339 */
340 if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000))
341 return 0;
342 return 1;
343}
344#else
345static int
346expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
347{
348 /* look for patched "NOP" on ppc64 with -mprofile-kernel */
349 if (op0 != PPC_INST_NOP)
350 return 0;
351 return 1;
352}
353#endif
354
355static int
356__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
357{
358 unsigned int op[2];
359 void *ip = (void *)rec->ip;
360
361 /* read where this goes */
362 if (probe_kernel_read(op, ip, sizeof(op)))
363 return -EFAULT;
364
365 if (!expected_nop_sequence(ip, op[0], op[1])) {
366 pr_err("Unexpected call sequence at %p: %x %x\n",
367 ip, op[0], op[1]);
368 return -EINVAL;
369 }
370
371 /* If we never set up a trampoline to ftrace_caller, then bail */
372 if (!rec->arch.mod->arch.tramp) {
373 pr_err("No ftrace trampoline\n");
374 return -EINVAL;
375 }
376
377 /* Ensure branch is within 24 bits */
378 if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
379 pr_err("Branch out of range\n");
380 return -EINVAL;
381 }
382
383 if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
384 pr_err("REL24 out of range!\n");
385 return -EINVAL;
386 }
387
388 return 0;
389}
390
391#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
392int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
393 unsigned long addr)
394{
395 return ftrace_make_call(rec, addr);
396}
397#endif
398
399#else /* !CONFIG_PPC64: */
400static int
401__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
402{
403 unsigned int op;
404 unsigned long ip = rec->ip;
405
406 /* read where this goes */
407 if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
408 return -EFAULT;
409
410 /* It should be pointing to a nop */
411 if (op != PPC_INST_NOP) {
412 pr_err("Expected NOP but have %x\n", op);
413 return -EINVAL;
414 }
415
416 /* If we never set up a trampoline to ftrace_caller, then bail */
417 if (!rec->arch.mod->arch.tramp) {
418 pr_err("No ftrace trampoline\n");
419 return -EINVAL;
420 }
421
422 /* create the branch to the trampoline */
423 op = create_branch((unsigned int *)ip,
424 rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
425 if (!op) {
426 pr_err("REL24 out of range!\n");
427 return -EINVAL;
428 }
429
430 pr_devel("write to %lx\n", rec->ip);
431
432 if (patch_instruction((unsigned int *)ip, op))
433 return -EPERM;
434
435 return 0;
436}
437#endif /* CONFIG_PPC64 */
438#endif /* CONFIG_MODULES */
439
440int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
441{
442 unsigned long ip = rec->ip;
443 unsigned int old, new;
444
445 /*
446	 * If the calling address is more than 24 bits away,
447 * then we had to use a trampoline to make the call.
448 * Otherwise just update the call site.
449 */
450 if (test_24bit_addr(ip, addr)) {
451 /* within range */
452 old = PPC_INST_NOP;
453 new = ftrace_call_replace(ip, addr, 1);
454 return ftrace_modify_code(ip, old, new);
455 }
456
457#ifdef CONFIG_MODULES
458 /*
459 * Out of range jumps are called from modules.
460	 * Since we are converting from a nop, it had better
461 * already have a module defined.
462 */
463 if (!rec->arch.mod) {
464 pr_err("No module loaded\n");
465 return -EINVAL;
466 }
467
468 return __ftrace_make_call(rec, addr);
469#else
470 /* We should not get here without modules */
471 return -EINVAL;
472#endif /* CONFIG_MODULES */
473}
474
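/*
 * Repoint the 'bl' at the ftrace_call site inside the ftrace_caller
 * trampoline so that it calls the currently registered ftrace_func_t.
 */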
475int ftrace_update_ftrace_func(ftrace_func_t func)
476{
477 unsigned long ip = (unsigned long)(&ftrace_call);
478 unsigned int old, new;
479 int ret;
480
481 old = *(unsigned int *)&ftrace_call;
482 new = ftrace_call_replace(ip, (unsigned long)func, 1);
483 ret = ftrace_modify_code(ip, old, new);
484
485 return ret;
486}
487
488static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
489{
490 unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
491 int ret;
492
493 ret = ftrace_update_record(rec, enable);
494
495 switch (ret) {
496 case FTRACE_UPDATE_IGNORE:
497 return 0;
498 case FTRACE_UPDATE_MAKE_CALL:
499 return ftrace_make_call(rec, ftrace_addr);
500 case FTRACE_UPDATE_MAKE_NOP:
501 return ftrace_make_nop(NULL, rec, ftrace_addr);
502 }
503
504 return 0;
505}
506
507void ftrace_replace_code(int enable)
508{
509 struct ftrace_rec_iter *iter;
510 struct dyn_ftrace *rec;
511 int ret;
512
513 for (iter = ftrace_rec_iter_start(); iter;
514 iter = ftrace_rec_iter_next(iter)) {
515 rec = ftrace_rec_iter_record(iter);
516 ret = __ftrace_replace_code(rec, enable);
517 if (ret) {
518 ftrace_bug(ret, rec);
519 return;
520 }
521 }
522}
523
524/*
525 * Use the default ftrace_modify_all_code, but without
526 * stop_machine().
527 */
528void arch_ftrace_update_code(int command)
529{
530 ftrace_modify_all_code(command);
531}
532
533int __init ftrace_dyn_arch_init(void)
534{
535 return 0;
536}
537#endif /* CONFIG_DYNAMIC_FTRACE */
538
539#ifdef CONFIG_FUNCTION_GRAPH_TRACER
540
541#ifdef CONFIG_DYNAMIC_FTRACE
542extern void ftrace_graph_call(void);
543extern void ftrace_graph_stub(void);
544
545int ftrace_enable_ftrace_graph_caller(void)
546{
547 unsigned long ip = (unsigned long)(&ftrace_graph_call);
548 unsigned long addr = (unsigned long)(&ftrace_graph_caller);
549 unsigned long stub = (unsigned long)(&ftrace_graph_stub);
550 unsigned int old, new;
551
552 old = ftrace_call_replace(ip, stub, 0);
553 new = ftrace_call_replace(ip, addr, 0);
554
555 return ftrace_modify_code(ip, old, new);
556}
557
558int ftrace_disable_ftrace_graph_caller(void)
559{
560 unsigned long ip = (unsigned long)(&ftrace_graph_call);
561 unsigned long addr = (unsigned long)(&ftrace_graph_caller);
562 unsigned long stub = (unsigned long)(&ftrace_graph_stub);
563 unsigned int old, new;
564
565 old = ftrace_call_replace(ip, addr, 0);
566 new = ftrace_call_replace(ip, stub, 0);
567
568 return ftrace_modify_code(ip, old, new);
569}
570#endif /* CONFIG_DYNAMIC_FTRACE */
571
572/*
573 * Hook the return address and push it in the stack of return addrs
574 * in current thread info. Return the address we want to divert to.
575 */
576unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
577{
578 struct ftrace_graph_ent trace;
579 unsigned long return_hooker;
580
581 if (unlikely(ftrace_graph_is_dead()))
582 goto out;
583
584	if (unlikely(atomic_read(&current->tracing_graph_pause)))
585 goto out;
586
587 return_hooker = ppc_function_entry(return_to_handler);
588
589 trace.func = ip;
590 trace.depth = current->curr_ret_stack + 1;
591
592 /* Only trace if the calling function expects to */
593 if (!ftrace_graph_entry(&trace))
594 goto out;
595
596 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
597 NULL) == -EBUSY)
598 goto out;
599
600 parent = return_hooker;
601out:
602 return parent;
603}
604#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
605
606#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
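/*
 * The 64-bit sys_call_table interleaves native and compat entries in pairs,
 * so the native handler for syscall 'nr' lives at index nr * 2.
 */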
607unsigned long __init arch_syscall_addr(int nr)
608{
609 return sys_call_table[nr*2];
610}
611#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
612
613#ifdef PPC64_ELF_ABI_v1
614char *arch_ftrace_match_adjust(char *str, const char *search)
615{
616 if (str[0] == '.' && search[0] != '.')
617 return str + 1;
618 else
619 return str;
620}
621#endif /* PPC64_ELF_ABI_v1 */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Code for replacing ftrace calls with jumps.
4 *
5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6 *
7 * Thanks go out to P.A. Semi, Inc for supplying me with a PPC64 box.
8 *
9 * Added function graph tracer code, taken from x86 that was written
10 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
11 *
12 */
13
14#define pr_fmt(fmt) "ftrace-powerpc: " fmt
15
16#include <linux/spinlock.h>
17#include <linux/hardirq.h>
18#include <linux/uaccess.h>
19#include <linux/module.h>
20#include <linux/ftrace.h>
21#include <linux/percpu.h>
22#include <linux/init.h>
23#include <linux/list.h>
24
25#include <asm/cacheflush.h>
26#include <asm/code-patching.h>
27#include <asm/ftrace.h>
28#include <asm/syscall.h>
29#include <asm/inst.h>
30#include <asm/sections.h>
31
32#define NUM_FTRACE_TRAMPS 2
33static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
34
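/*
 * Adjust the address ftrace recorded for a call site to the location we
 * actually patch. Returning 0 tells ftrace to skip the location entirely,
 * which we do for .exit.text. With -fpatchable-function-entry the recorded
 * address is the first of the two nops, while we patch the second one.
 */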
35unsigned long ftrace_call_adjust(unsigned long addr)
36{
37 if (addr >= (unsigned long)__exittext_begin && addr < (unsigned long)__exittext_end)
38 return 0;
39
40 if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
41 addr += MCOUNT_INSN_SIZE;
42
43 return addr;
44}
45
46static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link)
47{
48 ppc_inst_t op;
49
50 WARN_ON(!is_offset_in_branch_range(addr - ip));
51 create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);
52
53 return op;
54}
55
56static inline int ftrace_read_inst(unsigned long ip, ppc_inst_t *op)
57{
58 if (copy_inst_from_kernel_nofault(op, (void *)ip)) {
59 pr_err("0x%lx: fetching instruction failed\n", ip);
60 return -EFAULT;
61 }
62
63 return 0;
64}
65
66static inline int ftrace_validate_inst(unsigned long ip, ppc_inst_t inst)
67{
68 ppc_inst_t op;
69 int ret;
70
71 ret = ftrace_read_inst(ip, &op);
72 if (!ret && !ppc_inst_equal(op, inst)) {
73 pr_err("0x%lx: expected (%08lx) != found (%08lx)\n",
74 ip, ppc_inst_as_ulong(inst), ppc_inst_as_ulong(op));
75 ret = -EINVAL;
76 }
77
78 return ret;
79}
80
81static inline int ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
82{
83 int ret = ftrace_validate_inst(ip, old);
84
85 if (!ret)
86 ret = patch_instruction((u32 *)ip, new);
87
88 return ret;
89}
90
91static int is_bl_op(ppc_inst_t op)
92{
93 return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
94}
95
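/*
 * Return a trampoline from ftrace_tramps[] (populated in
 * ftrace_dyn_arch_init()) that is reachable from 'ip' with a relative branch,
 * or 0 if none is in range.
 */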
96static unsigned long find_ftrace_tramp(unsigned long ip)
97{
98 int i;
99
100 for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
101 if (!ftrace_tramps[i])
102 continue;
103 else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
104 return ftrace_tramps[i];
105
106 return 0;
107}
108
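/*
 * Build the 'bl' to patch at rec->ip for a call to 'addr': branch directly
 * when 'addr' is within range, otherwise go via the module's ftrace stub or
 * one of the kernel ftrace trampolines.
 */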
109static int ftrace_get_call_inst(struct dyn_ftrace *rec, unsigned long addr, ppc_inst_t *call_inst)
110{
111 unsigned long ip = rec->ip;
112 unsigned long stub;
113
114 if (is_offset_in_branch_range(addr - ip)) {
115 /* Within range */
116 stub = addr;
117#ifdef CONFIG_MODULES
118 } else if (rec->arch.mod) {
119 /* Module code would be going to one of the module stubs */
120 stub = (addr == (unsigned long)ftrace_caller ? rec->arch.mod->arch.tramp :
121 rec->arch.mod->arch.tramp_regs);
122#endif
123 } else if (core_kernel_text(ip)) {
124 /* We would be branching to one of our ftrace stubs */
125 stub = find_ftrace_tramp(ip);
126 if (!stub) {
127 pr_err("0x%lx: No ftrace stubs reachable\n", ip);
128 return -EINVAL;
129 }
130 } else {
131 return -EINVAL;
132 }
133
134 *call_inst = ftrace_create_branch_inst(ip, stub, 1);
135 return 0;
136}
137
138#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
139int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
140{
141 /* This should never be called since we override ftrace_replace_code() */
142 WARN_ON(1);
143 return -EINVAL;
144}
145#endif
146
147int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
148{
149 ppc_inst_t old, new;
150 int ret;
151
152 /* This can only ever be called during module load */
153 if (WARN_ON(!IS_ENABLED(CONFIG_MODULES) || core_kernel_text(rec->ip)))
154 return -EINVAL;
155
156 old = ppc_inst(PPC_RAW_NOP());
157 ret = ftrace_get_call_inst(rec, addr, &new);
158 if (ret)
159 return ret;
160
161 return ftrace_modify_code(rec->ip, old, new);
162}
163
164int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
165{
166 /*
167 * This should never be called since we override ftrace_replace_code(),
168 * as well as ftrace_init_nop()
169 */
170 WARN_ON(1);
171 return -EINVAL;
172}
173
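/*
 * We override the weak ftrace_replace_code(): walk every record, work out the
 * required transition with ftrace_update_record(), and patch each site with
 * the old/new instructions built by ftrace_get_call_inst().
 */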
174void ftrace_replace_code(int enable)
175{
176 ppc_inst_t old, new, call_inst, new_call_inst;
177 ppc_inst_t nop_inst = ppc_inst(PPC_RAW_NOP());
178 unsigned long ip, new_addr, addr;
179 struct ftrace_rec_iter *iter;
180 struct dyn_ftrace *rec;
181 int ret = 0, update;
182
183 for_ftrace_rec_iter(iter) {
184 rec = ftrace_rec_iter_record(iter);
185 ip = rec->ip;
186
187 if (rec->flags & FTRACE_FL_DISABLED && !(rec->flags & FTRACE_FL_ENABLED))
188 continue;
189
190 addr = ftrace_get_addr_curr(rec);
191 new_addr = ftrace_get_addr_new(rec);
192 update = ftrace_update_record(rec, enable);
193
194 switch (update) {
195 case FTRACE_UPDATE_IGNORE:
196 default:
197 continue;
198 case FTRACE_UPDATE_MODIFY_CALL:
199 ret = ftrace_get_call_inst(rec, new_addr, &new_call_inst);
200 ret |= ftrace_get_call_inst(rec, addr, &call_inst);
201 old = call_inst;
202 new = new_call_inst;
203 break;
204 case FTRACE_UPDATE_MAKE_NOP:
205 ret = ftrace_get_call_inst(rec, addr, &call_inst);
206 old = call_inst;
207 new = nop_inst;
208 break;
209 case FTRACE_UPDATE_MAKE_CALL:
210 ret = ftrace_get_call_inst(rec, new_addr, &call_inst);
211 old = nop_inst;
212 new = call_inst;
213 break;
214 }
215
216 if (!ret)
217 ret = ftrace_modify_code(ip, old, new);
218 if (ret)
219 goto out;
220 }
221
222out:
223 if (ret)
224 ftrace_bug(ret, rec);
225 return;
226}
227
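/*
 * Called once for each ftrace location: sanity-check the compiler-generated
 * profiling sequence around the call site, record the owning module for
 * non-kernel text, and convert the site to its initial "disabled" form.
 */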
228int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
229{
230 unsigned long addr, ip = rec->ip;
231 ppc_inst_t old, new;
232 int ret = 0;
233
234 /* Verify instructions surrounding the ftrace location */
235 if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) {
236 /* Expect nops */
237 ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_NOP()));
238 if (!ret)
239 ret = ftrace_validate_inst(ip, ppc_inst(PPC_RAW_NOP()));
240 } else if (IS_ENABLED(CONFIG_PPC32)) {
241 /* Expected sequence: 'mflr r0', 'stw r0,4(r1)', 'bl _mcount' */
242 ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
243 if (!ret)
244 ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STW(_R0, _R1, 4)));
245 } else if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
246 /* Expected sequence: 'mflr r0', ['std r0,16(r1)'], 'bl _mcount' */
247 ret = ftrace_read_inst(ip - 4, &old);
248 if (!ret && !ppc_inst_equal(old, ppc_inst(PPC_RAW_MFLR(_R0)))) {
249 ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
250 ret |= ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STD(_R0, _R1, 16)));
251 }
252 } else {
253 return -EINVAL;
254 }
255
256 if (ret)
257 return ret;
258
259 if (!core_kernel_text(ip)) {
260 if (!mod) {
261 pr_err("0x%lx: No module provided for non-kernel address\n", ip);
262 return -EFAULT;
263 }
264 rec->arch.mod = mod;
265 }
266
267 /* Nop-out the ftrace location */
268 new = ppc_inst(PPC_RAW_NOP());
269 addr = MCOUNT_ADDR;
270 if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) {
271 /* we instead patch-in the 'mflr r0' */
272 old = ppc_inst(PPC_RAW_NOP());
273 new = ppc_inst(PPC_RAW_MFLR(_R0));
274 ret = ftrace_modify_code(ip - 4, old, new);
275 } else if (is_offset_in_branch_range(addr - ip)) {
276 /* Within range */
277 old = ftrace_create_branch_inst(ip, addr, 1);
278 ret = ftrace_modify_code(ip, old, new);
279 } else if (core_kernel_text(ip) || (IS_ENABLED(CONFIG_MODULES) && mod)) {
280 /*
281 * We would be branching to a linker-generated stub, or to the module _mcount
282 * stub. Let's just confirm we have a 'bl' here.
283 */
284 ret = ftrace_read_inst(ip, &old);
285 if (ret)
286 return ret;
287 if (!is_bl_op(old)) {
288 pr_err("0x%lx: expected (bl) != found (%08lx)\n", ip, ppc_inst_as_ulong(old));
289 return -EINVAL;
290 }
291 ret = patch_instruction((u32 *)ip, new);
292 } else {
293 return -EINVAL;
294 }
295
296 return ret;
297}
298
299int ftrace_update_ftrace_func(ftrace_func_t func)
300{
301 unsigned long ip = (unsigned long)(&ftrace_call);
302 ppc_inst_t old, new;
303 int ret;
304
305 old = ppc_inst_read((u32 *)&ftrace_call);
306 new = ftrace_create_branch_inst(ip, ppc_function_entry(func), 1);
307 ret = ftrace_modify_code(ip, old, new);
308
309 /* Also update the regs callback function */
310 if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
311 ip = (unsigned long)(&ftrace_regs_call);
312 old = ppc_inst_read((u32 *)&ftrace_regs_call);
313 new = ftrace_create_branch_inst(ip, ppc_function_entry(func), 1);
314 ret = ftrace_modify_code(ip, old, new);
315 }
316
317 return ret;
318}
319
320/*
321 * Use the default ftrace_modify_all_code, but without
322 * stop_machine().
323 */
324void arch_ftrace_update_code(int command)
325{
326 ftrace_modify_all_code(command);
327}
328
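/*
 * The trampoline in init text disappears when init memory is freed; drop it
 * from ftrace_tramps[] so we never redirect a call site to it afterwards.
 */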
329void ftrace_free_init_tramp(void)
330{
331 int i;
332
333 for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
334 if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
335 ftrace_tramps[i] = 0;
336 return;
337 }
338}
339
340static void __init add_ftrace_tramp(unsigned long tramp)
341{
342 int i;
343
344 for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
345 if (!ftrace_tramps[i]) {
346 ftrace_tramps[i] = tramp;
347 return;
348 }
349}
350
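/*
 * Install the two kernel ftrace trampolines (one in kernel text, one in init
 * text). Each stub loads the address of FTRACE_REGS_ADDR into r12 (via pcrel,
 * TOC-relative or absolute immediates, filled in below) and branches to it
 * through CTR.
 */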
351int __init ftrace_dyn_arch_init(void)
352{
353 unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
354 unsigned long addr = FTRACE_REGS_ADDR;
355 long reladdr;
356 int i;
357 u32 stub_insns[] = {
358#ifdef CONFIG_PPC_KERNEL_PCREL
359 /* pla r12,addr */
360 PPC_PREFIX_MLS | __PPC_PRFX_R(1),
361 PPC_INST_PADDI | ___PPC_RT(_R12),
362 PPC_RAW_MTCTR(_R12),
363 PPC_RAW_BCTR()
364#elif defined(CONFIG_PPC64)
365 PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
366 PPC_RAW_ADDIS(_R12, _R12, 0),
367 PPC_RAW_ADDI(_R12, _R12, 0),
368 PPC_RAW_MTCTR(_R12),
369 PPC_RAW_BCTR()
370#else
371 PPC_RAW_LIS(_R12, 0),
372 PPC_RAW_ADDI(_R12, _R12, 0),
373 PPC_RAW_MTCTR(_R12),
374 PPC_RAW_BCTR()
375#endif
376 };
377
378 if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
379 for (i = 0; i < 2; i++) {
380 reladdr = addr - (unsigned long)tramp[i];
381
382 if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
383 pr_err("Address of %ps out of range of pcrel address.\n",
384 (void *)addr);
385 return -1;
386 }
387
388 memcpy(tramp[i], stub_insns, sizeof(stub_insns));
389 tramp[i][0] |= IMM_H18(reladdr);
390 tramp[i][1] |= IMM_L(reladdr);
391 add_ftrace_tramp((unsigned long)tramp[i]);
392 }
393 } else if (IS_ENABLED(CONFIG_PPC64)) {
394 reladdr = addr - kernel_toc_addr();
395
396 if (reladdr >= (long)SZ_2G || reladdr < -(long long)SZ_2G) {
397 pr_err("Address of %ps out of range of kernel_toc.\n",
398 (void *)addr);
399 return -1;
400 }
401
402 for (i = 0; i < 2; i++) {
403 memcpy(tramp[i], stub_insns, sizeof(stub_insns));
404 tramp[i][1] |= PPC_HA(reladdr);
405 tramp[i][2] |= PPC_LO(reladdr);
406 add_ftrace_tramp((unsigned long)tramp[i]);
407 }
408 } else {
409 for (i = 0; i < 2; i++) {
410 memcpy(tramp[i], stub_insns, sizeof(stub_insns));
411 tramp[i][0] |= PPC_HA(addr);
412 tramp[i][1] |= PPC_LO(addr);
413 add_ftrace_tramp((unsigned long)tramp[i]);
414 }
415 }
416
417 return 0;
418}
419
420#ifdef CONFIG_FUNCTION_GRAPH_TRACER
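/*
 * Function graph tracing hooks in via this ftrace_ops callback: if the entry
 * is accepted, divert the traced function's return by rewriting the saved LR
 * in ftrace_regs to return_to_handler.
 */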
421void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
422 struct ftrace_ops *op, struct ftrace_regs *fregs)
423{
424 unsigned long sp = fregs->regs.gpr[1];
425 int bit;
426
427 if (unlikely(ftrace_graph_is_dead()))
428 goto out;
429
430	if (unlikely(atomic_read(&current->tracing_graph_pause)))
431 goto out;
432
433 bit = ftrace_test_recursion_trylock(ip, parent_ip);
434 if (bit < 0)
435 goto out;
436
437 if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
438 parent_ip = ppc_function_entry(return_to_handler);
439
440 ftrace_test_recursion_unlock(bit);
441out:
442 fregs->regs.link = parent_ip;
443}
444#endif /* CONFIG_FUNCTION_GRAPH_TRACER */