/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>

#include <trace/syscall.h>

#include <asm/cacheflush.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	set_kernel_text_ro();
	return 0;
}

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8 = 0xe8;
	calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}

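/*
 * Illustrative example (addresses made up): with MCOUNT_INSN_SIZE == 5,
 * patching ip = 0xffffffff81000100 to call addr = 0xffffffff81000200
 * gives offset = addr - (ip + 5) = 0xfb, so the bytes written are
 * e8 fb 00 00 00: an x86 near call whose rel32 operand is relative
 * to the first byte after the call instruction.
 */
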
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static int
do_ftrace_mod_code(unsigned long ip, const void *new_code)
{
	/*
	 * On x86_64, kernel text mappings are mapped read-only with
	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
	 * of the kernel text mapping to modify the kernel text.
	 *
	 * For 32bit kernels, these mappings are the same and we can use the
	 * kernel identity mapping to modify code.
	 */
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa(ip));

	return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
}

static const unsigned char *ftrace_nop_replace(void)
{
	return ideal_nops[NOP_ATOMIC5];
}

static int
ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
			  unsigned const char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can disappear and change;
	 * we need to protect against faulting as well as code changing.
	 * We do this by using the probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	sync_core();

	return 0;
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(rec->ip, old, new);

	/* Normal cases use add_brk_on_nop */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * The modifying_ftrace_code is used to tell the breakpoint
 * handler to call ftrace_int3_handler(). If it fails to
 * call this handler for a breakpoint added by ftrace, then
 * the kernel may crash.
 *
 * As atomic writes on x86 do not need a barrier, we do not
 * need to add smp_mb()s for this to work. It is also considered
 * that we cannot read the modifying_ftrace_code before
 * executing the breakpoint. That would be quite remarkable if
 * it could do that. Here's the flow that is required:
 *
 *   CPU-0                          CPU-1
 *
 *   atomic_inc(mfc);
 *   write int3s
 *				<trap-int3> // implicit (r)mb
 *				if (atomic_read(mfc))
 *					call ftrace_int3_handler()
 *
 * Then when we are finished:
 *
 *   atomic_dec(mfc);
 *
 * If we hit a breakpoint that was not set by ftrace, it does not
 * matter if ftrace_int3_handler() is called or not. It will
 * simply be ignored. But it is crucial that an ftrace nop/caller
 * breakpoint is handled. No other user should ever place a
 * breakpoint on an ftrace nop/caller location. It must only
 * be done by this code.
 */
atomic_t modifying_ftrace_code __read_mostly;

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code);

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);

	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ret = ftrace_modify_code(ip, old, new);

	atomic_dec(&modifying_ftrace_code);

	return ret;
}

/*
 * A breakpoint was added to the code address we are about to
 * modify, and this is the handler that will just skip over it.
 * We are either changing a nop into a trace call, or a trace
 * call to a nop. While the change is taking place, we treat
 * it just like it was a nop.
 */
int ftrace_int3_handler(struct pt_regs *regs)
{
	if (WARN_ON_ONCE(!regs))
		return 0;

	if (!ftrace_location(regs->ip - 1))
		return 0;

	regs->ip += MCOUNT_INSN_SIZE - 1;

	return 1;
}

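/*
 * A note on the arithmetic in the handler above: the int3 trap leaves
 * regs->ip pointing at the byte after the one-byte breakpoint, which is
 * why ftrace_location() is checked against regs->ip - 1 (the patched
 * site itself), and why advancing regs->ip by MCOUNT_INSN_SIZE - 1
 * skips the remaining four bytes of the five-byte instruction, making
 * the trap behave as if a nop had executed.
 */
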
static int ftrace_write(unsigned long ip, const char *val, int size)
{
	/*
	 * On x86_64, kernel text mappings are mapped read-only with
	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
	 * of the kernel text mapping to modify the kernel text.
	 *
	 * For 32bit kernels, these mappings are the same and we can use the
	 * kernel identity mapping to modify code.
	 */
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa(ip));

	return probe_kernel_write((void *)ip, val, size);
}

static int add_break(unsigned long ip, const char *old)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;

	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	if (ftrace_write(ip, &brk, 1))
		return -EPERM;

	return 0;
}

static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);

	return add_break(rec->ip, old);
}

static int add_brk_on_nop(struct dyn_ftrace *rec)
{
	unsigned const char *old;

	old = ftrace_nop_replace();

	return add_break(rec->ip, old);
}

static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_test_record(rec, enable);

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_brk_on_nop(rec);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_brk_on_call(rec, ftrace_addr);
	}
	return 0;
}

/*
 * On error, we need to remove breakpoints. This needs to
 * be done carefully. If the address does not currently have a
 * breakpoint, we know we are done. Otherwise, we look at the
 * remaining 4 bytes of the instruction. If it matches a nop
 * we replace the breakpoint with the nop. Otherwise we replace
 * it with the call instruction.
 */
static int remove_breakpoint(struct dyn_ftrace *rec)
{
	unsigned char ins[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;
	const unsigned char *nop;
	unsigned long ftrace_addr;
	unsigned long ip = rec->ip;

	/* If we fail the read, just give up */
	if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* If this does not have a breakpoint, we are done */
	if (ins[0] != brk)
		return -1;

	nop = ftrace_nop_replace();

	/*
	 * If the last 4 bytes of the instruction do not match
	 * a nop, then we assume that this is a call to ftrace_addr.
	 */
	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
		/*
		 * For extra paranoia, we check if the breakpoint is on
		 * a call that would actually jump to the ftrace_addr.
		 * If not, don't touch the breakpoint; we may just create
		 * a disaster.
		 */
		ftrace_addr = (unsigned long)FTRACE_ADDR;
		nop = ftrace_call_replace(ip, ftrace_addr);

		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
			return -EINVAL;
	}

	return probe_kernel_write((void *)ip, &nop[0], 1);
}

static int add_update_code(unsigned long ip, unsigned const char *new)
{
	/* skip breakpoint */
	ip++;
	new++;
	if (ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1))
		return -EPERM;
	return 0;
}

static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);
	return add_update_code(ip, new);
}

static int add_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();
	return add_update_code(ip, new);
}

static int add_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_test_record(rec, enable);

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_update_nop(rec);
	}

	return 0;
}

static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);

	if (ftrace_write(ip, new, 1))
		return -EPERM;

	return 0;
}

static int finish_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();

	if (ftrace_write(ip, new, 1))
		return -EPERM;
	return 0;
}

static int finish_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_update_record(rec, enable);

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return finish_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return finish_update_nop(rec);
	}

	return 0;
}

static void do_sync_core(void *data)
{
	sync_core();
}

static void run_sync(void)
{
	int enable_irqs = irqs_disabled();

	/* We may be called with interrupts disabled (on bootup). */
	if (enable_irqs)
		local_irq_enable();
	on_each_cpu(do_sync_core, NULL, 1);
	if (enable_irqs)
		local_irq_disable();
}

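/*
 * Overview of the conversion below: ftrace_replace_code() rewrites all
 * registered sites in three passes, with a run_sync() between passes so
 * that no CPU can execute a half-written instruction:
 *
 *  1) add_breakpoints() plants an int3 over the first byte of each site
 *     (after verifying the expected old contents).
 *  2) add_update() writes the new last four bytes (call offset or nop
 *     tail) while the int3 still guards the first byte.
 *  3) finish_update() writes the new first byte over the int3,
 *     completing the new instruction.
 *
 * If any step fails, the breakpoints added so far are removed again.
 */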
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *report = "adding breakpoints";
	int count = 0;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_breakpoints(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	report = "updating code";

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
	}

	run_sync();

	report = "removing breakpoints";

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = finish_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
	}

	run_sync();

	return;

 remove_breakpoints:
	ftrace_bug(ret, rec ? rec->ip : 0);
	printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		remove_breakpoint(rec);
	}
}

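/*
 * The single-site counterpart of ftrace_replace_code() above: the same
 * int3/tail/head sequence, with a run_sync() after each step. It is
 * used for patching ftrace_call itself via ftrace_update_ftrace_func().
 */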
static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code)
{
	int ret;

	ret = add_break(ip, old_code);
	if (ret)
		goto out;

	run_sync();

	ret = add_update_code(ip, new_code);
	if (ret)
		goto fail_update;

	run_sync();

	ret = ftrace_write(ip, new_code, 1);
	if (ret) {
		ret = -EPERM;
		goto out;
	}
	run_sync();
 out:
	return ret;

 fail_update:
	probe_kernel_write((void *)ip, &old_code[0], 1);
	goto out;
}

void arch_ftrace_update_code(int command)
{
	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ftrace_modify_all_code(command);

	atomic_dec(&modifying_ftrace_code);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod_jmp(unsigned long ip,
			  int old_offset, int new_offset)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
		return -EINVAL;

	*(int *)(&code[1]) = new_offset;

	if (do_ftrace_mod_code(ip, &code))
		return -EPERM;

	return 0;
}

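/*
 * The instruction at ftrace_graph_call is a jmp: opcode 0xe9 followed
 * by a rel32 operand, so ftrace_mod_jmp() only needs to verify the old
 * offset and splice in the new one. As the two callers below show, both
 * offsets are computed relative to ip + MCOUNT_INSN_SIZE, the first
 * byte after the five-byte instruction.
 */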
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it onto the stack of return addrs
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
				     frame_pointer) == -EBUSY) {
		*parent = old;
		return;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>

#include <trace/syscall.h>

#include <asm/set_memory.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static int ftrace_poke_late = 0;

int ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race from module loading
	 * and live kernel patching from changing the text permissions while
	 * ftrace has it set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * that they do, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
	return 0;
}

static const char *ftrace_nop_replace(void)
{
	return ideal_nops[NOP_ATOMIC5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}

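/*
 * text_gen_insn() does what older revisions of this file did by hand
 * with a packed ftrace_code_union: it emits the 0xe8 opcode followed by
 * a rel32 operand computed as addr - (ip + CALL_INSN_SIZE), i.e.
 * relative to the first byte after the five-byte call instruction.
 */
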
static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, because if a bug were to
	 * happen, it could cause us to read or write to someplace that
	 * could cause harm. Carefully read and modify the code with
	 * probe_kernel_*(), and make sure what we read is what we expected
	 * it to be before modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 * It is only called by __ftrace_replace_code(), which is called by
 * ftrace_replace_code() (which x86 overrides), and by ftrace_update_code(),
 * which is called to turn mcount calls into nops or nops into function
 * calls, but not to convert a function from not using regs to one that
 * uses regs, which is what ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

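/*
 * Unlike the old int3-dance implementation, ftrace_replace_code() below
 * batches the work through the text_poke() machinery: a first pass
 * verifies that every site still holds the expected old instruction,
 * and a second pass queues the new instructions with text_poke_queue(),
 * which text_poke_finish() then installs as one batch.
 */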
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_regs_caller_ret(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};

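/*
 * Worked example (offsets made up): if the movq sits at trampoline
 * offset 0x20 and the ftrace_ops pointer is stored at offset 0x80, the
 * rel32 operand written in create_trampoline() below is
 * 0x80 - (0x20 + OP_REF_SIZE) = 0x59, since RIP-relative addressing
 * counts from the first byte after the seven-byte instruction.
 */
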
#define RET_SIZE	1

static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long retq;
	unsigned long *ptr;
	void *trampoline;
	void *ip;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the retq, as well as the address of the ftrace_ops this
	 * trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;

	/* The trampoline ends with ret(q) */
	retq = (unsigned long)ftrace_stub;
	ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE);
	if (WARN_ON(ret < 0))
		goto fail;

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		ret = copy_from_kernel_nofault(ip, ideal_nops[2], 2);
		if (ret < 0)
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE,
			     trampoline + call_offset,
			     ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* ALLOC_TRAMP flags lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_vm_flush_reset_perms(trampoline);

	if (likely(system_state != SYSTEM_BOOTING))
		set_memory_ro((unsigned long)trampoline, npages);
	set_memory_x((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}

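/*
 * Layout of the trampoline assembled above, from low to high addresses:
 * a copy of ftrace_(regs_)caller (size bytes, with its movq and call
 * operands repointed), then a ret copied from ftrace_stub (RET_SIZE
 * bytes), then the struct ftrace_ops pointer that the movq loads.
 */
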
void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

	return ptr + CALL_INSN_SIZE + call.disp;
}

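/*
 * This is the inverse of the call encoding used throughout this file:
 * for a call instruction at ptr, the destination is ptr + CALL_INSN_SIZE
 * plus the rel32 displacement, hence the sum returned above.
 */
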
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it onto the stack of return addrs
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;
	int faulted;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably. Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = old;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */