// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>

#include <trace/syscall.h>

#include <asm/set_memory.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void)
{
        set_kernel_text_rw();
        set_all_modules_text_rw();
        return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
        set_all_modules_text_ro();
        set_kernel_text_ro();
        return 0;
}

union ftrace_code_union {
        char code[MCOUNT_INSN_SIZE];
        struct {
                unsigned char e8;
                int offset;
        } __attribute__((packed));
};
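
/*
 * A worked example of the encoding above (illustrative addresses, not
 * taken from a real kernel map): the displacement is relative to the
 * next instruction, i.e. ip + MCOUNT_INSN_SIZE (5 on x86). A call at
 * ip = 0xffffffff81000000 to addr = 0xffffffff81000010 is therefore
 *
 *      offset = addr - (ip + 5) = 0x0b
 *      code   = e8 0b 00 00 00                 call addr
 */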

static int ftrace_calc_offset(long ip, long addr)
{
        return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
        static union ftrace_code_union calc;

        calc.e8 = 0xe8;
        calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

        /*
         * No locking needed, this must be called via kstop_machine
         * which in essence is like running on a uniprocessor machine.
         */
        return calc.code;
}

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

static unsigned long text_ip_addr(unsigned long ip)
{
        /*
         * On x86_64, kernel text mappings are mapped read-only, so we use
         * the kernel identity mapping instead of the kernel text mapping
         * to modify the kernel text.
         *
         * For 32bit kernels, these mappings are the same and we can use
         * the kernel identity mapping to modify code.
         */
        if (within(ip, (unsigned long)_text, (unsigned long)_etext))
                ip = (unsigned long)__va(__pa_symbol(ip));

        return ip;
}
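
/*
 * Concretely (an illustrative scenario): with the kernel text mapped
 * read-only, __pa_symbol(ip) folds ip back to its physical address and
 * __va() returns the alias of that address in the writable direct map,
 * so probe_kernel_write() patches through the alias rather than the
 * read-only text mapping.
 */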

static const unsigned char *ftrace_nop_replace(void)
{
        return ideal_nops[NOP_ATOMIC5];
}
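
/*
 * NOP_ATOMIC5 is a single 5-byte nop (e.g. 0f 1f 44 00 00,
 * "nopl 0x0(%rax,%rax,1)", on Intel parts) rather than a sequence of
 * shorter nops: a CPU can never be executing in the middle of the
 * 5-byte region, which is what makes swapping it with a 5-byte call
 * feasible at all.
 */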

static int
ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
                          unsigned const char *new_code)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];

        ftrace_expected = old_code;

        /*
         * Note:
         * We are paranoid about modifying text, as if a bug was to happen, it
         * could cause us to read or write to someplace that could cause harm.
         * Carefully read and modify the code with probe_kernel_*(), and make
         * sure what we read is what we expected it to be before modifying it.
         */

        /* read the text we want to modify */
        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
                return -EINVAL;

        ip = text_ip_addr(ip);

        /* replace the text with the new text */
        if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
                return -EPERM;

        sync_core();

        return 0;
}

int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned const char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace();

        /*
         * On boot up, and when modules are loaded, the MCOUNT_ADDR
         * is converted to a nop, and will never become MCOUNT_ADDR
         * again. This code is either running before SMP (on boot up)
         * or before the code will ever be executed (module load).
         * We do not want to use the breakpoint version in this case,
         * just modify the code directly.
         */
        if (addr == MCOUNT_ADDR)
                return ftrace_modify_code_direct(rec->ip, old, new);

        ftrace_expected = NULL;

        /* Normal cases use add_brk_on_nop */
        WARN_ONCE(1, "invalid use of ftrace_make_nop");
        return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned const char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_nop_replace();
        new = ftrace_call_replace(ip, addr);

        /* Should only be called when module is loaded */
        return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * The modifying_ftrace_code is used to tell the breakpoint
 * handler to call ftrace_int3_handler(). If it fails to
 * call this handler for a breakpoint added by ftrace, then
 * the kernel may crash.
 *
 * As atomic writes on x86 do not need a barrier, we do not
 * need to add smp_mb()s for this to work. It is also considered
 * that we can not read the modifying_ftrace_code before
 * executing the breakpoint. That would be quite remarkable if
 * it could do that. Here's the flow that is required:
 *
 *   CPU-0                          CPU-1
 *
 *   atomic_inc(mfc);
 *   write int3s
 *                                  <trap-int3> // implicit (r)mb
 *                                  if (atomic_read(mfc))
 *                                          call ftrace_int3_handler()
 *
 * Then when we are finished:
 *
 *   atomic_dec(mfc);
 *
 * If we hit a breakpoint that was not set by ftrace, it does not
 * matter if ftrace_int3_handler() is called or not. It will
 * simply be ignored. But it is crucial that a ftrace nop/caller
 * breakpoint is handled. No other user should ever place a
 * breakpoint on an ftrace nop/caller location. It must only
 * be done by this code.
 */
atomic_t modifying_ftrace_code __read_mostly;

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
                   unsigned const char *new_code);

/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
{
        WARN_ON(1);
        ftrace_expected = NULL;
        return -EINVAL;
}

static unsigned long ftrace_update_func;

static int update_ftrace_func(unsigned long ip, void *new)
{
        unsigned char old[MCOUNT_INSN_SIZE];
        int ret;

        memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);

        ftrace_update_func = ip;
        /* Make sure the breakpoints see the ftrace_update_func update */
        smp_wmb();

        /* See comment above by declaration of modifying_ftrace_code */
        atomic_inc(&modifying_ftrace_code);

        ret = ftrace_modify_code(ip, old, new);

        atomic_dec(&modifying_ftrace_code);

        return ret;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip = (unsigned long)(&ftrace_call);
        unsigned char *new;
        int ret;

        new = ftrace_call_replace(ip, (unsigned long)func);
        ret = update_ftrace_func(ip, new);

        /* Also update the regs callback function */
        if (!ret) {
                ip = (unsigned long)(&ftrace_regs_call);
                new = ftrace_call_replace(ip, (unsigned long)func);
                ret = update_ftrace_func(ip, new);
        }

        return ret;
}

static int is_ftrace_caller(unsigned long ip)
{
        if (ip == ftrace_update_func)
                return 1;

        return 0;
}

/*
 * A breakpoint was added to the code address we are about to
 * modify, and this is the handler that will just skip over it.
 * We are either changing a nop into a trace call, or a trace
 * call to a nop. While the change is taking place, we treat
 * it just like it was a nop.
 */
int ftrace_int3_handler(struct pt_regs *regs)
{
        unsigned long ip;

        if (WARN_ON_ONCE(!regs))
                return 0;

        ip = regs->ip - 1;
        if (!ftrace_location(ip) && !is_ftrace_caller(ip))
                return 0;

        regs->ip += MCOUNT_INSN_SIZE - 1;

        return 1;
}
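
/*
 * A worked example (illustrative address): with a breakpoint on an
 * mcount site at 0xffffffff81000000, the int3 trap leaves regs->ip at
 * site + 1. Subtracting 1 recovers the site address so it can be
 * checked against ftrace_location(), and adding MCOUNT_INSN_SIZE - 1
 * (i.e. 4) moves regs->ip to site + 5, resuming just past the
 * instruction being rewritten, exactly as if a 5-byte nop had run.
 */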

static int ftrace_write(unsigned long ip, const char *val, int size)
{
        ip = text_ip_addr(ip);

        if (probe_kernel_write((void *)ip, val, size))
                return -EPERM;

        return 0;
}

static int add_break(unsigned long ip, const char *old)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];
        unsigned char brk = BREAKPOINT_INSTRUCTION;

        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        ftrace_expected = old;

        /* Make sure it is what we expect it to be */
        if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
                return -EINVAL;

        return ftrace_write(ip, &brk, 1);
}

static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned const char *old;
        unsigned long ip = rec->ip;

        old = ftrace_call_replace(ip, addr);

        return add_break(rec->ip, old);
}

static int add_brk_on_nop(struct dyn_ftrace *rec)
{
        unsigned const char *old;

        old = ftrace_nop_replace();

        return add_break(rec->ip, old);
}

static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ftrace_addr = ftrace_get_addr_curr(rec);

        ret = ftrace_test_record(rec, enable);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MAKE_CALL:
                /* converting nop to call */
                return add_brk_on_nop(rec);

        case FTRACE_UPDATE_MODIFY_CALL:
        case FTRACE_UPDATE_MAKE_NOP:
                /* converting a call to a nop */
                return add_brk_on_call(rec, ftrace_addr);
        }
        return 0;
}

/*
 * On error, we need to remove breakpoints. This needs to
 * be done carefully. If the address does not currently have a
 * breakpoint, we know we are done. Otherwise, we look at the
 * remaining 4 bytes of the instruction. If it matches a nop
 * we replace the breakpoint with the nop. Otherwise we replace
 * it with the call instruction.
 */
static int remove_breakpoint(struct dyn_ftrace *rec)
{
        unsigned char ins[MCOUNT_INSN_SIZE];
        unsigned char brk = BREAKPOINT_INSTRUCTION;
        const unsigned char *nop;
        unsigned long ftrace_addr;
        unsigned long ip = rec->ip;

        /* If we fail the read, just give up */
        if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* If this does not have a breakpoint, we are done */
        if (ins[0] != brk)
                return 0;

        nop = ftrace_nop_replace();

        /*
         * If the last 4 bytes of the instruction do not match
         * a nop, then we assume that this is a call to ftrace_addr.
         */
        if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
                /*
                 * For extra paranoia, we check if the breakpoint is on
                 * a call that would actually jump to the ftrace_addr.
                 * If not, don't touch the breakpoint, as we may just
                 * create a disaster.
                 */
                ftrace_addr = ftrace_get_addr_new(rec);
                nop = ftrace_call_replace(ip, ftrace_addr);

                if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
                        goto update;

                /* Check both ftrace_addr and ftrace_old_addr */
                ftrace_addr = ftrace_get_addr_curr(rec);
                nop = ftrace_call_replace(ip, ftrace_addr);

                ftrace_expected = nop;

                if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
                        return -EINVAL;
        }

 update:
        return ftrace_write(ip, nop, 1);
}

static int add_update_code(unsigned long ip, unsigned const char *new)
{
        /* skip breakpoint */
        ip++;
        new++;
        return ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1);
}

static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_call_replace(ip, addr);
        return add_update_code(ip, new);
}

static int add_update_nop(struct dyn_ftrace *rec)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_nop_replace();
        return add_update_code(ip, new);
}

static int add_update(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ret = ftrace_test_record(rec, enable);

        ftrace_addr = ftrace_get_addr_new(rec);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MODIFY_CALL:
        case FTRACE_UPDATE_MAKE_CALL:
                /* converting nop to call */
                return add_update_call(rec, ftrace_addr);

        case FTRACE_UPDATE_MAKE_NOP:
                /* converting a call to a nop */
                return add_update_nop(rec);
        }

        return 0;
}

static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_call_replace(ip, addr);

        return ftrace_write(ip, new, 1);
}

static int finish_update_nop(struct dyn_ftrace *rec)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_nop_replace();

        return ftrace_write(ip, new, 1);
}

static int finish_update(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ret = ftrace_update_record(rec, enable);

        ftrace_addr = ftrace_get_addr_new(rec);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MODIFY_CALL:
        case FTRACE_UPDATE_MAKE_CALL:
                /* converting nop to call */
                return finish_update_call(rec, ftrace_addr);

        case FTRACE_UPDATE_MAKE_NOP:
                /* converting a call to a nop */
                return finish_update_nop(rec);
        }

        return 0;
}

static void do_sync_core(void *data)
{
        sync_core();
}

static void run_sync(void)
{
        int enable_irqs;

        /* No need to sync if there's only one CPU */
        if (num_online_cpus() == 1)
                return;

        enable_irqs = irqs_disabled();

        /* We may be called with interrupts disabled (on bootup). */
        if (enable_irqs)
                local_irq_enable();
        on_each_cpu(do_sync_core, NULL, 1);
        if (enable_irqs)
                local_irq_disable();
}

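/*
 * A sketch of the three-phase update performed below, shown for one
 * 5-byte site going from a nop to a call (byte values illustrative):
 *
 *   phase 1: add_breakpoints()   0f 1f 44 00 00  ->  cc 1f 44 00 00
 *            run_sync()
 *   phase 2: add_update()        cc 1f 44 00 00  ->  cc d1 d2 d3 d4
 *            run_sync()
 *   phase 3: finish_update()     cc d1 d2 d3 d4  ->  e8 d1 d2 d3 d4
 *            run_sync()
 *
 * Any CPU that executes the site mid-update hits the int3 and
 * ftrace_int3_handler() skips the instruction; the run_sync() calls
 * make every CPU serialize before the next phase begins.
 */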
void ftrace_replace_code(int enable)
{
        struct ftrace_rec_iter *iter;
        struct dyn_ftrace *rec;
        const char *report = "adding breakpoints";
        int count = 0;
        int ret;

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                ret = add_breakpoints(rec, enable);
                if (ret)
                        goto remove_breakpoints;
                count++;
        }

        run_sync();

        report = "updating code";
        count = 0;

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                ret = add_update(rec, enable);
                if (ret)
                        goto remove_breakpoints;
                count++;
        }

        run_sync();

        report = "removing breakpoints";
        count = 0;

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                ret = finish_update(rec, enable);
                if (ret)
                        goto remove_breakpoints;
                count++;
        }

        run_sync();

        return;

 remove_breakpoints:
        pr_warn("Failed on %s (%d):\n", report, count);
        ftrace_bug(ret, rec);
        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);
                /*
                 * Breakpoints are handled only when this function is in
                 * progress. The system could not work with them.
                 */
                if (remove_breakpoint(rec))
                        BUG();
        }
        run_sync();
}

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
                   unsigned const char *new_code)
{
        int ret;

        ret = add_break(ip, old_code);
        if (ret)
                goto out;

        run_sync();

        ret = add_update_code(ip, new_code);
        if (ret)
                goto fail_update;

        run_sync();

        ret = ftrace_write(ip, new_code, 1);
        /*
         * The breakpoint is handled only when this function is in progress.
         * The system could not work if we could not remove it.
         */
        BUG_ON(ret);
 out:
        run_sync();
        return ret;

 fail_update:
        /* Also here the system could not work with the breakpoint */
        if (ftrace_write(ip, old_code, 1))
                BUG();
        goto out;
}

void arch_ftrace_update_code(int command)
{
        /* See comment above by declaration of modifying_ftrace_code */
        atomic_inc(&modifying_ftrace_code);

        ftrace_modify_all_code(command);

        atomic_dec(&modifying_ftrace_code);
}

int __init ftrace_dyn_arch_init(void)
{
        return 0;
}

#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
        static union ftrace_code_union calc;

        /* Jmp not a call (ignore the .e8) */
        calc.e8 = 0xe9;
        calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

        /*
         * ftrace external locks synchronize the access to the static variable.
         */
        return calc.code;
}
#endif

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
        return module_alloc(size);
}
static inline void tramp_free(void *tramp, int size)
{
        int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;

        set_memory_nx((unsigned long)tramp, npages);
        set_memory_rw((unsigned long)tramp, npages);
        module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
        return NULL;
}
static inline void tramp_free(void *tramp, int size) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_epilogue(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE 7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
        char code[OP_REF_SIZE];
        struct {
                char op[3];
                int offset;
        } __attribute__((packed));
};
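
/*
 * For example (illustrative bytes): the default caller contains
 *
 *      48 8b 15 <disp32>       movq disp32(%rip), %rdx
 *
 * with disp32 pointing at the global function_trace_op. The copy made
 * by create_trampoline() below keeps the three opcode bytes and
 * rewrites disp32 so that the load instead reads the ftrace_ops pointer
 * stored at the end of the trampoline itself.
 */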

static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
        unsigned const char *jmp;
        unsigned long start_offset;
        unsigned long end_offset;
        unsigned long op_offset;
        unsigned long offset;
        unsigned long size;
        unsigned long ip;
        unsigned long *ptr;
        void *trampoline;
        /* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
        unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
        union ftrace_op_code_union op_ptr;
        int ret;

        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
                start_offset = (unsigned long)ftrace_regs_caller;
                end_offset = (unsigned long)ftrace_regs_caller_end;
                op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
        } else {
                start_offset = (unsigned long)ftrace_caller;
                end_offset = (unsigned long)ftrace_epilogue;
                op_offset = (unsigned long)ftrace_caller_op_ptr;
        }

        size = end_offset - start_offset;

        /*
         * Allocate enough size to store the ftrace_caller code,
         * the jmp to ftrace_epilogue, as well as the address of
         * the ftrace_ops this trampoline is used for.
         */
        trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));
        if (!trampoline)
                return 0;

        *tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);

        /* Copy ftrace_caller onto the trampoline memory */
        ret = probe_kernel_read(trampoline, (void *)start_offset, size);
        if (WARN_ON(ret < 0)) {
                tramp_free(trampoline, *tramp_size);
                return 0;
        }

        ip = (unsigned long)trampoline + size;

        /* The trampoline ends with a jmp to ftrace_epilogue */
        jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue);
        memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);

        /*
         * The address of the ftrace_ops that is used for this trampoline
         * is stored at the end of the trampoline. This will be used to
         * load the third parameter for the callback. Basically, that
         * location at the end of the trampoline takes the place of
         * the global function_trace_op variable.
         */

        ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
        *ptr = (unsigned long)ops;

        op_offset -= start_offset;
        memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

        /* Are we pointing to the reference? */
        if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
                tramp_free(trampoline, *tramp_size);
                return 0;
        }

        /* Load the contents of ptr into the callback parameter */
        offset = (unsigned long)ptr;
        offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

        op_ptr.offset = offset;

        /* put in the new offset to the ftrace_ops */
        memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

        /* ALLOC_TRAMP flags lets us know we created it */
        ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

        return (unsigned long)trampoline;
}
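
/*
 * Resulting layout (sketch):
 *
 *   trampoline + 0                        copy of ftrace_caller or
 *                                         ftrace_regs_caller
 *   trampoline + size                     jmp ftrace_epilogue (5 bytes)
 *   trampoline + size + MCOUNT_INSN_SIZE  ftrace_ops pointer read by
 *                                         the patched movq
 */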

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
        unsigned long start_offset;
        unsigned long call_offset;

        if (save_regs) {
                start_offset = (unsigned long)ftrace_regs_caller;
                call_offset = (unsigned long)ftrace_regs_call;
        } else {
                start_offset = (unsigned long)ftrace_caller;
                call_offset = (unsigned long)ftrace_call;
        }

        return call_offset - start_offset;
}

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
        ftrace_func_t func;
        unsigned char *new;
        unsigned long offset;
        unsigned long ip;
        unsigned int size;
        int ret, npages;

        if (ops->trampoline) {
                /*
                 * The ftrace_ops caller may set up its own trampoline.
                 * In such a case, this code must not modify it.
                 */
                if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                        return;
                npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT;
                set_memory_rw(ops->trampoline, npages);
        } else {
                ops->trampoline = create_trampoline(ops, &size);
                if (!ops->trampoline)
                        return;
                ops->trampoline_size = size;
                npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        }

        offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
        ip = ops->trampoline + offset;

        func = ftrace_ops_get_func(ops);

        /* Do a safe modify in case the trampoline is executing */
        new = ftrace_call_replace(ip, (unsigned long)func);
        ret = update_ftrace_func(ip, new);
        set_memory_ro(ops->trampoline, npages);

        /* The update should never fail */
        WARN_ON(ret);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
        union ftrace_code_union calc;
        int ret;

        ret = probe_kernel_read(&calc, ptr, MCOUNT_INSN_SIZE);
        if (WARN_ON_ONCE(ret < 0))
                return NULL;

        /* Make sure this is a call */
        if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
                pr_warn("Expected e8, got %x\n", calc.e8);
                return NULL;
        }

        return ptr + MCOUNT_INSN_SIZE + calc.offset;
}

void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
                           unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
        unsigned long offset;
        bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
        void *ptr;

        if (ops && ops->trampoline) {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
                /*
                 * We only know about function graph tracer setting as static
                 * trampoline.
                 */
                if (ops->trampoline == FTRACE_GRAPH_ADDR)
                        return (void *)prepare_ftrace_return;
#endif
                return NULL;
        }

        offset = calc_trampoline_call_offset(save_regs);

        if (save_regs)
                ptr = (void *)FTRACE_REGS_ADDR + offset;
        else
                ptr = (void *)FTRACE_ADDR + offset;

        return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
        unsigned long offset;

        /* If we didn't allocate this trampoline, consider it static */
        if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                return static_tramp_func(ops, rec);

        offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
        return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
        if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                return;

        tramp_free((void *)ops->trampoline, ops->trampoline_size);
        ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
        unsigned char *new;

        new = ftrace_jmp_replace(ip, (unsigned long)func);

        return update_ftrace_func(ip, new);
}

int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);

        return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);

        return ftrace_mod_jmp(ip, &ftrace_stub);
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
                           unsigned long frame_pointer)
{
        unsigned long old;
        int faulted;
        struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)&return_to_handler;

        /*
         * When resuming from suspend-to-ram, this function can be indirectly
         * called from early CPU startup code while the CPU is in real mode,
         * which would fail miserably. Make sure the stack pointer is a
         * virtual address.
         *
         * This check isn't as accurate as virt_addr_valid(), but it should be
         * good enough for this purpose, and it's fast.
         */
        if (unlikely((long)__builtin_frame_address(0) >= 0))
                return;

        if (unlikely(ftrace_graph_is_dead()))
                return;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /*
         * Protect against a fault, even if it shouldn't
         * happen. This tool is too intrusive to
         * ignore such a protection.
         */
        asm volatile(
                "1: " _ASM_MOV " (%[parent]), %[old]\n"
                "2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
                "   movl $0, %[faulted]\n"
                "3:\n"

                ".section .fixup, \"ax\"\n"
                "4: movl $1, %[faulted]\n"
                "   jmp 3b\n"
                ".previous\n"

                _ASM_EXTABLE(1b, 4b)
                _ASM_EXTABLE(2b, 4b)

                : [old] "=&r" (old), [faulted] "=r" (faulted)
                : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
                : "memory"
        );

        if (unlikely(faulted)) {
                ftrace_graph_stop();
                WARN_ON(1);
                return;
        }

        trace.func = self_addr;
        trace.depth = current->curr_ret_stack + 1;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                *parent = old;
                return;
        }

        if (ftrace_push_return_trace(old, self_addr, &trace.depth,
                                     frame_pointer, parent) == -EBUSY) {
                *parent = old;
                return;
        }
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <trace/syscall.h>

#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static int ftrace_poke_late = 0;

void ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
        /*
         * Need to grab text_mutex to prevent a race from module loading
         * and live kernel patching from changing the text permissions while
         * ftrace has it set to "read/write".
         */
        mutex_lock(&text_mutex);
        ftrace_poke_late = 1;
}

void ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
        /*
         * ftrace_make_{call,nop}() may be called during
         * module load, and we need to finish the text_poke_queue()
         * that they do, here.
         */
        text_poke_finish();
        ftrace_poke_late = 0;
        mutex_unlock(&text_mutex);
}

static const char *ftrace_nop_replace(void)
{
        return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
        /*
         * No need to translate into a callthunk. The trampoline does
         * the depth accounting itself.
         */
        return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}
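
/*
 * text_gen_insn() builds the same 5-byte "e8 <rel32>" call that the old
 * ftrace_code_union used to assemble by hand: CALL_INSN_OPCODE (0xe8)
 * followed by a 32-bit displacement relative to ip + CALL_INSN_SIZE.
 */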

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
        char cur_code[MCOUNT_INSN_SIZE];

        /*
         * Note:
         * We are paranoid about modifying text, as if a bug was to happen, it
         * could cause us to read or write to someplace that could cause harm.
         * Carefully read and modify the code with copy_from_kernel_nofault(),
         * and make sure what we read is what we expected it to be before
         * modifying it.
         */
        /* read the text we want to modify */
        if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
                WARN_ON(1);
                return -EFAULT;
        }

        /* Make sure it is what we expect it to be */
        if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
                ftrace_expected = old_code;
                WARN_ON(1);
                return -EINVAL;
        }

        return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
                          const char *new_code)
{
        int ret = ftrace_verify_code(ip, old_code);
        if (ret)
                return ret;

        /* replace the text with the new text */
        if (ftrace_poke_late)
                text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
        else
                text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
        return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        const char *new, *old;

        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace();

        /*
         * On boot up, and when modules are loaded, the MCOUNT_ADDR
         * is converted to a nop, and will never become MCOUNT_ADDR
         * again. This code is either running before SMP (on boot up)
         * or before the code will ever be executed (module load).
         * We do not want to use the breakpoint version in this case,
         * just modify the code directly.
         */
        if (addr == MCOUNT_ADDR)
                return ftrace_modify_code_direct(ip, old, new);

        /*
         * x86 overrides ftrace_replace_code -- this function will never be used
         * in this case.
         */
        WARN_ONCE(1, "invalid use of ftrace_make_nop");
        return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        const char *new, *old;

        old = ftrace_nop_replace();
        new = ftrace_call_replace(ip, addr);

        /* Should only be called when module is loaded */
        return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
{
        WARN_ON(1);
        return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip;
        const char *new;

        ip = (unsigned long)(&ftrace_call);
        new = ftrace_call_replace(ip, (unsigned long)func);
        text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

        ip = (unsigned long)(&ftrace_regs_call);
        new = ftrace_call_replace(ip, (unsigned long)func);
        text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

        return 0;
}

void ftrace_replace_code(int enable)
{
        struct ftrace_rec_iter *iter;
        struct dyn_ftrace *rec;
        const char *new, *old;
        int ret;

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                switch (ftrace_test_record(rec, enable)) {
                case FTRACE_UPDATE_IGNORE:
                default:
                        continue;

                case FTRACE_UPDATE_MAKE_CALL:
                        old = ftrace_nop_replace();
                        break;

                case FTRACE_UPDATE_MODIFY_CALL:
                case FTRACE_UPDATE_MAKE_NOP:
                        old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
                        break;
                }

                ret = ftrace_verify_code(rec->ip, old);
                if (ret) {
                        ftrace_expected = old;
                        ftrace_bug(ret, rec);
                        ftrace_expected = NULL;
                        return;
                }
        }

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                switch (ftrace_test_record(rec, enable)) {
                case FTRACE_UPDATE_IGNORE:
                default:
                        continue;

                case FTRACE_UPDATE_MAKE_CALL:
                case FTRACE_UPDATE_MODIFY_CALL:
                        new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
                        break;

                case FTRACE_UPDATE_MAKE_NOP:
                        new = ftrace_nop_replace();
                        break;
                }

                text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
                ftrace_update_record(rec, enable);
        }
        text_poke_finish();
}
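
/*
 * Compared with the old int3-based three-phase dance, the version above
 * first verifies every record, then batches all writes through
 * text_poke_queue(); text_poke_finish() flushes the queue, and the
 * breakpoint-insert/sync/fix-up steps now live inside the common
 * text_poke batching machinery rather than in this file.
 */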

void arch_ftrace_update_code(int command)
{
        ftrace_modify_all_code(command);
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
        return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
        module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
        return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE 7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
        char code[OP_REF_SIZE];
        struct {
                char op[3];
                int offset;
        } __attribute__((packed));
};

#define RET_SIZE \
        (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_MITIGATION_SLS))
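
/*
 * RET_SIZE worked out (sketch): with retpoline mitigations the
 * trampoline ends in a 5-byte "jmp __x86_return_thunk"; otherwise it is
 * a 1-byte ret (c3), plus one int3 byte when straight-line-speculation
 * hardening (MITIGATION_SLS) pads returns.
 */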

static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
        unsigned long start_offset;
        unsigned long end_offset;
        unsigned long op_offset;
        unsigned long call_offset;
        unsigned long jmp_offset;
        unsigned long offset;
        unsigned long npages;
        unsigned long size;
        unsigned long *ptr;
        void *trampoline;
        void *ip, *dest;
        /* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
        unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
        unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
        union ftrace_op_code_union op_ptr;
        int ret;

        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
                start_offset = (unsigned long)ftrace_regs_caller;
                end_offset = (unsigned long)ftrace_regs_caller_end;
                op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
                call_offset = (unsigned long)ftrace_regs_call;
                jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
        } else {
                start_offset = (unsigned long)ftrace_caller;
                end_offset = (unsigned long)ftrace_caller_end;
                op_offset = (unsigned long)ftrace_caller_op_ptr;
                call_offset = (unsigned long)ftrace_call;
                jmp_offset = 0;
        }

        size = end_offset - start_offset;

        /*
         * Allocate enough size to store the ftrace_caller code,
         * the return instruction, as well as the address of the
         * ftrace_ops this trampoline is used for.
         */
        trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
        if (!trampoline)
                return 0;

        *tramp_size = size + RET_SIZE + sizeof(void *);
        npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

        /* Copy ftrace_caller onto the trampoline memory */
        ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
        if (WARN_ON(ret < 0))
                goto fail;

        ip = trampoline + size;
        if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
                __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
        else
                memcpy(ip, retq, sizeof(retq));

        /* No need to test direct calls on created trampolines */
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
                /* NOP the jnz 1f; but make sure it's a 2 byte jnz */
                ip = trampoline + (jmp_offset - start_offset);
                if (WARN_ON(*(char *)ip != 0x75))
                        goto fail;
                ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
                if (ret < 0)
                        goto fail;
        }

        /*
         * The address of the ftrace_ops that is used for this trampoline
         * is stored at the end of the trampoline. This will be used to
         * load the third parameter for the callback. Basically, that
         * location at the end of the trampoline takes the place of
         * the global function_trace_op variable.
         */

        ptr = (unsigned long *)(trampoline + size + RET_SIZE);
        *ptr = (unsigned long)ops;

        op_offset -= start_offset;
        memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

        /* Are we pointing to the reference? */
        if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
                goto fail;

        /* Load the contents of ptr into the callback parameter */
        offset = (unsigned long)ptr;
        offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

        op_ptr.offset = offset;

        /* put in the new offset to the ftrace_ops */
        memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

        /* put in the call to the function */
        mutex_lock(&text_mutex);
        call_offset -= start_offset;
        /*
         * No need to translate into a callthunk. The trampoline does
         * the depth accounting before the call already.
         */
        dest = ftrace_ops_get_func(ops);
        memcpy(trampoline + call_offset,
               text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
               CALL_INSN_SIZE);
        mutex_unlock(&text_mutex);

        /* ALLOC_TRAMP flags lets us know we created it */
        ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

        set_memory_rox((unsigned long)trampoline, npages);
        return (unsigned long)trampoline;
fail:
        tramp_free(trampoline);
        return 0;
}
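
/*
 * Resulting layout (sketch):
 *
 *   trampoline + 0                   copy of ftrace_caller or
 *                                    ftrace_regs_caller
 *   trampoline + size                ret / jmp __x86_return_thunk
 *                                    (RET_SIZE bytes)
 *   trampoline + size + RET_SIZE     ftrace_ops pointer loaded by the
 *                                    patched movq
 */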

void set_ftrace_ops_ro(void)
{
        struct ftrace_ops *ops;
        unsigned long start_offset;
        unsigned long end_offset;
        unsigned long npages;
        unsigned long size;

        do_for_each_ftrace_op(ops, ftrace_ops_list) {
                if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                        continue;

                if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
                        start_offset = (unsigned long)ftrace_regs_caller;
                        end_offset = (unsigned long)ftrace_regs_caller_end;
                } else {
                        start_offset = (unsigned long)ftrace_caller;
                        end_offset = (unsigned long)ftrace_caller_end;
                }
                size = end_offset - start_offset;
                size = size + RET_SIZE + sizeof(void *);
                npages = DIV_ROUND_UP(size, PAGE_SIZE);
                set_memory_ro((unsigned long)ops->trampoline, npages);
        } while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
        unsigned long start_offset;
        unsigned long call_offset;

        if (save_regs) {
                start_offset = (unsigned long)ftrace_regs_caller;
                call_offset = (unsigned long)ftrace_regs_call;
        } else {
                start_offset = (unsigned long)ftrace_caller;
                call_offset = (unsigned long)ftrace_call;
        }

        return call_offset - start_offset;
}

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
        ftrace_func_t func;
        unsigned long offset;
        unsigned long ip;
        unsigned int size;
        const char *new;

        if (!ops->trampoline) {
                ops->trampoline = create_trampoline(ops, &size);
                if (!ops->trampoline)
                        return;
                ops->trampoline_size = size;
                return;
        }

        /*
         * The ftrace_ops caller may set up its own trampoline.
         * In such a case, this code must not modify it.
         */
        if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                return;

        offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
        ip = ops->trampoline + offset;
        func = ftrace_ops_get_func(ops);

        mutex_lock(&text_mutex);
        /* Do a safe modify in case the trampoline is executing */
        new = ftrace_call_replace(ip, (unsigned long)func);
        text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
        mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
        union text_poke_insn call;
        int ret;

        ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
        if (WARN_ON_ONCE(ret < 0))
                return NULL;

        /* Make sure this is a call */
        if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
                pr_warn("Expected E8, got %x\n", call.opcode);
                return NULL;
        }

        return ptr + CALL_INSN_SIZE + call.disp;
}

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
        unsigned long offset;
        bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
        void *ptr;

        if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
        defined(CONFIG_FUNCTION_GRAPH_TRACER)
                /*
                 * We only know about function graph tracer setting as static
                 * trampoline.
                 */
                if (ops->trampoline == FTRACE_GRAPH_ADDR)
                        return (void *)prepare_ftrace_return;
#endif
                return NULL;
        }

        offset = calc_trampoline_call_offset(save_regs);

        if (save_regs)
                ptr = (void *)FTRACE_REGS_ADDR + offset;
        else
                ptr = (void *)FTRACE_ADDR + offset;

        return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
        unsigned long offset;

        /* If we didn't allocate this trampoline, consider it static */
        if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                return static_tramp_func(ops, rec);

        offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
        return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
        if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                return;

        tramp_free((void *)ops->trampoline);
        ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
        return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
        const char *new;

        new = ftrace_jmp_replace(ip, (unsigned long)func);
        text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
        return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);

        return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);

        return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
                           unsigned long frame_pointer)
{
        unsigned long return_hooker = (unsigned long)&return_to_handler;
        int bit;

        /*
         * When resuming from suspend-to-ram, this function can be indirectly
         * called from early CPU startup code while the CPU is in real mode,
         * which would fail miserably. Make sure the stack pointer is a
         * virtual address.
         *
         * This check isn't as accurate as virt_addr_valid(), but it should be
         * good enough for this purpose, and it's fast.
         */
        if (unlikely((long)__builtin_frame_address(0) >= 0))
                return;

        if (unlikely(ftrace_graph_is_dead()))
                return;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        bit = ftrace_test_recursion_trylock(ip, *parent);
        if (bit < 0)
                return;

        if (!function_graph_enter(*parent, ip, frame_pointer, parent))
                *parent = return_hooker;

        ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct pt_regs *regs = &fregs->regs;
        unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

        prepare_ftrace_return(ip, (unsigned long *)stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */