Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Dynamic function tracing support.
4 *
5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6 *
7 * Thanks goes to Ingo Molnar, for suggesting the idea.
8 * Mathieu Desnoyers, for suggesting postponing the modifications.
9 * Arjan van de Ven, for keeping me straight, and explaining to me
10 * the dangers of modifying code on the run.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/spinlock.h>
16#include <linux/hardirq.h>
17#include <linux/uaccess.h>
18#include <linux/ftrace.h>
19#include <linux/percpu.h>
20#include <linux/sched.h>
21#include <linux/slab.h>
22#include <linux/init.h>
23#include <linux/list.h>
24#include <linux/module.h>
25#include <linux/memory.h>
26#include <linux/vmalloc.h>
27#include <linux/set_memory.h>
28#include <linux/execmem.h>
29
30#include <trace/syscall.h>
31
32#include <asm/kprobes.h>
33#include <asm/ftrace.h>
34#include <asm/nops.h>
35#include <asm/text-patching.h>
36
37#ifdef CONFIG_DYNAMIC_FTRACE
38
39static int ftrace_poke_late = 0;
40
41void ftrace_arch_code_modify_prepare(void)
42 __acquires(&text_mutex)
43{
44 /*
45 * Need to grab text_mutex to prevent a race from module loading
46 * and live kernel patching from changing the text permissions while
47 * ftrace has it set to "read/write".
48 */
49 mutex_lock(&text_mutex);
50 ftrace_poke_late = 1;
51}
52
53void ftrace_arch_code_modify_post_process(void)
54 __releases(&text_mutex)
55{
56 /*
57 * ftrace_make_{call,nop}() may be called during
58 * module load, and we need to finish the text_poke_queue()
59 * that they do, here.
60 */
61 text_poke_finish();
62 ftrace_poke_late = 0;
63 mutex_unlock(&text_mutex);
64}
65
66static const char *ftrace_nop_replace(void)
67{
68 return x86_nops[5];
69}
70
71static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
72{
73 /*
74 * No need to translate into a callthunk. The trampoline does
75 * the depth accounting itself.
76 */
77 return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
78}
79
80static int ftrace_verify_code(unsigned long ip, const char *old_code)
81{
82 char cur_code[MCOUNT_INSN_SIZE];
83
84 /*
85 * Note:
86 * We are paranoid about modifying text, as if a bug was to happen, it
87 * could cause us to read or write to someplace that could cause harm.
88 * Carefully read and modify the code with probe_kernel_*(), and make
89 * sure what we read is what we expected it to be before modifying it.
90 */
91 /* read the text we want to modify */
92 if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
93 WARN_ON(1);
94 return -EFAULT;
95 }
96
97 /* Make sure it is what we expect it to be */
98 if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
99 ftrace_expected = old_code;
100 WARN_ON(1);
101 return -EINVAL;
102 }
103
104 return 0;
105}
106
107/*
108 * Marked __ref because it calls text_poke_early() which is .init.text. That is
109 * ok because that call will happen early, during boot, when .init sections are
110 * still present.
111 */
112static int __ref
113ftrace_modify_code_direct(unsigned long ip, const char *old_code,
114 const char *new_code)
115{
116 int ret = ftrace_verify_code(ip, old_code);
117 if (ret)
118 return ret;
119
120 /* replace the text with the new text */
121 if (ftrace_poke_late) {
122 text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
123 } else {
124 mutex_lock(&text_mutex);
125 text_poke((void *)ip, new_code, MCOUNT_INSN_SIZE);
126 mutex_unlock(&text_mutex);
127 }
128 return 0;
129}
130
131int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
132{
133 unsigned long ip = rec->ip;
134 const char *new, *old;
135
136 old = ftrace_call_replace(ip, addr);
137 new = ftrace_nop_replace();
138
139 /*
140 * On boot up, and when modules are loaded, the MCOUNT_ADDR
141 * is converted to a nop, and will never become MCOUNT_ADDR
142 * again. This code is either running before SMP (on boot up)
143 * or before the code will ever be executed (module load).
144 * We do not want to use the breakpoint version in this case,
145 * just modify the code directly.
146 */
147 if (addr == MCOUNT_ADDR)
148 return ftrace_modify_code_direct(ip, old, new);
149
150 /*
151 * x86 overrides ftrace_replace_code -- this function will never be used
152 * in this case.
153 */
154 WARN_ONCE(1, "invalid use of ftrace_make_nop");
155 return -EINVAL;
156}
157
158int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
159{
160 unsigned long ip = rec->ip;
161 const char *new, *old;
162
163 old = ftrace_nop_replace();
164 new = ftrace_call_replace(ip, addr);
165
166 /* Should only be called when module is loaded */
167 return ftrace_modify_code_direct(rec->ip, old, new);
168}
169
170/*
171 * Should never be called:
172 * As it is only called by __ftrace_replace_code() which is called by
173 * ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
174 * which is called to turn mcount into nops or nops into function calls
175 * but not to convert a function from not using regs to one that uses
176 * regs, which ftrace_modify_call() is for.
177 */
178int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
179 unsigned long addr)
180{
181 WARN_ON(1);
182 return -EINVAL;
183}
184
185int ftrace_update_ftrace_func(ftrace_func_t func)
186{
187 unsigned long ip;
188 const char *new;
189
190 ip = (unsigned long)(&ftrace_call);
191 new = ftrace_call_replace(ip, (unsigned long)func);
192 text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
193
194 ip = (unsigned long)(&ftrace_regs_call);
195 new = ftrace_call_replace(ip, (unsigned long)func);
196 text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
197
198 return 0;
199}
200
201void ftrace_replace_code(int enable)
202{
203 struct ftrace_rec_iter *iter;
204 struct dyn_ftrace *rec;
205 const char *new, *old;
206 int ret;
207
208 for_ftrace_rec_iter(iter) {
209 rec = ftrace_rec_iter_record(iter);
210
211 switch (ftrace_test_record(rec, enable)) {
212 case FTRACE_UPDATE_IGNORE:
213 default:
214 continue;
215
216 case FTRACE_UPDATE_MAKE_CALL:
217 old = ftrace_nop_replace();
218 break;
219
220 case FTRACE_UPDATE_MODIFY_CALL:
221 case FTRACE_UPDATE_MAKE_NOP:
222 old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
223 break;
224 }
225
226 ret = ftrace_verify_code(rec->ip, old);
227 if (ret) {
228 ftrace_expected = old;
229 ftrace_bug(ret, rec);
230 ftrace_expected = NULL;
231 return;
232 }
233 }
234
235 for_ftrace_rec_iter(iter) {
236 rec = ftrace_rec_iter_record(iter);
237
238 switch (ftrace_test_record(rec, enable)) {
239 case FTRACE_UPDATE_IGNORE:
240 default:
241 continue;
242
243 case FTRACE_UPDATE_MAKE_CALL:
244 case FTRACE_UPDATE_MODIFY_CALL:
245 new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
246 break;
247
248 case FTRACE_UPDATE_MAKE_NOP:
249 new = ftrace_nop_replace();
250 break;
251 }
252
253 text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
254 ftrace_update_record(rec, enable);
255 }
256 text_poke_finish();
257}
258
259void arch_ftrace_update_code(int command)
260{
261 ftrace_modify_all_code(command);
262}
263
264/* Currently only x86_64 supports dynamic trampolines */
265#ifdef CONFIG_X86_64
266
267static inline void *alloc_tramp(unsigned long size)
268{
269 return execmem_alloc(EXECMEM_FTRACE, size);
270}
271static inline void tramp_free(void *tramp)
272{
273 execmem_free(tramp);
274}
275
276/* Defined as markers to the end of the ftrace default trampolines */
277extern void ftrace_regs_caller_end(void);
278extern void ftrace_caller_end(void);
279extern void ftrace_caller_op_ptr(void);
280extern void ftrace_regs_caller_op_ptr(void);
281extern void ftrace_regs_caller_jmp(void);
282
283/* movq function_trace_op(%rip), %rdx */
284/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
285#define OP_REF_SIZE 7
286
287/*
288 * The ftrace_ops is passed to the function callback. Since the
289 * trampoline only services a single ftrace_ops, we can pass in
290 * that ops directly.
291 *
292 * The ftrace_op_code_union is used to create a pointer to the
293 * ftrace_ops that will be passed to the callback function.
294 */
295union ftrace_op_code_union {
296 char code[OP_REF_SIZE];
297 struct {
298 char op[3];
299 int offset;
300 } __attribute__((packed));
301};
302
303#define RET_SIZE \
304 (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_MITIGATION_SLS))
305
306static unsigned long
307create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
308{
309 unsigned long start_offset;
310 unsigned long end_offset;
311 unsigned long op_offset;
312 unsigned long call_offset;
313 unsigned long jmp_offset;
314 unsigned long offset;
315 unsigned long npages;
316 unsigned long size;
317 unsigned long *ptr;
318 void *trampoline;
319 void *ip, *dest;
320 /* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
321 unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
322 unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
323 union ftrace_op_code_union op_ptr;
324 void *ret;
325
326 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
327 start_offset = (unsigned long)ftrace_regs_caller;
328 end_offset = (unsigned long)ftrace_regs_caller_end;
329 op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
330 call_offset = (unsigned long)ftrace_regs_call;
331 jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
332 } else {
333 start_offset = (unsigned long)ftrace_caller;
334 end_offset = (unsigned long)ftrace_caller_end;
335 op_offset = (unsigned long)ftrace_caller_op_ptr;
336 call_offset = (unsigned long)ftrace_call;
337 jmp_offset = 0;
338 }
339
340 size = end_offset - start_offset;
341
342 /*
343 * Allocate enough size to store the ftrace_caller code,
344 * the iret , as well as the address of the ftrace_ops this
345 * trampoline is used for.
346 */
347 trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
348 if (!trampoline)
349 return 0;
350
351 *tramp_size = size + RET_SIZE + sizeof(void *);
352 npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
353
354 /* Copy ftrace_caller onto the trampoline memory */
355 ret = text_poke_copy(trampoline, (void *)start_offset, size);
356 if (WARN_ON(!ret))
357 goto fail;
358
359 ip = trampoline + size;
360 if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
361 __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
362 else
363 text_poke_copy(ip, retq, sizeof(retq));
364
365 /* No need to test direct calls on created trampolines */
366 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
367 /* NOP the jnz 1f; but make sure it's a 2 byte jnz */
368 ip = trampoline + (jmp_offset - start_offset);
369 if (WARN_ON(*(char *)ip != 0x75))
370 goto fail;
371 if (!text_poke_copy(ip, x86_nops[2], 2))
372 goto fail;
373 }
374
375 /*
376 * The address of the ftrace_ops that is used for this trampoline
377 * is stored at the end of the trampoline. This will be used to
378 * load the third parameter for the callback. Basically, that
379 * location at the end of the trampoline takes the place of
380 * the global function_trace_op variable.
381 */
382
383 ptr = (unsigned long *)(trampoline + size + RET_SIZE);
384 text_poke_copy(ptr, &ops, sizeof(unsigned long));
385
386 op_offset -= start_offset;
387 memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
388
389 /* Are we pointing to the reference? */
390 if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
391 goto fail;
392
393 /* Load the contents of ptr into the callback parameter */
394 offset = (unsigned long)ptr;
395 offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;
396
397 op_ptr.offset = offset;
398
399 /* put in the new offset to the ftrace_ops */
400 text_poke_copy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
401
402 /* put in the call to the function */
403 mutex_lock(&text_mutex);
404 call_offset -= start_offset;
405 /*
406 * No need to translate into a callthunk. The trampoline does
407 * the depth accounting before the call already.
408 */
409 dest = ftrace_ops_get_func(ops);
410 text_poke_copy_locked(trampoline + call_offset,
411 text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
412 CALL_INSN_SIZE, false);
413 mutex_unlock(&text_mutex);
414
415 /* ALLOC_TRAMP flags lets us know we created it */
416 ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
417
418 set_memory_rox((unsigned long)trampoline, npages);
419 return (unsigned long)trampoline;
420fail:
421 tramp_free(trampoline);
422 return 0;
423}
424
425void set_ftrace_ops_ro(void)
426{
427 struct ftrace_ops *ops;
428 unsigned long start_offset;
429 unsigned long end_offset;
430 unsigned long npages;
431 unsigned long size;
432
433 do_for_each_ftrace_op(ops, ftrace_ops_list) {
434 if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
435 continue;
436
437 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
438 start_offset = (unsigned long)ftrace_regs_caller;
439 end_offset = (unsigned long)ftrace_regs_caller_end;
440 } else {
441 start_offset = (unsigned long)ftrace_caller;
442 end_offset = (unsigned long)ftrace_caller_end;
443 }
444 size = end_offset - start_offset;
445 size = size + RET_SIZE + sizeof(void *);
446 npages = DIV_ROUND_UP(size, PAGE_SIZE);
447 set_memory_ro((unsigned long)ops->trampoline, npages);
448 } while_for_each_ftrace_op(ops);
449}
450
451static unsigned long calc_trampoline_call_offset(bool save_regs)
452{
453 unsigned long start_offset;
454 unsigned long call_offset;
455
456 if (save_regs) {
457 start_offset = (unsigned long)ftrace_regs_caller;
458 call_offset = (unsigned long)ftrace_regs_call;
459 } else {
460 start_offset = (unsigned long)ftrace_caller;
461 call_offset = (unsigned long)ftrace_call;
462 }
463
464 return call_offset - start_offset;
465}
466
467void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
468{
469 ftrace_func_t func;
470 unsigned long offset;
471 unsigned long ip;
472 unsigned int size;
473 const char *new;
474
475 if (!ops->trampoline) {
476 ops->trampoline = create_trampoline(ops, &size);
477 if (!ops->trampoline)
478 return;
479 ops->trampoline_size = size;
480 return;
481 }
482
483 /*
484 * The ftrace_ops caller may set up its own trampoline.
485 * In such a case, this code must not modify it.
486 */
487 if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
488 return;
489
490 offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
491 ip = ops->trampoline + offset;
492 func = ftrace_ops_get_func(ops);
493
494 mutex_lock(&text_mutex);
495 /* Do a safe modify in case the trampoline is executing */
496 new = ftrace_call_replace(ip, (unsigned long)func);
497 text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
498 mutex_unlock(&text_mutex);
499}
500
501/* Return the address of the function the trampoline calls */
502static void *addr_from_call(void *ptr)
503{
504 union text_poke_insn call;
505 int ret;
506
507 ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
508 if (WARN_ON_ONCE(ret < 0))
509 return NULL;
510
511 /* Make sure this is a call */
512 if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
513 pr_warn("Expected E8, got %x\n", call.opcode);
514 return NULL;
515 }
516
517 return ptr + CALL_INSN_SIZE + call.disp;
518}
519
520/*
521 * If the ops->trampoline was not allocated, then it probably
522 * has a static trampoline func, or is the ftrace caller itself.
523 */
524static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
525{
526 unsigned long offset;
527 bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
528 void *ptr;
529
530 if (ops && ops->trampoline) {
531#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
532 defined(CONFIG_FUNCTION_GRAPH_TRACER)
533 /*
534 * We only know about function graph tracer setting as static
535 * trampoline.
536 */
537 if (ops->trampoline == FTRACE_GRAPH_ADDR)
538 return (void *)prepare_ftrace_return;
539#endif
540 return NULL;
541 }
542
543 offset = calc_trampoline_call_offset(save_regs);
544
545 if (save_regs)
546 ptr = (void *)FTRACE_REGS_ADDR + offset;
547 else
548 ptr = (void *)FTRACE_ADDR + offset;
549
550 return addr_from_call(ptr);
551}
552
553void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
554{
555 unsigned long offset;
556
557 /* If we didn't allocate this trampoline, consider it static */
558 if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
559 return static_tramp_func(ops, rec);
560
561 offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
562 return addr_from_call((void *)ops->trampoline + offset);
563}
564
565void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
566{
567 if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
568 return;
569
570 tramp_free((void *)ops->trampoline);
571 ops->trampoline = 0;
572}
573
574#endif /* CONFIG_X86_64 */
575#endif /* CONFIG_DYNAMIC_FTRACE */
576
577#ifdef CONFIG_FUNCTION_GRAPH_TRACER
578
579#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
580extern void ftrace_graph_call(void);
581static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
582{
583 return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
584}
585
586static int ftrace_mod_jmp(unsigned long ip, void *func)
587{
588 const char *new;
589
590 new = ftrace_jmp_replace(ip, (unsigned long)func);
591 text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
592 return 0;
593}
594
595int ftrace_enable_ftrace_graph_caller(void)
596{
597 unsigned long ip = (unsigned long)(&ftrace_graph_call);
598
599 return ftrace_mod_jmp(ip, &ftrace_graph_caller);
600}
601
602int ftrace_disable_ftrace_graph_caller(void)
603{
604 unsigned long ip = (unsigned long)(&ftrace_graph_call);
605
606 return ftrace_mod_jmp(ip, &ftrace_stub);
607}
608#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
609
610/*
611 * Hook the return address and push it in the stack of return addrs
612 * in current thread info.
613 */
614void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
615 unsigned long frame_pointer)
616{
617 unsigned long return_hooker = (unsigned long)&return_to_handler;
618 int bit;
619
620 /*
621 * When resuming from suspend-to-ram, this function can be indirectly
622 * called from early CPU startup code while the CPU is in real mode,
623 * which would fail miserably. Make sure the stack pointer is a
624 * virtual address.
625 *
626 * This check isn't as accurate as virt_addr_valid(), but it should be
627 * good enough for this purpose, and it's fast.
628 */
629 if (unlikely((long)__builtin_frame_address(0) >= 0))
630 return;
631
632 if (unlikely(ftrace_graph_is_dead()))
633 return;
634
635 if (unlikely(atomic_read(¤t->tracing_graph_pause)))
636 return;
637
638 bit = ftrace_test_recursion_trylock(ip, *parent);
639 if (bit < 0)
640 return;
641
642 if (!function_graph_enter(*parent, ip, frame_pointer, parent))
643 *parent = return_hooker;
644
645 ftrace_test_recursion_unlock(bit);
646}
647
648#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
649void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
650 struct ftrace_ops *op, struct ftrace_regs *fregs)
651{
652 struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
653 unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);
654
655 prepare_ftrace_return(ip, (unsigned long *)stack, 0);
656}
657#endif
658
659#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1/*
2 * Dynamic function tracing support.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 *
6 * Thanks goes to Ingo Molnar, for suggesting the idea.
7 * Mathieu Desnoyers, for suggesting postponing the modifications.
8 * Arjan van de Ven, for keeping me straight, and explaining to me
9 * the dangers of modifying code on the run.
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/spinlock.h>
15#include <linux/hardirq.h>
16#include <linux/uaccess.h>
17#include <linux/ftrace.h>
18#include <linux/percpu.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/init.h>
22#include <linux/list.h>
23#include <linux/module.h>
24
25#include <trace/syscall.h>
26
27#include <asm/cacheflush.h>
28#include <asm/kprobes.h>
29#include <asm/ftrace.h>
30#include <asm/nops.h>
31
32#ifdef CONFIG_DYNAMIC_FTRACE
33
34int ftrace_arch_code_modify_prepare(void)
35{
36 set_kernel_text_rw();
37 set_all_modules_text_rw();
38 return 0;
39}
40
41int ftrace_arch_code_modify_post_process(void)
42{
43 set_all_modules_text_ro();
44 set_kernel_text_ro();
45 return 0;
46}
47
48union ftrace_code_union {
49 char code[MCOUNT_INSN_SIZE];
50 struct {
51 unsigned char e8;
52 int offset;
53 } __attribute__((packed));
54};
55
56static int ftrace_calc_offset(long ip, long addr)
57{
58 return (int)(addr - ip);
59}
60
61static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
62{
63 static union ftrace_code_union calc;
64
65 calc.e8 = 0xe8;
66 calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
67
68 /*
69 * No locking needed, this must be called via kstop_machine
70 * which in essence is like running on a uniprocessor machine.
71 */
72 return calc.code;
73}
74
75static inline int
76within(unsigned long addr, unsigned long start, unsigned long end)
77{
78 return addr >= start && addr < end;
79}
80
81static unsigned long text_ip_addr(unsigned long ip)
82{
83 /*
84 * On x86_64, kernel text mappings are mapped read-only, so we use
85 * the kernel identity mapping instead of the kernel text mapping
86 * to modify the kernel text.
87 *
88 * For 32bit kernels, these mappings are same and we can use
89 * kernel identity mapping to modify code.
90 */
91 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
92 ip = (unsigned long)__va(__pa_symbol(ip));
93
94 return ip;
95}
96
97static const unsigned char *ftrace_nop_replace(void)
98{
99 return ideal_nops[NOP_ATOMIC5];
100}
101
102static int
103ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
104 unsigned const char *new_code)
105{
106 unsigned char replaced[MCOUNT_INSN_SIZE];
107
108 ftrace_expected = old_code;
109
110 /*
111 * Note:
112 * We are paranoid about modifying text, as if a bug was to happen, it
113 * could cause us to read or write to someplace that could cause harm.
114 * Carefully read and modify the code with probe_kernel_*(), and make
115 * sure what we read is what we expected it to be before modifying it.
116 */
117
118 /* read the text we want to modify */
119 if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
120 return -EFAULT;
121
122 /* Make sure it is what we expect it to be */
123 if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
124 return -EINVAL;
125
126 ip = text_ip_addr(ip);
127
128 /* replace the text with the new text */
129 if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
130 return -EPERM;
131
132 sync_core();
133
134 return 0;
135}
136
137int ftrace_make_nop(struct module *mod,
138 struct dyn_ftrace *rec, unsigned long addr)
139{
140 unsigned const char *new, *old;
141 unsigned long ip = rec->ip;
142
143 old = ftrace_call_replace(ip, addr);
144 new = ftrace_nop_replace();
145
146 /*
147 * On boot up, and when modules are loaded, the MCOUNT_ADDR
148 * is converted to a nop, and will never become MCOUNT_ADDR
149 * again. This code is either running before SMP (on boot up)
150 * or before the code will ever be executed (module load).
151 * We do not want to use the breakpoint version in this case,
152 * just modify the code directly.
153 */
154 if (addr == MCOUNT_ADDR)
155 return ftrace_modify_code_direct(rec->ip, old, new);
156
157 ftrace_expected = NULL;
158
159 /* Normal cases use add_brk_on_nop */
160 WARN_ONCE(1, "invalid use of ftrace_make_nop");
161 return -EINVAL;
162}
163
164int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
165{
166 unsigned const char *new, *old;
167 unsigned long ip = rec->ip;
168
169 old = ftrace_nop_replace();
170 new = ftrace_call_replace(ip, addr);
171
172 /* Should only be called when module is loaded */
173 return ftrace_modify_code_direct(rec->ip, old, new);
174}
175
176/*
177 * The modifying_ftrace_code is used to tell the breakpoint
178 * handler to call ftrace_int3_handler(). If it fails to
179 * call this handler for a breakpoint added by ftrace, then
180 * the kernel may crash.
181 *
182 * As atomic_writes on x86 do not need a barrier, we do not
183 * need to add smp_mb()s for this to work. It is also considered
184 * that we can not read the modifying_ftrace_code before
185 * executing the breakpoint. That would be quite remarkable if
186 * it could do that. Here's the flow that is required:
187 *
188 * CPU-0 CPU-1
189 *
190 * atomic_inc(mfc);
191 * write int3s
192 * <trap-int3> // implicit (r)mb
193 * if (atomic_read(mfc))
194 * call ftrace_int3_handler()
195 *
196 * Then when we are finished:
197 *
198 * atomic_dec(mfc);
199 *
200 * If we hit a breakpoint that was not set by ftrace, it does not
201 * matter if ftrace_int3_handler() is called or not. It will
202 * simply be ignored. But it is crucial that a ftrace nop/caller
203 * breakpoint is handled. No other user should ever place a
204 * breakpoint on an ftrace nop/caller location. It must only
205 * be done by this code.
206 */
207atomic_t modifying_ftrace_code __read_mostly;
208
209static int
210ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
211 unsigned const char *new_code);
212
213/*
214 * Should never be called:
215 * As it is only called by __ftrace_replace_code() which is called by
216 * ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
217 * which is called to turn mcount into nops or nops into function calls
218 * but not to convert a function from not using regs to one that uses
219 * regs, which ftrace_modify_call() is for.
220 */
221int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
222 unsigned long addr)
223{
224 WARN_ON(1);
225 ftrace_expected = NULL;
226 return -EINVAL;
227}
228
229static unsigned long ftrace_update_func;
230
231static int update_ftrace_func(unsigned long ip, void *new)
232{
233 unsigned char old[MCOUNT_INSN_SIZE];
234 int ret;
235
236 memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
237
238 ftrace_update_func = ip;
239 /* Make sure the breakpoints see the ftrace_update_func update */
240 smp_wmb();
241
242 /* See comment above by declaration of modifying_ftrace_code */
243 atomic_inc(&modifying_ftrace_code);
244
245 ret = ftrace_modify_code(ip, old, new);
246
247 atomic_dec(&modifying_ftrace_code);
248
249 return ret;
250}
251
252int ftrace_update_ftrace_func(ftrace_func_t func)
253{
254 unsigned long ip = (unsigned long)(&ftrace_call);
255 unsigned char *new;
256 int ret;
257
258 new = ftrace_call_replace(ip, (unsigned long)func);
259 ret = update_ftrace_func(ip, new);
260
261 /* Also update the regs callback function */
262 if (!ret) {
263 ip = (unsigned long)(&ftrace_regs_call);
264 new = ftrace_call_replace(ip, (unsigned long)func);
265 ret = update_ftrace_func(ip, new);
266 }
267
268 return ret;
269}
270
271static int is_ftrace_caller(unsigned long ip)
272{
273 if (ip == ftrace_update_func)
274 return 1;
275
276 return 0;
277}
278
279/*
280 * A breakpoint was added to the code address we are about to
281 * modify, and this is the handle that will just skip over it.
282 * We are either changing a nop into a trace call, or a trace
283 * call to a nop. While the change is taking place, we treat
284 * it just like it was a nop.
285 */
286int ftrace_int3_handler(struct pt_regs *regs)
287{
288 unsigned long ip;
289
290 if (WARN_ON_ONCE(!regs))
291 return 0;
292
293 ip = regs->ip - 1;
294 if (!ftrace_location(ip) && !is_ftrace_caller(ip))
295 return 0;
296
297 regs->ip += MCOUNT_INSN_SIZE - 1;
298
299 return 1;
300}
301
302static int ftrace_write(unsigned long ip, const char *val, int size)
303{
304 ip = text_ip_addr(ip);
305
306 if (probe_kernel_write((void *)ip, val, size))
307 return -EPERM;
308
309 return 0;
310}
311
312static int add_break(unsigned long ip, const char *old)
313{
314 unsigned char replaced[MCOUNT_INSN_SIZE];
315 unsigned char brk = BREAKPOINT_INSTRUCTION;
316
317 if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
318 return -EFAULT;
319
320 ftrace_expected = old;
321
322 /* Make sure it is what we expect it to be */
323 if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
324 return -EINVAL;
325
326 return ftrace_write(ip, &brk, 1);
327}
328
329static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
330{
331 unsigned const char *old;
332 unsigned long ip = rec->ip;
333
334 old = ftrace_call_replace(ip, addr);
335
336 return add_break(rec->ip, old);
337}
338
339
340static int add_brk_on_nop(struct dyn_ftrace *rec)
341{
342 unsigned const char *old;
343
344 old = ftrace_nop_replace();
345
346 return add_break(rec->ip, old);
347}
348
349static int add_breakpoints(struct dyn_ftrace *rec, int enable)
350{
351 unsigned long ftrace_addr;
352 int ret;
353
354 ftrace_addr = ftrace_get_addr_curr(rec);
355
356 ret = ftrace_test_record(rec, enable);
357
358 switch (ret) {
359 case FTRACE_UPDATE_IGNORE:
360 return 0;
361
362 case FTRACE_UPDATE_MAKE_CALL:
363 /* converting nop to call */
364 return add_brk_on_nop(rec);
365
366 case FTRACE_UPDATE_MODIFY_CALL:
367 case FTRACE_UPDATE_MAKE_NOP:
368 /* converting a call to a nop */
369 return add_brk_on_call(rec, ftrace_addr);
370 }
371 return 0;
372}
373
374/*
375 * On error, we need to remove breakpoints. This needs to
376 * be done caefully. If the address does not currently have a
377 * breakpoint, we know we are done. Otherwise, we look at the
378 * remaining 4 bytes of the instruction. If it matches a nop
379 * we replace the breakpoint with the nop. Otherwise we replace
380 * it with the call instruction.
381 */
382static int remove_breakpoint(struct dyn_ftrace *rec)
383{
384 unsigned char ins[MCOUNT_INSN_SIZE];
385 unsigned char brk = BREAKPOINT_INSTRUCTION;
386 const unsigned char *nop;
387 unsigned long ftrace_addr;
388 unsigned long ip = rec->ip;
389
390 /* If we fail the read, just give up */
391 if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
392 return -EFAULT;
393
394 /* If this does not have a breakpoint, we are done */
395 if (ins[0] != brk)
396 return 0;
397
398 nop = ftrace_nop_replace();
399
400 /*
401 * If the last 4 bytes of the instruction do not match
402 * a nop, then we assume that this is a call to ftrace_addr.
403 */
404 if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
405 /*
406 * For extra paranoidism, we check if the breakpoint is on
407 * a call that would actually jump to the ftrace_addr.
408 * If not, don't touch the breakpoint, we make just create
409 * a disaster.
410 */
411 ftrace_addr = ftrace_get_addr_new(rec);
412 nop = ftrace_call_replace(ip, ftrace_addr);
413
414 if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
415 goto update;
416
417 /* Check both ftrace_addr and ftrace_old_addr */
418 ftrace_addr = ftrace_get_addr_curr(rec);
419 nop = ftrace_call_replace(ip, ftrace_addr);
420
421 ftrace_expected = nop;
422
423 if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
424 return -EINVAL;
425 }
426
427 update:
428 return ftrace_write(ip, nop, 1);
429}
430
431static int add_update_code(unsigned long ip, unsigned const char *new)
432{
433 /* skip breakpoint */
434 ip++;
435 new++;
436 return ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1);
437}
438
439static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
440{
441 unsigned long ip = rec->ip;
442 unsigned const char *new;
443
444 new = ftrace_call_replace(ip, addr);
445 return add_update_code(ip, new);
446}
447
448static int add_update_nop(struct dyn_ftrace *rec)
449{
450 unsigned long ip = rec->ip;
451 unsigned const char *new;
452
453 new = ftrace_nop_replace();
454 return add_update_code(ip, new);
455}
456
457static int add_update(struct dyn_ftrace *rec, int enable)
458{
459 unsigned long ftrace_addr;
460 int ret;
461
462 ret = ftrace_test_record(rec, enable);
463
464 ftrace_addr = ftrace_get_addr_new(rec);
465
466 switch (ret) {
467 case FTRACE_UPDATE_IGNORE:
468 return 0;
469
470 case FTRACE_UPDATE_MODIFY_CALL:
471 case FTRACE_UPDATE_MAKE_CALL:
472 /* converting nop to call */
473 return add_update_call(rec, ftrace_addr);
474
475 case FTRACE_UPDATE_MAKE_NOP:
476 /* converting a call to a nop */
477 return add_update_nop(rec);
478 }
479
480 return 0;
481}
482
483static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
484{
485 unsigned long ip = rec->ip;
486 unsigned const char *new;
487
488 new = ftrace_call_replace(ip, addr);
489
490 return ftrace_write(ip, new, 1);
491}
492
493static int finish_update_nop(struct dyn_ftrace *rec)
494{
495 unsigned long ip = rec->ip;
496 unsigned const char *new;
497
498 new = ftrace_nop_replace();
499
500 return ftrace_write(ip, new, 1);
501}
502
503static int finish_update(struct dyn_ftrace *rec, int enable)
504{
505 unsigned long ftrace_addr;
506 int ret;
507
508 ret = ftrace_update_record(rec, enable);
509
510 ftrace_addr = ftrace_get_addr_new(rec);
511
512 switch (ret) {
513 case FTRACE_UPDATE_IGNORE:
514 return 0;
515
516 case FTRACE_UPDATE_MODIFY_CALL:
517 case FTRACE_UPDATE_MAKE_CALL:
518 /* converting nop to call */
519 return finish_update_call(rec, ftrace_addr);
520
521 case FTRACE_UPDATE_MAKE_NOP:
522 /* converting a call to a nop */
523 return finish_update_nop(rec);
524 }
525
526 return 0;
527}
528
529static void do_sync_core(void *data)
530{
531 sync_core();
532}
533
534static void run_sync(void)
535{
536 int enable_irqs = irqs_disabled();
537
538 /* We may be called with interrupts disbled (on bootup). */
539 if (enable_irqs)
540 local_irq_enable();
541 on_each_cpu(do_sync_core, NULL, 1);
542 if (enable_irqs)
543 local_irq_disable();
544}
545
546void ftrace_replace_code(int enable)
547{
548 struct ftrace_rec_iter *iter;
549 struct dyn_ftrace *rec;
550 const char *report = "adding breakpoints";
551 int count = 0;
552 int ret;
553
554 for_ftrace_rec_iter(iter) {
555 rec = ftrace_rec_iter_record(iter);
556
557 ret = add_breakpoints(rec, enable);
558 if (ret)
559 goto remove_breakpoints;
560 count++;
561 }
562
563 run_sync();
564
565 report = "updating code";
566 count = 0;
567
568 for_ftrace_rec_iter(iter) {
569 rec = ftrace_rec_iter_record(iter);
570
571 ret = add_update(rec, enable);
572 if (ret)
573 goto remove_breakpoints;
574 count++;
575 }
576
577 run_sync();
578
579 report = "removing breakpoints";
580 count = 0;
581
582 for_ftrace_rec_iter(iter) {
583 rec = ftrace_rec_iter_record(iter);
584
585 ret = finish_update(rec, enable);
586 if (ret)
587 goto remove_breakpoints;
588 count++;
589 }
590
591 run_sync();
592
593 return;
594
595 remove_breakpoints:
596 pr_warn("Failed on %s (%d):\n", report, count);
597 ftrace_bug(ret, rec);
598 for_ftrace_rec_iter(iter) {
599 rec = ftrace_rec_iter_record(iter);
600 /*
601 * Breakpoints are handled only when this function is in
602 * progress. The system could not work with them.
603 */
604 if (remove_breakpoint(rec))
605 BUG();
606 }
607 run_sync();
608}
609
610static int
611ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
612 unsigned const char *new_code)
613{
614 int ret;
615
616 ret = add_break(ip, old_code);
617 if (ret)
618 goto out;
619
620 run_sync();
621
622 ret = add_update_code(ip, new_code);
623 if (ret)
624 goto fail_update;
625
626 run_sync();
627
628 ret = ftrace_write(ip, new_code, 1);
629 /*
630 * The breakpoint is handled only when this function is in progress.
631 * The system could not work if we could not remove it.
632 */
633 BUG_ON(ret);
634 out:
635 run_sync();
636 return ret;
637
638 fail_update:
639 /* Also here the system could not work with the breakpoint */
640 if (ftrace_write(ip, old_code, 1))
641 BUG();
642 goto out;
643}
644
645void arch_ftrace_update_code(int command)
646{
647 /* See comment above by declaration of modifying_ftrace_code */
648 atomic_inc(&modifying_ftrace_code);
649
650 ftrace_modify_all_code(command);
651
652 atomic_dec(&modifying_ftrace_code);
653}
654
655int __init ftrace_dyn_arch_init(void)
656{
657 return 0;
658}
659
660#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
661static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
662{
663 static union ftrace_code_union calc;
664
665 /* Jmp not a call (ignore the .e8) */
666 calc.e8 = 0xe9;
667 calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
668
669 /*
670 * ftrace external locks synchronize the access to the static variable.
671 */
672 return calc.code;
673}
674#endif
675
676/* Currently only x86_64 supports dynamic trampolines */
677#ifdef CONFIG_X86_64
678
679#ifdef CONFIG_MODULES
680#include <linux/moduleloader.h>
681/* Module allocation simplifies allocating memory for code */
682static inline void *alloc_tramp(unsigned long size)
683{
684 return module_alloc(size);
685}
686static inline void tramp_free(void *tramp)
687{
688 module_memfree(tramp);
689}
690#else
691/* Trampolines can only be created if modules are supported */
692static inline void *alloc_tramp(unsigned long size)
693{
694 return NULL;
695}
696static inline void tramp_free(void *tramp) { }
697#endif
698
699/* Defined as markers to the end of the ftrace default trampolines */
700extern void ftrace_regs_caller_end(void);
701extern void ftrace_epilogue(void);
702extern void ftrace_caller_op_ptr(void);
703extern void ftrace_regs_caller_op_ptr(void);
704
705/* movq function_trace_op(%rip), %rdx */
706/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
707#define OP_REF_SIZE 7
708
709/*
710 * The ftrace_ops is passed to the function callback. Since the
711 * trampoline only services a single ftrace_ops, we can pass in
712 * that ops directly.
713 *
714 * The ftrace_op_code_union is used to create a pointer to the
715 * ftrace_ops that will be passed to the callback function.
716 */
717union ftrace_op_code_union {
718 char code[OP_REF_SIZE];
719 struct {
720 char op[3];
721 int offset;
722 } __attribute__((packed));
723};
724
725static unsigned long
726create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
727{
728 unsigned const char *jmp;
729 unsigned long start_offset;
730 unsigned long end_offset;
731 unsigned long op_offset;
732 unsigned long offset;
733 unsigned long size;
734 unsigned long ip;
735 unsigned long *ptr;
736 void *trampoline;
737 /* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
738 unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
739 union ftrace_op_code_union op_ptr;
740 int ret;
741
742 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
743 start_offset = (unsigned long)ftrace_regs_caller;
744 end_offset = (unsigned long)ftrace_regs_caller_end;
745 op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
746 } else {
747 start_offset = (unsigned long)ftrace_caller;
748 end_offset = (unsigned long)ftrace_epilogue;
749 op_offset = (unsigned long)ftrace_caller_op_ptr;
750 }
751
752 size = end_offset - start_offset;
753
754 /*
755 * Allocate enough size to store the ftrace_caller code,
756 * the jmp to ftrace_epilogue, as well as the address of
757 * the ftrace_ops this trampoline is used for.
758 */
759 trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));
760 if (!trampoline)
761 return 0;
762
763 *tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);
764
765 /* Copy ftrace_caller onto the trampoline memory */
766 ret = probe_kernel_read(trampoline, (void *)start_offset, size);
767 if (WARN_ON(ret < 0)) {
768 tramp_free(trampoline);
769 return 0;
770 }
771
772 ip = (unsigned long)trampoline + size;
773
774 /* The trampoline ends with a jmp to ftrace_epilogue */
775 jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue);
776 memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);
777
778 /*
779 * The address of the ftrace_ops that is used for this trampoline
780 * is stored at the end of the trampoline. This will be used to
781 * load the third parameter for the callback. Basically, that
782 * location at the end of the trampoline takes the place of
783 * the global function_trace_op variable.
784 */
785
786 ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
787 *ptr = (unsigned long)ops;
788
789 op_offset -= start_offset;
790 memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
791
792 /* Are we pointing to the reference? */
793 if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
794 tramp_free(trampoline);
795 return 0;
796 }
797
798 /* Load the contents of ptr into the callback parameter */
799 offset = (unsigned long)ptr;
800 offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;
801
802 op_ptr.offset = offset;
803
804 /* put in the new offset to the ftrace_ops */
805 memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
806
807 /* ALLOC_TRAMP flags lets us know we created it */
808 ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
809
810 return (unsigned long)trampoline;
811}
812
813static unsigned long calc_trampoline_call_offset(bool save_regs)
814{
815 unsigned long start_offset;
816 unsigned long call_offset;
817
818 if (save_regs) {
819 start_offset = (unsigned long)ftrace_regs_caller;
820 call_offset = (unsigned long)ftrace_regs_call;
821 } else {
822 start_offset = (unsigned long)ftrace_caller;
823 call_offset = (unsigned long)ftrace_call;
824 }
825
826 return call_offset - start_offset;
827}
828
829void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
830{
831 ftrace_func_t func;
832 unsigned char *new;
833 unsigned long offset;
834 unsigned long ip;
835 unsigned int size;
836 int ret;
837
838 if (ops->trampoline) {
839 /*
840 * The ftrace_ops caller may set up its own trampoline.
841 * In such a case, this code must not modify it.
842 */
843 if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
844 return;
845 } else {
846 ops->trampoline = create_trampoline(ops, &size);
847 if (!ops->trampoline)
848 return;
849 ops->trampoline_size = size;
850 }
851
852 offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
853 ip = ops->trampoline + offset;
854
855 func = ftrace_ops_get_func(ops);
856
857 /* Do a safe modify in case the trampoline is executing */
858 new = ftrace_call_replace(ip, (unsigned long)func);
859 ret = update_ftrace_func(ip, new);
860
861 /* The update should never fail */
862 WARN_ON(ret);
863}
864
865/* Return the address of the function the trampoline calls */
866static void *addr_from_call(void *ptr)
867{
868 union ftrace_code_union calc;
869 int ret;
870
871 ret = probe_kernel_read(&calc, ptr, MCOUNT_INSN_SIZE);
872 if (WARN_ON_ONCE(ret < 0))
873 return NULL;
874
875 /* Make sure this is a call */
876 if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
877 pr_warn("Expected e8, got %x\n", calc.e8);
878 return NULL;
879 }
880
881 return ptr + MCOUNT_INSN_SIZE + calc.offset;
882}
883
884void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
885 unsigned long frame_pointer);
886
887/*
888 * If the ops->trampoline was not allocated, then it probably
889 * has a static trampoline func, or is the ftrace caller itself.
890 */
891static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
892{
893 unsigned long offset;
894 bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
895 void *ptr;
896
897 if (ops && ops->trampoline) {
898#ifdef CONFIG_FUNCTION_GRAPH_TRACER
899 /*
900 * We only know about function graph tracer setting as static
901 * trampoline.
902 */
903 if (ops->trampoline == FTRACE_GRAPH_ADDR)
904 return (void *)prepare_ftrace_return;
905#endif
906 return NULL;
907 }
908
909 offset = calc_trampoline_call_offset(save_regs);
910
911 if (save_regs)
912 ptr = (void *)FTRACE_REGS_ADDR + offset;
913 else
914 ptr = (void *)FTRACE_ADDR + offset;
915
916 return addr_from_call(ptr);
917}
918
919void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
920{
921 unsigned long offset;
922
923 /* If we didn't allocate this trampoline, consider it static */
924 if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
925 return static_tramp_func(ops, rec);
926
927 offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
928 return addr_from_call((void *)ops->trampoline + offset);
929}
930
931void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
932{
933 if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
934 return;
935
936 tramp_free((void *)ops->trampoline);
937 ops->trampoline = 0;
938}
939
940#endif /* CONFIG_X86_64 */
941#endif /* CONFIG_DYNAMIC_FTRACE */
942
943#ifdef CONFIG_FUNCTION_GRAPH_TRACER
944
945#ifdef CONFIG_DYNAMIC_FTRACE
946extern void ftrace_graph_call(void);
947
948static int ftrace_mod_jmp(unsigned long ip, void *func)
949{
950 unsigned char *new;
951
952 new = ftrace_jmp_replace(ip, (unsigned long)func);
953
954 return update_ftrace_func(ip, new);
955}
956
957int ftrace_enable_ftrace_graph_caller(void)
958{
959 unsigned long ip = (unsigned long)(&ftrace_graph_call);
960
961 return ftrace_mod_jmp(ip, &ftrace_graph_caller);
962}
963
964int ftrace_disable_ftrace_graph_caller(void)
965{
966 unsigned long ip = (unsigned long)(&ftrace_graph_call);
967
968 return ftrace_mod_jmp(ip, &ftrace_stub);
969}
970
971#endif /* !CONFIG_DYNAMIC_FTRACE */
972
973/*
974 * Hook the return address and push it in the stack of return addrs
975 * in current thread info.
976 */
977void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
978 unsigned long frame_pointer)
979{
980 unsigned long old;
981 int faulted;
982 struct ftrace_graph_ent trace;
983 unsigned long return_hooker = (unsigned long)
984 &return_to_handler;
985
986 if (unlikely(ftrace_graph_is_dead()))
987 return;
988
989 if (unlikely(atomic_read(¤t->tracing_graph_pause)))
990 return;
991
992 /*
993 * Protect against fault, even if it shouldn't
994 * happen. This tool is too much intrusive to
995 * ignore such a protection.
996 */
997 asm volatile(
998 "1: " _ASM_MOV " (%[parent]), %[old]\n"
999 "2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
1000 " movl $0, %[faulted]\n"
1001 "3:\n"
1002
1003 ".section .fixup, \"ax\"\n"
1004 "4: movl $1, %[faulted]\n"
1005 " jmp 3b\n"
1006 ".previous\n"
1007
1008 _ASM_EXTABLE(1b, 4b)
1009 _ASM_EXTABLE(2b, 4b)
1010
1011 : [old] "=&r" (old), [faulted] "=r" (faulted)
1012 : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
1013 : "memory"
1014 );
1015
1016 if (unlikely(faulted)) {
1017 ftrace_graph_stop();
1018 WARN_ON(1);
1019 return;
1020 }
1021
1022 trace.func = self_addr;
1023 trace.depth = current->curr_ret_stack + 1;
1024
1025 /* Only trace if the calling function expects to */
1026 if (!ftrace_graph_entry(&trace)) {
1027 *parent = old;
1028 return;
1029 }
1030
1031 if (ftrace_push_return_trace(old, self_addr, &trace.depth,
1032 frame_pointer) == -EBUSY) {
1033 *parent = old;
1034 return;
1035 }
1036}
1037#endif /* CONFIG_FUNCTION_GRAPH_TRACER */