arch/x86/kernel/ftrace.c @ v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/execmem.h>

#include <trace/syscall.h>

#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static int ftrace_poke_late = 0;

void ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race from module loading
	 * and live kernel patching from changing the text permissions while
	 * ftrace has it set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
}

void ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * that they do, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
}

static const char *ftrace_nop_replace(void)
{
	return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/*
	 * No need to translate into a callthunk. The trampoline does
	 * the depth accounting itself.
	 */
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}
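
/*
 * Illustrative sketch (editor's addition, not part of this kernel source):
 * what text_gen_insn(CALL_INSN_OPCODE, ip, addr) produces here is the
 * 5-byte x86 near call -- opcode 0xE8 followed by a 32-bit displacement
 * measured from the *next* instruction. A minimal user-space demo of that
 * encoding, assuming a little-endian build; the name gen_call is
 * hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void gen_call(uint8_t insn[5], uint64_t ip, uint64_t target)
{
	/* displacement is relative to the end of the 5-byte instruction */
	int32_t disp = (int32_t)(target - (ip + 5));

	insn[0] = 0xE8;			/* CALL rel32 */
	memcpy(insn + 1, &disp, 4);	/* little-endian immediate */
}

int main(void)
{
	uint8_t insn[5];
	int i;

	gen_call(insn, 0x1000, 0x2000);	/* expect: e8 fb 0f 00 00 */
	for (i = 0; i < 5; i++)
		printf("%02x ", insn[i]);
	printf("\n");
	return 0;
}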

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug was to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with probe_kernel_*(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */

	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		ftrace_expected = old_code;
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
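
/*
 * Illustrative sketch (editor's addition, not part of this kernel source):
 * the pattern above is "read the live bytes back, compare them with what
 * we expect, and only then patch". A user-space analogue on a plain
 * buffer; patch_verified is a hypothetical name, and the kernel versions
 * go through copy_from_kernel_nofault()/text_poke() rather than memcpy().
 */
#include <stdio.h>
#include <string.h>

#define INSN_SIZE 5

static int patch_verified(unsigned char *ip, const unsigned char *old,
			  const unsigned char *new_insn)
{
	if (memcmp(ip, old, INSN_SIZE) != 0)
		return -1;	/* live bytes differ from expectation: refuse */
	memcpy(ip, new_insn, INSN_SIZE);
	return 0;
}

int main(void)
{
	unsigned char text[INSN_SIZE] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 }; /* 5-byte nop */
	const unsigned char nop[INSN_SIZE] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
	const unsigned char call[INSN_SIZE] = { 0xe8, 0x00, 0x00, 0x00, 0x00 };

	printf("first patch: %d\n", patch_verified(text, nop, call));	/* 0 */
	printf("second patch: %d\n", patch_verified(text, nop, call));	/* -1: already a call */
	return 0;
}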

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late) {
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	} else {
		mutex_lock(&text_mutex);
		text_poke((void *)ip, new_code, MCOUNT_INSN_SIZE);
		mutex_unlock(&text_mutex);
	}
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				 unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_expected = old;
			ftrace_bug(ret, rec);
			ftrace_expected = NULL;
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

static inline void *alloc_tramp(unsigned long size)
{
	return execmem_alloc(EXECMEM_FTRACE, size);
}
static inline void tramp_free(void *tramp)
{
	execmem_free(tramp);
}

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};
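
/*
 * Illustrative sketch (editor's addition, not part of this kernel source):
 * the packed union above overlays the 7 bytes of
 * "movq function_trace_op(%rip), %rdx" (48 8b 15 <imm32>) so the 4-byte
 * displacement can be spliced in place. A user-space check of that layout;
 * op_code_union is a hypothetical stand-in for ftrace_op_code_union.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

union op_code_union {
	unsigned char code[7];
	struct {
		unsigned char op[3];	/* 48 8b 15 */
		int32_t offset;		/* rip-relative displacement */
	} __attribute__((packed));
};

int main(void)
{
	union op_code_union insn;
	int i;

	_Static_assert(sizeof(union op_code_union) == 7, "must stay 7 bytes");

	memcpy(insn.op, "\x48\x8b\x15", 3);
	insn.offset = 0x1234;		/* pretend displacement */

	for (i = 0; i < 7; i++)
		printf("%02x ", insn.code[i]);	/* 48 8b 15 34 12 00 00 */
	printf("\n");
	return 0;
}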

#define RET_SIZE \
	(IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_MITIGATION_SLS))

static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long *ptr;
	void *trampoline;
	void *ip, *dest;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
	union ftrace_op_code_union op_ptr;
	void *ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the iret, as well as the address of the ftrace_ops this
	 * trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = text_poke_copy(trampoline, (void *)start_offset, size);
	if (WARN_ON(!ret))
		goto fail;

	ip = trampoline + size;
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
	else
		text_poke_copy(ip, retq, sizeof(retq));

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		if (!text_poke_copy(ip, x86_nops[2], 2))
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	text_poke_copy(ptr, &ops, sizeof(unsigned long));

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	text_poke_copy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	/*
	 * No need to translate into a callthunk. The trampoline does
	 * the depth accounting before the call already.
	 */
	dest = ftrace_ops_get_func(ops);
	text_poke_copy_locked(trampoline + call_offset,
	      text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
	      CALL_INSN_SIZE, false);
	mutex_unlock(&text_mutex);

	/* ALLOC_TRAMP flags lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_memory_rox((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}
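
/*
 * Illustrative sketch (editor's addition, not part of this kernel source):
 * the trampoline laid out above is [copied caller body][return insn]
 * [ftrace_ops pointer], and the rip-relative displacement written into the
 * movq is measured from the end of that 7-byte instruction. The same
 * arithmetic on a malloc'd buffer; every size and offset below is made up.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t size = 128;	/* pretend size of the copied caller body */
	size_t ret_size = 1;	/* pretend RET_SIZE */
	size_t op_offset = 16;	/* pretend offset of the movq inside the body */
	unsigned char *tramp = malloc(size + ret_size + sizeof(void *));
	/* the ops pointer lives right after the body and the return insn */
	unsigned long *ptr = (unsigned long *)(tramp + size + ret_size);
	/* disp = target - end of the 7-byte movq, as create_trampoline does */
	int32_t disp = (int32_t)((unsigned long)ptr -
				 ((unsigned long)tramp + op_offset + 7));

	printf("disp = %d (should be %zu)\n", disp,
	       size + ret_size - op_offset - 7);
	free(tramp);
	return 0;
}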

void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

	return ptr + CALL_INSN_SIZE + call.disp;
}
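
/*
 * Illustrative sketch (editor's addition, not part of this kernel source):
 * addr_from_call() is the inverse of the call encoding shown earlier --
 * target = address of the call + 5 + disp. A user-space decoder; the name
 * call_target is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t call_target(const unsigned char insn[5], uint64_t insn_addr)
{
	int32_t disp;

	if (insn[0] != 0xE8)	/* not a CALL rel32 */
		return 0;
	memcpy(&disp, insn + 1, 4);
	return insn_addr + 5 + (int64_t)disp;
}

int main(void)
{
	/* e8 fb 0f 00 00 at 0x1000 calls 0x2000 (round-trips the encoder above) */
	const unsigned char insn[5] = { 0xe8, 0xfb, 0x0f, 0x00, 0x00 };

	printf("target = %#llx\n", (unsigned long long)call_target(insn, 0x1000));
	return 0;
}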

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
	defined(CONFIG_FUNCTION_GRAPH_TRACER)
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int bit;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably.  Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	bit = ftrace_test_recursion_trylock(ip, *parent);
	if (bit < 0)
		return;

	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
		*parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
	unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

	prepare_ftrace_return(ip, (unsigned long *)stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
arch/x86/kernel/ftrace.c @ v3.15
 
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>

#include <trace/syscall.h>

#include <asm/cacheflush.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	set_kernel_text_ro();
	return 0;
}

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8		= 0xe8;
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static unsigned long text_ip_addr(unsigned long ip)
{
	/*
	 * On x86_64, kernel text mappings are mapped read-only with
	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
	 * of the kernel text mapping to modify the kernel text.
	 *
	 * For 32bit kernels, these mappings are same and we can use
	 * kernel identity mapping to modify code.
	 */
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa_symbol(ip));

	return ip;
}

static const unsigned char *ftrace_nop_replace(void)
{
	return ideal_nops[NOP_ATOMIC5];
}

static int
ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can
	 *  disappear and change, we need to protect against faulting
	 *  as well as code changing. We do this by using the
	 *  probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	ip = text_ip_addr(ip);

	/* replace the text with the new text */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	sync_core();

	return 0;
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(rec->ip, old, new);

	/* Normal cases use add_brk_on_nop */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * The modifying_ftrace_code is used to tell the breakpoint
 * handler to call ftrace_int3_handler(). If it fails to
 * call this handler for a breakpoint added by ftrace, then
 * the kernel may crash.
 *
 * As atomic_writes on x86 do not need a barrier, we do not
 * need to add smp_mb()s for this to work. It is also considered
 * that we can not read the modifying_ftrace_code before
 * executing the breakpoint. That would be quite remarkable if
 * it could do that. Here's the flow that is required:
 *
 *   CPU-0                          CPU-1
 *
 * atomic_inc(mfc);
 * write int3s
 *				<trap-int3> // implicit (r)mb
 *				if (atomic_read(mfc))
 *					call ftrace_int3_handler()
 *
 * Then when we are finished:
 *
 * atomic_dec(mfc);
 *
 * If we hit a breakpoint that was not set by ftrace, it does not
 * matter if ftrace_int3_handler() is called or not. It will
 * simply be ignored. But it is crucial that a ftrace nop/caller
 * breakpoint is handled. No other user should ever place a
 * breakpoint on an ftrace nop/caller location. It must only
 * be done by this code.
 */
atomic_t modifying_ftrace_code __read_mostly;

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code);

/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				 unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

static unsigned long ftrace_update_func;

static int update_ftrace_func(unsigned long ip, void *new)
{
	unsigned char old[MCOUNT_INSN_SIZE];
	int ret;

	memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);

	ftrace_update_func = ip;
	/* Make sure the breakpoints see the ftrace_update_func update */
	smp_wmb();

	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ret = ftrace_modify_code(ip, old, new);

	atomic_dec(&modifying_ftrace_code);

	return ret;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char *new;
	int ret;

	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = update_ftrace_func(ip, new);

	/* Also update the regs callback function */
	if (!ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		new = ftrace_call_replace(ip, (unsigned long)func);
		ret = update_ftrace_func(ip, new);
	}

	return ret;
}

static int is_ftrace_caller(unsigned long ip)
{
	if (ip == ftrace_update_func)
		return 1;

	return 0;
}

/*
 * A breakpoint was added to the code address we are about to
 * modify, and this is the handle that will just skip over it.
 * We are either changing a nop into a trace call, or a trace
 * call to a nop. While the change is taking place, we treat
 * it just like it was a nop.
 */
int ftrace_int3_handler(struct pt_regs *regs)
{
	unsigned long ip;

	if (WARN_ON_ONCE(!regs))
		return 0;

	ip = regs->ip - 1;
	if (!ftrace_location(ip) && !is_ftrace_caller(ip))
		return 0;

	regs->ip += MCOUNT_INSN_SIZE - 1;

	return 1;
}

static int ftrace_write(unsigned long ip, const char *val, int size)
{
	/*
	 * On x86_64, kernel text mappings are mapped read-only with
	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
	 * of the kernel text mapping to modify the kernel text.
	 *
	 * For 32bit kernels, these mappings are same and we can use
	 * kernel identity mapping to modify code.
	 */
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa_symbol(ip));

	if (probe_kernel_write((void *)ip, val, size))
		return -EPERM;

	return 0;
}

static int add_break(unsigned long ip, const char *old)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;

	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	return ftrace_write(ip, &brk, 1);
}

static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);

	return add_break(rec->ip, old);
}


static int add_brk_on_nop(struct dyn_ftrace *rec)
{
	unsigned const char *old;

	old = ftrace_nop_replace();

	return add_break(rec->ip, old);
}

/*
 * If the record has the FTRACE_FL_REGS set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 */
static unsigned long get_ftrace_addr(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_REGS)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

/*
 * The FTRACE_FL_REGS_EN is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
 */
static unsigned long get_ftrace_old_addr(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_REGS_EN)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}

static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_test_record(rec, enable);

	ftrace_addr = get_ftrace_addr(rec);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_brk_on_nop(rec);

	case FTRACE_UPDATE_MODIFY_CALL_REGS:
	case FTRACE_UPDATE_MODIFY_CALL:
		ftrace_addr = get_ftrace_old_addr(rec);
		/* fall through */
	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_brk_on_call(rec, ftrace_addr);
	}
	return 0;
}

/*
 * On error, we need to remove breakpoints. This needs to
 * be done carefully. If the address does not currently have a
 * breakpoint, we know we are done. Otherwise, we look at the
 * remaining 4 bytes of the instruction. If it matches a nop
 * we replace the breakpoint with the nop. Otherwise we replace
 * it with the call instruction.
 */
static int remove_breakpoint(struct dyn_ftrace *rec)
{
	unsigned char ins[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;
	const unsigned char *nop;
	unsigned long ftrace_addr;
	unsigned long ip = rec->ip;

	/* If we fail the read, just give up */
	if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* If this does not have a breakpoint, we are done */
	if (ins[0] != brk)
		return 0;

	nop = ftrace_nop_replace();

	/*
	 * If the last 4 bytes of the instruction do not match
	 * a nop, then we assume that this is a call to ftrace_addr.
	 */
	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
		/*
		 * For extra paranoidism, we check if the breakpoint is on
		 * a call that would actually jump to the ftrace_addr.
		 * If not, don't touch the breakpoint, we may just create
		 * a disaster.
		 */
		ftrace_addr = get_ftrace_addr(rec);
		nop = ftrace_call_replace(ip, ftrace_addr);

		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
			goto update;

		/* Check both ftrace_addr and ftrace_old_addr */
		ftrace_addr = get_ftrace_old_addr(rec);
		nop = ftrace_call_replace(ip, ftrace_addr);

		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
			return -EINVAL;
	}

 update:
	return ftrace_write(ip, nop, 1);
}

static int add_update_code(unsigned long ip, unsigned const char *new)
{
	/* skip breakpoint */
	ip++;
	new++;
	return ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1);
}

static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);
	return add_update_code(ip, new);
}

static int add_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();
	return add_update_code(ip, new);
}

static int add_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_test_record(rec, enable);

	ftrace_addr = get_ftrace_addr(rec);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MODIFY_CALL_REGS:
	case FTRACE_UPDATE_MODIFY_CALL:
	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_update_nop(rec);
	}

	return 0;
}

static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);

	return ftrace_write(ip, new, 1);
}

static int finish_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();

	return ftrace_write(ip, new, 1);
}

static int finish_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_update_record(rec, enable);

	ftrace_addr = get_ftrace_addr(rec);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MODIFY_CALL_REGS:
	case FTRACE_UPDATE_MODIFY_CALL:
	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return finish_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return finish_update_nop(rec);
	}

	return 0;
}

static void do_sync_core(void *data)
{
	sync_core();
}

static void run_sync(void)
{
	int enable_irqs = irqs_disabled();

	/* We may be called with interrupts disabled (on bootup). */
	if (enable_irqs)
		local_irq_enable();
	on_each_cpu(do_sync_core, NULL, 1);
	if (enable_irqs)
		local_irq_disable();
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *report = "adding breakpoints";
	int count = 0;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_breakpoints(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	report = "updating code";

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
	}

	run_sync();

	report = "removing breakpoints";

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = finish_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
	}

	run_sync();

	return;

 remove_breakpoints:
	ftrace_bug(ret, rec ? rec->ip : 0);
	printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		/*
		 * Breakpoints are handled only when this function is in
		 * progress. The system could not work with them.
		 */
		if (remove_breakpoint(rec))
			BUG();
	}
	run_sync();
}

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code)
{
	int ret;

	ret = add_break(ip, old_code);
	if (ret)
		goto out;

	run_sync();

	ret = add_update_code(ip, new_code);
	if (ret)
		goto fail_update;

	run_sync();

	ret = ftrace_write(ip, new_code, 1);
	/*
	 * The breakpoint is handled only when this function is in progress.
	 * The system could not work if we could not remove it.
	 */
	BUG_ON(ret);
 out:
	run_sync();
	return ret;

 fail_update:
	/* Also here the system could not work with the breakpoint */
	if (ftrace_write(ip, old_code, 1))
		BUG();
	goto out;
}
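
/*
 * Illustrative sketch (editor's addition, not part of this kernel source):
 * the three-step dance above -- int3 on the first byte, sync, rewrite the
 * tail, sync, rewrite the head, sync -- ensures no CPU can execute a
 * half-written instruction; while the int3 is in place,
 * ftrace_int3_handler() just skips over the whole slot. A user-space
 * walkthrough on a plain buffer, with run_sync() reduced to comments.
 */
#include <stdio.h>
#include <string.h>

#define INSN_SIZE 5
#define INT3 0xCC

static void dump(const unsigned char *b)
{
	for (int i = 0; i < INSN_SIZE; i++)
		printf("%02x ", b[i]);
	printf("\n");
}

int main(void)
{
	unsigned char text[INSN_SIZE] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };	/* old: 5-byte nop */
	const unsigned char new_insn[INSN_SIZE] = { 0xe8, 0xfb, 0x0f, 0x00, 0x00 }; /* new: call */

	/* step 1: breakpoint the first byte so no CPU runs a torn instruction */
	text[0] = INT3;
	dump(text);	/* cc 1f 44 00 00 -- run_sync() here in the kernel */

	/* step 2: rewrite everything but the first byte */
	memcpy(text + 1, new_insn + 1, INSN_SIZE - 1);
	dump(text);	/* cc fb 0f 00 00 -- run_sync() again */

	/* step 3: replace the breakpoint with the new first byte */
	text[0] = new_insn[0];
	dump(text);	/* e8 fb 0f 00 00 -- final run_sync() */

	return 0;
}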

void arch_ftrace_update_code(int command)
{
	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ftrace_modify_all_code(command);

	atomic_dec(&modifying_ftrace_code);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	/* Jmp not a call (ignore the .e8) */
	calc.e8		= 0xe9;
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * ftrace external locks synchronize the access to the static variable.
	 */
	return calc.code;
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	unsigned char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);

	return update_ftrace_func(ip, new);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
				&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against fault, even if it shouldn't
	 * happen. This tool is too much intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
		    frame_pointer) == -EBUSY) {
		*parent = old;
		return;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */