/* arch/x86/kernel/ftrace.c @ v6.13.7 */

// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/execmem.h>

#include <trace/syscall.h>

#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static int ftrace_poke_late = 0;

void ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race from module loading
	 * and live kernel patching from changing the text permissions while
	 * ftrace has it set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
}

void ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * that they do, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
}

static const char *ftrace_nop_replace(void)
{
	return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/*
	 * No need to translate into a callthunk. The trampoline does
	 * the depth accounting itself.
	 */
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}

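/*
 * Editor's sketch, not part of the kernel source: text_gen_insn() with
 * CALL_INSN_OPCODE emits the 5-byte near call "e8 <rel32>", whose
 * displacement is taken relative to the first byte *after* the
 * instruction.  A minimal version of that displacement math, assuming
 * the 5-byte MCOUNT_INSN_SIZE call used throughout this file:
 */
static inline int __maybe_unused example_call_rel32(unsigned long ip,
						    unsigned long addr)
{
	/* rel32 = target - (address of the byte after the call) */
	return (int)(addr - (ip + 5));
}
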
static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with probe_kernel_*(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		ftrace_expected = old_code;
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late) {
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	} else {
		mutex_lock(&text_mutex);
		text_poke((void *)ip, new_code, MCOUNT_INSN_SIZE);
		mutex_unlock(&text_mutex);
	}
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				 unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

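/*
 * Editor's note: text_poke_bp() patches live text without stop_machine().
 * Roughly: an INT3 is written over the first byte, all CPUs are
 * synchronized, the tail of the new instruction is written, then the
 * INT3 is replaced by the new first byte.  A CPU that executes the
 * site mid-update traps into the int3 handler, which emulates the new
 * instruction, so no CPU ever sees a half-written call.
 */
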
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_expected = old;
			ftrace_bug(ret, rec);
			ftrace_expected = NULL;
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

static inline void *alloc_tramp(unsigned long size)
{
	return execmem_alloc(EXECMEM_FTRACE, size);
}
static inline void tramp_free(void *tramp)
{
	execmem_free(tramp);
}

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};

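/*
 * Editor's illustration: the union overlays the 7-byte
 * "movq function_trace_op(%rip), %rdx" so that its 4-byte RIP-relative
 * offset can be rewritten in place:
 *
 *   byte:    0     1     2     3     4     5     6
 *          0x48  0x8b  0x15  <-- 32-bit LE offset -->
 *          \------ op[3] ----/ \----- offset ------/
 */
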
#define RET_SIZE \
	(IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_MITIGATION_SLS))

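/*
 * Editor's note: with CONFIG_MITIGATION_RETPOLINE the trampoline is
 * terminated by a 5-byte "jmp __x86_return_thunk"; otherwise it ends in
 * a 1-byte RET, plus one more byte (an INT3) when CONFIG_MITIGATION_SLS
 * pads returns against straight-line speculation.  The RET_SIZE
 * expression above encodes exactly that.
 */
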
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long *ptr;
	void *trampoline;
	void *ip, *dest;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
	union ftrace_op_code_union op_ptr;
	void *ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the iret, as well as the address of the ftrace_ops this
	 * trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = text_poke_copy(trampoline, (void *)start_offset, size);
	if (WARN_ON(!ret))
		goto fail;

	ip = trampoline + size;
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
	else
		text_poke_copy(ip, retq, sizeof(retq));

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		if (!text_poke_copy(ip, x86_nops[2], 2))
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	text_poke_copy(ptr, &ops, sizeof(unsigned long));

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

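	/*
	 * Editor's sketch of the math above: %rip points just past the
	 * 7-byte movq, so the rewritten offset must be
	 *
	 *   offset = slot - (insn + OP_REF_SIZE)
	 *
	 * e.g. an ops slot at trampoline+0x100 with the movq at
	 * trampoline+0x20 gives 0x100 - (0x20 + 7) = 0xd9.
	 */
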
	/* put in the new offset to the ftrace_ops */
	text_poke_copy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	/*
	 * No need to translate into a callthunk. The trampoline does
	 * the depth accounting before the call already.
	 */
	dest = ftrace_ops_get_func(ops);
	text_poke_copy_locked(trampoline + call_offset,
	      text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
	      CALL_INSN_SIZE, false);
	mutex_unlock(&text_mutex);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_memory_rox((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}

void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}

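/*
 * Editor's note: the offset computed above is position-independent --
 * the distance of the patchable call site from the start of the
 * default caller text -- so ops->trampoline + offset locates the same
 * call site inside a trampoline copy.
 */
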
void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

	return ptr + CALL_INSN_SIZE + call.disp;
}

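/*
 * Editor's worked example for addr_from_call(): the bytes
 * "e8 fb 00 00 00" at address p decode to disp = 0xfb, so the call
 * target is p + 5 + 0xfb; a negative disp reaches backwards.
 */
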
/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
	defined(CONFIG_FUNCTION_GRAPH_TRACER)
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);

static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}

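/*
 * Editor's note: ftrace_graph_call labels a patchable jump inside the
 * default trampoline.  The two helpers below retarget it with
 * ftrace_mod_jmp(): to ftrace_graph_caller when the graph tracer is
 * enabled, and back to ftrace_stub (a bare return) when it is not.
 */
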
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int bit;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably.  Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

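	/*
	 * Editor's note: the sign test above works because x86-64 kernel
	 * virtual addresses sit in the upper half of the address space,
	 * so bit 63 is set and the cast to (long) is negative.  A
	 * non-negative frame address means we are not on a proper kernel
	 * stack yet (e.g. real-mode resume), so tracing must bail out.
	 */
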
	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	bit = ftrace_test_recursion_trylock(ip, *parent);
	if (bit < 0)
		return;

	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
		*parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
	unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

	prepare_ftrace_return(ip, (unsigned long *)stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/* arch/x86/kernel/ftrace.c @ v5.9 */

// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>

#include <trace/syscall.h>

#include <asm/set_memory.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static int ftrace_poke_late = 0;

int ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race from module loading
	 * and live kernel patching from changing the text permissions while
	 * ftrace has it set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * that they do, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
	return 0;
}

static const char *ftrace_nop_replace(void)
{
	return ideal_nops[NOP_ATOMIC5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with probe_kernel_*(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				 unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_regs_caller_ret(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};

#define RET_SIZE		1

static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long retq;
	unsigned long *ptr;
	void *trampoline;
	void *ip;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the iret, as well as the address of the ftrace_ops this
	 * trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;

	/* The trampoline ends with ret(q) */
	retq = (unsigned long)ftrace_stub;
	ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE);
	if (WARN_ON(ret < 0))
		goto fail;

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		ret = copy_from_kernel_nofault(ip, ideal_nops[2], 2);
		if (ret < 0)
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE,
			     trampoline + call_offset,
			     ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_vm_flush_reset_perms(trampoline);

	if (likely(system_state != SYSTEM_BOOTING))
		set_memory_ro((unsigned long)trampoline, npages);
	set_memory_x((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}

void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

	return ptr + CALL_INSN_SIZE + call.disp;
}

void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;
	int faulted;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably.  Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

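	/*
	 * Editor's note: the asm above leans on the kernel exception
	 * tables.  _ASM_EXTABLE(1b, 4b) records that a fault at label 1
	 * should resume at label 4 in .fixup, which sets "faulted" and
	 * jumps past the store; the C code below then backs out instead
	 * of oopsing.
	 */
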
	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = old;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */