arch/x86/kernel/ftrace.c (v3.5.6)
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>

#include <trace/syscall.h>

#include <asm/cacheflush.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();
	set_kernel_text_ro();
	return 0;
}

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8		= 0xe8;
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}
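
/*
 * Worked example (illustrative addresses, not from a real kernel): with
 * MCOUNT_INSN_SIZE == 5, a call site at ip = 0xffffffff81000100 that
 * should call addr = 0xffffffff81000200 gets
 * offset = addr - (ip + 5) = 0xfb, so the five patched bytes are
 * e8 fb 00 00 00, i.e. a near call relative to the next instruction.
 */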

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static int
do_ftrace_mod_code(unsigned long ip, const void *new_code)
{
	/*
	 * On x86_64, kernel text mappings are mapped read-only with
	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
	 * of the kernel text mapping to modify the kernel text.
	 *
	 * For 32bit kernels, these mappings are the same, and we can use
	 * the kernel identity mapping to modify code.
	 */
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa(ip));

	return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
}

static const unsigned char *ftrace_nop_replace(void)
{
	return ideal_nops[NOP_ATOMIC5];
}

static int
ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can disappear and
	 * change; we need to protect against faulting as well as
	 * code changing. We do this by using the probe_kernel_*
	 * functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	sync_core();

	return 0;
}

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(rec->ip, old, new);

	/* Normal cases use add_brk_on_nop */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * The modifying_ftrace_code is used to tell the breakpoint
 * handler to call ftrace_int3_handler(). If it fails to
 * call this handler for a breakpoint added by ftrace, then
 * the kernel may crash.
 *
 * As atomic writes on x86 do not need a barrier, we do not
 * need to add smp_mb()s for this to work. It is also considered
 * that we cannot read the modifying_ftrace_code before
 * executing the breakpoint. That would be quite remarkable if
 * it could do that. Here's the flow that is required:
 *
 *   CPU-0                          CPU-1
 *
 * atomic_inc(mfc);
 * write int3s
 *				<trap-int3> // implicit (r)mb
 *				if (atomic_read(mfc))
 *					call ftrace_int3_handler()
 *
 * Then when we are finished:
 *
 * atomic_dec(mfc);
 *
 * If we hit a breakpoint that was not set by ftrace, it does not
 * matter if ftrace_int3_handler() is called or not. It will
 * simply be ignored. But it is crucial that an ftrace nop/caller
 * breakpoint is handled. No other user should ever place a
 * breakpoint on an ftrace nop/caller location. It must only
 * be done by this code.
 */
atomic_t modifying_ftrace_code __read_mostly;

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code);

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);

	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ret = ftrace_modify_code(ip, old, new);

	atomic_dec(&modifying_ftrace_code);

	return ret;
}

/*
 * A breakpoint was added to the code address we are about to
 * modify, and this is the handler that will just skip over it.
 * We are either changing a nop into a trace call, or a trace
 * call to a nop. While the change is taking place, we treat
 * it just like it was a nop.
 */
int ftrace_int3_handler(struct pt_regs *regs)
{
	if (WARN_ON_ONCE(!regs))
		return 0;

	if (!ftrace_location(regs->ip - 1))
		return 0;

	regs->ip += MCOUNT_INSN_SIZE - 1;

	return 1;
}
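
/*
 * To see the arithmetic: the int3 trap reports regs->ip one byte past
 * the 0xcc, so regs->ip - 1 is the start of the patched slot that
 * ftrace_location() knows about. Adding MCOUNT_INSN_SIZE - 1 (four more
 * bytes) resumes execution at the instruction following the slot, which
 * makes the half-modified call site behave exactly like a nop.
 */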

static int ftrace_write(unsigned long ip, const char *val, int size)
{
	/*
	 * On x86_64, kernel text mappings are mapped read-only with
	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
	 * of the kernel text mapping to modify the kernel text.
	 *
	 * For 32bit kernels, these mappings are the same, and we can use
	 * the kernel identity mapping to modify code.
	 */
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa(ip));

	return probe_kernel_write((void *)ip, val, size);
}

static int add_break(unsigned long ip, const char *old)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;

	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	if (ftrace_write(ip, &brk, 1))
		return -EPERM;

	return 0;
}

static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned const char *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);

	return add_break(rec->ip, old);
}

static int add_brk_on_nop(struct dyn_ftrace *rec)
{
	unsigned const char *old;

	old = ftrace_nop_replace();

	return add_break(rec->ip, old);
}

static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_test_record(rec, enable);

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_brk_on_nop(rec);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_brk_on_call(rec, ftrace_addr);
	}
	return 0;
}

/*
 * On error, we need to remove breakpoints. This needs to
 * be done carefully. If the address does not currently have a
 * breakpoint, we know we are done. Otherwise, we look at the
 * remaining 4 bytes of the instruction. If it matches a nop
 * we replace the breakpoint with the nop. Otherwise we replace
 * it with the call instruction.
 */
static int remove_breakpoint(struct dyn_ftrace *rec)
{
	unsigned char ins[MCOUNT_INSN_SIZE];
	unsigned char brk = BREAKPOINT_INSTRUCTION;
	const unsigned char *nop;
	unsigned long ftrace_addr;
	unsigned long ip = rec->ip;

	/* If we fail the read, just give up */
	if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* If this does not have a breakpoint, we are done */
	if (ins[0] != brk)
		return -1;

	nop = ftrace_nop_replace();

	/*
	 * If the last 4 bytes of the instruction do not match
	 * a nop, then we assume that this is a call to ftrace_addr.
	 */
	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
		/*
		 * For extra paranoia, we check if the breakpoint is on
		 * a call that would actually jump to the ftrace_addr.
		 * If not, don't touch the breakpoint; we would just
		 * create a disaster.
		 */
		ftrace_addr = (unsigned long)FTRACE_ADDR;
		nop = ftrace_call_replace(ip, ftrace_addr);

		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
			return -EINVAL;
	}

	return probe_kernel_write((void *)ip, &nop[0], 1);
}

static int add_update_code(unsigned long ip, unsigned const char *new)
{
	/* skip breakpoint */
	ip++;
	new++;
	if (ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1))
		return -EPERM;
	return 0;
}

static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);
	return add_update_code(ip, new);
}

static int add_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();
	return add_update_code(ip, new);
}

static int add_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_test_record(rec, enable);

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return add_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return add_update_nop(rec);
	}

	return 0;
}

static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_call_replace(ip, addr);

	if (ftrace_write(ip, new, 1))
		return -EPERM;

	return 0;
}

static int finish_update_nop(struct dyn_ftrace *rec)
{
	unsigned long ip = rec->ip;
	unsigned const char *new;

	new = ftrace_nop_replace();

	if (ftrace_write(ip, new, 1))
		return -EPERM;
	return 0;
}

static int finish_update(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ret = ftrace_update_record(rec, enable);

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		/* converting nop to call */
		return finish_update_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		/* converting a call to a nop */
		return finish_update_nop(rec);
	}

	return 0;
}

static void do_sync_core(void *data)
{
	sync_core();
}

static void run_sync(void)
{
	int enable_irqs = irqs_disabled();

	/* We may be called with interrupts disabled (on bootup). */
	if (enable_irqs)
		local_irq_enable();
	on_each_cpu(do_sync_core, NULL, 1);
	if (enable_irqs)
		local_irq_disable();
}
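
/*
 * ftrace_replace_code() below performs the live update in three passes,
 * each followed by run_sync() so every CPU serializes before the next
 * step:
 *
 *   1) add_breakpoints():  first byte of each site -> int3
 *   2) add_update():       write the last four bytes of the new code
 *   3) finish_update():    restore the first byte of the new code
 *
 * While a site holds the int3, ftrace_int3_handler() skips over it, so
 * no CPU can ever execute a half-written instruction.
 */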

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *report = "adding breakpoints";
	int count = 0;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_breakpoints(rec, enable);
		if (ret)
			goto remove_breakpoints;
		count++;
	}

	run_sync();

	report = "updating code";

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = add_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
	}

	run_sync();

	report = "removing breakpoints";

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		ret = finish_update(rec, enable);
		if (ret)
			goto remove_breakpoints;
	}

	run_sync();

	return;

 remove_breakpoints:
	ftrace_bug(ret, rec ? rec->ip : 0);
	printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		remove_breakpoint(rec);
	}
}

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code)
{
	int ret;

	ret = add_break(ip, old_code);
	if (ret)
		goto out;

	run_sync();

	ret = add_update_code(ip, new_code);
	if (ret)
		goto fail_update;

	run_sync();

	ret = ftrace_write(ip, new_code, 1);
	if (ret) {
		ret = -EPERM;
		goto out;
	}
	run_sync();
 out:
	return ret;

 fail_update:
	probe_kernel_write((void *)ip, &old_code[0], 1);
	goto out;
}

void arch_ftrace_update_code(int command)
{
	/* See comment above by declaration of modifying_ftrace_code */
	atomic_inc(&modifying_ftrace_code);

	ftrace_modify_all_code(command);

	atomic_dec(&modifying_ftrace_code);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod_jmp(unsigned long ip,
			  int old_offset, int new_offset)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
		return -EINVAL;

	*(int *)(&code[1]) = new_offset;

	if (do_ftrace_mod_code(ip, &code))
		return -EPERM;

	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	int old_offset, new_offset;

	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);

	return ftrace_mod_jmp(ip, old_offset, new_offset);
}
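
/*
 * Both helpers above patch the single jmp rel32 (opcode 0xe9) planted
 * at ftrace_graph_call: enabling retargets it from ftrace_stub to
 * ftrace_graph_caller, and disabling does the reverse. The offsets are
 * computed the same way as for call sites: target - (ip + insn size).
 */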

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it onto the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
				&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * go without such protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
		    frame_pointer) == -EBUSY) {
		*parent = old;
		return;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

arch/x86/kernel/ftrace.c (v6.9.4)
// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <trace/syscall.h>

#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static int ftrace_poke_late = 0;

void ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race from module loading
	 * and live kernel patching from changing the text permissions while
	 * ftrace has it set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
}

void ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * that they do, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
}

static const char *ftrace_nop_replace(void)
{
	return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/*
	 * No need to translate into a callthunk. The trampoline does
	 * the depth accounting itself.
	 */
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}
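
/*
 * text_gen_insn() builds the same "e8 <rel32>" near call that the older
 * implementation above assembled by hand with ftrace_code_union; the
 * displacement is still target - (ip + CALL_INSN_SIZE).
 */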

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug was to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with probe_kernel_*(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		ftrace_expected = old_code;
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				 unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_expected = old;
			ftrace_bug(ret, rec);
			ftrace_expected = NULL;
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}
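
/*
 * Note the contrast with the older listing above: the open-coded int3
 * dance is gone. text_poke_queue() batches the updates and
 * text_poke_finish() flushes them through the kernel's shared
 * int3-based text-patching machinery in arch/x86/kernel/alternative.c.
 */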

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};
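
/*
 * Byte layout being patched (illustrative):
 *
 *   48 8b 15 xx xx xx xx     movq xx(%rip), %rdx
 *   \______/ \__________/
 *    op[3]     offset
 *
 * Rewriting .offset re-points the RIP-relative load at the ftrace_ops
 * pointer stored at the end of the trampoline.
 */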

#define RET_SIZE \
	(IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_MITIGATION_SLS))
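
/*
 * RET_SIZE is 5 bytes when retpolines are on (a jmp to the return
 * thunk), otherwise a 1-byte ret plus an int3 pad byte when the
 * straight-line-speculation mitigation is enabled.
 */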

static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long *ptr;
	void *trampoline;
	void *ip, *dest;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the return instruction, as well as the address of the
	 * ftrace_ops this trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
	else
		memcpy(ip, retq, sizeof(retq));

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
		if (ret < 0)
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	/*
	 * No need to translate into a callthunk. The trampoline does
	 * the depth accounting before the call already.
	 */
	dest = ftrace_ops_get_func(ops);
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
	       CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_memory_rox((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}
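
/*
 * Resulting trampoline layout (offsets relative to the allocation):
 *
 *   [0, size)                 copy of ftrace_caller/ftrace_regs_caller
 *   [size, size + RET_SIZE)   ret, or jmp to the return thunk
 *   [size + RET_SIZE, +8)     pointer to this trampoline's ftrace_ops
 *
 * The movq patched via op_ptr loads that trailing pointer, which takes
 * the place of the global function_trace_op for this ops.
 */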

void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

	return ptr + CALL_INSN_SIZE + call.disp;
}
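
/*
 * This is just the call encoding run backwards: for "e8 <rel32>" at
 * ptr, the target is ptr + CALL_INSN_SIZE + disp, mirroring how
 * ftrace_call_replace() computes disp = target - (ip + CALL_INSN_SIZE).
 */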

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
	defined(CONFIG_FUNCTION_GRAPH_TRACER)
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

/*
 * Hook the return address and push it onto the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int bit;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably.  Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	bit = ftrace_test_recursion_trylock(ip, *parent);
	if (bit < 0)
		return;

	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
		*parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &fregs->regs;
	unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

	prepare_ftrace_return(ip, (unsigned long *)stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */