arch/x86/kernel/ftrace.c (v3.5.6)
  1/*
  2 * Code for replacing ftrace calls with jumps.
  3 *
  4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  5 *
  6 * Thanks go to Ingo Molnar, for suggesting the idea.
  7 * Mathieu Desnoyers, for suggesting postponing the modifications.
  8 * Arjan van de Ven, for keeping me straight, and explaining to me
  9 * the dangers of modifying code on the run.
 10 */
 11
 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 13
 14#include <linux/spinlock.h>
 15#include <linux/hardirq.h>
 16#include <linux/uaccess.h>
 17#include <linux/ftrace.h>
 18#include <linux/percpu.h>
 19#include <linux/sched.h>
 20#include <linux/init.h>
 21#include <linux/list.h>
 22#include <linux/module.h>
 23
 24#include <trace/syscall.h>
 25
 26#include <asm/cacheflush.h>
 27#include <asm/kprobes.h>
 28#include <asm/ftrace.h>
 29#include <asm/nops.h>
 30
 31#ifdef CONFIG_DYNAMIC_FTRACE
 32
 33int ftrace_arch_code_modify_prepare(void)
 34{
 35	set_kernel_text_rw();
 36	set_all_modules_text_rw();
 37	return 0;
 38}
 39
 40int ftrace_arch_code_modify_post_process(void)
 41{
 42	set_all_modules_text_ro();
 43	set_kernel_text_ro();
 44	return 0;
 45}
 46
 47union ftrace_code_union {
 48	char code[MCOUNT_INSN_SIZE];
 49	struct {
 50		char e8;
 51		int offset;
 52	} __attribute__((packed));
 53};
 54
 55static int ftrace_calc_offset(long ip, long addr)
 56{
 57	return (int)(addr - ip);
 58}
 59
 60static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 61{
 62	static union ftrace_code_union calc;
 63
 64	calc.e8		= 0xe8;
 65	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
 66
 67	/*
 68	 * No locking needed, this must be called via kstop_machine
 69	 * which in essence is like running on a uniprocessor machine.
 70	 */
 71	return calc.code;
 72}
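/*
 * Illustrative example (not part of the original file), with made-up
 * addresses: if the patched call site is at ip = 0xffffffff81000100
 * and the target (e.g. ftrace_caller) is at addr = 0xffffffff81000f00,
 * then
 *
 *	offset = addr - (ip + MCOUNT_INSN_SIZE)
 *	       = 0xf00 - (0x100 + 5) = 0xdfb
 *
 * and the 5 bytes produced are "e8 fb 0d 00 00": an x86 near call
 * with a little-endian rel32 displacement.
 */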
 73
 74static inline int
 75within(unsigned long addr, unsigned long start, unsigned long end)
 76{
 77	return addr >= start && addr < end;
 78}
 79
 80static int
 81do_ftrace_mod_code(unsigned long ip, const void *new_code)
 82{
 83	/*
 84	 * On x86_64, kernel text mappings are mapped read-only with
 85	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
 86	 * of the kernel text mapping to modify the kernel text.
 87	 *
  88	 * For 32bit kernels, these mappings are the same and we can use
 89	 * kernel identity mapping to modify code.
 90	 */
 91	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
 92		ip = (unsigned long)__va(__pa(ip));
 93
 94	return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
 95}
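/*
 * Illustrative example (not part of the original file), assuming the
 * usual x86_64 layout of this era: kernel text mapped at
 * 0xffffffff80000000 + phys and the direct mapping at
 * PAGE_OFFSET = 0xffff880000000000.  A call site at the text address
 * 0xffffffff81000100 has __pa(ip) == 0x1000100, so __va(__pa(ip))
 * yields 0xffff880001000100: the same bytes reached through the
 * direct mapping, which is left writable under CONFIG_DEBUG_RODATA.
 */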
 96
 97static const unsigned char *ftrace_nop_replace(void)
 98{
 99	return ideal_nops[NOP_ATOMIC5];
100}
101
102static int
103ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
104		   unsigned const char *new_code)
105{
106	unsigned char replaced[MCOUNT_INSN_SIZE];
107
108	/*
109	 * Note: Due to modules and __init, code can
 110	 *  disappear and change. We need to protect against faulting
111	 *  as well as code changing. We do this by using the
112	 *  probe_kernel_* functions.
113	 *
114	 * No real locking needed, this code is run through
115	 * kstop_machine, or before SMP starts.
116	 */
117
118	/* read the text we want to modify */
119	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
120		return -EFAULT;
121
122	/* Make sure it is what we expect it to be */
123	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
124		return -EINVAL;
125
126	/* replace the text with the new text */
127	if (do_ftrace_mod_code(ip, new_code))
128		return -EPERM;
129
130	sync_core();
131
132	return 0;
133}
134
135int ftrace_make_nop(struct module *mod,
136		    struct dyn_ftrace *rec, unsigned long addr)
137{
138	unsigned const char *new, *old;
139	unsigned long ip = rec->ip;
140
141	old = ftrace_call_replace(ip, addr);
142	new = ftrace_nop_replace();
143
144	/*
145	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
146	 * is converted to a nop, and will never become MCOUNT_ADDR
147	 * again. This code is either running before SMP (on boot up)
148	 * or before the code will ever be executed (module load).
149	 * We do not want to use the breakpoint version in this case,
150	 * just modify the code directly.
151	 */
152	if (addr == MCOUNT_ADDR)
153		return ftrace_modify_code_direct(rec->ip, old, new);
154
155	/* Normal cases use add_brk_on_nop */
156	WARN_ONCE(1, "invalid use of ftrace_make_nop");
157	return -EINVAL;
158}
159
160int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
161{
162	unsigned const char *new, *old;
163	unsigned long ip = rec->ip;
164
165	old = ftrace_nop_replace();
166	new = ftrace_call_replace(ip, addr);
167
168	/* Should only be called when module is loaded */
169	return ftrace_modify_code_direct(rec->ip, old, new);
170}
171
172/*
173 * The modifying_ftrace_code is used to tell the breakpoint
174 * handler to call ftrace_int3_handler(). If it fails to
175 * call this handler for a breakpoint added by ftrace, then
176 * the kernel may crash.
177 *
 178 * As atomic writes on x86 do not need a barrier, we do not
179 * need to add smp_mb()s for this to work. It is also considered
180 * that we can not read the modifying_ftrace_code before
181 * executing the breakpoint. That would be quite remarkable if
182 * it could do that. Here's the flow that is required:
183 *
184 *   CPU-0                          CPU-1
185 *
186 * atomic_inc(mfc);
187 * write int3s
188 *				<trap-int3> // implicit (r)mb
189 *				if (atomic_read(mfc))
190 *					call ftrace_int3_handler()
191 *
192 * Then when we are finished:
193 *
194 * atomic_dec(mfc);
195 *
196 * If we hit a breakpoint that was not set by ftrace, it does not
197 * matter if ftrace_int3_handler() is called or not. It will
198 * simply be ignored. But it is crucial that a ftrace nop/caller
199 * breakpoint is handled. No other user should ever place a
200 * breakpoint on an ftrace nop/caller location. It must only
201 * be done by this code.
202 */
203atomic_t modifying_ftrace_code __read_mostly;
204
205static int
206ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
207		   unsigned const char *new_code);
208
209int ftrace_update_ftrace_func(ftrace_func_t func)
210{
211	unsigned long ip = (unsigned long)(&ftrace_call);
212	unsigned char old[MCOUNT_INSN_SIZE], *new;
213	int ret;
214
215	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
216	new = ftrace_call_replace(ip, (unsigned long)func);
217
218	/* See comment above by declaration of modifying_ftrace_code */
219	atomic_inc(&modifying_ftrace_code);
220
221	ret = ftrace_modify_code(ip, old, new);
222
223	atomic_dec(&modifying_ftrace_code);
224
225	return ret;
226}
227
228/*
229 * A breakpoint was added to the code address we are about to
 230 * modify, and this is the handler that will just skip over it.
231 * We are either changing a nop into a trace call, or a trace
232 * call to a nop. While the change is taking place, we treat
233 * it just like it was a nop.
234 */
235int ftrace_int3_handler(struct pt_regs *regs)
236{
237	if (WARN_ON_ONCE(!regs))
238		return 0;
239
240	if (!ftrace_location(regs->ip - 1))
241		return 0;
242
243	regs->ip += MCOUNT_INSN_SIZE - 1;
244
245	return 1;
246}
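/*
 * Illustrative walk-through (not part of the original file): while a
 * 5-byte mcount site at 0xffffffff81000100 temporarily starts with
 * int3, a CPU that executes it traps with regs->ip == 0xffffffff81000101,
 * one byte past the breakpoint.  ftrace_location(regs->ip - 1)
 * recognizes the site, and regs->ip += MCOUNT_INSN_SIZE - 1 resumes
 * execution at 0xffffffff81000105, just past the slot, so the
 * half-modified instruction behaves like a nop.
 */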
247
248static int ftrace_write(unsigned long ip, const char *val, int size)
249{
250	/*
251	 * On x86_64, kernel text mappings are mapped read-only with
252	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
253	 * of the kernel text mapping to modify the kernel text.
254	 *
255	 * For 32bit kernels, these mappings are same and we can use
256	 * kernel identity mapping to modify code.
257	 */
258	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
259		ip = (unsigned long)__va(__pa(ip));
260
261	return probe_kernel_write((void *)ip, val, size);
262}
263
264static int add_break(unsigned long ip, const char *old)
265{
266	unsigned char replaced[MCOUNT_INSN_SIZE];
267	unsigned char brk = BREAKPOINT_INSTRUCTION;
268
269	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
270		return -EFAULT;
271
272	/* Make sure it is what we expect it to be */
273	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
274		return -EINVAL;
275
276	if (ftrace_write(ip, &brk, 1))
277		return -EPERM;
278
279	return 0;
280}
281
282static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
283{
284	unsigned const char *old;
285	unsigned long ip = rec->ip;
286
287	old = ftrace_call_replace(ip, addr);
288
289	return add_break(rec->ip, old);
290}
291
292
293static int add_brk_on_nop(struct dyn_ftrace *rec)
294{
295	unsigned const char *old;
296
297	old = ftrace_nop_replace();
298
299	return add_break(rec->ip, old);
300}
301
302static int add_breakpoints(struct dyn_ftrace *rec, int enable)
303{
304	unsigned long ftrace_addr;
305	int ret;
306
307	ret = ftrace_test_record(rec, enable);
308
309	ftrace_addr = (unsigned long)FTRACE_ADDR;
310
311	switch (ret) {
312	case FTRACE_UPDATE_IGNORE:
313		return 0;
314
315	case FTRACE_UPDATE_MAKE_CALL:
316		/* converting nop to call */
317		return add_brk_on_nop(rec);
318
319	case FTRACE_UPDATE_MAKE_NOP:
320		/* converting a call to a nop */
321		return add_brk_on_call(rec, ftrace_addr);
322	}
323	return 0;
324}
325
326/*
327 * On error, we need to remove breakpoints. This needs to
 328 * be done carefully. If the address does not currently have a
329 * breakpoint, we know we are done. Otherwise, we look at the
330 * remaining 4 bytes of the instruction. If it matches a nop
331 * we replace the breakpoint with the nop. Otherwise we replace
332 * it with the call instruction.
333 */
334static int remove_breakpoint(struct dyn_ftrace *rec)
335{
336	unsigned char ins[MCOUNT_INSN_SIZE];
337	unsigned char brk = BREAKPOINT_INSTRUCTION;
338	const unsigned char *nop;
339	unsigned long ftrace_addr;
340	unsigned long ip = rec->ip;
341
342	/* If we fail the read, just give up */
343	if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
344		return -EFAULT;
345
346	/* If this does not have a breakpoint, we are done */
347	if (ins[0] != brk)
348		return -1;
349
350	nop = ftrace_nop_replace();
351
352	/*
353	 * If the last 4 bytes of the instruction do not match
354	 * a nop, then we assume that this is a call to ftrace_addr.
355	 */
356	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
357		/*
 358		 * For extra paranoia, we check if the breakpoint is on
 359		 * a call that would actually jump to the ftrace_addr.
 360		 * If not, don't touch the breakpoint; we may just create
 361		 * a disaster.
362		 */
363		ftrace_addr = (unsigned long)FTRACE_ADDR;
364		nop = ftrace_call_replace(ip, ftrace_addr);
365
366		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
367			return -EINVAL;
368	}
369
370	return probe_kernel_write((void *)ip, &nop[0], 1);
371}
372
373static int add_update_code(unsigned long ip, unsigned const char *new)
374{
375	/* skip breakpoint */
376	ip++;
377	new++;
378	if (ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1))
379		return -EPERM;
380	return 0;
381}
382
383static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
384{
385	unsigned long ip = rec->ip;
386	unsigned const char *new;
387
388	new = ftrace_call_replace(ip, addr);
389	return add_update_code(ip, new);
390}
391
392static int add_update_nop(struct dyn_ftrace *rec)
393{
394	unsigned long ip = rec->ip;
395	unsigned const char *new;
396
397	new = ftrace_nop_replace();
398	return add_update_code(ip, new);
399}
400
401static int add_update(struct dyn_ftrace *rec, int enable)
402{
403	unsigned long ftrace_addr;
404	int ret;
405
406	ret = ftrace_test_record(rec, enable);
407
408	ftrace_addr = (unsigned long)FTRACE_ADDR;
409
410	switch (ret) {
411	case FTRACE_UPDATE_IGNORE:
412		return 0;
413
414	case FTRACE_UPDATE_MAKE_CALL:
415		/* converting nop to call */
416		return add_update_call(rec, ftrace_addr);
417
418	case FTRACE_UPDATE_MAKE_NOP:
419		/* converting a call to a nop */
420		return add_update_nop(rec);
421	}
422
423	return 0;
424}
425
426static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
427{
428	unsigned long ip = rec->ip;
429	unsigned const char *new;
430
431	new = ftrace_call_replace(ip, addr);
432
433	if (ftrace_write(ip, new, 1))
434		return -EPERM;
435
436	return 0;
437}
438
439static int finish_update_nop(struct dyn_ftrace *rec)
440{
441	unsigned long ip = rec->ip;
442	unsigned const char *new;
443
444	new = ftrace_nop_replace();
445
446	if (ftrace_write(ip, new, 1))
447		return -EPERM;
448	return 0;
449}
450
451static int finish_update(struct dyn_ftrace *rec, int enable)
452{
453	unsigned long ftrace_addr;
454	int ret;
455
456	ret = ftrace_update_record(rec, enable);
457
458	ftrace_addr = (unsigned long)FTRACE_ADDR;
459
460	switch (ret) {
461	case FTRACE_UPDATE_IGNORE:
462		return 0;
463
464	case FTRACE_UPDATE_MAKE_CALL:
465		/* converting nop to call */
466		return finish_update_call(rec, ftrace_addr);
467
468	case FTRACE_UPDATE_MAKE_NOP:
469		/* converting a call to a nop */
470		return finish_update_nop(rec);
471	}
472
473	return 0;
474}
475
476static void do_sync_core(void *data)
477{
478	sync_core();
479}
480
481static void run_sync(void)
482{
483	int enable_irqs = irqs_disabled();
484
 485	/* We may be called with interrupts disabled (on bootup). */
486	if (enable_irqs)
487		local_irq_enable();
488	on_each_cpu(do_sync_core, NULL, 1);
489	if (enable_irqs)
490		local_irq_disable();
491}
492
493void ftrace_replace_code(int enable)
494{
495	struct ftrace_rec_iter *iter;
496	struct dyn_ftrace *rec;
497	const char *report = "adding breakpoints";
498	int count = 0;
499	int ret;
500
501	for_ftrace_rec_iter(iter) {
502		rec = ftrace_rec_iter_record(iter);
503
504		ret = add_breakpoints(rec, enable);
505		if (ret)
506			goto remove_breakpoints;
507		count++;
508	}
509
510	run_sync();
511
512	report = "updating code";
513
514	for_ftrace_rec_iter(iter) {
515		rec = ftrace_rec_iter_record(iter);
516
517		ret = add_update(rec, enable);
518		if (ret)
519			goto remove_breakpoints;
520	}
521
522	run_sync();
523
524	report = "removing breakpoints";
525
526	for_ftrace_rec_iter(iter) {
527		rec = ftrace_rec_iter_record(iter);
528
529		ret = finish_update(rec, enable);
530		if (ret)
531			goto remove_breakpoints;
532	}
533
534	run_sync();
535
536	return;
537
538 remove_breakpoints:
539	ftrace_bug(ret, rec ? rec->ip : 0);
540	printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
541	for_ftrace_rec_iter(iter) {
542		rec = ftrace_rec_iter_record(iter);
543		remove_breakpoint(rec);
544	}
545}
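/*
 * Illustrative sketch (not part of the original file) of the byte-level
 * effect of the three passes above when a nop site is turned into a
 * call whose rel32 displacement happens to be 0xdfb (as in the
 * ftrace_call_replace() example above):
 *
 *	initial state:           0f 1f 44 00 00   (NOP_ATOMIC5)
 *	add_breakpoints() pass:  cc 1f 44 00 00   (int3 guards the site)
 *	run_sync()
 *	add_update() pass:       cc fb 0d 00 00   (new tail, int3 kept)
 *	run_sync()
 *	finish_update() pass:    e8 fb 0d 00 00   (the call is now live)
 *	run_sync()
 *
 * Any CPU that executes the site while its first byte is cc traps into
 * ftrace_int3_handler() and simply skips the whole 5-byte slot.
 */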
546
547static int
548ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
549		   unsigned const char *new_code)
550{
551	int ret;
552
553	ret = add_break(ip, old_code);
554	if (ret)
555		goto out;
556
557	run_sync();
558
559	ret = add_update_code(ip, new_code);
560	if (ret)
561		goto fail_update;
562
563	run_sync();
564
565	ret = ftrace_write(ip, new_code, 1);
566	if (ret) {
567		ret = -EPERM;
568		goto out;
569	}
570	run_sync();
571 out:
572	return ret;
573
574 fail_update:
575	probe_kernel_write((void *)ip, &old_code[0], 1);
576	goto out;
577}
578
579void arch_ftrace_update_code(int command)
580{
581	/* See comment above by declaration of modifying_ftrace_code */
582	atomic_inc(&modifying_ftrace_code);
583
584	ftrace_modify_all_code(command);
585
586	atomic_dec(&modifying_ftrace_code);
587}
588
589int __init ftrace_dyn_arch_init(void *data)
590{
 591	/* The return code is returned via data */
592	*(unsigned long *)data = 0;
593
594	return 0;
595}
596#endif
597
598#ifdef CONFIG_FUNCTION_GRAPH_TRACER
599
600#ifdef CONFIG_DYNAMIC_FTRACE
601extern void ftrace_graph_call(void);
602
603static int ftrace_mod_jmp(unsigned long ip,
604			  int old_offset, int new_offset)
605{
606	unsigned char code[MCOUNT_INSN_SIZE];
607
608	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
609		return -EFAULT;
610
611	if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
612		return -EINVAL;
613
614	*(int *)(&code[1]) = new_offset;
615
616	if (do_ftrace_mod_code(ip, &code))
617		return -EPERM;
618
619	return 0;
620}
621
622int ftrace_enable_ftrace_graph_caller(void)
623{
624	unsigned long ip = (unsigned long)(&ftrace_graph_call);
625	int old_offset, new_offset;
626
627	old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
628	new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
629
630	return ftrace_mod_jmp(ip, old_offset, new_offset);
631}
632
633int ftrace_disable_ftrace_graph_caller(void)
634{
635	unsigned long ip = (unsigned long)(&ftrace_graph_call);
636	int old_offset, new_offset;
637
638	old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
639	new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
640
641	return ftrace_mod_jmp(ip, old_offset, new_offset);
642}
643
644#endif /* !CONFIG_DYNAMIC_FTRACE */
645
646/*
647 * Hook the return address and push it in the stack of return addrs
648 * in current thread info.
649 */
650void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
651			   unsigned long frame_pointer)
652{
653	unsigned long old;
654	int faulted;
655	struct ftrace_graph_ent trace;
656	unsigned long return_hooker = (unsigned long)
657				&return_to_handler;
658
659	if (unlikely(atomic_read(&current->tracing_graph_pause)))
660		return;
661
662	/*
663	 * Protect against fault, even if it shouldn't
 664	 * happen. This tool is too intrusive to
665	 * ignore such a protection.
666	 */
667	asm volatile(
668		"1: " _ASM_MOV " (%[parent]), %[old]\n"
669		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
670		"   movl $0, %[faulted]\n"
671		"3:\n"
672
673		".section .fixup, \"ax\"\n"
674		"4: movl $1, %[faulted]\n"
675		"   jmp 3b\n"
676		".previous\n"
677
678		_ASM_EXTABLE(1b, 4b)
679		_ASM_EXTABLE(2b, 4b)
680
681		: [old] "=&r" (old), [faulted] "=r" (faulted)
682		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
683		: "memory"
684	);
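	/*
	 * Illustrative note (not part of the original file): ignoring
	 * the fault handling, the asm above does the equivalent of
	 *
	 *	old = *parent;            // remember the real return address
	 *	*parent = return_hooker;  // divert the return into
	 *	                          // return_to_handler()
	 */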
685
686	if (unlikely(faulted)) {
687		ftrace_graph_stop();
688		WARN_ON(1);
689		return;
690	}
691
692	trace.func = self_addr;
693	trace.depth = current->curr_ret_stack + 1;
694
695	/* Only trace if the calling function expects to */
696	if (!ftrace_graph_entry(&trace)) {
697		*parent = old;
698		return;
699	}
700
701	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
702		    frame_pointer) == -EBUSY) {
703		*parent = old;
704		return;
705	}
706}
707#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
arch/x86/kernel/ftrace.c (v3.15)
  1/*
  2 * Code for replacing ftrace calls with jumps.
  3 *
  4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  5 *
  6 * Thanks go to Ingo Molnar, for suggesting the idea.
  7 * Mathieu Desnoyers, for suggesting postponing the modifications.
  8 * Arjan van de Ven, for keeping me straight, and explaining to me
  9 * the dangers of modifying code on the run.
 10 */
 11
 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 13
 14#include <linux/spinlock.h>
 15#include <linux/hardirq.h>
 16#include <linux/uaccess.h>
 17#include <linux/ftrace.h>
 18#include <linux/percpu.h>
 19#include <linux/sched.h>
 20#include <linux/init.h>
 21#include <linux/list.h>
 22#include <linux/module.h>
 23
 24#include <trace/syscall.h>
 25
 26#include <asm/cacheflush.h>
 27#include <asm/kprobes.h>
 28#include <asm/ftrace.h>
 29#include <asm/nops.h>
 30
 31#ifdef CONFIG_DYNAMIC_FTRACE
 32
 33int ftrace_arch_code_modify_prepare(void)
 34{
 35	set_kernel_text_rw();
 36	set_all_modules_text_rw();
 37	return 0;
 38}
 39
 40int ftrace_arch_code_modify_post_process(void)
 41{
 42	set_all_modules_text_ro();
 43	set_kernel_text_ro();
 44	return 0;
 45}
 46
 47union ftrace_code_union {
 48	char code[MCOUNT_INSN_SIZE];
 49	struct {
 50		char e8;
 51		int offset;
 52	} __attribute__((packed));
 53};
 54
 55static int ftrace_calc_offset(long ip, long addr)
 56{
 57	return (int)(addr - ip);
 58}
 59
 60static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 61{
 62	static union ftrace_code_union calc;
 63
 64	calc.e8		= 0xe8;
 65	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
 66
 67	/*
 68	 * No locking needed, this must be called via kstop_machine
 69	 * which in essence is like running on a uniprocessor machine.
 70	 */
 71	return calc.code;
 72}
 73
 74static inline int
 75within(unsigned long addr, unsigned long start, unsigned long end)
 76{
 77	return addr >= start && addr < end;
 78}
 79
 80static unsigned long text_ip_addr(unsigned long ip)
 81{
 82	/*
 83	 * On x86_64, kernel text mappings are mapped read-only with
 84	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
 85	 * of the kernel text mapping to modify the kernel text.
 86	 *
  87	 * For 32bit kernels, these mappings are the same and we can use
 88	 * kernel identity mapping to modify code.
 89	 */
 90	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
 91		ip = (unsigned long)__va(__pa_symbol(ip));
 92
 93	return ip;
 94}
 95
 96static const unsigned char *ftrace_nop_replace(void)
 97{
 98	return ideal_nops[NOP_ATOMIC5];
 99}
100
101static int
102ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
103		   unsigned const char *new_code)
104{
105	unsigned char replaced[MCOUNT_INSN_SIZE];
106
107	/*
108	 * Note: Due to modules and __init, code can
 109	 *  disappear and change. We need to protect against faulting
110	 *  as well as code changing. We do this by using the
111	 *  probe_kernel_* functions.
112	 *
113	 * No real locking needed, this code is run through
114	 * kstop_machine, or before SMP starts.
115	 */
116
117	/* read the text we want to modify */
118	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
119		return -EFAULT;
120
121	/* Make sure it is what we expect it to be */
122	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
123		return -EINVAL;
124
125	ip = text_ip_addr(ip);
126
127	/* replace the text with the new text */
128	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
129		return -EPERM;
130
131	sync_core();
132
133	return 0;
134}
135
136int ftrace_make_nop(struct module *mod,
137		    struct dyn_ftrace *rec, unsigned long addr)
138{
139	unsigned const char *new, *old;
140	unsigned long ip = rec->ip;
141
142	old = ftrace_call_replace(ip, addr);
143	new = ftrace_nop_replace();
144
145	/*
146	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
147	 * is converted to a nop, and will never become MCOUNT_ADDR
148	 * again. This code is either running before SMP (on boot up)
149	 * or before the code will ever be executed (module load).
150	 * We do not want to use the breakpoint version in this case,
151	 * just modify the code directly.
152	 */
153	if (addr == MCOUNT_ADDR)
154		return ftrace_modify_code_direct(rec->ip, old, new);
155
156	/* Normal cases use add_brk_on_nop */
157	WARN_ONCE(1, "invalid use of ftrace_make_nop");
158	return -EINVAL;
159}
160
161int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
162{
163	unsigned const char *new, *old;
164	unsigned long ip = rec->ip;
165
166	old = ftrace_nop_replace();
167	new = ftrace_call_replace(ip, addr);
168
169	/* Should only be called when module is loaded */
170	return ftrace_modify_code_direct(rec->ip, old, new);
171}
172
173/*
174 * The modifying_ftrace_code is used to tell the breakpoint
175 * handler to call ftrace_int3_handler(). If it fails to
176 * call this handler for a breakpoint added by ftrace, then
177 * the kernel may crash.
178 *
 179 * As atomic writes on x86 do not need a barrier, we do not
180 * need to add smp_mb()s for this to work. It is also considered
181 * that we can not read the modifying_ftrace_code before
182 * executing the breakpoint. That would be quite remarkable if
183 * it could do that. Here's the flow that is required:
184 *
185 *   CPU-0                          CPU-1
186 *
187 * atomic_inc(mfc);
188 * write int3s
189 *				<trap-int3> // implicit (r)mb
190 *				if (atomic_read(mfc))
191 *					call ftrace_int3_handler()
192 *
193 * Then when we are finished:
194 *
195 * atomic_dec(mfc);
196 *
197 * If we hit a breakpoint that was not set by ftrace, it does not
198 * matter if ftrace_int3_handler() is called or not. It will
199 * simply be ignored. But it is crucial that a ftrace nop/caller
200 * breakpoint is handled. No other user should ever place a
201 * breakpoint on an ftrace nop/caller location. It must only
202 * be done by this code.
203 */
204atomic_t modifying_ftrace_code __read_mostly;
205
206static int
207ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
208		   unsigned const char *new_code);
209
210/*
211 * Should never be called:
212 *  As it is only called by __ftrace_replace_code() which is called by
213 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
214 *  which is called to turn mcount into nops or nops into function calls
215 *  but not to convert a function from not using regs to one that uses
216 *  regs, which ftrace_modify_call() is for.
217 */
218int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
219				 unsigned long addr)
220{
221	WARN_ON(1);
222	return -EINVAL;
223}
224
225static unsigned long ftrace_update_func;
226
227static int update_ftrace_func(unsigned long ip, void *new)
228{
229	unsigned char old[MCOUNT_INSN_SIZE];
230	int ret;
231
232	memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
233
234	ftrace_update_func = ip;
235	/* Make sure the breakpoints see the ftrace_update_func update */
236	smp_wmb();
237
238	/* See comment above by declaration of modifying_ftrace_code */
239	atomic_inc(&modifying_ftrace_code);
240
241	ret = ftrace_modify_code(ip, old, new);
242
243	atomic_dec(&modifying_ftrace_code);
244
245	return ret;
246}
247
248int ftrace_update_ftrace_func(ftrace_func_t func)
249{
250	unsigned long ip = (unsigned long)(&ftrace_call);
251	unsigned char *new;
252	int ret;
253
254	new = ftrace_call_replace(ip, (unsigned long)func);
255	ret = update_ftrace_func(ip, new);
256
257	/* Also update the regs callback function */
258	if (!ret) {
259		ip = (unsigned long)(&ftrace_regs_call);
260		new = ftrace_call_replace(ip, (unsigned long)func);
261		ret = update_ftrace_func(ip, new);
262	}
263
264	return ret;
265}
266
267static int is_ftrace_caller(unsigned long ip)
268{
269	if (ip == ftrace_update_func)
270		return 1;
271
272	return 0;
273}
274
275/*
276 * A breakpoint was added to the code address we are about to
 277 * modify, and this is the handler that will just skip over it.
278 * We are either changing a nop into a trace call, or a trace
279 * call to a nop. While the change is taking place, we treat
280 * it just like it was a nop.
281 */
282int ftrace_int3_handler(struct pt_regs *regs)
283{
284	unsigned long ip;
285
286	if (WARN_ON_ONCE(!regs))
287		return 0;
288
289	ip = regs->ip - 1;
290	if (!ftrace_location(ip) && !is_ftrace_caller(ip))
291		return 0;
292
293	regs->ip += MCOUNT_INSN_SIZE - 1;
294
295	return 1;
296}
297
298static int ftrace_write(unsigned long ip, const char *val, int size)
299{
300	/*
301	 * On x86_64, kernel text mappings are mapped read-only with
302	 * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
303	 * of the kernel text mapping to modify the kernel text.
304	 *
305	 * For 32bit kernels, these mappings are same and we can use
306	 * kernel identity mapping to modify code.
307	 */
308	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
309		ip = (unsigned long)__va(__pa_symbol(ip));
310
311	if (probe_kernel_write((void *)ip, val, size))
312		return -EPERM;
313
314	return 0;
315}
316
317static int add_break(unsigned long ip, const char *old)
318{
319	unsigned char replaced[MCOUNT_INSN_SIZE];
320	unsigned char brk = BREAKPOINT_INSTRUCTION;
321
322	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
323		return -EFAULT;
324
325	/* Make sure it is what we expect it to be */
326	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
327		return -EINVAL;
328
329	return ftrace_write(ip, &brk, 1);
330}
331
332static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
333{
334	unsigned const char *old;
335	unsigned long ip = rec->ip;
336
337	old = ftrace_call_replace(ip, addr);
338
339	return add_break(rec->ip, old);
340}
341
342
343static int add_brk_on_nop(struct dyn_ftrace *rec)
344{
345	unsigned const char *old;
346
347	old = ftrace_nop_replace();
348
349	return add_break(rec->ip, old);
350}
351
352/*
353 * If the record has the FTRACE_FL_REGS set, that means that it
354 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 355 * is not set, then it wants to convert to the normal callback.
356 */
357static unsigned long get_ftrace_addr(struct dyn_ftrace *rec)
358{
359	if (rec->flags & FTRACE_FL_REGS)
360		return (unsigned long)FTRACE_REGS_ADDR;
361	else
362		return (unsigned long)FTRACE_ADDR;
363}
364
365/*
366 * The FTRACE_FL_REGS_EN is set when the record already points to
367 * a function that saves all the regs. Basically the '_EN' version
368 * represents the current state of the function.
369 */
370static unsigned long get_ftrace_old_addr(struct dyn_ftrace *rec)
371{
372	if (rec->flags & FTRACE_FL_REGS_EN)
373		return (unsigned long)FTRACE_REGS_ADDR;
374	else
375		return (unsigned long)FTRACE_ADDR;
376}
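/*
 * Illustrative note (not part of the original file): the two flags can
 * disagree while a call site is being converted.  For example, a record
 * with FTRACE_FL_REGS set but FTRACE_FL_REGS_EN clear currently calls
 * ftrace_caller (FTRACE_ADDR) and is being switched over to
 * ftrace_regs_caller (FTRACE_REGS_ADDR); that is why the breakpoint
 * code below uses get_ftrace_old_addr() to match the existing bytes
 * and get_ftrace_addr() to build the replacement.
 */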
377
378static int add_breakpoints(struct dyn_ftrace *rec, int enable)
379{
380	unsigned long ftrace_addr;
381	int ret;
382
383	ret = ftrace_test_record(rec, enable);
384
385	ftrace_addr = get_ftrace_addr(rec);
386
387	switch (ret) {
388	case FTRACE_UPDATE_IGNORE:
389		return 0;
390
391	case FTRACE_UPDATE_MAKE_CALL:
392		/* converting nop to call */
393		return add_brk_on_nop(rec);
394
395	case FTRACE_UPDATE_MODIFY_CALL_REGS:
396	case FTRACE_UPDATE_MODIFY_CALL:
397		ftrace_addr = get_ftrace_old_addr(rec);
398		/* fall through */
399	case FTRACE_UPDATE_MAKE_NOP:
400		/* converting a call to a nop */
401		return add_brk_on_call(rec, ftrace_addr);
402	}
403	return 0;
404}
405
406/*
407 * On error, we need to remove breakpoints. This needs to
 408 * be done carefully. If the address does not currently have a
409 * breakpoint, we know we are done. Otherwise, we look at the
410 * remaining 4 bytes of the instruction. If it matches a nop
411 * we replace the breakpoint with the nop. Otherwise we replace
412 * it with the call instruction.
413 */
414static int remove_breakpoint(struct dyn_ftrace *rec)
415{
416	unsigned char ins[MCOUNT_INSN_SIZE];
417	unsigned char brk = BREAKPOINT_INSTRUCTION;
418	const unsigned char *nop;
419	unsigned long ftrace_addr;
420	unsigned long ip = rec->ip;
421
422	/* If we fail the read, just give up */
423	if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
424		return -EFAULT;
425
426	/* If this does not have a breakpoint, we are done */
427	if (ins[0] != brk)
428		return 0;
429
430	nop = ftrace_nop_replace();
431
432	/*
433	 * If the last 4 bytes of the instruction do not match
434	 * a nop, then we assume that this is a call to ftrace_addr.
435	 */
436	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
437		/*
 438		 * For extra paranoia, we check if the breakpoint is on
 439		 * a call that would actually jump to the ftrace_addr.
 440		 * If not, don't touch the breakpoint; we may just create
 441		 * a disaster.
442		 */
443		ftrace_addr = get_ftrace_addr(rec);
444		nop = ftrace_call_replace(ip, ftrace_addr);
445
446		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
447			goto update;
448
449		/* Check both ftrace_addr and ftrace_old_addr */
450		ftrace_addr = get_ftrace_old_addr(rec);
451		nop = ftrace_call_replace(ip, ftrace_addr);
452
453		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
454			return -EINVAL;
455	}
456
457 update:
458	return ftrace_write(ip, nop, 1);
459}
460
461static int add_update_code(unsigned long ip, unsigned const char *new)
462{
463	/* skip breakpoint */
464	ip++;
465	new++;
466	return ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1);
467}
468
469static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
470{
471	unsigned long ip = rec->ip;
472	unsigned const char *new;
473
474	new = ftrace_call_replace(ip, addr);
475	return add_update_code(ip, new);
476}
477
478static int add_update_nop(struct dyn_ftrace *rec)
479{
480	unsigned long ip = rec->ip;
481	unsigned const char *new;
482
483	new = ftrace_nop_replace();
484	return add_update_code(ip, new);
485}
486
487static int add_update(struct dyn_ftrace *rec, int enable)
488{
489	unsigned long ftrace_addr;
490	int ret;
491
492	ret = ftrace_test_record(rec, enable);
493
494	ftrace_addr  = get_ftrace_addr(rec);
495
496	switch (ret) {
497	case FTRACE_UPDATE_IGNORE:
498		return 0;
499
500	case FTRACE_UPDATE_MODIFY_CALL_REGS:
501	case FTRACE_UPDATE_MODIFY_CALL:
502	case FTRACE_UPDATE_MAKE_CALL:
503		/* converting nop to call */
504		return add_update_call(rec, ftrace_addr);
505
506	case FTRACE_UPDATE_MAKE_NOP:
507		/* converting a call to a nop */
508		return add_update_nop(rec);
509	}
510
511	return 0;
512}
513
514static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
515{
516	unsigned long ip = rec->ip;
517	unsigned const char *new;
518
519	new = ftrace_call_replace(ip, addr);
520
521	return ftrace_write(ip, new, 1);
522}
523
524static int finish_update_nop(struct dyn_ftrace *rec)
525{
526	unsigned long ip = rec->ip;
527	unsigned const char *new;
528
529	new = ftrace_nop_replace();
530
531	return ftrace_write(ip, new, 1);
532}
533
534static int finish_update(struct dyn_ftrace *rec, int enable)
535{
536	unsigned long ftrace_addr;
537	int ret;
538
539	ret = ftrace_update_record(rec, enable);
540
541	ftrace_addr = get_ftrace_addr(rec);
542
543	switch (ret) {
544	case FTRACE_UPDATE_IGNORE:
545		return 0;
546
547	case FTRACE_UPDATE_MODIFY_CALL_REGS:
548	case FTRACE_UPDATE_MODIFY_CALL:
549	case FTRACE_UPDATE_MAKE_CALL:
550		/* converting nop to call */
551		return finish_update_call(rec, ftrace_addr);
552
553	case FTRACE_UPDATE_MAKE_NOP:
554		/* converting a call to a nop */
555		return finish_update_nop(rec);
556	}
557
558	return 0;
559}
560
561static void do_sync_core(void *data)
562{
563	sync_core();
564}
565
566static void run_sync(void)
567{
568	int enable_irqs = irqs_disabled();
569
 570	/* We may be called with interrupts disabled (on bootup). */
571	if (enable_irqs)
572		local_irq_enable();
573	on_each_cpu(do_sync_core, NULL, 1);
574	if (enable_irqs)
575		local_irq_disable();
576}
577
578void ftrace_replace_code(int enable)
579{
580	struct ftrace_rec_iter *iter;
581	struct dyn_ftrace *rec;
582	const char *report = "adding breakpoints";
583	int count = 0;
584	int ret;
585
586	for_ftrace_rec_iter(iter) {
587		rec = ftrace_rec_iter_record(iter);
588
589		ret = add_breakpoints(rec, enable);
590		if (ret)
591			goto remove_breakpoints;
592		count++;
593	}
594
595	run_sync();
596
597	report = "updating code";
598
599	for_ftrace_rec_iter(iter) {
600		rec = ftrace_rec_iter_record(iter);
601
602		ret = add_update(rec, enable);
603		if (ret)
604			goto remove_breakpoints;
605	}
606
607	run_sync();
608
609	report = "removing breakpoints";
610
611	for_ftrace_rec_iter(iter) {
612		rec = ftrace_rec_iter_record(iter);
613
614		ret = finish_update(rec, enable);
615		if (ret)
616			goto remove_breakpoints;
617	}
618
619	run_sync();
620
621	return;
622
623 remove_breakpoints:
624	ftrace_bug(ret, rec ? rec->ip : 0);
625	printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
626	for_ftrace_rec_iter(iter) {
627		rec = ftrace_rec_iter_record(iter);
628		/*
629		 * Breakpoints are handled only when this function is in
630		 * progress. The system could not work with them.
631		 */
632		if (remove_breakpoint(rec))
633			BUG();
634	}
635	run_sync();
636}
637
638static int
639ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
640		   unsigned const char *new_code)
641{
642	int ret;
643
644	ret = add_break(ip, old_code);
645	if (ret)
646		goto out;
647
648	run_sync();
649
650	ret = add_update_code(ip, new_code);
651	if (ret)
652		goto fail_update;
653
654	run_sync();
655
656	ret = ftrace_write(ip, new_code, 1);
657	/*
658	 * The breakpoint is handled only when this function is in progress.
659	 * The system could not work if we could not remove it.
660	 */
661	BUG_ON(ret);
662 out:
663	run_sync();
664	return ret;
665
666 fail_update:
667	/* Also here the system could not work with the breakpoint */
668	if (ftrace_write(ip, old_code, 1))
669		BUG();
670	goto out;
671}
672
673void arch_ftrace_update_code(int command)
674{
675	/* See comment above by declaration of modifying_ftrace_code */
676	atomic_inc(&modifying_ftrace_code);
677
678	ftrace_modify_all_code(command);
679
680	atomic_dec(&modifying_ftrace_code);
681}
682
683int __init ftrace_dyn_arch_init(void)
684{
685	return 0;
686}
687#endif
688
689#ifdef CONFIG_FUNCTION_GRAPH_TRACER
690
691#ifdef CONFIG_DYNAMIC_FTRACE
692extern void ftrace_graph_call(void);
693
694static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
695{
696	static union ftrace_code_union calc;
697
698	/* Jmp not a call (ignore the .e8) */
699	calc.e8		= 0xe9;
700	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
701
702	/*
703	 * ftrace external locks synchronize the access to the static variable.
704	 */
705	return calc.code;
706}
707
708static int ftrace_mod_jmp(unsigned long ip, void *func)
709{
710	unsigned char *new;
711
712	new = ftrace_jmp_replace(ip, (unsigned long)func);
713
714	return update_ftrace_func(ip, new);
715}
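/*
 * Illustrative note (not part of the original file): the generated
 * instruction has the same 5-byte opcode + rel32 layout as the e8 call
 * built by ftrace_call_replace(), only with the e9 (jmp near) opcode,
 * so ftrace_graph_call can be flipped between "jmp ftrace_graph_caller"
 * and "jmp ftrace_stub" by the same breakpoint-based
 * update_ftrace_func() machinery.
 */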
716
717int ftrace_enable_ftrace_graph_caller(void)
718{
719	unsigned long ip = (unsigned long)(&ftrace_graph_call);
720
721	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
722}
723
724int ftrace_disable_ftrace_graph_caller(void)
725{
726	unsigned long ip = (unsigned long)(&ftrace_graph_call);
727
728	return ftrace_mod_jmp(ip, &ftrace_stub);
729}
730
731#endif /* !CONFIG_DYNAMIC_FTRACE */
732
733/*
734 * Hook the return address and push it in the stack of return addrs
735 * in current thread info.
736 */
737void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
738			   unsigned long frame_pointer)
739{
740	unsigned long old;
741	int faulted;
742	struct ftrace_graph_ent trace;
743	unsigned long return_hooker = (unsigned long)
744				&return_to_handler;
745
746	if (unlikely(atomic_read(&current->tracing_graph_pause)))
747		return;
748
749	/*
750	 * Protect against fault, even if it shouldn't
 751	 * happen. This tool is too intrusive to
752	 * ignore such a protection.
753	 */
754	asm volatile(
755		"1: " _ASM_MOV " (%[parent]), %[old]\n"
756		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
757		"   movl $0, %[faulted]\n"
758		"3:\n"
759
760		".section .fixup, \"ax\"\n"
761		"4: movl $1, %[faulted]\n"
762		"   jmp 3b\n"
763		".previous\n"
764
765		_ASM_EXTABLE(1b, 4b)
766		_ASM_EXTABLE(2b, 4b)
767
768		: [old] "=&r" (old), [faulted] "=r" (faulted)
769		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
770		: "memory"
771	);
772
773	if (unlikely(faulted)) {
774		ftrace_graph_stop();
775		WARN_ON(1);
776		return;
777	}
778
779	trace.func = self_addr;
780	trace.depth = current->curr_ret_stack + 1;
781
782	/* Only trace if the calling function expects to */
783	if (!ftrace_graph_entry(&trace)) {
784		*parent = old;
785		return;
786	}
787
788	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
789		    frame_pointer) == -EBUSY) {
790		*parent = old;
791		return;
792	}
793}
794#endif /* CONFIG_FUNCTION_GRAPH_TRACER */