v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Dynamic function tracing support.
  4 *
  5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  6 *
  7 * Thanks go to Ingo Molnar, for suggesting the idea.
  8 * Mathieu Desnoyers, for suggesting postponing the modifications.
  9 * Arjan van de Ven, for keeping me straight, and explaining to me
 10 * the dangers of modifying code on the run.
 11 */
 12
 13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 14
 15#include <linux/spinlock.h>
 16#include <linux/hardirq.h>
 17#include <linux/uaccess.h>
 18#include <linux/ftrace.h>
 19#include <linux/percpu.h>
 20#include <linux/sched.h>
 21#include <linux/slab.h>
 22#include <linux/init.h>
 23#include <linux/list.h>
 24#include <linux/module.h>
 25#include <linux/memory.h>
 26#include <linux/vmalloc.h>
 27#include <linux/set_memory.h>
 28#include <linux/execmem.h>
 29
 30#include <trace/syscall.h>
 31
 32#include <asm/kprobes.h>
 33#include <asm/ftrace.h>
 34#include <asm/nops.h>
 35#include <asm/text-patching.h>
 36
 37#ifdef CONFIG_DYNAMIC_FTRACE
 38
 39static int ftrace_poke_late = 0;
 40
 41void ftrace_arch_code_modify_prepare(void)
 42    __acquires(&text_mutex)
 43{
 44	/*
 45	 * Need to grab text_mutex to prevent a race from module loading
 46	 * and live kernel patching from changing the text permissions while
 47	 * ftrace has it set to "read/write".
 48	 */
 49	mutex_lock(&text_mutex);
 50	ftrace_poke_late = 1;
 51}
 52
 53void ftrace_arch_code_modify_post_process(void)
 54    __releases(&text_mutex)
 55{
 56	/*
 57	 * ftrace_make_{call,nop}() may be called during
 58	 * module load, and we need to finish the text_poke_queue()
 59	 * that they do, here.
 60	 */
 61	text_poke_finish();
 62	ftrace_poke_late = 0;
 63	mutex_unlock(&text_mutex);
 64}
 65
 66static const char *ftrace_nop_replace(void)
 67{
 68	return x86_nops[5];
 69}
 70
 71static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 72{
 73	/*
 74	 * No need to translate into a callthunk. The trampoline does
 75	 * the depth accounting itself.
 76	 */
 77	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
 78}
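/*
 * Illustrative sketch, not part of this file: for CALL_INSN_OPCODE,
 * text_gen_insn() produces the classic 5-byte x86 CALL -- one opcode
 * byte 0xE8 followed by a 32-bit displacement measured from the end
 * of the instruction. A minimal user-space encoder, assuming only
 * that encoding and a little-endian x86 host:
 */
#include <stdint.h>
#include <string.h>

static void encode_call_rel32(uint8_t insn[5], uint64_t ip, uint64_t addr)
{
	/* rel32 is relative to the byte after the 5-byte instruction */
	int32_t rel = (int32_t)(addr - (ip + 5));

	insn[0] = 0xE8;				/* CALL rel32 opcode */
	memcpy(&insn[1], &rel, sizeof(rel));	/* little-endian store */
}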
 79
 80static int ftrace_verify_code(unsigned long ip, const char *old_code)
 81{
 82	char cur_code[MCOUNT_INSN_SIZE];
 83
 84	/*
 85	 * Note:
 86	 * We are paranoid about modifying text, as if a bug was to happen, it
 87	 * could cause us to read or write to someplace that could cause harm.
 88	 * Carefully read and modify the code with probe_kernel_*(), and make
 89	 * sure what we read is what we expected it to be before modifying it.
 90	 */
 91	/* read the text we want to modify */
 92	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
 93		WARN_ON(1);
 94		return -EFAULT;
 95	}
 96
 97	/* Make sure it is what we expect it to be */
 98	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
 99		ftrace_expected = old_code;
100		WARN_ON(1);
101		return -EINVAL;
102	}
103
104	return 0;
105}
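/*
 * The pattern above, reduced to its essence (a user-space sketch;
 * patch_if_expected() is a hypothetical name, not a kernel API):
 * never modify text blindly -- read it back, compare against what it
 * should contain, and only then write.
 */
#include <errno.h>
#include <string.h>

static int patch_if_expected(unsigned char *site, const unsigned char *expected,
			     const unsigned char *replacement, size_t len)
{
	if (memcmp(site, expected, len) != 0)	/* site changed under us? */
		return -EINVAL;

	memcpy(site, replacement, len);		/* safe to patch now */
	return 0;
}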
106
107/*
108 * Marked __ref because it calls text_poke_early() which is .init.text. That is
109 * ok because that call will happen early, during boot, when .init sections are
110 * still present.
111 */
112static int __ref
113ftrace_modify_code_direct(unsigned long ip, const char *old_code,
114			  const char *new_code)
115{
116	int ret = ftrace_verify_code(ip, old_code);
117	if (ret)
118		return ret;
119
120	/* replace the text with the new text */
121	if (ftrace_poke_late) {
122		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
123	} else {
124		mutex_lock(&text_mutex);
125		text_poke((void *)ip, new_code, MCOUNT_INSN_SIZE);
126		mutex_unlock(&text_mutex);
127	}
128	return 0;
129}
130
131int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
132{
133	unsigned long ip = rec->ip;
134	const char *new, *old;
135
136	old = ftrace_call_replace(ip, addr);
137	new = ftrace_nop_replace();
138
139	/*
140	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
141	 * is converted to a nop, and will never become MCOUNT_ADDR
142	 * again. This code is either running before SMP (on boot up)
143	 * or before the code will ever be executed (module load).
144	 * We do not want to use the breakpoint version in this case,
145	 * just modify the code directly.
146	 */
147	if (addr == MCOUNT_ADDR)
148		return ftrace_modify_code_direct(ip, old, new);
149
150	/*
151	 * x86 overrides ftrace_replace_code -- this function will never be used
152	 * in this case.
153	 */
154	WARN_ONCE(1, "invalid use of ftrace_make_nop");
155	return -EINVAL;
156}
157
158int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
159{
160	unsigned long ip = rec->ip;
161	const char *new, *old;
162
163	old = ftrace_nop_replace();
164	new = ftrace_call_replace(ip, addr);
165
166	/* Should only be called when module is loaded */
167	return ftrace_modify_code_direct(rec->ip, old, new);
168}
169
170/*
171 * Should never be called:
172 *  As it is only called by __ftrace_replace_code() which is called by
173 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
174 *  which is called to turn mcount into nops or nops into function calls
175 *  but not to convert a function from not using regs to one that uses
176 *  regs, which ftrace_modify_call() is for.
177 */
178int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
179				 unsigned long addr)
180{
181	WARN_ON(1);
182	return -EINVAL;
183}
184
185int ftrace_update_ftrace_func(ftrace_func_t func)
186{
187	unsigned long ip;
188	const char *new;
189
190	ip = (unsigned long)(&ftrace_call);
191	new = ftrace_call_replace(ip, (unsigned long)func);
192	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
193
194	ip = (unsigned long)(&ftrace_regs_call);
195	new = ftrace_call_replace(ip, (unsigned long)func);
196	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
197
198	return 0;
199}
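/*
 * A sketch of the int3-based protocol that text_poke_bp() uses so the
 * two call sites above can be patched while other CPUs may be running
 * through them (simplified; the real implementation lives in
 * arch/x86/kernel/alternative.c):
 *
 *	1. write an INT3 over the first byte,     then sync all cores
 *	2. write the new bytes 1..n-1 (the tail), then sync all cores
 *	3. write the new first byte,              then sync all cores
 *
 * A CPU that reaches the site mid-update traps on the INT3 and is
 * emulated or steered past the instruction, so no CPU ever executes
 * a half-written instruction.
 */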
200
201void ftrace_replace_code(int enable)
202{
203	struct ftrace_rec_iter *iter;
204	struct dyn_ftrace *rec;
205	const char *new, *old;
206	int ret;
207
208	for_ftrace_rec_iter(iter) {
209		rec = ftrace_rec_iter_record(iter);
210
211		switch (ftrace_test_record(rec, enable)) {
212		case FTRACE_UPDATE_IGNORE:
213		default:
214			continue;
215
216		case FTRACE_UPDATE_MAKE_CALL:
217			old = ftrace_nop_replace();
218			break;
219
220		case FTRACE_UPDATE_MODIFY_CALL:
221		case FTRACE_UPDATE_MAKE_NOP:
222			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
223			break;
224		}
225
226		ret = ftrace_verify_code(rec->ip, old);
227		if (ret) {
228			ftrace_expected = old;
229			ftrace_bug(ret, rec);
230			ftrace_expected = NULL;
231			return;
232		}
233	}
234
235	for_ftrace_rec_iter(iter) {
236		rec = ftrace_rec_iter_record(iter);
237
238		switch (ftrace_test_record(rec, enable)) {
239		case FTRACE_UPDATE_IGNORE:
240		default:
241			continue;
242
243		case FTRACE_UPDATE_MAKE_CALL:
244		case FTRACE_UPDATE_MODIFY_CALL:
245			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
246			break;
247
248		case FTRACE_UPDATE_MAKE_NOP:
249			new = ftrace_nop_replace();
250			break;
251		}
252
253		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
254		ftrace_update_record(rec, enable);
255	}
256	text_poke_finish();
257}
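/*
 * The two-pass shape above, in miniature: verify every record first,
 * then queue all the writes and flush once, so a single bad site
 * aborts the batch before anything has been modified. A user-space
 * sketch with hypothetical helpers (verify_site, queue_write and
 * flush_writes are stand-ins, not kernel APIs):
 */
struct site;
static int verify_site(struct site *s);
static void queue_write(struct site *s);
static void flush_writes(void);

static int batch_patch(struct site **sites, int n)
{
	int i;

	for (i = 0; i < n; i++)			/* pass 1: verify only */
		if (verify_site(sites[i]))
			return -1;		/* abort before touching text */

	for (i = 0; i < n; i++)			/* pass 2: queue the writes */
		queue_write(sites[i]);

	flush_writes();				/* one sync for the whole batch */
	return 0;
}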
258
259void arch_ftrace_update_code(int command)
260{
261	ftrace_modify_all_code(command);
262}
263
264/* Currently only x86_64 supports dynamic trampolines */
265#ifdef CONFIG_X86_64
266
267static inline void *alloc_tramp(unsigned long size)
268{
269	return execmem_alloc(EXECMEM_FTRACE, size);
270}
271static inline void tramp_free(void *tramp)
272{
273	execmem_free(tramp);
274}
275
276/* Defined as markers to the end of the ftrace default trampolines */
277extern void ftrace_regs_caller_end(void);
278extern void ftrace_caller_end(void);
279extern void ftrace_caller_op_ptr(void);
280extern void ftrace_regs_caller_op_ptr(void);
281extern void ftrace_regs_caller_jmp(void);
282
283/* movq function_trace_op(%rip), %rdx */
284/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
285#define OP_REF_SIZE	7
286
287/*
288 * The ftrace_ops is passed to the function callback. Since the
289 * trampoline only services a single ftrace_ops, we can pass in
290 * that ops directly.
291 *
292 * The ftrace_op_code_union is used to create a pointer to the
293 * ftrace_ops that will be passed to the callback function.
294 */
295union ftrace_op_code_union {
296	char code[OP_REF_SIZE];
297	struct {
298		char op[3];
299		int offset;
300	} __attribute__((packed));
301};
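/*
 * What the union overlays, made concrete (an illustrative user-space
 * sketch): the 7-byte "movq <rel32>(%rip), %rdx" is the three bytes
 * 48 8b 15 followed by a 32-bit displacement measured from the end of
 * the instruction -- exactly the field create_trampoline() rewrites
 * below to point at the ops pointer stored in the trampoline.
 */
#include <stdint.h>
#include <string.h>

static void encode_mov_rdx_riprel(uint8_t insn[7], uint64_t ip, uint64_t data)
{
	static const uint8_t op[3] = { 0x48, 0x8b, 0x15 };
	int32_t rel = (int32_t)(data - (ip + 7));	/* rel32 from next insn */

	memcpy(insn, op, sizeof(op));
	memcpy(&insn[3], &rel, sizeof(rel));
}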
302
303#define RET_SIZE \
304	(IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_MITIGATION_SLS))
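/*
 * RET_SIZE spelled out (a sketch of the two configurations):
 *
 *	CONFIG_MITIGATION_RETPOLINE:	5 bytes -- a jmp to the return thunk
 *	otherwise:			1 byte (ret), plus one more byte
 *					(int3) when CONFIG_MITIGATION_SLS
 *					guards against straight-line
 *					speculation past the ret
 */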
305
306static unsigned long
307create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
308{
309	unsigned long start_offset;
310	unsigned long end_offset;
311	unsigned long op_offset;
312	unsigned long call_offset;
313	unsigned long jmp_offset;
314	unsigned long offset;
315	unsigned long npages;
316	unsigned long size;
317	unsigned long *ptr;
318	void *trampoline;
319	void *ip, *dest;
320	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
321	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
322	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
323	union ftrace_op_code_union op_ptr;
324	void *ret;
325
326	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
327		start_offset = (unsigned long)ftrace_regs_caller;
328		end_offset = (unsigned long)ftrace_regs_caller_end;
329		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
330		call_offset = (unsigned long)ftrace_regs_call;
331		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
332	} else {
333		start_offset = (unsigned long)ftrace_caller;
334		end_offset = (unsigned long)ftrace_caller_end;
335		op_offset = (unsigned long)ftrace_caller_op_ptr;
336		call_offset = (unsigned long)ftrace_call;
337		jmp_offset = 0;
338	}
339
340	size = end_offset - start_offset;
341
342	/*
343	 * Allocate enough size to store the ftrace_caller code,
 344	 * the iret, as well as the address of the ftrace_ops this
345	 * trampoline is used for.
346	 */
347	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
348	if (!trampoline)
349		return 0;
350
351	*tramp_size = size + RET_SIZE + sizeof(void *);
352	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
353
354	/* Copy ftrace_caller onto the trampoline memory */
355	ret = text_poke_copy(trampoline, (void *)start_offset, size);
356	if (WARN_ON(!ret))
357		goto fail;
358
359	ip = trampoline + size;
360	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
361		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
362	else
363		text_poke_copy(ip, retq, sizeof(retq));
364
365	/* No need to test direct calls on created trampolines */
366	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
367		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
368		ip = trampoline + (jmp_offset - start_offset);
369		if (WARN_ON(*(char *)ip != 0x75))
370			goto fail;
371		if (!text_poke_copy(ip, x86_nops[2], 2))
372			goto fail;
373	}
374
375	/*
376	 * The address of the ftrace_ops that is used for this trampoline
377	 * is stored at the end of the trampoline. This will be used to
378	 * load the third parameter for the callback. Basically, that
379	 * location at the end of the trampoline takes the place of
380	 * the global function_trace_op variable.
381	 */
382
383	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
384	text_poke_copy(ptr, &ops, sizeof(unsigned long));
385
386	op_offset -= start_offset;
387	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
388
389	/* Are we pointing to the reference? */
390	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
391		goto fail;
392
393	/* Load the contents of ptr into the callback parameter */
394	offset = (unsigned long)ptr;
395	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;
396
397	op_ptr.offset = offset;
398
399	/* put in the new offset to the ftrace_ops */
400	text_poke_copy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
401
402	/* put in the call to the function */
403	mutex_lock(&text_mutex);
404	call_offset -= start_offset;
405	/*
406	 * No need to translate into a callthunk. The trampoline does
407	 * the depth accounting before the call already.
408	 */
409	dest = ftrace_ops_get_func(ops);
410	text_poke_copy_locked(trampoline + call_offset,
411	      text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
412	      CALL_INSN_SIZE, false);
413	mutex_unlock(&text_mutex);
414
415	/* ALLOC_TRAMP flags lets us know we created it */
416	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
417
418	set_memory_rox((unsigned long)trampoline, npages);
419	return (unsigned long)trampoline;
420fail:
421	tramp_free(trampoline);
422	return 0;
423}
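/*
 * The resulting trampoline layout, following the sizes used in
 * create_trampoline() above (a sketch):
 *
 *	trampoline + 0			copy of ftrace[_regs]_caller
 *	trampoline + size		return thunk jmp or retq (RET_SIZE)
 *	trampoline + size + RET_SIZE	pointer to this ftrace_ops
 *
 * The movq at op_offset is rewritten so its RIP-relative displacement
 * targets that trailing ops pointer instead of function_trace_op.
 */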
424
425void set_ftrace_ops_ro(void)
426{
427	struct ftrace_ops *ops;
428	unsigned long start_offset;
429	unsigned long end_offset;
430	unsigned long npages;
431	unsigned long size;
432
433	do_for_each_ftrace_op(ops, ftrace_ops_list) {
434		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
435			continue;
436
437		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
438			start_offset = (unsigned long)ftrace_regs_caller;
439			end_offset = (unsigned long)ftrace_regs_caller_end;
440		} else {
441			start_offset = (unsigned long)ftrace_caller;
442			end_offset = (unsigned long)ftrace_caller_end;
443		}
444		size = end_offset - start_offset;
445		size = size + RET_SIZE + sizeof(void *);
446		npages = DIV_ROUND_UP(size, PAGE_SIZE);
447		set_memory_ro((unsigned long)ops->trampoline, npages);
448	} while_for_each_ftrace_op(ops);
449}
450
451static unsigned long calc_trampoline_call_offset(bool save_regs)
452{
453	unsigned long start_offset;
454	unsigned long call_offset;
455
456	if (save_regs) {
457		start_offset = (unsigned long)ftrace_regs_caller;
458		call_offset = (unsigned long)ftrace_regs_call;
459	} else {
460		start_offset = (unsigned long)ftrace_caller;
461		call_offset = (unsigned long)ftrace_call;
462	}
463
464	return call_offset - start_offset;
465}
466
467void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
468{
469	ftrace_func_t func;
470	unsigned long offset;
471	unsigned long ip;
472	unsigned int size;
473	const char *new;
474
475	if (!ops->trampoline) {
476		ops->trampoline = create_trampoline(ops, &size);
477		if (!ops->trampoline)
478			return;
479		ops->trampoline_size = size;
480		return;
481	}
482
483	/*
484	 * The ftrace_ops caller may set up its own trampoline.
485	 * In such a case, this code must not modify it.
486	 */
487	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
488		return;
489
490	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
491	ip = ops->trampoline + offset;
492	func = ftrace_ops_get_func(ops);
493
494	mutex_lock(&text_mutex);
495	/* Do a safe modify in case the trampoline is executing */
496	new = ftrace_call_replace(ip, (unsigned long)func);
497	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
498	mutex_unlock(&text_mutex);
499}
500
501/* Return the address of the function the trampoline calls */
502static void *addr_from_call(void *ptr)
503{
504	union text_poke_insn call;
505	int ret;
506
507	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
508	if (WARN_ON_ONCE(ret < 0))
509		return NULL;
510
511	/* Make sure this is a call */
512	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
513		pr_warn("Expected E8, got %x\n", call.opcode);
514		return NULL;
515	}
516
517	return ptr + CALL_INSN_SIZE + call.disp;
518}
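/*
 * The arithmetic above in isolation (a user-space sketch): a CALL's
 * destination is the address of the following instruction plus the
 * signed 32-bit displacement stored in bytes 1..4.
 */
#include <stdint.h>
#include <string.h>

static uint64_t decode_call_target(const uint8_t insn[5], uint64_t ip)
{
	int32_t disp;

	memcpy(&disp, &insn[1], sizeof(disp));	/* bytes 1..4: rel32 */
	return ip + 5 + disp;			/* 5 == CALL_INSN_SIZE */
}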
519
520/*
521 * If the ops->trampoline was not allocated, then it probably
522 * has a static trampoline func, or is the ftrace caller itself.
523 */
524static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
525{
526	unsigned long offset;
527	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
528	void *ptr;
529
530	if (ops && ops->trampoline) {
531#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
532	defined(CONFIG_FUNCTION_GRAPH_TRACER)
533		/*
534		 * We only know about function graph tracer setting as static
535		 * trampoline.
536		 */
537		if (ops->trampoline == FTRACE_GRAPH_ADDR)
538			return (void *)prepare_ftrace_return;
539#endif
540		return NULL;
541	}
542
543	offset = calc_trampoline_call_offset(save_regs);
544
545	if (save_regs)
546		ptr = (void *)FTRACE_REGS_ADDR + offset;
547	else
548		ptr = (void *)FTRACE_ADDR + offset;
549
550	return addr_from_call(ptr);
551}
552
553void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
554{
555	unsigned long offset;
556
557	/* If we didn't allocate this trampoline, consider it static */
558	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
559		return static_tramp_func(ops, rec);
560
561	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
562	return addr_from_call((void *)ops->trampoline + offset);
563}
564
565void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
566{
567	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
568		return;
569
570	tramp_free((void *)ops->trampoline);
571	ops->trampoline = 0;
572}
573
574#endif /* CONFIG_X86_64 */
575#endif /* CONFIG_DYNAMIC_FTRACE */
576
577#ifdef CONFIG_FUNCTION_GRAPH_TRACER
578
579#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
580extern void ftrace_graph_call(void);
581static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
582{
583	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
584}
585
586static int ftrace_mod_jmp(unsigned long ip, void *func)
587{
588	const char *new;
589
590	new = ftrace_jmp_replace(ip, (unsigned long)func);
591	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
592	return 0;
593}
594
595int ftrace_enable_ftrace_graph_caller(void)
596{
597	unsigned long ip = (unsigned long)(&ftrace_graph_call);
598
599	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
600}
601
602int ftrace_disable_ftrace_graph_caller(void)
603{
604	unsigned long ip = (unsigned long)(&ftrace_graph_call);
605
606	return ftrace_mod_jmp(ip, &ftrace_stub);
607}
608#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
609
610/*
611 * Hook the return address and push it in the stack of return addrs
612 * in current thread info.
613 */
614void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
615			   unsigned long frame_pointer)
616{
617	unsigned long return_hooker = (unsigned long)&return_to_handler;
618	int bit;
619
620	/*
621	 * When resuming from suspend-to-ram, this function can be indirectly
622	 * called from early CPU startup code while the CPU is in real mode,
623	 * which would fail miserably.  Make sure the stack pointer is a
624	 * virtual address.
625	 *
626	 * This check isn't as accurate as virt_addr_valid(), but it should be
627	 * good enough for this purpose, and it's fast.
628	 */
629	if (unlikely((long)__builtin_frame_address(0) >= 0))
630		return;
631
632	if (unlikely(ftrace_graph_is_dead()))
633		return;
634
635	if (unlikely(atomic_read(&current->tracing_graph_pause)))
636		return;
637
638	bit = ftrace_test_recursion_trylock(ip, *parent);
639	if (bit < 0)
640		return;
641
642	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
643		*parent = return_hooker;
644
645	ftrace_test_recursion_unlock(bit);
646}
647
648#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
649void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
650		       struct ftrace_ops *op, struct ftrace_regs *fregs)
651{
652	struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
653	unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);
654
655	prepare_ftrace_return(ip, (unsigned long *)stack, 0);
656}
657#endif
658
659#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Dynamic function tracing support.
   4 *
   5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
   6 *
   7 * Thanks go to Ingo Molnar, for suggesting the idea.
   8 * Mathieu Desnoyers, for suggesting postponing the modifications.
   9 * Arjan van de Ven, for keeping me straight, and explaining to me
  10 * the dangers of modifying code on the run.
  11 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/spinlock.h>
  16#include <linux/hardirq.h>
  17#include <linux/uaccess.h>
  18#include <linux/ftrace.h>
  19#include <linux/percpu.h>
  20#include <linux/sched.h>
  21#include <linux/slab.h>
  22#include <linux/init.h>
  23#include <linux/list.h>
  24#include <linux/module.h>
  25
  26#include <trace/syscall.h>
  27
  28#include <asm/set_memory.h>
  29#include <asm/kprobes.h>
  30#include <asm/ftrace.h>
  31#include <asm/nops.h>
  32
  33#ifdef CONFIG_DYNAMIC_FTRACE
  34
  35int ftrace_arch_code_modify_prepare(void)
  36{
  37	set_kernel_text_rw();
  38	set_all_modules_text_rw();
  39	return 0;
  40}
  41
  42int ftrace_arch_code_modify_post_process(void)
  43{
  44	set_all_modules_text_ro();
  45	set_kernel_text_ro();
  46	return 0;
  47}
  48
  49union ftrace_code_union {
  50	char code[MCOUNT_INSN_SIZE];
  51	struct {
  52		unsigned char e8;
  53		int offset;
  54	} __attribute__((packed));
  55};
  56
  57static int ftrace_calc_offset(long ip, long addr)
  58{
  59	return (int)(addr - ip);
  60}
  61
  62static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  63{
  64	static union ftrace_code_union calc;
  65
  66	calc.e8		= 0xe8;
  67	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
  68
  69	/*
  70	 * No locking needed, this must be called via kstop_machine
  71	 * which in essence is like running on a uniprocessor machine.
  72	 */
  73	return calc.code;
  74}
  75
  76static inline int
  77within(unsigned long addr, unsigned long start, unsigned long end)
  78{
  79	return addr >= start && addr < end;
  80}
  81
  82static unsigned long text_ip_addr(unsigned long ip)
  83{
  84	/*
  85	 * On x86_64, kernel text mappings are mapped read-only, so we use
  86	 * the kernel identity mapping instead of the kernel text mapping
  87	 * to modify the kernel text.
  88	 *
  89	 * For 32bit kernels, these mappings are same and we can use
  90	 * kernel identity mapping to modify code.
  91	 */
  92	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
  93		ip = (unsigned long)__va(__pa_symbol(ip));
  94
  95	return ip;
  96}
  97
  98static const unsigned char *ftrace_nop_replace(void)
  99{
 100	return ideal_nops[NOP_ATOMIC5];
 101}
 102
 103static int
 104ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
 105		   unsigned const char *new_code)
 106{
 107	unsigned char replaced[MCOUNT_INSN_SIZE];
 108
 109	ftrace_expected = old_code;
 110
 111	/*
 112	 * Note:
 113	 * We are paranoid about modifying text, as if a bug was to happen, it
 114	 * could cause us to read or write to someplace that could cause harm.
 115	 * Carefully read and modify the code with probe_kernel_*(), and make
 116	 * sure what we read is what we expected it to be before modifying it.
 117	 */
 118
 119	/* read the text we want to modify */
 120	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
 121		return -EFAULT;
 122
 123	/* Make sure it is what we expect it to be */
 124	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
 125		return -EINVAL;
 126
 127	ip = text_ip_addr(ip);
 128
 129	/* replace the text with the new text */
 130	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
 131		return -EPERM;
 132
 133	sync_core();
 134
 135	return 0;
 136}
 137
 138int ftrace_make_nop(struct module *mod,
 139		    struct dyn_ftrace *rec, unsigned long addr)
 140{
 141	unsigned const char *new, *old;
 142	unsigned long ip = rec->ip;
 143
 144	old = ftrace_call_replace(ip, addr);
 145	new = ftrace_nop_replace();
 146
 147	/*
 148	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
 149	 * is converted to a nop, and will never become MCOUNT_ADDR
 150	 * again. This code is either running before SMP (on boot up)
 151	 * or before the code will ever be executed (module load).
 152	 * We do not want to use the breakpoint version in this case,
 153	 * just modify the code directly.
 154	 */
 155	if (addr == MCOUNT_ADDR)
 156		return ftrace_modify_code_direct(rec->ip, old, new);
 157
 158	ftrace_expected = NULL;
 159
 160	/* Normal cases use add_brk_on_nop */
 161	WARN_ONCE(1, "invalid use of ftrace_make_nop");
 162	return -EINVAL;
 163}
 164
 165int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 166{
 167	unsigned const char *new, *old;
 168	unsigned long ip = rec->ip;
 169
 170	old = ftrace_nop_replace();
 171	new = ftrace_call_replace(ip, addr);
 172
 173	/* Should only be called when module is loaded */
 174	return ftrace_modify_code_direct(rec->ip, old, new);
 175}
 176
 177/*
 178 * The modifying_ftrace_code is used to tell the breakpoint
 179 * handler to call ftrace_int3_handler(). If it fails to
 180 * call this handler for a breakpoint added by ftrace, then
 181 * the kernel may crash.
 182 *
 183 * As atomic_writes on x86 do not need a barrier, we do not
 184 * need to add smp_mb()s for this to work. It is also considered
 185 * that we can not read the modifying_ftrace_code before
 186 * executing the breakpoint. That would be quite remarkable if
 187 * it could do that. Here's the flow that is required:
 188 *
 189 *   CPU-0                          CPU-1
 190 *
 191 * atomic_inc(mfc);
 192 * write int3s
 193 *				<trap-int3> // implicit (r)mb
 194 *				if (atomic_read(mfc))
 195 *					call ftrace_int3_handler()
 196 *
 197 * Then when we are finished:
 198 *
 199 * atomic_dec(mfc);
 200 *
 201 * If we hit a breakpoint that was not set by ftrace, it does not
 202 * matter if ftrace_int3_handler() is called or not. It will
 203 * simply be ignored. But it is crucial that a ftrace nop/caller
 204 * breakpoint is handled. No other user should ever place a
 205 * breakpoint on an ftrace nop/caller location. It must only
 206 * be done by this code.
 207 */
 208atomic_t modifying_ftrace_code __read_mostly;
 209
 210static int
 211ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
 212		   unsigned const char *new_code);
 213
 214/*
 215 * Should never be called:
 216 *  As it is only called by __ftrace_replace_code() which is called by
 217 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 218 *  which is called to turn mcount into nops or nops into function calls
 219 *  but not to convert a function from not using regs to one that uses
 220 *  regs, which ftrace_modify_call() is for.
 221 */
 222int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 223				 unsigned long addr)
 224{
 225	WARN_ON(1);
 226	ftrace_expected = NULL;
 227	return -EINVAL;
 228}
 229
 230static unsigned long ftrace_update_func;
 231
 232static int update_ftrace_func(unsigned long ip, void *new)
 233{
 234	unsigned char old[MCOUNT_INSN_SIZE];
 235	int ret;
 236
 237	memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
 238
 239	ftrace_update_func = ip;
 240	/* Make sure the breakpoints see the ftrace_update_func update */
 241	smp_wmb();
 242
 243	/* See comment above by declaration of modifying_ftrace_code */
 244	atomic_inc(&modifying_ftrace_code);
 245
 246	ret = ftrace_modify_code(ip, old, new);
 247
 248	atomic_dec(&modifying_ftrace_code);
 249
 250	return ret;
 251}
 252
 253int ftrace_update_ftrace_func(ftrace_func_t func)
 254{
 255	unsigned long ip = (unsigned long)(&ftrace_call);
 256	unsigned char *new;
 257	int ret;
 258
 259	new = ftrace_call_replace(ip, (unsigned long)func);
 260	ret = update_ftrace_func(ip, new);
 261
 262	/* Also update the regs callback function */
 263	if (!ret) {
 264		ip = (unsigned long)(&ftrace_regs_call);
 265		new = ftrace_call_replace(ip, (unsigned long)func);
 266		ret = update_ftrace_func(ip, new);
 267	}
 268
 269	return ret;
 270}
 271
 272static int is_ftrace_caller(unsigned long ip)
 273{
 274	if (ip == ftrace_update_func)
 275		return 1;
 276
 277	return 0;
 278}
 279
 280/*
 281 * A breakpoint was added to the code address we are about to
 282 * modify, and this is the handler that will just skip over it.
 283 * We are either changing a nop into a trace call, or a trace
 284 * call to a nop. While the change is taking place, we treat
 285 * it just like it was a nop.
 286 */
 287int ftrace_int3_handler(struct pt_regs *regs)
 288{
 289	unsigned long ip;
 290
 291	if (WARN_ON_ONCE(!regs))
 292		return 0;
 293
 294	ip = regs->ip - 1;
 295	if (!ftrace_location(ip) && !is_ftrace_caller(ip))
 296		return 0;
 297
 298	regs->ip += MCOUNT_INSN_SIZE - 1;
 299
 300	return 1;
 301}
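/*
 * The ip arithmetic above, spelled out (a sketch): int3 is a single
 * byte, so the trap leaves regs->ip one byte past the start of the
 * patched site. Advancing by MCOUNT_INSN_SIZE - 1 resumes execution
 * right after the 5-byte slot, making the site behave like a nop for
 * as long as the breakpoint is in place:
 *
 *	site      = regs->ip - 1
 *	resume at = site + MCOUNT_INSN_SIZE
 */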
 302
 303static int ftrace_write(unsigned long ip, const char *val, int size)
 304{
 305	ip = text_ip_addr(ip);
 306
 307	if (probe_kernel_write((void *)ip, val, size))
 308		return -EPERM;
 309
 310	return 0;
 311}
 312
 313static int add_break(unsigned long ip, const char *old)
 314{
 315	unsigned char replaced[MCOUNT_INSN_SIZE];
 316	unsigned char brk = BREAKPOINT_INSTRUCTION;
 317
 318	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
 319		return -EFAULT;
 320
 321	ftrace_expected = old;
 322
 323	/* Make sure it is what we expect it to be */
 324	if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
 325		return -EINVAL;
 326
 327	return ftrace_write(ip, &brk, 1);
 328}
 329
 330static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
 331{
 332	unsigned const char *old;
 333	unsigned long ip = rec->ip;
 334
 335	old = ftrace_call_replace(ip, addr);
 336
 337	return add_break(rec->ip, old);
 338}
 339
 340
 341static int add_brk_on_nop(struct dyn_ftrace *rec)
 342{
 343	unsigned const char *old;
 344
 345	old = ftrace_nop_replace();
 346
 347	return add_break(rec->ip, old);
 348}
 349
 350static int add_breakpoints(struct dyn_ftrace *rec, int enable)
 351{
 352	unsigned long ftrace_addr;
 353	int ret;
 354
 355	ftrace_addr = ftrace_get_addr_curr(rec);
 356
 357	ret = ftrace_test_record(rec, enable);
 358
 359	switch (ret) {
 360	case FTRACE_UPDATE_IGNORE:
 361		return 0;
 362
 363	case FTRACE_UPDATE_MAKE_CALL:
 364		/* converting nop to call */
 365		return add_brk_on_nop(rec);
 366
 367	case FTRACE_UPDATE_MODIFY_CALL:
 368	case FTRACE_UPDATE_MAKE_NOP:
 369		/* converting a call to a nop */
 370		return add_brk_on_call(rec, ftrace_addr);
 371	}
 372	return 0;
 373}
 374
 375/*
 376 * On error, we need to remove breakpoints. This needs to
 377 * be done carefully. If the address does not currently have a
 378 * breakpoint, we know we are done. Otherwise, we look at the
 379 * remaining 4 bytes of the instruction. If it matches a nop
 380 * we replace the breakpoint with the nop. Otherwise we replace
 381 * it with the call instruction.
 382 */
 383static int remove_breakpoint(struct dyn_ftrace *rec)
 384{
 385	unsigned char ins[MCOUNT_INSN_SIZE];
 386	unsigned char brk = BREAKPOINT_INSTRUCTION;
 387	const unsigned char *nop;
 388	unsigned long ftrace_addr;
 389	unsigned long ip = rec->ip;
 390
 391	/* If we fail the read, just give up */
 392	if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
 393		return -EFAULT;
 394
 395	/* If this does not have a breakpoint, we are done */
 396	if (ins[0] != brk)
 397		return 0;
 398
 399	nop = ftrace_nop_replace();
 400
 401	/*
 402	 * If the last 4 bytes of the instruction do not match
 403	 * a nop, then we assume that this is a call to ftrace_addr.
 404	 */
 405	if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
 406		/*
 407		 * For extra paranoia, we check if the breakpoint is on
 408		 * a call that would actually jump to the ftrace_addr.
 409		 * If not, don't touch the breakpoint; we might just create
 410		 * a disaster.
 411		 */
 412		ftrace_addr = ftrace_get_addr_new(rec);
 413		nop = ftrace_call_replace(ip, ftrace_addr);
 414
 415		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
 416			goto update;
 417
 418		/* Check both ftrace_addr and ftrace_old_addr */
 419		ftrace_addr = ftrace_get_addr_curr(rec);
 420		nop = ftrace_call_replace(ip, ftrace_addr);
 421
 422		ftrace_expected = nop;
 423
 424		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
 425			return -EINVAL;
 426	}
 427
 428 update:
 429	return ftrace_write(ip, nop, 1);
 430}
 431
 432static int add_update_code(unsigned long ip, unsigned const char *new)
 433{
 434	/* skip breakpoint */
 435	ip++;
 436	new++;
 437	return ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1);
 438}
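/*
 * Why writing bytes 1..4 before byte 0 is safe (a sketch of the
 * ordering): while byte 0 is still the int3 breakpoint, no CPU can
 * execute the stale tail -- anything reaching the site traps on
 * byte 0 and is diverted by ftrace_int3_handler().
 *
 *	before:	 cc <old tail, 4 bytes>
 *	step 1:	 cc <new tail, 4 bytes>		add_update_code()
 *	step 2:	 e8/nop <new tail>		finish_update_*() writes byte 0
 */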
 439
 440static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
 441{
 442	unsigned long ip = rec->ip;
 443	unsigned const char *new;
 444
 445	new = ftrace_call_replace(ip, addr);
 446	return add_update_code(ip, new);
 447}
 448
 449static int add_update_nop(struct dyn_ftrace *rec)
 450{
 451	unsigned long ip = rec->ip;
 452	unsigned const char *new;
 453
 454	new = ftrace_nop_replace();
 455	return add_update_code(ip, new);
 456}
 457
 458static int add_update(struct dyn_ftrace *rec, int enable)
 459{
 460	unsigned long ftrace_addr;
 461	int ret;
 462
 463	ret = ftrace_test_record(rec, enable);
 464
 465	ftrace_addr  = ftrace_get_addr_new(rec);
 466
 467	switch (ret) {
 468	case FTRACE_UPDATE_IGNORE:
 469		return 0;
 470
 471	case FTRACE_UPDATE_MODIFY_CALL:
 472	case FTRACE_UPDATE_MAKE_CALL:
 473		/* converting nop to call */
 474		return add_update_call(rec, ftrace_addr);
 475
 476	case FTRACE_UPDATE_MAKE_NOP:
 477		/* converting a call to a nop */
 478		return add_update_nop(rec);
 479	}
 480
 481	return 0;
 482}
 483
 484static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
 485{
 486	unsigned long ip = rec->ip;
 487	unsigned const char *new;
 488
 489	new = ftrace_call_replace(ip, addr);
 490
 491	return ftrace_write(ip, new, 1);
 492}
 493
 494static int finish_update_nop(struct dyn_ftrace *rec)
 495{
 496	unsigned long ip = rec->ip;
 497	unsigned const char *new;
 498
 499	new = ftrace_nop_replace();
 500
 501	return ftrace_write(ip, new, 1);
 502}
 503
 504static int finish_update(struct dyn_ftrace *rec, int enable)
 505{
 506	unsigned long ftrace_addr;
 507	int ret;
 508
 509	ret = ftrace_update_record(rec, enable);
 510
 511	ftrace_addr = ftrace_get_addr_new(rec);
 512
 513	switch (ret) {
 514	case FTRACE_UPDATE_IGNORE:
 515		return 0;
 516
 517	case FTRACE_UPDATE_MODIFY_CALL:
 518	case FTRACE_UPDATE_MAKE_CALL:
 519		/* converting nop to call */
 520		return finish_update_call(rec, ftrace_addr);
 521
 522	case FTRACE_UPDATE_MAKE_NOP:
 523		/* converting a call to a nop */
 524		return finish_update_nop(rec);
 525	}
 526
 527	return 0;
 528}
 529
 530static void do_sync_core(void *data)
 531{
 532	sync_core();
 533}
 534
 535static void run_sync(void)
 536{
 537	int enable_irqs;
 538
 539	/* No need to sync if there's only one CPU */
 540	if (num_online_cpus() == 1)
 541		return;
 542
 543	enable_irqs = irqs_disabled();
 544
 545	/* We may be called with interrupts disabled (on bootup). */
 546	if (enable_irqs)
 547		local_irq_enable();
 548	on_each_cpu(do_sync_core, NULL, 1);
 549	if (enable_irqs)
 550		local_irq_disable();
 551}
 552
 553void ftrace_replace_code(int enable)
 554{
 555	struct ftrace_rec_iter *iter;
 556	struct dyn_ftrace *rec;
 557	const char *report = "adding breakpoints";
 558	int count = 0;
 559	int ret;
 560
 561	for_ftrace_rec_iter(iter) {
 562		rec = ftrace_rec_iter_record(iter);
 563
 564		ret = add_breakpoints(rec, enable);
 565		if (ret)
 566			goto remove_breakpoints;
 567		count++;
 568	}
 569
 570	run_sync();
 571
 572	report = "updating code";
 573	count = 0;
 574
 575	for_ftrace_rec_iter(iter) {
 576		rec = ftrace_rec_iter_record(iter);
 577
 578		ret = add_update(rec, enable);
 579		if (ret)
 580			goto remove_breakpoints;
 581		count++;
 582	}
 583
 584	run_sync();
 585
 586	report = "removing breakpoints";
 587	count = 0;
 588
 589	for_ftrace_rec_iter(iter) {
 590		rec = ftrace_rec_iter_record(iter);
 591
 592		ret = finish_update(rec, enable);
 593		if (ret)
 594			goto remove_breakpoints;
 595		count++;
 596	}
 597
 598	run_sync();
 599
 600	return;
 601
 602 remove_breakpoints:
 603	pr_warn("Failed on %s (%d):\n", report, count);
 604	ftrace_bug(ret, rec);
 605	for_ftrace_rec_iter(iter) {
 606		rec = ftrace_rec_iter_record(iter);
 607		/*
 608		 * Breakpoints are handled only when this function is in
 609		 * progress. The system could not work with them.
 610		 */
 611		if (remove_breakpoint(rec))
 612			BUG();
 613	}
 614	run_sync();
 615}
 616
 617static int
 618ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
 619		   unsigned const char *new_code)
 620{
 621	int ret;
 622
 623	ret = add_break(ip, old_code);
 624	if (ret)
 625		goto out;
 626
 627	run_sync();
 628
 629	ret = add_update_code(ip, new_code);
 630	if (ret)
 631		goto fail_update;
 632
 633	run_sync();
 634
 635	ret = ftrace_write(ip, new_code, 1);
 636	/*
 637	 * The breakpoint is handled only when this function is in progress.
 638	 * The system could not work if we could not remove it.
 639	 */
 640	BUG_ON(ret);
 641 out:
 642	run_sync();
 643	return ret;
 644
 645 fail_update:
 646	/* Also here the system could not work with the breakpoint */
 647	if (ftrace_write(ip, old_code, 1))
 648		BUG();
 649	goto out;
 650}
 651
 652void arch_ftrace_update_code(int command)
 653{
 654	/* See comment above by declaration of modifying_ftrace_code */
 655	atomic_inc(&modifying_ftrace_code);
 656
 657	ftrace_modify_all_code(command);
 658
 659	atomic_dec(&modifying_ftrace_code);
 660}
 661
 662int __init ftrace_dyn_arch_init(void)
 663{
 664	return 0;
 665}
 666
 667#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
 668static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
 669{
 670	static union ftrace_code_union calc;
 671
 672	/* Jmp not a call (ignore the .e8) */
 673	calc.e8		= 0xe9;
 674	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
 675
 676	/*
 677	 * ftrace external locks synchronize the access to the static variable.
 678	 */
 679	return calc.code;
 680}
 681#endif
 682
 683/* Currently only x86_64 supports dynamic trampolines */
 684#ifdef CONFIG_X86_64
 685
 686#ifdef CONFIG_MODULES
 687#include <linux/moduleloader.h>
 688/* Module allocation simplifies allocating memory for code */
 689static inline void *alloc_tramp(unsigned long size)
 690{
 691	return module_alloc(size);
 692}
 693static inline void tramp_free(void *tramp, int size)
 694{
 695	int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 696
 697	set_memory_nx((unsigned long)tramp, npages);
 698	set_memory_rw((unsigned long)tramp, npages);
 699	module_memfree(tramp);
 700}
 701#else
 702/* Trampolines can only be created if modules are supported */
 703static inline void *alloc_tramp(unsigned long size)
 704{
 705	return NULL;
 706}
 707static inline void tramp_free(void *tramp, int size) { }
 708#endif
 709
 710/* Defined as markers to the end of the ftrace default trampolines */
 711extern void ftrace_regs_caller_end(void);
 712extern void ftrace_epilogue(void);
 713extern void ftrace_caller_op_ptr(void);
 714extern void ftrace_regs_caller_op_ptr(void);
 715
 716/* movq function_trace_op(%rip), %rdx */
 717/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
 718#define OP_REF_SIZE	7
 719
 720/*
 721 * The ftrace_ops is passed to the function callback. Since the
 722 * trampoline only services a single ftrace_ops, we can pass in
 723 * that ops directly.
 724 *
 725 * The ftrace_op_code_union is used to create a pointer to the
 726 * ftrace_ops that will be passed to the callback function.
 727 */
 728union ftrace_op_code_union {
 729	char code[OP_REF_SIZE];
 730	struct {
 731		char op[3];
 732		int offset;
 733	} __attribute__((packed));
 734};
 735
 736static unsigned long
 737create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 738{
 739	unsigned const char *jmp;
 740	unsigned long start_offset;
 741	unsigned long end_offset;
 742	unsigned long op_offset;
 743	unsigned long offset;
 744	unsigned long size;
 745	unsigned long ip;
 746	unsigned long *ptr;
 747	void *trampoline;
 748	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
 749	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
 750	union ftrace_op_code_union op_ptr;
 751	int ret;
 752
 753	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
 754		start_offset = (unsigned long)ftrace_regs_caller;
 755		end_offset = (unsigned long)ftrace_regs_caller_end;
 756		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
 757	} else {
 758		start_offset = (unsigned long)ftrace_caller;
 759		end_offset = (unsigned long)ftrace_epilogue;
 760		op_offset = (unsigned long)ftrace_caller_op_ptr;
 761	}
 762
 763	size = end_offset - start_offset;
 764
 765	/*
 766	 * Allocate enough size to store the ftrace_caller code,
 767	 * the jmp to ftrace_epilogue, as well as the address of
 768	 * the ftrace_ops this trampoline is used for.
 769	 */
 770	trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));
 771	if (!trampoline)
 772		return 0;
 773
 774	*tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);
 775
 776	/* Copy ftrace_caller onto the trampoline memory */
 777	ret = probe_kernel_read(trampoline, (void *)start_offset, size);
 778	if (WARN_ON(ret < 0)) {
 779		tramp_free(trampoline, *tramp_size);
 780		return 0;
 781	}
 782
 783	ip = (unsigned long)trampoline + size;
 784
 785	/* The trampoline ends with a jmp to ftrace_epilogue */
 786	jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_epilogue);
 787	memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);
 788
 789	/*
 790	 * The address of the ftrace_ops that is used for this trampoline
 791	 * is stored at the end of the trampoline. This will be used to
 792	 * load the third parameter for the callback. Basically, that
 793	 * location at the end of the trampoline takes the place of
 794	 * the global function_trace_op variable.
 795	 */
 796
 797	ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
 798	*ptr = (unsigned long)ops;
 799
 800	op_offset -= start_offset;
 801	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
 802
 803	/* Are we pointing to the reference? */
 804	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
 805		tramp_free(trampoline, *tramp_size);
 806		return 0;
 807	}
 808
 809	/* Load the contents of ptr into the callback parameter */
 810	offset = (unsigned long)ptr;
 811	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;
 812
 813	op_ptr.offset = offset;
 814
 815	/* put in the new offset to the ftrace_ops */
 816	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
 817
 818	/* ALLOC_TRAMP flags lets us know we created it */
 819	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
 820
 821	return (unsigned long)trampoline;
 822}
 823
 824static unsigned long calc_trampoline_call_offset(bool save_regs)
 825{
 826	unsigned long start_offset;
 827	unsigned long call_offset;
 828
 829	if (save_regs) {
 830		start_offset = (unsigned long)ftrace_regs_caller;
 831		call_offset = (unsigned long)ftrace_regs_call;
 832	} else {
 833		start_offset = (unsigned long)ftrace_caller;
 834		call_offset = (unsigned long)ftrace_call;
 835	}
 836
 837	return call_offset - start_offset;
 838}
 839
 840void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
 841{
 842	ftrace_func_t func;
 843	unsigned char *new;
 844	unsigned long offset;
 845	unsigned long ip;
 846	unsigned int size;
 847	int ret, npages;
 848
 849	if (ops->trampoline) {
 850		/*
 851		 * The ftrace_ops caller may set up its own trampoline.
 852		 * In such a case, this code must not modify it.
 853		 */
 854		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
 855			return;
 856		npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT;
 857		set_memory_rw(ops->trampoline, npages);
 858	} else {
 859		ops->trampoline = create_trampoline(ops, &size);
 860		if (!ops->trampoline)
 861			return;
 862		ops->trampoline_size = size;
 863		npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 864	}
 865
 866	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
 867	ip = ops->trampoline + offset;
 868
 869	func = ftrace_ops_get_func(ops);
 870
 871	/* Do a safe modify in case the trampoline is executing */
 872	new = ftrace_call_replace(ip, (unsigned long)func);
 873	ret = update_ftrace_func(ip, new);
 874	set_memory_ro(ops->trampoline, npages);
 875
 876	/* The update should never fail */
 877	WARN_ON(ret);
 878}
 879
 880/* Return the address of the function the trampoline calls */
 881static void *addr_from_call(void *ptr)
 882{
 883	union ftrace_code_union calc;
 884	int ret;
 885
 886	ret = probe_kernel_read(&calc, ptr, MCOUNT_INSN_SIZE);
 887	if (WARN_ON_ONCE(ret < 0))
 888		return NULL;
 889
 890	/* Make sure this is a call */
 891	if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
 892		pr_warn("Expected e8, got %x\n", calc.e8);
 893		return NULL;
 894	}
 895
 896	return ptr + MCOUNT_INSN_SIZE + calc.offset;
 897}
 898
 899void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 900			   unsigned long frame_pointer);
 901
 902/*
 903 * If the ops->trampoline was not allocated, then it probably
 904 * has a static trampoline func, or is the ftrace caller itself.
 905 */
 906static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
 907{
 908	unsigned long offset;
 909	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
 910	void *ptr;
 911
 912	if (ops && ops->trampoline) {
 913#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 914		/*
 915		 * We only know about function graph tracer setting as static
 916		 * trampoline.
 917		 */
 918		if (ops->trampoline == FTRACE_GRAPH_ADDR)
 919			return (void *)prepare_ftrace_return;
 920#endif
 921		return NULL;
 922	}
 923
 924	offset = calc_trampoline_call_offset(save_regs);
 925
 926	if (save_regs)
 927		ptr = (void *)FTRACE_REGS_ADDR + offset;
 928	else
 929		ptr = (void *)FTRACE_ADDR + offset;
 930
 931	return addr_from_call(ptr);
 932}
 933
 934void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
 935{
 936	unsigned long offset;
 937
 938	/* If we didn't allocate this trampoline, consider it static */
 939	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
 940		return static_tramp_func(ops, rec);
 941
 942	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
 943	return addr_from_call((void *)ops->trampoline + offset);
 944}
 945
 946void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
 947{
 948	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
 949		return;
 950
 951	tramp_free((void *)ops->trampoline, ops->trampoline_size);
 952	ops->trampoline = 0;
 953}
 954
 955#endif /* CONFIG_X86_64 */
 956#endif /* CONFIG_DYNAMIC_FTRACE */
 957
 958#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 959
 960#ifdef CONFIG_DYNAMIC_FTRACE
 961extern void ftrace_graph_call(void);
 962
 963static int ftrace_mod_jmp(unsigned long ip, void *func)
 964{
 965	unsigned char *new;
 966
 967	new = ftrace_jmp_replace(ip, (unsigned long)func);
 968
 969	return update_ftrace_func(ip, new);
 970}
 971
 972int ftrace_enable_ftrace_graph_caller(void)
 973{
 974	unsigned long ip = (unsigned long)(&ftrace_graph_call);
 975
 976	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
 977}
 978
 979int ftrace_disable_ftrace_graph_caller(void)
 980{
 981	unsigned long ip = (unsigned long)(&ftrace_graph_call);
 982
 983	return ftrace_mod_jmp(ip, &ftrace_stub);
 984}
 985
 986#endif /* CONFIG_DYNAMIC_FTRACE */
 987
 988/*
 989 * Hook the return address and push it in the stack of return addrs
 990 * in current thread info.
 991 */
 992void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 993			   unsigned long frame_pointer)
 994{
 995	unsigned long old;
 996	int faulted;
 997	struct ftrace_graph_ent trace;
 998	unsigned long return_hooker = (unsigned long)
 999				&return_to_handler;
1000
1001	/*
1002	 * When resuming from suspend-to-ram, this function can be indirectly
1003	 * called from early CPU startup code while the CPU is in real mode,
1004	 * which would fail miserably.  Make sure the stack pointer is a
1005	 * virtual address.
1006	 *
1007	 * This check isn't as accurate as virt_addr_valid(), but it should be
1008	 * good enough for this purpose, and it's fast.
1009	 */
1010	if (unlikely((long)__builtin_frame_address(0) >= 0))
1011		return;
1012
1013	if (unlikely(ftrace_graph_is_dead()))
1014		return;
1015
1016	if (unlikely(atomic_read(&current->tracing_graph_pause)))
1017		return;
1018
1019	/*
1020	 * Protect against fault, even if it shouldn't
1021	 * happen. This tool is too much intrusive to
1022	 * ignore such a protection.
1023	 */
1024	asm volatile(
1025		"1: " _ASM_MOV " (%[parent]), %[old]\n"
1026		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
1027		"   movl $0, %[faulted]\n"
1028		"3:\n"
1029
1030		".section .fixup, \"ax\"\n"
1031		"4: movl $1, %[faulted]\n"
1032		"   jmp 3b\n"
1033		".previous\n"
1034
1035		_ASM_EXTABLE(1b, 4b)
1036		_ASM_EXTABLE(2b, 4b)
1037
1038		: [old] "=&r" (old), [faulted] "=r" (faulted)
1039		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
1040		: "memory"
1041	);
1042
1043	if (unlikely(faulted)) {
1044		ftrace_graph_stop();
1045		WARN_ON(1);
1046		return;
1047	}
1048
1049	trace.func = self_addr;
1050	trace.depth = current->curr_ret_stack + 1;
1051
1052	/* Only trace if the calling function expects to */
1053	if (!ftrace_graph_entry(&trace)) {
1054		*parent = old;
1055		return;
1056	}
1057
1058	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
1059				     frame_pointer, parent) == -EBUSY) {
1060		*parent = old;
1061		return;
1062	}
1063}
1064#endif /* CONFIG_FUNCTION_GRAPH_TRACER */