Linux Audio

Check our new training course

Loading...
v3.1
  1/*
  2 * Dynamic function tracing support.
  3 *
  4 * Copyright (C) 2008 Shaohua Li <shaohua.li@intel.com>
  5 *
  6 * For licensing details, see COPYING.
  7 *
  8 * Defines low-level handling of mcount calls when the kernel
  9 * is compiled with the -pg flag. When using dynamic ftrace, the
 10 * mcount call-sites get patched lazily with NOP till they are
 11 * enabled. All code mutation routines here take effect atomically.
 12 */
 13
 14#include <linux/uaccess.h>
 15#include <linux/ftrace.h>
 16
 17#include <asm/cacheflush.h>
 18#include <asm/patch.h>
 19
/* In IA64, each function will be added below two bundles with -pg option */
/*
 * The two 16-byte IA-64 bundles that -pg emits at every function entry:
 * an alloc/register-copy prologue followed by "br.call ... _mcount".
 * This is the pattern expected at a call site that has never been patched
 * (compared against in ftrace_make_nop_check()).
 */
static unsigned char __attribute__((aligned(8)))
ftrace_orig_code[MCOUNT_INSN_SIZE] = {
	0x02, 0x40, 0x31, 0x10, 0x80, 0x05, /* alloc r40=ar.pfs,12,8,0 */
	0xb0, 0x02, 0x00, 0x00, 0x42, 0x40, /* mov r43=r0;; */
	0x05, 0x00, 0xc4, 0x00,             /* mov r42=b0 */
	0x11, 0x48, 0x01, 0x02, 0x00, 0x21, /* mov r41=r1 */
	0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* nop.i 0x0 */
	0x08, 0x00, 0x00, 0x50              /* br.call.sptk.many b0 = _mcount;; */
};
 30
/*
 * Bitfield overlay for the 256-bit (two-bundle) ftrace_orig_code pattern,
 * used to mask out the per-call-site br.call displacement before memcmp.
 * dummy1..dummy3 cover the first 192 bits; the remaining fields pick out
 * the imm20 and sign bits of the br.call immediate in the second bundle.
 * Note: 64-41+13 == 36 filler bits; widths sum to 256 bits total.
 */
struct ftrace_orig_insn {
	u64 dummy1, dummy2, dummy3;	/* first bundle + start of second */
	u64 dummy4:64-41+13;		/* padding up to the imm20 field */
	u64 imm20:20;			/* low bits of br.call displacement */
	u64 dummy5:3;
	u64 sign:1;			/* sign bit of the displacement */
	u64 dummy6:4;
};
 39
/* mcount stub will be converted below for nop */
/*
 * Two bundles written over a call site to disable tracing: the first
 * keeps the same "mov r3=ip" that ftrace_call_code has, the second is
 * an all-no-op MLX bundle where the brl call would otherwise sit.
 */
static unsigned char ftrace_nop_code[MCOUNT_INSN_SIZE] = {
	0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
	0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
	0x00, 0x00, 0x04, 0x00,             /* nop.i 0x0 */
	0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* nop.x 0x0;; */
	0x00, 0x00, 0x04, 0x00
};
 49
 50static unsigned char *ftrace_nop_replace(void)
 51{
 52	return ftrace_nop_code;
 53}
 54
/*
 * mcount stub will be converted below for call.
 * Note: Just the last instruction (the MLX bundle's brl) differs from
 * ftrace_nop_code above; its displacement bits are filled in at patch
 * time by ftrace_call_replace().
 */
static unsigned char __attribute__((aligned(8)))
ftrace_call_code[MCOUNT_INSN_SIZE] = {
	0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
	0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
	0x00, 0x00, 0x04, 0x00,             /* nop.i 0x0 */
	0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
	0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, /* brl.many .;;*/
	0xf8, 0xff, 0xff, 0xc8
};
 68
/*
 * Bitfield overlay for the two bundles of ftrace_call_code, exposing the
 * pieces of the brl.many target displacement that live in the second
 * (MLX) bundle: imm39 (split across a u64 boundary, hence _l/_h), imm20
 * and the sign bit i.  Field widths sum to 256 bits == MCOUNT_INSN_SIZE.
 */
struct ftrace_call_insn {
	u64 dummy1, dummy2;	/* first bundle: no fields of interest */
	u64 dummy3:48;
	u64 imm39_l:16;		/* imm39 bits held in the third u64 */
	u64 imm39_h:23;		/* remaining imm39 bits */
	u64 dummy4:13;
	u64 imm20:20;		/* imm20 field of the brl */
	u64 dummy5:3;
	u64 i:1;		/* sign bit of the displacement */
	u64 dummy6:4;
};
 80
 81static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 82{
 83	struct ftrace_call_insn *code = (void *)ftrace_call_code;
 84	unsigned long offset = addr - (ip + 0x10);
 85
 86	code->imm39_l = offset >> 24;
 87	code->imm39_h = offset >> 40;
 88	code->imm20 = offset >> 4;
 89	code->i = offset >> 63;
 90	return ftrace_call_code;
 91}
 92
 93static int
 94ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 95		   unsigned char *new_code, int do_check)
 96{
 97	unsigned char replaced[MCOUNT_INSN_SIZE];
 98
 99	/*
100	 * Note: Due to modules and __init, code can
101	 *  disappear and change, we need to protect against faulting
102	 *  as well as code changing. We do this by using the
103	 *  probe_kernel_* functions.
104	 *
105	 * No real locking needed, this code is run through
106	 * kstop_machine, or before SMP starts.
107	 */
108
109	if (!do_check)
110		goto skip_check;
111
112	/* read the text we want to modify */
113	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
114		return -EFAULT;
115
116	/* Make sure it is what we expect it to be */
117	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
118		return -EINVAL;
119
120skip_check:
121	/* replace the text with the new text */
122	if (probe_kernel_write(((void *)ip), new_code, MCOUNT_INSN_SIZE))
123		return -EPERM;
124	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
125
126	return 0;
127}
128
/*
 * Verify that the text at rec->ip is in a state we know how to NOP out:
 * either the brl call we previously installed (FTRACE_FL_CONVERTED set)
 * or the original compiler-generated br.call to _mcount.  Because the
 * displacement bits vary per call site, they are copied from the text
 * just read into the reference template before the memcmp, so only the
 * fixed opcode bits are actually compared.
 * Returns 0 if recognized, -EFAULT on unreadable text, -EINVAL otherwise.
 */
static int ftrace_make_nop_check(struct dyn_ftrace *rec, unsigned long addr)
{
	/* aligned(8): the bitfield overlays below require bundle alignment */
	unsigned char __attribute__((aligned(8))) replaced[MCOUNT_INSN_SIZE];
	unsigned long ip = rec->ip;

	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;
	if (rec->flags & FTRACE_FL_CONVERTED) {
		struct ftrace_call_insn *call_insn, *tmp_call;

		call_insn = (void *)ftrace_call_code;
		tmp_call = (void *)replaced;
		/* neutralize the per-site displacement before comparing */
		call_insn->imm39_l = tmp_call->imm39_l;
		call_insn->imm39_h = tmp_call->imm39_h;
		call_insn->imm20 = tmp_call->imm20;
		call_insn->i = tmp_call->i;
		if (memcmp(replaced, ftrace_call_code, MCOUNT_INSN_SIZE) != 0)
			return -EINVAL;
		return 0;
	} else {
		struct ftrace_orig_insn *call_insn, *tmp_call;

		call_insn = (void *)ftrace_orig_code;
		tmp_call = (void *)replaced;
		/* neutralize the per-site displacement before comparing */
		call_insn->sign = tmp_call->sign;
		call_insn->imm20 = tmp_call->imm20;
		if (memcmp(replaced, ftrace_orig_code, MCOUNT_INSN_SIZE) != 0)
			return -EINVAL;
		return 0;
	}
}
160
161int ftrace_make_nop(struct module *mod,
162		    struct dyn_ftrace *rec, unsigned long addr)
163{
164	int ret;
165	char *new;
166
167	ret = ftrace_make_nop_check(rec, addr);
168	if (ret)
169		return ret;
170	new = ftrace_nop_replace();
171	return ftrace_modify_code(rec->ip, NULL, new, 0);
172}
173
174int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
175{
176	unsigned long ip = rec->ip;
177	unsigned char *old, *new;
178
179	old=  ftrace_nop_replace();
180	new = ftrace_call_replace(ip, addr);
181	return ftrace_modify_code(ip, old, new, 1);
182}
183
/* in IA64, _mcount can't directly call ftrace_stub. Only jump is ok */
/*
 * Point the patchable jump inside ftrace_call at @func by rewriting the
 * 64-bit immediate that holds the target address.  @func and ftrace_call
 * are ia64 function descriptors (struct fnptr), so the real entry
 * addresses come from their ->ip members.  ftrace_stub is skipped —
 * NOTE(review): presumably it is the default target; confirm in _mcount.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	unsigned long addr = ((struct fnptr *)ftrace_call)->ip;

	if (func == ftrace_stub)
		return 0;
	ip = ((struct fnptr *)func)->ip;

	/* addr + 2: slot offset of the movl immediate within the bundle,
	 * as expected by ia64_patch_imm64() */
	ia64_patch_imm64(addr + 2, ip);

	/* one bundle (16 bytes) was modified */
	flush_icache_range(addr, addr + 16);
	return 0;
}
199
200/* run from kstop_machine */
201int __init ftrace_dyn_arch_init(void *data)
202{
203	*(unsigned long *)data = 0;
204
205	return 0;
206}
v4.6
  1/*
  2 * Dynamic function tracing support.
  3 *
  4 * Copyright (C) 2008 Shaohua Li <shaohua.li@intel.com>
  5 *
  6 * For licensing details, see COPYING.
  7 *
  8 * Defines low-level handling of mcount calls when the kernel
  9 * is compiled with the -pg flag. When using dynamic ftrace, the
 10 * mcount call-sites get patched lazily with NOP till they are
 11 * enabled. All code mutation routines here take effect atomically.
 12 */
 13
 14#include <linux/uaccess.h>
 15#include <linux/ftrace.h>
 16
 17#include <asm/cacheflush.h>
 18#include <asm/patch.h>
 19
/* In IA64, each function will be added below two bundles with -pg option */
/*
 * The two 16-byte IA-64 bundles that -pg emits at every function entry:
 * an alloc/register-copy prologue followed by "br.call ... _mcount".
 * This is the pattern expected at a call site that has never been patched
 * (compared against in ftrace_make_nop_check()).
 */
static unsigned char __attribute__((aligned(8)))
ftrace_orig_code[MCOUNT_INSN_SIZE] = {
	0x02, 0x40, 0x31, 0x10, 0x80, 0x05, /* alloc r40=ar.pfs,12,8,0 */
	0xb0, 0x02, 0x00, 0x00, 0x42, 0x40, /* mov r43=r0;; */
	0x05, 0x00, 0xc4, 0x00,             /* mov r42=b0 */
	0x11, 0x48, 0x01, 0x02, 0x00, 0x21, /* mov r41=r1 */
	0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* nop.i 0x0 */
	0x08, 0x00, 0x00, 0x50              /* br.call.sptk.many b0 = _mcount;; */
};
 30
/*
 * Bitfield overlay for the 256-bit (two-bundle) ftrace_orig_code pattern,
 * used to mask out the per-call-site br.call displacement before memcmp.
 * dummy1..dummy3 cover the first 192 bits; the remaining fields pick out
 * the imm20 and sign bits of the br.call immediate in the second bundle.
 * Note: 64-41+13 == 36 filler bits; widths sum to 256 bits total.
 */
struct ftrace_orig_insn {
	u64 dummy1, dummy2, dummy3;	/* first bundle + start of second */
	u64 dummy4:64-41+13;		/* padding up to the imm20 field */
	u64 imm20:20;			/* low bits of br.call displacement */
	u64 dummy5:3;
	u64 sign:1;			/* sign bit of the displacement */
	u64 dummy6:4;
};
 39
/* mcount stub will be converted below for nop */
/*
 * Two bundles written over a call site to disable tracing: the first
 * keeps the same "mov r3=ip" that ftrace_call_code has, the second is
 * an all-no-op MLX bundle where the brl call would otherwise sit.
 */
static unsigned char ftrace_nop_code[MCOUNT_INSN_SIZE] = {
	0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
	0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
	0x00, 0x00, 0x04, 0x00,             /* nop.i 0x0 */
	0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* nop.x 0x0;; */
	0x00, 0x00, 0x04, 0x00
};
 49
 50static unsigned char *ftrace_nop_replace(void)
 51{
 52	return ftrace_nop_code;
 53}
 54
/*
 * mcount stub will be converted below for call.
 * Note: Just the last instruction (the MLX bundle's brl) differs from
 * ftrace_nop_code above; its displacement bits are filled in at patch
 * time by ftrace_call_replace().
 */
static unsigned char __attribute__((aligned(8)))
ftrace_call_code[MCOUNT_INSN_SIZE] = {
	0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */
	0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
	0x00, 0x00, 0x04, 0x00,             /* nop.i 0x0 */
	0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */
	0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, /* brl.many .;;*/
	0xf8, 0xff, 0xff, 0xc8
};
 68
/*
 * Bitfield overlay for the two bundles of ftrace_call_code, exposing the
 * pieces of the brl.many target displacement that live in the second
 * (MLX) bundle: imm39 (split across a u64 boundary, hence _l/_h), imm20
 * and the sign bit i.  Field widths sum to 256 bits == MCOUNT_INSN_SIZE.
 */
struct ftrace_call_insn {
	u64 dummy1, dummy2;	/* first bundle: no fields of interest */
	u64 dummy3:48;
	u64 imm39_l:16;		/* imm39 bits held in the third u64 */
	u64 imm39_h:23;		/* remaining imm39 bits */
	u64 dummy4:13;
	u64 imm20:20;		/* imm20 field of the brl */
	u64 dummy5:3;
	u64 i:1;		/* sign bit of the displacement */
	u64 dummy6:4;
};
 80
 81static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 82{
 83	struct ftrace_call_insn *code = (void *)ftrace_call_code;
 84	unsigned long offset = addr - (ip + 0x10);
 85
 86	code->imm39_l = offset >> 24;
 87	code->imm39_h = offset >> 40;
 88	code->imm20 = offset >> 4;
 89	code->i = offset >> 63;
 90	return ftrace_call_code;
 91}
 92
 93static int
 94ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 95		   unsigned char *new_code, int do_check)
 96{
 97	unsigned char replaced[MCOUNT_INSN_SIZE];
 98
 99	/*
100	 * Note:
101	 * We are paranoid about modifying text, as if a bug was to happen, it
102	 * could cause us to read or write to someplace that could cause harm.
103	 * Carefully read and modify the code with probe_kernel_*(), and make
104	 * sure what we read is what we expected it to be before modifying it.
 
 
105	 */
106
107	if (!do_check)
108		goto skip_check;
109
110	/* read the text we want to modify */
111	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
112		return -EFAULT;
113
114	/* Make sure it is what we expect it to be */
115	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
116		return -EINVAL;
117
118skip_check:
119	/* replace the text with the new text */
120	if (probe_kernel_write(((void *)ip), new_code, MCOUNT_INSN_SIZE))
121		return -EPERM;
122	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
123
124	return 0;
125}
126
/*
 * Verify that the text at rec->ip is in a state we know how to NOP out:
 * either the brl call we previously installed (FTRACE_FL_CONVERTED set)
 * or the original compiler-generated br.call to _mcount.  Because the
 * displacement bits vary per call site, they are copied from the text
 * just read into the reference template before the memcmp, so only the
 * fixed opcode bits are actually compared.
 * Returns 0 if recognized, -EFAULT on unreadable text, -EINVAL otherwise.
 */
static int ftrace_make_nop_check(struct dyn_ftrace *rec, unsigned long addr)
{
	/* aligned(8): the bitfield overlays below require bundle alignment */
	unsigned char __attribute__((aligned(8))) replaced[MCOUNT_INSN_SIZE];
	unsigned long ip = rec->ip;

	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;
	if (rec->flags & FTRACE_FL_CONVERTED) {
		struct ftrace_call_insn *call_insn, *tmp_call;

		call_insn = (void *)ftrace_call_code;
		tmp_call = (void *)replaced;
		/* neutralize the per-site displacement before comparing */
		call_insn->imm39_l = tmp_call->imm39_l;
		call_insn->imm39_h = tmp_call->imm39_h;
		call_insn->imm20 = tmp_call->imm20;
		call_insn->i = tmp_call->i;
		if (memcmp(replaced, ftrace_call_code, MCOUNT_INSN_SIZE) != 0)
			return -EINVAL;
		return 0;
	} else {
		struct ftrace_orig_insn *call_insn, *tmp_call;

		call_insn = (void *)ftrace_orig_code;
		tmp_call = (void *)replaced;
		/* neutralize the per-site displacement before comparing */
		call_insn->sign = tmp_call->sign;
		call_insn->imm20 = tmp_call->imm20;
		if (memcmp(replaced, ftrace_orig_code, MCOUNT_INSN_SIZE) != 0)
			return -EINVAL;
		return 0;
	}
}
158
159int ftrace_make_nop(struct module *mod,
160		    struct dyn_ftrace *rec, unsigned long addr)
161{
162	int ret;
163	char *new;
164
165	ret = ftrace_make_nop_check(rec, addr);
166	if (ret)
167		return ret;
168	new = ftrace_nop_replace();
169	return ftrace_modify_code(rec->ip, NULL, new, 0);
170}
171
172int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
173{
174	unsigned long ip = rec->ip;
175	unsigned char *old, *new;
176
177	old=  ftrace_nop_replace();
178	new = ftrace_call_replace(ip, addr);
179	return ftrace_modify_code(ip, old, new, 1);
180}
181
/* in IA64, _mcount can't directly call ftrace_stub. Only jump is ok */
/*
 * Point the patchable jump inside ftrace_call at @func by rewriting the
 * 64-bit immediate that holds the target address.  @func and ftrace_call
 * are ia64 function descriptors (struct fnptr), so the real entry
 * addresses come from their ->ip members.  ftrace_stub is skipped —
 * NOTE(review): presumably it is the default target; confirm in _mcount.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	unsigned long addr = ((struct fnptr *)ftrace_call)->ip;

	if (func == ftrace_stub)
		return 0;
	ip = ((struct fnptr *)func)->ip;

	/* addr + 2: slot offset of the movl immediate within the bundle,
	 * as expected by ia64_patch_imm64() */
	ia64_patch_imm64(addr + 2, ip);

	/* one bundle (16 bytes) was modified */
	flush_icache_range(addr, addr + 16);
	return 0;
}
197
198/* run from kstop_machine */
199int __init ftrace_dyn_arch_init(void)
200{
 
 
201	return 0;
202}