v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * Based on arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */

#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>

#include <asm/inst.h>
#include <asm/module.h>

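/*
 * Patch a single instruction at 'pc'. When 'validate' is set, read back
 * the current instruction first and bail out unless it matches 'old', so
 * an unexpected callsite state is reported rather than overwritten.
 */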
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, bool validate)
{
	u32 replaced;

	if (validate) {
		if (larch_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (larch_insn_patch_text((void *)pc, new))
		return -EPERM;

	return 0;
}

#ifdef CONFIG_MODULES
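/*
 * 'bl' encodes a 26-bit immediate in units of 4-byte instructions, i.e. a
 * signed PC-relative range of +/-128MB, hence the SZ_128M check below.
 */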
static bool reachable_by_bl(unsigned long addr, unsigned long pc)
{
	long offset = (long)addr - (long)pc;

	return offset >= -SZ_128M && offset < SZ_128M;
}

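/*
 * Each module gets a set of ftrace trampoline PLT entries at load time;
 * return the one matching the requested ftrace entry point, if any.
 */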
static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
	if (addr == FTRACE_REGS_ADDR &&
			IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return &plt[FTRACE_REGS_PLT_IDX];

	return NULL;
}

/*
 * Find the address the callsite must branch to in order to reach '*addr'.
 *
 * Due to the limited range of the 'bl' instruction, modules may be placed
 * too far away to branch directly, so we must use a PLT.
 *
 * Returns true when '*addr' contains a reachable target address, or has been
 * modified to contain a PLT address. Returns false otherwise.
 */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
{
	unsigned long pc = rec->ip + LOONGARCH_INSN_SIZE;
	struct plt_entry *plt;

	/*
	 * If a custom trampoline is unreachable, rely on the ftrace_regs_caller
	 * trampoline which knows how to indirectly reach that trampoline through
	 * ops->direct_call.
	 */
	if (*addr != FTRACE_ADDR && *addr != FTRACE_REGS_ADDR && !reachable_by_bl(*addr, pc))
		*addr = FTRACE_REGS_ADDR;

	/*
	 * When the target is within range of the 'bl' instruction, use 'addr'
	 * as-is and branch to it directly.
	 */
	if (reachable_by_bl(*addr, pc))
		return true;

	/*
	 * 'mod' is only set at module load time, but if we end up
	 * dealing with an out-of-range condition, we can assume it
	 * is due to a module being loaded far away from the kernel.
	 *
	 * NOTE: __module_text_address() must be called with preemption
	 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
	 * retains its validity throughout the remainder of this code.
	 */
	if (!mod) {
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();
	}

	if (WARN_ON(!mod))
		return false;

	plt = get_ftrace_plt(mod, *addr);
	if (!plt) {
		pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
		return false;
	}

	*addr = (unsigned long)plt;
	return true;
}
#else /* !CONFIG_MODULES */
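/* Without modules, every patch target lives in the kernel image and is assumed in range. */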
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
{
	return true;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
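/*
 * Retarget a live callsite: regenerate both the old and the new 'bl' so
 * ftrace_modify_code() can validate the current instruction before patching.
 */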
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
		return -EINVAL;

	new = larch_insn_gen_bl(pc, addr);
	old = larch_insn_gen_bl(pc, old_addr);

	return ftrace_modify_code(pc, old, new, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

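/*
 * 'ftrace_call' may hold any previously installed instruction, so patch it
 * unconditionally (validate == false).
 */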
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	u32 new;
	unsigned long pc;

	pc = (unsigned long)&ftrace_call;
	new = larch_insn_gen_bl(pc, (unsigned long)func);

	return ftrace_modify_code(pc, 0, new, false);
}

/*
 * The compiler has inserted 2 NOPs before the regular function prologue.
 * T series registers are available and safe because of LoongArch's psABI.
 *
 * At runtime, we can replace nop with bl to enable ftrace call and replace bl
 * with nop to disable ftrace call. The bl requires us to save the original RA
 * value, so it saves RA at t0 here.
 *
 * Details are:
 *
 * | Compiled   |       Disabled         |        Enabled         |
 * +------------+------------------------+------------------------+
 * | nop        | move     t0, ra        | move     t0, ra        |
 * | nop        | nop                    | bl       ftrace_caller |
 * | func_body  | func_body              | func_body              |
 *
 * The RA value will be recovered by ftrace_regs_entry, and restored into RA
 * before returning to the regular function prologue. When a function is not
 * being traced, the "move t0, ra" is not harmful.
 */

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	u32 old, new;
	unsigned long pc;

	pc = rec->ip;
	old = larch_insn_gen_nop();
	new = larch_insn_gen_move(LOONGARCH_GPR_T0, LOONGARCH_GPR_RA);

	return ftrace_modify_code(pc, old, new, true);
}

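/* Enable tracing at a callsite: turn the second nop into 'bl <trampoline>'. */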
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	old = larch_insn_gen_nop();
	new = larch_insn_gen_bl(pc, addr);

	return ftrace_modify_code(pc, old, new, true);
}

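/* Disable tracing at a callsite: turn the 'bl' back into a nop. */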
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	new = larch_insn_gen_nop();
	old = larch_insn_gen_bl(pc, addr);

	return ftrace_modify_code(pc, old, new, true);
}

void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
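/*
 * Hook the traced function's return: register the original return address
 * via function_graph_enter() and redirect it to return_to_handler.
 */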
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
{
	unsigned long old;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr, 0, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
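	/* GPR $r1 is RA; take its slot in pt_regs as the parent return address. */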
	struct pt_regs *regs = &fregs->regs;
	unsigned long *parent = (unsigned long *)&regs->regs[1];

	prepare_ftrace_return(ip, (unsigned long *)parent);
}
#else
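/*
 * Without HAVE_DYNAMIC_FTRACE_WITH_ARGS, toggle the graph tracer by patching
 * the ftrace_graph_call site between a nop and 'b ftrace_graph_caller'.
 */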
static int ftrace_modify_graph_caller(bool enable)
{
	u32 branch, nop;
	unsigned long pc, func;
	extern void ftrace_graph_call(void);

	pc = (unsigned long)&ftrace_graph_call;
	func = (unsigned long)&ftrace_graph_caller;

	nop = larch_insn_gen_nop();
	branch = larch_insn_gen_b(pc, func);

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
/* Ftrace callback handler for kprobes -- called with preemption disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	int bit;
	struct pt_regs *regs;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	regs = ftrace_get_regs(fregs);
	if (!regs)
		goto out;

	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
		unsigned long orig_ip = instruction_pointer(regs);

		instruction_pointer_set(regs, ip);

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			/*
			 * Emulate singlestep (and also recover regs->csr_era)
			 * as if there were a nop
			 */
			instruction_pointer_set(regs, (unsigned long)p->addr + MCOUNT_INSN_SIZE);
			if (unlikely(p->post_handler)) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				p->post_handler(p, regs, 0);
			}
			instruction_pointer_set(regs, orig_ip);
		}

		/*
		 * If pre_handler returns !0, it changes regs->csr_era. We have to
		 * skip emulating post_handler.
		 */
		__this_cpu_write(current_kprobe, NULL);
	}
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

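/* The ftrace callsite is emulated in-line above, so no out-of-line insn slot is needed. */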
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif /* CONFIG_KPROBES_ON_FTRACE */
v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * Based on arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>

#include <asm/inst.h>
#include <asm/module.h>

static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, bool validate)
{
	u32 replaced;

	if (validate) {
		if (larch_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	if (larch_insn_patch_text((void *)pc, new))
		return -EPERM;

	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

#ifdef CONFIG_MODULES
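/*
 * Look up the module containing 'addr'. __module_text_address() must be
 * called with preemption disabled; ftrace_lock keeps '*mod' valid afterwards.
 */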
static inline int __get_mod(struct module **mod, unsigned long addr)
{
	preempt_disable();
	*mod = __module_text_address(addr);
	preempt_enable();

	if (WARN_ON(!(*mod)))
		return -EINVAL;

	return 0;
}

static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
	if (addr == FTRACE_REGS_ADDR &&
			IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return &plt[FTRACE_REGS_PLT_IDX];

	return NULL;
}

static unsigned long get_plt_addr(struct module *mod, unsigned long addr)
{
	struct plt_entry *plt;

	plt = get_ftrace_plt(mod, addr);
	if (!plt) {
		pr_err("ftrace: no module PLT for %ps\n", (void *)addr);
		return -EINVAL;
	}

	return (unsigned long)plt;
}
#endif

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;
	long offset __maybe_unused;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

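	/* Out-of-'bl'-range module callsites must branch via the module's ftrace PLT. */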
#ifdef CONFIG_MODULES
	offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		int ret;
		struct module *mod;

		ret = __get_mod(&mod, pc);
		if (ret)
			return ret;

		addr = get_plt_addr(mod, addr);

		old_addr = get_plt_addr(mod, old_addr);
	}
#endif

	new = larch_insn_gen_bl(pc, addr);
	old = larch_insn_gen_bl(pc, old_addr);

	return ftrace_modify_code(pc, old, new, true);
}

#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	u32 new;
	unsigned long pc;

	pc = (unsigned long)&ftrace_call;
	new = larch_insn_gen_bl(pc, (unsigned long)func);

	return ftrace_modify_code(pc, 0, new, false);
}

/*
 * The compiler has inserted 2 NOPs before the regular function prologue.
 * T series registers are available and safe because of LoongArch's psABI.
 *
 * At runtime, we can replace nop with bl to enable ftrace call and replace bl
 * with nop to disable ftrace call. The bl requires us to save the original RA
 * value, so it saves RA at t0 here.
 *
 * Details are:
 *
 * | Compiled   |       Disabled         |        Enabled         |
 * +------------+------------------------+------------------------+
 * | nop        | move     t0, ra        | move     t0, ra        |
 * | nop        | nop                    | bl       ftrace_caller |
 * | func_body  | func_body              | func_body              |
 *
 * The RA value will be recovered by ftrace_regs_entry, and restored into RA
 * before returning to the regular function prologue. When a function is not
 * being traced, the "move t0, ra" is not harmful.
 */

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	u32 old, new;
	unsigned long pc;

	pc = rec->ip;
	old = larch_insn_gen_nop();
	new = larch_insn_gen_move(LOONGARCH_GPR_T0, LOONGARCH_GPR_RA);

	return ftrace_modify_code(pc, old, new, true);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;
	long offset __maybe_unused;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

#ifdef CONFIG_MODULES
	offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		int ret;
		struct module *mod;

		ret = __get_mod(&mod, pc);
		if (ret)
			return ret;

		addr = get_plt_addr(mod, addr);
	}
#endif

	old = larch_insn_gen_nop();
	new = larch_insn_gen_bl(pc, addr);

	return ftrace_modify_code(pc, old, new, true);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	u32 old, new;
	unsigned long pc;
	long offset __maybe_unused;

	pc = rec->ip + LOONGARCH_INSN_SIZE;

#ifdef CONFIG_MODULES
	offset = (long)pc - (long)addr;

	if (offset < -SZ_128M || offset >= SZ_128M) {
		int ret;
		struct module *mod;

		ret = __get_mod(&mod, pc);
		if (ret)
			return ret;

		addr = get_plt_addr(mod, addr);
	}
#endif

	new = larch_insn_gen_nop();
	old = larch_insn_gen_bl(pc, addr);

	return ftrace_modify_code(pc, old, new, true);
}

void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
{
	unsigned long old;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr, 0, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &fregs->regs;
	unsigned long *parent = (unsigned long *)&regs->regs[1];

	prepare_ftrace_return(ip, (unsigned long *)parent);
}
#else
static int ftrace_modify_graph_caller(bool enable)
{
	u32 branch, nop;
	unsigned long pc, func;
	extern void ftrace_graph_call(void);

	pc = (unsigned long)&ftrace_graph_call;
	func = (unsigned long)&ftrace_graph_caller;

	nop = larch_insn_gen_nop();
	branch = larch_insn_gen_b(pc, func);

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */