Linux Audio

Check our new training course

Loading...
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Code for replacing ftrace calls with jumps.
  4 *
  5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  6 *
  7 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
  8 *
  9 * Added function graph tracer code, taken from x86 that was written
 10 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 11 *
 12 */
 13
 14#define pr_fmt(fmt) "ftrace-powerpc: " fmt
 15
 16#include <linux/spinlock.h>
 17#include <linux/hardirq.h>
 18#include <linux/uaccess.h>
 19#include <linux/module.h>
 20#include <linux/ftrace.h>
 21#include <linux/percpu.h>
 22#include <linux/init.h>
 23#include <linux/list.h>
 24
 25#include <asm/cacheflush.h>
 26#include <asm/code-patching.h>
 27#include <asm/ftrace.h>
 28#include <asm/syscall.h>
 29#include <asm/inst.h>
 30#include <asm/sections.h>
 31
 32#define	NUM_FTRACE_TRAMPS	2
 33static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
 34
/*
 * Adjust the compiler-recorded mcount address to the actual patch site.
 * Returning 0 tells the ftrace core to skip the location entirely.
 */
unsigned long ftrace_call_adjust(unsigned long addr)
{
	/* Do not trace functions in .exit.text: they may be freed */
	if (addr >= (unsigned long)__exittext_begin && addr < (unsigned long)__exittext_end)
		return 0;

	/*
	 * With -fpatchable-function-entry the first nop is the 'mflr r0'
	 * slot; the actual call site is one instruction later.
	 */
	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
		addr += MCOUNT_INSN_SIZE;

	return addr;
}
 45
 46static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link)
 47{
 48	ppc_inst_t op;
 49
 50	WARN_ON(!is_offset_in_branch_range(addr - ip));
 51	create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);
 52
 53	return op;
 54}
 55
 56static inline int ftrace_read_inst(unsigned long ip, ppc_inst_t *op)
 57{
 58	if (copy_inst_from_kernel_nofault(op, (void *)ip)) {
 59		pr_err("0x%lx: fetching instruction failed\n", ip);
 60		return -EFAULT;
 61	}
 62
 63	return 0;
 64}
 65
 66static inline int ftrace_validate_inst(unsigned long ip, ppc_inst_t inst)
 67{
 68	ppc_inst_t op;
 69	int ret;
 70
 71	ret = ftrace_read_inst(ip, &op);
 72	if (!ret && !ppc_inst_equal(op, inst)) {
 73		pr_err("0x%lx: expected (%08lx) != found (%08lx)\n",
 74		       ip, ppc_inst_as_ulong(inst), ppc_inst_as_ulong(op));
 75		ret = -EINVAL;
 76	}
 77
 78	return ret;
 79}
 80
 81static inline int ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
 82{
 83	int ret = ftrace_validate_inst(ip, old);
 84
 85	if (!ret)
 86		ret = patch_instruction((u32 *)ip, new);
 87
 88	return ret;
 89}
 90
 91static int is_bl_op(ppc_inst_t op)
 92{
 93	return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
 94}
 95
 96static unsigned long find_ftrace_tramp(unsigned long ip)
 97{
 98	int i;
 99
100	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
101		if (!ftrace_tramps[i])
102			continue;
103		else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
104			return ftrace_tramps[i];
105
106	return 0;
107}
108
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Compute the 'bl' instruction to be patched at rec->ip so that it
 * reaches @addr: directly when in branch range, otherwise via a module
 * stub or one of the kernel ftrace trampolines.
 * Returns 0 with *call_inst filled in, or -EINVAL if no stub reaches.
 */
static int ftrace_get_call_inst(struct dyn_ftrace *rec, unsigned long addr, ppc_inst_t *call_inst)
{
	unsigned long ip = rec->ip;
	unsigned long stub;

	if (is_offset_in_branch_range(addr - ip)) {
		/* Within range */
		stub = addr;
#ifdef CONFIG_MODULES
	} else if (rec->arch.mod) {
		/* Module code would be going to one of the module stubs */
		stub = (addr == (unsigned long)ftrace_caller ? rec->arch.mod->arch.tramp :
							       rec->arch.mod->arch.tramp_regs);
#endif
	} else if (core_kernel_text(ip)) {
		/* We would be branching to one of our ftrace stubs */
		stub = find_ftrace_tramp(ip);
		if (!stub) {
			pr_err("0x%lx: No ftrace stubs reachable\n", ip);
			return -EINVAL;
		}
	} else {
		/* Not kernel text and no owning module recorded */
		return -EINVAL;
	}

	*call_inst = ftrace_create_branch_inst(ip, stub, 1);
	return 0;
}
137
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/*
 * Unused on powerpc: call-site retargeting is handled wholesale in our
 * ftrace_replace_code() override, so reaching here is a bug.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	/* This should never be called since we override ftrace_replace_code() */
	WARN_ON(1);
	return -EINVAL;
}
#endif
146
/*
 * Turn the nop at rec->ip into a call to @addr. Only used while a
 * module is being loaded; runtime transitions go through
 * ftrace_replace_code() instead.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t old, new;
	int ret;

	/* This can only ever be called during module load */
	if (WARN_ON(!IS_ENABLED(CONFIG_MODULES) || core_kernel_text(rec->ip)))
		return -EINVAL;

	/* Expect a nop at the call site; replace it with the branch */
	old = ppc_inst(PPC_RAW_NOP());
	ret = ftrace_get_call_inst(rec, addr, &new);
	if (ret)
		return ret;

	return ftrace_modify_code(rec->ip, old, new);
}
163
/*
 * Unused on powerpc: nop-ing out call sites is handled by our
 * ftrace_replace_code() and ftrace_init_nop() overrides.
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	/*
	 * This should never be called since we override ftrace_replace_code(),
	 * as well as ftrace_init_nop()
	 */
	WARN_ON(1);
	return -EINVAL;
}
173
/*
 * Batch-update every ftrace call site when tracing is switched on/off.
 * Overrides the generic weak implementation so we can generate the
 * powerpc branch (possibly via a trampoline) for each record. On the
 * first failure, report via ftrace_bug() and stop.
 */
void ftrace_replace_code(int enable)
{
	ppc_inst_t old, new, call_inst, new_call_inst;
	ppc_inst_t nop_inst = ppc_inst(PPC_RAW_NOP());
	unsigned long ip, new_addr, addr;
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret = 0, update;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		ip = rec->ip;

		/* Skip disabled records unless they are still patched in */
		if (rec->flags & FTRACE_FL_DISABLED && !(rec->flags & FTRACE_FL_ENABLED))
			continue;

		addr = ftrace_get_addr_curr(rec);	/* current target */
		new_addr = ftrace_get_addr_new(rec);	/* desired target */
		update = ftrace_update_record(rec, enable);

		switch (update) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;
		case FTRACE_UPDATE_MODIFY_CALL:
			/* call -> call with a different target */
			ret = ftrace_get_call_inst(rec, new_addr, &new_call_inst);
			ret |= ftrace_get_call_inst(rec, addr, &call_inst);
			old = call_inst;
			new = new_call_inst;
			break;
		case FTRACE_UPDATE_MAKE_NOP:
			/* call -> nop */
			ret = ftrace_get_call_inst(rec, addr, &call_inst);
			old = call_inst;
			new = nop_inst;
			break;
		case FTRACE_UPDATE_MAKE_CALL:
			/* nop -> call */
			ret = ftrace_get_call_inst(rec, new_addr, &call_inst);
			old = nop_inst;
			new = call_inst;
			break;
		}

		/* Verify 'old' is in place, then patch in 'new' */
		if (!ret)
			ret = ftrace_modify_code(ip, old, new);
		if (ret)
			goto out;
	}

out:
	if (ret)
		ftrace_bug(ret, rec);
	return;
}
227
/*
 * Initialize a newly-discovered ftrace call site: verify the
 * compiler-generated instruction sequence around rec->ip, remember the
 * owning module, then nop out the call (or, with
 * patchable-function-entry, patch in the 'mflr r0' instead).
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long addr, ip = rec->ip;
	ppc_inst_t old, new;
	int ret = 0;

	/* Verify instructions surrounding the ftrace location */
	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) {
		/* Expect nops */
		ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_NOP()));
		if (!ret)
			ret = ftrace_validate_inst(ip, ppc_inst(PPC_RAW_NOP()));
	} else if (IS_ENABLED(CONFIG_PPC32)) {
		/* Expected sequence: 'mflr r0', 'stw r0,4(r1)', 'bl _mcount' */
		ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
		if (!ret)
			ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STW(_R0, _R1, 4)));
	} else if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
		/* Expected sequence: 'mflr r0', ['std r0,16(r1)'], 'bl _mcount' */
		ret = ftrace_read_inst(ip - 4, &old);
		if (!ret && !ppc_inst_equal(old, ppc_inst(PPC_RAW_MFLR(_R0)))) {
			/* Some toolchains emit the extra 'std'; accept both forms */
			ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
			ret |= ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STD(_R0, _R1, 16)));
		}
	} else {
		return -EINVAL;
	}

	if (ret)
		return ret;

	/* Record the owning module so stub selection works later */
	if (!core_kernel_text(ip)) {
		if (!mod) {
			pr_err("0x%lx: No module provided for non-kernel address\n", ip);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	}

	/* Nop-out the ftrace location */
	new = ppc_inst(PPC_RAW_NOP());
	addr = MCOUNT_ADDR;
	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) {
		/* we instead patch-in the 'mflr r0' */
		old = ppc_inst(PPC_RAW_NOP());
		new = ppc_inst(PPC_RAW_MFLR(_R0));
		ret = ftrace_modify_code(ip - 4, old, new);
	} else if (is_offset_in_branch_range(addr - ip)) {
		/* Within range */
		old = ftrace_create_branch_inst(ip, addr, 1);
		ret = ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip) || (IS_ENABLED(CONFIG_MODULES) && mod)) {
		/*
		 * We would be branching to a linker-generated stub, or to the module _mcount
		 * stub. Let's just confirm we have a 'bl' here.
		 */
		ret = ftrace_read_inst(ip, &old);
		if (ret)
			return ret;
		if (!is_bl_op(old)) {
			pr_err("0x%lx: expected (bl) != found (%08lx)\n", ip, ppc_inst_as_ulong(old));
			return -EINVAL;
		}
		ret = patch_instruction((u32 *)ip, new);
	} else {
		return -EINVAL;
	}

	return ret;
}
298
/*
 * Point the ftrace_call site (and, with DYNAMIC_FTRACE_WITH_REGS, the
 * ftrace_regs_call site) in the ftrace trampoline at @func.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	ppc_inst_t old, new;
	int ret;

	/* Read whatever branch is currently there and retarget it */
	old = ppc_inst_read((u32 *)&ftrace_call);
	new = ftrace_create_branch_inst(ip, ppc_function_entry(func), 1);
	ret = ftrace_modify_code(ip, old, new);

	/* Also update the regs callback function */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		old = ppc_inst_read((u32 *)&ftrace_regs_call);
		new = ftrace_create_branch_inst(ip, ppc_function_entry(func), 1);
		ret = ftrace_modify_code(ip, old, new);
	}

	return ret;
}
319
/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine().
 * NOTE(review): presumably safe because patch_instruction() updates a
 * single instruction at a time - confirm against the patching code.
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}
328
329void ftrace_free_init_tramp(void)
330{
331	int i;
332
333	for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
334		if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
335			ftrace_tramps[i] = 0;
336			return;
337		}
338}
339
340static void __init add_ftrace_tramp(unsigned long tramp)
341{
342	int i;
343
344	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
345		if (!ftrace_tramps[i]) {
346			ftrace_tramps[i] = tramp;
347			return;
348		}
349}
350
/*
 * Populate the two kernel ftrace trampolines (one in .text, one in
 * .init.text) with a stub that loads FTRACE_REGS_ADDR into r12 and
 * branches to it via ctr, then register both in ftrace_tramps[].
 */
int __init ftrace_dyn_arch_init(void)
{
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	unsigned long addr = FTRACE_REGS_ADDR;
	long reladdr;
	int i;
	u32 stub_insns[] = {
#ifdef CONFIG_PPC_KERNEL_PCREL
		/* pla r12,addr */
		PPC_PREFIX_MLS | __PPC_PRFX_R(1),
		PPC_INST_PADDI | ___PPC_RT(_R12),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#elif defined(CONFIG_PPC64)
		/* Form addr in r12 relative to the kernel TOC from the paca */
		PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
		PPC_RAW_ADDIS(_R12, _R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#else
		/* 32-bit: build the absolute address in r12 */
		PPC_RAW_LIS(_R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#endif
	};

	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
		for (i = 0; i < 2; i++) {
			reladdr = addr - (unsigned long)tramp[i];

			/* pla immediate reaches +/- 8GB from the stub */
			if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
				pr_err("Address of %ps out of range of pcrel address.\n",
					(void *)addr);
				return -1;
			}

			/* Fill in the prefix/suffix immediates of 'pla' */
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][0] |= IMM_H18(reladdr);
			tramp[i][1] |= IMM_L(reladdr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	} else if (IS_ENABLED(CONFIG_PPC64)) {
		reladdr = addr - kernel_toc_addr();

		/* addis/addi pair reaches +/- 2GB from the TOC */
		if (reladdr >= (long)SZ_2G || reladdr < -(long long)SZ_2G) {
			pr_err("Address of %ps out of range of kernel_toc.\n",
				(void *)addr);
			return -1;
		}

		for (i = 0; i < 2; i++) {
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][1] |= PPC_HA(reladdr);
			tramp[i][2] |= PPC_LO(reladdr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	} else {
		for (i = 0; i < 2; i++) {
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][0] |= PPC_HA(addr);
			tramp[i][1] |= PPC_LO(addr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	}

	return 0;
}
419
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Entry hook for the function graph tracer. Records the call via
 * function_graph_enter() and, on success, rewrites the saved link
 * register so the traced function returns through return_to_handler.
 */
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long sp = fregs->regs.gpr[1];	/* traced function's stack pointer */
	int bit;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	/* Avoid recursing into the tracer from this context */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		goto out;

	if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
		parent_ip = ppc_function_entry(return_to_handler);

	ftrace_test_recursion_unlock(bit);
out:
	/* Address the traced function will return to */
	fregs->regs.link = parent_ip;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Code for replacing ftrace calls with jumps.
  4 *
  5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  6 *
  7 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
  8 *
  9 * Added function graph tracer code, taken from x86 that was written
 10 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 11 *
 12 */
 13
 14#define pr_fmt(fmt) "ftrace-powerpc: " fmt
 15
 16#include <linux/spinlock.h>
 17#include <linux/hardirq.h>
 18#include <linux/uaccess.h>
 19#include <linux/module.h>
 20#include <linux/ftrace.h>
 21#include <linux/percpu.h>
 22#include <linux/init.h>
 23#include <linux/list.h>
 24
 25#include <asm/cacheflush.h>
 26#include <asm/text-patching.h>
 27#include <asm/ftrace.h>
 28#include <asm/syscall.h>
 29#include <asm/inst.h>
 30#include <asm/sections.h>
 31
 32#define	NUM_FTRACE_TRAMPS	2
 33static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
 34
/*
 * Adjust the compiler-recorded mcount address to the actual patch site.
 * Returning 0 tells the ftrace core to skip the location entirely.
 */
unsigned long ftrace_call_adjust(unsigned long addr)
{
	/* Do not trace functions in .exit.text: they may be freed */
	if (addr >= (unsigned long)__exittext_begin && addr < (unsigned long)__exittext_end)
		return 0;

	/*
	 * With in-line patchable-function-entry, skip past the leading
	 * 'mflr r0' slot (and, with CALL_OPS, one further instruction)
	 * to reach the actual 'bl' location.
	 */
	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY) &&
	    !IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
		addr += MCOUNT_INSN_SIZE;
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
			addr += MCOUNT_INSN_SIZE;
	}

	return addr;
}
 49
/*
 * Build the branch from @ip to @addr (with link when @link is set).
 * The target must be within relative-branch range; WARNs otherwise.
 */
static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link)
{
	ppc_inst_t op;

	WARN_ON(!is_offset_in_branch_range(addr - ip));
	create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);

	return op;
}
 59
/*
 * Fetch the instruction at @ip into @op.
 * Returns 0 on success, -EFAULT if the read faults.
 */
static inline int ftrace_read_inst(unsigned long ip, ppc_inst_t *op)
{
	if (copy_inst_from_kernel_nofault(op, (void *)ip)) {
		pr_err("0x%lx: fetching instruction failed\n", ip);
		return -EFAULT;
	}

	return 0;
}
 69
/*
 * Check that the instruction at @ip is exactly @inst.
 * Returns 0 on match, -EFAULT on a failed read, -EINVAL on mismatch.
 */
static inline int ftrace_validate_inst(unsigned long ip, ppc_inst_t inst)
{
	ppc_inst_t op;
	int ret;

	ret = ftrace_read_inst(ip, &op);
	if (!ret && !ppc_inst_equal(op, inst)) {
		pr_err("0x%lx: expected (%08lx) != found (%08lx)\n",
		       ip, ppc_inst_as_ulong(inst), ppc_inst_as_ulong(op));
		ret = -EINVAL;
	}

	return ret;
}
 84
/*
 * Replace the instruction at @ip with @new after verifying that @old is
 * currently there. Skips patching when old and new are already equal.
 */
static inline int ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
{
	int ret = ftrace_validate_inst(ip, old);

	if (!ret && !ppc_inst_equal(old, new))
		ret = patch_instruction((u32 *)ip, new);

	return ret;
}
 94
/* Does @op encode an unconditional 'bl' (branch with link)? */
static int is_bl_op(ppc_inst_t op)
{
	return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
}
 99
/*
 * Return a registered ftrace trampoline reachable by a relative branch
 * from @ip, or 0 if none is in range.
 */
static unsigned long find_ftrace_tramp(unsigned long ip)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i])
			continue;
		else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
			return ftrace_tramps[i];

	return 0;
}
112
#ifdef CONFIG_MODULES
/*
 * Find the ftrace trampoline of the module owning @ip: the plain
 * trampoline when the target is ftrace_caller, the regs-saving one
 * otherwise. Returns 0 when no module owns @ip, which the caller
 * treats as "no stub reachable".
 */
static unsigned long ftrace_lookup_module_stub(unsigned long ip, unsigned long addr)
{
	struct module *mod = NULL;

	preempt_disable();
	mod = __module_text_address(ip);
	preempt_enable();

	if (!mod) {
		/*
		 * Don't dereference a NULL module pointer: report the
		 * failure and let the caller handle the missing stub.
		 */
		pr_err("No module loaded at addr=%lx\n", ip);
		return 0;
	}

	return (addr == (unsigned long)ftrace_caller ? mod->arch.tramp : mod->arch.tramp_regs);
}
#else
static unsigned long ftrace_lookup_module_stub(unsigned long ip, unsigned long addr)
{
	return 0;
}
#endif
133
/*
 * Address of this record's out-of-line stub. Must only be referenced
 * in CONFIG_PPC_FTRACE_OUT_OF_LINE builds; BUILD_BUG() otherwise.
 */
static unsigned long ftrace_get_ool_stub(struct dyn_ftrace *rec)
{
#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
	return rec->arch.ool_stub;
#else
	BUILD_BUG();
#endif
}
142
/*
 * Compute the 'bl' to be patched at the record's call site (the second
 * instruction of the OOL stub in out-of-line mode, rec->ip otherwise)
 * so that it reaches @addr: directly when in range, otherwise via a
 * kernel ftrace trampoline or the owning module's stub.
 */
static int ftrace_get_call_inst(struct dyn_ftrace *rec, unsigned long addr, ppc_inst_t *call_inst)
{
	unsigned long ip;
	unsigned long stub;

	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
		ip = ftrace_get_ool_stub(rec) + MCOUNT_INSN_SIZE; /* second instruction in stub */
	else
		ip = rec->ip;

	if (!is_offset_in_branch_range(addr - ip) && addr != FTRACE_ADDR &&
	    addr != FTRACE_REGS_ADDR) {
		/* This can only happen with ftrace direct */
		if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS)) {
			pr_err("0x%lx (0x%lx): Unexpected target address 0x%lx\n",
			       ip, rec->ip, addr);
			return -EINVAL;
		}
		/* Direct target out of range: go via ftrace_caller instead */
		addr = FTRACE_ADDR;
	}

	if (is_offset_in_branch_range(addr - ip))
		/* Within range */
		stub = addr;
	else if (core_kernel_text(ip))
		/* We would be branching to one of our ftrace stubs */
		stub = find_ftrace_tramp(ip);
	else
		stub = ftrace_lookup_module_stub(ip, addr);

	if (!stub) {
		pr_err("0x%lx (0x%lx): No ftrace stubs reachable\n", ip, rec->ip);
		return -EINVAL;
	}

	*call_inst = ftrace_create_branch_inst(ip, stub, 1);
	return 0;
}
181
/*
 * Allocate and initialize the out-of-line stub for a call site:
 *   mflr r0; bl ftrace_caller; mtlr r0; b rec->ip + 4
 * The stub is picked from the area matching the record's section
 * (.init.text, .text, end-of-.text overflow, or the module's stubs)
 * and must be within branch range of rec->ip in both directions.
 */
static int ftrace_init_ool_stub(struct module *mod, struct dyn_ftrace *rec)
{
#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
	static int ool_stub_text_index, ool_stub_text_end_index, ool_stub_inittext_index;
	int ret = 0, ool_stub_count, *ool_stub_index;
	ppc_inst_t inst;
	/*
	 * See ftrace_entry.S if changing the below instruction sequence, as we rely on
	 * decoding the last branch instruction here to recover the correct function ip.
	 */
	struct ftrace_ool_stub *ool_stub, ool_stub_template = {
		.insn = {
			PPC_RAW_MFLR(_R0),
			PPC_RAW_NOP(),		/* bl ftrace_caller */
			PPC_RAW_MTLR(_R0),
			PPC_RAW_NOP()		/* b rec->ip + 4 */
		}
	};

	/* Each record gets exactly one stub */
	WARN_ON(rec->arch.ool_stub);

	if (is_kernel_inittext(rec->ip)) {
		ool_stub = ftrace_ool_stub_inittext;
		ool_stub_index = &ool_stub_inittext_index;
		ool_stub_count = ftrace_ool_stub_inittext_count;
	} else if (is_kernel_text(rec->ip)) {
		/*
		 * ftrace records are sorted, so we first use up the stub area within .text
		 * (ftrace_ool_stub_text) before using the area at the end of .text
		 * (ftrace_ool_stub_text_end), unless the stub is out of range of the record.
		 */
		if (ool_stub_text_index >= ftrace_ool_stub_text_count ||
		    !is_offset_in_branch_range((long)rec->ip -
					       (long)&ftrace_ool_stub_text[ool_stub_text_index])) {
			ool_stub = ftrace_ool_stub_text_end;
			ool_stub_index = &ool_stub_text_end_index;
			ool_stub_count = ftrace_ool_stub_text_end_count;
		} else {
			ool_stub = ftrace_ool_stub_text;
			ool_stub_index = &ool_stub_text_index;
			ool_stub_count = ftrace_ool_stub_text_count;
		}
#ifdef CONFIG_MODULES
	} else if (mod) {
		ool_stub = mod->arch.ool_stubs;
		ool_stub_index = &mod->arch.ool_stub_index;
		ool_stub_count = mod->arch.ool_stub_count;
#endif
	} else {
		return -EINVAL;
	}

	/* Claim the next free stub in the chosen area */
	ool_stub += (*ool_stub_index)++;

	if (WARN_ON(*ool_stub_index > ool_stub_count))
		return -EINVAL;

	/* Both the entry branch and the return branch must be in range */
	if (!is_offset_in_branch_range((long)rec->ip - (long)&ool_stub->insn[0]) ||
	    !is_offset_in_branch_range((long)(rec->ip + MCOUNT_INSN_SIZE) -
				       (long)&ool_stub->insn[3])) {
		pr_err("%s: ftrace ool stub out of range (%p -> %p).\n",
					__func__, (void *)rec->ip, (void *)&ool_stub->insn[0]);
		return -EINVAL;
	}

	rec->arch.ool_stub = (unsigned long)&ool_stub->insn[0];

	/* bl ftrace_caller */
	if (!mod)
		ret = ftrace_get_call_inst(rec, (unsigned long)ftrace_caller, &inst);
#ifdef CONFIG_MODULES
	else
		/*
		 * We can't use ftrace_get_call_inst() since that uses
		 * __module_text_address(rec->ip) to look up the module.
		 * But, since the module is not fully formed at this stage,
		 * the lookup fails. We know the target though, so generate
		 * the branch inst directly.
		 */
		inst = ftrace_create_branch_inst(ftrace_get_ool_stub(rec) + MCOUNT_INSN_SIZE,
						 mod->arch.tramp, 1);
#endif
	ool_stub_template.insn[1] = ppc_inst_val(inst);

	/* b rec->ip + 4 */
	if (!ret && create_branch(&inst, &ool_stub->insn[3], rec->ip + MCOUNT_INSN_SIZE, 0))
		return -EINVAL;
	ool_stub_template.insn[3] = ppc_inst_val(inst);

	/* Write the whole stub in one go */
	if (!ret)
		ret = patch_instructions((u32 *)ool_stub, (u32 *)&ool_stub_template,
					 sizeof(ool_stub_template), false);

	return ret;
#else /* !CONFIG_PPC_FTRACE_OUT_OF_LINE */
	BUILD_BUG();
#endif
}
280
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
/*
 * Pick the ftrace_ops associated with a call site: the unique ops when
 * exactly one is attached, otherwise the list-iterating ftrace_list_ops.
 */
static const struct ftrace_ops *powerpc_rec_get_ops(struct dyn_ftrace *rec)
{
	const struct ftrace_ops *ops = NULL;

	if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
		ops = ftrace_find_unique_ops(rec);
		WARN_ON_ONCE(!ops);
	}

	if (!ops)
		ops = &ftrace_list_ops;

	return ops;
}

/*
 * Store @ops in the per-site ops slot, which sits just below the OOL
 * stub (out-of-line mode) or just below the patch site (in-line mode).
 */
static int ftrace_rec_set_ops(struct dyn_ftrace *rec, const struct ftrace_ops *ops)
{
	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
		return patch_ulong((void *)(ftrace_get_ool_stub(rec) - sizeof(unsigned long)),
				   (unsigned long)ops);
	else
		return patch_ulong((void *)(rec->ip - MCOUNT_INSN_SIZE - sizeof(unsigned long)),
				   (unsigned long)ops);
}

/* Point the call site at the do-nothing ops (site disabled). */
static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec)
{
	return ftrace_rec_set_ops(rec, &ftrace_nop_ops);
}

/* Refresh the call site's ops to match its current attachment. */
static int ftrace_rec_update_ops(struct dyn_ftrace *rec)
{
	return ftrace_rec_set_ops(rec, powerpc_rec_get_ops(rec));
}
#else
static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; }
static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; }
#endif
320
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/*
 * Unused on powerpc: call-site retargeting is handled wholesale in our
 * ftrace_replace_code() override, so reaching here is a bug.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	/* This should never be called since we override ftrace_replace_code() */
	WARN_ON(1);
	return -EINVAL;
}
#endif
329
330int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
331{
332	ppc_inst_t old, new;
333	unsigned long ip = rec->ip;
334	int ret = 0;
335
336	/* This can only ever be called during module load */
337	if (WARN_ON(!IS_ENABLED(CONFIG_MODULES) || core_kernel_text(ip)))
338		return -EINVAL;
339
340	old = ppc_inst(PPC_RAW_NOP());
341	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) {
342		ip = ftrace_get_ool_stub(rec) + MCOUNT_INSN_SIZE; /* second instruction in stub */
343		ret = ftrace_get_call_inst(rec, (unsigned long)ftrace_caller, &old);
344	}
345
346	ret |= ftrace_get_call_inst(rec, addr, &new);
347
348	if (!ret)
349		ret = ftrace_modify_code(ip, old, new);
350
351	ret = ftrace_rec_update_ops(rec);
352	if (ret)
353		return ret;
354
355	if (!ret && IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
356		ret = ftrace_modify_code(rec->ip, ppc_inst(PPC_RAW_NOP()),
357			 ppc_inst(PPC_RAW_BRANCH((long)ftrace_get_ool_stub(rec) - (long)rec->ip)));
358
359	return ret;
360}
361
/*
 * Unused on powerpc: nop-ing out call sites is handled by our
 * ftrace_replace_code() and ftrace_init_nop() overrides.
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	/*
	 * This should never be called since we override ftrace_replace_code(),
	 * as well as ftrace_init_nop()
	 */
	WARN_ON(1);
	return -EINVAL;
}
371
/*
 * Batch-update every ftrace call site when tracing is switched on/off.
 * In out-of-line mode the patched location is the stub's 'bl' slot and
 * its "disabled" state is a branch to ftrace_caller rather than a nop;
 * the branch at rec->ip into the stub is toggled separately below.
 * On the first failure, report via ftrace_bug() and stop.
 */
void ftrace_replace_code(int enable)
{
	ppc_inst_t old, new, call_inst, new_call_inst;
	ppc_inst_t nop_inst = ppc_inst(PPC_RAW_NOP());
	unsigned long ip, new_addr, addr;
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret = 0, update;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		ip = rec->ip;

		/* Skip disabled records unless they are still patched in */
		if (rec->flags & FTRACE_FL_DISABLED && !(rec->flags & FTRACE_FL_ENABLED))
			continue;

		addr = ftrace_get_addr_curr(rec);	/* current target */
		new_addr = ftrace_get_addr_new(rec);	/* desired target */
		update = ftrace_update_record(rec, enable);

		if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) && update != FTRACE_UPDATE_IGNORE) {
			/* OOL: patch inside the stub; "nop" is 'bl ftrace_caller' */
			ip = ftrace_get_ool_stub(rec) + MCOUNT_INSN_SIZE;
			ret = ftrace_get_call_inst(rec, (unsigned long)ftrace_caller, &nop_inst);
			if (ret)
				goto out;
		}

		switch (update) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;
		case FTRACE_UPDATE_MODIFY_CALL:
			/* call -> call with a different target */
			ret = ftrace_get_call_inst(rec, new_addr, &new_call_inst);
			ret |= ftrace_get_call_inst(rec, addr, &call_inst);
			ret |= ftrace_rec_update_ops(rec);
			old = call_inst;
			new = new_call_inst;
			break;
		case FTRACE_UPDATE_MAKE_NOP:
			/* call -> nop */
			ret = ftrace_get_call_inst(rec, addr, &call_inst);
			ret |= ftrace_rec_set_nop_ops(rec);
			old = call_inst;
			new = nop_inst;
			break;
		case FTRACE_UPDATE_MAKE_CALL:
			/* nop -> call */
			ret = ftrace_get_call_inst(rec, new_addr, &call_inst);
			ret |= ftrace_rec_update_ops(rec);
			old = nop_inst;
			new = call_inst;
			break;
		}

		if (!ret)
			ret = ftrace_modify_code(ip, old, new);

		if (!ret && IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) &&
		    (update == FTRACE_UPDATE_MAKE_NOP || update == FTRACE_UPDATE_MAKE_CALL)) {
			/* Update the actual ftrace location */
			call_inst = ppc_inst(PPC_RAW_BRANCH((long)ftrace_get_ool_stub(rec) -
							    (long)rec->ip));
			nop_inst = ppc_inst(PPC_RAW_NOP());
			ip = rec->ip;

			if (update == FTRACE_UPDATE_MAKE_NOP)
				ret = ftrace_modify_code(ip, call_inst, nop_inst);
			else
				ret = ftrace_modify_code(ip, nop_inst, call_inst);

			if (ret)
				goto out;
		}

		if (ret)
			goto out;
	}

out:
	if (ret)
		ftrace_bug(ret, rec);
	return;
}
453
/*
 * Initialize a newly-discovered ftrace call site: verify the
 * compiler-generated instruction sequence around rec->ip, set up the
 * out-of-line stub when enabled, then nop out the call (or, with
 * patchable-function-entry, patch in the 'mflr r0' instead).
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long addr, ip = rec->ip;
	ppc_inst_t old, new;
	int ret = 0;

	/* Verify instructions surrounding the ftrace location */
	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) {
		/* Expect nops */
		if (!IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
			ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_NOP()));
		if (!ret)
			ret = ftrace_validate_inst(ip, ppc_inst(PPC_RAW_NOP()));
	} else if (IS_ENABLED(CONFIG_PPC32)) {
		/* Expected sequence: 'mflr r0', 'stw r0,4(r1)', 'bl _mcount' */
		ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
		if (ret)
			return ret;
		/* Nop out the LR save: the tracer entry code handles LR itself */
		ret = ftrace_modify_code(ip - 4, ppc_inst(PPC_RAW_STW(_R0, _R1, 4)),
					 ppc_inst(PPC_RAW_NOP()));
	} else if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
		/* Expected sequence: 'mflr r0', ['std r0,16(r1)'], 'bl _mcount' */
		ret = ftrace_read_inst(ip - 4, &old);
		if (!ret && !ppc_inst_equal(old, ppc_inst(PPC_RAW_MFLR(_R0)))) {
			/* Gcc v5.x emit the additional 'std' instruction, gcc v6.x don't */
			ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
			if (ret)
				return ret;
			ret = ftrace_modify_code(ip - 4, ppc_inst(PPC_RAW_STD(_R0, _R1, 16)),
						 ppc_inst(PPC_RAW_NOP()));
		}
	} else {
		return -EINVAL;
	}

	if (ret)
		return ret;

	/* Set up out-of-line stub */
	if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
		return ftrace_init_ool_stub(mod, rec);

	/* Nop-out the ftrace location */
	new = ppc_inst(PPC_RAW_NOP());
	addr = MCOUNT_ADDR;
	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) {
		/* we instead patch-in the 'mflr r0' */
		old = ppc_inst(PPC_RAW_NOP());
		new = ppc_inst(PPC_RAW_MFLR(_R0));
		ret = ftrace_modify_code(ip - 4, old, new);
	} else if (is_offset_in_branch_range(addr - ip)) {
		/* Within range */
		old = ftrace_create_branch_inst(ip, addr, 1);
		ret = ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip) || (IS_ENABLED(CONFIG_MODULES) && mod)) {
		/*
		 * We would be branching to a linker-generated stub, or to the module _mcount
		 * stub. Let's just confirm we have a 'bl' here.
		 */
		ret = ftrace_read_inst(ip, &old);
		if (ret)
			return ret;
		if (!is_bl_op(old)) {
			pr_err("0x%lx: expected (bl) != found (%08lx)\n", ip, ppc_inst_as_ulong(old));
			return -EINVAL;
		}
		ret = patch_instruction((u32 *)ip, new);
	} else {
		return -EINVAL;
	}

	return ret;
}
527
/*
 * Point the ftrace_call site (and, with DYNAMIC_FTRACE_WITH_REGS, the
 * ftrace_regs_call site) in the ftrace trampoline at @func.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	ppc_inst_t old, new;
	int ret;

	/*
	 * When using CALL_OPS, the function to call is associated with the
	 * call site, and we don't have a global function pointer to update.
	 */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
		return 0;

	/* Read whatever branch is currently there and retarget it */
	old = ppc_inst_read((u32 *)&ftrace_call);
	new = ftrace_create_branch_inst(ip, ppc_function_entry(func), 1);
	ret = ftrace_modify_code(ip, old, new);

	/* Also update the regs callback function */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		old = ppc_inst_read((u32 *)&ftrace_regs_call);
		new = ftrace_create_branch_inst(ip, ppc_function_entry(func), 1);
		ret = ftrace_modify_code(ip, old, new);
	}

	return ret;
}
555
/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine().
 * NOTE(review): presumably safe because patch_instruction() updates a
 * single instruction at a time - confirm against the patching code.
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}
564
/*
 * Drop the .init.text trampoline from ftrace_tramps[] once init memory
 * is about to be freed, so it is never handed out again.
 */
void ftrace_free_init_tramp(void)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
		if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
			ftrace_tramps[i] = 0;
			return;
		}
}
575
/* Record @tramp in the first free slot of ftrace_tramps[], if any. */
static void __init add_ftrace_tramp(unsigned long tramp)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i]) {
			ftrace_tramps[i] = tramp;
			return;
		}
}
586
/*
 * Populate the two kernel ftrace trampolines (one in .text, one in
 * .init.text) with a stub that loads FTRACE_REGS_ADDR into r12 and
 * branches to it via ctr, then register both in ftrace_tramps[].
 */
int __init ftrace_dyn_arch_init(void)
{
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	unsigned long addr = FTRACE_REGS_ADDR;
	long reladdr;
	int i;
	u32 stub_insns[] = {
#ifdef CONFIG_PPC_KERNEL_PCREL
		/* pla r12,addr */
		PPC_PREFIX_MLS | __PPC_PRFX_R(1),
		PPC_INST_PADDI | ___PPC_RT(_R12),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#elif defined(CONFIG_PPC64)
		/* Form addr in r12 relative to the kernel TOC from the paca */
		PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
		PPC_RAW_ADDIS(_R12, _R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#else
		/* 32-bit: build the absolute address in r12 */
		PPC_RAW_LIS(_R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#endif
	};

	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
		for (i = 0; i < 2; i++) {
			reladdr = addr - (unsigned long)tramp[i];

			/* pla immediate reaches +/- 8GB from the stub */
			if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
				pr_err("Address of %ps out of range of pcrel address.\n",
					(void *)addr);
				return -1;
			}

			/* Fill in the prefix/suffix immediates of 'pla' */
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][0] |= IMM_H18(reladdr);
			tramp[i][1] |= IMM_L(reladdr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	} else if (IS_ENABLED(CONFIG_PPC64)) {
		reladdr = addr - kernel_toc_addr();

		/* addis/addi pair reaches +/- 2GB from the TOC */
		if (reladdr >= (long)SZ_2G || reladdr < -(long long)SZ_2G) {
			pr_err("Address of %ps out of range of kernel_toc.\n",
				(void *)addr);
			return -1;
		}

		for (i = 0; i < 2; i++) {
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][1] |= PPC_HA(reladdr);
			tramp[i][2] |= PPC_LO(reladdr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	} else {
		for (i = 0; i < 2; i++) {
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][0] |= PPC_HA(addr);
			tramp[i][1] |= PPC_LO(addr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	}

	return 0;
}
655
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Entry hook for the function graph tracer. Records the call via
 * function_graph_enter() and, on success, rewrites the saved link
 * register so the traced function returns through return_to_handler.
 */
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long sp = arch_ftrace_regs(fregs)->regs.gpr[1];	/* traced function's stack pointer */
	int bit;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	/* Avoid recursing into the tracer from this context */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		goto out;

	if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
		parent_ip = ppc_function_entry(return_to_handler);

	ftrace_test_recursion_unlock(bit);
out:
	/* Address the traced function will return to */
	arch_ftrace_regs(fregs)->regs.link = parent_ip;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */