/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go out to P.A. Semi, Inc. for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 code written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */

#define pr_fmt(fmt) "ftrace-powerpc: " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>


#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	unsigned int op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

	return op;
}
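
/*
 * The opcode built above is a PowerPC I-form branch: opcode 18 in bits
 * 0-5, the signed 24-bit word offset from 'ip' to 'addr' in bits 6-29,
 * AA (bit 30) clear for a pc-relative branch, and LK (bit 31) set for
 * 'bl'. create_branch() returns 0 when 'addr' is not reachable, so a
 * zero return from this helper always means failure, never a valid
 * instruction.
 */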

static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
	unsigned int replaced;

	/*
	 * Note: Due to modules and __init, code can disappear and
	 * change; we need to protect against faulting as well as
	 * against the code changing. We do this by using the
	 * probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * stop_machine(), or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (replaced != old) {
		pr_err("%p: replaced (%#x) != old (%#x)\n",
		       (void *)ip, replaced, old);
		return -EINVAL;
	}

	/* replace the text with the new text */
	if (patch_instruction((unsigned int *)ip, new))
		return -EPERM;

	return 0;
}

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	addr = ppc_function_entry((void *)addr);

	/* use create_branch() to verify that this offset is branch-reachable */
	return create_branch((unsigned int *)ip, addr, 0);
}

#ifdef CONFIG_MODULES

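/*
 * A 'bl' is primary opcode 18 with AA=0 and LK=1: the 0xfc000003 mask
 * below keeps the opcode plus the AA and LK bits, and 0x48000001 is
 * the value they must have for a pc-relative branch-and-link.
 */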
static int is_bl_op(unsigned int op)
{
	return (op & 0xfc000003) == 0x48000001;
}

static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset;

	offset = (op & 0x03fffffc);
	/* sign-extend the 26-bit byte offset */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}
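
/*
 * Only pc-relative branches get here: is_bl_op() rejects AA=1, so
 * adding the sign-extended offset to 'ip' always yields the real
 * branch target.
 */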

#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	unsigned int op, pop;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we cannot simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */

	pop = PPC_INST_BRANCH | 8;	/* b +8 */

	/*
	 * Check what is in the next instruction. We can see ld r2,40(r1), but
	 * on first pass after boot we will see mflr r0.
	 */
	if (probe_kernel_read(&op, (void *)(ip + 4), MCOUNT_INSN_SIZE)) {
		pr_err("Fetching op failed.\n");
		return -EFAULT;
	}

	if (op != PPC_INST_LD_TOC) {
		unsigned int inst;

		if (probe_kernel_read(&inst, (void *)(ip - 4), 4)) {
			pr_err("Fetching instruction at %lx failed.\n", ip - 4);
			return -EFAULT;
		}

		/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
		if (inst != PPC_INST_MFLR && inst != PPC_INST_STD_LR) {
			pr_err("Unexpected instructions around bl _mcount\n"
			       "when enabling dynamic ftrace!\t"
			       "(%08x,bl,%08x)\n", inst, op);
			return -EINVAL;
		}

		/* When using -mprofile-kernel there is no load to jump over */
		pop = PPC_INST_NOP;
	}

	if (patch_instruction((unsigned int *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
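
/*
 * A sketch of the two call-site shapes handled above (exact offsets
 * depend on the ABI):
 *
 *	bl	<tramp>			bl	<tramp>
 *	ld	r2,XX(r1)		<no TOC reload>
 *
 *	-> patch 'b +8'			-> patch 'nop'
 *
 * i.e. the TOC reload is branched over when present, and the call is
 * plain-nopped when -mprofile-kernel emitted no reload to skip.
 */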

#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 *  0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
	 *  0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
	 *  0x7d, 0x89, 0x03, 0xa6  mtctr r12
	 *  0x4e, 0x80, 0x04, 0x20  bctr
	 */

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		pr_err("Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0x7d8903a6) ||
	    (jmp[3] != 0x4e800420)) {
		pr_err("Not a trampoline\n");
		return -EINVAL;
	}

	/*
	 * Reassemble the target from the lis/addi pair; addi sign-extends
	 * its immediate, so subtract 0x10000 when the low half is negative
	 * (the @ha relocation already compensated by adding 1 to the high
	 * half).
	 */
	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);
	if (tramp & 0x8000)
		tramp -= 0x10000;

	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		pr_err("Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_INST_NOP;

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
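
/*
 * Unlike the PPC64 case, the PPC32 trampoline branches straight to the
 * target function with no TOC indirection, which is why the address
 * decoded from the lis/addi pair must equal 'addr' itself.
 */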
#endif /* PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = PPC_INST_NOP;
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
/*
 * Examine the existing instructions for __ftrace_make_call.
 * They should effectively be a NOP, and follow formal constraints,
 * depending on the ABI. Return false if they don't.
 */
#ifndef CC_USING_MPROFILE_KERNEL
static int
expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
{
	/*
	 * We expect to see:
	 *
	 * b +8
	 * ld r2,XX(r1)
	 *
	 * The load offset is different depending on the ABI. For simplicity
	 * just mask it out when doing the compare: 0x48000008 is 'b +8' and
	 * 0xe8410000 is 'ld r2,0(r1)' with the displacement zeroed.
	 */
	if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000))
		return 0;
	return 1;
}
#else
static int
expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
{
	/* look for patched "NOP" on ppc64 with -mprofile-kernel */
	if (op0 != PPC_INST_NOP)
		return 0;
	return 1;
}
#endif

static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	void *ip = (void *)rec->ip;

	/* read where this goes */
	if (probe_kernel_read(op, ip, sizeof(op)))
		return -EFAULT;

	if (!expected_nop_sequence(ip, op[0], op[1])) {
		pr_err("Unexpected call sequence at %p: %x %x\n",
		       ip, op[0], op[1]);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* Ensure branch is within 24 bits */
	if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
		pr_err("Branch out of range\n");
		return -EINVAL;
	}

	if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
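
/*
 * create_branch() doubles as a range probe above: it returns 0 when
 * the trampoline is not reachable with a 24-bit relative branch,
 * letting us bail out before patch_branch() writes anything.
 */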

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	return ftrace_make_call(rec, addr);
}
#endif

#else /* !CONFIG_PPC64 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_INST_NOP) {
		pr_err("Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = PPC_INST_NOP;
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Since we are converting from a nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int old, new;
	int ret;

	old = *(unsigned int *)&ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}
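
/*
 * Note that the old instruction is read directly rather than via
 * probe_kernel_read(): ftrace_call is a label in the core kernel's
 * ftrace_caller code, which, unlike module or __init text, cannot
 * disappear underneath us.
 */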

static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
	int ret;

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;
	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);
	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);
	}

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		ret = __ftrace_replace_code(rec, enable);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}
}

/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine().
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);

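/*
 * ftrace_graph_call is a patchable branch site inside the ftrace
 * trampoline; enabling the graph tracer retargets it from
 * ftrace_graph_stub to ftrace_graph_caller (a plain 'b', hence link=0
 * below), and disabling swaps the two back.
 */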
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, stub, 0);
	new = ftrace_call_replace(ip, addr, 0);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, addr, 0);
	new = ftrace_call_replace(ip, stub, 0);

	return ftrace_modify_code(ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info. Return the address we want to divert to.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
	struct ftrace_graph_ent trace;
	unsigned long return_hooker;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out;

	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;

	parent = return_hooker;
out:
	return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
unsigned long __init arch_syscall_addr(int nr)
{
	/*
	 * The PPC64 syscall table carries two entries per syscall (the
	 * native entry point and the 32-bit compat one), so the native
	 * handler for syscall 'nr' lives at index nr * 2.
	 */
	return sys_call_table[nr * 2];
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */