v4.17
// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/kernel/kprobes.c
 *
 * Copyright (C) 2004 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>

/* We do not have hardware single-stepping on sparc64.
 * So we implement software single-stepping with breakpoint
 * traps.  The top-level scheme is similar to that used
 * in the x86 kprobes implementation.
 *
 * In the kprobe->ainsn.insn[] array we store the original
 * instruction at index zero and a break instruction at
 * index one.
 *
 * When we hit a kprobe we:
 * - Run the pre-handler
 * - Remember "regs->tnpc" and the interrupt level stored in
 *   "regs->tstate" so we can restore them later
 * - Disable PIL interrupts
 * - Set regs->tpc to point to kprobe->ainsn.insn[0]
 * - Set regs->tnpc to point to kprobe->ainsn.insn[1]
 * - Mark that we are actively in a kprobe
 *
 * At this point we wait for the second breakpoint at
 * kprobe->ainsn.insn[1] to hit.  When it does we:
 * - Run the post-handler
 * - Set regs->tpc to the "remembered" regs->tnpc stored above,
 *   and restore the PIL interrupt level in "regs->tstate" as well
 * - Make any adjustments necessary to regs->tnpc in order
 *   to handle relative branches correctly.  See below.
 * - Mark that we are no longer actively in a kprobe.
 */
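
/* For illustration (hypothetical addresses): for a probe hit at
 * 0x1000 with tnpc 0x1004, the first trap re-points execution at the
 * two-slot buffer:
 *
 *	tpc  = &p->ainsn.insn[0]	(copy of the original instruction)
 *	tnpc = &p->ainsn.insn[1]	(BREAKPOINT_INSTRUCTION_2)
 *
 * and the second trap restores tpc = 0x1004 (the remembered tnpc),
 * with relbranch_fixup() below correcting tnpc if the stepped
 * instruction was a taken branch or call.
 */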

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	if ((unsigned long) p->addr & 0x3UL)
		return -EILSEQ;

	p->ainsn.insn[0] = *p->addr;
	flushi(&p->ainsn.insn[0]);

	p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
	flushi(&p->ainsn.insn[1]);

	p->opcode = *p->addr;
	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flushi(p->addr);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flushi(p->addr);
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.orig_tnpc = kcb->kprobe_orig_tnpc;
	kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
	kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_orig_tnpc = regs->tnpc;
	kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
			struct kprobe_ctlblk *kcb)
{
	regs->tstate |= TSTATE_PIL;

	/* Single step inline if it is a breakpoint instruction. */
	if (p->opcode == BREAKPOINT_INSTRUCTION) {
		regs->tpc = (unsigned long) p->addr;
		regs->tnpc = kcb->kprobe_orig_tnpc;
	} else {
		regs->tpc = (unsigned long) &p->ainsn.insn[0];
		regs->tnpc = (unsigned long) &p->ainsn.insn[1];
	}
}
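
/* A note on the TSTATE_PIL write above: raising the PIL field masks
 * maskable interrupts, so nothing should be able to intervene between
 * returning from the first trap and reaching the breakpoint at
 * ainsn.insn[1]; resume_execution() below restores the original level.
 */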

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	void *addr = (void *) regs->tpc;
	int ret = 0;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS) {
				regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
					kcb->kprobe_orig_tstate_pil);
				goto no_kprobe;
			}
			/* We have reentered kprobe_handler() because
			 * another probe was hit while within the handler.
			 * Here we save the original kprobe variables and
			 * just single-step the new probe's instruction,
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			kcb->kprobe_status = KPROBE_REENTER;
			prepare_singlestep(p, regs, kcb);
			return 1;
		} else {
			if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit it, so no
				 * further handling of this interrupt is
				 * appropriate.
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __this_cpu_read(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let the kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	if (p->pre_handler && p->pre_handler(p, regs))
		return 1;

ss_probe:
	prepare_singlestep(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/* If INSN is a relative control transfer instruction,
 * return the corrected branch destination value.
 *
 * regs->tpc and regs->tnpc still hold the values of the
 * program counters at the time of the trap due to the execution
 * of the BREAKPOINT_INSTRUCTION_2 at p->ainsn.insn[1].
 */
static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p,
					       struct pt_regs *regs)
{
	unsigned long real_pc = (unsigned long) p->addr;

	/* Branch not taken, no mods necessary.  */
	if (regs->tnpc == regs->tpc + 0x4UL)
		return real_pc + 0x8UL;

	/* The three cases are call, branch w/prediction,
	 * and traditional branch.
	 */
	if ((insn & 0xc0000000) == 0x40000000 ||
	    (insn & 0xc1c00000) == 0x00400000 ||
	    (insn & 0xc1c00000) == 0x00800000) {
		unsigned long ainsn_addr;

		ainsn_addr = (unsigned long) &p->ainsn.insn[0];

		/* The instruction did all the work for us
		 * already, just apply the offset to the correct
		 * instruction location.
		 */
		return (real_pc + (regs->tnpc - ainsn_addr));
	}

	/* It is jmpl or some other absolute PC modification instruction,
	 * leave NPC as-is.
	 */
	return regs->tnpc;
}
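
/* How the masks in relbranch_fixup() select those three cases
 * (standard SPARC V9 encodings):
 *
 *	call:	op (bits 31:30) == 01
 *		-> (insn & 0xc0000000) == 0x40000000
 *	BPcc:	op == 00, op2 (bits 24:22) == 001 (branch w/prediction)
 *		-> (insn & 0xc1c00000) == 0x00400000
 *	Bicc:	op == 00, op2 == 010 (traditional branch)
 *		-> (insn & 0xc1c00000) == 0x00800000
 */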

/* If INSN is an instruction which writes its PC location
 * into a destination register, fix that up.
 */
static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
				  unsigned long real_pc)
{
	unsigned long *slot = NULL;

	/* Simplest case is 'call', which always uses %o7 */
	if ((insn & 0xc0000000) == 0x40000000) {
		slot = &regs->u_regs[UREG_I7];
	}

	/* 'jmpl' encodes the register inside of the opcode */
	if ((insn & 0xc1f80000) == 0x81c00000) {
		unsigned long rd = ((insn >> 25) & 0x1f);

		if (rd <= 15) {
			slot = &regs->u_regs[rd];
		} else {
			/* Hard case, it goes onto the stack. */
			flushw_all();

			rd -= 16;
			slot = (unsigned long *)
				(regs->u_regs[UREG_FP] + STACK_BIAS);
			slot += rd;
		}
	}
	if (slot != NULL)
		*slot = real_pc;
}
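
/* For reference: jmpl is a format-3 instruction (op == 10, op3 ==
 * 0x38) with the destination register rd in bits 29:25.  rd 0-15
 * (%g0-%g7, %o0-%o7) live in pt_regs, while rd 16-31 (%l0-%l7,
 * %i0-%i7) sit in the register window spilled to the stack, which is
 * why flushw_all() must run before the save area is patched.  For
 * example, "jmpl %o7 + 8, %g0" (the synthetic "retl") has rd == 0.
 */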

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction which has been replaced by the breakpoint
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is &p->ainsn.insn[0].
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	u32 insn = p->ainsn.insn[0];

	regs->tnpc = relbranch_fixup(insn, p, regs);

	/* This assignment must occur after relbranch_fixup(), which
	 * still needs the trap-time values of tpc and tnpc.
	 */
	regs->tpc = kcb->kprobe_orig_tnpc;

	retpc_fixup(regs, insn, (unsigned long) p->addr);

	regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
			kcb->kprobe_orig_tstate_pil);
}

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe, point tpc back at the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->tpc = (unsigned long)cur->addr;
		regs->tnpc = kcb->kprobe_orig_tnpc;
		regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
				kcb->kprobe_orig_tstate_pil);
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because an instruction in the pre/post
		 * handler caused the page fault.  This can happen
		 * if the handler accesses user space via
		 * copy_from_user(), get_user(), etc.  Let the
		 * user-specified fault handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->tpc);
		if (entry) {
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_DEBUG:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG_2:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
				      struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	BUG_ON(trap_level != 0x170 && trap_level != 0x171);

	if (user_mode(regs)) {
		local_irq_enable();
		bad_trap(regs, trap_level);
		goto out;
	}

	/* trap_level == 0x170 --> ta 0x70
	 * trap_level == 0x171 --> ta 0x71
	 */
	if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
		       (trap_level == 0x170) ? "debug" : "debug_2",
		       regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
		bad_trap(regs, trap_level);
out:
	exception_exit(prev_state);
}

/* Jprobes support.  */
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));

	regs->tpc  = (unsigned long) jp->entry;
	regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
	regs->tstate |= TSTATE_PIL;

	return 1;
}

void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	register unsigned long orig_fp asm("g1");

	orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
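	/* The loop below unwinds register windows with "restore" until
	 * %sp climbs back up to the frame setjmp_pre_handler() saved,
	 * then executes "ta 0x70".  That re-enters kprobe_handler(),
	 * whose break_handler path (longjmp_break_handler() below)
	 * recognizes jprobe_return_trap_instruction as the trap PC and
	 * restores the saved register state.
	 */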
	__asm__ __volatile__("\n"
"1:	cmp		%%sp, %0\n\t"
	"blu,a,pt	%%xcc, 1b\n\t"
	" restore\n\t"
	".globl		jprobe_return_trap_instruction\n"
"jprobe_return_trap_instruction:\n\t"
	"ta		0x70"
	: /* no outputs */
	: "r" (orig_fp));
}

extern void jprobe_return_trap_instruction(void);

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	u32 *addr = (u32 *) regs->tpc;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (addr == (u32 *) jprobe_return_trap_instruction) {
		memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}

/* The value stored in the return address register is actually 2
 * instructions before where the callee will return to.
 * Sequences usually look something like this:
 *
 *		call	some_function	<--- return register points here
 *		 nop			<--- call delay slot
 *		whatever		<--- where callee returns to
 *
 * To keep trampoline_probe_handler logic simpler, we normalize the
 * value kept in ri->ret_addr so we don't need to keep adjusting it
 * back and forth.
 */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8);

	/* Replace the return addr with trampoline addr */
	regs->u_regs[UREG_RETPC] =
		((unsigned long)kretprobe_trampoline) - 8;
}
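
/* Worked example (hypothetical addresses): a call at 0x2000 leaves
 * 0x2000 in the return address register, and the callee returns to
 * 0x2000 + 8 = 0x2008, so ri->ret_addr is normalized to 0x2008.
 * Conversely, storing kretprobe_trampoline - 8 makes the callee's
 * return land exactly on the trampoline's first instruction.
 */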

/*
 * Called when the probe at the kretprobe trampoline is hit.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path
	 * have a return probe installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->tpc = orig_ret_address;
	regs->tnpc = orig_ret_address + 4;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and that we have re-enabled preemption).
	 */
	return 1;
}

static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline:\n"
		     "\tnop\n"
		     "\tnop\n");
}
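
/* The trampoline body is just two nops: trampoline_p below places a
 * kprobe on the first one, and trampoline_probe_handler() rewrites
 * tpc/tnpc to the real return address, so execution never actually
 * proceeds through the nops themselves.
 */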
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}