/*
 *  linux/arch/parisc/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>

#include <asm/assembly.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
			  /*  dumped to the console via printk)          */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs);

static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}
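/* Example: printbinary(buf, 0x5, 4) fills buf with "0101" and returns 4.
 * The register dumps below use it to render the PSW and FPSR flag bits. */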

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

#define PRINTREGS(lvl,r,f,fmt,x)	\
	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
		(r)[(x)+2], (r)[(x)+3])
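/* Example: PRINTREGS(level, regs->gr, "r", RFMT, 0) emits one row of four
 * registers, e.g. "r00-03  00000000 0000beef 00000000 00000000" on 32-bit
 * kernels (the values here are illustrative only). */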

static void print_gr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];

	printk("%s\n", level);
	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1)	\n\t"
		      "fldd 0(%1),%%fr0	\n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
	int i, user;
	char *level;
	unsigned long cr30, cr31;

	user = user_mode(regs);
	level = user ? KERN_DEBUG : KERN_CRIT;

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user)
		print_fr(level, regs);

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
	       level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

	if (user) {
		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
	} else {
		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

		parisc_show_stack(current, NULL, regs);
	}
}


void dump_stack(void)
{
	show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);

static void do_show_stack(struct unwind_frame_info *info)
{
	int i = 1;

	printk(KERN_CRIT "Backtrace:\n");
	while (i <= 16) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;

		if (__kernel_text_address(info->ip)) {
			printk(KERN_CRIT " [<" RFMT ">] %pS\n",
				info->ip, (void *) info->ip);
			i++;
		}
	}
	printk(KERN_CRIT "\n");
}

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs)
{
	struct unwind_frame_info info;
	struct task_struct *t;

	t = task ? task : current;
	if (regs) {
		unwind_frame_init(&info, t, regs);
		goto show_stack;
	}

	if (t == current) {
		unsigned long sp;

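		/* No pt_regs are available for the running task, so capture
		 * the current stack pointer (r30) and synthesize a minimal
		 * pt_regs pointing at this spot to give the unwinder a
		 * starting frame. */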
HERE:
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		{
			struct pt_regs r;

			memset(&r, 0, sizeof(struct pt_regs));
			r.iaoq[0] = (unsigned long)&&HERE;
			r.gr[2] = (unsigned long)__builtin_return_address(0);
			r.gr[30] = sp;

			unwind_frame_init(&info, current, &r);
		}
	} else {
		unwind_frame_init_from_blocked_task(&info, t);
	}

show_stack:
	do_show_stack(&info);
}

void show_stack(struct task_struct *t, unsigned long *sp)
{
	return parisc_show_stack(t, sp, NULL);
}

int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}

void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
		/* XXX for debugging only */
		show_regs(regs);
#endif
		return;
	}

	oops_in_progress = 1;

	oops_enter();

	/* Amuse the user in a SPARC fashion */
	if (err) printk(KERN_CRIT
			"      _______________________________ \n"
			"     < Your System ate a SPARC! Gah! >\n"
			"      ------------------------------- \n"
			"             \\   ^__^\n"
			"                 (__)\\       )\\/\\\n"
			"                  U  ||----w |\n"
			"                     ||     ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, task_pid_nr(current), str, err);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();
	add_taint(TAINT_DIE);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	oops_exit();
	do_exit(SIGSEGV);
}

int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
{
	return syscall(regs);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
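/* handle_break() below decodes "break im5,im13" by taking im5 from the low
 * five bits of the IIR and im13 from bits 13..25, so GDB's break 4,8 shows
 * up as (8 << 13) | 4 == 0x10004. */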
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
	struct siginfo si;

	si.si_signo = SIGTRAP;
	si.si_errno = 0;
	si.si_code = wot;
	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
	force_sig_info(SIGTRAP, &si, current);
}

static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here.  */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3, regs);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON().  */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

#ifdef PRINT_USER_FAULTS
	if (unlikely(iir != GDB_BREAK_INSN)) {
		printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			task_pid_nr(current), current->comm);
		show_regs(regs);
	}
#endif

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;


void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
    register int i;
    extern unsigned int hpmc_pim_data[];
    struct pdc_hpmc_pim_11 *pim_narrow;
    struct pdc_hpmc_pim_20 *pim_wide;

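    /* The PIM (Processor Internal Memory) dump is the state PDC saved at
     * the HPMC.  The control-register slots copied below follow the usual
     * PA-RISC numbering: cr11 = SAR, cr17/cr18 = IIA space/offset queue
     * heads, cr19 = IIR, cr20 = ISR, cr21 = IOR, cr22 = IPSW (kept in
     * gr[0] of pt_regs). */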
    if (boot_cpu_data.cpu_type >= pcxu) {

	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

	/*
	 * Note: The following code will probably generate a
	 * bunch of truncation error warnings from the compiler.
	 * Could be handled with an ifdef, but perhaps there
	 * is a better way.
	 */

	regs->gr[0] = pim_wide->cr[22];

	for (i = 1; i < 32; i++)
	    regs->gr[i] = pim_wide->gr[i];

	for (i = 0; i < 32; i++)
	    regs->fr[i] = pim_wide->fr[i];

	for (i = 0; i < 8; i++)
	    regs->sr[i] = pim_wide->sr[i];

	regs->iasq[0] = pim_wide->cr[17];
	regs->iasq[1] = pim_wide->iasq_back;
	regs->iaoq[0] = pim_wide->cr[18];
	regs->iaoq[1] = pim_wide->iaoq_back;

	regs->sar  = pim_wide->cr[11];
	regs->iir  = pim_wide->cr[19];
	regs->isr  = pim_wide->cr[20];
	regs->ior  = pim_wide->cr[21];
    }
    else {
	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

	regs->gr[0] = pim_narrow->cr[22];

	for (i = 1; i < 32; i++)
	    regs->gr[i] = pim_narrow->gr[i];

	for (i = 0; i < 32; i++)
	    regs->fr[i] = pim_narrow->fr[i];

	for (i = 0; i < 8; i++)
	    regs->sr[i] = pim_narrow->sr[i];

	regs->iasq[0] = pim_narrow->cr[17];
	regs->iasq[1] = pim_narrow->iasq_back;
	regs->iaoq[0] = pim_narrow->cr[18];
	regs->iaoq[1] = pim_narrow->iaoq_back;

	regs->sar  = pim_narrow->cr[11];
	regs->iir  = pim_narrow->cr[19];
	regs->isr  = pim_narrow->cr[20];
	regs->ior  = pim_narrow->cr[21];
    }

    /*
     * The following fields only have meaning if we came through
     * another path. So just zero them here.
     */

    regs->ksp = 0;
    regs->kpc = 0;
    regs->orig_r28 = 0;
}


/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	oops_in_progress = 1;

	set_eiem(0);
	local_irq_disable();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all paths will gutter the processor... */
	switch(code){

	case 1:
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		/* Fall through */
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info);
	}

	printk("\n");
	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
			msg, code, regs, offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Call kernel panic() so reboot timeouts work properly
	 * FIXME: This function should be on the list of
	 * panic notifiers, and we should call panic
	 * directly from the location that we wish.
	 * e.g. We should not call panic from
	 * parisc_terminate, but rather the other way around.
	 * This hack works, prints the panic message twice,
	 * and it enables reboot timers!
	 */
	panic(msg);
}

void notrace handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	struct siginfo si;

	if (code == 1)
	    pdc_console_restart();  /* switch back to pdc if HPMC */
	else
	    local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the user's
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
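	/* The privilege level travels in the low two bits of the IAOQ; a
	 * non-zero value here means the faulting instruction ran at user
	 * privilege (the code below parks it at PL 3, offset 0). */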
	if (((unsigned long)regs->iaoq[0] & 3) &&
	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
		regs->iaoq[0] = 0 | 3;
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;
		return;
	}

#if 0
	printk(KERN_CRIT "Interruption # %d\n", code);
#endif

	switch(code) {

	case  1:
		/* High-priority machine check (HPMC) */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case  2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case  3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case  5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		cpu_lpmc(5, regs);
		return;

	case  6:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space   = regs->iasq[0];
		break;

	case  8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si.si_code = ILL_ILLOPC;
		goto give_sigill;

	case  9:
		/* Break instruction trap */
		handle_break(regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si.si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */

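			/* Bit 0x00200000 of the IIR selects cr27 over cr26,
			 * and the low five bits name the destination GR;
			 * after emulating, advance the IA queues so we
			 * return past the trapped instruction. */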
			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si.si_code = ILL_PRVREG;
	give_sigill:
		si.si_signo = SIGILL;
		si.si_errno = 0;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGILL, &si, current);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		si.si_signo = SIGFPE;
		si.si_code = FPE_INTOVF;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGFPE, &si, current);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition  */
		if(user_mode(regs)){
			si.si_signo = SIGFPE;
			/* Set to zero, and let the userspace app figure it out from
			   the insn pointed to by si_addr */
			si.si_code = 0;
			si.si_addr = (void __user *) regs->iaoq[0];
			force_sig_info(SIGFPE, &si, current);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		/* Fall through */
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		/* Fall through */
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* FIXME:
			 Still need to add slow path emulation code here!
			 If the insn used a non-shadow register, then the tlb
			 handlers could not have their side-effect (e.g. probe
			 writing to a target register) emulated since rfir would
			 erase the changes to said register. Instead we have to
			 setup everything, call this function we are in, and emulate
			 by hand. Technically we need to emulate:
			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
		*/
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		/* Fall Through */
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space   = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		/* fall thru */
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case  7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				up_read(&current->mm->mmap_sem);
				break; /* call do_page_fault() */
			}
			up_read(&current->mm->mmap_sem);
		}
		/* Fall Through */
	case 27:
		/* Data memory protection ID trap */
		if (code == 27 && !user_mode(regs) &&
			fixup_exception(regs))
			return;

		die_if_kernel("Protection id trap", regs, code);
		si.si_code = SEGV_MAPERR;
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		if (code == 7)
		    si.si_addr = (void __user *) regs->iaoq[0];
		else
		    si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
			printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
			    task_pid_nr(current), current->comm);
			show_regs(regs);
#endif
			/* SIGBUS, for lack of a better one. */
			si.si_signo = SIGBUS;
			si.si_code = BUS_OBJERR;
			si.si_errno = 0;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGBUS, &si, current);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	if (user_mode(regs)) {
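	    /* fault_space and sr[7] hold space IDs; comparing the bits above
	     * SPACEID_SHIFT asks whether the fault happened in the process's
	     * own address space.  A mismatch (a fault in kernel space or a
	     * stray long-pointer access) gets SIGSEGV here instead of going
	     * to do_page_fault(). */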
	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
#ifdef PRINT_USER_FAULTS
		if (fault_space == 0)
			printk(KERN_DEBUG "User Fault on Kernel Space ");
		else
			printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
			       code);
		printk(KERN_CONT "pid=%d command='%s'\n",
		       task_pid_nr(current), current->comm);
		show_regs(regs);
#endif
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		si.si_code = SEGV_MAPERR;
		si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;
	    }
	}
	else {

	    /*
	     * The kernel should never fault on its own address space.
	     */

	    if (fault_space == 0)
	    {
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
		parisc_terminate("Kernel Fault", regs, code, fault_address);

	    }
	}

	do_page_fault(regs, code, fault_address);
}


int __init check_ivt(void *iva)
{
	extern u32 os_hpmc_size;
	extern const u32 os_hpmc[];

	int i;
	u32 check = 0;
	u32 *ivap;
	u32 *hpmcp;
	u32 length;

	if (strcmp((char *)iva, "cows can fly"))
		return -1;

	ivap = (u32 *)iva;

	for (i = 0; i < 8; i++)
	    *ivap++ = 0;

	/* Compute Checksum for HPMC handler */
	length = os_hpmc_size;
	ivap[7] = length;

	hpmcp = (u32 *)os_hpmc;

	for (i=0; i<length/4; i++)
	    check += *hpmcp++;

	for (i=0; i<8; i++)
	    check += ivap[i];

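	/* ivap now points 32 bytes into the IVT; storing the negated sum
	 * makes these eight words plus the HPMC handler itself sum to zero.
	 * PDC apparently verifies this checksum before it will branch to
	 * the OS HPMC handler. */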
	ivap[5] = -check;

	return 0;
}

#ifndef CONFIG_64BIT
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;

void __init trap_init(void)
{
	void *iva;

	if (boot_cpu_data.cpu_type >= pcxu)
		iva = (void *) &fault_vector_20;
	else
#ifdef CONFIG_64BIT
		panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
		iva = (void *) &fault_vector_11;
#endif

	if (check_ivt(iva))
		panic("IVT invalid");
}