v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/arch/parisc/traps.c
  4 *
  5 *  Copyright (C) 1991, 1992  Linus Torvalds
  6 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
  7 */
  8
  9/*
 10 * 'Traps.c' handles hardware traps and faults after we have saved some
 11 * state in 'asm.s'.
 12 */
 13
 14#include <linux/sched.h>
 15#include <linux/sched/debug.h>
 16#include <linux/kernel.h>
 17#include <linux/string.h>
 18#include <linux/errno.h>
 19#include <linux/ptrace.h>
 20#include <linux/timer.h>
 21#include <linux/delay.h>
 22#include <linux/mm.h>
 23#include <linux/module.h>
 24#include <linux/smp.h>
 25#include <linux/spinlock.h>
 26#include <linux/init.h>
 27#include <linux/interrupt.h>
 28#include <linux/console.h>
 29#include <linux/bug.h>
 30#include <linux/ratelimit.h>
 31#include <linux/uaccess.h>
 32#include <linux/kdebug.h>
 33
 34#include <asm/assembly.h>
 35#include <asm/io.h>
 36#include <asm/irq.h>
 37#include <asm/traps.h>
 38#include <asm/unaligned.h>
 39#include <linux/atomic.h>
 40#include <asm/smp.h>
 41#include <asm/pdc.h>
 42#include <asm/pdc_chassis.h>
 43#include <asm/unwind.h>
 44#include <asm/tlbflush.h>
 45#include <asm/cacheflush.h>
 46#include <linux/kgdb.h>
 47#include <linux/kprobes.h>
 48
 49#include "../math-emu/math-emu.h"	/* for handle_fpe() */
 50
 51static void parisc_show_stack(struct task_struct *task,
 52	struct pt_regs *regs);
 53
 54static int printbinary(char *buf, unsigned long x, int nbits)
 55{
 56	unsigned long mask = 1UL << (nbits - 1);
 57	while (mask != 0) {
 58		*buf++ = (mask & x ? '1' : '0');
 59		mask >>= 1;
 60	}
 61	*buf = '\0';
 62
 63	return nbits;
 64}
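As a quick illustration (a standalone userspace sketch, not part of this file), printbinary() writes the bits of a word into the buffer most-significant bit first; print_gr() below uses it to render the 32 PSW bits (regs->gr[0]) under the flag-letter header line:

/* Standalone sketch: render a 32-bit word the way print_gr() renders the PSW;
 * the value passed in here is an arbitrary example, not a real PSW. */
#include <stdio.h>

static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);

	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';
	return nbits;
}

int main(void)
{
	char buf[64];

	printbinary(buf, 0x0004000fUL, 32);
	printf("     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n");
	printf("PSW: %s\n", buf);
	return 0;
}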
 65
 66#ifdef CONFIG_64BIT
 67#define RFMT "%016lx"
 68#else
 69#define RFMT "%08lx"
 70#endif
 71#define FFMT "%016llx"	/* fpregs are 64-bit always */
 72
 73#define PRINTREGS(lvl,r,f,fmt,x)	\
 74	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
 75		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
 76		(r)[(x)+2], (r)[(x)+3])
 77
 78static void print_gr(char *level, struct pt_regs *regs)
 79{
 80	int i;
 81	char buf[64];
 82
 83	printk("%s\n", level);
 84	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
 85	printbinary(buf, regs->gr[0], 32);
 86	printk("%sPSW: %s %s\n", level, buf, print_tainted());
 87
 88	for (i = 0; i < 32; i += 4)
 89		PRINTREGS(level, regs->gr, "r", RFMT, i);
 90}
 91
 92static void print_fr(char *level, struct pt_regs *regs)
 93{
 94	int i;
 95	char buf[64];
 96	struct { u32 sw[2]; } s;
 97
 98	/* FR are 64bit everywhere. Need to use asm to get the content
 99	 * of fpsr/fper1, and we assume that we won't have a FP Identify
100	 * in our way, otherwise we're screwed.
101	 * The fldd is used to restore the T-bit if there was one, as the
102	 * store clears it anyway.
103	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
104	asm volatile ("fstd %%fr0,0(%1)	\n\t"
105		      "fldd 0(%1),%%fr0	\n\t"
106		      : "=m" (s) : "r" (&s) : "r0");
107
108	printk("%s\n", level);
109	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
110	printbinary(buf, s.sw[0], 32);
111	printk("%sFPSR: %s\n", level, buf);
112	printk("%sFPER1: %08x\n", level, s.sw[1]);
113
114	/* here we'll print fr0 again, tho it'll be meaningless */
115	for (i = 0; i < 32; i += 4)
116		PRINTREGS(level, regs->fr, "fr", FFMT, i);
117}
118
119void show_regs(struct pt_regs *regs)
120{
121	int i, user;
122	char *level;
123	unsigned long cr30, cr31;
124
125	user = user_mode(regs);
126	level = user ? KERN_DEBUG : KERN_CRIT;
127
128	show_regs_print_info(level);
129
130	print_gr(level, regs);
131
132	for (i = 0; i < 8; i += 4)
133		PRINTREGS(level, regs->sr, "sr", RFMT, i);
134
135	if (user)
136		print_fr(level, regs);
137
138	cr30 = mfctl(30);
139	cr31 = mfctl(31);
140	printk("%s\n", level);
141	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
142	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
143	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
144	       level, regs->iir, regs->isr, regs->ior);
145	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
146	       level, current_thread_info()->cpu, cr30, cr31);
147	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
148
149	if (user) {
150		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
151		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
152		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
153	} else {
154		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
155		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
156		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
157
158		parisc_show_stack(current, regs);
159	}
160}
161
162static DEFINE_RATELIMIT_STATE(_hppa_rs,
163	DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
164
165#define parisc_printk_ratelimited(critical, regs, fmt, ...)	{	      \
166	if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
167		printk(fmt, ##__VA_ARGS__);				      \
168		show_regs(regs);					      \
169	}								      \
170}
171
172
173static void do_show_stack(struct unwind_frame_info *info)
174{
175	int i = 1;
176
177	printk(KERN_CRIT "Backtrace:\n");
178	while (i <= MAX_UNWIND_ENTRIES) {
179		if (unwind_once(info) < 0 || info->ip == 0)
180			break;
181
182		if (__kernel_text_address(info->ip)) {
183			printk(KERN_CRIT " [<" RFMT ">] %pS\n",
184				info->ip, (void *) info->ip);
185			i++;
186		}
187	}
188	printk(KERN_CRIT "\n");
189}
190
191static void parisc_show_stack(struct task_struct *task,
192	struct pt_regs *regs)
193{
194	struct unwind_frame_info info;
195
196	unwind_frame_init_task(&info, task, regs);
197
198	do_show_stack(&info);
199}
200
201void show_stack(struct task_struct *t, unsigned long *sp)
202{
203	parisc_show_stack(t, NULL);
204}
205
206int is_valid_bugaddr(unsigned long iaoq)
207{
208	return 1;
209}
210
211void die_if_kernel(char *str, struct pt_regs *regs, long err)
212{
213	if (user_mode(regs)) {
214		if (err == 0)
215			return; /* STFU */
216
217		parisc_printk_ratelimited(1, regs,
218			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
219			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
220
221		return;
222	}
223
224	bust_spinlocks(1);
225
226	oops_enter();
227
228	/* Amuse the user in a SPARC fashion */
229	if (err) printk(KERN_CRIT
230			"      _______________________________ \n"
231			"     < Your System ate a SPARC! Gah! >\n"
232			"      ------------------------------- \n"
233			"             \\   ^__^\n"
234			"                 (__)\\       )\\/\\\n"
235			"                  U  ||----w |\n"
236			"                     ||     ||\n");
237	
238	/* unlock the pdc lock if necessary */
239	pdc_emergency_unlock();
240
241	/* maybe the kernel hasn't booted very far yet and hasn't been able 
242	 * to initialize the serial or STI console. In that case we should 
243	 * re-enable the pdc console, so that the user will be able to 
244	 * identify the problem. */
245	if (!console_drivers)
246		pdc_console_restart();
247	
248	if (err)
249		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
250			current->comm, task_pid_nr(current), str, err);
251
252	/* Wot's wrong wif bein' racy? */
253	if (current->thread.flags & PARISC_KERNEL_DEATH) {
254		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
255		local_irq_enable();
256		while (1);
257	}
258	current->thread.flags |= PARISC_KERNEL_DEATH;
259
260	show_regs(regs);
261	dump_stack();
262	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
263
264	if (in_interrupt())
265		panic("Fatal exception in interrupt");
266
267	if (panic_on_oops)
268		panic("Fatal exception");
269
270	oops_exit();
271	do_exit(SIGSEGV);
272}
273
274/* gdb uses break 4,8 */
275#define GDB_BREAK_INSN 0x10004
276static void handle_gdb_break(struct pt_regs *regs, int wot)
277{
278	force_sig_fault(SIGTRAP, wot,
279			(void __user *) (regs->iaoq[0] & ~3));
280}
281
282static void handle_break(struct pt_regs *regs)
283{
284	unsigned iir = regs->iir;
285
286	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
287		/* check if a BUG() or WARN() trapped here.  */
288		enum bug_trap_type tt;
289		tt = report_bug(regs->iaoq[0] & ~3, regs);
290		if (tt == BUG_TRAP_TYPE_WARN) {
291			regs->iaoq[0] += 4;
292			regs->iaoq[1] += 4;
293			return; /* return to next instruction when WARN_ON().  */
294		}
295		die_if_kernel("Unknown kernel breakpoint", regs,
296			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
297	}
298
299#ifdef CONFIG_KPROBES
300	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN)) {
301		parisc_kprobe_break_handler(regs);
302		return;
303	}
304
305#endif
306
307#ifdef CONFIG_KGDB
308	if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
309		iir == PARISC_KGDB_BREAK_INSN)) {
310		kgdb_handle_exception(9, SIGTRAP, 0, regs);
311		return;
312	}
313#endif
314
315	if (unlikely(iir != GDB_BREAK_INSN))
316		parisc_printk_ratelimited(0, regs,
317			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
318			iir & 31, (iir>>13) & ((1<<13)-1),
319			task_pid_nr(current), current->comm);
320
321	/* send standard GDB signal */
322	handle_gdb_break(regs, TRAP_BRKPT);
323}
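handle_break() above splits the trapping BREAK instruction word into its im5 and im13 immediates with iir & 31 and (iir >> 13) & ((1 << 13) - 1). A standalone sketch of that decode (not part of the kernel), applied to the GDB_BREAK_INSN value 0x10004 defined above:

/* Standalone sketch: decode the BREAK immediates the same way handle_break()
 * does when it prints "break %d,%d". */
#include <stdio.h>

int main(void)
{
	unsigned int iir = 0x10004;			/* GDB_BREAK_INSN */
	unsigned int im5 = iir & 31;			/* low 5 bits */
	unsigned int im13 = (iir >> 13) & ((1 << 13) - 1);

	printf("break %u,%u\n", im5, im13);		/* prints "break 4,8" */
	return 0;
}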
324
325static void default_trap(int code, struct pt_regs *regs)
326{
327	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
328	show_regs(regs);
329}
330
331void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
332
333
334void transfer_pim_to_trap_frame(struct pt_regs *regs)
335{
336    register int i;
337    extern unsigned int hpmc_pim_data[];
338    struct pdc_hpmc_pim_11 *pim_narrow;
339    struct pdc_hpmc_pim_20 *pim_wide;
340
341    if (boot_cpu_data.cpu_type >= pcxu) {
342
343	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
344
345	/*
346	 * Note: The following code will probably generate a
347	 * bunch of truncation error warnings from the compiler.
348	 * Could be handled with an ifdef, but perhaps there
349	 * is a better way.
350	 */
351
352	regs->gr[0] = pim_wide->cr[22];
353
354	for (i = 1; i < 32; i++)
355	    regs->gr[i] = pim_wide->gr[i];
356
357	for (i = 0; i < 32; i++)
358	    regs->fr[i] = pim_wide->fr[i];
359
360	for (i = 0; i < 8; i++)
361	    regs->sr[i] = pim_wide->sr[i];
362
363	regs->iasq[0] = pim_wide->cr[17];
364	regs->iasq[1] = pim_wide->iasq_back;
365	regs->iaoq[0] = pim_wide->cr[18];
366	regs->iaoq[1] = pim_wide->iaoq_back;
367
368	regs->sar  = pim_wide->cr[11];
369	regs->iir  = pim_wide->cr[19];
370	regs->isr  = pim_wide->cr[20];
371	regs->ior  = pim_wide->cr[21];
372    }
373    else {
374	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
375
376	regs->gr[0] = pim_narrow->cr[22];
377
378	for (i = 1; i < 32; i++)
379	    regs->gr[i] = pim_narrow->gr[i];
380
381	for (i = 0; i < 32; i++)
382	    regs->fr[i] = pim_narrow->fr[i];
383
384	for (i = 0; i < 8; i++)
385	    regs->sr[i] = pim_narrow->sr[i];
386
387	regs->iasq[0] = pim_narrow->cr[17];
388	regs->iasq[1] = pim_narrow->iasq_back;
389	regs->iaoq[0] = pim_narrow->cr[18];
390	regs->iaoq[1] = pim_narrow->iaoq_back;
391
392	regs->sar  = pim_narrow->cr[11];
393	regs->iir  = pim_narrow->cr[19];
394	regs->isr  = pim_narrow->cr[20];
395	regs->ior  = pim_narrow->cr[21];
396    }
397
398    /*
399     * The following fields only have meaning if we came through
400     * another path. So just zero them here.
401     */
402
403    regs->ksp = 0;
404    regs->kpc = 0;
405    regs->orig_r28 = 0;
406}
407
408
409/*
410 * This routine is called as a last resort when everything else
411 * has gone clearly wrong. We get called for faults in kernel space,
412 * and HPMC's.
413 */
414void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
415{
416	static DEFINE_SPINLOCK(terminate_lock);
417
418	(void)notify_die(DIE_OOPS, msg, regs, 0, code, SIGTRAP);
419	bust_spinlocks(1);
420
421	set_eiem(0);
422	local_irq_disable();
423	spin_lock(&terminate_lock);
424
425	/* unlock the pdc lock if necessary */
426	pdc_emergency_unlock();
427
428	/* restart pdc console if necessary */
429	if (!console_drivers)
430		pdc_console_restart();
431
432	/* Not all paths will gutter the processor... */
433	switch(code){
434
435	case 1:
436		transfer_pim_to_trap_frame(regs);
437		break;
438
439	default:
440		/* Fall through */
441		break;
442
443	}
444	    
445	{
446		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
447		struct unwind_frame_info info;
448		unwind_frame_init(&info, current, regs);
449		do_show_stack(&info);
450	}
451
452	printk("\n");
453	pr_crit("%s: Code=%d (%s) at addr " RFMT "\n",
454		msg, code, trap_name(code), offset);
455	show_regs(regs);
456
457	spin_unlock(&terminate_lock);
458
459	/* put soft power button back under hardware control;
460	 * if the user had pressed it once at any time, the 
461	 * system will shut down immediately right here. */
462	pdc_soft_power_button(0);
463	
464	/* Call kernel panic() so reboot timeouts work properly 
465	 * FIXME: This function should be on the list of
466	 * panic notifiers, and we should call panic
467	 * directly from the location that we wish. 
468	 * e.g. We should not call panic from
469	 * parisc_terminate, but rather the other way around.
470	 * This hack works, prints the panic message twice,
471	 * and it enables reboot timers!
472	 */
473	panic(msg);
474}
475
476void notrace handle_interruption(int code, struct pt_regs *regs)
477{
478	unsigned long fault_address = 0;
479	unsigned long fault_space = 0;
480	int si_code;
481
482	if (code == 1)
483	    pdc_console_restart();  /* switch back to pdc if HPMC */
484	else
485	    local_irq_enable();
486
487	/* Security check:
488	 * If the priority level is still user, and the
489	 * faulting space is not equal to the active space
490	 * then the user is attempting something in a space
491	 * that does not belong to them. Kill the process.
492	 *
493	 * This is normally the situation when the user
494	 * attempts to jump into the kernel space at the
495	 * wrong offset, be it at the gateway page or a
496	 * random location.
497	 *
498	 * We cannot normally signal the process because it
499	 * could *be* on the gateway page, and processes
500	 * executing on the gateway page can't have signals
501	 * delivered.
502	 * 
503	 * We merely readjust the address into the users
504	 * space, at a destination address of zero, and
505	 * allow processing to continue.
506	 */
507	if (((unsigned long)regs->iaoq[0] & 3) &&
508	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) { 
509		/* Kill the user process later */
510		regs->iaoq[0] = 0 | 3;
511		regs->iaoq[1] = regs->iaoq[0] + 4;
512		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
513		regs->gr[0] &= ~PSW_B;
514		return;
515	}
516	
517#if 0
518	printk(KERN_CRIT "Interruption # %d\n", code);
519#endif
520
521	switch(code) {
522
523	case  1:
524		/* High-priority machine check (HPMC) */
525		
526		/* set up a new led state on systems shipped with a LED State panel */
527		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
528
529		parisc_terminate("High Priority Machine Check (HPMC)",
530				regs, code, 0);
531		/* NOT REACHED */
532		
533	case  2:
534		/* Power failure interrupt */
535		printk(KERN_CRIT "Power failure interrupt !\n");
536		return;
537
538	case  3:
539		/* Recovery counter trap */
540		regs->gr[0] &= ~PSW_R;
541
542#ifdef CONFIG_KPROBES
543		if (parisc_kprobe_ss_handler(regs))
544			return;
545#endif
546
547#ifdef CONFIG_KGDB
548		if (kgdb_single_step) {
549			kgdb_handle_exception(0, SIGTRAP, 0, regs);
550			return;
551		}
552#endif
553
554		if (user_space(regs))
555			handle_gdb_break(regs, TRAP_TRACE);
556		/* else this must be the start of a syscall - just let it run */
557		return;
558
559	case  5:
560		/* Low-priority machine check */
561		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
562		
563		flush_cache_all();
564		flush_tlb_all();
565		cpu_lpmc(5, regs);
566		return;
567
568	case  PARISC_ITLB_TRAP:
569		/* Instruction TLB miss fault/Instruction page fault */
570		fault_address = regs->iaoq[0];
571		fault_space   = regs->iasq[0];
572		break;
573
574	case  8:
575		/* Illegal instruction trap */
576		die_if_kernel("Illegal instruction", regs, code);
577		si_code = ILL_ILLOPC;
578		goto give_sigill;
579
580	case  9:
581		/* Break instruction trap */
582		handle_break(regs);
583		return;
584
585	case 10:
586		/* Privileged operation trap */
587		die_if_kernel("Privileged operation", regs, code);
588		si_code = ILL_PRVOPC;
589		goto give_sigill;
590
591	case 11:
592		/* Privileged register trap */
593		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
594
595			/* This is a MFCTL cr26/cr27 to gr instruction.
596			 * PCXS traps on this, so we need to emulate it.
597			 */
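			/* Field decode used below: bit 0x00200000 of the IIR
			 * selects cr27 (set) vs. cr26 (clear), and the low
			 * five bits (iir & 0x1f) name the destination general
			 * register. */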
598
599			if (regs->iir & 0x00200000)
600				regs->gr[regs->iir & 0x1f] = mfctl(27);
601			else
602				regs->gr[regs->iir & 0x1f] = mfctl(26);
603
604			regs->iaoq[0] = regs->iaoq[1];
605			regs->iaoq[1] += 4;
606			regs->iasq[0] = regs->iasq[1];
607			return;
608		}
609
610		die_if_kernel("Privileged register usage", regs, code);
611		si_code = ILL_PRVREG;
612	give_sigill:
613		force_sig_fault(SIGILL, si_code,
614				(void __user *) regs->iaoq[0]);
615		return;
616
617	case 12:
618		/* Overflow Trap, let the userland signal handler do the cleanup */
619		force_sig_fault(SIGFPE, FPE_INTOVF,
620				(void __user *) regs->iaoq[0]);
621		return;
622		
623	case 13:
624		/* Conditional Trap
625		   The condition succeeds in an instruction which traps
626		   on condition  */
627		if(user_mode(regs)){
628			/* Let userspace app figure it out from the insn pointed
629			 * to by si_addr.
630			 */
631			force_sig_fault(SIGFPE, FPE_CONDTRAP,
632					(void __user *) regs->iaoq[0]);
633			return;
634		} 
635		/* The kernel doesn't want to handle condition codes */
636		break;
637		
638	case 14:
639		/* Assist Exception Trap, i.e. floating point exception. */
640		die_if_kernel("Floating point exception", regs, 0); /* quiet */
641		__inc_irq_stat(irq_fpassist_count);
642		handle_fpe(regs);
643		return;
644
645	case 15:
646		/* Data TLB miss fault/Data page fault */
647		/* Fall through */
648	case 16:
649		/* Non-access instruction TLB miss fault */
650		/* The instruction TLB entry needed for the target address of the FIC
651		   is absent, and hardware can't find it, so we get to cleanup */
652		/* Fall through */
653	case 17:
654		/* Non-access data TLB miss fault/Non-access data page fault */
655		/* FIXME: 
656			 Still need to add slow path emulation code here!
657			 If the insn used a non-shadow register, then the tlb
658			 handlers could not have their side-effect (e.g. probe
659			 writing to a target register) emulated since rfir would
660			 erase the changes to said register. Instead we have to
661			 setup everything, call this function we are in, and emulate
662			 by hand. Technically we need to emulate:
663			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
664		*/
665		fault_address = regs->ior;
666		fault_space = regs->isr;
667		break;
668
669	case 18:
670		/* PCXS only -- later cpu's split this into types 26,27 & 28 */
671		/* Check for unaligned access */
672		if (check_unaligned(regs)) {
673			handle_unaligned(regs);
674			return;
675		}
676		/* Fall Through */
677	case 26: 
678		/* PCXL: Data memory access rights trap */
679		fault_address = regs->ior;
680		fault_space   = regs->isr;
681		break;
682
683	case 19:
684		/* Data memory break trap */
685		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
686		/* fall thru */
687	case 21:
688		/* Page reference trap */
689		handle_gdb_break(regs, TRAP_HWBKPT);
690		return;
691
692	case 25:
693		/* Taken branch trap */
694		regs->gr[0] &= ~PSW_T;
695		if (user_space(regs))
696			handle_gdb_break(regs, TRAP_BRANCH);
697		/* else this must be the start of a syscall - just let it
698		 * run.
699		 */
700		return;
701
702	case  7:  
703		/* Instruction access rights */
704		/* PCXL: Instruction memory protection trap */
705
706		/*
707		 * This could be caused by either: 1) a process attempting
708		 * to execute within a vma that does not have execute
709		 * permission, or 2) an access rights violation caused by a
710		 * flush only translation set up by ptep_get_and_clear().
711		 * So we check the vma permissions to differentiate the two.
712		 * If the vma indicates we have execute permission, then
713		 * the cause is the latter one. In this case, we need to
714		 * call do_page_fault() to fix the problem.
715		 */
716
717		if (user_mode(regs)) {
718			struct vm_area_struct *vma;
719
720			down_read(&current->mm->mmap_sem);
721			vma = find_vma(current->mm,regs->iaoq[0]);
722			if (vma && (regs->iaoq[0] >= vma->vm_start)
723				&& (vma->vm_flags & VM_EXEC)) {
724
725				fault_address = regs->iaoq[0];
726				fault_space = regs->iasq[0];
727
728				up_read(&current->mm->mmap_sem);
729				break; /* call do_page_fault() */
730			}
731			up_read(&current->mm->mmap_sem);
732		}
733		/* Fall Through */
734	case 27: 
735		/* Data memory protection ID trap */
736		if (code == 27 && !user_mode(regs) &&
737			fixup_exception(regs))
738			return;
739
740		die_if_kernel("Protection id trap", regs, code);
741		force_sig_fault(SIGSEGV, SEGV_MAPERR,
742				(code == 7)?
743				((void __user *) regs->iaoq[0]) :
744				((void __user *) regs->ior));
745		return;
746
747	case 28: 
748		/* Unaligned data reference trap */
749		handle_unaligned(regs);
750		return;
751
752	default:
753		if (user_mode(regs)) {
754			parisc_printk_ratelimited(0, regs, KERN_DEBUG
755				"handle_interruption() pid=%d command='%s'\n",
756				task_pid_nr(current), current->comm);
757			/* SIGBUS, for lack of a better one. */
758			force_sig_fault(SIGBUS, BUS_OBJERR,
759					(void __user *)regs->ior);
760			return;
761		}
762		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
763		
764		parisc_terminate("Unexpected interruption", regs, code, 0);
765		/* NOT REACHED */
766	}
767
768	if (user_mode(regs)) {
769	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
770		parisc_printk_ratelimited(0, regs, KERN_DEBUG
771				"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
772				code, fault_space,
773				task_pid_nr(current), current->comm);
774		force_sig_fault(SIGSEGV, SEGV_MAPERR,
775				(void __user *)regs->ior);
776		return;
777	    }
778	}
779	else {
780
781	    /*
782	     * The kernel should never fault on its own address space,
783	     * unless pagefault_disable() was called before.
784	     */
785
786	    if (fault_space == 0 && !faulthandler_disabled())
787	    {
788		/* Clean up and return if in exception table. */
789		if (fixup_exception(regs))
790			return;
791		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
792		parisc_terminate("Kernel Fault", regs, code, fault_address);
793	    }
794	}
795
796	do_page_fault(regs, code, fault_address);
797}
798
799
800void __init initialize_ivt(const void *iva)
801{
802	extern u32 os_hpmc_size;
803	extern const u32 os_hpmc[];
804
805	int i;
806	u32 check = 0;
807	u32 *ivap;
808	u32 *hpmcp;
809	u32 length, instr;
810
811	if (strcmp((const char *)iva, "cows can fly"))
812		panic("IVT invalid");
813
814	ivap = (u32 *)iva;
815
816	for (i = 0; i < 8; i++)
817	    *ivap++ = 0;
818
819	/*
820	 * Use PDC_INSTR firmware function to get instruction that invokes
821	 * PDCE_CHECK in HPMC handler.  See programming note at page 1-31 of
822	 * the PA 1.1 Firmware Architecture document.
823	 */
824	if (pdc_instr(&instr) == PDC_OK)
825		ivap[0] = instr;
826
827	/*
828	 * Rules for the checksum of the HPMC handler:
829	 * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed
830	 *    its own IVA).
831	 * 2. The word at IVA + 32 is nonzero.
832	 * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and
833	 *    Address (IVA + 56) are word-aligned.
834	 * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of
835	 *    the Length/4 words starting at Address is zero.
836	 */
837
838	/* Setup IVA and compute checksum for HPMC handler */
839	ivap[6] = (u32)__pa(os_hpmc);
840	length = os_hpmc_size;
841	ivap[7] = length;
842
843	hpmcp = (u32 *)os_hpmc;
844
845	for (i=0; i<length/4; i++)
846	    check += *hpmcp++;
847
848	for (i=0; i<8; i++)
849	    check += ivap[i];
850
851	ivap[5] = -check;
852}
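Writing the negated running sum into ivap[5] means that, once initialize_ivt() returns, the eight checksum words (ivap[0..7], i.e. IVA + 32 through IVA + 60) plus the handler words sum to zero modulo 2^32, which is what rule 4 above asks for. A standalone sketch of that arithmetic (the handler words and address below are made-up sample values, not the real os_hpmc image):

/* Standalone sketch: the "-check" slot makes the 32-bit sum wrap to zero. */
#include <stdio.h>

int main(void)
{
	unsigned int iva[8] = { 0 };
	unsigned int handler[4] = { 0x12345678, 0x9abcdef0, 0x0badcafe, 0xdeadbeef };
	unsigned int check = 0;
	int i;

	iva[6] = 0x1000;		/* sample handler address */
	iva[7] = sizeof(handler);	/* handler length in bytes */

	for (i = 0; i < 4; i++)
		check += handler[i];
	for (i = 0; i < 8; i++)
		check += iva[i];
	iva[5] = -check;

	/* Re-sum everything: unsigned wrap-around yields zero. */
	check = 0;
	for (i = 0; i < 4; i++)
		check += handler[i];
	for (i = 0; i < 8; i++)
		check += iva[i];
	printf("total = 0x%08x\n", check);	/* prints total = 0x00000000 */
	return 0;
}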
853	
854
855/* early_trap_init() is called before we set up kernel mappings and
856 * write-protect the kernel */
857void  __init early_trap_init(void)
858{
859	extern const void fault_vector_20;
860
861#ifndef CONFIG_64BIT
862	extern const void fault_vector_11;
863	initialize_ivt(&fault_vector_11);
864#endif
865
866	initialize_ivt(&fault_vector_20);
867}
868
869void __init trap_init(void)
870{
871}
v3.1
 
  1/*
  2 *  linux/arch/parisc/traps.c
  3 *
  4 *  Copyright (C) 1991, 1992  Linus Torvalds
  5 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
  6 */
  7
  8/*
  9 * 'Traps.c' handles hardware traps and faults after we have saved some
 10 * state in 'asm.s'.
 11 */
 12
 13#include <linux/sched.h>
 14#include <linux/kernel.h>
 15#include <linux/string.h>
 16#include <linux/errno.h>
 17#include <linux/ptrace.h>
 18#include <linux/timer.h>
 19#include <linux/delay.h>
 20#include <linux/mm.h>
 21#include <linux/module.h>
 22#include <linux/smp.h>
 23#include <linux/spinlock.h>
 24#include <linux/init.h>
 25#include <linux/interrupt.h>
 26#include <linux/console.h>
 27#include <linux/bug.h>
 28
 29#include <asm/assembly.h>
 30#include <asm/system.h>
 31#include <asm/uaccess.h>
 32#include <asm/io.h>
 33#include <asm/irq.h>
 34#include <asm/traps.h>
 35#include <asm/unaligned.h>
 36#include <linux/atomic.h>
 37#include <asm/smp.h>
 38#include <asm/pdc.h>
 39#include <asm/pdc_chassis.h>
 40#include <asm/unwind.h>
 41#include <asm/tlbflush.h>
 42#include <asm/cacheflush.h>
 43
 44#include "../math-emu/math-emu.h"	/* for handle_fpe() */
 45
 46#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
 47			  /*  dumped to the console via printk)          */
 48
 49#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 50DEFINE_SPINLOCK(pa_dbit_lock);
 51#endif
 52
 53static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
 54	struct pt_regs *regs);
 55
 56static int printbinary(char *buf, unsigned long x, int nbits)
 57{
 58	unsigned long mask = 1UL << (nbits - 1);
 59	while (mask != 0) {
 60		*buf++ = (mask & x ? '1' : '0');
 61		mask >>= 1;
 62	}
 63	*buf = '\0';
 64
 65	return nbits;
 66}
 67
 68#ifdef CONFIG_64BIT
 69#define RFMT "%016lx"
 70#else
 71#define RFMT "%08lx"
 72#endif
 73#define FFMT "%016llx"	/* fpregs are 64-bit always */
 74
 75#define PRINTREGS(lvl,r,f,fmt,x)	\
 76	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
 77		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
 78		(r)[(x)+2], (r)[(x)+3])
 79
 80static void print_gr(char *level, struct pt_regs *regs)
 81{
 82	int i;
 83	char buf[64];
 84
 85	printk("%s\n", level);
 86	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
 87	printbinary(buf, regs->gr[0], 32);
 88	printk("%sPSW: %s %s\n", level, buf, print_tainted());
 89
 90	for (i = 0; i < 32; i += 4)
 91		PRINTREGS(level, regs->gr, "r", RFMT, i);
 92}
 93
 94static void print_fr(char *level, struct pt_regs *regs)
 95{
 96	int i;
 97	char buf[64];
 98	struct { u32 sw[2]; } s;
 99
100	/* FR are 64bit everywhere. Need to use asm to get the content
101	 * of fpsr/fper1, and we assume that we won't have a FP Identify
102	 * in our way, otherwise we're screwed.
103	 * The fldd is used to restore the T-bit if there was one, as the
104	 * store clears it anyway.
105	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
106	asm volatile ("fstd %%fr0,0(%1)	\n\t"
107		      "fldd 0(%1),%%fr0	\n\t"
108		      : "=m" (s) : "r" (&s) : "r0");
109
110	printk("%s\n", level);
111	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
112	printbinary(buf, s.sw[0], 32);
113	printk("%sFPSR: %s\n", level, buf);
114	printk("%sFPER1: %08x\n", level, s.sw[1]);
115
116	/* here we'll print fr0 again, tho it'll be meaningless */
117	for (i = 0; i < 32; i += 4)
118		PRINTREGS(level, regs->fr, "fr", FFMT, i);
119}
120
121void show_regs(struct pt_regs *regs)
122{
123	int i, user;
124	char *level;
125	unsigned long cr30, cr31;
126
127	user = user_mode(regs);
128	level = user ? KERN_DEBUG : KERN_CRIT;
129
130	print_gr(level, regs);
131
132	for (i = 0; i < 8; i += 4)
133		PRINTREGS(level, regs->sr, "sr", RFMT, i);
134
135	if (user)
136		print_fr(level, regs);
137
138	cr30 = mfctl(30);
139	cr31 = mfctl(31);
140	printk("%s\n", level);
141	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
142	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
143	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
144	       level, regs->iir, regs->isr, regs->ior);
145	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
146	       level, current_thread_info()->cpu, cr30, cr31);
147	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
148
149	if (user) {
150		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
151		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
152		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
153	} else {
154		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
155		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
156		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
157
158		parisc_show_stack(current, NULL, regs);
159	}
160}
161
162
163void dump_stack(void)
164{
165	show_stack(NULL, NULL);
166}
167
168EXPORT_SYMBOL(dump_stack);
169
170static void do_show_stack(struct unwind_frame_info *info)
171{
172	int i = 1;
173
174	printk(KERN_CRIT "Backtrace:\n");
175	while (i <= 16) {
176		if (unwind_once(info) < 0 || info->ip == 0)
177			break;
178
179		if (__kernel_text_address(info->ip)) {
180			printk(KERN_CRIT " [<" RFMT ">] %pS\n",
181				info->ip, (void *) info->ip);
182			i++;
183		}
184	}
185	printk(KERN_CRIT "\n");
186}
187
188static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
189	struct pt_regs *regs)
190{
191	struct unwind_frame_info info;
192	struct task_struct *t;
193
194	t = task ? task : current;
195	if (regs) {
196		unwind_frame_init(&info, t, regs);
197		goto show_stack;
198	}
199
200	if (t == current) {
201		unsigned long sp;
202
203HERE:
204		asm volatile ("copy %%r30, %0" : "=r"(sp));
205		{
206			struct pt_regs r;
207
208			memset(&r, 0, sizeof(struct pt_regs));
209			r.iaoq[0] = (unsigned long)&&HERE;
210			r.gr[2] = (unsigned long)__builtin_return_address(0);
211			r.gr[30] = sp;
212
213			unwind_frame_init(&info, current, &r);
214		}
215	} else {
216		unwind_frame_init_from_blocked_task(&info, t);
217	}
218
219show_stack:
220	do_show_stack(&info);
221}
222
223void show_stack(struct task_struct *t, unsigned long *sp)
224{
225	return parisc_show_stack(t, sp, NULL);
226}
227
228int is_valid_bugaddr(unsigned long iaoq)
229{
230	return 1;
231}
232
233void die_if_kernel(char *str, struct pt_regs *regs, long err)
234{
235	if (user_mode(regs)) {
236		if (err == 0)
237			return; /* STFU */
238
239		printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
240			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
241#ifdef PRINT_USER_FAULTS
242		/* XXX for debugging only */
243		show_regs(regs);
244#endif
245		return;
246	}
247
248	oops_in_progress = 1;
249
250	oops_enter();
251
252	/* Amuse the user in a SPARC fashion */
253	if (err) printk(KERN_CRIT
254			"      _______________________________ \n"
255			"     < Your System ate a SPARC! Gah! >\n"
256			"      ------------------------------- \n"
257			"             \\   ^__^\n"
258			"                 (__)\\       )\\/\\\n"
259			"                  U  ||----w |\n"
260			"                     ||     ||\n");
261	
262	/* unlock the pdc lock if necessary */
263	pdc_emergency_unlock();
264
265	/* maybe the kernel hasn't booted very far yet and hasn't been able 
266	 * to initialize the serial or STI console. In that case we should 
267	 * re-enable the pdc console, so that the user will be able to 
268	 * identify the problem. */
269	if (!console_drivers)
270		pdc_console_restart();
271	
272	if (err)
273		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
274			current->comm, task_pid_nr(current), str, err);
275
276	/* Wot's wrong wif bein' racy? */
277	if (current->thread.flags & PARISC_KERNEL_DEATH) {
278		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
279		local_irq_enable();
280		while (1);
281	}
282	current->thread.flags |= PARISC_KERNEL_DEATH;
283
284	show_regs(regs);
285	dump_stack();
286	add_taint(TAINT_DIE);
287
288	if (in_interrupt())
289		panic("Fatal exception in interrupt");
290
291	if (panic_on_oops) {
292		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
293		ssleep(5);
294		panic("Fatal exception");
295	}
296
297	oops_exit();
298	do_exit(SIGSEGV);
299}
300
301int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
302{
303	return syscall(regs);
304}
305
306/* gdb uses break 4,8 */
307#define GDB_BREAK_INSN 0x10004
308static void handle_gdb_break(struct pt_regs *regs, int wot)
309{
310	struct siginfo si;
311
312	si.si_signo = SIGTRAP;
313	si.si_errno = 0;
314	si.si_code = wot;
315	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
316	force_sig_info(SIGTRAP, &si, current);
317}
318
319static void handle_break(struct pt_regs *regs)
320{
321	unsigned iir = regs->iir;
322
323	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
324		/* check if a BUG() or WARN() trapped here.  */
325		enum bug_trap_type tt;
326		tt = report_bug(regs->iaoq[0] & ~3, regs);
327		if (tt == BUG_TRAP_TYPE_WARN) {
328			regs->iaoq[0] += 4;
329			regs->iaoq[1] += 4;
330			return; /* return to next instruction when WARN_ON().  */
331		}
332		die_if_kernel("Unknown kernel breakpoint", regs,
333			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
334	}
335
336#ifdef PRINT_USER_FAULTS
337	if (unlikely(iir != GDB_BREAK_INSN)) {
338		printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
339			iir & 31, (iir>>13) & ((1<<13)-1),
340			task_pid_nr(current), current->comm);
341		show_regs(regs);
342	}
343#endif
344
345	/* send standard GDB signal */
346	handle_gdb_break(regs, TRAP_BRKPT);
347}
348
349static void default_trap(int code, struct pt_regs *regs)
350{
351	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
352	show_regs(regs);
353}
354
355void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
356
357
358void transfer_pim_to_trap_frame(struct pt_regs *regs)
359{
360    register int i;
361    extern unsigned int hpmc_pim_data[];
362    struct pdc_hpmc_pim_11 *pim_narrow;
363    struct pdc_hpmc_pim_20 *pim_wide;
364
365    if (boot_cpu_data.cpu_type >= pcxu) {
366
367	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
368
369	/*
370	 * Note: The following code will probably generate a
371	 * bunch of truncation error warnings from the compiler.
372	 * Could be handled with an ifdef, but perhaps there
373	 * is a better way.
374	 */
375
376	regs->gr[0] = pim_wide->cr[22];
377
378	for (i = 1; i < 32; i++)
379	    regs->gr[i] = pim_wide->gr[i];
380
381	for (i = 0; i < 32; i++)
382	    regs->fr[i] = pim_wide->fr[i];
383
384	for (i = 0; i < 8; i++)
385	    regs->sr[i] = pim_wide->sr[i];
386
387	regs->iasq[0] = pim_wide->cr[17];
388	regs->iasq[1] = pim_wide->iasq_back;
389	regs->iaoq[0] = pim_wide->cr[18];
390	regs->iaoq[1] = pim_wide->iaoq_back;
391
392	regs->sar  = pim_wide->cr[11];
393	regs->iir  = pim_wide->cr[19];
394	regs->isr  = pim_wide->cr[20];
395	regs->ior  = pim_wide->cr[21];
396    }
397    else {
398	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
399
400	regs->gr[0] = pim_narrow->cr[22];
401
402	for (i = 1; i < 32; i++)
403	    regs->gr[i] = pim_narrow->gr[i];
404
405	for (i = 0; i < 32; i++)
406	    regs->fr[i] = pim_narrow->fr[i];
407
408	for (i = 0; i < 8; i++)
409	    regs->sr[i] = pim_narrow->sr[i];
410
411	regs->iasq[0] = pim_narrow->cr[17];
412	regs->iasq[1] = pim_narrow->iasq_back;
413	regs->iaoq[0] = pim_narrow->cr[18];
414	regs->iaoq[1] = pim_narrow->iaoq_back;
415
416	regs->sar  = pim_narrow->cr[11];
417	regs->iir  = pim_narrow->cr[19];
418	regs->isr  = pim_narrow->cr[20];
419	regs->ior  = pim_narrow->cr[21];
420    }
421
422    /*
423     * The following fields only have meaning if we came through
424     * another path. So just zero them here.
425     */
426
427    regs->ksp = 0;
428    regs->kpc = 0;
429    regs->orig_r28 = 0;
430}
431
432
433/*
434 * This routine is called as a last resort when everything else
435 * has gone clearly wrong. We get called for faults in kernel space,
436 * and HPMC's.
437 */
438void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
439{
440	static DEFINE_SPINLOCK(terminate_lock);
441
442	oops_in_progress = 1;
443
444	set_eiem(0);
445	local_irq_disable();
446	spin_lock(&terminate_lock);
447
448	/* unlock the pdc lock if necessary */
449	pdc_emergency_unlock();
450
451	/* restart pdc console if necessary */
452	if (!console_drivers)
453		pdc_console_restart();
454
455	/* Not all paths will gutter the processor... */
456	switch(code){
457
458	case 1:
459		transfer_pim_to_trap_frame(regs);
460		break;
461
462	default:
463		/* Fall through */
464		break;
465
466	}
467	    
468	{
469		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
470		struct unwind_frame_info info;
471		unwind_frame_init(&info, current, regs);
472		do_show_stack(&info);
473	}
474
475	printk("\n");
476	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
477			msg, code, regs, offset);
478	show_regs(regs);
479
480	spin_unlock(&terminate_lock);
481
482	/* put soft power button back under hardware control;
483	 * if the user had pressed it once at any time, the 
484	 * system will shut down immediately right here. */
485	pdc_soft_power_button(0);
486	
487	/* Call kernel panic() so reboot timeouts work properly 
488	 * FIXME: This function should be on the list of
489	 * panic notifiers, and we should call panic
490	 * directly from the location that we wish. 
491	 * e.g. We should not call panic from
492	 * parisc_terminate, but rather the other way around.
493	 * This hack works, prints the panic message twice,
494	 * and it enables reboot timers!
495	 */
496	panic(msg);
497}
498
499void notrace handle_interruption(int code, struct pt_regs *regs)
500{
501	unsigned long fault_address = 0;
502	unsigned long fault_space = 0;
503	struct siginfo si;
504
505	if (code == 1)
506	    pdc_console_restart();  /* switch back to pdc if HPMC */
507	else
508	    local_irq_enable();
509
510	/* Security check:
511	 * If the priority level is still user, and the
512	 * faulting space is not equal to the active space
513	 * then the user is attempting something in a space
514	 * that does not belong to them. Kill the process.
515	 *
516	 * This is normally the situation when the user
517	 * attempts to jump into the kernel space at the
518	 * wrong offset, be it at the gateway page or a
519	 * random location.
520	 *
521	 * We cannot normally signal the process because it
522	 * could *be* on the gateway page, and processes
523	 * executing on the gateway page can't have signals
524	 * delivered.
525	 * 
526	 * We merely readjust the address into the users
527	 * space, at a destination address of zero, and
528	 * allow processing to continue.
529	 */
530	if (((unsigned long)regs->iaoq[0] & 3) &&
531	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) { 
532	  	/* Kill the user process later */
533	  	regs->iaoq[0] = 0 | 3;
534		regs->iaoq[1] = regs->iaoq[0] + 4;
535	 	regs->iasq[0] = regs->iasq[1] = regs->sr[7];
536		regs->gr[0] &= ~PSW_B;
537		return;
538	}
539	
540#if 0
541	printk(KERN_CRIT "Interruption # %d\n", code);
542#endif
543
544	switch(code) {
545
546	case  1:
547		/* High-priority machine check (HPMC) */
548		
549		/* set up a new led state on systems shipped with a LED State panel */
550		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
551		    
552	    	parisc_terminate("High Priority Machine Check (HPMC)",
553				regs, code, 0);
554		/* NOT REACHED */
555		
556	case  2:
557		/* Power failure interrupt */
558		printk(KERN_CRIT "Power failure interrupt !\n");
559		return;
560
561	case  3:
562		/* Recovery counter trap */
563		regs->gr[0] &= ~PSW_R;
564		if (user_space(regs))
565			handle_gdb_break(regs, TRAP_TRACE);
566		/* else this must be the start of a syscall - just let it run */
567		return;
568
569	case  5:
570		/* Low-priority machine check */
571		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
572		
573		flush_cache_all();
574		flush_tlb_all();
575		cpu_lpmc(5, regs);
576		return;
577
578	case  6:
579		/* Instruction TLB miss fault/Instruction page fault */
580		fault_address = regs->iaoq[0];
581		fault_space   = regs->iasq[0];
582		break;
583
584	case  8:
585		/* Illegal instruction trap */
586		die_if_kernel("Illegal instruction", regs, code);
587		si.si_code = ILL_ILLOPC;
588		goto give_sigill;
589
590	case  9:
591		/* Break instruction trap */
592		handle_break(regs);
593		return;
594	
595	case 10:
596		/* Privileged operation trap */
597		die_if_kernel("Privileged operation", regs, code);
598		si.si_code = ILL_PRVOPC;
599		goto give_sigill;
600	
601	case 11:
602		/* Privileged register trap */
603		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
604
605			/* This is a MFCTL cr26/cr27 to gr instruction.
606			 * PCXS traps on this, so we need to emulate it.
607			 */
608
609			if (regs->iir & 0x00200000)
610				regs->gr[regs->iir & 0x1f] = mfctl(27);
611			else
612				regs->gr[regs->iir & 0x1f] = mfctl(26);
613
614			regs->iaoq[0] = regs->iaoq[1];
615			regs->iaoq[1] += 4;
616			regs->iasq[0] = regs->iasq[1];
617			return;
618		}
619
620		die_if_kernel("Privileged register usage", regs, code);
621		si.si_code = ILL_PRVREG;
622	give_sigill:
623		si.si_signo = SIGILL;
624		si.si_errno = 0;
625		si.si_addr = (void __user *) regs->iaoq[0];
626		force_sig_info(SIGILL, &si, current);
627		return;
628
629	case 12:
630		/* Overflow Trap, let the userland signal handler do the cleanup */
631		si.si_signo = SIGFPE;
632		si.si_code = FPE_INTOVF;
633		si.si_addr = (void __user *) regs->iaoq[0];
634		force_sig_info(SIGFPE, &si, current);
635		return;
636		
637	case 13:
638		/* Conditional Trap
639		   The condition succeeds in an instruction which traps
640		   on condition  */
641		if(user_mode(regs)){
642			si.si_signo = SIGFPE;
643			/* Set to zero, and let the userspace app figure it out from
644		   	   the insn pointed to by si_addr */
645			si.si_code = 0;
646			si.si_addr = (void __user *) regs->iaoq[0];
647			force_sig_info(SIGFPE, &si, current);
648			return;
649		} 
650		/* The kernel doesn't want to handle condition codes */
651		break;
652		
653	case 14:
654		/* Assist Exception Trap, i.e. floating point exception. */
655		die_if_kernel("Floating point exception", regs, 0); /* quiet */
656		handle_fpe(regs);
657		return;
658		
659	case 15:
660		/* Data TLB miss fault/Data page fault */
661		/* Fall through */
662	case 16:
663		/* Non-access instruction TLB miss fault */
664		/* The instruction TLB entry needed for the target address of the FIC
665		   is absent, and hardware can't find it, so we get to cleanup */
666		/* Fall through */
667	case 17:
668		/* Non-access data TLB miss fault/Non-access data page fault */
669		/* FIXME: 
670		 	 Still need to add slow path emulation code here!
671		         If the insn used a non-shadow register, then the tlb
672			 handlers could not have their side-effect (e.g. probe
673			 writing to a target register) emulated since rfir would
674			 erase the changes to said register. Instead we have to
675			 setup everything, call this function we are in, and emulate
676			 by hand. Technically we need to emulate:
677			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
678		*/			  
679		fault_address = regs->ior;
680		fault_space = regs->isr;
681		break;
682
683	case 18:
684		/* PCXS only -- later cpu's split this into types 26,27 & 28 */
685		/* Check for unaligned access */
686		if (check_unaligned(regs)) {
687			handle_unaligned(regs);
688			return;
689		}
690		/* Fall Through */
691	case 26: 
692		/* PCXL: Data memory access rights trap */
693		fault_address = regs->ior;
694		fault_space   = regs->isr;
695		break;
696
697	case 19:
698		/* Data memory break trap */
699		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
700		/* fall thru */
701	case 21:
702		/* Page reference trap */
703		handle_gdb_break(regs, TRAP_HWBKPT);
704		return;
705
706	case 25:
707		/* Taken branch trap */
708		regs->gr[0] &= ~PSW_T;
709		if (user_space(regs))
710			handle_gdb_break(regs, TRAP_BRANCH);
711		/* else this must be the start of a syscall - just let it
712		 * run.
713		 */
714		return;
715
716	case  7:  
717		/* Instruction access rights */
718		/* PCXL: Instruction memory protection trap */
719
720		/*
721		 * This could be caused by either: 1) a process attempting
722		 * to execute within a vma that does not have execute
723		 * permission, or 2) an access rights violation caused by a
724		 * flush only translation set up by ptep_get_and_clear().
725		 * So we check the vma permissions to differentiate the two.
726		 * If the vma indicates we have execute permission, then
727		 * the cause is the latter one. In this case, we need to
728		 * call do_page_fault() to fix the problem.
729		 */
730
731		if (user_mode(regs)) {
732			struct vm_area_struct *vma;
733
734			down_read(&current->mm->mmap_sem);
735			vma = find_vma(current->mm,regs->iaoq[0]);
736			if (vma && (regs->iaoq[0] >= vma->vm_start)
737				&& (vma->vm_flags & VM_EXEC)) {
738
739				fault_address = regs->iaoq[0];
740				fault_space = regs->iasq[0];
741
742				up_read(&current->mm->mmap_sem);
743				break; /* call do_page_fault() */
744			}
745			up_read(&current->mm->mmap_sem);
746		}
747		/* Fall Through */
748	case 27: 
749		/* Data memory protection ID trap */
750		if (code == 27 && !user_mode(regs) &&
751			fixup_exception(regs))
752			return;
753
754		die_if_kernel("Protection id trap", regs, code);
755		si.si_code = SEGV_MAPERR;
756		si.si_signo = SIGSEGV;
757		si.si_errno = 0;
758		if (code == 7)
759		    si.si_addr = (void __user *) regs->iaoq[0];
760		else
761		    si.si_addr = (void __user *) regs->ior;
762		force_sig_info(SIGSEGV, &si, current);
763		return;
764
765	case 28: 
766		/* Unaligned data reference trap */
767		handle_unaligned(regs);
768		return;
769
770	default:
771		if (user_mode(regs)) {
772#ifdef PRINT_USER_FAULTS
773			printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
774			    task_pid_nr(current), current->comm);
775			show_regs(regs);
776#endif
777			/* SIGBUS, for lack of a better one. */
778			si.si_signo = SIGBUS;
779			si.si_code = BUS_OBJERR;
780			si.si_errno = 0;
781			si.si_addr = (void __user *) regs->ior;
782			force_sig_info(SIGBUS, &si, current);
783			return;
784		}
785		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
786		
787		parisc_terminate("Unexpected interruption", regs, code, 0);
788		/* NOT REACHED */
789	}
790
791	if (user_mode(regs)) {
792	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
793#ifdef PRINT_USER_FAULTS
794		if (fault_space == 0)
795			printk(KERN_DEBUG "User Fault on Kernel Space ");
796		else
797			printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
798			       code);
799		printk(KERN_CONT "pid=%d command='%s'\n",
800		       task_pid_nr(current), current->comm);
801		show_regs(regs);
802#endif
803		si.si_signo = SIGSEGV;
804		si.si_errno = 0;
805		si.si_code = SEGV_MAPERR;
806		si.si_addr = (void __user *) regs->ior;
807		force_sig_info(SIGSEGV, &si, current);
808		return;
809	    }
810	}
811	else {
812
813	    /*
814	     * The kernel should never fault on its own address space.
815	     */
816
817	    if (fault_space == 0) 
818	    {
819		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
820		parisc_terminate("Kernel Fault", regs, code, fault_address);
821	
822	    }
823	}
824
825	do_page_fault(regs, code, fault_address);
826}
827
828
829int __init check_ivt(void *iva)
830{
831	extern u32 os_hpmc_size;
832	extern const u32 os_hpmc[];
833
834	int i;
835	u32 check = 0;
836	u32 *ivap;
837	u32 *hpmcp;
838	u32 length;
839
840	if (strcmp((char *)iva, "cows can fly"))
841		return -1;
842
843	ivap = (u32 *)iva;
844
845	for (i = 0; i < 8; i++)
846	    *ivap++ = 0;
847
848	/* Compute Checksum for HPMC handler */
849	length = os_hpmc_size;
850	ivap[7] = length;
851
852	hpmcp = (u32 *)os_hpmc;
853
854	for (i=0; i<length/4; i++)
855	    check += *hpmcp++;
856
857	for (i=0; i<8; i++)
858	    check += ivap[i];
859
860	ivap[5] = -check;
861
862	return 0;
863}
864	
865#ifndef CONFIG_64BIT
866extern const void fault_vector_11;
867#endif
868extern const void fault_vector_20;
869
870void __init trap_init(void)
871{
872	void *iva;
873
874	if (boot_cpu_data.cpu_type >= pcxu)
875		iva = (void *) &fault_vector_20;
876	else
877#ifdef CONFIG_64BIT
878		panic("Can't boot 64-bit OS on PA1.1 processor!");
879#else
880		iva = (void *) &fault_vector_11;
881#endif
882
883	if (check_ivt(iva))
884		panic("IVT invalid");
885}