v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/arch/parisc/traps.c
  4 *
  5 *  Copyright (C) 1991, 1992  Linus Torvalds
  6 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
  7 */
  8
  9/*
 10 * 'Traps.c' handles hardware traps and faults after we have saved some
 11 * state in 'asm.s'.
 12 */
 13
 14#include <linux/sched.h>
 15#include <linux/sched/debug.h>
 16#include <linux/kernel.h>
 17#include <linux/string.h>
 18#include <linux/errno.h>
 19#include <linux/ptrace.h>
 20#include <linux/timer.h>
 21#include <linux/delay.h>
 22#include <linux/mm.h>
 23#include <linux/module.h>
 24#include <linux/smp.h>
 25#include <linux/spinlock.h>
 26#include <linux/init.h>
 27#include <linux/interrupt.h>
 28#include <linux/console.h>
 29#include <linux/bug.h>
 30#include <linux/ratelimit.h>
 31#include <linux/uaccess.h>
 32#include <linux/kdebug.h>
 33#include <linux/kfence.h>
 34
 35#include <asm/assembly.h>
 36#include <asm/io.h>
 37#include <asm/irq.h>
 38#include <asm/traps.h>
 39#include <linux/unaligned.h>
 40#include <linux/atomic.h>
 41#include <asm/smp.h>
 42#include <asm/pdc.h>
 43#include <asm/pdc_chassis.h>
 44#include <asm/unwind.h>
 45#include <asm/tlbflush.h>
 46#include <asm/cacheflush.h>
 47#include <linux/kgdb.h>
 48#include <linux/kprobes.h>
 49
 50#include "unaligned.h"
 51
 52#if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)
 53#include <asm/spinlock.h>
 54#endif
 55
 56#include "../math-emu/math-emu.h"	/* for handle_fpe() */
 57
 58static void parisc_show_stack(struct task_struct *task,
 59	struct pt_regs *regs, const char *loglvl);
 60
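/* Render the low 'nbits' bits of 'x' into 'buf' as an ASCII string of '0'/'1'
 * characters, most significant bit first.  Used below to pretty-print the PSW
 * and FPSR bit fields. */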
 61static int printbinary(char *buf, unsigned long x, int nbits)
 62{
 63	unsigned long mask = 1UL << (nbits - 1);
 64	while (mask != 0) {
 65		*buf++ = (mask & x ? '1' : '0');
 66		mask >>= 1;
 67	}
 68	*buf = '\0';
 69
 70	return nbits;
 71}
 72
 73#ifdef CONFIG_64BIT
 74#define RFMT "%016lx"
 75#else
 76#define RFMT "%08lx"
 77#endif
 78#define FFMT "%016llx"	/* fpregs are 64-bit always */
 79
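/* Dump four consecutive registers per output line, e.g. "r00-03  <v> <v> <v> <v>". */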
 80#define PRINTREGS(lvl,r,f,fmt,x)	\
 81	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
 82		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
 83		(r)[(x)+2], (r)[(x)+3])
 84
 85static void print_gr(const char *level, struct pt_regs *regs)
 86{
 87	int i;
 88	char buf[64];
 89
 90	printk("%s\n", level);
 91	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
 92	printbinary(buf, regs->gr[0], 32);
 93	printk("%sPSW: %s %s\n", level, buf, print_tainted());
 94
 95	for (i = 0; i < 32; i += 4)
 96		PRINTREGS(level, regs->gr, "r", RFMT, i);
 97}
 98
 99static void print_fr(const char *level, struct pt_regs *regs)
100{
101	int i;
102	char buf[64];
103	struct { u32 sw[2]; } s;
104
105	/* FR are 64bit everywhere. Need to use asm to get the content
106	 * of fpsr/fper1, and we assume that we won't have a FP Identify
107	 * in our way, otherwise we're screwed.
108	 * The fldd is used to restore the T-bit if there was one, as the
109	 * store clears it anyway.
110	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
111	asm volatile ("fstd %%fr0,0(%1)	\n\t"
112		      "fldd 0(%1),%%fr0	\n\t"
113		      : "=m" (s) : "r" (&s) : "r0");
114
115	printk("%s\n", level);
116	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
117	printbinary(buf, s.sw[0], 32);
118	printk("%sFPSR: %s\n", level, buf);
119	printk("%sFPER1: %08x\n", level, s.sw[1]);
120
121	/* here we'll print fr0 again, tho it'll be meaningless */
122	for (i = 0; i < 32; i += 4)
123		PRINTREGS(level, regs->fr, "fr", FFMT, i);
124}
125
126void show_regs(struct pt_regs *regs)
127{
128	int i, user;
129	const char *level;
130	unsigned long cr30, cr31;
131
132	user = user_mode(regs);
133	level = user ? KERN_DEBUG : KERN_CRIT;
134
135	show_regs_print_info(level);
136
137	print_gr(level, regs);
138
139	for (i = 0; i < 8; i += 4)
140		PRINTREGS(level, regs->sr, "sr", RFMT, i);
141
142	if (user)
143		print_fr(level, regs);
144
145	cr30 = mfctl(30);
146	cr31 = mfctl(31);
147	printk("%s\n", level);
148	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
149	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
150	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
151	       level, regs->iir, regs->isr, regs->ior);
152	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
153	       level, task_cpu(current), cr30, cr31);
154	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
155
156	if (user) {
157		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
158		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
159		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
160	} else {
161		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
162		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
163		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
164
165		parisc_show_stack(current, regs, KERN_DEFAULT);
166	}
167}
168
169static DEFINE_RATELIMIT_STATE(_hppa_rs,
170	DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
171
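/* Rate-limited diagnostic dump: print the message and a register dump only if
 * the fault is critical or show_unhandled_signals is set, and the global
 * ratelimit allows it. */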
172#define parisc_printk_ratelimited(critical, regs, fmt, ...)	{	      \
173	if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
174		printk(fmt, ##__VA_ARGS__);				      \
175		show_regs(regs);					      \
176	}								      \
177}
178
179
180static void do_show_stack(struct unwind_frame_info *info, const char *loglvl)
181{
182	int i = 1;
183
184	printk("%sBacktrace:\n", loglvl);
185	while (i <= MAX_UNWIND_ENTRIES) {
186		if (unwind_once(info) < 0 || info->ip == 0)
187			break;
188
189		if (__kernel_text_address(info->ip)) {
190			printk("%s [<" RFMT ">] %pS\n",
191				loglvl, info->ip, (void *) info->ip);
192			i++;
193		}
194	}
195	printk("%s\n", loglvl);
196}
197
198static void parisc_show_stack(struct task_struct *task,
199	struct pt_regs *regs, const char *loglvl)
200{
201	struct unwind_frame_info info;
202
203	unwind_frame_init_task(&info, task, regs);
204
205	do_show_stack(&info, loglvl);
206}
207
208void show_stack(struct task_struct *t, unsigned long *sp, const char *loglvl)
209{
210	parisc_show_stack(t, NULL, loglvl);
211}
212
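/* Every kernel address is accepted here; report_bug() matches the trapping
 * address against the bug table to decide whether it really is a BUG()/WARN(). */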
213int is_valid_bugaddr(unsigned long iaoq)
214{
215	return 1;
216}
217
218void die_if_kernel(char *str, struct pt_regs *regs, long err)
219{
220	if (user_mode(regs)) {
221		if (err == 0)
222			return; /* STFU */
223
224		parisc_printk_ratelimited(1, regs,
225			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
226			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
227
228		return;
229	}
230
231	bust_spinlocks(1);
232
233	oops_enter();
234
235	/* Amuse the user in a SPARC fashion */
236	if (err) printk(KERN_CRIT
237			"      _______________________________ \n"
238			"     < Your System ate a SPARC! Gah! >\n"
239			"      ------------------------------- \n"
240			"             \\   ^__^\n"
241			"                 (__)\\       )\\/\\\n"
242			"                  U  ||----w |\n"
243			"                     ||     ||\n");
244	
245	/* unlock the pdc lock if necessary */
246	pdc_emergency_unlock();
247
248	if (err)
249		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
250			current->comm, task_pid_nr(current), str, err);
251
252	/* Wot's wrong wif bein' racy? */
253	if (current->thread.flags & PARISC_KERNEL_DEATH) {
254		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
255		local_irq_enable();
256		while (1);
257	}
258	current->thread.flags |= PARISC_KERNEL_DEATH;
259
260	show_regs(regs);
261	dump_stack();
262	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
263
264	if (in_interrupt())
265		panic("Fatal exception in interrupt");
266
267	if (panic_on_oops)
268		panic("Fatal exception");
269
270	oops_exit();
271	make_task_dead(SIGSEGV);
272}
273
274/* gdb uses break 4,8 */
275#define GDB_BREAK_INSN 0x10004
276static void handle_gdb_break(struct pt_regs *regs, int wot)
277{
278	force_sig_fault(SIGTRAP, wot,
279			(void __user *) (regs->iaoq[0] & ~3));
280}
281
282static void handle_break(struct pt_regs *regs)
283{
284	unsigned iir = regs->iir;
285
286	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
287		/* check if a BUG() or WARN() trapped here.  */
288		enum bug_trap_type tt;
289		tt = report_bug(regs->iaoq[0] & ~3, regs);
290		if (tt == BUG_TRAP_TYPE_WARN) {
291			regs->iaoq[0] += 4;
292			regs->iaoq[1] += 4;
293			return; /* return to next instruction when WARN_ON().  */
294		}
295		die_if_kernel("Unknown kernel breakpoint", regs,
296			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
297	}
298
299#ifdef CONFIG_KPROBES
300	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN && !user_mode(regs))) {
301		parisc_kprobe_break_handler(regs);
302		return;
303	}
304	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2 && !user_mode(regs))) {
305		parisc_kprobe_ss_handler(regs);
306		return;
307	}
308#endif
309
310#ifdef CONFIG_KGDB
311	if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
312		iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) {
313		kgdb_handle_exception(9, SIGTRAP, 0, regs);
314		return;
315	}
316#endif
317
318#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
319        if ((iir == SPINLOCK_BREAK_INSN) && !user_mode(regs)) {
320		die_if_kernel("Spinlock was trashed", regs, 1);
321	}
322#endif
323
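	/* Anything other than gdb's "break 4,8" gets logged.  The two fields
	 * printed below are the im5 and im13 immediate operands encoded in
	 * the break instruction held in the IIR. */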
324	if (unlikely(iir != GDB_BREAK_INSN))
325		parisc_printk_ratelimited(0, regs,
326			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
327			iir & 31, (iir>>13) & ((1<<13)-1),
328			task_pid_nr(current), current->comm);
329
330	/* send standard GDB signal */
331	handle_gdb_break(regs, TRAP_BRKPT);
332}
333
334static void default_trap(int code, struct pt_regs *regs)
335{
336	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
337	show_regs(regs);
338}
339
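/* After an HPMC the firmware saves the interrupted register state in its PIM
 * (Processor Internal Memory) area.  Copy it into a pt_regs so the normal
 * register and stack dump code can be used on it. */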
340static void transfer_pim_to_trap_frame(struct pt_regs *regs)
341{
342    register int i;
343    extern unsigned int hpmc_pim_data[];
344    struct pdc_hpmc_pim_11 *pim_narrow;
345    struct pdc_hpmc_pim_20 *pim_wide;
346
347    if (boot_cpu_data.cpu_type >= pcxu) {
348
349	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
350
351	/*
352	 * Note: The following code will probably generate a
353	 * bunch of truncation error warnings from the compiler.
354	 * Could be handled with an ifdef, but perhaps there
355	 * is a better way.
356	 */
357
358	regs->gr[0] = pim_wide->cr[22];
359
360	for (i = 1; i < 32; i++)
361	    regs->gr[i] = pim_wide->gr[i];
362
363	for (i = 0; i < 32; i++)
364	    regs->fr[i] = pim_wide->fr[i];
365
366	for (i = 0; i < 8; i++)
367	    regs->sr[i] = pim_wide->sr[i];
368
369	regs->iasq[0] = pim_wide->cr[17];
370	regs->iasq[1] = pim_wide->iasq_back;
371	regs->iaoq[0] = pim_wide->cr[18];
372	regs->iaoq[1] = pim_wide->iaoq_back;
373
374	regs->sar  = pim_wide->cr[11];
375	regs->iir  = pim_wide->cr[19];
376	regs->isr  = pim_wide->cr[20];
377	regs->ior  = pim_wide->cr[21];
378    }
379    else {
380	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
381
382	regs->gr[0] = pim_narrow->cr[22];
383
384	for (i = 1; i < 32; i++)
385	    regs->gr[i] = pim_narrow->gr[i];
386
387	for (i = 0; i < 32; i++)
388	    regs->fr[i] = pim_narrow->fr[i];
389
390	for (i = 0; i < 8; i++)
391	    regs->sr[i] = pim_narrow->sr[i];
392
393	regs->iasq[0] = pim_narrow->cr[17];
394	regs->iasq[1] = pim_narrow->iasq_back;
395	regs->iaoq[0] = pim_narrow->cr[18];
396	regs->iaoq[1] = pim_narrow->iaoq_back;
397
398	regs->sar  = pim_narrow->cr[11];
399	regs->iir  = pim_narrow->cr[19];
400	regs->isr  = pim_narrow->cr[20];
401	regs->ior  = pim_narrow->cr[21];
402    }
403
404    /*
405     * The following fields only have meaning if we came through
406     * another path. So just zero them here.
407     */
408
409    regs->ksp = 0;
410    regs->kpc = 0;
411    regs->orig_r28 = 0;
412}
413
414
415/*
416 * This routine is called as a last resort when everything else
417 * has gone clearly wrong. We get called for faults in kernel space,
418 * and HPMC's.
419 */
420void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
421{
422	static DEFINE_SPINLOCK(terminate_lock);
423
424	(void)notify_die(DIE_OOPS, msg, regs, 0, code, SIGTRAP);
425	bust_spinlocks(1);
426
427	set_eiem(0);
428	local_irq_disable();
429	spin_lock(&terminate_lock);
430
431	/* unlock the pdc lock if necessary */
432	pdc_emergency_unlock();
433
434	/* Not all paths will gutter the processor... */
435	switch(code){
436
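	/* Code 1 is an HPMC: recover the register state the firmware saved
	 * in PIM so it can be decoded and printed below. */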
437	case 1:
438		transfer_pim_to_trap_frame(regs);
439		break;
440
441	default:
442		break;
443
444	}
445	    
446	{
447		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
448		struct unwind_frame_info info;
449		unwind_frame_init(&info, current, regs);
450		do_show_stack(&info, KERN_CRIT);
451	}
452
453	printk("\n");
454	pr_crit("%s: Code=%d (%s) at addr " RFMT "\n",
455		msg, code, trap_name(code), offset);
456	show_regs(regs);
457
458	spin_unlock(&terminate_lock);
459
460	/* put soft power button back under hardware control;
461	 * if the user had pressed it once at any time, the 
462	 * system will shut down immediately right here. */
463	pdc_soft_power_button(0);
464	
465	/* Call kernel panic() so reboot timeouts work properly 
466	 * FIXME: This function should be on the list of
467	 * panic notifiers, and we should call panic
468	 * directly from the location that we wish. 
469	 * e.g. We should not call panic from
470	 * parisc_terminate, but rather the other way around.
471	 * This hack works, prints the panic message twice,
472	 * and it enables reboot timers!
473	 */
474	panic(msg);
475}
476
477void notrace handle_interruption(int code, struct pt_regs *regs)
478{
479	unsigned long fault_address = 0;
480	unsigned long fault_space = 0;
481	int si_code;
482
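	/* regs->gr[0] holds the PSW at the time of the interruption; only
	 * re-enable interrupts if they were enabled when the trap was taken. */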
483	if (!irqs_disabled_flags(regs->gr[0]))
484	    local_irq_enable();
485
486	/* Security check:
487	 * If the priority level is still user, and the
488	 * faulting space is not equal to the active space
489	 * then the user is attempting something in a space
490	 * that does not belong to them. Kill the process.
491	 *
492	 * This is normally the situation when the user
493	 * attempts to jump into the kernel space at the
494	 * wrong offset, be it at the gateway page or a
495	 * random location.
496	 *
497	 * We cannot normally signal the process because it
498	 * could *be* on the gateway page, and processes
499	 * executing on the gateway page can't have signals
500	 * delivered.
501	 * 
502	 * We merely readjust the address into the users
503	 * space, at a destination address of zero, and
504	 * allow processing to continue.
505	 */
506	if (((unsigned long)regs->iaoq[0] & 3) &&
507	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) { 
508		/* Kill the user process later */
509		regs->iaoq[0] = 0 | PRIV_USER;
510		regs->iaoq[1] = regs->iaoq[0] + 4;
511		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
512		regs->gr[0] &= ~PSW_B;
513		return;
514	}
515	
516#if 0
517	printk(KERN_CRIT "Interruption # %d\n", code);
518#endif
519
520	switch(code) {
521
522	case  1:
523		/* High-priority machine check (HPMC) */
524		
525		/* set up a new led state on systems shipped with a LED State panel */
526		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
527
528		parisc_terminate("High Priority Machine Check (HPMC)",
529				regs, code, 0);
530		/* NOT REACHED */
531		
532	case  2:
533		/* Power failure interrupt */
534		printk(KERN_CRIT "Power failure interrupt !\n");
535		return;
536
537	case  3:
538		/* Recovery counter trap */
539		regs->gr[0] &= ~PSW_R;
540
541#ifdef CONFIG_KGDB
542		if (kgdb_single_step) {
543			kgdb_handle_exception(0, SIGTRAP, 0, regs);
544			return;
545		}
546#endif
547
548		if (user_space(regs))
549			handle_gdb_break(regs, TRAP_TRACE);
550		/* else this must be the start of a syscall - just let it run */
551		return;
552
553	case  5:
554		/* Low-priority machine check */
555		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
556		
557		flush_cache_all();
558		flush_tlb_all();
559		default_trap(code, regs);
560		return;
561
562	case  PARISC_ITLB_TRAP:
563		/* Instruction TLB miss fault/Instruction page fault */
564		fault_address = regs->iaoq[0];
565		fault_space   = regs->iasq[0];
566		break;
567
568	case  8:
569		/* Illegal instruction trap */
570		die_if_kernel("Illegal instruction", regs, code);
571		si_code = ILL_ILLOPC;
572		goto give_sigill;
573
574	case  9:
575		/* Break instruction trap */
576		handle_break(regs);
577		return;
578
579	case 10:
580		/* Privileged operation trap */
581		die_if_kernel("Privileged operation", regs, code);
582		si_code = ILL_PRVOPC;
583		goto give_sigill;
584
585	case 11:
586		/* Privileged register trap */
587		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
588
589			/* This is a MFCTL cr26/cr27 to gr instruction.
590			 * PCXS traps on this, so we need to emulate it.
591			 */
592
593			if (regs->iir & 0x00200000)
594				regs->gr[regs->iir & 0x1f] = mfctl(27);
595			else
596				regs->gr[regs->iir & 0x1f] = mfctl(26);
597
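			/* Step the instruction queues past the emulated mfctl. */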
598			regs->iaoq[0] = regs->iaoq[1];
599			regs->iaoq[1] += 4;
600			regs->iasq[0] = regs->iasq[1];
601			return;
602		}
603
604		die_if_kernel("Privileged register usage", regs, code);
605		si_code = ILL_PRVREG;
606	give_sigill:
607		force_sig_fault(SIGILL, si_code,
608				(void __user *) regs->iaoq[0]);
609		return;
610
611	case 12:
612		/* Overflow Trap, let the userland signal handler do the cleanup */
613		force_sig_fault(SIGFPE, FPE_INTOVF,
614				(void __user *) regs->iaoq[0]);
615		return;
616		
617	case 13:
618		/* Conditional Trap
619		   The condition succeeds in an instruction which traps
620		   on condition  */
621		if(user_mode(regs)){
622			/* Let userspace app figure it out from the insn pointed
623			 * to by si_addr.
624			 */
625			force_sig_fault(SIGFPE, FPE_CONDTRAP,
626					(void __user *) regs->iaoq[0]);
627			return;
628		} 
629		/* The kernel doesn't want to handle condition codes */
630		break;
631		
632	case 14:
633		/* Assist Exception Trap, i.e. floating point exception. */
634		die_if_kernel("Floating point exception", regs, 0); /* quiet */
635		__inc_irq_stat(irq_fpassist_count);
636		handle_fpe(regs);
637		return;
638
639	case 15:
640		/* Data TLB miss fault/Data page fault */
641		fallthrough;
642	case 16:
643		/* Non-access instruction TLB miss fault */
644		/* The instruction TLB entry needed for the target address of the FIC
645		   is absent, and hardware can't find it, so we get to cleanup */
646		fallthrough;
647	case 17:
648		/* Non-access data TLB miss fault/Non-access data page fault */
649		/* FIXME: 
650			 Still need to add slow path emulation code here!
651			 If the insn used a non-shadow register, then the tlb
652			 handlers could not have their side-effect (e.g. probe
653			 writing to a target register) emulated since rfir would
654			 erase the changes to said register. Instead we have to
655			 setup everything, call this function we are in, and emulate
656			 by hand. Technically we need to emulate:
657			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
658		*/
659		if (code == 17 && handle_nadtlb_fault(regs))
660			return;
661		fault_address = regs->ior;
662		fault_space = regs->isr;
663		break;
664
665	case 18:
666		/* PCXS only -- later cpu's split this into types 26,27 & 28 */
667		/* Check for unaligned access */
668		if (check_unaligned(regs)) {
669			handle_unaligned(regs);
670			return;
671		}
672		fallthrough;
673	case 26: 
674		/* PCXL: Data memory access rights trap */
675		fault_address = regs->ior;
676		fault_space   = regs->isr;
677		break;
678
679	case 19:
680		/* Data memory break trap */
681		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
682		fallthrough;
683	case 21:
684		/* Page reference trap */
685		handle_gdb_break(regs, TRAP_HWBKPT);
686		return;
687
688	case 25:
689		/* Taken branch trap */
690		regs->gr[0] &= ~PSW_T;
691		if (user_space(regs))
692			handle_gdb_break(regs, TRAP_BRANCH);
693		/* else this must be the start of a syscall - just let it
694		 * run.
695		 */
696		return;
697
698	case  7:  
699		/* Instruction access rights */
700		/* PCXL: Instruction memory protection trap */
701
702		/*
703		 * This could be caused by either: 1) a process attempting
704		 * to execute within a vma that does not have execute
705		 * permission, or 2) an access rights violation caused by a
706		 * flush only translation set up by ptep_get_and_clear().
707		 * So we check the vma permissions to differentiate the two.
708		 * If the vma indicates we have execute permission, then
709		 * the cause is the latter one. In this case, we need to
710		 * call do_page_fault() to fix the problem.
711		 */
712
713		if (user_mode(regs)) {
714			struct vm_area_struct *vma;
715
716			mmap_read_lock(current->mm);
717			vma = find_vma(current->mm,regs->iaoq[0]);
718			if (vma && (regs->iaoq[0] >= vma->vm_start)
719				&& (vma->vm_flags & VM_EXEC)) {
720
721				fault_address = regs->iaoq[0];
722				fault_space = regs->iasq[0];
723
724				mmap_read_unlock(current->mm);
725				break; /* call do_page_fault() */
726			}
727			mmap_read_unlock(current->mm);
728		}
729		/* CPU could not fetch instruction, so clear stale IIR value. */
730		regs->iir = 0xbaadf00d;
731		fallthrough;
732	case 27: 
733		/* Data memory protection ID trap */
734		if (code == 27 && !user_mode(regs) &&
735			fixup_exception(regs))
736			return;
737
738		die_if_kernel("Protection id trap", regs, code);
739		force_sig_fault(SIGSEGV, SEGV_MAPERR,
740				(code == 7)?
741				((void __user *) regs->iaoq[0]) :
742				((void __user *) regs->ior));
743		return;
744
745	case 28: 
746		/* Unaligned data reference trap */
747		handle_unaligned(regs);
748		return;
749
750	default:
751		if (user_mode(regs)) {
752			parisc_printk_ratelimited(0, regs, KERN_DEBUG
753				"handle_interruption() pid=%d command='%s'\n",
754				task_pid_nr(current), current->comm);
755			/* SIGBUS, for lack of a better one. */
756			force_sig_fault(SIGBUS, BUS_OBJERR,
757					(void __user *)regs->ior);
758			return;
759		}
760		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
761		
762		parisc_terminate("Unexpected interruption", regs, code, 0);
763		/* NOT REACHED */
764	}
765
766	if (user_mode(regs)) {
767	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
768		parisc_printk_ratelimited(0, regs, KERN_DEBUG
769				"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
770				code, fault_space,
771				task_pid_nr(current), current->comm);
772		force_sig_fault(SIGSEGV, SEGV_MAPERR,
773				(void __user *)regs->ior);
774		return;
775	    }
776	}
777	else {
778
779	    /*
780	     * The kernel should never fault on its own address space,
781	     * unless pagefault_disable() was called before.
782	     */
783
784	    if (faulthandler_disabled() || fault_space == 0)
785	    {
786		/* Clean up and return if in exception table. */
787		if (fixup_exception(regs))
788			return;
789		/* Clean up and return if handled by kfence. */
790		if (kfence_handle_page_fault(fault_address,
791			parisc_acctyp(code, regs->iir) == VM_WRITE, regs))
792			return;
793		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
794		parisc_terminate("Kernel Fault", regs, code, fault_address);
795	    }
796	}
797
798	do_page_fault(regs, code, fault_address);
799}
800
801
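/* Install the OS interruption vector table at 'iva': zero the first eight
 * words, then set up the HPMC vector at IVA + 32 with the firmware PDCE_CHECK
 * invocation, the physical address of the os_hpmc handler, and the checksum
 * described below. */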
802static void __init initialize_ivt(const void *iva)
803{
804	extern const u32 os_hpmc[];
805
806	int i;
807	u32 check = 0;
808	u32 *ivap;
809	u32 instr;
810
811	if (strcmp((const char *)iva, "cows can fly"))
812		panic("IVT invalid");
813
814	ivap = (u32 *)iva;
815
816	for (i = 0; i < 8; i++)
817	    *ivap++ = 0;
818
819	/*
820	 * Use PDC_INSTR firmware function to get instruction that invokes
821	 * PDCE_CHECK in HPMC handler.  See programming note at page 1-31 of
822	 * the PA 1.1 Firmware Architecture document.
823	 */
824	if (pdc_instr(&instr) == PDC_OK)
825		ivap[0] = instr;
826
827	/*
828	 * Rules for the checksum of the HPMC handler:
829	 * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed
830	 *    its own IVA).
831	 * 2. The word at IVA + 32 is nonzero.
832	 * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and
833	 *    Address (IVA + 56) are word-aligned.
834	 * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of
835	 *    the Length/4 words starting at Address is zero.
836	 */
837
838	/* Setup IVA and compute checksum for HPMC handler */
839	ivap[6] = (u32)__pa(os_hpmc);
840
841	for (i=0; i<8; i++)
842	    check += ivap[i];
843
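	/* Store the negated sum so that the eight words starting at IVA + 32
	 * checksum to zero, satisfying rule 4 above. */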
844	ivap[5] = -check;
845	pr_debug("initialize_ivt: IVA[6] = 0x%08x\n", ivap[6]);
846}
847	
848
849/* early_trap_init() is called before we set up kernel mappings and
850 * write-protect the kernel */
851void  __init early_trap_init(void)
852{
853	extern const void fault_vector_20;
854
855#ifndef CONFIG_64BIT
856	extern const void fault_vector_11;
857	initialize_ivt(&fault_vector_11);
858#endif
859
860	initialize_ivt(&fault_vector_20);
861}