// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/parisc/traps.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kfence.h>

#include <asm/assembly.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <linux/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <linux/kgdb.h>
#include <linux/kprobes.h>

#include "unaligned.h"

#if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)
#include <asm/spinlock.h>
#endif

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

static void parisc_show_stack(struct task_struct *task,
        struct pt_regs *regs, const char *loglvl);

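/* Render the low @nbits bits of @x as an ASCII '0'/'1' string into @buf
 * (which must hold at least nbits + 1 bytes); used below to pretty-print
 * the PSW and FPSR bit fields. */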
static int printbinary(char *buf, unsigned long x, int nbits)
{
        unsigned long mask = 1UL << (nbits - 1);
        while (mask != 0) {
                *buf++ = (mask & x ? '1' : '0');
                mask >>= 1;
        }
        *buf = '\0';

        return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

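/* Print one line of four consecutive registers, (x) through (x)+3, taken
 * from the array r and labelled with the prefix f ("r", "sr" or "fr"). */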
#define PRINTREGS(lvl,r,f,fmt,x) \
        printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n", \
                lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1], \
                (r)[(x)+2], (r)[(x)+3])

static void print_gr(const char *level, struct pt_regs *regs)
{
        int i;
        char buf[64];

        printk("%s\n", level);
        printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
        printbinary(buf, regs->gr[0], 32);
        printk("%sPSW: %s %s\n", level, buf, print_tainted());

        for (i = 0; i < 32; i += 4)
                PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(const char *level, struct pt_regs *regs)
{
        int i;
        char buf[64];
        struct { u32 sw[2]; } s;

        /* FR are 64bit everywhere. Need to use asm to get the content
         * of fpsr/fper1, and we assume that we won't have a FP Identify
         * in our way, otherwise we're screwed.
         * The fldd is used to restore the T-bit if there was one, as the
         * store clears it anyway.
         * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
        asm volatile ("fstd %%fr0,0(%1) \n\t"
                      "fldd 0(%1),%%fr0 \n\t"
                      : "=m" (s) : "r" (&s) : "r0");

        printk("%s\n", level);
        printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
        printbinary(buf, s.sw[0], 32);
        printk("%sFPSR: %s\n", level, buf);
        printk("%sFPER1: %08x\n", level, s.sw[1]);

        /* here we'll print fr0 again, tho it'll be meaningless */
        for (i = 0; i < 32; i += 4)
                PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
        int i, user;
        const char *level;
        unsigned long cr30, cr31;

        user = user_mode(regs);
        level = user ? KERN_DEBUG : KERN_CRIT;

        show_regs_print_info(level);

        print_gr(level, regs);

        for (i = 0; i < 8; i += 4)
                PRINTREGS(level, regs->sr, "sr", RFMT, i);

        if (user)
                print_fr(level, regs);

        cr30 = mfctl(30);
        cr31 = mfctl(31);
        printk("%s\n", level);
        printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
               level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
        printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
               level, regs->iir, regs->isr, regs->ior);
        printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
               level, task_cpu(current), cr30, cr31);
        printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

        if (user) {
                printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
                printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
                printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
        } else {
                printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
                printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
                printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

                parisc_show_stack(current, regs, KERN_DEFAULT);
        }
}

static DEFINE_RATELIMIT_STATE(_hppa_rs,
        DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

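/* Rate-limited dump of a message plus the full register state.  A non-zero
 * 'critical' argument forces output even when show_unhandled_signals is
 * disabled. */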
#define parisc_printk_ratelimited(critical, regs, fmt, ...)	{ \
	if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
		printk(fmt, ##__VA_ARGS__); \
		show_regs(regs); \
	} \
}


static void do_show_stack(struct unwind_frame_info *info, const char *loglvl)
{
        int i = 1;

        printk("%sBacktrace:\n", loglvl);
        while (i <= MAX_UNWIND_ENTRIES) {
                if (unwind_once(info) < 0 || info->ip == 0)
                        break;

                if (__kernel_text_address(info->ip)) {
                        printk("%s [<" RFMT ">] %pS\n",
                                loglvl, info->ip, (void *) info->ip);
                        i++;
                }
        }
        printk("%s\n", loglvl);
}

static void parisc_show_stack(struct task_struct *task,
        struct pt_regs *regs, const char *loglvl)
{
        struct unwind_frame_info info;

        unwind_frame_init_task(&info, task, regs);

        do_show_stack(&info, loglvl);
}

void show_stack(struct task_struct *t, unsigned long *sp, const char *loglvl)
{
        parisc_show_stack(t, NULL, loglvl);
}

int is_valid_bugaddr(unsigned long iaoq)
{
        return 1;
}

void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
        if (user_mode(regs)) {
                if (err == 0)
                        return; /* STFU */

                parisc_printk_ratelimited(1, regs,
                        KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
                        current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);

                return;
        }

        bust_spinlocks(1);

        oops_enter();

        /* Amuse the user in a SPARC fashion */
        if (err) printk(KERN_CRIT
                        " _______________________________ \n"
                        " < Your System ate a SPARC! Gah! >\n"
                        " ------------------------------- \n"
                        " \\ ^__^\n"
                        " (__)\\ )\\/\\\n"
                        " U ||----w |\n"
                        " || ||\n");

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        if (err)
                printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
                        current->comm, task_pid_nr(current), str, err);

        /* Wot's wrong wif bein' racy? */
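        /* If we die again while already dying, there is no point in trying
         * to oops a second time - just spin with interrupts enabled. */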
        if (current->thread.flags & PARISC_KERNEL_DEATH) {
                printk(KERN_CRIT "%s() recursion detected.\n", __func__);
                local_irq_enable();
                while (1);
        }
        current->thread.flags |= PARISC_KERNEL_DEATH;

        show_regs(regs);
        dump_stack();
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        oops_exit();
        make_task_dead(SIGSEGV);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
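/* The two low-order bits of the IAOQ hold the privilege level of the
 * faulting instruction; mask them off so si_addr is the plain address. */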
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
        force_sig_fault(SIGTRAP, wot,
                        (void __user *) (regs->iaoq[0] & ~3));
}

static void handle_break(struct pt_regs *regs)
{
        unsigned iir = regs->iir;

        if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
                /* check if a BUG() or WARN() trapped here. */
                enum bug_trap_type tt;
                tt = report_bug(regs->iaoq[0] & ~3, regs);
                if (tt == BUG_TRAP_TYPE_WARN) {
                        regs->iaoq[0] += 4;
                        regs->iaoq[1] += 4;
                        return; /* return to next instruction when WARN_ON(). */
                }
                die_if_kernel("Unknown kernel breakpoint", regs,
                        (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
        }

#ifdef CONFIG_KPROBES
        if (unlikely(iir == PARISC_KPROBES_BREAK_INSN && !user_mode(regs))) {
                parisc_kprobe_break_handler(regs);
                return;
        }
        if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2 && !user_mode(regs))) {
                parisc_kprobe_ss_handler(regs);
                return;
        }
#endif

#ifdef CONFIG_KGDB
        if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
                iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) {
                kgdb_handle_exception(9, SIGTRAP, 0, regs);
                return;
        }
#endif

#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
        if ((iir == SPINLOCK_BREAK_INSN) && !user_mode(regs)) {
                die_if_kernel("Spinlock was trashed", regs, 1);
        }
#endif

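        /* Unknown break: log the two immediate operands encoded in the BREAK
         * instruction (extracted from the IIR below) before signalling. */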
        if (unlikely(iir != GDB_BREAK_INSN))
                parisc_printk_ratelimited(0, regs,
                        KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
                        iir & 31, (iir>>13) & ((1<<13)-1),
                        task_pid_nr(current), current->comm);

        /* send standard GDB signal */
        handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
        printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
        show_regs(regs);
}

static void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
        register int i;
        extern unsigned int hpmc_pim_data[];
        struct pdc_hpmc_pim_11 *pim_narrow;
        struct pdc_hpmc_pim_20 *pim_wide;

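        /* Copy the processor state saved by firmware in the PIM area into a
         * normal trap frame.  The control registers read back below are the
         * interruption registers: cr11 = SAR, cr17/cr18 = front elements of
         * IASQ/IAOQ, cr19 = IIR, cr20 = ISR, cr21 = IOR and cr22 = IPSW. */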
        if (boot_cpu_data.cpu_type >= pcxu) {

                pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

                /*
                 * Note: The following code will probably generate a
                 * bunch of truncation error warnings from the compiler.
                 * Could be handled with an ifdef, but perhaps there
                 * is a better way.
                 */

                regs->gr[0] = pim_wide->cr[22];

                for (i = 1; i < 32; i++)
                        regs->gr[i] = pim_wide->gr[i];

                for (i = 0; i < 32; i++)
                        regs->fr[i] = pim_wide->fr[i];

                for (i = 0; i < 8; i++)
                        regs->sr[i] = pim_wide->sr[i];

                regs->iasq[0] = pim_wide->cr[17];
                regs->iasq[1] = pim_wide->iasq_back;
                regs->iaoq[0] = pim_wide->cr[18];
                regs->iaoq[1] = pim_wide->iaoq_back;

                regs->sar = pim_wide->cr[11];
                regs->iir = pim_wide->cr[19];
                regs->isr = pim_wide->cr[20];
                regs->ior = pim_wide->cr[21];
        }
        else {
                pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

                regs->gr[0] = pim_narrow->cr[22];

                for (i = 1; i < 32; i++)
                        regs->gr[i] = pim_narrow->gr[i];

                for (i = 0; i < 32; i++)
                        regs->fr[i] = pim_narrow->fr[i];

                for (i = 0; i < 8; i++)
                        regs->sr[i] = pim_narrow->sr[i];

                regs->iasq[0] = pim_narrow->cr[17];
                regs->iasq[1] = pim_narrow->iasq_back;
                regs->iaoq[0] = pim_narrow->cr[18];
                regs->iaoq[1] = pim_narrow->iaoq_back;

                regs->sar = pim_narrow->cr[11];
                regs->iir = pim_narrow->cr[19];
                regs->isr = pim_narrow->cr[20];
                regs->ior = pim_narrow->cr[21];
        }

        /*
         * The following fields only have meaning if we came through
         * another path. So just zero them here.
         */

        regs->ksp = 0;
        regs->kpc = 0;
        regs->orig_r28 = 0;
}


/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
        static DEFINE_SPINLOCK(terminate_lock);

        (void)notify_die(DIE_OOPS, msg, regs, 0, code, SIGTRAP);
        bust_spinlocks(1);

        set_eiem(0);
        local_irq_disable();
        spin_lock(&terminate_lock);

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        /* Not all paths will gutter the processor... */
        switch(code){

        case 1:
                transfer_pim_to_trap_frame(regs);
                break;

        default:
                break;

        }

        {
                /* show_stack(NULL, (unsigned long *)regs->gr[30]); */
                struct unwind_frame_info info;
                unwind_frame_init(&info, current, regs);
                do_show_stack(&info, KERN_CRIT);
        }

        printk("\n");
        pr_crit("%s: Code=%d (%s) at addr " RFMT "\n",
                msg, code, trap_name(code), offset);
        show_regs(regs);

        spin_unlock(&terminate_lock);

        /* put soft power button back under hardware control;
         * if the user had pressed it once at any time, the
         * system will shut down immediately right here. */
        pdc_soft_power_button(0);

        /* Call kernel panic() so reboot timeouts work properly
         * FIXME: This function should be on the list of
         * panic notifiers, and we should call panic
         * directly from the location that we wish.
         * e.g. We should not call panic from
         * parisc_terminate, but rather the other way around.
         * This hack works, prints the panic message twice,
         * and it enables reboot timers!
         */
        panic(msg);
}

void notrace handle_interruption(int code, struct pt_regs *regs)
{
        unsigned long fault_address = 0;
        unsigned long fault_space = 0;
        int si_code;

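        /* regs->gr[0] holds the interrupted context's PSW; only re-enable
         * interrupts if they were enabled when the interruption was taken. */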
        if (!irqs_disabled_flags(regs->gr[0]))
                local_irq_enable();

        /* Security check:
         * If the priority level is still user, and the
         * faulting space is not equal to the active space,
         * then the user is attempting something in a space
         * that does not belong to them. Kill the process.
         *
         * This is normally the situation when the user
         * attempts to jump into the kernel space at the
         * wrong offset, be it at the gateway page or a
         * random location.
         *
         * We cannot normally signal the process because it
         * could *be* on the gateway page, and processes
         * executing on the gateway page can't have signals
         * delivered.
         *
         * We merely readjust the address into the user's
         * space, at a destination address of zero, and
         * allow processing to continue.
         */
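        /* iaoq[0] & 3 is the privilege level of the front instruction;
         * non-zero means the interruption was taken at user privilege. */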
        if (((unsigned long)regs->iaoq[0] & 3) &&
            ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
                /* Kill the user process later */
                regs->iaoq[0] = 0 | PRIV_USER;
                regs->iaoq[1] = regs->iaoq[0] + 4;
                regs->iasq[0] = regs->iasq[1] = regs->sr[7];
                regs->gr[0] &= ~PSW_B;
                return;
        }

#if 0
        printk(KERN_CRIT "Interruption # %d\n", code);
#endif

        switch(code) {

        case 1:
                /* High-priority machine check (HPMC) */

                /* set up a new led state on systems shipped with a LED State panel */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

                parisc_terminate("High Priority Machine Check (HPMC)",
                                regs, code, 0);
                /* NOT REACHED */

        case 2:
                /* Power failure interrupt */
                printk(KERN_CRIT "Power failure interrupt !\n");
                return;

        case 3:
                /* Recovery counter trap */
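                /* Clear the recovery counter enable bit in the saved PSW so
                 * we do not immediately re-trap on the next instruction. */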
                regs->gr[0] &= ~PSW_R;

#ifdef CONFIG_KGDB
                if (kgdb_single_step) {
                        kgdb_handle_exception(0, SIGTRAP, 0, regs);
                        return;
                }
#endif

                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_TRACE);
                /* else this must be the start of a syscall - just let it run */
                return;

        case 5:
                /* Low-priority machine check */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

                flush_cache_all();
                flush_tlb_all();
                default_trap(code, regs);
                return;

        case PARISC_ITLB_TRAP:
                /* Instruction TLB miss fault/Instruction page fault */
                fault_address = regs->iaoq[0];
                fault_space = regs->iasq[0];
                break;

        case 8:
                /* Illegal instruction trap */
                die_if_kernel("Illegal instruction", regs, code);
                si_code = ILL_ILLOPC;
                goto give_sigill;

        case 9:
                /* Break instruction trap */
                handle_break(regs);
                return;

        case 10:
                /* Privileged operation trap */
                die_if_kernel("Privileged operation", regs, code);
                si_code = ILL_PRVOPC;
                goto give_sigill;

        case 11:
                /* Privileged register trap */
                if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

                        /* This is a MFCTL cr26/cr27 to gr instruction.
                         * PCXS traps on this, so we need to emulate it.
                         */

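                        /* The 0x00200000 bit of the IIR selects cr27 over
                         * cr26; the low five bits name the destination GR.
                         * After emulating, step the instruction address
                         * queue past the trapping instruction. */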
                        if (regs->iir & 0x00200000)
                                regs->gr[regs->iir & 0x1f] = mfctl(27);
                        else
                                regs->gr[regs->iir & 0x1f] = mfctl(26);

                        regs->iaoq[0] = regs->iaoq[1];
                        regs->iaoq[1] += 4;
                        regs->iasq[0] = regs->iasq[1];
                        return;
                }

                die_if_kernel("Privileged register usage", regs, code);
                si_code = ILL_PRVREG;
        give_sigill:
                force_sig_fault(SIGILL, si_code,
                                (void __user *) regs->iaoq[0]);
                return;

        case 12:
                /* Overflow Trap, let the userland signal handler do the cleanup */
                force_sig_fault(SIGFPE, FPE_INTOVF,
                                (void __user *) regs->iaoq[0]);
                return;

        case 13:
                /* Conditional Trap
                   The condition succeeds in an instruction which traps
                   on condition */
                if(user_mode(regs)){
                        /* Let userspace app figure it out from the insn pointed
                         * to by si_addr.
                         */
                        force_sig_fault(SIGFPE, FPE_CONDTRAP,
                                        (void __user *) regs->iaoq[0]);
                        return;
                }
                /* The kernel doesn't want to handle condition codes */
                break;

        case 14:
                /* Assist Exception Trap, i.e. floating point exception. */
                die_if_kernel("Floating point exception", regs, 0); /* quiet */
                __inc_irq_stat(irq_fpassist_count);
                handle_fpe(regs);
                return;

        case 15:
                /* Data TLB miss fault/Data page fault */
                fallthrough;
        case 16:
                /* Non-access instruction TLB miss fault */
                /* The instruction TLB entry needed for the target address of the FIC
                   is absent, and hardware can't find it, so we get to clean up */
                fallthrough;
        case 17:
                /* Non-access data TLB miss fault/Non-access data page fault */
                /* FIXME:
                         Still need to add slow path emulation code here!
                         If the insn used a non-shadow register, then the tlb
                         handlers could not have their side-effect (e.g. probe
                         writing to a target register) emulated since rfir would
                         erase the changes to said register. Instead we have to
                         setup everything, call this function we are in, and emulate
                         by hand. Technically we need to emulate:
                         fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
                */
                if (code == 17 && handle_nadtlb_fault(regs))
                        return;
                fault_address = regs->ior;
                fault_space = regs->isr;
                break;

        case 18:
                /* PCXS only -- later cpu's split this into types 26,27 & 28 */
                /* Check for unaligned access */
                if (check_unaligned(regs)) {
                        handle_unaligned(regs);
                        return;
                }
                fallthrough;
        case 26:
                /* PCXL: Data memory access rights trap */
                fault_address = regs->ior;
                fault_space = regs->isr;
                break;

        case 19:
                /* Data memory break trap */
                regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
                fallthrough;
        case 21:
                /* Page reference trap */
                handle_gdb_break(regs, TRAP_HWBKPT);
                return;

        case 25:
                /* Taken branch trap */
                regs->gr[0] &= ~PSW_T;
                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_BRANCH);
                /* else this must be the start of a syscall - just let it
                 * run.
                 */
                return;

        case 7:
                /* Instruction access rights */
                /* PCXL: Instruction memory protection trap */

                /*
                 * This could be caused by either: 1) a process attempting
                 * to execute within a vma that does not have execute
                 * permission, or 2) an access rights violation caused by a
                 * flush only translation set up by ptep_get_and_clear().
                 * So we check the vma permissions to differentiate the two.
                 * If the vma indicates we have execute permission, then
                 * the cause is the latter one. In this case, we need to
                 * call do_page_fault() to fix the problem.
                 */

                if (user_mode(regs)) {
                        struct vm_area_struct *vma;

                        mmap_read_lock(current->mm);
                        vma = find_vma(current->mm,regs->iaoq[0]);
                        if (vma && (regs->iaoq[0] >= vma->vm_start)
                                && (vma->vm_flags & VM_EXEC)) {

                                fault_address = regs->iaoq[0];
                                fault_space = regs->iasq[0];

                                mmap_read_unlock(current->mm);
                                break; /* call do_page_fault() */
                        }
                        mmap_read_unlock(current->mm);
                }
                /* CPU could not fetch instruction, so clear stale IIR value. */
                regs->iir = 0xbaadf00d;
                fallthrough;
        case 27:
                /* Data memory protection ID trap */
                if (code == 27 && !user_mode(regs) &&
                        fixup_exception(regs))
                        return;

                die_if_kernel("Protection id trap", regs, code);
                force_sig_fault(SIGSEGV, SEGV_MAPERR,
                                (code == 7)?
                                ((void __user *) regs->iaoq[0]) :
                                ((void __user *) regs->ior));
                return;

        case 28:
                /* Unaligned data reference trap */
                handle_unaligned(regs);
                return;

        default:
                if (user_mode(regs)) {
                        parisc_printk_ratelimited(0, regs, KERN_DEBUG
                                "handle_interruption() pid=%d command='%s'\n",
                                task_pid_nr(current), current->comm);
                        /* SIGBUS, for lack of a better one. */
                        force_sig_fault(SIGBUS, BUS_OBJERR,
                                        (void __user *)regs->ior);
                        return;
                }
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

                parisc_terminate("Unexpected interruption", regs, code, 0);
                /* NOT REACHED */
        }

        if (user_mode(regs)) {
                if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
                        parisc_printk_ratelimited(0, regs, KERN_DEBUG
                                "User fault %d on space 0x%08lx, pid=%d command='%s'\n",
                                code, fault_space,
                                task_pid_nr(current), current->comm);
                        force_sig_fault(SIGSEGV, SEGV_MAPERR,
                                        (void __user *)regs->ior);
                        return;
                }
        }
        else {

                /*
                 * The kernel should never fault on its own address space,
                 * unless pagefault_disable() was called before.
                 */

                if (faulthandler_disabled() || fault_space == 0)
                {
                        /* Clean up and return if in exception table. */
                        if (fixup_exception(regs))
                                return;
                        /* Clean up and return if handled by kfence. */
                        if (kfence_handle_page_fault(fault_address,
                                parisc_acctyp(code, regs->iir) == VM_WRITE, regs))
                                return;
                        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
                        parisc_terminate("Kernel Fault", regs, code, fault_address);
                }
        }

        do_page_fault(regs, code, fault_address);
}


static void __init initialize_ivt(const void *iva)
{
        extern const u32 os_hpmc[];

        int i;
        u32 check = 0;
        u32 *ivap;
        u32 instr;

        if (strcmp((const char *)iva, "cows can fly"))
                panic("IVT invalid");

        ivap = (u32 *)iva;

        for (i = 0; i < 8; i++)
                *ivap++ = 0;

        /*
         * Use PDC_INSTR firmware function to get instruction that invokes
         * PDCE_CHECK in HPMC handler. See programming note at page 1-31 of
         * the PA 1.1 Firmware Architecture document.
         */
        if (pdc_instr(&instr) == PDC_OK)
                ivap[0] = instr;

        /*
         * Rules for the checksum of the HPMC handler:
         * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed
         *    its own IVA).
         * 2. The word at IVA + 32 is nonzero.
         * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and
         *    Address (IVA + 56) are word-aligned.
         * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of
         *    the Length/4 words starting at Address is zero.
         */

        /* Setup IVA and compute checksum for HPMC handler */
        ivap[6] = (u32)__pa(os_hpmc);

        for (i=0; i<8; i++)
                check += ivap[i];

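        /* Store the negated sum so the eight words starting at IVA + 32 sum
         * to zero, which is what rule 4 above requires when Length is zero. */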
        ivap[5] = -check;
        pr_debug("initialize_ivt: IVA[6] = 0x%08x\n", ivap[6]);
}


/* early_trap_init() is called before we set up kernel mappings and
 * write-protect the kernel */
void __init early_trap_init(void)
{
        extern const void fault_vector_20;

#ifndef CONFIG_64BIT
        extern const void fault_vector_11;
        initialize_ivt(&fault_vector_11);
#endif

        initialize_ivt(&fault_vector_20);
}