Loading...
1/*
2 * linux/arch/parisc/traps.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
6 */
7
8/*
9 * 'Traps.c' handles hardware traps and faults after we have saved some
10 * state in 'asm.s'.
11 */
12
13#include <linux/sched.h>
14#include <linux/kernel.h>
15#include <linux/string.h>
16#include <linux/errno.h>
17#include <linux/ptrace.h>
18#include <linux/timer.h>
19#include <linux/delay.h>
20#include <linux/mm.h>
21#include <linux/module.h>
22#include <linux/smp.h>
23#include <linux/spinlock.h>
24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/console.h>
27#include <linux/bug.h>
28#include <linux/ratelimit.h>
29#include <linux/uaccess.h>
30
31#include <asm/assembly.h>
32#include <asm/io.h>
33#include <asm/irq.h>
34#include <asm/traps.h>
35#include <asm/unaligned.h>
36#include <linux/atomic.h>
37#include <asm/smp.h>
38#include <asm/pdc.h>
39#include <asm/pdc_chassis.h>
40#include <asm/unwind.h>
41#include <asm/tlbflush.h>
42#include <asm/cacheflush.h>
43
44#include "../math-emu/math-emu.h" /* for handle_fpe() */
45
46static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
47 struct pt_regs *regs);
48
/*
 * Render the low 'nbits' bits of 'x' into 'buf' as ASCII '0'/'1',
 * most-significant bit first.  'buf' must have room for nbits + 1
 * bytes; a trailing NUL is always written.  Returns nbits.
 */
static int printbinary(char *buf, unsigned long x, int nbits)
{
	int bit;

	for (bit = nbits - 1; bit >= 0; bit--)
		*buf++ = ((x >> bit) & 1) ? '1' : '0';
	*buf = '\0';

	return nbits;
}
60
61#ifdef CONFIG_64BIT
62#define RFMT "%016lx"
63#else
64#define RFMT "%08lx"
65#endif
66#define FFMT "%016llx" /* fpregs are 64-bit always */
67
68#define PRINTREGS(lvl,r,f,fmt,x) \
69 printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n", \
70 lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1], \
71 (r)[(x)+2], (r)[(x)+3])
72
/*
 * Dump the PSW bit string and all 32 general registers of a trap
 * frame at the given printk level.  The header line names the PSW
 * bits that the printbinary() output lines up under.
 */
static void print_gr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];	/* 32-char PSW bit string + NUL */

	printk("%s\n", level);
	printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);	/* gr[0] carries the saved PSW in pt_regs */
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}
86
/*
 * Dump FPSR/FPER1 and all 32 floating point registers of a trap
 * frame.  The status words are read live from fr0 via inline asm
 * (see the comment below); the register values come from pt_regs.
 */
static void print_fr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];	/* 32-char FPSR bit string + NUL */
	struct { u32 sw[2]; } s;	/* sw[0] = FPSR, sw[1] = FPER1 after the fstd */

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1) \n\t"
		      "fldd 0(%1),%%fr0 \n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}
113
/*
 * Arch implementation of show_regs(): dump a complete trap frame.
 * User-mode frames are printed at KERN_DEBUG and include the FP
 * state; kernel-mode frames go out at KERN_CRIT with symbolized
 * IAOQ/RP values and a backtrace appended.
 */
void show_regs(struct pt_regs *regs)
{
	int i, user;
	char *level;
	unsigned long cr30, cr31;

	user = user_mode(regs);
	level = user ? KERN_DEBUG : KERN_CRIT;

	show_regs_print_info(level);

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user)
		print_fr(level, regs);

	/* cr30/cr31 are read live from the CPU, not from the saved frame */
	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
	       level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

	if (user) {
		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
	} else {
		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

		parisc_show_stack(current, NULL, regs);
	}
}
156
157static DEFINE_RATELIMIT_STATE(_hppa_rs,
158 DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
159
160#define parisc_printk_ratelimited(critical, regs, fmt, ...) { \
161 if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
162 printk(fmt, ##__VA_ARGS__); \
163 show_regs(regs); \
164 } \
165}
166
167
168static void do_show_stack(struct unwind_frame_info *info)
169{
170 int i = 1;
171
172 printk(KERN_CRIT "Backtrace:\n");
173 while (i <= 16) {
174 if (unwind_once(info) < 0 || info->ip == 0)
175 break;
176
177 if (__kernel_text_address(info->ip)) {
178 printk(KERN_CRIT " [<" RFMT ">] %pS\n",
179 info->ip, (void *) info->ip);
180 i++;
181 }
182 }
183 printk(KERN_CRIT "\n");
184}
185
/*
 * Initialise an unwind context for 'task' and print its backtrace.
 * Three cases:
 *  - regs supplied: unwind from that trap frame;
 *  - task is current (no regs): synthesize a pt_regs on the spot
 *    from the live stack pointer and return address, so unwinding
 *    starts right here;
 *  - any other (blocked) task: unwind from its saved switch frame.
 * 'sp' is accepted for interface compatibility but is unused.
 */
static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs)
{
	struct unwind_frame_info info;
	struct task_struct *t;

	t = task ? task : current;
	if (regs) {
		unwind_frame_init(&info, t, regs);
		goto show_stack;
	}

	if (t == current) {
		unsigned long sp;

HERE:
		/* r30 is the parisc stack pointer */
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		{
			struct pt_regs r;

			memset(&r, 0, sizeof(struct pt_regs));
			/* fake a frame: label address as IAOQ, our caller as RP */
			r.iaoq[0] = (unsigned long)&&HERE;
			r.gr[2] = (unsigned long)__builtin_return_address(0);
			r.gr[30] = sp;

			unwind_frame_init(&info, current, &r);
		}
	} else {
		unwind_frame_init_from_blocked_task(&info, t);
	}

show_stack:
	do_show_stack(&info);
}
220
221void show_stack(struct task_struct *t, unsigned long *sp)
222{
223 return parisc_show_stack(t, sp, NULL);
224}
225
/*
 * Used by report_bug(): on parisc any iaoq value is accepted as a
 * potential BUG() site, so always report a match.
 */
int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}
230
/*
 * Report a trap that is fatal if it happened in kernel mode.
 *
 * User mode: nothing fatal to the kernel happened; rate-limit a
 * diagnostic line (suppressed entirely when err == 0) and return so
 * the caller can deliver a signal.
 * Kernel mode: this is an oops - dump state, taint, and kill the
 * task (or panic if in interrupt context / panic_on_oops).
 */
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		parisc_printk_ratelimited(1, regs,
			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);

		return;
	}

	oops_in_progress = 1;	/* let printk bypass console locking games */

	oops_enter();

	/* Amuse the user in a SPARC fashion */
	if (err) printk(KERN_CRIT
			" _______________________________ \n"
			" < Your System ate a SPARC! Gah! >\n"
			" ------------------------------- \n"
			" \\ ^__^\n"
			" (__)\\ )\\/\\\n"
			" U ||----w |\n"
			" || ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, task_pid_nr(current), str, err);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		/* dying while dying: stop here rather than recurse forever */
		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(SIGSEGV);
}
293
294/* gdb uses break 4,8 */
295#define GDB_BREAK_INSN 0x10004
296static void handle_gdb_break(struct pt_regs *regs, int wot)
297{
298 struct siginfo si;
299
300 si.si_signo = SIGTRAP;
301 si.si_errno = 0;
302 si.si_code = wot;
303 si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
304 force_sig_info(SIGTRAP, &si, current);
305}
306
/*
 * Break-instruction trap (interruption code 9).  Kernel-mode
 * PARISC_BUG_BREAK_INSN is routed to report_bug() for BUG()/WARN()
 * handling; everything else ends up as a SIGTRAP to current.  The
 * break immediates are decoded from IIR for the diagnostic line.
 */
static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here. */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3, regs);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON(). */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

	if (unlikely(iir != GDB_BREAK_INSN))
		parisc_printk_ratelimited(0, regs,
			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			task_pid_nr(current), current->comm);

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}
333
334static void default_trap(int code, struct pt_regs *regs)
335{
336 printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
337 show_regs(regs);
338}
339
340void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
341
342
/*
 * Rebuild a struct pt_regs from the Processor Internal Memory (PIM)
 * image that firmware saved at HPMC time, so the normal dump and
 * terminate paths can operate on it.  Wide (PA2.0, cpu_type >= pcxu)
 * and narrow (PA1.1) PIM layouts are handled separately.
 * NOTE(review): the cr[] index meanings (cr17/18 = IA queues,
 * cr19-21 = IIR/ISR/IOR, cr22 = PSW) follow the PDC PIM layout -
 * confirm against the PA-RISC firmware reference.
 */
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
	register int i;
	extern unsigned int hpmc_pim_data[];
	struct pdc_hpmc_pim_11 *pim_narrow;
	struct pdc_hpmc_pim_20 *pim_wide;

	if (boot_cpu_data.cpu_type >= pcxu) {

		pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

		/*
		 * Note: The following code will probably generate a
		 * bunch of truncation error warnings from the compiler.
		 * Could be handled with an ifdef, but perhaps there
		 * is a better way.
		 */

		regs->gr[0] = pim_wide->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_wide->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_wide->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_wide->sr[i];

		regs->iasq[0] = pim_wide->cr[17];
		regs->iasq[1] = pim_wide->iasq_back;
		regs->iaoq[0] = pim_wide->cr[18];
		regs->iaoq[1] = pim_wide->iaoq_back;

		regs->sar = pim_wide->cr[11];
		regs->iir = pim_wide->cr[19];
		regs->isr = pim_wide->cr[20];
		regs->ior = pim_wide->cr[21];
	}
	else {
		pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

		regs->gr[0] = pim_narrow->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_narrow->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_narrow->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_narrow->sr[i];

		regs->iasq[0] = pim_narrow->cr[17];
		regs->iasq[1] = pim_narrow->iasq_back;
		regs->iaoq[0] = pim_narrow->cr[18];
		regs->iaoq[1] = pim_narrow->iaoq_back;

		regs->sar = pim_narrow->cr[11];
		regs->iir = pim_narrow->cr[19];
		regs->isr = pim_narrow->cr[20];
		regs->ior = pim_narrow->cr[21];
	}

	/*
	 * The following fields only have meaning if we came through
	 * another path. So just zero them here.
	 */

	regs->ksp = 0;
	regs->kpc = 0;
	regs->orig_r28 = 0;
}
416
417
/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	oops_in_progress = 1;

	set_eiem(0);	/* mask all external interrupts */
	local_irq_disable();
	spin_lock(&terminate_lock);	/* serialize concurrent terminations */

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all paths will gutter the processor... */
	switch(code){

	case 1:
		/* HPMC: the real registers live in the firmware PIM
		 * area, not in the passed-in frame */
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		/* Fall through */
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info);
	}

	printk("\n");
	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
			msg, code, regs, offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Call kernel panic() so reboot timeouts work properly
	 * FIXME: This function should be on the list of
	 * panic notifiers, and we should call panic
	 * directly from the location that we wish.
	 * e.g. We should not call panic from
	 * parisc_terminate, but rather the other way around.
	 * This hack works, prints the panic message twice,
	 * and it enables reboot timers!
	 */
	panic(msg);
}
483
484void notrace handle_interruption(int code, struct pt_regs *regs)
485{
486 unsigned long fault_address = 0;
487 unsigned long fault_space = 0;
488 struct siginfo si;
489
490 if (code == 1)
491 pdc_console_restart(); /* switch back to pdc if HPMC */
492 else
493 local_irq_enable();
494
495 /* Security check:
496 * If the priority level is still user, and the
497 * faulting space is not equal to the active space
498 * then the user is attempting something in a space
499 * that does not belong to them. Kill the process.
500 *
501 * This is normally the situation when the user
502 * attempts to jump into the kernel space at the
503 * wrong offset, be it at the gateway page or a
504 * random location.
505 *
506 * We cannot normally signal the process because it
507 * could *be* on the gateway page, and processes
508 * executing on the gateway page can't have signals
509 * delivered.
510 *
511 * We merely readjust the address into the users
512 * space, at a destination address of zero, and
513 * allow processing to continue.
514 */
515 if (((unsigned long)regs->iaoq[0] & 3) &&
516 ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
517 /* Kill the user process later */
518 regs->iaoq[0] = 0 | 3;
519 regs->iaoq[1] = regs->iaoq[0] + 4;
520 regs->iasq[0] = regs->iasq[1] = regs->sr[7];
521 regs->gr[0] &= ~PSW_B;
522 return;
523 }
524
525#if 0
526 printk(KERN_CRIT "Interruption # %d\n", code);
527#endif
528
529 switch(code) {
530
531 case 1:
532 /* High-priority machine check (HPMC) */
533
534 /* set up a new led state on systems shipped with a LED State panel */
535 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
536
537 parisc_terminate("High Priority Machine Check (HPMC)",
538 regs, code, 0);
539 /* NOT REACHED */
540
541 case 2:
542 /* Power failure interrupt */
543 printk(KERN_CRIT "Power failure interrupt !\n");
544 return;
545
546 case 3:
547 /* Recovery counter trap */
548 regs->gr[0] &= ~PSW_R;
549 if (user_space(regs))
550 handle_gdb_break(regs, TRAP_TRACE);
551 /* else this must be the start of a syscall - just let it run */
552 return;
553
554 case 5:
555 /* Low-priority machine check */
556 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
557
558 flush_cache_all();
559 flush_tlb_all();
560 cpu_lpmc(5, regs);
561 return;
562
563 case 6:
564 /* Instruction TLB miss fault/Instruction page fault */
565 fault_address = regs->iaoq[0];
566 fault_space = regs->iasq[0];
567 break;
568
569 case 8:
570 /* Illegal instruction trap */
571 die_if_kernel("Illegal instruction", regs, code);
572 si.si_code = ILL_ILLOPC;
573 goto give_sigill;
574
575 case 9:
576 /* Break instruction trap */
577 handle_break(regs);
578 return;
579
580 case 10:
581 /* Privileged operation trap */
582 die_if_kernel("Privileged operation", regs, code);
583 si.si_code = ILL_PRVOPC;
584 goto give_sigill;
585
586 case 11:
587 /* Privileged register trap */
588 if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
589
590 /* This is a MFCTL cr26/cr27 to gr instruction.
591 * PCXS traps on this, so we need to emulate it.
592 */
593
594 if (regs->iir & 0x00200000)
595 regs->gr[regs->iir & 0x1f] = mfctl(27);
596 else
597 regs->gr[regs->iir & 0x1f] = mfctl(26);
598
599 regs->iaoq[0] = regs->iaoq[1];
600 regs->iaoq[1] += 4;
601 regs->iasq[0] = regs->iasq[1];
602 return;
603 }
604
605 die_if_kernel("Privileged register usage", regs, code);
606 si.si_code = ILL_PRVREG;
607 give_sigill:
608 si.si_signo = SIGILL;
609 si.si_errno = 0;
610 si.si_addr = (void __user *) regs->iaoq[0];
611 force_sig_info(SIGILL, &si, current);
612 return;
613
614 case 12:
615 /* Overflow Trap, let the userland signal handler do the cleanup */
616 si.si_signo = SIGFPE;
617 si.si_code = FPE_INTOVF;
618 si.si_addr = (void __user *) regs->iaoq[0];
619 force_sig_info(SIGFPE, &si, current);
620 return;
621
622 case 13:
623 /* Conditional Trap
624 The condition succeeds in an instruction which traps
625 on condition */
626 if(user_mode(regs)){
627 si.si_signo = SIGFPE;
628 /* Set to zero, and let the userspace app figure it out from
629 the insn pointed to by si_addr */
630 si.si_code = 0;
631 si.si_addr = (void __user *) regs->iaoq[0];
632 force_sig_info(SIGFPE, &si, current);
633 return;
634 }
635 /* The kernel doesn't want to handle condition codes */
636 break;
637
638 case 14:
639 /* Assist Exception Trap, i.e. floating point exception. */
640 die_if_kernel("Floating point exception", regs, 0); /* quiet */
641 __inc_irq_stat(irq_fpassist_count);
642 handle_fpe(regs);
643 return;
644
645 case 15:
646 /* Data TLB miss fault/Data page fault */
647 /* Fall through */
648 case 16:
649 /* Non-access instruction TLB miss fault */
650 /* The instruction TLB entry needed for the target address of the FIC
651 is absent, and hardware can't find it, so we get to cleanup */
652 /* Fall through */
653 case 17:
654 /* Non-access data TLB miss fault/Non-access data page fault */
655 /* FIXME:
656 Still need to add slow path emulation code here!
657 If the insn used a non-shadow register, then the tlb
658 handlers could not have their side-effect (e.g. probe
659 writing to a target register) emulated since rfir would
660 erase the changes to said register. Instead we have to
661 setup everything, call this function we are in, and emulate
662 by hand. Technically we need to emulate:
663 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
664 */
665 fault_address = regs->ior;
666 fault_space = regs->isr;
667 break;
668
669 case 18:
670 /* PCXS only -- later cpu's split this into types 26,27 & 28 */
671 /* Check for unaligned access */
672 if (check_unaligned(regs)) {
673 handle_unaligned(regs);
674 return;
675 }
676 /* Fall Through */
677 case 26:
678 /* PCXL: Data memory access rights trap */
679 fault_address = regs->ior;
680 fault_space = regs->isr;
681 break;
682
683 case 19:
684 /* Data memory break trap */
685 regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
686 /* fall thru */
687 case 21:
688 /* Page reference trap */
689 handle_gdb_break(regs, TRAP_HWBKPT);
690 return;
691
692 case 25:
693 /* Taken branch trap */
694 regs->gr[0] &= ~PSW_T;
695 if (user_space(regs))
696 handle_gdb_break(regs, TRAP_BRANCH);
697 /* else this must be the start of a syscall - just let it
698 * run.
699 */
700 return;
701
702 case 7:
703 /* Instruction access rights */
704 /* PCXL: Instruction memory protection trap */
705
706 /*
707 * This could be caused by either: 1) a process attempting
708 * to execute within a vma that does not have execute
709 * permission, or 2) an access rights violation caused by a
710 * flush only translation set up by ptep_get_and_clear().
711 * So we check the vma permissions to differentiate the two.
712 * If the vma indicates we have execute permission, then
713 * the cause is the latter one. In this case, we need to
714 * call do_page_fault() to fix the problem.
715 */
716
717 if (user_mode(regs)) {
718 struct vm_area_struct *vma;
719
720 down_read(¤t->mm->mmap_sem);
721 vma = find_vma(current->mm,regs->iaoq[0]);
722 if (vma && (regs->iaoq[0] >= vma->vm_start)
723 && (vma->vm_flags & VM_EXEC)) {
724
725 fault_address = regs->iaoq[0];
726 fault_space = regs->iasq[0];
727
728 up_read(¤t->mm->mmap_sem);
729 break; /* call do_page_fault() */
730 }
731 up_read(¤t->mm->mmap_sem);
732 }
733 /* Fall Through */
734 case 27:
735 /* Data memory protection ID trap */
736 if (code == 27 && !user_mode(regs) &&
737 fixup_exception(regs))
738 return;
739
740 die_if_kernel("Protection id trap", regs, code);
741 si.si_code = SEGV_MAPERR;
742 si.si_signo = SIGSEGV;
743 si.si_errno = 0;
744 if (code == 7)
745 si.si_addr = (void __user *) regs->iaoq[0];
746 else
747 si.si_addr = (void __user *) regs->ior;
748 force_sig_info(SIGSEGV, &si, current);
749 return;
750
751 case 28:
752 /* Unaligned data reference trap */
753 handle_unaligned(regs);
754 return;
755
756 default:
757 if (user_mode(regs)) {
758 parisc_printk_ratelimited(0, regs, KERN_DEBUG
759 "handle_interruption() pid=%d command='%s'\n",
760 task_pid_nr(current), current->comm);
761 /* SIGBUS, for lack of a better one. */
762 si.si_signo = SIGBUS;
763 si.si_code = BUS_OBJERR;
764 si.si_errno = 0;
765 si.si_addr = (void __user *) regs->ior;
766 force_sig_info(SIGBUS, &si, current);
767 return;
768 }
769 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
770
771 parisc_terminate("Unexpected interruption", regs, code, 0);
772 /* NOT REACHED */
773 }
774
775 if (user_mode(regs)) {
776 if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
777 parisc_printk_ratelimited(0, regs, KERN_DEBUG
778 "User fault %d on space 0x%08lx, pid=%d command='%s'\n",
779 code, fault_space,
780 task_pid_nr(current), current->comm);
781 si.si_signo = SIGSEGV;
782 si.si_errno = 0;
783 si.si_code = SEGV_MAPERR;
784 si.si_addr = (void __user *) regs->ior;
785 force_sig_info(SIGSEGV, &si, current);
786 return;
787 }
788 }
789 else {
790
791 /*
792 * The kernel should never fault on its own address space,
793 * unless pagefault_disable() was called before.
794 */
795
796 if (fault_space == 0 && !faulthandler_disabled())
797 {
798 /* Clean up and return if in exception table. */
799 if (fixup_exception(regs))
800 return;
801 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
802 parisc_terminate("Kernel Fault", regs, code, fault_address);
803 }
804 }
805
806 do_page_fault(regs, code, fault_address);
807}
808
809
/*
 * Install the interruption vector table at 'iva' and patch in the
 * HPMC handler length and checksum words that firmware expects.
 * Panics if the vector area does not carry the expected magic
 * string placed there by the assembly source.
 */
void __init initialize_ivt(const void *iva)
{
	extern u32 os_hpmc_size;
	extern const u32 os_hpmc[];

	int i;
	u32 check = 0;
	u32 *ivap;
	u32 *hpmcp;
	u32 length;

	if (strcmp((const char *)iva, "cows can fly"))
		panic("IVT invalid");

	ivap = (u32 *)iva;

	/* zero the first eight words; note ivap is left pointing just
	 * past them, so the indexed stores below land in the block
	 * that follows */
	for (i = 0; i < 8; i++)
		*ivap++ = 0;

	/* Compute Checksum for HPMC handler */
	length = os_hpmc_size;
	ivap[7] = length;

	hpmcp = (u32 *)os_hpmc;

	for (i=0; i<length/4; i++)
		check += *hpmcp++;

	for (i=0; i<8; i++)
		check += ivap[i];

	/* store the negated sum so the words checksum to zero;
	 * ivap[5] itself was still zero when summed above */
	ivap[5] = -check;
}
843
844
/* early_trap_init() is called before we set up kernel mappings and
 * write-protect the kernel */
void __init early_trap_init(void)
{
	extern const void fault_vector_20;

#ifndef CONFIG_64BIT
	extern const void fault_vector_11;
	/* 32-bit kernels may also run on PA1.1 CPUs: install that
	 * vector table too */
	initialize_ivt(&fault_vector_11);
#endif

	initialize_ivt(&fault_vector_20);
}
858
/* Nothing left to do at generic trap_init() time on parisc: the
 * vector tables were already installed by early_trap_init(). */
void __init trap_init(void)
{
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/arch/parisc/traps.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
7 */
8
9/*
10 * 'Traps.c' handles hardware traps and faults after we have saved some
11 * state in 'asm.s'.
12 */
13
14#include <linux/sched.h>
15#include <linux/sched/debug.h>
16#include <linux/kernel.h>
17#include <linux/string.h>
18#include <linux/errno.h>
19#include <linux/ptrace.h>
20#include <linux/timer.h>
21#include <linux/delay.h>
22#include <linux/mm.h>
23#include <linux/module.h>
24#include <linux/smp.h>
25#include <linux/spinlock.h>
26#include <linux/init.h>
27#include <linux/interrupt.h>
28#include <linux/console.h>
29#include <linux/bug.h>
30#include <linux/ratelimit.h>
31#include <linux/uaccess.h>
32#include <linux/kdebug.h>
33
34#include <asm/assembly.h>
35#include <asm/io.h>
36#include <asm/irq.h>
37#include <asm/traps.h>
38#include <asm/unaligned.h>
39#include <linux/atomic.h>
40#include <asm/smp.h>
41#include <asm/pdc.h>
42#include <asm/pdc_chassis.h>
43#include <asm/unwind.h>
44#include <asm/tlbflush.h>
45#include <asm/cacheflush.h>
46#include <linux/kgdb.h>
47#include <linux/kprobes.h>
48
49#include "../math-emu/math-emu.h" /* for handle_fpe() */
50
51static void parisc_show_stack(struct task_struct *task,
52 struct pt_regs *regs, const char *loglvl);
53
/*
 * Render the low 'nbits' bits of 'x' into 'buf' as ASCII '0'/'1',
 * most-significant bit first.  'buf' must have room for nbits + 1
 * bytes; a trailing NUL is always written.  Returns nbits.
 */
static int printbinary(char *buf, unsigned long x, int nbits)
{
	int bit;

	for (bit = nbits - 1; bit >= 0; bit--)
		*buf++ = ((x >> bit) & 1) ? '1' : '0';
	*buf = '\0';

	return nbits;
}
65
66#ifdef CONFIG_64BIT
67#define RFMT "%016lx"
68#else
69#define RFMT "%08lx"
70#endif
71#define FFMT "%016llx" /* fpregs are 64-bit always */
72
73#define PRINTREGS(lvl,r,f,fmt,x) \
74 printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n", \
75 lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1], \
76 (r)[(x)+2], (r)[(x)+3])
77
/*
 * Dump the PSW bit string and all 32 general registers of a trap
 * frame at the given printk level.  The header line names the PSW
 * bits that the printbinary() output lines up under.
 */
static void print_gr(const char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];	/* 32-char PSW bit string + NUL */

	printk("%s\n", level);
	printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);	/* gr[0] carries the saved PSW in pt_regs */
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}
91
/*
 * Dump FPSR/FPER1 and all 32 floating point registers of a trap
 * frame.  The status words are read live from fr0 via inline asm
 * (see the comment below); the register values come from pt_regs.
 */
static void print_fr(const char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];	/* 32-char FPSR bit string + NUL */
	struct { u32 sw[2]; } s;	/* sw[0] = FPSR, sw[1] = FPER1 after the fstd */

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1) \n\t"
		      "fldd 0(%1),%%fr0 \n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}
118
/*
 * Arch implementation of show_regs(): dump a complete trap frame.
 * User-mode frames are printed at KERN_DEBUG and include the FP
 * state; kernel-mode frames go out at KERN_CRIT with symbolized
 * IAOQ/RP values and a backtrace appended.
 */
void show_regs(struct pt_regs *regs)
{
	int i, user;
	const char *level;
	unsigned long cr30, cr31;

	user = user_mode(regs);
	level = user ? KERN_DEBUG : KERN_CRIT;

	show_regs_print_info(level);

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user)
		print_fr(level, regs);

	/* cr30/cr31 are read live from the CPU, not from the saved frame */
	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
	       level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

	if (user) {
		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
	} else {
		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

		parisc_show_stack(current, regs, KERN_DEFAULT);
	}
}
161
162static DEFINE_RATELIMIT_STATE(_hppa_rs,
163 DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
164
165#define parisc_printk_ratelimited(critical, regs, fmt, ...) { \
166 if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
167 printk(fmt, ##__VA_ARGS__); \
168 show_regs(regs); \
169 } \
170}
171
172
/*
 * Walk the unwind frames in 'info' and print up to
 * MAX_UNWIND_ENTRIES kernel-text return addresses as a backtrace at
 * the given printk log level.  Non-kernel-text frames are unwound
 * past without being counted or printed.
 */
static void do_show_stack(struct unwind_frame_info *info, const char *loglvl)
{
	int i = 1;

	printk("%sBacktrace:\n", loglvl);
	while (i <= MAX_UNWIND_ENTRIES) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;

		if (__kernel_text_address(info->ip)) {
			printk("%s [<" RFMT ">] %pS\n",
				loglvl, info->ip, (void *) info->ip);
			i++;
		}
	}
	printk("%s\n", loglvl);
}
190
/*
 * Print a backtrace for 'task' at log level 'loglvl'.  The unwind
 * context setup (from 'regs' if given, otherwise from the task's
 * saved state) is delegated to unwind_frame_init_task().
 */
static void parisc_show_stack(struct task_struct *task,
	struct pt_regs *regs, const char *loglvl)
{
	struct unwind_frame_info info;

	unwind_frame_init_task(&info, task, regs);

	do_show_stack(&info, loglvl);
}
200
/*
 * Arch hook for the generic show_stack() interface: delegate to the
 * unwinder-based parisc backtrace.  'sp' is unused - the unwinder
 * locates the stack itself.
 */
void show_stack(struct task_struct *t, unsigned long *sp, const char *loglvl)
{
	parisc_show_stack(t, NULL, loglvl);
}
205
/*
 * Used by report_bug(): on parisc any iaoq value is accepted as a
 * potential BUG() site, so always report a match.
 */
int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}
210
/*
 * Report a trap that is fatal if it happened in kernel mode.
 *
 * User mode: nothing fatal to the kernel happened; rate-limit a
 * diagnostic line (suppressed entirely when err == 0) and return so
 * the caller can deliver a signal.
 * Kernel mode: this is an oops - dump state, taint, and kill the
 * task (or panic if in interrupt context / panic_on_oops).
 */
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		parisc_printk_ratelimited(1, regs,
			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);

		return;
	}

	bust_spinlocks(1);	/* ensure the oops output makes it to the console */

	oops_enter();

	/* Amuse the user in a SPARC fashion */
	if (err) printk(KERN_CRIT
			" _______________________________ \n"
			" < Your System ate a SPARC! Gah! >\n"
			" ------------------------------- \n"
			" \\ ^__^\n"
			" (__)\\ )\\/\\\n"
			" U ||----w |\n"
			" || ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, task_pid_nr(current), str, err);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		/* dying while dying: stop here rather than recurse forever */
		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(SIGSEGV);
}
273
274/* gdb uses break 4,8 */
275#define GDB_BREAK_INSN 0x10004
/*
 * Deliver the standard gdb SIGTRAP for a break/trace event.  The low
 * two bits of iaoq[0] (privilege level) are masked off the reported
 * fault address.
 */
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
	force_sig_fault(SIGTRAP, wot,
			(void __user *) (regs->iaoq[0] & ~3));
}
281
/*
 * Break-instruction trap (interruption code 9).  Dispatch in order:
 * kernel BUG()/WARN() breakpoints to report_bug(), then kprobes and
 * kgdb break instructions when configured; everything else ends up
 * as a SIGTRAP to current.  The break immediates are decoded from
 * IIR for the diagnostic line.
 */
static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here. */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3, regs);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON(). */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

#ifdef CONFIG_KPROBES
	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN)) {
		parisc_kprobe_break_handler(regs);
		return;
	}

#endif

#ifdef CONFIG_KGDB
	if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
		iir == PARISC_KGDB_BREAK_INSN)) {
		kgdb_handle_exception(9, SIGTRAP, 0, regs);
		return;
	}
#endif

	if (unlikely(iir != GDB_BREAK_INSN))
		parisc_printk_ratelimited(0, regs,
			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			task_pid_nr(current), current->comm);

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}
324
/*
 * Fallback machine-check handler: log the trap number and the CPU it
 * occurred on, then dump the saved register state.
 */
static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}
330
331void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
332
333
334void transfer_pim_to_trap_frame(struct pt_regs *regs)
335{
336 register int i;
337 extern unsigned int hpmc_pim_data[];
338 struct pdc_hpmc_pim_11 *pim_narrow;
339 struct pdc_hpmc_pim_20 *pim_wide;
340
341 if (boot_cpu_data.cpu_type >= pcxu) {
342
343 pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
344
345 /*
346 * Note: The following code will probably generate a
347 * bunch of truncation error warnings from the compiler.
348 * Could be handled with an ifdef, but perhaps there
349 * is a better way.
350 */
351
352 regs->gr[0] = pim_wide->cr[22];
353
354 for (i = 1; i < 32; i++)
355 regs->gr[i] = pim_wide->gr[i];
356
357 for (i = 0; i < 32; i++)
358 regs->fr[i] = pim_wide->fr[i];
359
360 for (i = 0; i < 8; i++)
361 regs->sr[i] = pim_wide->sr[i];
362
363 regs->iasq[0] = pim_wide->cr[17];
364 regs->iasq[1] = pim_wide->iasq_back;
365 regs->iaoq[0] = pim_wide->cr[18];
366 regs->iaoq[1] = pim_wide->iaoq_back;
367
368 regs->sar = pim_wide->cr[11];
369 regs->iir = pim_wide->cr[19];
370 regs->isr = pim_wide->cr[20];
371 regs->ior = pim_wide->cr[21];
372 }
373 else {
374 pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
375
376 regs->gr[0] = pim_narrow->cr[22];
377
378 for (i = 1; i < 32; i++)
379 regs->gr[i] = pim_narrow->gr[i];
380
381 for (i = 0; i < 32; i++)
382 regs->fr[i] = pim_narrow->fr[i];
383
384 for (i = 0; i < 8; i++)
385 regs->sr[i] = pim_narrow->sr[i];
386
387 regs->iasq[0] = pim_narrow->cr[17];
388 regs->iasq[1] = pim_narrow->iasq_back;
389 regs->iaoq[0] = pim_narrow->cr[18];
390 regs->iaoq[1] = pim_narrow->iaoq_back;
391
392 regs->sar = pim_narrow->cr[11];
393 regs->iir = pim_narrow->cr[19];
394 regs->isr = pim_narrow->cr[20];
395 regs->ior = pim_narrow->cr[21];
396 }
397
398 /*
399 * The following fields only have meaning if we came through
400 * another path. So just zero them here.
401 */
402
403 regs->ksp = 0;
404 regs->kpc = 0;
405 regs->orig_r28 = 0;
406}
407
408
409/*
410 * This routine is called as a last resort when everything else
411 * has gone clearly wrong. We get called for faults in kernel space,
412 * and HPMC's.
413 */
414void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
415{
416 static DEFINE_SPINLOCK(terminate_lock);
417
418 (void)notify_die(DIE_OOPS, msg, regs, 0, code, SIGTRAP);
419 bust_spinlocks(1);
420
421 set_eiem(0);
422 local_irq_disable();
423 spin_lock(&terminate_lock);
424
425 /* unlock the pdc lock if necessary */
426 pdc_emergency_unlock();
427
428 /* restart pdc console if necessary */
429 if (!console_drivers)
430 pdc_console_restart();
431
432 /* Not all paths will gutter the processor... */
433 switch(code){
434
435 case 1:
436 transfer_pim_to_trap_frame(regs);
437 break;
438
439 default:
440 break;
441
442 }
443
444 {
445 /* show_stack(NULL, (unsigned long *)regs->gr[30]); */
446 struct unwind_frame_info info;
447 unwind_frame_init(&info, current, regs);
448 do_show_stack(&info, KERN_CRIT);
449 }
450
451 printk("\n");
452 pr_crit("%s: Code=%d (%s) at addr " RFMT "\n",
453 msg, code, trap_name(code), offset);
454 show_regs(regs);
455
456 spin_unlock(&terminate_lock);
457
458 /* put soft power button back under hardware control;
459 * if the user had pressed it once at any time, the
460 * system will shut down immediately right here. */
461 pdc_soft_power_button(0);
462
463 /* Call kernel panic() so reboot timeouts work properly
464 * FIXME: This function should be on the list of
465 * panic notifiers, and we should call panic
466 * directly from the location that we wish.
467 * e.g. We should not call panic from
468 * parisc_terminate, but rather the oter way around.
469 * This hack works, prints the panic message twice,
470 * and it enables reboot timers!
471 */
472 panic(msg);
473}
474
/*
 * Main C-level dispatcher for PA-RISC interruptions (traps, faults and
 * machine checks), entered from the low-level assembly handlers.
 *
 * @code: hardware interruption number (see the switch below)
 * @regs: saved register frame
 *
 * Depending on @code this emulates the trapping instruction, delivers
 * a signal to the current task, breaks out of the switch to fall
 * through to do_page_fault(), or calls parisc_terminate() for fatal
 * conditions.
 */
void notrace handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	int si_code;

	if (code == 1)
		pdc_console_restart(); /* switch back to pdc if HPMC */
	else
		local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the users
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
	if (((unsigned long)regs->iaoq[0] & 3) &&
	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
		regs->iaoq[0] = 0 | 3;	/* address 0, user privilege level */
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;	/* clear taken-branch bit in PSW */
		return;
	}

#if 0
	printk(KERN_CRIT "Interruption # %d\n", code);
#endif

	switch(code) {

	case 1:
		/* High-priority machine check (HPMC) */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case 2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case 3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;	/* stop recovery-counter trapping */

#ifdef CONFIG_KPROBES
		if (parisc_kprobe_ss_handler(regs))
			return;
#endif

#ifdef CONFIG_KGDB
		if (kgdb_single_step) {
			kgdb_handle_exception(0, SIGTRAP, 0, regs);
			return;
		}
#endif

		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case 5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		cpu_lpmc(5, regs);
		return;

	case PARISC_ITLB_TRAP:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space = regs->iasq[0];
		break;

	case 8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si_code = ILL_ILLOPC;
		goto give_sigill;

	case 9:
		/* Break instruction trap */
		handle_break(regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */

			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			/* advance the instruction address queue past the
			 * emulated instruction */
			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si_code = ILL_PRVREG;
	give_sigill:
		force_sig_fault(SIGILL, si_code,
				(void __user *) regs->iaoq[0]);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		force_sig_fault(SIGFPE, FPE_INTOVF,
				(void __user *) regs->iaoq[0]);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition  */
		if(user_mode(regs)){
			/* Let userspace app figure it out from the insn pointed
			 * to by si_addr.
			 */
			force_sig_fault(SIGFPE, FPE_CONDTRAP,
					(void __user *) regs->iaoq[0]);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		__inc_irq_stat(irq_fpassist_count);
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		fallthrough;
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		fallthrough;
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* FIXME:
			 Still need to add slow path emulation code here!
			 If the insn used a non-shadow register, then the tlb
			 handlers could not have their side-effect (e.g. probe
			 writing to a target register) emulated since rfir would
			 erase the changes to said register. Instead we have to
			 setup everything, call this function we are in, and emulate
			 by hand. Technically we need to emulate:
			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
		*/
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later cpu's split this into types 26,27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		fallthrough;
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		fallthrough;
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case 7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			mmap_read_lock(current->mm);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				mmap_read_unlock(current->mm);
				break; /* call do_page_fault() */
			}
			mmap_read_unlock(current->mm);
		}
		fallthrough;
	case 27:
		/* Data memory protection ID trap */
		if (code == 27 && !user_mode(regs) &&
			fixup_exception(regs))
			return;

		die_if_kernel("Protection id trap", regs, code);
		force_sig_fault(SIGSEGV, SEGV_MAPERR,
				(code == 7)?
				((void __user *) regs->iaoq[0]) :
				((void __user *) regs->ior));
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
			parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"handle_interruption() pid=%d command='%s'\n",
				task_pid_nr(current), current->comm);
			/* SIGBUS, for lack of a better one. */
			force_sig_fault(SIGBUS, BUS_OBJERR,
					(void __user *)regs->ior);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	/* We only get here when the case above set fault_address/fault_space
	 * and broke out of the switch: the fault needs do_page_fault(). */
	if (user_mode(regs)) {
	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
		/* fault in a space the process does not own - kill it */
		parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
				code, fault_space,
				task_pid_nr(current), current->comm);
		force_sig_fault(SIGSEGV, SEGV_MAPERR,
				(void __user *)regs->ior);
		return;
	    }
	}
	else {

	    /*
	     * The kernel should never fault on its own address space,
	     * unless pagefault_disable() was called before.
	     */

	    if (fault_space == 0 && !faulthandler_disabled())
	    {
		/* Clean up and return if in exception table. */
		if (fixup_exception(regs))
			return;
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
		parisc_terminate("Kernel Fault", regs, code, fault_address);
	    }
	}

	do_page_fault(regs, code, fault_address);
}
797
798
799void __init initialize_ivt(const void *iva)
800{
801 extern const u32 os_hpmc[];
802
803 int i;
804 u32 check = 0;
805 u32 *ivap;
806 u32 *hpmcp;
807 u32 instr;
808
809 if (strcmp((const char *)iva, "cows can fly"))
810 panic("IVT invalid");
811
812 ivap = (u32 *)iva;
813
814 for (i = 0; i < 8; i++)
815 *ivap++ = 0;
816
817 /*
818 * Use PDC_INSTR firmware function to get instruction that invokes
819 * PDCE_CHECK in HPMC handler. See programming note at page 1-31 of
820 * the PA 1.1 Firmware Architecture document.
821 */
822 if (pdc_instr(&instr) == PDC_OK)
823 ivap[0] = instr;
824
825 /*
826 * Rules for the checksum of the HPMC handler:
827 * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed
828 * its own IVA).
829 * 2. The word at IVA + 32 is nonzero.
830 * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and
831 * Address (IVA + 56) are word-aligned.
832 * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of
833 * the Length/4 words starting at Address is zero.
834 */
835
836 /* Setup IVA and compute checksum for HPMC handler */
837 ivap[6] = (u32)__pa(os_hpmc);
838
839 hpmcp = (u32 *)os_hpmc;
840
841 for (i=0; i<8; i++)
842 check += ivap[i];
843
844 ivap[5] = -check;
845 pr_debug("initialize_ivt: IVA[6] = 0x%08x\n", ivap[6]);
846}
847
848
/* early_trap_init() is called before we set up kernel mappings and
 * write-protect the kernel, so the vector table can still be written.
 * NOTE(review): on 32-bit kernels both the PA1.1 and PA2.0 fault
 * vectors are initialized; which one the CPU actually uses is decided
 * elsewhere — confirm against the IVA setup code. */
void __init early_trap_init(void)
{
	extern const void fault_vector_20;

#ifndef CONFIG_64BIT
	extern const void fault_vector_11;
	initialize_ivt(&fault_vector_11);
#endif

	initialize_ivt(&fault_vector_20);
}
862
/* Nothing to do here: the interruption vector table was already
 * installed by early_trap_init(), before the kernel text was
 * write-protected. */
void __init trap_init(void)
{
}