/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/isa-rev.h>
#include <asm/mips-cps.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>

extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_tlbp(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);

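/*
 * A note on the register numbering used by the backtrace helpers below:
 * in the MIPS ABIs general register $29 is the stack pointer and $31 is
 * the return address register, so regs->regs[29] and regs->regs[31] are
 * read as sp and ra respectively, while cp0_epc holds the faulting PC.
 */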
static void show_raw_backtrace(unsigned long reg29)
{
        unsigned long *sp = (unsigned long *)(reg29 & ~3);
        unsigned long addr;

        printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
        printk("\n");
#endif
        while (!kstack_end(sp)) {
                unsigned long __user *p =
                        (unsigned long __user *)(unsigned long)sp++;
                if (__get_user(addr, p)) {
                        printk(" (Bad stack address)");
                        break;
                }
                if (__kernel_text_address(addr))
                        print_ip_sym(addr);
        }
        printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
        raw_show_trace = 1;
        return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
        unsigned long sp = regs->regs[29];
        unsigned long ra = regs->regs[31];
        unsigned long pc = regs->cp0_epc;

        if (!task)
                task = current;

        if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
                show_raw_backtrace(sp);
                return;
        }
        printk("Call Trace:\n");
        do {
                print_ip_sym(pc);
                pc = unwind_stack(task, &sp, pc, &ra);
        } while (pc);
        pr_cont("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
        const struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        long stackdata;
        int i;
        unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

        printk("Stack :");
        i = 0;
        while ((unsigned long) sp & (PAGE_SIZE - 1)) {
                if (i && ((i % (64 / field)) == 0)) {
                        pr_cont("\n");
                        printk("       ");
                }
                if (i > 39) {
                        pr_cont(" ...");
                        break;
                }

                if (__get_user(stackdata, sp++)) {
                        pr_cont(" (Bad stack address)");
                        break;
                }

                pr_cont(" %0*lx", field, stackdata);
                i++;
        }
        pr_cont("\n");
        show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        struct pt_regs regs;
        mm_segment_t old_fs = get_fs();

        regs.cp0_status = KSU_KERNEL;
        if (sp) {
                regs.regs[29] = (unsigned long)sp;
                regs.regs[31] = 0;
                regs.cp0_epc = 0;
        } else {
                if (task && task != current) {
                        regs.regs[29] = task->thread.reg29;
                        regs.regs[31] = 0;
                        regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
                } else if (atomic_read(&kgdb_active) != -1 &&
                           kdb_current_regs) {
                        memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
                } else {
                        prepare_frametrace(&regs);
                }
        }
        /*
         * show_stack() deals exclusively with kernel mode, so be sure to access
         * the stack in the kernel (not user) address space.
         */
        set_fs(KERNEL_DS);
        show_stacktrace(task, &regs);
        set_fs(old_fs);
}

static void show_code(unsigned int __user *pc)
{
        long i;
        unsigned short __user *pc16 = NULL;

        printk("Code:");

        if ((unsigned long)pc & 1)
                pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
        for(i = -3 ; i < 6 ; i++) {
                unsigned int insn;
                if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
                        pr_cont(" (Bad address in epc)\n");
                        break;
                }
                pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
        }
        pr_cont("\n");
}

static void __show_regs(const struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        unsigned int cause = regs->cp0_cause;
        unsigned int exccode;
        int i;

        show_regs_print_info(KERN_DEFAULT);

        /*
         * Saved main processor registers
         */
        for (i = 0; i < 32; ) {
                if ((i % 4) == 0)
                        printk("$%2d :", i);
                if (i == 0)
                        pr_cont(" %0*lx", field, 0UL);
                else if (i == 26 || i == 27)
                        pr_cont(" %*s", field, "");
                else
                        pr_cont(" %0*lx", field, regs->regs[i]);

                i++;
                if ((i % 4) == 0)
                        pr_cont("\n");
        }

#ifdef CONFIG_CPU_HAS_SMARTMIPS
        printk("Acx : %0*lx\n", field, regs->acx);
#endif
        if (MIPS_ISA_REV < 6) {
                printk("Hi : %0*lx\n", field, regs->hi);
                printk("Lo : %0*lx\n", field, regs->lo);
        }

        /*
         * Saved cp0 registers
         */
        printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
               (void *) regs->cp0_epc);
        printk("ra : %0*lx %pS\n", field, regs->regs[31],
               (void *) regs->regs[31]);

        printk("Status: %08x ", (uint32_t) regs->cp0_status);

        if (cpu_has_3kex) {
                if (regs->cp0_status & ST0_KUO)
                        pr_cont("KUo ");
                if (regs->cp0_status & ST0_IEO)
                        pr_cont("IEo ");
                if (regs->cp0_status & ST0_KUP)
                        pr_cont("KUp ");
                if (regs->cp0_status & ST0_IEP)
                        pr_cont("IEp ");
                if (regs->cp0_status & ST0_KUC)
                        pr_cont("KUc ");
                if (regs->cp0_status & ST0_IEC)
                        pr_cont("IEc ");
        } else if (cpu_has_4kex) {
                if (regs->cp0_status & ST0_KX)
                        pr_cont("KX ");
                if (regs->cp0_status & ST0_SX)
                        pr_cont("SX ");
                if (regs->cp0_status & ST0_UX)
                        pr_cont("UX ");
                switch (regs->cp0_status & ST0_KSU) {
                case KSU_USER:
                        pr_cont("USER ");
                        break;
                case KSU_SUPERVISOR:
                        pr_cont("SUPERVISOR ");
                        break;
                case KSU_KERNEL:
                        pr_cont("KERNEL ");
                        break;
                default:
                        pr_cont("BAD_MODE ");
                        break;
                }
                if (regs->cp0_status & ST0_ERL)
                        pr_cont("ERL ");
                if (regs->cp0_status & ST0_EXL)
                        pr_cont("EXL ");
                if (regs->cp0_status & ST0_IE)
                        pr_cont("IE ");
        }
        pr_cont("\n");

        exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
        printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

        if (1 <= exccode && exccode <= 5)
                printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

        printk("PrId : %08x (%s)\n", read_c0_prid(),
               cpu_name_string());
}

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
        __show_regs(regs);
        dump_stack();
}

void show_registers(struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        mm_segment_t old_fs = get_fs();

        __show_regs(regs);
        print_modules();
        printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
               current->comm, current->pid, current_thread_info(), current,
               field, current_thread_info()->tp_value);
        if (cpu_has_userlocal) {
                unsigned long tls;

                tls = read_c0_userlocal();
                if (tls != current_thread_info()->tp_value)
                        printk("*HwTLS: %0*lx\n", field, tls);
        }

        if (!user_mode(regs))
                /* Necessary for getting the correct stack content */
                set_fs(KERNEL_DS);
        show_stacktrace(current, regs);
        show_code((unsigned int __user *) regs->cp0_epc);
        printk("\n");
        set_fs(old_fs);
}

static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
        static int die_counter;
        int sig = SIGSEGV;

        oops_enter();

        if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
                       SIGSEGV) == NOTIFY_STOP)
                sig = 0;

        console_verbose();
        raw_spin_lock_irq(&die_lock);
        bust_spinlocks(1);

        printk("%s[#%d]:\n", str, ++die_counter);
        show_registers(regs);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        raw_spin_unlock_irq(&die_lock);

        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        do_exit(sig);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"       .section        __dbe_table, \"a\"\n"
"       .previous                       \n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
        const struct exception_table_entry *e;

        e = search_extable(__start___dbe_table,
                           __stop___dbe_table - __start___dbe_table, addr);
        if (!e)
                e = search_module_dbetables(addr);
        return e;
}

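/*
 * Bus error handler. The action is decided in three steps: a fixup
 * entry for the faulting EPC selects MIPS_BE_FIXUP, a platform hook
 * (board_be_handler) may then override that, and anything still left
 * at MIPS_BE_FATAL ends in a SIGBUS or a kernel oops.
 */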
asmlinkage void do_be(struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        const struct exception_table_entry *fixup = NULL;
        int data = regs->cp0_cause & 4;
        int action = MIPS_BE_FATAL;
        enum ctx_state prev_state;

        prev_state = exception_enter();
        /* XXX For now. Fixme, this searches the wrong table ... */
        if (data && !user_mode(regs))
                fixup = search_dbe_tables(exception_epc(regs));

        if (fixup)
                action = MIPS_BE_FIXUP;

        if (board_be_handler)
                action = board_be_handler(regs, fixup != NULL);
        else
                mips_cm_error_report();

        switch (action) {
        case MIPS_BE_DISCARD:
                goto out;
        case MIPS_BE_FIXUP:
                if (fixup) {
                        regs->cp0_epc = fixup->nextinsn;
                        goto out;
                }
                break;
        default:
                break;
        }

        /*
         * Assume it would be too dangerous to continue ...
         */
        printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
               data ? "Data" : "Instruction",
               field, regs->cp0_epc, field, regs->regs[31]);
        if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
                       SIGBUS) == NOTIFY_STOP)
                goto out;

        die_if_kernel("Oops", regs);
        force_sig(SIGBUS);

out:
        exception_exit(prev_state);
}

/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

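/*
 * The masks above follow the MIPS32 instruction encodings. For the
 * I-type ll/sc instructions the fields are laid out as
 *
 *      opcode[31:26] base[25:21] rt[20:16] offset[15:0]
 *
 * while the SPEC0/SPEC3 R-type instructions (sync, rdhwr) carry their
 * function code in FUNC[5:0] and the source selector in RD[15:11].
 */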
/* microMIPS definitions */
#define MM_POOL32A_FUNC 0xfc00ffff
#define MM_RDHWR        0x00006b3c
#define MM_RS           0x001f0000
#define MM_RT           0x03e00000

/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
        unsigned long value, __user *vaddr;
        long offset;

        /*
         * analyse the ll instruction that just caused a ri exception
         * and put the referenced address into vaddr.
         */

        /* sign extend offset */
        offset = opcode & OFFSET;
        offset <<= 16;
        offset >>= 16;

        vaddr = (unsigned long __user *)
                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

        if ((unsigned long)vaddr & 3)
                return SIGBUS;
        if (get_user(value, vaddr))
                return SIGSEGV;

        preempt_disable();

        if (ll_task == NULL || ll_task == current) {
                ll_bit = 1;
        } else {
                ll_bit = 0;
        }
        ll_task = current;

        preempt_enable();

        regs->regs[(opcode & RT) >> 16] = value;

        return 0;
}

static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
        unsigned long __user *vaddr;
        unsigned long reg;
        long offset;

        /*
         * analyse the sc instruction that just caused a ri exception
         * and put the referenced address into vaddr.
         */

        /* sign extend offset */
        offset = opcode & OFFSET;
        offset <<= 16;
        offset >>= 16;

        vaddr = (unsigned long __user *)
                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
        reg = (opcode & RT) >> 16;

        if ((unsigned long)vaddr & 3)
                return SIGBUS;

        preempt_disable();

        if (ll_bit == 0 || ll_task != current) {
                regs->regs[reg] = 0;
                preempt_enable();
                return 0;
        }

        preempt_enable();

        if (put_user(regs->regs[reg], vaddr))
                return SIGSEGV;

        regs->regs[reg] = 1;

        return 0;
}

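/*
 * Together simulate_ll() and simulate_sc() above emulate an ll/sc pair
 * with a single global reservation: ll sets ll_bit and records ll_task,
 * any context switch clears ll_bit (see r*_switch.S), and sc only
 * performs its store and returns 1 if ll_bit is still set for the
 * current task. ll_bit is checked and updated with preemption disabled
 * so the check itself cannot race with a context switch.
 */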
/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is, both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors. That's the theory. In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
        if ((opcode & OPCODE) == LL) {
                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
                                1, regs, 0);
                return simulate_ll(regs, opcode);
        }
        if ((opcode & OPCODE) == SC) {
                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
                                1, regs, 0);
                return simulate_sc(regs, opcode);
        }

        return -1;                      /* Must be something else ... */
}

/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
        struct thread_info *ti = task_thread_info(current);

        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
                        1, regs, 0);
        switch (rd) {
        case MIPS_HWR_CPUNUM:           /* CPU number */
                regs->regs[rt] = smp_processor_id();
                return 0;
        case MIPS_HWR_SYNCISTEP:        /* SYNCI length */
                regs->regs[rt] = min(current_cpu_data.dcache.linesz,
                                     current_cpu_data.icache.linesz);
                return 0;
        case MIPS_HWR_CC:               /* Read count register */
                regs->regs[rt] = read_c0_count();
                return 0;
        case MIPS_HWR_CCRES:            /* Count register resolution */
                switch (current_cpu_type()) {
                case CPU_20KC:
                case CPU_25KF:
                        regs->regs[rt] = 1;
                        break;
                default:
                        regs->regs[rt] = 2;
                }
                return 0;
        case MIPS_HWR_ULR:              /* Read UserLocal register */
                regs->regs[rt] = ti->tp_value;
                return 0;
        default:
                return -1;
        }
}

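/*
 * simulate_rdhwr() above mirrors the registers that the HWRENA register
 * can expose directly to user mode on newer cores (see configure_hwrena()
 * below); this trap path is only taken when the corresponding HWRENA bit
 * is clear or the hardware lacks the register.
 */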
static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
        if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
                int rd = (opcode & RD) >> 11;
                int rt = (opcode & RT) >> 16;

                simulate_rdhwr(regs, rd, rt);
                return 0;
        }

        /* Not ours. */
        return -1;
}

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
        if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
                int rd = (opcode & MM_RS) >> 16;
                int rt = (opcode & MM_RT) >> 21;
                simulate_rdhwr(regs, rd, rt);
                return 0;
        }

        /* Not ours. */
        return -1;
}

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
        if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
                                1, regs, 0);
                return 0;
        }

        return -1;                      /* Must be something else ... */
}

asmlinkage void do_ov(struct pt_regs *regs)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();
        die_if_kernel("Integer overflow", regs);

        force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
        exception_exit(prev_state);
}

#ifdef CONFIG_MIPS_FP_SUPPORT

/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits. This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
                     struct task_struct *tsk)
{
        int si_code = FPE_FLTUNK;

        if (fcr31 & FPU_CSR_INV_X)
                si_code = FPE_FLTINV;
        else if (fcr31 & FPU_CSR_DIV_X)
                si_code = FPE_FLTDIV;
        else if (fcr31 & FPU_CSR_OVF_X)
                si_code = FPE_FLTOVF;
        else if (fcr31 & FPU_CSR_UDF_X)
                si_code = FPE_FLTUND;
        else if (fcr31 & FPU_CSR_INE_X)
                si_code = FPE_FLTRES;

        force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
}

int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
        int si_code;
        struct vm_area_struct *vma;

        switch (sig) {
        case 0:
                return 0;

        case SIGFPE:
                force_fcr31_sig(fcr31, fault_addr, current);
                return 1;

        case SIGBUS:
                force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
                return 1;

        case SIGSEGV:
                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, (unsigned long)fault_addr);
                if (vma && (vma->vm_start <= (unsigned long)fault_addr))
                        si_code = SEGV_ACCERR;
                else
                        si_code = SEGV_MAPERR;
                up_read(&current->mm->mmap_sem);
                force_sig_fault(SIGSEGV, si_code, fault_addr);
                return 1;

        default:
                force_sig(sig);
                return 1;
        }
}

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
                       unsigned long old_epc, unsigned long old_ra)
{
        union mips_instruction inst = { .word = opcode };
        void __user *fault_addr;
        unsigned long fcr31;
        int sig;

        /* If it's obviously not an FP instruction, skip it */
        switch (inst.i_format.opcode) {
        case cop1_op:
        case cop1x_op:
        case lwc1_op:
        case ldc1_op:
        case swc1_op:
        case sdc1_op:
                break;

        default:
                return -1;
        }

        /*
         * do_ri skipped over the instruction via compute_return_epc, undo
         * that for the FPU emulator.
         */
        regs->cp0_epc = old_epc;
        regs->regs[31] = old_ra;

        /* Run the emulator */
        sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
                                       &fault_addr);

        /*
         * We can't allow the emulated instruction to leave any
         * enabled Cause bits set in $fcr31.
         */
        fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
        current->thread.fpu.fcr31 &= ~fcr31;

        /* Restore the hardware register state */
        own_fpu(1);

        /* Send a signal if required. */
        process_fpemu_return(sig, fault_addr, fcr31);

        return 0;
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
        enum ctx_state prev_state;
        void __user *fault_addr;
        int sig;

        prev_state = exception_enter();
        if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
                       SIGFPE) == NOTIFY_STOP)
                goto out;

        /* Clear FCSR.Cause before enabling interrupts */
        write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
        local_irq_enable();

        die_if_kernel("FP exception in kernel code", regs);

        if (fcr31 & FPU_CSR_UNI_X) {
                /*
                 * Unimplemented operation exception. If we've got the full
                 * software emulator on-board, let's use it...
                 *
                 * Force FPU to dump state into task/thread context. We're
                 * moving a lot of data here for what is probably a single
                 * instruction, but the alternative is to pre-decode the FP
                 * register operands before invoking the emulator, which seems
                 * a bit extreme for what should be an infrequent event.
                 */

                /* Run the emulator */
                sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
                                               &fault_addr);

                /*
                 * We can't allow the emulated instruction to leave any
                 * enabled Cause bits set in $fcr31.
                 */
                fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
                current->thread.fpu.fcr31 &= ~fcr31;

                /* Restore the hardware register state */
                own_fpu(1);     /* Using the FPU again. */
        } else {
                sig = SIGFPE;
                fault_addr = (void __user *) regs->cp0_epc;
        }

        /* Send a signal if required. */
        process_fpemu_return(sig, fault_addr, fcr31);

out:
        exception_exit(prev_state);
}

/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
        if (mt_fpemul_threshold > 0 &&
            ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
                /*
                 * If there's no FPU present, or if the application has already
                 * restricted the allowed set to exclude any CPUs with FPUs,
                 * we'll skip the procedure.
                 */
                if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
                        cpumask_t tmask;

                        current->thread.user_cpus_allowed
                                = current->cpus_mask;
                        cpumask_and(&tmask, &current->cpus_mask,
                                    &mt_fpu_cpumask);
                        set_cpus_allowed_ptr(current, &tmask);
                        set_thread_flag(TIF_FPUBOUND);
                }
        }
#endif /* CONFIG_MIPS_MT_FPAFF */
}

#else /* !CONFIG_MIPS_FP_SUPPORT */

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
                       unsigned long old_epc, unsigned long old_ra)
{
        return -1;
}

#endif /* !CONFIG_MIPS_FP_SUPPORT */

void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
        const char *str)
{
        char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
        if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
                         SIGTRAP) == NOTIFY_STOP)
                return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

        if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
                       SIGTRAP) == NOTIFY_STOP)
                return;

        /*
         * A short test says that IRIX 5.3 sends SIGTRAP for all trap
         * insns, even for trap and break codes that indicate arithmetic
         * failures. Weird ...
         * But should we continue the brokenness??? --macro
         */
        switch (code) {
        case BRK_OVERFLOW:
        case BRK_DIVZERO:
                scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
                die_if_kernel(b, regs);
                force_sig_fault(SIGFPE,
                                code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
                                (void __user *) regs->cp0_epc);
                break;
        case BRK_BUG:
                die_if_kernel("Kernel bug detected", regs);
                force_sig(SIGTRAP);
                break;
        case BRK_MEMU:
                /*
                 * This breakpoint code is used by the FPU emulator to retake
                 * control of the CPU after executing the instruction from the
                 * delay slot of an emulated branch.
                 *
                 * Terminate if exception was recognized as a delay slot return
                 * otherwise handle as normal.
                 */
                if (do_dsemulret(regs))
                        return;

                die_if_kernel("Math emu break/trap", regs);
                force_sig(SIGTRAP);
                break;
        default:
                scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
                die_if_kernel(b, regs);
                if (si_code) {
                        force_sig_fault(SIGTRAP, si_code, NULL);
                } else {
                        force_sig(SIGTRAP);
                }
        }
}

asmlinkage void do_bp(struct pt_regs *regs)
{
        unsigned long epc = msk_isa16_mode(exception_epc(regs));
        unsigned int opcode, bcode;
        enum ctx_state prev_state;
        mm_segment_t seg;

        seg = get_fs();
        if (!user_mode(regs))
                set_fs(KERNEL_DS);

        prev_state = exception_enter();
        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
        if (get_isa16_mode(regs->cp0_epc)) {
                u16 instr[2];

                if (__get_user(instr[0], (u16 __user *)epc))
                        goto out_sigsegv;

                if (!cpu_has_mmips) {
                        /* MIPS16e mode */
                        bcode = (instr[0] >> 5) & 0x3f;
                } else if (mm_insn_16bit(instr[0])) {
                        /* 16-bit microMIPS BREAK */
                        bcode = instr[0] & 0xf;
                } else {
                        /* 32-bit microMIPS BREAK */
                        if (__get_user(instr[1], (u16 __user *)(epc + 2)))
                                goto out_sigsegv;
                        opcode = (instr[0] << 16) | instr[1];
                        bcode = (opcode >> 6) & ((1 << 20) - 1);
                }
        } else {
                if (__get_user(opcode, (unsigned int __user *)epc))
                        goto out_sigsegv;
                bcode = (opcode >> 6) & ((1 << 20) - 1);
        }

        /*
         * There is an ancient bug in MIPS assemblers that made them place
         * the break code starting at bit 16 instead of bit 6 in the opcode.
         * Gas is bug-compatible, but not always, grrr...
         * We handle both cases with a simple heuristic. --macro
         */
        if (bcode >= (1 << 10))
                bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);

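        /*
         * Worked example: "break 7" assembled with the buggy encoding
         * places 7 in bits 25:16, so the 20-bit field read above yields
         * 7 << 10 == 7168. That is >= 1 << 10, so the halves are
         * swapped: ((7168 & 0x3ff) << 10) | (7168 >> 10) == 7.
         */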
        /*
         * notify the kprobe handlers, if the instruction is likely to
         * pertain to them.
         */
        switch (bcode) {
        case BRK_UPROBE:
                if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
                        goto out;
                else
                        break;
        case BRK_UPROBE_XOL:
                if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
                        goto out;
                else
                        break;
        case BRK_KPROBE_BP:
                if (notify_die(DIE_BREAK, "debug", regs, bcode,
                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
                        goto out;
                else
                        break;
        case BRK_KPROBE_SSTEPBP:
                if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
                        goto out;
                else
                        break;
        default:
                break;
        }

        do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");

out:
        set_fs(seg);
        exception_exit(prev_state);
        return;

out_sigsegv:
        force_sig(SIGSEGV);
        goto out;
}

asmlinkage void do_tr(struct pt_regs *regs)
{
        u32 opcode, tcode = 0;
        enum ctx_state prev_state;
        u16 instr[2];
        mm_segment_t seg;
        unsigned long epc = msk_isa16_mode(exception_epc(regs));

        seg = get_fs();
        if (!user_mode(regs))
                set_fs(KERNEL_DS);

        prev_state = exception_enter();
        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
        if (get_isa16_mode(regs->cp0_epc)) {
                if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
                    __get_user(instr[1], (u16 __user *)(epc + 2)))
                        goto out_sigsegv;
                opcode = (instr[0] << 16) | instr[1];
                /* Immediate versions don't provide a code. */
                if (!(opcode & OPCODE))
                        tcode = (opcode >> 12) & ((1 << 4) - 1);
        } else {
                if (__get_user(opcode, (u32 __user *)epc))
                        goto out_sigsegv;
                /* Immediate versions don't provide a code. */
                if (!(opcode & OPCODE))
                        tcode = (opcode >> 6) & ((1 << 10) - 1);
        }

        do_trap_or_bp(regs, tcode, 0, "Trap");

out:
        set_fs(seg);
        exception_exit(prev_state);
        return;

out_sigsegv:
        force_sig(SIGSEGV);
        goto out;
}

asmlinkage void do_ri(struct pt_regs *regs)
{
        unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
        unsigned long old_epc = regs->cp0_epc;
        unsigned long old31 = regs->regs[31];
        enum ctx_state prev_state;
        unsigned int opcode = 0;
        int status = -1;

        /*
         * Avoid any kernel code. Just emulate the R2 instruction
         * as quickly as possible.
         */
        if (mipsr2_emulation && cpu_has_mips_r6 &&
            likely(user_mode(regs)) &&
            likely(get_user(opcode, epc) >= 0)) {
                unsigned long fcr31 = 0;

                status = mipsr2_decoder(regs, opcode, &fcr31);
                switch (status) {
                case 0:
                case SIGEMT:
                        return;
                case SIGILL:
                        goto no_r2_instr;
                default:
                        process_fpemu_return(status,
                                             &current->thread.cp0_baduaddr,
                                             fcr31);
                        return;
                }
        }

no_r2_instr:

        prev_state = exception_enter();
        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;

        if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
                       SIGILL) == NOTIFY_STOP)
                goto out;

        die_if_kernel("Reserved instruction in kernel code", regs);

        if (unlikely(compute_return_epc(regs) < 0))
                goto out;

        if (!get_isa16_mode(regs->cp0_epc)) {
                if (unlikely(get_user(opcode, epc) < 0))
                        status = SIGSEGV;

                if (!cpu_has_llsc && status < 0)
                        status = simulate_llsc(regs, opcode);

                if (status < 0)
                        status = simulate_rdhwr_normal(regs, opcode);

                if (status < 0)
                        status = simulate_sync(regs, opcode);

                if (status < 0)
                        status = simulate_fp(regs, opcode, old_epc, old31);
        } else if (cpu_has_mmips) {
                unsigned short mmop[2] = { 0 };

                if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
                        status = SIGSEGV;
                if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
                        status = SIGSEGV;
                opcode = mmop[0];
                opcode = (opcode << 16) | mmop[1];

                if (status < 0)
                        status = simulate_rdhwr_mm(regs, opcode);
        }

        if (status < 0)
                status = SIGILL;

        if (unlikely(status > 0)) {
                regs->cp0_epc = old_epc;        /* Undo skip-over. */
                regs->regs[31] = old31;
                force_sig(status);
        }

out:
        exception_exit(prev_state);
}

/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
        return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
        void *data)
{
        struct pt_regs *regs = data;

        die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
                              "instruction", regs);
        force_sig(SIGILL);

        return NOTIFY_OK;
}

#ifdef CONFIG_MIPS_FP_SUPPORT

static int enable_restore_fp_context(int msa)
{
        int err, was_fpu_owner, prior_msa;
        bool first_fp;

        /* Initialize context if it hasn't been used already */
        first_fp = init_fp_ctx(current);

        if (first_fp) {
                preempt_disable();
                err = own_fpu_inatomic(1);
                if (msa && !err) {
                        enable_msa();
                        set_thread_flag(TIF_USEDMSA);
                        set_thread_flag(TIF_MSA_CTX_LIVE);
                }
                preempt_enable();
                return err;
        }

        /*
         * This task has formerly used the FP context.
         *
         * If this thread has no live MSA vector context then we can simply
         * restore the scalar FP context. If it has live MSA vector context
         * (that is, it has or may have used MSA since last performing a
         * function call) then we'll need to restore the vector context. This
         * applies even if we're currently only executing a scalar FP
         * instruction. This is because if we were to later execute an MSA
         * instruction then we'd either have to:
         *
         *  - Restore the vector context & clobber any registers modified by
         *    scalar FP instructions between now & then.
         *
         * or
         *
         *  - Not restore the vector context & lose the most significant bits
         *    of all vector registers.
         *
         * Neither of those options is acceptable. We cannot restore the least
         * significant bits of the registers now & only restore the most
         * significant bits later because the most significant bits of any
         * vector registers whose aliased FP register is modified now will have
         * been zeroed. We'd have no way to know that when restoring the vector
         * context & thus may load an outdated value for the most significant
         * bits of a vector register.
         */
        if (!msa && !thread_msa_context_live())
                return own_fpu(1);

        /*
         * This task is using or has previously used MSA. Thus we require
         * that Status.FR == 1.
         */
        preempt_disable();
        was_fpu_owner = is_fpu_owner();
        err = own_fpu_inatomic(0);
        if (err)
                goto out;

        enable_msa();
        write_msa_csr(current->thread.fpu.msacsr);
        set_thread_flag(TIF_USEDMSA);

        /*
         * If this is the first time that the task is using MSA and it has
         * previously used scalar FP in this time slice then we already have
         * FP context which we shouldn't clobber. We do however need to clear
         * the upper 64b of each vector register so that this task has no
         * opportunity to see data left behind by another.
         */
        prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
        if (!prior_msa && was_fpu_owner) {
                init_msa_upper();

                goto out;
        }

        if (!prior_msa) {
                /*
                 * Restore the least significant 64b of each vector register
                 * from the existing scalar FP context.
                 */
                _restore_fp(current);

                /*
                 * The task has not formerly used MSA, so clear the upper 64b
                 * of each vector register such that it cannot see data left
                 * behind by another task.
                 */
                init_msa_upper();
        } else {
                /* We need to restore the vector context. */
                restore_msa(current);

                /* Restore the scalar FP control & status register */
                if (!was_fpu_owner)
                        write_32bit_cp1_register(CP1_STATUS,
                                                 current->thread.fpu.fcr31);
        }

out:
        preempt_enable();

        return 0;
}

#else /* !CONFIG_MIPS_FP_SUPPORT */

static int enable_restore_fp_context(int msa)
{
        return SIGILL;
}

#endif /* CONFIG_MIPS_FP_SUPPORT */

asmlinkage void do_cpu(struct pt_regs *regs)
{
        enum ctx_state prev_state;
        unsigned int __user *epc;
        unsigned long old_epc, old31;
        unsigned int opcode;
        unsigned int cpid;
        int status;

        prev_state = exception_enter();
        cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

        if (cpid != 2)
                die_if_kernel("do_cpu invoked from kernel context!", regs);

        switch (cpid) {
        case 0:
                epc = (unsigned int __user *)exception_epc(regs);
                old_epc = regs->cp0_epc;
                old31 = regs->regs[31];
                opcode = 0;
                status = -1;

                if (unlikely(compute_return_epc(regs) < 0))
                        break;

                if (!get_isa16_mode(regs->cp0_epc)) {
                        if (unlikely(get_user(opcode, epc) < 0))
                                status = SIGSEGV;

                        if (!cpu_has_llsc && status < 0)
                                status = simulate_llsc(regs, opcode);
                }

                if (status < 0)
                        status = SIGILL;

                if (unlikely(status > 0)) {
                        regs->cp0_epc = old_epc;        /* Undo skip-over. */
                        regs->regs[31] = old31;
                        force_sig(status);
                }

                break;

#ifdef CONFIG_MIPS_FP_SUPPORT
        case 3:
                /*
                 * The COP3 opcode space and consequently the CP0.Status.CU3
                 * bit and the CP0.Cause.CE=3 encoding have been removed as
                 * of the MIPS III ISA. From the MIPS IV and MIPS32r2 ISAs
                 * up the space has been reused for COP1X instructions, that
                 * are enabled by the CP0.Status.CU1 bit and consequently
                 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
                 * exceptions. Some FPU-less processors that implement one
                 * of these ISAs however use this code erroneously for COP1X
                 * instructions. Therefore we redirect this trap to the FP
                 * emulator too.
                 */
                if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
                        force_sig(SIGILL);
                        break;
                }
                /* Fall through. */

        case 1: {
                void __user *fault_addr;
                unsigned long fcr31;
                int err, sig;

                err = enable_restore_fp_context(0);

                if (raw_cpu_has_fpu && !err)
                        break;

                sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
                                               &fault_addr);

                /*
                 * We can't allow the emulated instruction to leave
                 * any enabled Cause bits set in $fcr31.
                 */
                fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
                current->thread.fpu.fcr31 &= ~fcr31;

                /* Send a signal if required. */
                if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
                        mt_ase_fp_affinity();

                break;
        }
#else /* CONFIG_MIPS_FP_SUPPORT */
        case 1:
        case 3:
                force_sig(SIGILL);
                break;
#endif /* CONFIG_MIPS_FP_SUPPORT */

        case 2:
                raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
                break;
        }

        exception_exit(prev_state);
}

asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();
        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
        if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
                       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
                goto out;

        /* Clear MSACSR.Cause before enabling interrupts */
        write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
        local_irq_enable();

        die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
        force_sig(SIGFPE);
out:
        exception_exit(prev_state);
}

asmlinkage void do_msa(struct pt_regs *regs)
{
        enum ctx_state prev_state;
        int err;

        prev_state = exception_enter();

        if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
                force_sig(SIGILL);
                goto out;
        }

        die_if_kernel("do_msa invoked from kernel context!", regs);

        err = enable_restore_fp_context(1);
        if (err)
                force_sig(SIGILL);
out:
        exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();
        force_sig(SIGILL);
        exception_exit(prev_state);
}

/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();
        /*
         * Clear WP (bit 22) bit of cause register so we don't loop
         * forever.
         */
        clear_c0_cause(CAUSEF_WP);

        /*
         * If the current thread has the watch registers loaded, save
         * their values and send SIGTRAP. Otherwise another thread
         * left the registers set, clear them and continue.
         */
        if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
                mips_read_watch_registers();
                local_irq_enable();
                force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
        } else {
                mips_clear_watch_registers();
                local_irq_enable();
        }
        exception_exit(prev_state);
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
        int multi_match = regs->cp0_status & ST0_TS;
        enum ctx_state prev_state;
        mm_segment_t old_fs = get_fs();

        prev_state = exception_enter();
        show_regs(regs);

        if (multi_match) {
                dump_tlb_regs();
                pr_info("\n");
                dump_tlb_all();
        }

        if (!user_mode(regs))
                set_fs(KERNEL_DS);

        show_code((unsigned int __user *) regs->cp0_epc);

        set_fs(old_fs);

        /*
         * Some chips may have other causes of machine check (e.g. SB1
         * graduation timer)
         */
        panic("Caught Machine Check exception - %scaused by multiple "
              "matching entries in the TLB.",
              (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
        int subcode;

        subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
                        >> VPECONTROL_EXCPT_SHIFT;
        switch (subcode) {
        case 0:
                printk(KERN_DEBUG "Thread Underflow\n");
                break;
        case 1:
                printk(KERN_DEBUG "Thread Overflow\n");
                break;
        case 2:
                printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
                break;
        case 3:
                printk(KERN_DEBUG "Gating Storage Exception\n");
                break;
        case 4:
                printk(KERN_DEBUG "YIELD Scheduler Exception\n");
                break;
        case 5:
                printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
                break;
        default:
                printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
                        subcode);
                break;
        }
        die_if_kernel("MIPS MT Thread exception in kernel", regs);

        force_sig(SIGILL);
}


asmlinkage void do_dsp(struct pt_regs *regs)
{
        if (cpu_has_dsp)
                panic("Unexpected DSP exception");

        force_sig(SIGILL);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
        /*
         * Game over - no way to handle this if it ever occurs. Most probably
         * caused by a new unknown cpu type or after another deadly
         * hard/software error.
         */
        show_regs(regs);
        panic("Caught reserved exception %ld - should not happen.",
              (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
        l1parity = 0;
        return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
        l2parity = 0;
        return 1;
}
__setup("nol2par", nol2parity);

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
#define ERRCTL_PE       0x80000000
#define ERRCTL_L2P      0x00800000

        if (mips_cm_revision() >= CM_REV_CM3) {
                ulong gcr_ectl, cp0_ectl;

                /*
                 * With CM3 systems we need to ensure that the L1 & L2
                 * parity enables are set to the same value, since this
                 * is presumed by the hardware engineers.
                 *
                 * If the user disabled either of L1 or L2 ECC checking,
                 * disable both.
                 */
                l1parity &= l2parity;
                l2parity &= l1parity;

                /* Probe L1 ECC support */
                cp0_ectl = read_c0_ecc();
                write_c0_ecc(cp0_ectl | ERRCTL_PE);
                back_to_back_c0_hazard();
                cp0_ectl = read_c0_ecc();

                /* Probe L2 ECC support */
                gcr_ectl = read_gcr_err_control();

                if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
                    !(cp0_ectl & ERRCTL_PE)) {
                        /*
                         * One of L1 or L2 ECC checking isn't supported,
                         * so we cannot enable either.
                         */
                        l1parity = l2parity = 0;
                }

                /* Configure L1 ECC checking */
                if (l1parity)
                        cp0_ectl |= ERRCTL_PE;
                else
                        cp0_ectl &= ~ERRCTL_PE;
                write_c0_ecc(cp0_ectl);
                back_to_back_c0_hazard();
                WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);

                /* Configure L2 ECC checking */
                if (l2parity)
                        gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
                else
                        gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
                write_gcr_err_control(gcr_ectl);
                gcr_ectl = read_gcr_err_control();
                gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
                WARN_ON(!!gcr_ectl != l2parity);

                pr_info("Cache parity protection %sabled\n",
                        l1parity ? "en" : "dis");
                return;
        }

        switch (current_cpu_type()) {
        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
        case CPU_1004K:
        case CPU_1074K:
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
        case CPU_P5600:
        case CPU_QEMU_GENERIC:
        case CPU_P6600:
                {
                        unsigned long errctl;
                        unsigned int l1parity_present, l2parity_present;

                        errctl = read_c0_ecc();
                        errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

                        /* probe L1 parity support */
                        write_c0_ecc(errctl | ERRCTL_PE);
                        back_to_back_c0_hazard();
                        l1parity_present = (read_c0_ecc() & ERRCTL_PE);

                        /* probe L2 parity support */
                        write_c0_ecc(errctl|ERRCTL_L2P);
                        back_to_back_c0_hazard();
                        l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

                        if (l1parity_present && l2parity_present) {
                                if (l1parity)
                                        errctl |= ERRCTL_PE;
                                if (l1parity ^ l2parity)
                                        errctl |= ERRCTL_L2P;
                        } else if (l1parity_present) {
                                if (l1parity)
                                        errctl |= ERRCTL_PE;
                        } else if (l2parity_present) {
                                if (l2parity)
                                        errctl |= ERRCTL_L2P;
                        } else {
                                /* No parity available */
                        }

                        printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

                        write_c0_ecc(errctl);
                        back_to_back_c0_hazard();
                        errctl = read_c0_ecc();
                        printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

                        if (l1parity_present)
                                printk(KERN_INFO "Cache parity protection %sabled\n",
                                       (errctl & ERRCTL_PE) ? "en" : "dis");

                        if (l2parity_present) {
                                if (l1parity_present && l1parity)
                                        errctl ^= ERRCTL_L2P;
                                printk(KERN_INFO "L2 cache parity protection %sabled\n",
                                       (errctl & ERRCTL_L2P) ? "en" : "dis");
                        }
                }
                break;

        case CPU_5KC:
        case CPU_5KE:
        case CPU_LOONGSON1:
                write_c0_ecc(0x80000000);
                back_to_back_c0_hazard();
                /* Set the PE bit (bit 31) in the c0_errctl register. */
                printk(KERN_INFO "Cache parity protection %sabled\n",
                       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
                break;
        case CPU_20KC:
        case CPU_25KF:
                /* Clear the DE bit (bit 16) in the c0_status register. */
                printk(KERN_INFO "Enable cache parity protection for "
                       "MIPS 20KC/25KF CPUs.\n");
                clear_c0_status(ST0_DE);
                break;
        default:
                break;
        }
}

asmlinkage void cache_parity_error(void)
{
        const int field = 2 * sizeof(unsigned long);
        unsigned int reg_val;

        /* For the moment, report the problem and hang. */
        printk("Cache error exception:\n");
        printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
        reg_val = read_c0_cacheerr();
        printk("c0_cacheerr == %08x\n", reg_val);

        printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
               reg_val & (1<<30) ? "secondary" : "primary",
               reg_val & (1<<31) ? "data" : "insn");
        if ((cpu_has_mips_r2_r6) &&
            ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
                pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
                        reg_val & (1<<29) ? "ED " : "",
                        reg_val & (1<<28) ? "ET " : "",
                        reg_val & (1<<27) ? "ES " : "",
                        reg_val & (1<<26) ? "EE " : "",
                        reg_val & (1<<25) ? "EB " : "",
                        reg_val & (1<<24) ? "EI " : "",
                        reg_val & (1<<23) ? "E1 " : "",
                        reg_val & (1<<22) ? "E0 " : "");
        } else {
                pr_err("Error bits: %s%s%s%s%s%s%s\n",
                        reg_val & (1<<29) ? "ED " : "",
                        reg_val & (1<<28) ? "ET " : "",
                        reg_val & (1<<26) ? "EE " : "",
                        reg_val & (1<<25) ? "EB " : "",
                        reg_val & (1<<24) ? "EI " : "",
                        reg_val & (1<<23) ? "E1 " : "",
                        reg_val & (1<<22) ? "E0 " : "");
        }
        printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
        if (reg_val & (1<<22))
                printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

        if (reg_val & (1<<23))
                printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

        panic("Can't handle the cache error!");
}

asmlinkage void do_ftlb(void)
{
        const int field = 2 * sizeof(unsigned long);
        unsigned int reg_val;

        /* For the moment, report the problem and hang. */
        if ((cpu_has_mips_r2_r6) &&
            (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
            ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
                pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
                       read_c0_ecc());
                pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
                reg_val = read_c0_cacheerr();
                pr_err("c0_cacheerr == %08x\n", reg_val);

                if ((reg_val & 0xc0000000) == 0xc0000000) {
                        pr_err("Decoded c0_cacheerr: FTLB parity error\n");
                } else {
                        pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
                               reg_val & (1<<30) ? "secondary" : "primary",
                               reg_val & (1<<31) ? "data" : "insn");
                }
        } else {
                pr_err("FTLB error exception\n");
        }
        /* Just print the cacheerr bits for now */
        cache_parity_error();
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        unsigned long depc, old_epc, old_ra;
        unsigned int debug;

        printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
        depc = read_c0_depc();
        debug = read_c0_debug();
        printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
        if (debug & 0x80000000) {
                /*
                 * In branch delay slot.
                 * We cheat a little bit here and use EPC to calculate the
                 * debug return address (DEPC). EPC is restored after the
                 * calculation.
                 */
                old_epc = regs->cp0_epc;
                old_ra = regs->regs[31];
                regs->cp0_epc = depc;
                compute_return_epc(regs);
                depc = regs->cp0_epc;
                regs->cp0_epc = old_epc;
                regs->regs[31] = old_ra;
        } else
                depc += 4;
        write_c0_depc(depc);

#if 0
        printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
        write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
        char str[100];

        nmi_enter();
        raw_notifier_call_chain(&nmi_chain, 0, regs);
        bust_spinlocks(1);
        snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
                 smp_processor_id(), regs->cp0_epc);
        regs->cp0_epc = read_c0_errorepc();
        die(str, regs);
        nmi_exit();
}

#define VECTORSPACING 0x100     /* for EI/VI mode */

unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void __init *set_except_vector(int n, void *addr)
{
        unsigned long handler = (unsigned long) addr;
        unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
        /*
         * Only the TLB handlers are cache aligned with an even
         * address. All other handlers are on an odd address and
         * require no modification. Otherwise, MIPS32 mode will
         * be entered when handling any TLB exceptions. That
         * would be bad...since we must stay in microMIPS mode.
         */
        if (!(handler & 0x1))
                handler |= 1;
#endif
        old_handler = xchg(&exception_handlers[n], handler);

        if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
                unsigned long jump_mask = ~((1 << 27) - 1);
#else
                unsigned long jump_mask = ~((1 << 28) - 1);
#endif
                u32 *buf = (u32 *)(ebase + 0x200);
                unsigned int k0 = 26;
                if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
                        uasm_i_j(&buf, handler & ~jump_mask);
                        uasm_i_nop(&buf);
                } else {
                        UASM_i_LA(&buf, k0, handler);
                        uasm_i_jr(&buf, k0);
                        uasm_i_nop(&buf);
                }
                local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
        }
        return (void *)old_handler;
}
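
/*
 * The stub generated above at ebase + 0x200 uses one of two forms: a
 * direct "j" when the handler lies within the jump instruction's reach
 * of the vector (one 256MB segment, or 128MB for microMIPS), or a
 * "load address into k0; jr k0" sequence otherwise. k0 ($26) is one of
 * the two registers the ABI reserves for kernel/exception use, so it is
 * safe to clobber before any state has been saved.
 */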

static void do_default_vi(void)
{
        show_regs(get_irq_regs());
        panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
        unsigned long handler;
        unsigned long old_handler = vi_handlers[n];
        int srssets = current_cpu_data.srsets;
        u16 *h;
        unsigned char *b;

        BUG_ON(!cpu_has_veic && !cpu_has_vint);

        if (addr == NULL) {
                handler = (unsigned long) do_default_vi;
                srs = 0;
        } else
                handler = (unsigned long) addr;
        vi_handlers[n] = handler;

        b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

        if (srs >= srssets)
                panic("Shadow register set %d not supported", srs);

        if (cpu_has_veic) {
                if (board_bind_eic_interrupt)
                        board_bind_eic_interrupt(n, srs);
        } else if (cpu_has_vint) {
                /* SRSMap is only defined if shadow sets are implemented */
                if (srssets > 1)
                        change_c0_srsmap(0xf << n*4, srs << n*4);
        }

        if (srs == 0) {
                /*
                 * If no shadow set is selected then use the default handler
                 * that does normal register saving and standard interrupt exit
                 */
                extern char except_vec_vi, except_vec_vi_lui;
                extern char except_vec_vi_ori, except_vec_vi_end;
                extern char rollback_except_vec_vi;
                char *vec_start = using_rollback_handler() ?
                        &rollback_except_vec_vi : &except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
                const int lui_offset = &except_vec_vi_lui - vec_start + 2;
                const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
                const int lui_offset = &except_vec_vi_lui - vec_start;
                const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
                const int handler_len = &except_vec_vi_end - vec_start;

                if (handler_len > VECTORSPACING) {
                        /*
                         * Sigh... panicking won't help as the console
                         * is probably not configured :(
                         */
                        panic("VECTORSPACING too small");
                }

                set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
                                (handler_len - 1));
#else
                                handler_len);
#endif
                h = (u16 *)(b + lui_offset);
                *h = (handler >> 16) & 0xffff;
                h = (u16 *)(b + ori_offset);
                *h = (handler & 0xffff);
                local_flush_icache_range((unsigned long)b,
                                         (unsigned long)(b+handler_len));
        }
        else {
                /*
                 * In other cases jump directly to the interrupt handler. It
                 * is the handler's responsibility to save registers if required
                 * (eg hi/lo) and return from the exception using "eret".
                 */
                u32 insn;

                h = (u16 *)b;
                /* j handler */
#ifdef CONFIG_CPU_MICROMIPS
                insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
                insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
                h[0] = (insn >> 16) & 0xffff;
                h[1] = insn & 0xffff;
                h[2] = 0;
                h[3] = 0;
                local_flush_icache_range((unsigned long)b,
                                         (unsigned long)(b+8));
        }

        return (void *)old_handler;
}
2071
2072void *set_vi_handler(int n, vi_handler_t addr)
2073{
2074 return set_vi_srs_handler(n, addr, 0);
2075}
2076
2077extern void tlb_init(void);
2078
2079/*
2080 * Timer interrupt
2081 */
2082int cp0_compare_irq;
2083EXPORT_SYMBOL_GPL(cp0_compare_irq);
2084int cp0_compare_irq_shift;
2085
2086/*
2087 * Performance counter IRQ or -1 if shared with timer
2088 */
2089int cp0_perfcount_irq;
2090EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
2091
2092/*
2093 * Fast debug channel IRQ or -1 if not present
2094 */
2095int cp0_fdc_irq;
2096EXPORT_SYMBOL_GPL(cp0_fdc_irq);
2097
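/*
 * "noulri" prevents HWREna.ULR from being set, so user-mode RDHWR of the
 * UserLocal register traps and is emulated by the kernel instead.
 */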
2098static int noulri;
2099
2100static int __init ulri_disable(char *s)
2101{
2102 pr_info("Disabling ulri\n");
2103 noulri = 1;
2104
2105 return 1;
2106}
2107__setup("noulri", ulri_disable);
2108
2109/* configure STATUS register */
2110static void configure_status(void)
2111{
2112 /*
2113 * Disable coprocessors and select 32-bit or 64-bit addressing
2114 * and the 16/32 or 32/32 FPR register model. Reset the BEV
2115 * flag that some firmware may have left set and the TS bit (for
2116 * IP27). Set XX for ISA IV code to work.
2117 */
2118 unsigned int status_set = ST0_CU0;
2119#ifdef CONFIG_64BIT
2120 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
2121#endif
2122 if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
2123 status_set |= ST0_XX;
2124 if (cpu_has_dsp)
2125 status_set |= ST0_MX;
2126
2127 change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
2128 status_set);
2129}
2130
2131unsigned int hwrena;
2132EXPORT_SYMBOL_GPL(hwrena);
2133
2134/* Configure HWREna: select the hardware registers user-mode RDHWR may read */
2135static void configure_hwrena(void)
2136{
2137 hwrena = cpu_hwrena_impl_bits;
2138
2139 if (cpu_has_mips_r2_r6)
2140 hwrena |= MIPS_HWRENA_CPUNUM |
2141 MIPS_HWRENA_SYNCISTEP |
2142 MIPS_HWRENA_CC |
2143 MIPS_HWRENA_CCRES;
2144
2145 if (!noulri && cpu_has_userlocal)
2146 hwrena |= MIPS_HWRENA_ULR;
2147
2148 if (hwrena)
2149 write_c0_hwrena(hwrena);
2150}
2151
2152static void configure_exception_vector(void)
2153{
2154 if (cpu_has_mips_r2_r6) {
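		/*
		 * Take exceptions via the firmware bootstrap vectors
		 * (Status.BEV) while EBase is rewritten, so a stray
		 * exception cannot be dispatched through a half-updated
		 * base address.
		 */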
2155 unsigned long sr = set_c0_status(ST0_BEV);
2156 /* If available, use WG to set top bits of EBASE */
2157 if (cpu_has_ebase_wg) {
2158#ifdef CONFIG_64BIT
2159 write_c0_ebase_64(ebase | MIPS_EBASE_WG);
2160#else
2161 write_c0_ebase(ebase | MIPS_EBASE_WG);
2162#endif
2163 }
2164 write_c0_ebase(ebase);
2165 write_c0_status(sr);
2166 }
2167 if (cpu_has_veic || cpu_has_vint) {
2168 /* Setting vector spacing enables EI/VI mode */
2169 change_c0_intctl(0x3e0, VECTORSPACING);
2170 }
2171 if (cpu_has_divec) {
2172 if (cpu_has_mipsmt) {
2173 unsigned int vpflags = dvpe();
2174 set_c0_cause(CAUSEF_IV);
2175 evpe(vpflags);
2176 } else
2177 set_c0_cause(CAUSEF_IV);
2178 }
2179}
2180
2181void per_cpu_trap_init(bool is_boot_cpu)
2182{
2183 unsigned int cpu = smp_processor_id();
2184
2185 configure_status();
2186 configure_hwrena();
2187
2188 configure_exception_vector();
2189
2190 /*
2191 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
2192 *
2193 * o read IntCtl.IPTI to determine the timer interrupt
2194 * o read IntCtl.IPPCI to determine the performance counter interrupt
2195 * o read IntCtl.IPFDC to determine the fast debug channel interrupt
2196 */
2197 if (cpu_has_mips_r2_r6) {
2198 cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2199 cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2200 cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
2201 cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
2202 if (!cp0_fdc_irq)
2203 cp0_fdc_irq = -1;
2204
2205 } else {
2206 cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
2207 cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
2208 cp0_perfcount_irq = -1;
2209 cp0_fdc_irq = -1;
2210 }
2211
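	/*
	 * When the CPU has MemoryMapIDs (MMIDs) the per-CPU ASID cache is
	 * unused, so clear it; otherwise seed it with the first ASID version.
	 */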
2212 if (cpu_has_mmid)
2213 cpu_data[cpu].asid_cache = 0;
2214 else if (!cpu_data[cpu].asid_cache)
2215 cpu_data[cpu].asid_cache = asid_first_version(cpu);
2216
2217 mmgrab(&init_mm);
2218 current->active_mm = &init_mm;
2219 BUG_ON(current->mm);
2220 enter_lazy_tlb(&init_mm, current);
2221
2222 /* Boot CPU's cache setup in setup_arch(). */
2223 if (!is_boot_cpu)
2224 cpu_cache_init();
2225 tlb_init();
2226 TLBMISS_HANDLER_SETUP();
2227}
2228
2229/* Install CPU exception handler */
2230void set_handler(unsigned long offset, void *addr, unsigned long size)
2231{
2232#ifdef CONFIG_CPU_MICROMIPS
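	/* microMIPS handler addresses have the ISA bit set; strip it to copy. */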
2233 memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
2234#else
2235 memcpy((void *)(ebase + offset), addr, size);
2236#endif
2237 local_flush_icache_range(ebase + offset, ebase + offset + size);
2238}
2239
2240static const char panic_null_cerr[] =
2241 "Trying to set NULL cache error exception handler\n";
2242
2243/*
2244 * Install uncached CPU exception handler.
2245 * This is suitable only for the cache error exception, which is the only
2246 * exception handler that runs uncached.
2247 */
2248void set_uncached_handler(unsigned long offset, void *addr,
2249 unsigned long size)
2250{
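	/* Use the uncached KSEG1 alias of the exception vector area. */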
2251 unsigned long uncached_ebase = CKSEG1ADDR(ebase);
2252
2253 if (!addr)
2254 panic(panic_null_cerr);
2255
2256 memcpy((void *)(uncached_ebase + offset), addr, size);
2257}
2258
2259static int __initdata rdhwr_noopt;
2260static int __init set_rdhwr_noopt(char *str)
2261{
2262 rdhwr_noopt = 1;
2263 return 1;
2264}
2265
2266__setup("rdhwr_noopt", set_rdhwr_noopt);
2267
2268void __init trap_init(void)
2269{
2270 extern char except_vec3_generic;
2271 extern char except_vec4;
2272 extern char except_vec3_r4000;
2273 unsigned long i, vec_size;
2274 phys_addr_t ebase_pa;
2275
2276 check_wait();
2277
2278 if (!cpu_has_mips_r2_r6) {
2279 ebase = CAC_BASE;
2280 ebase_pa = virt_to_phys((void *)ebase);
2281 vec_size = 0x400;
2282
2283 memblock_reserve(ebase_pa, vec_size);
2284 } else {
2285 if (cpu_has_veic || cpu_has_vint)
2286 vec_size = 0x200 + VECTORSPACING*64;
2287 else
2288 vec_size = PAGE_SIZE;
2289
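		/*
		 * Over-align the allocation to a power of two covering the
		 * whole region so the vector offsets never carry out of
		 * the base's address bits.
		 */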
2290 ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
2291 if (!ebase_pa)
2292 panic("%s: Failed to allocate %lu bytes align=0x%x\n",
2293 __func__, vec_size, 1 << fls(vec_size));
2294
2295 /*
2296 * Try to ensure ebase resides in KSeg0 if possible.
2297 *
2298 * It shouldn't generally be in XKPhys on MIPS64 to avoid
2299 * hitting a poorly defined exception base for Cache Errors.
2300		 * The allocation is likely to be in the low 512MB of physical memory,
2301 * in which case we should be able to convert to KSeg0.
2302 *
2303 * EVA is special though as it allows segments to be rearranged
2304 * and to become uncached during cache error handling.
2305 */
2306 if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
2307 ebase = CKSEG0ADDR(ebase_pa);
2308 else
2309 ebase = (unsigned long)phys_to_virt(ebase_pa);
2310 }
2311
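	/*
	 * Choose the ISA mode for exception entry (Config3.ISAOnExc):
	 * microMIPS when the kernel is built for it, classic MIPS otherwise.
	 */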
2312 if (cpu_has_mmips) {
2313 unsigned int config3 = read_c0_config3();
2314
2315 if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
2316 write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
2317 else
2318 write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
2319 }
2320
2321 if (board_ebase_setup)
2322 board_ebase_setup();
2323 per_cpu_trap_init(true);
2324 memblock_set_bottom_up(false);
2325
2326 /*
2327 * Copy the generic exception handlers to their final destination.
2328 * This will be overridden later as suitable for a particular
2329 * configuration.
2330 */
2331 set_handler(0x180, &except_vec3_generic, 0x80);
2332
2333 /*
2334 * Setup default vectors
2335 */
2336 for (i = 0; i <= 31; i++)
2337 set_except_vector(i, handle_reserved);
2338
2339 /*
2340	 * Copy the EJTAG debug exception vector handler code to its final
2341 * destination.
2342 */
2343 if (cpu_has_ejtag && board_ejtag_handler_setup)
2344 board_ejtag_handler_setup();
2345
2346 /*
2347 * Only some CPUs have the watch exceptions.
2348 */
2349 if (cpu_has_watch)
2350 set_except_vector(EXCCODE_WATCH, handle_watch);
2351
2352 /*
2353 * Initialise interrupt handlers
2354 */
2355 if (cpu_has_veic || cpu_has_vint) {
2356 int nvec = cpu_has_veic ? 64 : 8;
2357 for (i = 0; i < nvec; i++)
2358 set_vi_handler(i, NULL);
2359	} else if (cpu_has_divec)
2361 set_handler(0x200, &except_vec4, 0x8);
2362
2363 /*
2364	 * Some CPUs can enable/disable cache parity detection, but do it
2365	 * in different ways.
2366 */
2367 parity_protection_init();
2368
2369 /*
2370 * The Data Bus Errors / Instruction Bus Errors are signaled
2371 * by external hardware. Therefore these two exceptions
2372	 * may have board-specific handlers.
2373 */
2374 if (board_be_init)
2375 board_be_init();
2376
2377 set_except_vector(EXCCODE_INT, using_rollback_handler() ?
2378 rollback_handle_int : handle_int);
2379 set_except_vector(EXCCODE_MOD, handle_tlbm);
2380 set_except_vector(EXCCODE_TLBL, handle_tlbl);
2381 set_except_vector(EXCCODE_TLBS, handle_tlbs);
2382
2383 set_except_vector(EXCCODE_ADEL, handle_adel);
2384 set_except_vector(EXCCODE_ADES, handle_ades);
2385
2386 set_except_vector(EXCCODE_IBE, handle_ibe);
2387 set_except_vector(EXCCODE_DBE, handle_dbe);
2388
2389 set_except_vector(EXCCODE_SYS, handle_sys);
2390 set_except_vector(EXCCODE_BP, handle_bp);
2391
2392 if (rdhwr_noopt)
2393 set_except_vector(EXCCODE_RI, handle_ri);
2394 else {
2395 if (cpu_has_vtag_icache)
2396 set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2397 else if (current_cpu_type() == CPU_LOONGSON3)
2398 set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2399 else
2400 set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
2401 }
2402
2403 set_except_vector(EXCCODE_CPU, handle_cpu);
2404 set_except_vector(EXCCODE_OV, handle_ov);
2405 set_except_vector(EXCCODE_TR, handle_tr);
2406 set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
2407
2408 if (board_nmi_handler_setup)
2409 board_nmi_handler_setup();
2410
2411 if (cpu_has_fpu && !cpu_has_nofpuex)
2412 set_except_vector(EXCCODE_FPE, handle_fpe);
2413
2414 set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
2415
2416 if (cpu_has_rixiex) {
2417 set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
2418 set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
2419 }
2420
2421 set_except_vector(EXCCODE_MSADIS, handle_msa);
2422 set_except_vector(EXCCODE_MDMX, handle_mdmx);
2423
2424 if (cpu_has_mcheck)
2425 set_except_vector(EXCCODE_MCHECK, handle_mcheck);
2426
2427 if (cpu_has_mipsmt)
2428 set_except_vector(EXCCODE_THREAD, handle_mt);
2429
2430 set_except_vector(EXCCODE_DSPDIS, handle_dsp);
2431
2432 if (board_cache_error_setup)
2433 board_cache_error_setup();
2434
2435 if (cpu_has_vce)
2436		/* Special exception: R4[04]00 also uses the divec space. */
2437 set_handler(0x180, &except_vec3_r4000, 0x100);
2438 else if (cpu_has_4kex)
2439 set_handler(0x180, &except_vec3_generic, 0x80);
2440 else
2441 set_handler(0x080, &except_vec3_generic, 0x80);
2442
2443 local_flush_icache_range(ebase, ebase + vec_size);
2444
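	/* Sort the DBE fixup table so search_dbe_tables() can binary-search it. */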
2445 sort_extable(__start___dbe_table, __stop___dbe_table);
2446
2447 cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
2448}
2449
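/*
 * CP0 state does not survive a deep power-down, so reprogram Status,
 * HWREna and the exception vector whenever a CPU leaves a low-power
 * state (or fails to enter one).
 */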
2450static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2451 void *v)
2452{
2453 switch (cmd) {
2454 case CPU_PM_ENTER_FAILED:
2455 case CPU_PM_EXIT:
2456 configure_status();
2457 configure_hwrena();
2458 configure_exception_vector();
2459
2460 /* Restore register with CPU number for TLB handlers */
2461 TLBMISS_HANDLER_RESTORE();
2462
2463 break;
2464 }
2465
2466 return NOTIFY_OK;
2467}
2468
2469static struct notifier_block trap_pm_notifier_block = {
2470 .notifier_call = trap_pm_notifier,
2471};
2472
2473static int __init trap_pm_init(void)
2474{
2475 return cpu_pm_register_notifier(&trap_pm_notifier_block);
2476}
2477arch_initcall(trap_pm_init);
110static void show_raw_backtrace(unsigned long reg29)
111{
112 unsigned long *sp = (unsigned long *)(reg29 & ~3);
113 unsigned long addr;
114
115 printk("Call Trace:");
116#ifdef CONFIG_KALLSYMS
117 printk("\n");
118#endif
119 while (!kstack_end(sp)) {
120 unsigned long __user *p =
121 (unsigned long __user *)(unsigned long)sp++;
122 if (__get_user(addr, p)) {
123 printk(" (Bad stack address)");
124 break;
125 }
126 if (__kernel_text_address(addr))
127 print_ip_sym(addr);
128 }
129 printk("\n");
130}
131
132#ifdef CONFIG_KALLSYMS
133int raw_show_trace;
134static int __init set_raw_show_trace(char *str)
135{
136 raw_show_trace = 1;
137 return 1;
138}
139__setup("raw_show_trace", set_raw_show_trace);
140#endif
141
142static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
143{
144 unsigned long sp = regs->regs[29];
145 unsigned long ra = regs->regs[31];
146 unsigned long pc = regs->cp0_epc;
147
148 if (!task)
149 task = current;
150
151 if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
152 show_raw_backtrace(sp);
153 return;
154 }
155 printk("Call Trace:\n");
156 do {
157 print_ip_sym(pc);
158 pc = unwind_stack(task, &sp, pc, &ra);
159 } while (pc);
160 pr_cont("\n");
161}
162
163/*
164 * This routine abuses get_user()/put_user() to reference pointers
165 * with at least a bit of error checking ...
166 */
167static void show_stacktrace(struct task_struct *task,
168 const struct pt_regs *regs)
169{
170 const int field = 2 * sizeof(unsigned long);
171 long stackdata;
172 int i;
173 unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
174
175 printk("Stack :");
176 i = 0;
177 while ((unsigned long) sp & (PAGE_SIZE - 1)) {
178 if (i && ((i % (64 / field)) == 0)) {
179 pr_cont("\n");
180 printk(" ");
181 }
182 if (i > 39) {
183 pr_cont(" ...");
184 break;
185 }
186
187 if (__get_user(stackdata, sp++)) {
188 pr_cont(" (Bad stack address)");
189 break;
190 }
191
192 pr_cont(" %0*lx", field, stackdata);
193 i++;
194 }
195 pr_cont("\n");
196 show_backtrace(task, regs);
197}
198
199void show_stack(struct task_struct *task, unsigned long *sp)
200{
201 struct pt_regs regs;
202 mm_segment_t old_fs = get_fs();
203
204 regs.cp0_status = KSU_KERNEL;
205 if (sp) {
206 regs.regs[29] = (unsigned long)sp;
207 regs.regs[31] = 0;
208 regs.cp0_epc = 0;
209 } else {
210 if (task && task != current) {
211 regs.regs[29] = task->thread.reg29;
212 regs.regs[31] = 0;
213 regs.cp0_epc = task->thread.reg31;
214#ifdef CONFIG_KGDB_KDB
215 } else if (atomic_read(&kgdb_active) != -1 &&
216 kdb_current_regs) {
217 memcpy(®s, kdb_current_regs, sizeof(regs));
218#endif /* CONFIG_KGDB_KDB */
219 } else {
220 prepare_frametrace(®s);
221 }
222 }
223 /*
224 * show_stack() deals exclusively with kernel mode, so be sure to access
225 * the stack in the kernel (not user) address space.
226 */
227 set_fs(KERNEL_DS);
228 show_stacktrace(task, ®s);
229 set_fs(old_fs);
230}
231
232static void show_code(unsigned int __user *pc)
233{
234 long i;
235 unsigned short __user *pc16 = NULL;
236
237 printk("Code:");
238
239 if ((unsigned long)pc & 1)
240 pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
241 for(i = -3 ; i < 6 ; i++) {
242 unsigned int insn;
243 if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
244 pr_cont(" (Bad address in epc)\n");
245 break;
246 }
247 pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
248 }
249 pr_cont("\n");
250}
251
252static void __show_regs(const struct pt_regs *regs)
253{
254 const int field = 2 * sizeof(unsigned long);
255 unsigned int cause = regs->cp0_cause;
256 unsigned int exccode;
257 int i;
258
259 show_regs_print_info(KERN_DEFAULT);
260
261 /*
262 * Saved main processor registers
263 */
264 for (i = 0; i < 32; ) {
265 if ((i % 4) == 0)
266 printk("$%2d :", i);
267 if (i == 0)
268 pr_cont(" %0*lx", field, 0UL);
269 else if (i == 26 || i == 27)
270 pr_cont(" %*s", field, "");
271 else
272 pr_cont(" %0*lx", field, regs->regs[i]);
273
274 i++;
275 if ((i % 4) == 0)
276 pr_cont("\n");
277 }
278
279#ifdef CONFIG_CPU_HAS_SMARTMIPS
280 printk("Acx : %0*lx\n", field, regs->acx);
281#endif
282 printk("Hi : %0*lx\n", field, regs->hi);
283 printk("Lo : %0*lx\n", field, regs->lo);
284
285 /*
286 * Saved cp0 registers
287 */
288 printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
289 (void *) regs->cp0_epc);
290 printk("ra : %0*lx %pS\n", field, regs->regs[31],
291 (void *) regs->regs[31]);
292
293 printk("Status: %08x ", (uint32_t) regs->cp0_status);
294
295 if (cpu_has_3kex) {
296 if (regs->cp0_status & ST0_KUO)
297 pr_cont("KUo ");
298 if (regs->cp0_status & ST0_IEO)
299 pr_cont("IEo ");
300 if (regs->cp0_status & ST0_KUP)
301 pr_cont("KUp ");
302 if (regs->cp0_status & ST0_IEP)
303 pr_cont("IEp ");
304 if (regs->cp0_status & ST0_KUC)
305 pr_cont("KUc ");
306 if (regs->cp0_status & ST0_IEC)
307 pr_cont("IEc ");
308 } else if (cpu_has_4kex) {
309 if (regs->cp0_status & ST0_KX)
310 pr_cont("KX ");
311 if (regs->cp0_status & ST0_SX)
312 pr_cont("SX ");
313 if (regs->cp0_status & ST0_UX)
314 pr_cont("UX ");
315 switch (regs->cp0_status & ST0_KSU) {
316 case KSU_USER:
317 pr_cont("USER ");
318 break;
319 case KSU_SUPERVISOR:
320 pr_cont("SUPERVISOR ");
321 break;
322 case KSU_KERNEL:
323 pr_cont("KERNEL ");
324 break;
325 default:
326 pr_cont("BAD_MODE ");
327 break;
328 }
329 if (regs->cp0_status & ST0_ERL)
330 pr_cont("ERL ");
331 if (regs->cp0_status & ST0_EXL)
332 pr_cont("EXL ");
333 if (regs->cp0_status & ST0_IE)
334 pr_cont("IE ");
335 }
336 pr_cont("\n");
337
338 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
339 printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
340
341 if (1 <= exccode && exccode <= 5)
342 printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
343
344 printk("PrId : %08x (%s)\n", read_c0_prid(),
345 cpu_name_string());
346}
347
348/*
349 * FIXME: really the generic show_regs should take a const pointer argument.
350 */
351void show_regs(struct pt_regs *regs)
352{
353 __show_regs((struct pt_regs *)regs);
354}
355
356void show_registers(struct pt_regs *regs)
357{
358 const int field = 2 * sizeof(unsigned long);
359 mm_segment_t old_fs = get_fs();
360
361 __show_regs(regs);
362 print_modules();
363 printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
364 current->comm, current->pid, current_thread_info(), current,
365 field, current_thread_info()->tp_value);
366 if (cpu_has_userlocal) {
367 unsigned long tls;
368
369 tls = read_c0_userlocal();
370 if (tls != current_thread_info()->tp_value)
371 printk("*HwTLS: %0*lx\n", field, tls);
372 }
373
374 if (!user_mode(regs))
375 /* Necessary for getting the correct stack content */
376 set_fs(KERNEL_DS);
377 show_stacktrace(current, regs);
378 show_code((unsigned int __user *) regs->cp0_epc);
379 printk("\n");
380 set_fs(old_fs);
381}
382
383static DEFINE_RAW_SPINLOCK(die_lock);
384
385void __noreturn die(const char *str, struct pt_regs *regs)
386{
387 static int die_counter;
388 int sig = SIGSEGV;
389
390 oops_enter();
391
392 if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
393 SIGSEGV) == NOTIFY_STOP)
394 sig = 0;
395
396 console_verbose();
397 raw_spin_lock_irq(&die_lock);
398 bust_spinlocks(1);
399
400 printk("%s[#%d]:\n", str, ++die_counter);
401 show_registers(regs);
402 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
403 raw_spin_unlock_irq(&die_lock);
404
405 oops_exit();
406
407 if (in_interrupt())
408 panic("Fatal exception in interrupt");
409
410 if (panic_on_oops)
411 panic("Fatal exception");
412
413 if (regs && kexec_should_crash(current))
414 crash_kexec(regs);
415
416 do_exit(sig);
417}
418
419extern struct exception_table_entry __start___dbe_table[];
420extern struct exception_table_entry __stop___dbe_table[];
421
422__asm__(
423" .section __dbe_table, \"a\"\n"
424" .previous \n");
425
426/* Given an address, look for it in the exception tables. */
427static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
428{
429 const struct exception_table_entry *e;
430
431 e = search_extable(__start___dbe_table,
432 __stop___dbe_table - __start___dbe_table, addr);
433 if (!e)
434 e = search_module_dbetables(addr);
435 return e;
436}
437
438asmlinkage void do_be(struct pt_regs *regs)
439{
440 const int field = 2 * sizeof(unsigned long);
441 const struct exception_table_entry *fixup = NULL;
442 int data = regs->cp0_cause & 4;
443 int action = MIPS_BE_FATAL;
444 enum ctx_state prev_state;
445
446 prev_state = exception_enter();
447 /* XXX For now. Fixme, this searches the wrong table ... */
448 if (data && !user_mode(regs))
449 fixup = search_dbe_tables(exception_epc(regs));
450
451 if (fixup)
452 action = MIPS_BE_FIXUP;
453
454 if (board_be_handler)
455 action = board_be_handler(regs, fixup != NULL);
456 else
457 mips_cm_error_report();
458
459 switch (action) {
460 case MIPS_BE_DISCARD:
461 goto out;
462 case MIPS_BE_FIXUP:
463 if (fixup) {
464 regs->cp0_epc = fixup->nextinsn;
465 goto out;
466 }
467 break;
468 default:
469 break;
470 }
471
472 /*
473 * Assume it would be too dangerous to continue ...
474 */
475 printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
476 data ? "Data" : "Instruction",
477 field, regs->cp0_epc, field, regs->regs[31]);
478 if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
479 SIGBUS) == NOTIFY_STOP)
480 goto out;
481
482 die_if_kernel("Oops", regs);
483 force_sig(SIGBUS, current);
484
485out:
486 exception_exit(prev_state);
487}
488
489/*
490 * ll/sc, rdhwr, sync emulation
491 */
492
493#define OPCODE 0xfc000000
494#define BASE 0x03e00000
495#define RT 0x001f0000
496#define OFFSET 0x0000ffff
497#define LL 0xc0000000
498#define SC 0xe0000000
499#define SPEC0 0x00000000
500#define SPEC3 0x7c000000
501#define RD 0x0000f800
502#define FUNC 0x0000003f
503#define SYNC 0x0000000f
504#define RDHWR 0x0000003b
505
506/* microMIPS definitions */
507#define MM_POOL32A_FUNC 0xfc00ffff
508#define MM_RDHWR 0x00006b3c
509#define MM_RS 0x001f0000
510#define MM_RT 0x03e00000
511
512/*
513 * The ll_bit is cleared by r*_switch.S
514 */
515
516unsigned int ll_bit;
517struct task_struct *ll_task;
518
519static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
520{
521 unsigned long value, __user *vaddr;
522 long offset;
523
524 /*
525 * analyse the ll instruction that just caused a ri exception
526 * and put the referenced address to addr.
527 */
528
529 /* sign extend offset */
530 offset = opcode & OFFSET;
531 offset <<= 16;
532 offset >>= 16;
533
534 vaddr = (unsigned long __user *)
535 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
536
537 if ((unsigned long)vaddr & 3)
538 return SIGBUS;
539 if (get_user(value, vaddr))
540 return SIGSEGV;
541
542 preempt_disable();
543
544 if (ll_task == NULL || ll_task == current) {
545 ll_bit = 1;
546 } else {
547 ll_bit = 0;
548 }
549 ll_task = current;
550
551 preempt_enable();
552
553 regs->regs[(opcode & RT) >> 16] = value;
554
555 return 0;
556}
557
558static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
559{
560 unsigned long __user *vaddr;
561 unsigned long reg;
562 long offset;
563
564 /*
565 * analyse the sc instruction that just caused a ri exception
566 * and put the referenced address to addr.
567 */
568
569 /* sign extend offset */
570 offset = opcode & OFFSET;
571 offset <<= 16;
572 offset >>= 16;
573
574 vaddr = (unsigned long __user *)
575 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
576 reg = (opcode & RT) >> 16;
577
578 if ((unsigned long)vaddr & 3)
579 return SIGBUS;
580
581 preempt_disable();
582
583 if (ll_bit == 0 || ll_task != current) {
584 regs->regs[reg] = 0;
585 preempt_enable();
586 return 0;
587 }
588
589 preempt_enable();
590
591 if (put_user(regs->regs[reg], vaddr))
592 return SIGSEGV;
593
594 regs->regs[reg] = 1;
595
596 return 0;
597}
598
599/*
600 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both
601 * opcodes are supposed to result in coprocessor unusable exceptions if
602 * executed on ll/sc-less processors. That's the theory. In practice a
603 * few processors such as NEC's VR4100 throw reserved instruction exceptions
604 * instead, so we're doing the emulation thing in both exception handlers.
605 */
606static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
607{
608 if ((opcode & OPCODE) == LL) {
609 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
610 1, regs, 0);
611 return simulate_ll(regs, opcode);
612 }
613 if ((opcode & OPCODE) == SC) {
614 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
615 1, regs, 0);
616 return simulate_sc(regs, opcode);
617 }
618
619 return -1; /* Must be something else ... */
620}
621
622/*
623 * Simulate trapping 'rdhwr' instructions to provide user accessible
624 * registers not implemented in hardware.
625 */
626static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
627{
628 struct thread_info *ti = task_thread_info(current);
629
630 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
631 1, regs, 0);
632 switch (rd) {
633 case MIPS_HWR_CPUNUM: /* CPU number */
634 regs->regs[rt] = smp_processor_id();
635 return 0;
636 case MIPS_HWR_SYNCISTEP: /* SYNCI length */
637 regs->regs[rt] = min(current_cpu_data.dcache.linesz,
638 current_cpu_data.icache.linesz);
639 return 0;
640 case MIPS_HWR_CC: /* Read count register */
641 regs->regs[rt] = read_c0_count();
642 return 0;
643 case MIPS_HWR_CCRES: /* Count register resolution */
644 switch (current_cpu_type()) {
645 case CPU_20KC:
646 case CPU_25KF:
647 regs->regs[rt] = 1;
648 break;
649 default:
650 regs->regs[rt] = 2;
651 }
652 return 0;
653 case MIPS_HWR_ULR: /* Read UserLocal register */
654 regs->regs[rt] = ti->tp_value;
655 return 0;
656 default:
657 return -1;
658 }
659}
660
661static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
662{
663 if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
664 int rd = (opcode & RD) >> 11;
665 int rt = (opcode & RT) >> 16;
666
667 simulate_rdhwr(regs, rd, rt);
668 return 0;
669 }
670
671 /* Not ours. */
672 return -1;
673}
674
675static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
676{
677 if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
678 int rd = (opcode & MM_RS) >> 16;
679 int rt = (opcode & MM_RT) >> 21;
680 simulate_rdhwr(regs, rd, rt);
681 return 0;
682 }
683
684 /* Not ours. */
685 return -1;
686}
687
688static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
689{
690 if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
691 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
692 1, regs, 0);
693 return 0;
694 }
695
696 return -1; /* Must be something else ... */
697}
698
699asmlinkage void do_ov(struct pt_regs *regs)
700{
701 enum ctx_state prev_state;
702 siginfo_t info;
703
704 clear_siginfo(&info);
705 info.si_signo = SIGFPE;
706 info.si_code = FPE_INTOVF;
707 info.si_addr = (void __user *)regs->cp0_epc;
708
709 prev_state = exception_enter();
710 die_if_kernel("Integer overflow", regs);
711
712 force_sig_info(SIGFPE, &info, current);
713 exception_exit(prev_state);
714}
715
716/*
717 * Send SIGFPE according to FCSR Cause bits, which must have already
718 * been masked against Enable bits. This is impotant as Inexact can
719 * happen together with Overflow or Underflow, and `ptrace' can set
720 * any bits.
721 */
722void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
723 struct task_struct *tsk)
724{
725 struct siginfo si;
726
727 clear_siginfo(&si);
728 si.si_addr = fault_addr;
729 si.si_signo = SIGFPE;
730
731 if (fcr31 & FPU_CSR_INV_X)
732 si.si_code = FPE_FLTINV;
733 else if (fcr31 & FPU_CSR_DIV_X)
734 si.si_code = FPE_FLTDIV;
735 else if (fcr31 & FPU_CSR_OVF_X)
736 si.si_code = FPE_FLTOVF;
737 else if (fcr31 & FPU_CSR_UDF_X)
738 si.si_code = FPE_FLTUND;
739 else if (fcr31 & FPU_CSR_INE_X)
740 si.si_code = FPE_FLTRES;
741
742 force_sig_info(SIGFPE, &si, tsk);
743}
744
745int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
746{
747 struct siginfo si;
748 struct vm_area_struct *vma;
749
750 clear_siginfo(&si);
751 switch (sig) {
752 case 0:
753 return 0;
754
755 case SIGFPE:
756 force_fcr31_sig(fcr31, fault_addr, current);
757 return 1;
758
759 case SIGBUS:
760 si.si_addr = fault_addr;
761 si.si_signo = sig;
762 si.si_code = BUS_ADRERR;
763 force_sig_info(sig, &si, current);
764 return 1;
765
766 case SIGSEGV:
767 si.si_addr = fault_addr;
768 si.si_signo = sig;
769 down_read(¤t->mm->mmap_sem);
770 vma = find_vma(current->mm, (unsigned long)fault_addr);
771 if (vma && (vma->vm_start <= (unsigned long)fault_addr))
772 si.si_code = SEGV_ACCERR;
773 else
774 si.si_code = SEGV_MAPERR;
775 up_read(¤t->mm->mmap_sem);
776 force_sig_info(sig, &si, current);
777 return 1;
778
779 default:
780 force_sig(sig, current);
781 return 1;
782 }
783}
784
785static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
786 unsigned long old_epc, unsigned long old_ra)
787{
788 union mips_instruction inst = { .word = opcode };
789 void __user *fault_addr;
790 unsigned long fcr31;
791 int sig;
792
793 /* If it's obviously not an FP instruction, skip it */
794 switch (inst.i_format.opcode) {
795 case cop1_op:
796 case cop1x_op:
797 case lwc1_op:
798 case ldc1_op:
799 case swc1_op:
800 case sdc1_op:
801 break;
802
803 default:
804 return -1;
805 }
806
807 /*
808 * do_ri skipped over the instruction via compute_return_epc, undo
809 * that for the FPU emulator.
810 */
811 regs->cp0_epc = old_epc;
812 regs->regs[31] = old_ra;
813
814 /* Save the FP context to struct thread_struct */
815 lose_fpu(1);
816
817 /* Run the emulator */
818 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
819 &fault_addr);
820
821 /*
822 * We can't allow the emulated instruction to leave any
823 * enabled Cause bits set in $fcr31.
824 */
825 fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
826 current->thread.fpu.fcr31 &= ~fcr31;
827
828 /* Restore the hardware register state */
829 own_fpu(1);
830
831 /* Send a signal if required. */
832 process_fpemu_return(sig, fault_addr, fcr31);
833
834 return 0;
835}
836
837/*
838 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
839 */
840asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
841{
842 enum ctx_state prev_state;
843 void __user *fault_addr;
844 int sig;
845
846 prev_state = exception_enter();
847 if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
848 SIGFPE) == NOTIFY_STOP)
849 goto out;
850
851 /* Clear FCSR.Cause before enabling interrupts */
852 write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
853 local_irq_enable();
854
855 die_if_kernel("FP exception in kernel code", regs);
856
857 if (fcr31 & FPU_CSR_UNI_X) {
858 /*
859 * Unimplemented operation exception. If we've got the full
860 * software emulator on-board, let's use it...
861 *
862 * Force FPU to dump state into task/thread context. We're
863 * moving a lot of data here for what is probably a single
864 * instruction, but the alternative is to pre-decode the FP
865 * register operands before invoking the emulator, which seems
866 * a bit extreme for what should be an infrequent event.
867 */
868 /* Ensure 'resume' not overwrite saved fp context again. */
869 lose_fpu(1);
870
871 /* Run the emulator */
872 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
873 &fault_addr);
874
875 /*
876 * We can't allow the emulated instruction to leave any
877 * enabled Cause bits set in $fcr31.
878 */
879 fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
880 current->thread.fpu.fcr31 &= ~fcr31;
881
882 /* Restore the hardware register state */
883 own_fpu(1); /* Using the FPU again. */
884 } else {
885 sig = SIGFPE;
886 fault_addr = (void __user *) regs->cp0_epc;
887 }
888
889 /* Send a signal if required. */
890 process_fpemu_return(sig, fault_addr, fcr31);
891
892out:
893 exception_exit(prev_state);
894}
895
896void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
897 const char *str)
898{
899 siginfo_t info;
900 char b[40];
901
902 clear_siginfo(&info);
903#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
904 if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
905 SIGTRAP) == NOTIFY_STOP)
906 return;
907#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
908
909 if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
910 SIGTRAP) == NOTIFY_STOP)
911 return;
912
913 /*
914 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
915 * insns, even for trap and break codes that indicate arithmetic
916 * failures. Weird ...
917 * But should we continue the brokenness??? --macro
918 */
919 switch (code) {
920 case BRK_OVERFLOW:
921 case BRK_DIVZERO:
922 scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
923 die_if_kernel(b, regs);
924 if (code == BRK_DIVZERO)
925 info.si_code = FPE_INTDIV;
926 else
927 info.si_code = FPE_INTOVF;
928 info.si_signo = SIGFPE;
929 info.si_addr = (void __user *) regs->cp0_epc;
930 force_sig_info(SIGFPE, &info, current);
931 break;
932 case BRK_BUG:
933 die_if_kernel("Kernel bug detected", regs);
934 force_sig(SIGTRAP, current);
935 break;
936 case BRK_MEMU:
937 /*
938 * This breakpoint code is used by the FPU emulator to retake
939 * control of the CPU after executing the instruction from the
940 * delay slot of an emulated branch.
941 *
942 * Terminate if exception was recognized as a delay slot return
943 * otherwise handle as normal.
944 */
945 if (do_dsemulret(regs))
946 return;
947
948 die_if_kernel("Math emu break/trap", regs);
949 force_sig(SIGTRAP, current);
950 break;
951 default:
952 scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
953 die_if_kernel(b, regs);
954 if (si_code) {
955 info.si_signo = SIGTRAP;
956 info.si_code = si_code;
957 force_sig_info(SIGTRAP, &info, current);
958 } else {
959 force_sig(SIGTRAP, current);
960 }
961 }
962}
963
964asmlinkage void do_bp(struct pt_regs *regs)
965{
966 unsigned long epc = msk_isa16_mode(exception_epc(regs));
967 unsigned int opcode, bcode;
968 enum ctx_state prev_state;
969 mm_segment_t seg;
970
971 seg = get_fs();
972 if (!user_mode(regs))
973 set_fs(KERNEL_DS);
974
975 prev_state = exception_enter();
976 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
977 if (get_isa16_mode(regs->cp0_epc)) {
978 u16 instr[2];
979
980 if (__get_user(instr[0], (u16 __user *)epc))
981 goto out_sigsegv;
982
983 if (!cpu_has_mmips) {
984 /* MIPS16e mode */
985 bcode = (instr[0] >> 5) & 0x3f;
986 } else if (mm_insn_16bit(instr[0])) {
987 /* 16-bit microMIPS BREAK */
988 bcode = instr[0] & 0xf;
989 } else {
990 /* 32-bit microMIPS BREAK */
991 if (__get_user(instr[1], (u16 __user *)(epc + 2)))
992 goto out_sigsegv;
993 opcode = (instr[0] << 16) | instr[1];
994 bcode = (opcode >> 6) & ((1 << 20) - 1);
995 }
996 } else {
997 if (__get_user(opcode, (unsigned int __user *)epc))
998 goto out_sigsegv;
999 bcode = (opcode >> 6) & ((1 << 20) - 1);
1000 }
1001
1002 /*
1003 * There is the ancient bug in the MIPS assemblers that the break
1004 * code starts left to bit 16 instead to bit 6 in the opcode.
1005 * Gas is bug-compatible, but not always, grrr...
1006 * We handle both cases with a simple heuristics. --macro
1007 */
1008 if (bcode >= (1 << 10))
1009 bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
1010
1011 /*
1012 * notify the kprobe handlers, if instruction is likely to
1013 * pertain to them.
1014 */
1015 switch (bcode) {
1016 case BRK_UPROBE:
1017 if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
1018 current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1019 goto out;
1020 else
1021 break;
1022 case BRK_UPROBE_XOL:
1023 if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
1024 current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1025 goto out;
1026 else
1027 break;
1028 case BRK_KPROBE_BP:
1029 if (notify_die(DIE_BREAK, "debug", regs, bcode,
1030 current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1031 goto out;
1032 else
1033 break;
1034 case BRK_KPROBE_SSTEPBP:
1035 if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
1036 current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1037 goto out;
1038 else
1039 break;
1040 default:
1041 break;
1042 }
1043
1044 do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
1045
1046out:
1047 set_fs(seg);
1048 exception_exit(prev_state);
1049 return;
1050
1051out_sigsegv:
1052 force_sig(SIGSEGV, current);
1053 goto out;
1054}
1055
1056asmlinkage void do_tr(struct pt_regs *regs)
1057{
1058 u32 opcode, tcode = 0;
1059 enum ctx_state prev_state;
1060 u16 instr[2];
1061 mm_segment_t seg;
1062 unsigned long epc = msk_isa16_mode(exception_epc(regs));
1063
1064 seg = get_fs();
1065 if (!user_mode(regs))
1066 set_fs(get_ds());
1067
1068 prev_state = exception_enter();
1069 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1070 if (get_isa16_mode(regs->cp0_epc)) {
1071 if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
1072 __get_user(instr[1], (u16 __user *)(epc + 2)))
1073 goto out_sigsegv;
1074 opcode = (instr[0] << 16) | instr[1];
1075 /* Immediate versions don't provide a code. */
1076 if (!(opcode & OPCODE))
1077 tcode = (opcode >> 12) & ((1 << 4) - 1);
1078 } else {
1079 if (__get_user(opcode, (u32 __user *)epc))
1080 goto out_sigsegv;
1081 /* Immediate versions don't provide a code. */
1082 if (!(opcode & OPCODE))
1083 tcode = (opcode >> 6) & ((1 << 10) - 1);
1084 }
1085
1086 do_trap_or_bp(regs, tcode, 0, "Trap");
1087
1088out:
1089 set_fs(seg);
1090 exception_exit(prev_state);
1091 return;
1092
1093out_sigsegv:
1094 force_sig(SIGSEGV, current);
1095 goto out;
1096}
1097
1098asmlinkage void do_ri(struct pt_regs *regs)
1099{
1100 unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1101 unsigned long old_epc = regs->cp0_epc;
1102 unsigned long old31 = regs->regs[31];
1103 enum ctx_state prev_state;
1104 unsigned int opcode = 0;
1105 int status = -1;
1106
1107 /*
1108 * Avoid any kernel code. Just emulate the R2 instruction
1109 * as quickly as possible.
1110 */
1111 if (mipsr2_emulation && cpu_has_mips_r6 &&
1112 likely(user_mode(regs)) &&
1113 likely(get_user(opcode, epc) >= 0)) {
1114 unsigned long fcr31 = 0;
1115
1116 status = mipsr2_decoder(regs, opcode, &fcr31);
1117 switch (status) {
1118 case 0:
1119 case SIGEMT:
1120 return;
1121 case SIGILL:
1122 goto no_r2_instr;
1123 default:
1124 process_fpemu_return(status,
1125 ¤t->thread.cp0_baduaddr,
1126 fcr31);
1127 return;
1128 }
1129 }
1130
1131no_r2_instr:
1132
1133 prev_state = exception_enter();
1134 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1135
1136 if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
1137 SIGILL) == NOTIFY_STOP)
1138 goto out;
1139
1140 die_if_kernel("Reserved instruction in kernel code", regs);
1141
1142 if (unlikely(compute_return_epc(regs) < 0))
1143 goto out;
1144
1145 if (!get_isa16_mode(regs->cp0_epc)) {
1146 if (unlikely(get_user(opcode, epc) < 0))
1147 status = SIGSEGV;
1148
1149 if (!cpu_has_llsc && status < 0)
1150 status = simulate_llsc(regs, opcode);
1151
1152 if (status < 0)
1153 status = simulate_rdhwr_normal(regs, opcode);
1154
1155 if (status < 0)
1156 status = simulate_sync(regs, opcode);
1157
1158 if (status < 0)
1159 status = simulate_fp(regs, opcode, old_epc, old31);
1160 } else if (cpu_has_mmips) {
1161 unsigned short mmop[2] = { 0 };
1162
1163 if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1164 status = SIGSEGV;
1165 if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1166 status = SIGSEGV;
1167 opcode = mmop[0];
1168 opcode = (opcode << 16) | mmop[1];
1169
1170 if (status < 0)
1171 status = simulate_rdhwr_mm(regs, opcode);
1172 }
1173
1174 if (status < 0)
1175 status = SIGILL;
1176
1177 if (unlikely(status > 0)) {
1178 regs->cp0_epc = old_epc; /* Undo skip-over. */
1179 regs->regs[31] = old31;
1180 force_sig(status, current);
1181 }
1182
1183out:
1184 exception_exit(prev_state);
1185}
1186
1187/*
1188 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
1189 * emulated more than some threshold number of instructions, force migration to
1190 * a "CPU" that has FP support.
1191 */
1192static void mt_ase_fp_affinity(void)
1193{
1194#ifdef CONFIG_MIPS_MT_FPAFF
1195 if (mt_fpemul_threshold > 0 &&
1196 ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
1197 /*
1198 * If there's no FPU present, or if the application has already
1199 * restricted the allowed set to exclude any CPUs with FPUs,
1200 * we'll skip the procedure.
1201 */
1202 if (cpumask_intersects(¤t->cpus_allowed, &mt_fpu_cpumask)) {
1203 cpumask_t tmask;
1204
1205 current->thread.user_cpus_allowed
1206 = current->cpus_allowed;
1207 cpumask_and(&tmask, ¤t->cpus_allowed,
1208 &mt_fpu_cpumask);
1209 set_cpus_allowed_ptr(current, &tmask);
1210 set_thread_flag(TIF_FPUBOUND);
1211 }
1212 }
1213#endif /* CONFIG_MIPS_MT_FPAFF */
1214}
1215
1216/*
1217 * No lock; only written during early bootup by CPU 0.
1218 */
1219static RAW_NOTIFIER_HEAD(cu2_chain);
1220
1221int __ref register_cu2_notifier(struct notifier_block *nb)
1222{
1223 return raw_notifier_chain_register(&cu2_chain, nb);
1224}
1225
1226int cu2_notifier_call_chain(unsigned long val, void *v)
1227{
1228 return raw_notifier_call_chain(&cu2_chain, val, v);
1229}
1230
1231static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1232 void *data)
1233{
1234 struct pt_regs *regs = data;
1235
1236 die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
1237 "instruction", regs);
1238 force_sig(SIGILL, current);
1239
1240 return NOTIFY_OK;
1241}
1242
1243static int enable_restore_fp_context(int msa)
1244{
1245 int err, was_fpu_owner, prior_msa;
1246
1247 /*
1248 * If an FP mode switch is currently underway, wait for it to
1249 * complete before proceeding.
1250 */
1251 wait_var_event(¤t->mm->context.fp_mode_switching,
1252 !atomic_read(¤t->mm->context.fp_mode_switching));
1253
1254 if (!used_math()) {
1255 /* First time FP context user. */
1256 preempt_disable();
1257 err = init_fpu();
1258 if (msa && !err) {
1259 enable_msa();
1260 init_msa_upper();
1261 set_thread_flag(TIF_USEDMSA);
1262 set_thread_flag(TIF_MSA_CTX_LIVE);
1263 }
1264 preempt_enable();
1265 if (!err)
1266 set_used_math();
1267 return err;
1268 }
1269
1270 /*
1271 * This task has formerly used the FP context.
1272 *
1273 * If this thread has no live MSA vector context then we can simply
1274 * restore the scalar FP context. If it has live MSA vector context
1275 * (that is, it has or may have used MSA since last performing a
1276 * function call) then we'll need to restore the vector context. This
1277 * applies even if we're currently only executing a scalar FP
1278 * instruction. This is because if we were to later execute an MSA
1279 * instruction then we'd either have to:
1280 *
1281 * - Restore the vector context & clobber any registers modified by
1282 * scalar FP instructions between now & then.
1283 *
1284 * or
1285 *
1286 * - Not restore the vector context & lose the most significant bits
1287 * of all vector registers.
1288 *
1289 * Neither of those options is acceptable. We cannot restore the least
1290 * significant bits of the registers now & only restore the most
1291 * significant bits later because the most significant bits of any
1292 * vector registers whose aliased FP register is modified now will have
1293 * been zeroed. We'd have no way to know that when restoring the vector
1294 * context & thus may load an outdated value for the most significant
1295 * bits of a vector register.
1296 */
1297 if (!msa && !thread_msa_context_live())
1298 return own_fpu(1);
1299
1300 /*
1301 * This task is using or has previously used MSA. Thus we require
1302 * that Status.FR == 1.
1303 */
1304 preempt_disable();
1305 was_fpu_owner = is_fpu_owner();
1306 err = own_fpu_inatomic(0);
1307 if (err)
1308 goto out;
1309
1310 enable_msa();
1311 write_msa_csr(current->thread.fpu.msacsr);
1312 set_thread_flag(TIF_USEDMSA);
1313
1314 /*
1315 * If this is the first time that the task is using MSA and it has
1316 * previously used scalar FP in this time slice then we already nave
1317 * FP context which we shouldn't clobber. We do however need to clear
1318 * the upper 64b of each vector register so that this task has no
1319 * opportunity to see data left behind by another.
1320 */
1321 prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
1322 if (!prior_msa && was_fpu_owner) {
1323 init_msa_upper();
1324
1325 goto out;
1326 }
1327
1328 if (!prior_msa) {
1329 /*
1330 * Restore the least significant 64b of each vector register
1331 * from the existing scalar FP context.
1332 */
1333 _restore_fp(current);
1334
1335 /*
1336 * The task has not formerly used MSA, so clear the upper 64b
1337 * of each vector register such that it cannot see data left
1338 * behind by another task.
1339 */
1340 init_msa_upper();
1341 } else {
1342 /* We need to restore the vector context. */
1343 restore_msa(current);
1344
1345 /* Restore the scalar FP control & status register */
1346 if (!was_fpu_owner)
1347 write_32bit_cp1_register(CP1_STATUS,
1348 current->thread.fpu.fcr31);
1349 }
1350
1351out:
1352 preempt_enable();
1353
1354 return 0;
1355}
1356
1357asmlinkage void do_cpu(struct pt_regs *regs)
1358{
1359 enum ctx_state prev_state;
1360 unsigned int __user *epc;
1361 unsigned long old_epc, old31;
1362 void __user *fault_addr;
1363 unsigned int opcode;
1364 unsigned long fcr31;
1365 unsigned int cpid;
1366 int status, err;
1367 int sig;
1368
1369 prev_state = exception_enter();
1370 cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1371
1372 if (cpid != 2)
1373 die_if_kernel("do_cpu invoked from kernel context!", regs);
1374
1375 switch (cpid) {
1376 case 0:
1377 epc = (unsigned int __user *)exception_epc(regs);
1378 old_epc = regs->cp0_epc;
1379 old31 = regs->regs[31];
1380 opcode = 0;
1381 status = -1;
1382
1383 if (unlikely(compute_return_epc(regs) < 0))
1384 break;
1385
1386 if (!get_isa16_mode(regs->cp0_epc)) {
1387 if (unlikely(get_user(opcode, epc) < 0))
1388 status = SIGSEGV;
1389
1390 if (!cpu_has_llsc && status < 0)
1391 status = simulate_llsc(regs, opcode);
1392 }
1393
1394 if (status < 0)
1395 status = SIGILL;
1396
1397 if (unlikely(status > 0)) {
1398 regs->cp0_epc = old_epc; /* Undo skip-over. */
1399 regs->regs[31] = old31;
1400 force_sig(status, current);
1401 }
1402
1403 break;
1404
1405 case 3:
1406 /*
1407 * The COP3 opcode space and consequently the CP0.Status.CU3
1408 * bit and the CP0.Cause.CE=3 encoding have been removed as
1409 * of the MIPS III ISA. From the MIPS IV and MIPS32r2 ISAs
1410 * up the space has been reused for COP1X instructions, that
1411 * are enabled by the CP0.Status.CU1 bit and consequently
1412 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
1413 * exceptions. Some FPU-less processors that implement one
1414 * of these ISAs however use this code erroneously for COP1X
1415 * instructions. Therefore we redirect this trap to the FP
1416 * emulator too.
1417 */
1418 if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
1419 force_sig(SIGILL, current);
1420 break;
1421 }
1422 /* Fall through. */
1423
1424 case 1:
1425 err = enable_restore_fp_context(0);
1426
1427 if (raw_cpu_has_fpu && !err)
1428 break;
1429
1430 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0,
1431 &fault_addr);
1432
1433 /*
1434 * We can't allow the emulated instruction to leave
1435 * any enabled Cause bits set in $fcr31.
1436 */
1437 fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
1438 current->thread.fpu.fcr31 &= ~fcr31;
1439
1440 /* Send a signal if required. */
1441 if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
1442 mt_ase_fp_affinity();
1443
1444 break;
1445
1446 case 2:
1447 raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1448 break;
1449 }
1450
1451 exception_exit(prev_state);
1452}
1453
1454asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
1455{
1456 enum ctx_state prev_state;
1457
1458 prev_state = exception_enter();
1459 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1460 if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
1461 current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
1462 goto out;
1463
1464 /* Clear MSACSR.Cause before enabling interrupts */
1465 write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
1466 local_irq_enable();
1467
1468 die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1469 force_sig(SIGFPE, current);
1470out:
1471 exception_exit(prev_state);
1472}
1473
1474asmlinkage void do_msa(struct pt_regs *regs)
1475{
1476 enum ctx_state prev_state;
1477 int err;
1478
1479 prev_state = exception_enter();
1480
1481 if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
1482 force_sig(SIGILL, current);
1483 goto out;
1484 }
1485
1486 die_if_kernel("do_msa invoked from kernel context!", regs);
1487
1488 err = enable_restore_fp_context(1);
1489 if (err)
1490 force_sig(SIGILL, current);
1491out:
1492 exception_exit(prev_state);
1493}
1494
1495asmlinkage void do_mdmx(struct pt_regs *regs)
1496{
1497 enum ctx_state prev_state;
1498
1499 prev_state = exception_enter();
1500 force_sig(SIGILL, current);
1501 exception_exit(prev_state);
1502}
1503
1504/*
1505 * Called with interrupts disabled.
1506 */
1507asmlinkage void do_watch(struct pt_regs *regs)
1508{
1509 siginfo_t info;
1510 enum ctx_state prev_state;
1511
1512 clear_siginfo(&info);
1513 info.si_signo = SIGTRAP;
1514 info.si_code = TRAP_HWBKPT;
1515
1516 prev_state = exception_enter();
1517 /*
1518 * Clear WP (bit 22) bit of cause register so we don't loop
1519 * forever.
1520 */
1521 clear_c0_cause(CAUSEF_WP);
1522
1523 /*
1524 * If the current thread has the watch registers loaded, save
1525 * their values and send SIGTRAP. Otherwise another thread
1526 * left the registers set, clear them and continue.
1527 */
1528 if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1529 mips_read_watch_registers();
1530 local_irq_enable();
1531 force_sig_info(SIGTRAP, &info, current);
1532 } else {
1533 mips_clear_watch_registers();
1534 local_irq_enable();
1535 }
1536 exception_exit(prev_state);
1537}
1538
1539asmlinkage void do_mcheck(struct pt_regs *regs)
1540{
1541 int multi_match = regs->cp0_status & ST0_TS;
1542 enum ctx_state prev_state;
1543 mm_segment_t old_fs = get_fs();
1544
1545 prev_state = exception_enter();
1546 show_regs(regs);
1547
1548 if (multi_match) {
1549 dump_tlb_regs();
1550 pr_info("\n");
1551 dump_tlb_all();
1552 }
1553
1554 if (!user_mode(regs))
1555 set_fs(KERNEL_DS);
1556
1557 show_code((unsigned int __user *) regs->cp0_epc);
1558
1559 set_fs(old_fs);
1560
1561 /*
1562 * Some chips may have other causes of machine check (e.g. SB1
1563 * graduation timer)
1564 */
1565 panic("Caught Machine Check exception - %scaused by multiple "
1566 "matching entries in the TLB.",
1567 (multi_match) ? "" : "not ");
1568}
1569
1570asmlinkage void do_mt(struct pt_regs *regs)
1571{
1572 int subcode;
1573
1574 subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1575 >> VPECONTROL_EXCPT_SHIFT;
1576 switch (subcode) {
1577 case 0:
1578 printk(KERN_DEBUG "Thread Underflow\n");
1579 break;
1580 case 1:
1581 printk(KERN_DEBUG "Thread Overflow\n");
1582 break;
1583 case 2:
1584 printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1585 break;
1586 case 3:
1587 printk(KERN_DEBUG "Gating Storage Exception\n");
1588 break;
1589 case 4:
1590 printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1591 break;
1592 case 5:
1593 printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1594 break;
1595 default:
1596 printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1597 subcode);
1598 break;
1599 }
1600 die_if_kernel("MIPS MT Thread exception in kernel", regs);
1601
1602 force_sig(SIGILL, current);
1603}
1604
1605
1606asmlinkage void do_dsp(struct pt_regs *regs)
1607{
1608 if (cpu_has_dsp)
1609 panic("Unexpected DSP exception");
1610
1611 force_sig(SIGILL, current);
1612}
1613
1614asmlinkage void do_reserved(struct pt_regs *regs)
1615{
1616 /*
1617 * Game over - no way to handle this if it ever occurs. Most probably
1618 * caused by a new unknown cpu type or after another deadly
1619 * hard/software error.
1620 */
1621 show_regs(regs);
1622 panic("Caught reserved exception %ld - should not happen.",
1623 (regs->cp0_cause & 0x7f) >> 2);
1624}
1625
1626static int __initdata l1parity = 1;
1627static int __init nol1parity(char *s)
1628{
1629 l1parity = 0;
1630 return 1;
1631}
1632__setup("nol1par", nol1parity);
1633static int __initdata l2parity = 1;
1634static int __init nol2parity(char *s)
1635{
1636 l2parity = 0;
1637 return 1;
1638}
1639__setup("nol2par", nol2parity);
1640
1641/*
1642 * Some MIPS CPUs can enable/disable for cache parity detection, but do
1643 * it different ways.
1644 */
static inline void parity_protection_init(void)
{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000

	if (mips_cm_revision() >= CM_REV_CM3) {
		ulong gcr_ectl, cp0_ectl;

		/*
		 * With CM3 systems we need to ensure that the L1 & L2
		 * parity enables are set to the same value, since this
		 * is presumed by the hardware engineers.
		 *
		 * If the user disabled either of L1 or L2 ECC checking,
		 * disable both.
		 */
		l1parity &= l2parity;
		l2parity &= l1parity;

		/* Probe L1 ECC support */
		cp0_ectl = read_c0_ecc();
		write_c0_ecc(cp0_ectl | ERRCTL_PE);
		back_to_back_c0_hazard();
		cp0_ectl = read_c0_ecc();

		/* Probe L2 ECC support */
		gcr_ectl = read_gcr_err_control();

		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
		    !(cp0_ectl & ERRCTL_PE)) {
			/*
			 * One of L1 or L2 ECC checking isn't supported,
			 * so we cannot enable either.
			 */
			l1parity = l2parity = 0;
		}

		/* Configure L1 ECC checking */
		if (l1parity)
			cp0_ectl |= ERRCTL_PE;
		else
			cp0_ectl &= ~ERRCTL_PE;
		write_c0_ecc(cp0_ectl);
		back_to_back_c0_hazard();
		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);

		/* Configure L2 ECC checking */
		if (l2parity)
			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		else
			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
		write_gcr_err_control(gcr_ectl);
		gcr_ectl = read_gcr_err_control();
		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		WARN_ON(!!gcr_ectl != l2parity);

		pr_info("Cache parity protection %sabled\n",
			l1parity ? "en" : "dis");
		return;
	}

	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
	case CPU_P6600:
		{
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON1:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}
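
/*
 * The probes above follow the usual CP0 "write, hazard barrier, read
 * back" idiom for detecting optional register bits. A minimal sketch of
 * the pattern (SOME_OPTIONAL_BIT is a placeholder, not a real bit):
 *
 *	errctl = read_c0_ecc();
 *	write_c0_ecc(errctl | SOME_OPTIONAL_BIT);
 *	back_to_back_c0_hazard();
 *	supported = read_c0_ecc() & SOME_OPTIONAL_BIT;
 *
 * The hazard barrier waits out the CP0 write hazard; bits that the
 * implementation does not support read back as zero, so the read-back
 * value doubles as a feature probe.
 */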

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
	     ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the SDBBP instruction and resume execution at the instruction
 * that follows it.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}
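
/*
 * A minimal sketch of a client (names are hypothetical): a platform
 * that wants to observe NMIs before the kernel dies can hook the chain:
 *
 *	static int my_nmi_call(struct notifier_block *nb,
 *			       unsigned long action, void *data)
 *	{
 *		struct pt_regs *regs = data;
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nmi_nb = {
 *		.notifier_call = my_nmi_call,
 *	};
 *
 *	register_nmi_notifier(&my_nmi_nb);
 *
 * The chain is raw (unlocked), so registration must happen during early
 * boot, before other CPUs can raise NMIs; the callback receives the
 * trapping pt_regs as its data argument.
 */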

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	nmi_enter();
	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
	nmi_exit();
}

#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;

		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}
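
/*
 * A usage sketch (my_handle_ov is hypothetical): callers pass an ExcCode
 * index plus a handler entry point and get the old handler back, so an
 * override can later be undone:
 *
 *	void *old = set_except_vector(EXCCODE_OV, my_handle_ov);
 *	...
 *	set_except_vector(EXCCODE_OV, old);
 *
 * trap_init() below installs handle_ov and friends with exactly this
 * call; the n == 0 case additionally patches a jump at ebase + 0x200 so
 * the dedicated interrupt vector reaches the new handler.
 */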

static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u16 *h;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt exit
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = using_rollback_handler() ?
			&rollback_except_vec_vi : &except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
		const int handler_len = &except_vec_vi_end - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicking won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		}

		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	} else {
		/*
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if required
		 * (eg hi/lo) and return from the exception using "eret".
		 */
		u32 insn;

		h = (u16 *)b;
		/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		h[2] = 0;
		h[3] = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}

void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}
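
/*
 * A usage sketch (my_vi_dispatch is a hypothetical assembly stub): with
 * vectored interrupts a platform can point an individual vector at its
 * own low-level dispatcher, using shadow register set 0:
 *
 *	extern asmlinkage void my_vi_dispatch(void);
 *
 *	set_vi_handler(3, my_vi_dispatch);
 *	...
 *	set_vi_handler(3, NULL);
 *
 * Passing NULL reinstalls do_default_vi, which dumps registers and
 * panics on any unexpected vectored interrupt.
 */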

extern void tlb_init(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);

static int noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);
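
/*
 * Usage sketch: booting with "noulri" on the kernel command line keeps
 * MIPS_HWRENA_ULR clear in configure_hwrena() below, so a user-mode
 * RDHWR of UserLocal traps as a Reserved Instruction and is emulated
 * via the handle_ri_rdhwr path instead of reading the register
 * directly.
 */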

/* configure STATUS register */
static void configure_status(void)
{
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model. Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27). Set XX for ISA IV code to work.
	 */
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);
}

unsigned int hwrena;
EXPORT_SYMBOL_GPL(hwrena);

/* configure HWRENA register */
static void configure_hwrena(void)
{
	hwrena = cpu_hwrena_impl_bits;

	if (cpu_has_mips_r2_r6)
		hwrena |= MIPS_HWRENA_CPUNUM |
			  MIPS_HWRENA_SYNCISTEP |
			  MIPS_HWRENA_CC |
			  MIPS_HWRENA_CCRES;

	if (!noulri && cpu_has_userlocal)
		hwrena |= MIPS_HWRENA_ULR;

	if (hwrena)
		write_c0_hwrena(hwrena);
}
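
/*
 * Once a register is enabled in HWREna, user space can read it with
 * RDHWR without trapping. A sketch of reading the cycle counter
 * (hardware register $2, enabled above via MIPS_HWRENA_CC):
 *
 *	unsigned long cc;
 *
 *	asm volatile("rdhwr %0, $2" : "=r" (cc));
 *
 * UserLocal ($29) is the one user space typically relies on for TLS; it
 * is only enabled when the CPU implements it and "noulri" was not given.
 */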

static void configure_exception_vector(void)
{
	if (cpu_has_veic || cpu_has_vint) {
		unsigned long sr = set_c0_status(ST0_BEV);

		/* If available, use WG to set top bits of EBASE */
		if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
#else
			write_c0_ebase(ebase | MIPS_EBASE_WG);
#endif
		}
		write_c0_ebase(ebase);
		write_c0_status(sr);
		/* Setting vector spacing enables EI/VI mode */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
}

void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_status();
	configure_hwrena();

	configure_exception_vector();

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		/*
		 * We shouldn't trust that a secondary core has a sane
		 * EBASE register, so use the one calculated by the
		 * boot CPU.
		 */
		if (!is_boot_cpu) {
			/* If available, use WG to set top bits of EBASE */
			if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
				write_c0_ebase_64(ebase | MIPS_EBASE_WG);
#else
				write_c0_ebase(ebase | MIPS_EBASE_WG);
#endif
			}
			write_c0_ebase(ebase);
		}

		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
		if (!cp0_fdc_irq)
			cp0_fdc_irq = -1;

	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
		cp0_fdc_irq = -1;
	}

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch(). */
	if (!is_boot_cpu)
		cpu_cache_init();
	tlb_init();
	TLBMISS_HANDLER_SETUP();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
	memcpy((void *)(ebase + offset), addr, size);
#endif
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}
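
/*
 * Usage sketch, as trap_init() below does for the general exception
 * vector:
 *
 *	set_handler(0x180, &except_vec3_generic, 0x80);
 *
 * This copies 0x80 bytes of except_vec3_generic to ebase + 0x180 and
 * flushes the icache so instruction fetch sees the new code. (On
 * microMIPS kernels the symbol address carries the ISA bit, hence the
 * "- 1" before the memcpy above.)
 */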

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}
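
/*
 * A board's cache error setup would typically install its vector at the
 * cache error offset, 0x100 (my_except_vec2 is hypothetical):
 *
 *	extern char my_except_vec2;
 *
 *	set_uncached_handler(0x100, &my_except_vec2, 0x80);
 *
 * The CKSEG1ADDR() alias matters because the cache error handler runs
 * uncached: the caches cannot be trusted at that point.
 */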

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);
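
/*
 * Usage sketch: booting with "rdhwr_noopt" makes trap_init() below
 * install the plain handle_ri handler, bypassing the optimised RDHWR
 * fast paths (handle_ri_rdhwr / handle_ri_rdhwr_tlbp) when debugging
 * Reserved Instruction handling.
 */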

void __init trap_init(void)
{
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;
	unsigned long i;

	check_wait();

	if (cpu_has_veic || cpu_has_vint) {
		unsigned long size = 0x200 + VECTORSPACING*64;
		phys_addr_t ebase_pa;

		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);

		/*
		 * Try to ensure ebase resides in KSeg0 if possible.
		 *
		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
		 * hitting a poorly defined exception base for Cache Errors.
		 * The allocation is likely to be in the low 512MB of physical,
		 * in which case we should be able to convert to KSeg0.
		 *
		 * EVA is special though as it allows segments to be rearranged
		 * and to become uncached during cache error handling.
		 */
		ebase_pa = __pa(ebase);
		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
			ebase = CKSEG0ADDR(ebase_pa);
	} else {
		ebase = CAC_BASE;

		if (cpu_has_mips_r2_r6) {
			if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
				ebase = (read_c0_ebase_64() & ~0xfff);
#else
				ebase = (read_c0_ebase() & ~0xfff);
#endif
			} else {
				ebase += (read_c0_ebase() & 0x3ffff000);
			}
		}
	}

	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(EXCCODE_WATCH, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;

		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	} else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but they
	 * do it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware. Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
					rollback_handle_int : handle_int);
	set_except_vector(EXCCODE_MOD, handle_tlbm);
	set_except_vector(EXCCODE_TLBL, handle_tlbl);
	set_except_vector(EXCCODE_TLBS, handle_tlbs);

	set_except_vector(EXCCODE_ADEL, handle_adel);
	set_except_vector(EXCCODE_ADES, handle_ades);

	set_except_vector(EXCCODE_IBE, handle_ibe);
	set_except_vector(EXCCODE_DBE, handle_dbe);

	set_except_vector(EXCCODE_SYS, handle_sys);
	set_except_vector(EXCCODE_BP, handle_bp);

	if (rdhwr_noopt)
		set_except_vector(EXCCODE_RI, handle_ri);
	else {
		if (cpu_has_vtag_icache)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else if (current_cpu_type() == CPU_LOONGSON3)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
	}

	set_except_vector(EXCCODE_CPU, handle_cpu);
	set_except_vector(EXCCODE_OV, handle_ov);
	set_except_vector(EXCCODE_TR, handle_tr);
	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(EXCCODE_FPE, handle_fpe);

	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);

	if (cpu_has_rixiex) {
		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
	}

	set_except_vector(EXCCODE_MSADIS, handle_msa);
	set_except_vector(EXCCODE_MDMX, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(EXCCODE_MCHECK, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(EXCCODE_THREAD, handle_mt);

	set_except_vector(EXCCODE_DSPDIS, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + 0x400);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last */
}

static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
			    void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		configure_status();
		configure_hwrena();
		configure_exception_vector();

		/* Restore register with CPU number for TLB handlers */
		TLBMISS_HANDLER_RESTORE();

		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);