Linux Audio

Check our new training course

Loading...
v4.17
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
   7 * Copyright (C) 1995, 1996 Paul M. Antoine
   8 * Copyright (C) 1998 Ulf Carlsson
   9 * Copyright (C) 1999 Silicon Graphics, Inc.
  10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11 * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
  12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  13 * Copyright (C) 2014, Imagination Technologies Ltd.
  14 */
  15#include <linux/bitops.h>
  16#include <linux/bug.h>
  17#include <linux/compiler.h>
  18#include <linux/context_tracking.h>
  19#include <linux/cpu_pm.h>
  20#include <linux/kexec.h>
  21#include <linux/init.h>
  22#include <linux/kernel.h>
  23#include <linux/module.h>
  24#include <linux/extable.h>
  25#include <linux/mm.h>
  26#include <linux/sched/mm.h>
  27#include <linux/sched/debug.h>
  28#include <linux/smp.h>
  29#include <linux/spinlock.h>
  30#include <linux/kallsyms.h>
  31#include <linux/bootmem.h>
  32#include <linux/interrupt.h>
  33#include <linux/ptrace.h>
  34#include <linux/kgdb.h>
  35#include <linux/kdebug.h>
  36#include <linux/kprobes.h>
  37#include <linux/notifier.h>
  38#include <linux/kdb.h>
  39#include <linux/irq.h>
  40#include <linux/perf_event.h>
  41
  42#include <asm/addrspace.h>
  43#include <asm/bootinfo.h>
  44#include <asm/branch.h>
  45#include <asm/break.h>
  46#include <asm/cop2.h>
  47#include <asm/cpu.h>
  48#include <asm/cpu-type.h>
  49#include <asm/dsp.h>
  50#include <asm/fpu.h>
  51#include <asm/fpu_emulator.h>
  52#include <asm/idle.h>
  53#include <asm/mips-cps.h>
  54#include <asm/mips-r2-to-r6-emul.h>
  55#include <asm/mipsregs.h>
  56#include <asm/mipsmtregs.h>
  57#include <asm/module.h>
  58#include <asm/msa.h>
  59#include <asm/pgtable.h>
  60#include <asm/ptrace.h>
  61#include <asm/sections.h>
  62#include <asm/siginfo.h>
  63#include <asm/tlbdebug.h>
  64#include <asm/traps.h>
  65#include <linux/uaccess.h>
  66#include <asm/watch.h>
  67#include <asm/mmu_context.h>
  68#include <asm/types.h>
  69#include <asm/stacktrace.h>
  70#include <asm/uasm.h>
  71
  72extern void check_wait(void);
  73extern asmlinkage void rollback_handle_int(void);
  74extern asmlinkage void handle_int(void);
  75extern u32 handle_tlbl[];
  76extern u32 handle_tlbs[];
  77extern u32 handle_tlbm[];
  78extern asmlinkage void handle_adel(void);
  79extern asmlinkage void handle_ades(void);
  80extern asmlinkage void handle_ibe(void);
  81extern asmlinkage void handle_dbe(void);
  82extern asmlinkage void handle_sys(void);
  83extern asmlinkage void handle_bp(void);
  84extern asmlinkage void handle_ri(void);
  85extern asmlinkage void handle_ri_rdhwr_tlbp(void);
  86extern asmlinkage void handle_ri_rdhwr(void);
  87extern asmlinkage void handle_cpu(void);
  88extern asmlinkage void handle_ov(void);
  89extern asmlinkage void handle_tr(void);
  90extern asmlinkage void handle_msa_fpe(void);
  91extern asmlinkage void handle_fpe(void);
  92extern asmlinkage void handle_ftlb(void);
  93extern asmlinkage void handle_msa(void);
  94extern asmlinkage void handle_mdmx(void);
  95extern asmlinkage void handle_watch(void);
  96extern asmlinkage void handle_mt(void);
  97extern asmlinkage void handle_dsp(void);
  98extern asmlinkage void handle_mcheck(void);
  99extern asmlinkage void handle_reserved(void);
 100extern void tlb_do_page_fault_0(void);
 101
 102void (*board_be_init)(void);
 103int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 104void (*board_nmi_handler_setup)(void);
 105void (*board_ejtag_handler_setup)(void);
 106void (*board_bind_eic_interrupt)(int irq, int regset);
 107void (*board_ebase_setup)(void);
 108void(*board_cache_error_setup)(void);
 109
 110static void show_raw_backtrace(unsigned long reg29)
 111{
 112	unsigned long *sp = (unsigned long *)(reg29 & ~3);
 113	unsigned long addr;
 114
 115	printk("Call Trace:");
 116#ifdef CONFIG_KALLSYMS
 117	printk("\n");
 118#endif
 119	while (!kstack_end(sp)) {
 120		unsigned long __user *p =
 121			(unsigned long __user *)(unsigned long)sp++;
 122		if (__get_user(addr, p)) {
 123			printk(" (Bad stack address)");
 124			break;
 125		}
 126		if (__kernel_text_address(addr))
 127			print_ip_sym(addr);
 128	}
 129	printk("\n");
 130}
 131
 132#ifdef CONFIG_KALLSYMS
 133int raw_show_trace;
 134static int __init set_raw_show_trace(char *str)
 135{
 136	raw_show_trace = 1;
 137	return 1;
 138}
 139__setup("raw_show_trace", set_raw_show_trace);
 140#endif
 141
 142static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
 143{
 144	unsigned long sp = regs->regs[29];
 145	unsigned long ra = regs->regs[31];
 146	unsigned long pc = regs->cp0_epc;
 147
 148	if (!task)
 149		task = current;
 150
 151	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
 152		show_raw_backtrace(sp);
 153		return;
 154	}
 155	printk("Call Trace:\n");
 156	do {
 157		print_ip_sym(pc);
 158		pc = unwind_stack(task, &sp, pc, &ra);
 159	} while (pc);
 160	pr_cont("\n");
 161}
 162
 163/*
 164 * This routine abuses get_user()/put_user() to reference pointers
 165 * with at least a bit of error checking ...
 166 */
 167static void show_stacktrace(struct task_struct *task,
 168	const struct pt_regs *regs)
 169{
 170	const int field = 2 * sizeof(unsigned long);
 171	long stackdata;
 172	int i;
 173	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
 174
 175	printk("Stack :");
 176	i = 0;
 177	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
 178		if (i && ((i % (64 / field)) == 0)) {
 179			pr_cont("\n");
 180			printk("       ");
 181		}
 182		if (i > 39) {
 183			pr_cont(" ...");
 184			break;
 185		}
 186
 187		if (__get_user(stackdata, sp++)) {
 188			pr_cont(" (Bad stack address)");
 189			break;
 190		}
 191
 192		pr_cont(" %0*lx", field, stackdata);
 193		i++;
 194	}
 195	pr_cont("\n");
 196	show_backtrace(task, regs);
 197}
 198
 199void show_stack(struct task_struct *task, unsigned long *sp)
 200{
 201	struct pt_regs regs;
 202	mm_segment_t old_fs = get_fs();
 203
 204	regs.cp0_status = KSU_KERNEL;
 205	if (sp) {
 206		regs.regs[29] = (unsigned long)sp;
 207		regs.regs[31] = 0;
 208		regs.cp0_epc = 0;
 209	} else {
 210		if (task && task != current) {
 211			regs.regs[29] = task->thread.reg29;
 212			regs.regs[31] = 0;
 213			regs.cp0_epc = task->thread.reg31;
 214#ifdef CONFIG_KGDB_KDB
 215		} else if (atomic_read(&kgdb_active) != -1 &&
 216			   kdb_current_regs) {
 217			memcpy(&regs, kdb_current_regs, sizeof(regs));
 218#endif /* CONFIG_KGDB_KDB */
 219		} else {
 220			prepare_frametrace(&regs);
 221		}
 222	}
 223	/*
 224	 * show_stack() deals exclusively with kernel mode, so be sure to access
 225	 * the stack in the kernel (not user) address space.
 226	 */
 227	set_fs(KERNEL_DS);
 228	show_stacktrace(task, &regs);
 229	set_fs(old_fs);
 230}
 231
 232static void show_code(unsigned int __user *pc)
 233{
 234	long i;
 235	unsigned short __user *pc16 = NULL;
 236
 237	printk("Code:");
 238
 239	if ((unsigned long)pc & 1)
 240		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
 241	for(i = -3 ; i < 6 ; i++) {
 242		unsigned int insn;
 243		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
 244			pr_cont(" (Bad address in epc)\n");
 245			break;
 246		}
 247		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
 248	}
 249	pr_cont("\n");
 250}
 251
 252static void __show_regs(const struct pt_regs *regs)
 253{
 254	const int field = 2 * sizeof(unsigned long);
 255	unsigned int cause = regs->cp0_cause;
 256	unsigned int exccode;
 257	int i;
 258
 259	show_regs_print_info(KERN_DEFAULT);
 260
 261	/*
 262	 * Saved main processor registers
 263	 */
 264	for (i = 0; i < 32; ) {
 265		if ((i % 4) == 0)
 266			printk("$%2d   :", i);
 267		if (i == 0)
 268			pr_cont(" %0*lx", field, 0UL);
 269		else if (i == 26 || i == 27)
 270			pr_cont(" %*s", field, "");
 271		else
 272			pr_cont(" %0*lx", field, regs->regs[i]);
 273
 274		i++;
 275		if ((i % 4) == 0)
 276			pr_cont("\n");
 277	}
 278
 279#ifdef CONFIG_CPU_HAS_SMARTMIPS
 280	printk("Acx    : %0*lx\n", field, regs->acx);
 281#endif
 282	printk("Hi    : %0*lx\n", field, regs->hi);
 283	printk("Lo    : %0*lx\n", field, regs->lo);
 284
 285	/*
 286	 * Saved cp0 registers
 287	 */
 288	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
 289	       (void *) regs->cp0_epc);
 290	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
 291	       (void *) regs->regs[31]);
 292
 293	printk("Status: %08x	", (uint32_t) regs->cp0_status);
 294
 295	if (cpu_has_3kex) {
 296		if (regs->cp0_status & ST0_KUO)
 297			pr_cont("KUo ");
 298		if (regs->cp0_status & ST0_IEO)
 299			pr_cont("IEo ");
 300		if (regs->cp0_status & ST0_KUP)
 301			pr_cont("KUp ");
 302		if (regs->cp0_status & ST0_IEP)
 303			pr_cont("IEp ");
 304		if (regs->cp0_status & ST0_KUC)
 305			pr_cont("KUc ");
 306		if (regs->cp0_status & ST0_IEC)
 307			pr_cont("IEc ");
 308	} else if (cpu_has_4kex) {
 309		if (regs->cp0_status & ST0_KX)
 310			pr_cont("KX ");
 311		if (regs->cp0_status & ST0_SX)
 312			pr_cont("SX ");
 313		if (regs->cp0_status & ST0_UX)
 314			pr_cont("UX ");
 315		switch (regs->cp0_status & ST0_KSU) {
 316		case KSU_USER:
 317			pr_cont("USER ");
 318			break;
 319		case KSU_SUPERVISOR:
 320			pr_cont("SUPERVISOR ");
 321			break;
 322		case KSU_KERNEL:
 323			pr_cont("KERNEL ");
 324			break;
 325		default:
 326			pr_cont("BAD_MODE ");
 327			break;
 328		}
 329		if (regs->cp0_status & ST0_ERL)
 330			pr_cont("ERL ");
 331		if (regs->cp0_status & ST0_EXL)
 332			pr_cont("EXL ");
 333		if (regs->cp0_status & ST0_IE)
 334			pr_cont("IE ");
 335	}
 336	pr_cont("\n");
 337
 338	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
 339	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
 340
 341	if (1 <= exccode && exccode <= 5)
 342		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
 343
 344	printk("PrId  : %08x (%s)\n", read_c0_prid(),
 345	       cpu_name_string());
 346}
 347
 348/*
 349 * FIXME: really the generic show_regs should take a const pointer argument.
 350 */
 351void show_regs(struct pt_regs *regs)
 352{
 353	__show_regs((struct pt_regs *)regs);
 354}
 355
 356void show_registers(struct pt_regs *regs)
 357{
 358	const int field = 2 * sizeof(unsigned long);
 359	mm_segment_t old_fs = get_fs();
 360
 361	__show_regs(regs);
 362	print_modules();
 363	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
 364	       current->comm, current->pid, current_thread_info(), current,
 365	      field, current_thread_info()->tp_value);
 366	if (cpu_has_userlocal) {
 367		unsigned long tls;
 368
 369		tls = read_c0_userlocal();
 370		if (tls != current_thread_info()->tp_value)
 371			printk("*HwTLS: %0*lx\n", field, tls);
 372	}
 373
 374	if (!user_mode(regs))
 375		/* Necessary for getting the correct stack content */
 376		set_fs(KERNEL_DS);
 377	show_stacktrace(current, regs);
 378	show_code((unsigned int __user *) regs->cp0_epc);
 379	printk("\n");
 380	set_fs(old_fs);
 381}
 382
 383static DEFINE_RAW_SPINLOCK(die_lock);
 384
 385void __noreturn die(const char *str, struct pt_regs *regs)
 386{
 387	static int die_counter;
 388	int sig = SIGSEGV;
 389
 390	oops_enter();
 391
 392	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
 393		       SIGSEGV) == NOTIFY_STOP)
 394		sig = 0;
 395
 396	console_verbose();
 397	raw_spin_lock_irq(&die_lock);
 398	bust_spinlocks(1);
 399
 400	printk("%s[#%d]:\n", str, ++die_counter);
 401	show_registers(regs);
 402	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 403	raw_spin_unlock_irq(&die_lock);
 404
 405	oops_exit();
 406
 407	if (in_interrupt())
 408		panic("Fatal exception in interrupt");
 409
 410	if (panic_on_oops)
 411		panic("Fatal exception");
 412
 413	if (regs && kexec_should_crash(current))
 414		crash_kexec(regs);
 415
 416	do_exit(sig);
 417}
 418
 419extern struct exception_table_entry __start___dbe_table[];
 420extern struct exception_table_entry __stop___dbe_table[];
 421
 422__asm__(
 423"	.section	__dbe_table, \"a\"\n"
 424"	.previous			\n");
 425
 426/* Given an address, look for it in the exception tables. */
 427static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
 428{
 429	const struct exception_table_entry *e;
 430
 431	e = search_extable(__start___dbe_table,
 432			   __stop___dbe_table - __start___dbe_table, addr);
 433	if (!e)
 434		e = search_module_dbetables(addr);
 435	return e;
 436}
 437
 438asmlinkage void do_be(struct pt_regs *regs)
 439{
 440	const int field = 2 * sizeof(unsigned long);
 441	const struct exception_table_entry *fixup = NULL;
 442	int data = regs->cp0_cause & 4;
 443	int action = MIPS_BE_FATAL;
 444	enum ctx_state prev_state;
 445
 446	prev_state = exception_enter();
 447	/* XXX For now.	 Fixme, this searches the wrong table ...  */
 448	if (data && !user_mode(regs))
 449		fixup = search_dbe_tables(exception_epc(regs));
 450
 451	if (fixup)
 452		action = MIPS_BE_FIXUP;
 453
 454	if (board_be_handler)
 455		action = board_be_handler(regs, fixup != NULL);
 456	else
 457		mips_cm_error_report();
 458
 459	switch (action) {
 460	case MIPS_BE_DISCARD:
 461		goto out;
 462	case MIPS_BE_FIXUP:
 463		if (fixup) {
 464			regs->cp0_epc = fixup->nextinsn;
 465			goto out;
 466		}
 467		break;
 468	default:
 469		break;
 470	}
 471
 472	/*
 473	 * Assume it would be too dangerous to continue ...
 474	 */
 475	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
 476	       data ? "Data" : "Instruction",
 477	       field, regs->cp0_epc, field, regs->regs[31]);
 478	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
 479		       SIGBUS) == NOTIFY_STOP)
 480		goto out;
 481
 482	die_if_kernel("Oops", regs);
 483	force_sig(SIGBUS, current);
 484
 485out:
 486	exception_exit(prev_state);
 487}
 488
 489/*
 490 * ll/sc, rdhwr, sync emulation
 491 */
 492
 493#define OPCODE 0xfc000000
 494#define BASE   0x03e00000
 495#define RT     0x001f0000
 496#define OFFSET 0x0000ffff
 497#define LL     0xc0000000
 498#define SC     0xe0000000
 499#define SPEC0  0x00000000
 500#define SPEC3  0x7c000000
 501#define RD     0x0000f800
 502#define FUNC   0x0000003f
 503#define SYNC   0x0000000f
 504#define RDHWR  0x0000003b
 505
 506/*  microMIPS definitions   */
 507#define MM_POOL32A_FUNC 0xfc00ffff
 508#define MM_RDHWR        0x00006b3c
 509#define MM_RS           0x001f0000
 510#define MM_RT           0x03e00000
 511
 512/*
 513 * The ll_bit is cleared by r*_switch.S
 514 */
 515
 516unsigned int ll_bit;
 517struct task_struct *ll_task;
 518
 519static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
 520{
 521	unsigned long value, __user *vaddr;
 522	long offset;
 523
 524	/*
 525	 * analyse the ll instruction that just caused a ri exception
 526	 * and put the referenced address to addr.
 527	 */
 528
 529	/* sign extend offset */
 530	offset = opcode & OFFSET;
 531	offset <<= 16;
 532	offset >>= 16;
 533
 534	vaddr = (unsigned long __user *)
 535		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 536
 537	if ((unsigned long)vaddr & 3)
 538		return SIGBUS;
 539	if (get_user(value, vaddr))
 540		return SIGSEGV;
 541
 542	preempt_disable();
 543
 544	if (ll_task == NULL || ll_task == current) {
 545		ll_bit = 1;
 546	} else {
 547		ll_bit = 0;
 548	}
 549	ll_task = current;
 550
 551	preempt_enable();
 552
 553	regs->regs[(opcode & RT) >> 16] = value;
 554
 555	return 0;
 556}
 557
 558static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
 559{
 560	unsigned long __user *vaddr;
 561	unsigned long reg;
 562	long offset;
 563
 564	/*
 565	 * analyse the sc instruction that just caused a ri exception
 566	 * and put the referenced address to addr.
 567	 */
 568
 569	/* sign extend offset */
 570	offset = opcode & OFFSET;
 571	offset <<= 16;
 572	offset >>= 16;
 573
 574	vaddr = (unsigned long __user *)
 575		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 576	reg = (opcode & RT) >> 16;
 577
 578	if ((unsigned long)vaddr & 3)
 579		return SIGBUS;
 580
 581	preempt_disable();
 582
 583	if (ll_bit == 0 || ll_task != current) {
 584		regs->regs[reg] = 0;
 585		preempt_enable();
 586		return 0;
 587	}
 588
 589	preempt_enable();
 590
 591	if (put_user(regs->regs[reg], vaddr))
 592		return SIGSEGV;
 593
 594	regs->regs[reg] = 1;
 595
 596	return 0;
 597}
 598
 599/*
 600 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 601 * opcodes are supposed to result in coprocessor unusable exceptions if
 602 * executed on ll/sc-less processors.  That's the theory.  In practice a
 603 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 604 * instead, so we're doing the emulation thing in both exception handlers.
 605 */
 606static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 607{
 608	if ((opcode & OPCODE) == LL) {
 609		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 610				1, regs, 0);
 611		return simulate_ll(regs, opcode);
 612	}
 613	if ((opcode & OPCODE) == SC) {
 614		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 615				1, regs, 0);
 616		return simulate_sc(regs, opcode);
 617	}
 618
 619	return -1;			/* Must be something else ... */
 620}
 621
 622/*
 623 * Simulate trapping 'rdhwr' instructions to provide user accessible
 624 * registers not implemented in hardware.
 625 */
 626static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
 627{
 628	struct thread_info *ti = task_thread_info(current);
 629
 630	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 631			1, regs, 0);
 632	switch (rd) {
 633	case MIPS_HWR_CPUNUM:		/* CPU number */
 634		regs->regs[rt] = smp_processor_id();
 635		return 0;
 636	case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
 637		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
 638				     current_cpu_data.icache.linesz);
 639		return 0;
 640	case MIPS_HWR_CC:		/* Read count register */
 641		regs->regs[rt] = read_c0_count();
 642		return 0;
 643	case MIPS_HWR_CCRES:		/* Count register resolution */
 644		switch (current_cpu_type()) {
 645		case CPU_20KC:
 646		case CPU_25KF:
 647			regs->regs[rt] = 1;
 648			break;
 649		default:
 650			regs->regs[rt] = 2;
 651		}
 652		return 0;
 653	case MIPS_HWR_ULR:		/* Read UserLocal register */
 654		regs->regs[rt] = ti->tp_value;
 655		return 0;
 656	default:
 657		return -1;
 658	}
 659}
 660
 661static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
 662{
 663	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
 664		int rd = (opcode & RD) >> 11;
 665		int rt = (opcode & RT) >> 16;
 666
 667		simulate_rdhwr(regs, rd, rt);
 668		return 0;
 669	}
 670
 671	/* Not ours.  */
 672	return -1;
 673}
 674
 675static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
 676{
 677	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
 678		int rd = (opcode & MM_RS) >> 16;
 679		int rt = (opcode & MM_RT) >> 21;
 680		simulate_rdhwr(regs, rd, rt);
 681		return 0;
 682	}
 683
 684	/* Not ours.  */
 685	return -1;
 686}
 687
 688static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 689{
 690	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
 691		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 692				1, regs, 0);
 693		return 0;
 694	}
 695
 696	return -1;			/* Must be something else ... */
 697}
 698
 699asmlinkage void do_ov(struct pt_regs *regs)
 700{
 701	enum ctx_state prev_state;
 702	siginfo_t info;
 703
 704	clear_siginfo(&info);
 705	info.si_signo = SIGFPE;
 706	info.si_code = FPE_INTOVF;
 707	info.si_addr = (void __user *)regs->cp0_epc;
 708
 709	prev_state = exception_enter();
 710	die_if_kernel("Integer overflow", regs);
 711
 712	force_sig_info(SIGFPE, &info, current);
 713	exception_exit(prev_state);
 714}
 715
 716/*
 717 * Send SIGFPE according to FCSR Cause bits, which must have already
 718 * been masked against Enable bits.  This is impotant as Inexact can
 719 * happen together with Overflow or Underflow, and `ptrace' can set
 720 * any bits.
 721 */
 722void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
 723		     struct task_struct *tsk)
 724{
 725	struct siginfo si;
 726
 727	clear_siginfo(&si);
 728	si.si_addr = fault_addr;
 729	si.si_signo = SIGFPE;
 730
 731	if (fcr31 & FPU_CSR_INV_X)
 732		si.si_code = FPE_FLTINV;
 733	else if (fcr31 & FPU_CSR_DIV_X)
 734		si.si_code = FPE_FLTDIV;
 735	else if (fcr31 & FPU_CSR_OVF_X)
 736		si.si_code = FPE_FLTOVF;
 737	else if (fcr31 & FPU_CSR_UDF_X)
 738		si.si_code = FPE_FLTUND;
 739	else if (fcr31 & FPU_CSR_INE_X)
 740		si.si_code = FPE_FLTRES;
 741
 
 742	force_sig_info(SIGFPE, &si, tsk);
 743}
 744
 745int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 746{
 747	struct siginfo si;
 748	struct vm_area_struct *vma;
 749
 750	clear_siginfo(&si);
 751	switch (sig) {
 752	case 0:
 753		return 0;
 754
 755	case SIGFPE:
 756		force_fcr31_sig(fcr31, fault_addr, current);
 757		return 1;
 758
 759	case SIGBUS:
 760		si.si_addr = fault_addr;
 761		si.si_signo = sig;
 762		si.si_code = BUS_ADRERR;
 763		force_sig_info(sig, &si, current);
 764		return 1;
 765
 766	case SIGSEGV:
 767		si.si_addr = fault_addr;
 768		si.si_signo = sig;
 769		down_read(&current->mm->mmap_sem);
 770		vma = find_vma(current->mm, (unsigned long)fault_addr);
 771		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
 772			si.si_code = SEGV_ACCERR;
 773		else
 774			si.si_code = SEGV_MAPERR;
 775		up_read(&current->mm->mmap_sem);
 776		force_sig_info(sig, &si, current);
 777		return 1;
 778
 779	default:
 780		force_sig(sig, current);
 781		return 1;
 782	}
 783}
 784
 785static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 786		       unsigned long old_epc, unsigned long old_ra)
 787{
 788	union mips_instruction inst = { .word = opcode };
 789	void __user *fault_addr;
 790	unsigned long fcr31;
 791	int sig;
 792
 793	/* If it's obviously not an FP instruction, skip it */
 794	switch (inst.i_format.opcode) {
 795	case cop1_op:
 796	case cop1x_op:
 797	case lwc1_op:
 798	case ldc1_op:
 799	case swc1_op:
 800	case sdc1_op:
 801		break;
 802
 803	default:
 804		return -1;
 805	}
 806
 807	/*
 808	 * do_ri skipped over the instruction via compute_return_epc, undo
 809	 * that for the FPU emulator.
 810	 */
 811	regs->cp0_epc = old_epc;
 812	regs->regs[31] = old_ra;
 813
 814	/* Save the FP context to struct thread_struct */
 815	lose_fpu(1);
 816
 817	/* Run the emulator */
 818	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 819				       &fault_addr);
 820
 821	/*
 822	 * We can't allow the emulated instruction to leave any
 823	 * enabled Cause bits set in $fcr31.
 824	 */
 825	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
 826	current->thread.fpu.fcr31 &= ~fcr31;
 827
 828	/* Restore the hardware register state */
 829	own_fpu(1);
 830
 831	/* Send a signal if required.  */
 832	process_fpemu_return(sig, fault_addr, fcr31);
 833
 834	return 0;
 835}
 836
 837/*
 838 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 839 */
 840asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 841{
 842	enum ctx_state prev_state;
 843	void __user *fault_addr;
 844	int sig;
 845
 846	prev_state = exception_enter();
 847	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
 848		       SIGFPE) == NOTIFY_STOP)
 849		goto out;
 850
 851	/* Clear FCSR.Cause before enabling interrupts */
 852	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
 853	local_irq_enable();
 854
 855	die_if_kernel("FP exception in kernel code", regs);
 856
 857	if (fcr31 & FPU_CSR_UNI_X) {
 858		/*
 859		 * Unimplemented operation exception.  If we've got the full
 860		 * software emulator on-board, let's use it...
 861		 *
 862		 * Force FPU to dump state into task/thread context.  We're
 863		 * moving a lot of data here for what is probably a single
 864		 * instruction, but the alternative is to pre-decode the FP
 865		 * register operands before invoking the emulator, which seems
 866		 * a bit extreme for what should be an infrequent event.
 867		 */
 868		/* Ensure 'resume' not overwrite saved fp context again. */
 869		lose_fpu(1);
 870
 871		/* Run the emulator */
 872		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 873					       &fault_addr);
 874
 875		/*
 876		 * We can't allow the emulated instruction to leave any
 877		 * enabled Cause bits set in $fcr31.
 878		 */
 879		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
 880		current->thread.fpu.fcr31 &= ~fcr31;
 881
 882		/* Restore the hardware register state */
 883		own_fpu(1);	/* Using the FPU again.	 */
 884	} else {
 885		sig = SIGFPE;
 886		fault_addr = (void __user *) regs->cp0_epc;
 887	}
 888
 889	/* Send a signal if required.  */
 890	process_fpemu_return(sig, fault_addr, fcr31);
 891
 892out:
 893	exception_exit(prev_state);
 894}
 895
 896void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
 897	const char *str)
 898{
 899	siginfo_t info;
 900	char b[40];
 901
 902	clear_siginfo(&info);
 903#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 904	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 905			 SIGTRAP) == NOTIFY_STOP)
 906		return;
 907#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 908
 909	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 910		       SIGTRAP) == NOTIFY_STOP)
 911		return;
 912
 913	/*
 914	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
 915	 * insns, even for trap and break codes that indicate arithmetic
 916	 * failures.  Weird ...
 917	 * But should we continue the brokenness???  --macro
 918	 */
 919	switch (code) {
 920	case BRK_OVERFLOW:
 921	case BRK_DIVZERO:
 922		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 923		die_if_kernel(b, regs);
 924		if (code == BRK_DIVZERO)
 925			info.si_code = FPE_INTDIV;
 926		else
 927			info.si_code = FPE_INTOVF;
 928		info.si_signo = SIGFPE;
 929		info.si_addr = (void __user *) regs->cp0_epc;
 930		force_sig_info(SIGFPE, &info, current);
 931		break;
 932	case BRK_BUG:
 933		die_if_kernel("Kernel bug detected", regs);
 934		force_sig(SIGTRAP, current);
 935		break;
 936	case BRK_MEMU:
 937		/*
 938		 * This breakpoint code is used by the FPU emulator to retake
 939		 * control of the CPU after executing the instruction from the
 940		 * delay slot of an emulated branch.
 941		 *
 942		 * Terminate if exception was recognized as a delay slot return
 943		 * otherwise handle as normal.
 944		 */
 945		if (do_dsemulret(regs))
 946			return;
 947
 948		die_if_kernel("Math emu break/trap", regs);
 949		force_sig(SIGTRAP, current);
 950		break;
 951	default:
 952		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 953		die_if_kernel(b, regs);
 954		if (si_code) {
 955			info.si_signo = SIGTRAP;
 956			info.si_code = si_code;
 957			force_sig_info(SIGTRAP, &info, current);
 958		} else {
 959			force_sig(SIGTRAP, current);
 960		}
 961	}
 962}
 963
 964asmlinkage void do_bp(struct pt_regs *regs)
 965{
 966	unsigned long epc = msk_isa16_mode(exception_epc(regs));
 967	unsigned int opcode, bcode;
 968	enum ctx_state prev_state;
 969	mm_segment_t seg;
 970
 971	seg = get_fs();
 972	if (!user_mode(regs))
 973		set_fs(KERNEL_DS);
 974
 975	prev_state = exception_enter();
 976	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 977	if (get_isa16_mode(regs->cp0_epc)) {
 978		u16 instr[2];
 979
 980		if (__get_user(instr[0], (u16 __user *)epc))
 981			goto out_sigsegv;
 982
 983		if (!cpu_has_mmips) {
 984			/* MIPS16e mode */
 985			bcode = (instr[0] >> 5) & 0x3f;
 986		} else if (mm_insn_16bit(instr[0])) {
 987			/* 16-bit microMIPS BREAK */
 988			bcode = instr[0] & 0xf;
 989		} else {
 990			/* 32-bit microMIPS BREAK */
 991			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
 992				goto out_sigsegv;
 993			opcode = (instr[0] << 16) | instr[1];
 994			bcode = (opcode >> 6) & ((1 << 20) - 1);
 995		}
 996	} else {
 997		if (__get_user(opcode, (unsigned int __user *)epc))
 998			goto out_sigsegv;
 999		bcode = (opcode >> 6) & ((1 << 20) - 1);
1000	}
1001
1002	/*
1003	 * There is the ancient bug in the MIPS assemblers that the break
1004	 * code starts left to bit 16 instead to bit 6 in the opcode.
1005	 * Gas is bug-compatible, but not always, grrr...
1006	 * We handle both cases with a simple heuristics.  --macro
1007	 */
1008	if (bcode >= (1 << 10))
1009		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
1010
1011	/*
1012	 * notify the kprobe handlers, if instruction is likely to
1013	 * pertain to them.
1014	 */
1015	switch (bcode) {
1016	case BRK_UPROBE:
1017		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
1018			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1019			goto out;
1020		else
1021			break;
1022	case BRK_UPROBE_XOL:
1023		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
1024			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1025			goto out;
1026		else
1027			break;
1028	case BRK_KPROBE_BP:
1029		if (notify_die(DIE_BREAK, "debug", regs, bcode,
1030			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1031			goto out;
1032		else
1033			break;
1034	case BRK_KPROBE_SSTEPBP:
1035		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
1036			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1037			goto out;
1038		else
1039			break;
1040	default:
1041		break;
1042	}
1043
1044	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
1045
1046out:
1047	set_fs(seg);
1048	exception_exit(prev_state);
1049	return;
1050
1051out_sigsegv:
1052	force_sig(SIGSEGV, current);
1053	goto out;
1054}
1055
1056asmlinkage void do_tr(struct pt_regs *regs)
1057{
1058	u32 opcode, tcode = 0;
1059	enum ctx_state prev_state;
1060	u16 instr[2];
1061	mm_segment_t seg;
1062	unsigned long epc = msk_isa16_mode(exception_epc(regs));
1063
1064	seg = get_fs();
1065	if (!user_mode(regs))
1066		set_fs(get_ds());
1067
1068	prev_state = exception_enter();
1069	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1070	if (get_isa16_mode(regs->cp0_epc)) {
1071		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
1072		    __get_user(instr[1], (u16 __user *)(epc + 2)))
1073			goto out_sigsegv;
1074		opcode = (instr[0] << 16) | instr[1];
1075		/* Immediate versions don't provide a code.  */
1076		if (!(opcode & OPCODE))
1077			tcode = (opcode >> 12) & ((1 << 4) - 1);
1078	} else {
1079		if (__get_user(opcode, (u32 __user *)epc))
1080			goto out_sigsegv;
1081		/* Immediate versions don't provide a code.  */
1082		if (!(opcode & OPCODE))
1083			tcode = (opcode >> 6) & ((1 << 10) - 1);
1084	}
1085
1086	do_trap_or_bp(regs, tcode, 0, "Trap");
1087
1088out:
1089	set_fs(seg);
1090	exception_exit(prev_state);
1091	return;
1092
1093out_sigsegv:
1094	force_sig(SIGSEGV, current);
1095	goto out;
1096}
1097
1098asmlinkage void do_ri(struct pt_regs *regs)
1099{
1100	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1101	unsigned long old_epc = regs->cp0_epc;
1102	unsigned long old31 = regs->regs[31];
1103	enum ctx_state prev_state;
1104	unsigned int opcode = 0;
1105	int status = -1;
1106
1107	/*
1108	 * Avoid any kernel code. Just emulate the R2 instruction
1109	 * as quickly as possible.
1110	 */
1111	if (mipsr2_emulation && cpu_has_mips_r6 &&
1112	    likely(user_mode(regs)) &&
1113	    likely(get_user(opcode, epc) >= 0)) {
1114		unsigned long fcr31 = 0;
1115
1116		status = mipsr2_decoder(regs, opcode, &fcr31);
1117		switch (status) {
1118		case 0:
1119		case SIGEMT:
 
1120			return;
1121		case SIGILL:
1122			goto no_r2_instr;
1123		default:
1124			process_fpemu_return(status,
1125					     &current->thread.cp0_baduaddr,
1126					     fcr31);
 
1127			return;
1128		}
1129	}
1130
1131no_r2_instr:
1132
1133	prev_state = exception_enter();
1134	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1135
1136	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
1137		       SIGILL) == NOTIFY_STOP)
1138		goto out;
1139
1140	die_if_kernel("Reserved instruction in kernel code", regs);
1141
1142	if (unlikely(compute_return_epc(regs) < 0))
1143		goto out;
1144
1145	if (!get_isa16_mode(regs->cp0_epc)) {
1146		if (unlikely(get_user(opcode, epc) < 0))
1147			status = SIGSEGV;
1148
1149		if (!cpu_has_llsc && status < 0)
1150			status = simulate_llsc(regs, opcode);
1151
1152		if (status < 0)
1153			status = simulate_rdhwr_normal(regs, opcode);
1154
1155		if (status < 0)
1156			status = simulate_sync(regs, opcode);
1157
1158		if (status < 0)
1159			status = simulate_fp(regs, opcode, old_epc, old31);
1160	} else if (cpu_has_mmips) {
1161		unsigned short mmop[2] = { 0 };
1162
1163		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1164			status = SIGSEGV;
1165		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1166			status = SIGSEGV;
1167		opcode = mmop[0];
1168		opcode = (opcode << 16) | mmop[1];
1169
1170		if (status < 0)
1171			status = simulate_rdhwr_mm(regs, opcode);
1172	}
1173
1174	if (status < 0)
1175		status = SIGILL;
1176
1177	if (unlikely(status > 0)) {
1178		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
1179		regs->regs[31] = old31;
1180		force_sig(status, current);
1181	}
1182
1183out:
1184	exception_exit(prev_state);
1185}
1186
1187/*
1188 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
1189 * emulated more than some threshold number of instructions, force migration to
1190 * a "CPU" that has FP support.
1191 */
1192static void mt_ase_fp_affinity(void)
1193{
1194#ifdef CONFIG_MIPS_MT_FPAFF
1195	if (mt_fpemul_threshold > 0 &&
1196	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
1197		/*
1198		 * If there's no FPU present, or if the application has already
1199		 * restricted the allowed set to exclude any CPUs with FPUs,
1200		 * we'll skip the procedure.
1201		 */
1202		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
1203			cpumask_t tmask;
1204
1205			current->thread.user_cpus_allowed
1206				= current->cpus_allowed;
1207			cpumask_and(&tmask, &current->cpus_allowed,
1208				    &mt_fpu_cpumask);
1209			set_cpus_allowed_ptr(current, &tmask);
1210			set_thread_flag(TIF_FPUBOUND);
1211		}
1212	}
1213#endif /* CONFIG_MIPS_MT_FPAFF */
1214}
1215
1216/*
1217 * No lock; only written during early bootup by CPU 0.
1218 */
1219static RAW_NOTIFIER_HEAD(cu2_chain);
1220
1221int __ref register_cu2_notifier(struct notifier_block *nb)
1222{
1223	return raw_notifier_chain_register(&cu2_chain, nb);
1224}
1225
1226int cu2_notifier_call_chain(unsigned long val, void *v)
1227{
1228	return raw_notifier_call_chain(&cu2_chain, val, v);
1229}
1230
1231static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1232	void *data)
1233{
1234	struct pt_regs *regs = data;
1235
1236	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
1237			      "instruction", regs);
1238	force_sig(SIGILL, current);
1239
1240	return NOTIFY_OK;
1241}
1242
 
 
 
 
 
 
 
 
 
 
 
 
1243static int enable_restore_fp_context(int msa)
1244{
1245	int err, was_fpu_owner, prior_msa;
1246
1247	/*
1248	 * If an FP mode switch is currently underway, wait for it to
1249	 * complete before proceeding.
1250	 */
1251	wait_var_event(&current->mm->context.fp_mode_switching,
1252		       !atomic_read(&current->mm->context.fp_mode_switching));
1253
1254	if (!used_math()) {
1255		/* First time FP context user. */
1256		preempt_disable();
1257		err = init_fpu();
1258		if (msa && !err) {
1259			enable_msa();
1260			init_msa_upper();
1261			set_thread_flag(TIF_USEDMSA);
1262			set_thread_flag(TIF_MSA_CTX_LIVE);
1263		}
1264		preempt_enable();
1265		if (!err)
1266			set_used_math();
1267		return err;
1268	}
1269
1270	/*
1271	 * This task has formerly used the FP context.
1272	 *
1273	 * If this thread has no live MSA vector context then we can simply
1274	 * restore the scalar FP context. If it has live MSA vector context
1275	 * (that is, it has or may have used MSA since last performing a
1276	 * function call) then we'll need to restore the vector context. This
1277	 * applies even if we're currently only executing a scalar FP
1278	 * instruction. This is because if we were to later execute an MSA
1279	 * instruction then we'd either have to:
1280	 *
1281	 *  - Restore the vector context & clobber any registers modified by
1282	 *    scalar FP instructions between now & then.
1283	 *
1284	 * or
1285	 *
1286	 *  - Not restore the vector context & lose the most significant bits
1287	 *    of all vector registers.
1288	 *
1289	 * Neither of those options is acceptable. We cannot restore the least
1290	 * significant bits of the registers now & only restore the most
1291	 * significant bits later because the most significant bits of any
1292	 * vector registers whose aliased FP register is modified now will have
1293	 * been zeroed. We'd have no way to know that when restoring the vector
1294	 * context & thus may load an outdated value for the most significant
1295	 * bits of a vector register.
1296	 */
1297	if (!msa && !thread_msa_context_live())
1298		return own_fpu(1);
1299
1300	/*
1301	 * This task is using or has previously used MSA. Thus we require
1302	 * that Status.FR == 1.
1303	 */
1304	preempt_disable();
1305	was_fpu_owner = is_fpu_owner();
1306	err = own_fpu_inatomic(0);
1307	if (err)
1308		goto out;
1309
1310	enable_msa();
1311	write_msa_csr(current->thread.fpu.msacsr);
1312	set_thread_flag(TIF_USEDMSA);
1313
1314	/*
1315	 * If this is the first time that the task is using MSA and it has
1316	 * previously used scalar FP in this time slice then we already nave
1317	 * FP context which we shouldn't clobber. We do however need to clear
1318	 * the upper 64b of each vector register so that this task has no
1319	 * opportunity to see data left behind by another.
1320	 */
1321	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
1322	if (!prior_msa && was_fpu_owner) {
1323		init_msa_upper();
1324
1325		goto out;
1326	}
1327
1328	if (!prior_msa) {
1329		/*
1330		 * Restore the least significant 64b of each vector register
1331		 * from the existing scalar FP context.
1332		 */
1333		_restore_fp(current);
1334
1335		/*
1336		 * The task has not formerly used MSA, so clear the upper 64b
1337		 * of each vector register such that it cannot see data left
1338		 * behind by another task.
1339		 */
1340		init_msa_upper();
1341	} else {
1342		/* We need to restore the vector context. */
1343		restore_msa(current);
1344
1345		/* Restore the scalar FP control & status register */
1346		if (!was_fpu_owner)
1347			write_32bit_cp1_register(CP1_STATUS,
1348						 current->thread.fpu.fcr31);
1349	}
1350
1351out:
1352	preempt_enable();
1353
1354	return 0;
1355}
1356
1357asmlinkage void do_cpu(struct pt_regs *regs)
1358{
1359	enum ctx_state prev_state;
1360	unsigned int __user *epc;
1361	unsigned long old_epc, old31;
1362	void __user *fault_addr;
1363	unsigned int opcode;
1364	unsigned long fcr31;
1365	unsigned int cpid;
1366	int status, err;
1367	int sig;
1368
1369	prev_state = exception_enter();
1370	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1371
1372	if (cpid != 2)
1373		die_if_kernel("do_cpu invoked from kernel context!", regs);
1374
1375	switch (cpid) {
1376	case 0:
1377		epc = (unsigned int __user *)exception_epc(regs);
1378		old_epc = regs->cp0_epc;
1379		old31 = regs->regs[31];
1380		opcode = 0;
1381		status = -1;
1382
1383		if (unlikely(compute_return_epc(regs) < 0))
1384			break;
1385
1386		if (!get_isa16_mode(regs->cp0_epc)) {
1387			if (unlikely(get_user(opcode, epc) < 0))
1388				status = SIGSEGV;
1389
1390			if (!cpu_has_llsc && status < 0)
1391				status = simulate_llsc(regs, opcode);
1392		}
1393
1394		if (status < 0)
1395			status = SIGILL;
1396
1397		if (unlikely(status > 0)) {
1398			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
1399			regs->regs[31] = old31;
1400			force_sig(status, current);
1401		}
1402
1403		break;
1404
1405	case 3:
1406		/*
1407		 * The COP3 opcode space and consequently the CP0.Status.CU3
1408		 * bit and the CP0.Cause.CE=3 encoding have been removed as
1409		 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
1410		 * up the space has been reused for COP1X instructions, that
1411		 * are enabled by the CP0.Status.CU1 bit and consequently
1412		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
1413		 * exceptions.  Some FPU-less processors that implement one
1414		 * of these ISAs however use this code erroneously for COP1X
1415		 * instructions.  Therefore we redirect this trap to the FP
1416		 * emulator too.
1417		 */
1418		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
1419			force_sig(SIGILL, current);
1420			break;
1421		}
1422		/* Fall through.  */
1423
1424	case 1:
1425		err = enable_restore_fp_context(0);
1426
1427		if (raw_cpu_has_fpu && !err)
1428			break;
1429
1430		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1431					       &fault_addr);
1432
1433		/*
1434		 * We can't allow the emulated instruction to leave
1435		 * any enabled Cause bits set in $fcr31.
1436		 */
1437		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
1438		current->thread.fpu.fcr31 &= ~fcr31;
1439
1440		/* Send a signal if required.  */
1441		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
1442			mt_ase_fp_affinity();
1443
1444		break;
1445
1446	case 2:
1447		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1448		break;
1449	}
1450
1451	exception_exit(prev_state);
1452}
1453
1454asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
1455{
1456	enum ctx_state prev_state;
1457
1458	prev_state = exception_enter();
1459	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1460	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
1461		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
1462		goto out;
1463
1464	/* Clear MSACSR.Cause before enabling interrupts */
1465	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
1466	local_irq_enable();
1467
1468	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1469	force_sig(SIGFPE, current);
1470out:
1471	exception_exit(prev_state);
1472}
1473
1474asmlinkage void do_msa(struct pt_regs *regs)
1475{
1476	enum ctx_state prev_state;
1477	int err;
1478
1479	prev_state = exception_enter();
1480
1481	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
1482		force_sig(SIGILL, current);
1483		goto out;
1484	}
1485
1486	die_if_kernel("do_msa invoked from kernel context!", regs);
1487
1488	err = enable_restore_fp_context(1);
1489	if (err)
1490		force_sig(SIGILL, current);
1491out:
1492	exception_exit(prev_state);
1493}
1494
1495asmlinkage void do_mdmx(struct pt_regs *regs)
1496{
1497	enum ctx_state prev_state;
1498
1499	prev_state = exception_enter();
1500	force_sig(SIGILL, current);
1501	exception_exit(prev_state);
1502}
1503
1504/*
1505 * Called with interrupts disabled.
1506 */
1507asmlinkage void do_watch(struct pt_regs *regs)
1508{
1509	siginfo_t info;
1510	enum ctx_state prev_state;
1511
1512	clear_siginfo(&info);
1513	info.si_signo = SIGTRAP;
1514	info.si_code = TRAP_HWBKPT;
1515
1516	prev_state = exception_enter();
1517	/*
1518	 * Clear WP (bit 22) bit of cause register so we don't loop
1519	 * forever.
1520	 */
1521	clear_c0_cause(CAUSEF_WP);
1522
1523	/*
1524	 * If the current thread has the watch registers loaded, save
1525	 * their values and send SIGTRAP.  Otherwise another thread
1526	 * left the registers set, clear them and continue.
1527	 */
1528	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1529		mips_read_watch_registers();
1530		local_irq_enable();
1531		force_sig_info(SIGTRAP, &info, current);
1532	} else {
1533		mips_clear_watch_registers();
1534		local_irq_enable();
1535	}
1536	exception_exit(prev_state);
1537}
1538
1539asmlinkage void do_mcheck(struct pt_regs *regs)
1540{
1541	int multi_match = regs->cp0_status & ST0_TS;
1542	enum ctx_state prev_state;
1543	mm_segment_t old_fs = get_fs();
1544
1545	prev_state = exception_enter();
1546	show_regs(regs);
1547
1548	if (multi_match) {
1549		dump_tlb_regs();
1550		pr_info("\n");
1551		dump_tlb_all();
1552	}
1553
1554	if (!user_mode(regs))
1555		set_fs(KERNEL_DS);
1556
1557	show_code((unsigned int __user *) regs->cp0_epc);
1558
1559	set_fs(old_fs);
1560
1561	/*
1562	 * Some chips may have other causes of machine check (e.g. SB1
1563	 * graduation timer)
1564	 */
1565	panic("Caught Machine Check exception - %scaused by multiple "
1566	      "matching entries in the TLB.",
1567	      (multi_match) ? "" : "not ");
1568}
1569
1570asmlinkage void do_mt(struct pt_regs *regs)
1571{
1572	int subcode;
1573
1574	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1575			>> VPECONTROL_EXCPT_SHIFT;
1576	switch (subcode) {
1577	case 0:
1578		printk(KERN_DEBUG "Thread Underflow\n");
1579		break;
1580	case 1:
1581		printk(KERN_DEBUG "Thread Overflow\n");
1582		break;
1583	case 2:
1584		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1585		break;
1586	case 3:
1587		printk(KERN_DEBUG "Gating Storage Exception\n");
1588		break;
1589	case 4:
1590		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1591		break;
1592	case 5:
1593		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1594		break;
1595	default:
1596		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1597			subcode);
1598		break;
1599	}
1600	die_if_kernel("MIPS MT Thread exception in kernel", regs);
1601
1602	force_sig(SIGILL, current);
1603}
1604
1605
1606asmlinkage void do_dsp(struct pt_regs *regs)
1607{
1608	if (cpu_has_dsp)
1609		panic("Unexpected DSP exception");
1610
1611	force_sig(SIGILL, current);
1612}
1613
1614asmlinkage void do_reserved(struct pt_regs *regs)
1615{
1616	/*
1617	 * Game over - no way to handle this if it ever occurs.	 Most probably
1618	 * caused by a new unknown cpu type or after another deadly
1619	 * hard/software error.
1620	 */
1621	show_regs(regs);
1622	panic("Caught reserved exception %ld - should not happen.",
1623	      (regs->cp0_cause & 0x7f) >> 2);
1624}
1625
1626static int __initdata l1parity = 1;
1627static int __init nol1parity(char *s)
1628{
1629	l1parity = 0;
1630	return 1;
1631}
1632__setup("nol1par", nol1parity);
1633static int __initdata l2parity = 1;
1634static int __init nol2parity(char *s)
1635{
1636	l2parity = 0;
1637	return 1;
1638}
1639__setup("nol2par", nol2parity);
1640
1641/*
1642 * Some MIPS CPUs can enable/disable for cache parity detection, but do
1643 * it different ways.
1644 */
1645static inline void parity_protection_init(void)
1646{
1647#define ERRCTL_PE	0x80000000
1648#define ERRCTL_L2P	0x00800000
1649
1650	if (mips_cm_revision() >= CM_REV_CM3) {
1651		ulong gcr_ectl, cp0_ectl;
1652
1653		/*
1654		 * With CM3 systems we need to ensure that the L1 & L2
1655		 * parity enables are set to the same value, since this
1656		 * is presumed by the hardware engineers.
1657		 *
1658		 * If the user disabled either of L1 or L2 ECC checking,
1659		 * disable both.
1660		 */
1661		l1parity &= l2parity;
1662		l2parity &= l1parity;
1663
1664		/* Probe L1 ECC support */
1665		cp0_ectl = read_c0_ecc();
1666		write_c0_ecc(cp0_ectl | ERRCTL_PE);
1667		back_to_back_c0_hazard();
1668		cp0_ectl = read_c0_ecc();
1669
1670		/* Probe L2 ECC support */
1671		gcr_ectl = read_gcr_err_control();
1672
1673		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
1674		    !(cp0_ectl & ERRCTL_PE)) {
1675			/*
1676			 * One of L1 or L2 ECC checking isn't supported,
1677			 * so we cannot enable either.
1678			 */
1679			l1parity = l2parity = 0;
1680		}
1681
1682		/* Configure L1 ECC checking */
1683		if (l1parity)
1684			cp0_ectl |= ERRCTL_PE;
1685		else
1686			cp0_ectl &= ~ERRCTL_PE;
1687		write_c0_ecc(cp0_ectl);
1688		back_to_back_c0_hazard();
1689		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);
1690
1691		/* Configure L2 ECC checking */
1692		if (l2parity)
1693			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
1694		else
1695			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
1696		write_gcr_err_control(gcr_ectl);
1697		gcr_ectl = read_gcr_err_control();
1698		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
1699		WARN_ON(!!gcr_ectl != l2parity);
1700
1701		pr_info("Cache parity protection %sabled\n",
1702			l1parity ? "en" : "dis");
1703		return;
1704	}
1705
1706	switch (current_cpu_type()) {
1707	case CPU_24K:
1708	case CPU_34K:
1709	case CPU_74K:
1710	case CPU_1004K:
1711	case CPU_1074K:
1712	case CPU_INTERAPTIV:
1713	case CPU_PROAPTIV:
1714	case CPU_P5600:
1715	case CPU_QEMU_GENERIC:
 
1716	case CPU_P6600:
1717		{
 
 
1718			unsigned long errctl;
1719			unsigned int l1parity_present, l2parity_present;
1720
1721			errctl = read_c0_ecc();
1722			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
1723
1724			/* probe L1 parity support */
1725			write_c0_ecc(errctl | ERRCTL_PE);
1726			back_to_back_c0_hazard();
1727			l1parity_present = (read_c0_ecc() & ERRCTL_PE);
1728
1729			/* probe L2 parity support */
1730			write_c0_ecc(errctl|ERRCTL_L2P);
1731			back_to_back_c0_hazard();
1732			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
1733
1734			if (l1parity_present && l2parity_present) {
1735				if (l1parity)
1736					errctl |= ERRCTL_PE;
1737				if (l1parity ^ l2parity)
1738					errctl |= ERRCTL_L2P;
1739			} else if (l1parity_present) {
1740				if (l1parity)
1741					errctl |= ERRCTL_PE;
1742			} else if (l2parity_present) {
1743				if (l2parity)
1744					errctl |= ERRCTL_L2P;
1745			} else {
1746				/* No parity available */
1747			}
1748
1749			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
1750
1751			write_c0_ecc(errctl);
1752			back_to_back_c0_hazard();
1753			errctl = read_c0_ecc();
1754			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
1755
1756			if (l1parity_present)
1757				printk(KERN_INFO "Cache parity protection %sabled\n",
1758				       (errctl & ERRCTL_PE) ? "en" : "dis");
1759
1760			if (l2parity_present) {
1761				if (l1parity_present && l1parity)
1762					errctl ^= ERRCTL_L2P;
1763				printk(KERN_INFO "L2 cache parity protection %sabled\n",
1764				       (errctl & ERRCTL_L2P) ? "en" : "dis");
1765			}
1766		}
1767		break;
1768
1769	case CPU_5KC:
1770	case CPU_5KE:
1771	case CPU_LOONGSON1:
1772		write_c0_ecc(0x80000000);
1773		back_to_back_c0_hazard();
1774		/* Set the PE bit (bit 31) in the c0_errctl register. */
1775		printk(KERN_INFO "Cache parity protection %sabled\n",
1776		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
1777		break;
1778	case CPU_20KC:
1779	case CPU_25KF:
1780		/* Clear the DE bit (bit 16) in the c0_status register. */
1781		printk(KERN_INFO "Enable cache parity protection for "
1782		       "MIPS 20KC/25KF CPUs.\n");
1783		clear_c0_status(ST0_DE);
1784		break;
1785	default:
1786		break;
1787	}
1788}
1789
1790asmlinkage void cache_parity_error(void)
1791{
1792	const int field = 2 * sizeof(unsigned long);
1793	unsigned int reg_val;
1794
1795	/* For the moment, report the problem and hang. */
1796	printk("Cache error exception:\n");
1797	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1798	reg_val = read_c0_cacheerr();
1799	printk("c0_cacheerr == %08x\n", reg_val);
1800
1801	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1802	       reg_val & (1<<30) ? "secondary" : "primary",
1803	       reg_val & (1<<31) ? "data" : "insn");
1804	if ((cpu_has_mips_r2_r6) &&
1805	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1806		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1807			reg_val & (1<<29) ? "ED " : "",
1808			reg_val & (1<<28) ? "ET " : "",
1809			reg_val & (1<<27) ? "ES " : "",
1810			reg_val & (1<<26) ? "EE " : "",
1811			reg_val & (1<<25) ? "EB " : "",
1812			reg_val & (1<<24) ? "EI " : "",
1813			reg_val & (1<<23) ? "E1 " : "",
1814			reg_val & (1<<22) ? "E0 " : "");
1815	} else {
1816		pr_err("Error bits: %s%s%s%s%s%s%s\n",
1817			reg_val & (1<<29) ? "ED " : "",
1818			reg_val & (1<<28) ? "ET " : "",
1819			reg_val & (1<<26) ? "EE " : "",
1820			reg_val & (1<<25) ? "EB " : "",
1821			reg_val & (1<<24) ? "EI " : "",
1822			reg_val & (1<<23) ? "E1 " : "",
1823			reg_val & (1<<22) ? "E0 " : "");
1824	}
1825	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1826
1827#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1828	if (reg_val & (1<<22))
1829		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1830
1831	if (reg_val & (1<<23))
1832		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1833#endif
1834
1835	panic("Can't handle the cache error!");
1836}
1837
1838asmlinkage void do_ftlb(void)
1839{
1840	const int field = 2 * sizeof(unsigned long);
1841	unsigned int reg_val;
1842
1843	/* For the moment, report the problem and hang. */
1844	if ((cpu_has_mips_r2_r6) &&
1845	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
1846	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
1847		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1848		       read_c0_ecc());
1849		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1850		reg_val = read_c0_cacheerr();
1851		pr_err("c0_cacheerr == %08x\n", reg_val);
1852
1853		if ((reg_val & 0xc0000000) == 0xc0000000) {
1854			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
1855		} else {
1856			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1857			       reg_val & (1<<30) ? "secondary" : "primary",
1858			       reg_val & (1<<31) ? "data" : "insn");
1859		}
1860	} else {
1861		pr_err("FTLB error exception\n");
1862	}
1863	/* Just print the cacheerr bits for now */
1864	cache_parity_error();
1865}
1866
1867/*
1868 * SDBBP EJTAG debug exception handler.
1869 * We skip the instruction and return to the next instruction.
1870 */
1871void ejtag_exception_handler(struct pt_regs *regs)
1872{
1873	const int field = 2 * sizeof(unsigned long);
1874	unsigned long depc, old_epc, old_ra;
1875	unsigned int debug;
1876
1877	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1878	depc = read_c0_depc();
1879	debug = read_c0_debug();
1880	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1881	if (debug & 0x80000000) {
1882		/*
1883		 * In branch delay slot.
1884		 * We cheat a little bit here and use EPC to calculate the
1885		 * debug return address (DEPC). EPC is restored after the
1886		 * calculation.
1887		 */
1888		old_epc = regs->cp0_epc;
1889		old_ra = regs->regs[31];
1890		regs->cp0_epc = depc;
1891		compute_return_epc(regs);
1892		depc = regs->cp0_epc;
1893		regs->cp0_epc = old_epc;
1894		regs->regs[31] = old_ra;
1895	} else
1896		depc += 4;
1897	write_c0_depc(depc);
1898
1899#if 0
1900	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1901	write_c0_debug(debug | 0x100);
1902#endif
1903}
1904
1905/*
1906 * NMI exception handler.
1907 * No lock; only written during early bootup by CPU 0.
1908 */
1909static RAW_NOTIFIER_HEAD(nmi_chain);
1910
1911int register_nmi_notifier(struct notifier_block *nb)
1912{
1913	return raw_notifier_chain_register(&nmi_chain, nb);
1914}
1915
1916void __noreturn nmi_exception_handler(struct pt_regs *regs)
1917{
1918	char str[100];
1919
1920	nmi_enter();
1921	raw_notifier_call_chain(&nmi_chain, 0, regs);
1922	bust_spinlocks(1);
1923	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
1924		 smp_processor_id(), regs->cp0_epc);
1925	regs->cp0_epc = read_c0_errorepc();
1926	die(str, regs);
1927	nmi_exit();
1928}
1929
1930#define VECTORSPACING 0x100	/* for EI/VI mode */
1931
1932unsigned long ebase;
1933EXPORT_SYMBOL_GPL(ebase);
1934unsigned long exception_handlers[32];
1935unsigned long vi_handlers[64];
1936
1937void __init *set_except_vector(int n, void *addr)
1938{
1939	unsigned long handler = (unsigned long) addr;
1940	unsigned long old_handler;
1941
1942#ifdef CONFIG_CPU_MICROMIPS
1943	/*
1944	 * Only the TLB handlers are cache aligned with an even
1945	 * address. All other handlers are on an odd address and
1946	 * require no modification. Otherwise, MIPS32 mode will
1947	 * be entered when handling any TLB exceptions. That
1948	 * would be bad...since we must stay in microMIPS mode.
1949	 */
1950	if (!(handler & 0x1))
1951		handler |= 1;
1952#endif
1953	old_handler = xchg(&exception_handlers[n], handler);
1954
1955	if (n == 0 && cpu_has_divec) {
1956#ifdef CONFIG_CPU_MICROMIPS
1957		unsigned long jump_mask = ~((1 << 27) - 1);
1958#else
1959		unsigned long jump_mask = ~((1 << 28) - 1);
1960#endif
1961		u32 *buf = (u32 *)(ebase + 0x200);
1962		unsigned int k0 = 26;
1963		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
1964			uasm_i_j(&buf, handler & ~jump_mask);
1965			uasm_i_nop(&buf);
1966		} else {
1967			UASM_i_LA(&buf, k0, handler);
1968			uasm_i_jr(&buf, k0);
1969			uasm_i_nop(&buf);
1970		}
1971		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
1972	}
1973	return (void *)old_handler;
1974}
1975
1976static void do_default_vi(void)
1977{
1978	show_regs(get_irq_regs());
1979	panic("Caught unexpected vectored interrupt.");
1980}
1981
1982static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1983{
1984	unsigned long handler;
1985	unsigned long old_handler = vi_handlers[n];
1986	int srssets = current_cpu_data.srsets;
1987	u16 *h;
1988	unsigned char *b;
1989
1990	BUG_ON(!cpu_has_veic && !cpu_has_vint);
1991
1992	if (addr == NULL) {
1993		handler = (unsigned long) do_default_vi;
1994		srs = 0;
1995	} else
1996		handler = (unsigned long) addr;
1997	vi_handlers[n] = handler;
1998
1999	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
2000
2001	if (srs >= srssets)
2002		panic("Shadow register set %d not supported", srs);
2003
2004	if (cpu_has_veic) {
2005		if (board_bind_eic_interrupt)
2006			board_bind_eic_interrupt(n, srs);
2007	} else if (cpu_has_vint) {
2008		/* SRSMap is only defined if shadow sets are implemented */
2009		if (srssets > 1)
2010			change_c0_srsmap(0xf << n*4, srs << n*4);
2011	}
2012
2013	if (srs == 0) {
2014		/*
2015		 * If no shadow set is selected then use the default handler
2016		 * that does normal register saving and standard interrupt exit
2017		 */
2018		extern char except_vec_vi, except_vec_vi_lui;
2019		extern char except_vec_vi_ori, except_vec_vi_end;
2020		extern char rollback_except_vec_vi;
2021		char *vec_start = using_rollback_handler() ?
2022			&rollback_except_vec_vi : &except_vec_vi;
2023#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
2024		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
2025		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
2026#else
2027		const int lui_offset = &except_vec_vi_lui - vec_start;
2028		const int ori_offset = &except_vec_vi_ori - vec_start;
2029#endif
2030		const int handler_len = &except_vec_vi_end - vec_start;
2031
2032		if (handler_len > VECTORSPACING) {
2033			/*
2034			 * Sigh... panicing won't help as the console
2035			 * is probably not configured :(
2036			 */
2037			panic("VECTORSPACING too small");
2038		}
2039
2040		set_handler(((unsigned long)b - ebase), vec_start,
2041#ifdef CONFIG_CPU_MICROMIPS
2042				(handler_len - 1));
2043#else
2044				handler_len);
2045#endif
2046		h = (u16 *)(b + lui_offset);
2047		*h = (handler >> 16) & 0xffff;
2048		h = (u16 *)(b + ori_offset);
2049		*h = (handler & 0xffff);
2050		local_flush_icache_range((unsigned long)b,
2051					 (unsigned long)(b+handler_len));
2052	}
2053	else {
2054		/*
2055		 * In other cases jump directly to the interrupt handler. It
2056		 * is the handler's responsibility to save registers if required
2057		 * (eg hi/lo) and return from the exception using "eret".
2058		 */
2059		u32 insn;
2060
2061		h = (u16 *)b;
2062		/* j handler */
2063#ifdef CONFIG_CPU_MICROMIPS
2064		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
2065#else
2066		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
2067#endif
2068		h[0] = (insn >> 16) & 0xffff;
2069		h[1] = insn & 0xffff;
2070		h[2] = 0;
2071		h[3] = 0;
2072		local_flush_icache_range((unsigned long)b,
2073					 (unsigned long)(b+8));
2074	}
2075
2076	return (void *)old_handler;
2077}
2078
2079void *set_vi_handler(int n, vi_handler_t addr)
2080{
2081	return set_vi_srs_handler(n, addr, 0);
2082}
2083
2084extern void tlb_init(void);
2085
2086/*
2087 * Timer interrupt
2088 */
2089int cp0_compare_irq;
2090EXPORT_SYMBOL_GPL(cp0_compare_irq);
2091int cp0_compare_irq_shift;
2092
2093/*
2094 * Performance counter IRQ or -1 if shared with timer
2095 */
2096int cp0_perfcount_irq;
2097EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
2098
2099/*
2100 * Fast debug channel IRQ or -1 if not present
2101 */
2102int cp0_fdc_irq;
2103EXPORT_SYMBOL_GPL(cp0_fdc_irq);
2104
2105static int noulri;
2106
2107static int __init ulri_disable(char *s)
2108{
2109	pr_info("Disabling ulri\n");
2110	noulri = 1;
2111
2112	return 1;
2113}
2114__setup("noulri", ulri_disable);
2115
2116/* configure STATUS register */
2117static void configure_status(void)
2118{
2119	/*
2120	 * Disable coprocessors and select 32-bit or 64-bit addressing
2121	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
2122	 * flag that some firmware may have left set and the TS bit (for
2123	 * IP27).  Set XX for ISA IV code to work.
2124	 */
2125	unsigned int status_set = ST0_CU0;
2126#ifdef CONFIG_64BIT
2127	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
2128#endif
2129	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
2130		status_set |= ST0_XX;
2131	if (cpu_has_dsp)
2132		status_set |= ST0_MX;
2133
2134	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
2135			 status_set);
2136}
2137
2138unsigned int hwrena;
2139EXPORT_SYMBOL_GPL(hwrena);
2140
2141/* configure HWRENA register */
2142static void configure_hwrena(void)
2143{
2144	hwrena = cpu_hwrena_impl_bits;
2145
2146	if (cpu_has_mips_r2_r6)
2147		hwrena |= MIPS_HWRENA_CPUNUM |
2148			  MIPS_HWRENA_SYNCISTEP |
2149			  MIPS_HWRENA_CC |
2150			  MIPS_HWRENA_CCRES;
2151
2152	if (!noulri && cpu_has_userlocal)
2153		hwrena |= MIPS_HWRENA_ULR;
2154
2155	if (hwrena)
2156		write_c0_hwrena(hwrena);
2157}
2158
2159static void configure_exception_vector(void)
2160{
2161	if (cpu_has_veic || cpu_has_vint) {
2162		unsigned long sr = set_c0_status(ST0_BEV);
2163		/* If available, use WG to set top bits of EBASE */
2164		if (cpu_has_ebase_wg) {
2165#ifdef CONFIG_64BIT
2166			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
2167#else
2168			write_c0_ebase(ebase | MIPS_EBASE_WG);
2169#endif
2170		}
2171		write_c0_ebase(ebase);
2172		write_c0_status(sr);
2173		/* Setting vector spacing enables EI/VI mode  */
2174		change_c0_intctl(0x3e0, VECTORSPACING);
2175	}
2176	if (cpu_has_divec) {
2177		if (cpu_has_mipsmt) {
2178			unsigned int vpflags = dvpe();
2179			set_c0_cause(CAUSEF_IV);
2180			evpe(vpflags);
2181		} else
2182			set_c0_cause(CAUSEF_IV);
2183	}
2184}
2185
2186void per_cpu_trap_init(bool is_boot_cpu)
2187{
2188	unsigned int cpu = smp_processor_id();
2189
2190	configure_status();
2191	configure_hwrena();
2192
2193	configure_exception_vector();
2194
2195	/*
2196	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
2197	 *
2198	 *  o read IntCtl.IPTI to determine the timer interrupt
2199	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
2200	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
2201	 */
2202	if (cpu_has_mips_r2_r6) {
2203		/*
2204		 * We shouldn't trust a secondary core has a sane EBASE register
2205		 * so use the one calculated by the boot CPU.
2206		 */
2207		if (!is_boot_cpu) {
2208			/* If available, use WG to set top bits of EBASE */
2209			if (cpu_has_ebase_wg) {
2210#ifdef CONFIG_64BIT
2211				write_c0_ebase_64(ebase | MIPS_EBASE_WG);
2212#else
2213				write_c0_ebase(ebase | MIPS_EBASE_WG);
2214#endif
2215			}
2216			write_c0_ebase(ebase);
2217		}
2218
2219		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2220		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2221		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
2222		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
2223		if (!cp0_fdc_irq)
2224			cp0_fdc_irq = -1;
2225
2226	} else {
2227		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
2228		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
2229		cp0_perfcount_irq = -1;
2230		cp0_fdc_irq = -1;
2231	}
2232
2233	if (!cpu_data[cpu].asid_cache)
2234		cpu_data[cpu].asid_cache = asid_first_version(cpu);
2235
2236	mmgrab(&init_mm);
2237	current->active_mm = &init_mm;
2238	BUG_ON(current->mm);
2239	enter_lazy_tlb(&init_mm, current);
2240
2241	/* Boot CPU's cache setup in setup_arch(). */
2242	if (!is_boot_cpu)
2243		cpu_cache_init();
2244	tlb_init();
2245	TLBMISS_HANDLER_SETUP();
2246}
2247
2248/* Install CPU exception handler */
2249void set_handler(unsigned long offset, void *addr, unsigned long size)
2250{
2251#ifdef CONFIG_CPU_MICROMIPS
2252	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
2253#else
2254	memcpy((void *)(ebase + offset), addr, size);
2255#endif
2256	local_flush_icache_range(ebase + offset, ebase + offset + size);
2257}
2258
2259static const char panic_null_cerr[] =
2260	"Trying to set NULL cache error exception handler\n";
2261
2262/*
2263 * Install uncached CPU exception handler.
2264 * This is suitable only for the cache error exception which is the only
2265 * exception handler that is being run uncached.
2266 */
2267void set_uncached_handler(unsigned long offset, void *addr,
2268	unsigned long size)
2269{
2270	unsigned long uncached_ebase = CKSEG1ADDR(ebase);
2271
2272	if (!addr)
2273		panic(panic_null_cerr);
2274
2275	memcpy((void *)(uncached_ebase + offset), addr, size);
2276}
2277
2278static int __initdata rdhwr_noopt;
2279static int __init set_rdhwr_noopt(char *str)
2280{
2281	rdhwr_noopt = 1;
2282	return 1;
2283}
2284
2285__setup("rdhwr_noopt", set_rdhwr_noopt);
2286
2287void __init trap_init(void)
2288{
2289	extern char except_vec3_generic;
2290	extern char except_vec4;
2291	extern char except_vec3_r4000;
2292	unsigned long i;
2293
2294	check_wait();
2295
2296	if (cpu_has_veic || cpu_has_vint) {
2297		unsigned long size = 0x200 + VECTORSPACING*64;
2298		phys_addr_t ebase_pa;
2299
2300		ebase = (unsigned long)
2301			__alloc_bootmem(size, 1 << fls(size), 0);
2302
2303		/*
2304		 * Try to ensure ebase resides in KSeg0 if possible.
2305		 *
2306		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
2307		 * hitting a poorly defined exception base for Cache Errors.
2308		 * The allocation is likely to be in the low 512MB of physical,
2309		 * in which case we should be able to convert to KSeg0.
2310		 *
2311		 * EVA is special though as it allows segments to be rearranged
2312		 * and to become uncached during cache error handling.
2313		 */
2314		ebase_pa = __pa(ebase);
2315		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
2316			ebase = CKSEG0ADDR(ebase_pa);
2317	} else {
2318		ebase = CAC_BASE;
2319
2320		if (cpu_has_mips_r2_r6) {
2321			if (cpu_has_ebase_wg) {
2322#ifdef CONFIG_64BIT
2323				ebase = (read_c0_ebase_64() & ~0xfff);
2324#else
2325				ebase = (read_c0_ebase() & ~0xfff);
2326#endif
2327			} else {
2328				ebase += (read_c0_ebase() & 0x3ffff000);
2329			}
2330		}
2331	}
2332
2333	if (cpu_has_mmips) {
2334		unsigned int config3 = read_c0_config3();
2335
2336		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
2337			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
2338		else
2339			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
2340	}
2341
2342	if (board_ebase_setup)
2343		board_ebase_setup();
2344	per_cpu_trap_init(true);
2345
2346	/*
2347	 * Copy the generic exception handlers to their final destination.
2348	 * This will be overridden later as suitable for a particular
2349	 * configuration.
2350	 */
2351	set_handler(0x180, &except_vec3_generic, 0x80);
2352
2353	/*
2354	 * Setup default vectors
2355	 */
2356	for (i = 0; i <= 31; i++)
2357		set_except_vector(i, handle_reserved);
2358
2359	/*
2360	 * Copy the EJTAG debug exception vector handler code to it's final
2361	 * destination.
2362	 */
2363	if (cpu_has_ejtag && board_ejtag_handler_setup)
2364		board_ejtag_handler_setup();
2365
2366	/*
2367	 * Only some CPUs have the watch exceptions.
2368	 */
2369	if (cpu_has_watch)
2370		set_except_vector(EXCCODE_WATCH, handle_watch);
2371
2372	/*
2373	 * Initialise interrupt handlers
2374	 */
2375	if (cpu_has_veic || cpu_has_vint) {
2376		int nvec = cpu_has_veic ? 64 : 8;
2377		for (i = 0; i < nvec; i++)
2378			set_vi_handler(i, NULL);
2379	}
2380	else if (cpu_has_divec)
2381		set_handler(0x200, &except_vec4, 0x8);
2382
2383	/*
2384	 * Some CPUs can enable/disable for cache parity detection, but does
2385	 * it different ways.
2386	 */
2387	parity_protection_init();
2388
2389	/*
2390	 * The Data Bus Errors / Instruction Bus Errors are signaled
2391	 * by external hardware.  Therefore these two exceptions
2392	 * may have board specific handlers.
2393	 */
2394	if (board_be_init)
2395		board_be_init();
2396
2397	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
2398					rollback_handle_int : handle_int);
2399	set_except_vector(EXCCODE_MOD, handle_tlbm);
2400	set_except_vector(EXCCODE_TLBL, handle_tlbl);
2401	set_except_vector(EXCCODE_TLBS, handle_tlbs);
2402
2403	set_except_vector(EXCCODE_ADEL, handle_adel);
2404	set_except_vector(EXCCODE_ADES, handle_ades);
2405
2406	set_except_vector(EXCCODE_IBE, handle_ibe);
2407	set_except_vector(EXCCODE_DBE, handle_dbe);
2408
2409	set_except_vector(EXCCODE_SYS, handle_sys);
2410	set_except_vector(EXCCODE_BP, handle_bp);
2411
2412	if (rdhwr_noopt)
2413		set_except_vector(EXCCODE_RI, handle_ri);
2414	else {
2415		if (cpu_has_vtag_icache)
2416			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2417		else if (current_cpu_type() == CPU_LOONGSON3)
2418			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2419		else
2420			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
2421	}
2422
2423	set_except_vector(EXCCODE_CPU, handle_cpu);
2424	set_except_vector(EXCCODE_OV, handle_ov);
2425	set_except_vector(EXCCODE_TR, handle_tr);
2426	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2427
2428	if (board_nmi_handler_setup)
2429		board_nmi_handler_setup();
2430
2431	if (cpu_has_fpu && !cpu_has_nofpuex)
2432		set_except_vector(EXCCODE_FPE, handle_fpe);
2433
2434	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
2435
2436	if (cpu_has_rixiex) {
2437		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
2438		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
2439	}
2440
2441	set_except_vector(EXCCODE_MSADIS, handle_msa);
2442	set_except_vector(EXCCODE_MDMX, handle_mdmx);
2443
2444	if (cpu_has_mcheck)
2445		set_except_vector(EXCCODE_MCHECK, handle_mcheck);
2446
2447	if (cpu_has_mipsmt)
2448		set_except_vector(EXCCODE_THREAD, handle_mt);
2449
2450	set_except_vector(EXCCODE_DSPDIS, handle_dsp);
2451
2452	if (board_cache_error_setup)
2453		board_cache_error_setup();
2454
2455	if (cpu_has_vce)
2456		/* Special exception: R4[04]00 uses also the divec space. */
2457		set_handler(0x180, &except_vec3_r4000, 0x100);
2458	else if (cpu_has_4kex)
2459		set_handler(0x180, &except_vec3_generic, 0x80);
2460	else
2461		set_handler(0x080, &except_vec3_generic, 0x80);
2462
2463	local_flush_icache_range(ebase, ebase + 0x400);
2464
2465	sort_extable(__start___dbe_table, __stop___dbe_table);
2466
2467	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
2468}
2469
2470static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2471			    void *v)
2472{
2473	switch (cmd) {
2474	case CPU_PM_ENTER_FAILED:
2475	case CPU_PM_EXIT:
2476		configure_status();
2477		configure_hwrena();
2478		configure_exception_vector();
2479
2480		/* Restore register with CPU number for TLB handlers */
2481		TLBMISS_HANDLER_RESTORE();
2482
2483		break;
2484	}
2485
2486	return NOTIFY_OK;
2487}
2488
2489static struct notifier_block trap_pm_notifier_block = {
2490	.notifier_call = trap_pm_notifier,
2491};
2492
2493static int __init trap_pm_init(void)
2494{
2495	return cpu_pm_register_notifier(&trap_pm_notifier_block);
2496}
2497arch_initcall(trap_pm_init);
v4.10.11
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
   7 * Copyright (C) 1995, 1996 Paul M. Antoine
   8 * Copyright (C) 1998 Ulf Carlsson
   9 * Copyright (C) 1999 Silicon Graphics, Inc.
  10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11 * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
  12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  13 * Copyright (C) 2014, Imagination Technologies Ltd.
  14 */
  15#include <linux/bitops.h>
  16#include <linux/bug.h>
  17#include <linux/compiler.h>
  18#include <linux/context_tracking.h>
  19#include <linux/cpu_pm.h>
  20#include <linux/kexec.h>
  21#include <linux/init.h>
  22#include <linux/kernel.h>
  23#include <linux/module.h>
  24#include <linux/extable.h>
  25#include <linux/mm.h>
  26#include <linux/sched.h>
 
  27#include <linux/smp.h>
  28#include <linux/spinlock.h>
  29#include <linux/kallsyms.h>
  30#include <linux/bootmem.h>
  31#include <linux/interrupt.h>
  32#include <linux/ptrace.h>
  33#include <linux/kgdb.h>
  34#include <linux/kdebug.h>
  35#include <linux/kprobes.h>
  36#include <linux/notifier.h>
  37#include <linux/kdb.h>
  38#include <linux/irq.h>
  39#include <linux/perf_event.h>
  40
  41#include <asm/addrspace.h>
  42#include <asm/bootinfo.h>
  43#include <asm/branch.h>
  44#include <asm/break.h>
  45#include <asm/cop2.h>
  46#include <asm/cpu.h>
  47#include <asm/cpu-type.h>
  48#include <asm/dsp.h>
  49#include <asm/fpu.h>
  50#include <asm/fpu_emulator.h>
  51#include <asm/idle.h>
  52#include <asm/mips-cm.h>
  53#include <asm/mips-r2-to-r6-emul.h>
  54#include <asm/mipsregs.h>
  55#include <asm/mipsmtregs.h>
  56#include <asm/module.h>
  57#include <asm/msa.h>
  58#include <asm/pgtable.h>
  59#include <asm/ptrace.h>
  60#include <asm/sections.h>
  61#include <asm/siginfo.h>
  62#include <asm/tlbdebug.h>
  63#include <asm/traps.h>
  64#include <linux/uaccess.h>
  65#include <asm/watch.h>
  66#include <asm/mmu_context.h>
  67#include <asm/types.h>
  68#include <asm/stacktrace.h>
  69#include <asm/uasm.h>
  70
  71extern void check_wait(void);
  72extern asmlinkage void rollback_handle_int(void);
  73extern asmlinkage void handle_int(void);
  74extern u32 handle_tlbl[];
  75extern u32 handle_tlbs[];
  76extern u32 handle_tlbm[];
  77extern asmlinkage void handle_adel(void);
  78extern asmlinkage void handle_ades(void);
  79extern asmlinkage void handle_ibe(void);
  80extern asmlinkage void handle_dbe(void);
  81extern asmlinkage void handle_sys(void);
  82extern asmlinkage void handle_bp(void);
  83extern asmlinkage void handle_ri(void);
  84extern asmlinkage void handle_ri_rdhwr_tlbp(void);
  85extern asmlinkage void handle_ri_rdhwr(void);
  86extern asmlinkage void handle_cpu(void);
  87extern asmlinkage void handle_ov(void);
  88extern asmlinkage void handle_tr(void);
  89extern asmlinkage void handle_msa_fpe(void);
  90extern asmlinkage void handle_fpe(void);
  91extern asmlinkage void handle_ftlb(void);
  92extern asmlinkage void handle_msa(void);
  93extern asmlinkage void handle_mdmx(void);
  94extern asmlinkage void handle_watch(void);
  95extern asmlinkage void handle_mt(void);
  96extern asmlinkage void handle_dsp(void);
  97extern asmlinkage void handle_mcheck(void);
  98extern asmlinkage void handle_reserved(void);
  99extern void tlb_do_page_fault_0(void);
 100
 101void (*board_be_init)(void);
 102int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 103void (*board_nmi_handler_setup)(void);
 104void (*board_ejtag_handler_setup)(void);
 105void (*board_bind_eic_interrupt)(int irq, int regset);
 106void (*board_ebase_setup)(void);
 107void(*board_cache_error_setup)(void);
 108
 109static void show_raw_backtrace(unsigned long reg29)
 110{
 111	unsigned long *sp = (unsigned long *)(reg29 & ~3);
 112	unsigned long addr;
 113
 114	printk("Call Trace:");
 115#ifdef CONFIG_KALLSYMS
 116	printk("\n");
 117#endif
 118	while (!kstack_end(sp)) {
 119		unsigned long __user *p =
 120			(unsigned long __user *)(unsigned long)sp++;
 121		if (__get_user(addr, p)) {
 122			printk(" (Bad stack address)");
 123			break;
 124		}
 125		if (__kernel_text_address(addr))
 126			print_ip_sym(addr);
 127	}
 128	printk("\n");
 129}
 130
 131#ifdef CONFIG_KALLSYMS
 132int raw_show_trace;
 133static int __init set_raw_show_trace(char *str)
 134{
 135	raw_show_trace = 1;
 136	return 1;
 137}
 138__setup("raw_show_trace", set_raw_show_trace);
 139#endif
 140
 141static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
 142{
 143	unsigned long sp = regs->regs[29];
 144	unsigned long ra = regs->regs[31];
 145	unsigned long pc = regs->cp0_epc;
 146
 147	if (!task)
 148		task = current;
 149
 150	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
 151		show_raw_backtrace(sp);
 152		return;
 153	}
 154	printk("Call Trace:\n");
 155	do {
 156		print_ip_sym(pc);
 157		pc = unwind_stack(task, &sp, pc, &ra);
 158	} while (pc);
 159	pr_cont("\n");
 160}
 161
 162/*
 163 * This routine abuses get_user()/put_user() to reference pointers
 164 * with at least a bit of error checking ...
 165 */
 166static void show_stacktrace(struct task_struct *task,
 167	const struct pt_regs *regs)
 168{
 169	const int field = 2 * sizeof(unsigned long);
 170	long stackdata;
 171	int i;
 172	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
 173
 174	printk("Stack :");
 175	i = 0;
 176	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
 177		if (i && ((i % (64 / field)) == 0)) {
 178			pr_cont("\n");
 179			printk("       ");
 180		}
 181		if (i > 39) {
 182			pr_cont(" ...");
 183			break;
 184		}
 185
 186		if (__get_user(stackdata, sp++)) {
 187			pr_cont(" (Bad stack address)");
 188			break;
 189		}
 190
 191		pr_cont(" %0*lx", field, stackdata);
 192		i++;
 193	}
 194	pr_cont("\n");
 195	show_backtrace(task, regs);
 196}
 197
 198void show_stack(struct task_struct *task, unsigned long *sp)
 199{
 200	struct pt_regs regs;
 201	mm_segment_t old_fs = get_fs();
 
 
 202	if (sp) {
 203		regs.regs[29] = (unsigned long)sp;
 204		regs.regs[31] = 0;
 205		regs.cp0_epc = 0;
 206	} else {
 207		if (task && task != current) {
 208			regs.regs[29] = task->thread.reg29;
 209			regs.regs[31] = 0;
 210			regs.cp0_epc = task->thread.reg31;
 211#ifdef CONFIG_KGDB_KDB
 212		} else if (atomic_read(&kgdb_active) != -1 &&
 213			   kdb_current_regs) {
 214			memcpy(&regs, kdb_current_regs, sizeof(regs));
 215#endif /* CONFIG_KGDB_KDB */
 216		} else {
 217			prepare_frametrace(&regs);
 218		}
 219	}
 220	/*
 221	 * show_stack() deals exclusively with kernel mode, so be sure to access
 222	 * the stack in the kernel (not user) address space.
 223	 */
 224	set_fs(KERNEL_DS);
 225	show_stacktrace(task, &regs);
 226	set_fs(old_fs);
 227}
 228
 229static void show_code(unsigned int __user *pc)
 230{
 231	long i;
 232	unsigned short __user *pc16 = NULL;
 233
 234	printk("Code:");
 235
 236	if ((unsigned long)pc & 1)
 237		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
 238	for(i = -3 ; i < 6 ; i++) {
 239		unsigned int insn;
 240		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
 241			pr_cont(" (Bad address in epc)\n");
 242			break;
 243		}
 244		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
 245	}
 246	pr_cont("\n");
 247}
 248
 249static void __show_regs(const struct pt_regs *regs)
 250{
 251	const int field = 2 * sizeof(unsigned long);
 252	unsigned int cause = regs->cp0_cause;
 253	unsigned int exccode;
 254	int i;
 255
 256	show_regs_print_info(KERN_DEFAULT);
 257
 258	/*
 259	 * Saved main processor registers
 260	 */
 261	for (i = 0; i < 32; ) {
 262		if ((i % 4) == 0)
 263			printk("$%2d   :", i);
 264		if (i == 0)
 265			pr_cont(" %0*lx", field, 0UL);
 266		else if (i == 26 || i == 27)
 267			pr_cont(" %*s", field, "");
 268		else
 269			pr_cont(" %0*lx", field, regs->regs[i]);
 270
 271		i++;
 272		if ((i % 4) == 0)
 273			pr_cont("\n");
 274	}
 275
 276#ifdef CONFIG_CPU_HAS_SMARTMIPS
 277	printk("Acx    : %0*lx\n", field, regs->acx);
 278#endif
 279	printk("Hi    : %0*lx\n", field, regs->hi);
 280	printk("Lo    : %0*lx\n", field, regs->lo);
 281
 282	/*
 283	 * Saved cp0 registers
 284	 */
 285	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
 286	       (void *) regs->cp0_epc);
 287	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
 288	       (void *) regs->regs[31]);
 289
 290	printk("Status: %08x	", (uint32_t) regs->cp0_status);
 291
 292	if (cpu_has_3kex) {
 293		if (regs->cp0_status & ST0_KUO)
 294			pr_cont("KUo ");
 295		if (regs->cp0_status & ST0_IEO)
 296			pr_cont("IEo ");
 297		if (regs->cp0_status & ST0_KUP)
 298			pr_cont("KUp ");
 299		if (regs->cp0_status & ST0_IEP)
 300			pr_cont("IEp ");
 301		if (regs->cp0_status & ST0_KUC)
 302			pr_cont("KUc ");
 303		if (regs->cp0_status & ST0_IEC)
 304			pr_cont("IEc ");
 305	} else if (cpu_has_4kex) {
 306		if (regs->cp0_status & ST0_KX)
 307			pr_cont("KX ");
 308		if (regs->cp0_status & ST0_SX)
 309			pr_cont("SX ");
 310		if (regs->cp0_status & ST0_UX)
 311			pr_cont("UX ");
 312		switch (regs->cp0_status & ST0_KSU) {
 313		case KSU_USER:
 314			pr_cont("USER ");
 315			break;
 316		case KSU_SUPERVISOR:
 317			pr_cont("SUPERVISOR ");
 318			break;
 319		case KSU_KERNEL:
 320			pr_cont("KERNEL ");
 321			break;
 322		default:
 323			pr_cont("BAD_MODE ");
 324			break;
 325		}
 326		if (regs->cp0_status & ST0_ERL)
 327			pr_cont("ERL ");
 328		if (regs->cp0_status & ST0_EXL)
 329			pr_cont("EXL ");
 330		if (regs->cp0_status & ST0_IE)
 331			pr_cont("IE ");
 332	}
 333	pr_cont("\n");
 334
 335	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
 336	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
 337
 338	if (1 <= exccode && exccode <= 5)
 339		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
 340
 341	printk("PrId  : %08x (%s)\n", read_c0_prid(),
 342	       cpu_name_string());
 343}
 344
 345/*
 346 * FIXME: really the generic show_regs should take a const pointer argument.
 347 */
 348void show_regs(struct pt_regs *regs)
 349{
 350	__show_regs((struct pt_regs *)regs);
 351}
 352
 353void show_registers(struct pt_regs *regs)
 354{
 355	const int field = 2 * sizeof(unsigned long);
 356	mm_segment_t old_fs = get_fs();
 357
 358	__show_regs(regs);
 359	print_modules();
 360	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
 361	       current->comm, current->pid, current_thread_info(), current,
 362	      field, current_thread_info()->tp_value);
 363	if (cpu_has_userlocal) {
 364		unsigned long tls;
 365
 366		tls = read_c0_userlocal();
 367		if (tls != current_thread_info()->tp_value)
 368			printk("*HwTLS: %0*lx\n", field, tls);
 369	}
 370
 371	if (!user_mode(regs))
 372		/* Necessary for getting the correct stack content */
 373		set_fs(KERNEL_DS);
 374	show_stacktrace(current, regs);
 375	show_code((unsigned int __user *) regs->cp0_epc);
 376	printk("\n");
 377	set_fs(old_fs);
 378}
 379
 380static DEFINE_RAW_SPINLOCK(die_lock);
 381
 382void __noreturn die(const char *str, struct pt_regs *regs)
 383{
 384	static int die_counter;
 385	int sig = SIGSEGV;
 386
 387	oops_enter();
 388
 389	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
 390		       SIGSEGV) == NOTIFY_STOP)
 391		sig = 0;
 392
 393	console_verbose();
 394	raw_spin_lock_irq(&die_lock);
 395	bust_spinlocks(1);
 396
 397	printk("%s[#%d]:\n", str, ++die_counter);
 398	show_registers(regs);
 399	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 400	raw_spin_unlock_irq(&die_lock);
 401
 402	oops_exit();
 403
 404	if (in_interrupt())
 405		panic("Fatal exception in interrupt");
 406
 407	if (panic_on_oops)
 408		panic("Fatal exception");
 409
 410	if (regs && kexec_should_crash(current))
 411		crash_kexec(regs);
 412
 413	do_exit(sig);
 414}
 415
 416extern struct exception_table_entry __start___dbe_table[];
 417extern struct exception_table_entry __stop___dbe_table[];
 418
 419__asm__(
 420"	.section	__dbe_table, \"a\"\n"
 421"	.previous			\n");
 422
 423/* Given an address, look for it in the exception tables. */
 424static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
 425{
 426	const struct exception_table_entry *e;
 427
 428	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
 
 429	if (!e)
 430		e = search_module_dbetables(addr);
 431	return e;
 432}
 433
 434asmlinkage void do_be(struct pt_regs *regs)
 435{
 436	const int field = 2 * sizeof(unsigned long);
 437	const struct exception_table_entry *fixup = NULL;
 438	int data = regs->cp0_cause & 4;
 439	int action = MIPS_BE_FATAL;
 440	enum ctx_state prev_state;
 441
 442	prev_state = exception_enter();
 443	/* XXX For now.	 Fixme, this searches the wrong table ...  */
 444	if (data && !user_mode(regs))
 445		fixup = search_dbe_tables(exception_epc(regs));
 446
 447	if (fixup)
 448		action = MIPS_BE_FIXUP;
 449
 450	if (board_be_handler)
 451		action = board_be_handler(regs, fixup != NULL);
 452	else
 453		mips_cm_error_report();
 454
 455	switch (action) {
 456	case MIPS_BE_DISCARD:
 457		goto out;
 458	case MIPS_BE_FIXUP:
 459		if (fixup) {
 460			regs->cp0_epc = fixup->nextinsn;
 461			goto out;
 462		}
 463		break;
 464	default:
 465		break;
 466	}
 467
 468	/*
 469	 * Assume it would be too dangerous to continue ...
 470	 */
 471	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
 472	       data ? "Data" : "Instruction",
 473	       field, regs->cp0_epc, field, regs->regs[31]);
 474	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
 475		       SIGBUS) == NOTIFY_STOP)
 476		goto out;
 477
 478	die_if_kernel("Oops", regs);
 479	force_sig(SIGBUS, current);
 480
 481out:
 482	exception_exit(prev_state);
 483}
 484
 485/*
 486 * ll/sc, rdhwr, sync emulation
 487 */
 488
 489#define OPCODE 0xfc000000
 490#define BASE   0x03e00000
 491#define RT     0x001f0000
 492#define OFFSET 0x0000ffff
 493#define LL     0xc0000000
 494#define SC     0xe0000000
 495#define SPEC0  0x00000000
 496#define SPEC3  0x7c000000
 497#define RD     0x0000f800
 498#define FUNC   0x0000003f
 499#define SYNC   0x0000000f
 500#define RDHWR  0x0000003b
 501
 502/*  microMIPS definitions   */
 503#define MM_POOL32A_FUNC 0xfc00ffff
 504#define MM_RDHWR        0x00006b3c
 505#define MM_RS           0x001f0000
 506#define MM_RT           0x03e00000
 507
 508/*
 509 * The ll_bit is cleared by r*_switch.S
 510 */
 511
 512unsigned int ll_bit;
 513struct task_struct *ll_task;
 514
 515static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
 516{
 517	unsigned long value, __user *vaddr;
 518	long offset;
 519
 520	/*
 521	 * analyse the ll instruction that just caused a ri exception
 522	 * and put the referenced address to addr.
 523	 */
 524
 525	/* sign extend offset */
 526	offset = opcode & OFFSET;
 527	offset <<= 16;
 528	offset >>= 16;
 529
 530	vaddr = (unsigned long __user *)
 531		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 532
 533	if ((unsigned long)vaddr & 3)
 534		return SIGBUS;
 535	if (get_user(value, vaddr))
 536		return SIGSEGV;
 537
 538	preempt_disable();
 539
 540	if (ll_task == NULL || ll_task == current) {
 541		ll_bit = 1;
 542	} else {
 543		ll_bit = 0;
 544	}
 545	ll_task = current;
 546
 547	preempt_enable();
 548
 549	regs->regs[(opcode & RT) >> 16] = value;
 550
 551	return 0;
 552}
 553
 554static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
 555{
 556	unsigned long __user *vaddr;
 557	unsigned long reg;
 558	long offset;
 559
 560	/*
 561	 * analyse the sc instruction that just caused a ri exception
 562	 * and put the referenced address to addr.
 563	 */
 564
 565	/* sign extend offset */
 566	offset = opcode & OFFSET;
 567	offset <<= 16;
 568	offset >>= 16;
 569
 570	vaddr = (unsigned long __user *)
 571		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 572	reg = (opcode & RT) >> 16;
 573
 574	if ((unsigned long)vaddr & 3)
 575		return SIGBUS;
 576
 577	preempt_disable();
 578
 579	if (ll_bit == 0 || ll_task != current) {
 580		regs->regs[reg] = 0;
 581		preempt_enable();
 582		return 0;
 583	}
 584
 585	preempt_enable();
 586
 587	if (put_user(regs->regs[reg], vaddr))
 588		return SIGSEGV;
 589
 590	regs->regs[reg] = 1;
 591
 592	return 0;
 593}
 594
 595/*
 596 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 597 * opcodes are supposed to result in coprocessor unusable exceptions if
 598 * executed on ll/sc-less processors.  That's the theory.  In practice a
 599 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 600 * instead, so we're doing the emulation thing in both exception handlers.
 601 */
 602static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 603{
 604	if ((opcode & OPCODE) == LL) {
 605		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 606				1, regs, 0);
 607		return simulate_ll(regs, opcode);
 608	}
 609	if ((opcode & OPCODE) == SC) {
 610		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 611				1, regs, 0);
 612		return simulate_sc(regs, opcode);
 613	}
 614
 615	return -1;			/* Must be something else ... */
 616}
 617
 618/*
 619 * Simulate trapping 'rdhwr' instructions to provide user accessible
 620 * registers not implemented in hardware.
 621 */
 622static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
 623{
 624	struct thread_info *ti = task_thread_info(current);
 625
 626	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 627			1, regs, 0);
 628	switch (rd) {
 629	case MIPS_HWR_CPUNUM:		/* CPU number */
 630		regs->regs[rt] = smp_processor_id();
 631		return 0;
 632	case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
 633		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
 634				     current_cpu_data.icache.linesz);
 635		return 0;
 636	case MIPS_HWR_CC:		/* Read count register */
 637		regs->regs[rt] = read_c0_count();
 638		return 0;
 639	case MIPS_HWR_CCRES:		/* Count register resolution */
 640		switch (current_cpu_type()) {
 641		case CPU_20KC:
 642		case CPU_25KF:
 643			regs->regs[rt] = 1;
 644			break;
 645		default:
 646			regs->regs[rt] = 2;
 647		}
 648		return 0;
 649	case MIPS_HWR_ULR:		/* Read UserLocal register */
 650		regs->regs[rt] = ti->tp_value;
 651		return 0;
 652	default:
 653		return -1;
 654	}
 655}
 656
 657static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
 658{
 659	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
 660		int rd = (opcode & RD) >> 11;
 661		int rt = (opcode & RT) >> 16;
 662
 663		simulate_rdhwr(regs, rd, rt);
 664		return 0;
 665	}
 666
 667	/* Not ours.  */
 668	return -1;
 669}
 670
 671static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
 672{
 673	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
 674		int rd = (opcode & MM_RS) >> 16;
 675		int rt = (opcode & MM_RT) >> 21;
 676		simulate_rdhwr(regs, rd, rt);
 677		return 0;
 678	}
 679
 680	/* Not ours.  */
 681	return -1;
 682}
 683
 684static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 685{
 686	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
 687		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 688				1, regs, 0);
 689		return 0;
 690	}
 691
 692	return -1;			/* Must be something else ... */
 693}
 694
 695asmlinkage void do_ov(struct pt_regs *regs)
 696{
 697	enum ctx_state prev_state;
 698	siginfo_t info = {
 699		.si_signo = SIGFPE,
 700		.si_code = FPE_INTOVF,
 701		.si_addr = (void __user *)regs->cp0_epc,
 702	};
 
 703
 704	prev_state = exception_enter();
 705	die_if_kernel("Integer overflow", regs);
 706
 707	force_sig_info(SIGFPE, &info, current);
 708	exception_exit(prev_state);
 709}
 710
 711/*
 712 * Send SIGFPE according to FCSR Cause bits, which must have already
 713 * been masked against Enable bits.  This is impotant as Inexact can
 714 * happen together with Overflow or Underflow, and `ptrace' can set
 715 * any bits.
 716 */
 717void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
 718		     struct task_struct *tsk)
 719{
 720	struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
 
 
 
 
 721
 722	if (fcr31 & FPU_CSR_INV_X)
 723		si.si_code = FPE_FLTINV;
 724	else if (fcr31 & FPU_CSR_DIV_X)
 725		si.si_code = FPE_FLTDIV;
 726	else if (fcr31 & FPU_CSR_OVF_X)
 727		si.si_code = FPE_FLTOVF;
 728	else if (fcr31 & FPU_CSR_UDF_X)
 729		si.si_code = FPE_FLTUND;
 730	else if (fcr31 & FPU_CSR_INE_X)
 731		si.si_code = FPE_FLTRES;
 732	else
 733		si.si_code = __SI_FAULT;
 734	force_sig_info(SIGFPE, &si, tsk);
 735}
 736
 737int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 738{
 739	struct siginfo si = { 0 };
 740	struct vm_area_struct *vma;
 741
 
 742	switch (sig) {
 743	case 0:
 744		return 0;
 745
 746	case SIGFPE:
 747		force_fcr31_sig(fcr31, fault_addr, current);
 748		return 1;
 749
 750	case SIGBUS:
 751		si.si_addr = fault_addr;
 752		si.si_signo = sig;
 753		si.si_code = BUS_ADRERR;
 754		force_sig_info(sig, &si, current);
 755		return 1;
 756
 757	case SIGSEGV:
 758		si.si_addr = fault_addr;
 759		si.si_signo = sig;
 760		down_read(&current->mm->mmap_sem);
 761		vma = find_vma(current->mm, (unsigned long)fault_addr);
 762		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
 763			si.si_code = SEGV_ACCERR;
 764		else
 765			si.si_code = SEGV_MAPERR;
 766		up_read(&current->mm->mmap_sem);
 767		force_sig_info(sig, &si, current);
 768		return 1;
 769
 770	default:
 771		force_sig(sig, current);
 772		return 1;
 773	}
 774}
 775
 776static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 777		       unsigned long old_epc, unsigned long old_ra)
 778{
 779	union mips_instruction inst = { .word = opcode };
 780	void __user *fault_addr;
 781	unsigned long fcr31;
 782	int sig;
 783
 784	/* If it's obviously not an FP instruction, skip it */
 785	switch (inst.i_format.opcode) {
 786	case cop1_op:
 787	case cop1x_op:
 788	case lwc1_op:
 789	case ldc1_op:
 790	case swc1_op:
 791	case sdc1_op:
 792		break;
 793
 794	default:
 795		return -1;
 796	}
 797
 798	/*
 799	 * do_ri skipped over the instruction via compute_return_epc, undo
 800	 * that for the FPU emulator.
 801	 */
 802	regs->cp0_epc = old_epc;
 803	regs->regs[31] = old_ra;
 804
 805	/* Save the FP context to struct thread_struct */
 806	lose_fpu(1);
 807
 808	/* Run the emulator */
 809	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 810				       &fault_addr);
 811
 812	/*
 813	 * We can't allow the emulated instruction to leave any
 814	 * enabled Cause bits set in $fcr31.
 815	 */
 816	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
 817	current->thread.fpu.fcr31 &= ~fcr31;
 818
 819	/* Restore the hardware register state */
 820	own_fpu(1);
 821
 822	/* Send a signal if required.  */
 823	process_fpemu_return(sig, fault_addr, fcr31);
 824
 825	return 0;
 826}
 827
 828/*
 829 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 830 */
 831asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 832{
 833	enum ctx_state prev_state;
 834	void __user *fault_addr;
 835	int sig;
 836
 837	prev_state = exception_enter();
 838	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
 839		       SIGFPE) == NOTIFY_STOP)
 840		goto out;
 841
 842	/* Clear FCSR.Cause before enabling interrupts */
 843	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
 844	local_irq_enable();
 845
 846	die_if_kernel("FP exception in kernel code", regs);
 847
 848	if (fcr31 & FPU_CSR_UNI_X) {
 849		/*
 850		 * Unimplemented operation exception.  If we've got the full
 851		 * software emulator on-board, let's use it...
 852		 *
 853		 * Force FPU to dump state into task/thread context.  We're
 854		 * moving a lot of data here for what is probably a single
 855		 * instruction, but the alternative is to pre-decode the FP
 856		 * register operands before invoking the emulator, which seems
 857		 * a bit extreme for what should be an infrequent event.
 858		 */
 859		/* Ensure 'resume' not overwrite saved fp context again. */
 860		lose_fpu(1);
 861
 862		/* Run the emulator */
 863		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 864					       &fault_addr);
 865
 866		/*
 867		 * We can't allow the emulated instruction to leave any
 868		 * enabled Cause bits set in $fcr31.
 869		 */
 870		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
 871		current->thread.fpu.fcr31 &= ~fcr31;
 872
 873		/* Restore the hardware register state */
 874		own_fpu(1);	/* Using the FPU again.	 */
 875	} else {
 876		sig = SIGFPE;
 877		fault_addr = (void __user *) regs->cp0_epc;
 878	}
 879
 880	/* Send a signal if required.  */
 881	process_fpemu_return(sig, fault_addr, fcr31);
 882
 883out:
 884	exception_exit(prev_state);
 885}
 886
 887void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
 888	const char *str)
 889{
 890	siginfo_t info = { 0 };
 891	char b[40];
 892
 
 893#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 894	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 895			 SIGTRAP) == NOTIFY_STOP)
 896		return;
 897#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 898
 899	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 900		       SIGTRAP) == NOTIFY_STOP)
 901		return;
 902
 903	/*
 904	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
 905	 * insns, even for trap and break codes that indicate arithmetic
 906	 * failures.  Weird ...
 907	 * But should we continue the brokenness???  --macro
 908	 */
 909	switch (code) {
 910	case BRK_OVERFLOW:
 911	case BRK_DIVZERO:
 912		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 913		die_if_kernel(b, regs);
 914		if (code == BRK_DIVZERO)
 915			info.si_code = FPE_INTDIV;
 916		else
 917			info.si_code = FPE_INTOVF;
 918		info.si_signo = SIGFPE;
 919		info.si_addr = (void __user *) regs->cp0_epc;
 920		force_sig_info(SIGFPE, &info, current);
 921		break;
 922	case BRK_BUG:
 923		die_if_kernel("Kernel bug detected", regs);
 924		force_sig(SIGTRAP, current);
 925		break;
 926	case BRK_MEMU:
 927		/*
 928		 * This breakpoint code is used by the FPU emulator to retake
 929		 * control of the CPU after executing the instruction from the
 930		 * delay slot of an emulated branch.
 931		 *
 932		 * Terminate if exception was recognized as a delay slot return
 933		 * otherwise handle as normal.
 934		 */
 935		if (do_dsemulret(regs))
 936			return;
 937
 938		die_if_kernel("Math emu break/trap", regs);
 939		force_sig(SIGTRAP, current);
 940		break;
 941	default:
 942		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 943		die_if_kernel(b, regs);
 944		if (si_code) {
 945			info.si_signo = SIGTRAP;
 946			info.si_code = si_code;
 947			force_sig_info(SIGTRAP, &info, current);
 948		} else {
 949			force_sig(SIGTRAP, current);
 950		}
 951	}
 952}
 953
 954asmlinkage void do_bp(struct pt_regs *regs)
 955{
 956	unsigned long epc = msk_isa16_mode(exception_epc(regs));
 957	unsigned int opcode, bcode;
 958	enum ctx_state prev_state;
 959	mm_segment_t seg;
 960
 961	seg = get_fs();
 962	if (!user_mode(regs))
 963		set_fs(KERNEL_DS);
 964
 965	prev_state = exception_enter();
 966	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 967	if (get_isa16_mode(regs->cp0_epc)) {
 968		u16 instr[2];
 969
 970		if (__get_user(instr[0], (u16 __user *)epc))
 971			goto out_sigsegv;
 972
 973		if (!cpu_has_mmips) {
 974			/* MIPS16e mode */
 975			bcode = (instr[0] >> 5) & 0x3f;
 976		} else if (mm_insn_16bit(instr[0])) {
 977			/* 16-bit microMIPS BREAK */
 978			bcode = instr[0] & 0xf;
 979		} else {
 980			/* 32-bit microMIPS BREAK */
 981			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
 982				goto out_sigsegv;
 983			opcode = (instr[0] << 16) | instr[1];
 984			bcode = (opcode >> 6) & ((1 << 20) - 1);
 985		}
 986	} else {
 987		if (__get_user(opcode, (unsigned int __user *)epc))
 988			goto out_sigsegv;
 989		bcode = (opcode >> 6) & ((1 << 20) - 1);
 990	}
 991
 992	/*
 993	 * There is the ancient bug in the MIPS assemblers that the break
 994	 * code starts left to bit 16 instead to bit 6 in the opcode.
 995	 * Gas is bug-compatible, but not always, grrr...
 996	 * We handle both cases with a simple heuristics.  --macro
 997	 */
 998	if (bcode >= (1 << 10))
 999		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
1000
1001	/*
1002	 * notify the kprobe handlers, if instruction is likely to
1003	 * pertain to them.
1004	 */
1005	switch (bcode) {
1006	case BRK_UPROBE:
1007		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
1008			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1009			goto out;
1010		else
1011			break;
1012	case BRK_UPROBE_XOL:
1013		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
1014			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1015			goto out;
1016		else
1017			break;
1018	case BRK_KPROBE_BP:
1019		if (notify_die(DIE_BREAK, "debug", regs, bcode,
1020			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1021			goto out;
1022		else
1023			break;
1024	case BRK_KPROBE_SSTEPBP:
1025		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
1026			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1027			goto out;
1028		else
1029			break;
1030	default:
1031		break;
1032	}
1033
1034	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
1035
1036out:
1037	set_fs(seg);
1038	exception_exit(prev_state);
1039	return;
1040
1041out_sigsegv:
1042	force_sig(SIGSEGV, current);
1043	goto out;
1044}
1045
1046asmlinkage void do_tr(struct pt_regs *regs)
1047{
1048	u32 opcode, tcode = 0;
1049	enum ctx_state prev_state;
1050	u16 instr[2];
1051	mm_segment_t seg;
1052	unsigned long epc = msk_isa16_mode(exception_epc(regs));
1053
1054	seg = get_fs();
1055	if (!user_mode(regs))
1056		set_fs(get_ds());
1057
1058	prev_state = exception_enter();
1059	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1060	if (get_isa16_mode(regs->cp0_epc)) {
1061		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
1062		    __get_user(instr[1], (u16 __user *)(epc + 2)))
1063			goto out_sigsegv;
1064		opcode = (instr[0] << 16) | instr[1];
1065		/* Immediate versions don't provide a code.  */
1066		if (!(opcode & OPCODE))
1067			tcode = (opcode >> 12) & ((1 << 4) - 1);
1068	} else {
1069		if (__get_user(opcode, (u32 __user *)epc))
1070			goto out_sigsegv;
1071		/* Immediate versions don't provide a code.  */
1072		if (!(opcode & OPCODE))
1073			tcode = (opcode >> 6) & ((1 << 10) - 1);
1074	}
1075
1076	do_trap_or_bp(regs, tcode, 0, "Trap");
1077
1078out:
1079	set_fs(seg);
1080	exception_exit(prev_state);
1081	return;
1082
1083out_sigsegv:
1084	force_sig(SIGSEGV, current);
1085	goto out;
1086}
1087
1088asmlinkage void do_ri(struct pt_regs *regs)
1089{
1090	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1091	unsigned long old_epc = regs->cp0_epc;
1092	unsigned long old31 = regs->regs[31];
1093	enum ctx_state prev_state;
1094	unsigned int opcode = 0;
1095	int status = -1;
1096
1097	/*
1098	 * Avoid any kernel code. Just emulate the R2 instruction
1099	 * as quickly as possible.
1100	 */
1101	if (mipsr2_emulation && cpu_has_mips_r6 &&
1102	    likely(user_mode(regs)) &&
1103	    likely(get_user(opcode, epc) >= 0)) {
1104		unsigned long fcr31 = 0;
1105
1106		status = mipsr2_decoder(regs, opcode, &fcr31);
1107		switch (status) {
1108		case 0:
1109		case SIGEMT:
1110			task_thread_info(current)->r2_emul_return = 1;
1111			return;
1112		case SIGILL:
1113			goto no_r2_instr;
1114		default:
1115			process_fpemu_return(status,
1116					     &current->thread.cp0_baduaddr,
1117					     fcr31);
1118			task_thread_info(current)->r2_emul_return = 1;
1119			return;
1120		}
1121	}
1122
1123no_r2_instr:
1124
1125	prev_state = exception_enter();
1126	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1127
1128	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
1129		       SIGILL) == NOTIFY_STOP)
1130		goto out;
1131
1132	die_if_kernel("Reserved instruction in kernel code", regs);
1133
1134	if (unlikely(compute_return_epc(regs) < 0))
1135		goto out;
1136
1137	if (!get_isa16_mode(regs->cp0_epc)) {
1138		if (unlikely(get_user(opcode, epc) < 0))
1139			status = SIGSEGV;
1140
1141		if (!cpu_has_llsc && status < 0)
1142			status = simulate_llsc(regs, opcode);
1143
1144		if (status < 0)
1145			status = simulate_rdhwr_normal(regs, opcode);
1146
1147		if (status < 0)
1148			status = simulate_sync(regs, opcode);
1149
1150		if (status < 0)
1151			status = simulate_fp(regs, opcode, old_epc, old31);
1152	} else if (cpu_has_mmips) {
1153		unsigned short mmop[2] = { 0 };
1154
1155		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1156			status = SIGSEGV;
1157		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1158			status = SIGSEGV;
1159		opcode = mmop[0];
1160		opcode = (opcode << 16) | mmop[1];
1161
1162		if (status < 0)
1163			status = simulate_rdhwr_mm(regs, opcode);
1164	}
1165
1166	if (status < 0)
1167		status = SIGILL;
1168
1169	if (unlikely(status > 0)) {
1170		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
1171		regs->regs[31] = old31;
1172		force_sig(status, current);
1173	}
1174
1175out:
1176	exception_exit(prev_state);
1177}
1178
1179/*
1180 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
1181 * emulated more than some threshold number of instructions, force migration to
1182 * a "CPU" that has FP support.
1183 */
1184static void mt_ase_fp_affinity(void)
1185{
1186#ifdef CONFIG_MIPS_MT_FPAFF
1187	if (mt_fpemul_threshold > 0 &&
1188	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
1189		/*
1190		 * If there's no FPU present, or if the application has already
1191		 * restricted the allowed set to exclude any CPUs with FPUs,
1192		 * we'll skip the procedure.
1193		 */
1194		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
1195			cpumask_t tmask;
1196
1197			current->thread.user_cpus_allowed
1198				= current->cpus_allowed;
1199			cpumask_and(&tmask, &current->cpus_allowed,
1200				    &mt_fpu_cpumask);
1201			set_cpus_allowed_ptr(current, &tmask);
1202			set_thread_flag(TIF_FPUBOUND);
1203		}
1204	}
1205#endif /* CONFIG_MIPS_MT_FPAFF */
1206}
1207
1208/*
1209 * No lock; only written during early bootup by CPU 0.
1210 */
1211static RAW_NOTIFIER_HEAD(cu2_chain);
1212
1213int __ref register_cu2_notifier(struct notifier_block *nb)
1214{
1215	return raw_notifier_chain_register(&cu2_chain, nb);
1216}
1217
1218int cu2_notifier_call_chain(unsigned long val, void *v)
1219{
1220	return raw_notifier_call_chain(&cu2_chain, val, v);
1221}
1222
1223static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1224	void *data)
1225{
1226	struct pt_regs *regs = data;
1227
1228	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
1229			      "instruction", regs);
1230	force_sig(SIGILL, current);
1231
1232	return NOTIFY_OK;
1233}
1234
1235static int wait_on_fp_mode_switch(atomic_t *p)
1236{
1237	/*
1238	 * The FP mode for this task is currently being switched. That may
1239	 * involve modifications to the format of this tasks FP context which
1240	 * make it unsafe to proceed with execution for the moment. Instead,
1241	 * schedule some other task.
1242	 */
1243	schedule();
1244	return 0;
1245}
1246
1247static int enable_restore_fp_context(int msa)
1248{
1249	int err, was_fpu_owner, prior_msa;
1250
1251	/*
1252	 * If an FP mode switch is currently underway, wait for it to
1253	 * complete before proceeding.
1254	 */
1255	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
1256			 wait_on_fp_mode_switch, TASK_KILLABLE);
1257
1258	if (!used_math()) {
1259		/* First time FP context user. */
1260		preempt_disable();
1261		err = init_fpu();
1262		if (msa && !err) {
1263			enable_msa();
1264			init_msa_upper();
1265			set_thread_flag(TIF_USEDMSA);
1266			set_thread_flag(TIF_MSA_CTX_LIVE);
1267		}
1268		preempt_enable();
1269		if (!err)
1270			set_used_math();
1271		return err;
1272	}
1273
1274	/*
1275	 * This task has formerly used the FP context.
1276	 *
1277	 * If this thread has no live MSA vector context then we can simply
1278	 * restore the scalar FP context. If it has live MSA vector context
1279	 * (that is, it has or may have used MSA since last performing a
1280	 * function call) then we'll need to restore the vector context. This
1281	 * applies even if we're currently only executing a scalar FP
1282	 * instruction. This is because if we were to later execute an MSA
1283	 * instruction then we'd either have to:
1284	 *
1285	 *  - Restore the vector context & clobber any registers modified by
1286	 *    scalar FP instructions between now & then.
1287	 *
1288	 * or
1289	 *
1290	 *  - Not restore the vector context & lose the most significant bits
1291	 *    of all vector registers.
1292	 *
1293	 * Neither of those options is acceptable. We cannot restore the least
1294	 * significant bits of the registers now & only restore the most
1295	 * significant bits later because the most significant bits of any
1296	 * vector registers whose aliased FP register is modified now will have
1297	 * been zeroed. We'd have no way to know that when restoring the vector
1298	 * context & thus may load an outdated value for the most significant
1299	 * bits of a vector register.
1300	 */
1301	if (!msa && !thread_msa_context_live())
1302		return own_fpu(1);
1303
1304	/*
1305	 * This task is using or has previously used MSA. Thus we require
1306	 * that Status.FR == 1.
1307	 */
1308	preempt_disable();
1309	was_fpu_owner = is_fpu_owner();
1310	err = own_fpu_inatomic(0);
1311	if (err)
1312		goto out;
1313
1314	enable_msa();
1315	write_msa_csr(current->thread.fpu.msacsr);
1316	set_thread_flag(TIF_USEDMSA);
1317
1318	/*
1319	 * If this is the first time that the task is using MSA and it has
1320	 * previously used scalar FP in this time slice then we already nave
1321	 * FP context which we shouldn't clobber. We do however need to clear
1322	 * the upper 64b of each vector register so that this task has no
1323	 * opportunity to see data left behind by another.
1324	 */
1325	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
1326	if (!prior_msa && was_fpu_owner) {
1327		init_msa_upper();
1328
1329		goto out;
1330	}
1331
1332	if (!prior_msa) {
1333		/*
1334		 * Restore the least significant 64b of each vector register
1335		 * from the existing scalar FP context.
1336		 */
1337		_restore_fp(current);
1338
1339		/*
1340		 * The task has not formerly used MSA, so clear the upper 64b
1341		 * of each vector register such that it cannot see data left
1342		 * behind by another task.
1343		 */
1344		init_msa_upper();
1345	} else {
1346		/* We need to restore the vector context. */
1347		restore_msa(current);
1348
1349		/* Restore the scalar FP control & status register */
1350		if (!was_fpu_owner)
1351			write_32bit_cp1_register(CP1_STATUS,
1352						 current->thread.fpu.fcr31);
1353	}
1354
1355out:
1356	preempt_enable();
1357
1358	return 0;
1359}
1360
1361asmlinkage void do_cpu(struct pt_regs *regs)
1362{
1363	enum ctx_state prev_state;
1364	unsigned int __user *epc;
1365	unsigned long old_epc, old31;
1366	void __user *fault_addr;
1367	unsigned int opcode;
1368	unsigned long fcr31;
1369	unsigned int cpid;
1370	int status, err;
1371	int sig;
1372
1373	prev_state = exception_enter();
1374	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1375
1376	if (cpid != 2)
1377		die_if_kernel("do_cpu invoked from kernel context!", regs);
1378
1379	switch (cpid) {
1380	case 0:
1381		epc = (unsigned int __user *)exception_epc(regs);
1382		old_epc = regs->cp0_epc;
1383		old31 = regs->regs[31];
1384		opcode = 0;
1385		status = -1;
1386
1387		if (unlikely(compute_return_epc(regs) < 0))
1388			break;
1389
1390		if (!get_isa16_mode(regs->cp0_epc)) {
1391			if (unlikely(get_user(opcode, epc) < 0))
1392				status = SIGSEGV;
1393
1394			if (!cpu_has_llsc && status < 0)
1395				status = simulate_llsc(regs, opcode);
1396		}
1397
1398		if (status < 0)
1399			status = SIGILL;
1400
1401		if (unlikely(status > 0)) {
1402			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
1403			regs->regs[31] = old31;
1404			force_sig(status, current);
1405		}
1406
1407		break;
1408
1409	case 3:
1410		/*
1411		 * The COP3 opcode space and consequently the CP0.Status.CU3
1412		 * bit and the CP0.Cause.CE=3 encoding have been removed as
1413		 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
1414		 * up the space has been reused for COP1X instructions, that
1415		 * are enabled by the CP0.Status.CU1 bit and consequently
1416		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
1417		 * exceptions.  Some FPU-less processors that implement one
1418		 * of these ISAs however use this code erroneously for COP1X
1419		 * instructions.  Therefore we redirect this trap to the FP
1420		 * emulator too.
1421		 */
1422		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
1423			force_sig(SIGILL, current);
1424			break;
1425		}
1426		/* Fall through.  */
1427
1428	case 1:
1429		err = enable_restore_fp_context(0);
1430
1431		if (raw_cpu_has_fpu && !err)
1432			break;
1433
1434		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1435					       &fault_addr);
1436
1437		/*
1438		 * We can't allow the emulated instruction to leave
1439		 * any enabled Cause bits set in $fcr31.
1440		 */
1441		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
1442		current->thread.fpu.fcr31 &= ~fcr31;
1443
1444		/* Send a signal if required.  */
1445		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
1446			mt_ase_fp_affinity();
1447
1448		break;
1449
1450	case 2:
1451		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1452		break;
1453	}
1454
1455	exception_exit(prev_state);
1456}
1457
1458asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
1459{
1460	enum ctx_state prev_state;
1461
1462	prev_state = exception_enter();
1463	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1464	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
1465		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
1466		goto out;
1467
1468	/* Clear MSACSR.Cause before enabling interrupts */
1469	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
1470	local_irq_enable();
1471
1472	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1473	force_sig(SIGFPE, current);
1474out:
1475	exception_exit(prev_state);
1476}
1477
1478asmlinkage void do_msa(struct pt_regs *regs)
1479{
1480	enum ctx_state prev_state;
1481	int err;
1482
1483	prev_state = exception_enter();
1484
1485	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
1486		force_sig(SIGILL, current);
1487		goto out;
1488	}
1489
1490	die_if_kernel("do_msa invoked from kernel context!", regs);
1491
1492	err = enable_restore_fp_context(1);
1493	if (err)
1494		force_sig(SIGILL, current);
1495out:
1496	exception_exit(prev_state);
1497}
1498
1499asmlinkage void do_mdmx(struct pt_regs *regs)
1500{
1501	enum ctx_state prev_state;
1502
1503	prev_state = exception_enter();
1504	force_sig(SIGILL, current);
1505	exception_exit(prev_state);
1506}
1507
1508/*
1509 * Called with interrupts disabled.
1510 */
1511asmlinkage void do_watch(struct pt_regs *regs)
1512{
1513	siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
1514	enum ctx_state prev_state;
1515
 
 
 
 
1516	prev_state = exception_enter();
1517	/*
1518	 * Clear WP (bit 22) bit of cause register so we don't loop
1519	 * forever.
1520	 */
1521	clear_c0_cause(CAUSEF_WP);
1522
1523	/*
1524	 * If the current thread has the watch registers loaded, save
1525	 * their values and send SIGTRAP.  Otherwise another thread
1526	 * left the registers set, clear them and continue.
1527	 */
1528	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1529		mips_read_watch_registers();
1530		local_irq_enable();
1531		force_sig_info(SIGTRAP, &info, current);
1532	} else {
1533		mips_clear_watch_registers();
1534		local_irq_enable();
1535	}
1536	exception_exit(prev_state);
1537}
1538
1539asmlinkage void do_mcheck(struct pt_regs *regs)
1540{
1541	int multi_match = regs->cp0_status & ST0_TS;
1542	enum ctx_state prev_state;
1543	mm_segment_t old_fs = get_fs();
1544
1545	prev_state = exception_enter();
1546	show_regs(regs);
1547
1548	if (multi_match) {
1549		dump_tlb_regs();
1550		pr_info("\n");
1551		dump_tlb_all();
1552	}
1553
1554	if (!user_mode(regs))
1555		set_fs(KERNEL_DS);
1556
1557	show_code((unsigned int __user *) regs->cp0_epc);
1558
1559	set_fs(old_fs);
1560
1561	/*
1562	 * Some chips may have other causes of machine check (e.g. SB1
1563	 * graduation timer)
1564	 */
1565	panic("Caught Machine Check exception - %scaused by multiple "
1566	      "matching entries in the TLB.",
1567	      (multi_match) ? "" : "not ");
1568}
1569
1570asmlinkage void do_mt(struct pt_regs *regs)
1571{
1572	int subcode;
1573
1574	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1575			>> VPECONTROL_EXCPT_SHIFT;
1576	switch (subcode) {
1577	case 0:
1578		printk(KERN_DEBUG "Thread Underflow\n");
1579		break;
1580	case 1:
1581		printk(KERN_DEBUG "Thread Overflow\n");
1582		break;
1583	case 2:
1584		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1585		break;
1586	case 3:
1587		printk(KERN_DEBUG "Gating Storage Exception\n");
1588		break;
1589	case 4:
1590		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1591		break;
1592	case 5:
1593		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1594		break;
1595	default:
1596		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1597			subcode);
1598		break;
1599	}
1600	die_if_kernel("MIPS MT Thread exception in kernel", regs);
1601
1602	force_sig(SIGILL, current);
1603}
1604
1605
1606asmlinkage void do_dsp(struct pt_regs *regs)
1607{
1608	if (cpu_has_dsp)
1609		panic("Unexpected DSP exception");
1610
1611	force_sig(SIGILL, current);
1612}
1613
1614asmlinkage void do_reserved(struct pt_regs *regs)
1615{
1616	/*
1617	 * Game over - no way to handle this if it ever occurs.	 Most probably
1618	 * caused by a new unknown cpu type or after another deadly
1619	 * hard/software error.
1620	 */
1621	show_regs(regs);
1622	panic("Caught reserved exception %ld - should not happen.",
1623	      (regs->cp0_cause & 0x7f) >> 2);
1624}
1625
1626static int __initdata l1parity = 1;
1627static int __init nol1parity(char *s)
1628{
1629	l1parity = 0;
1630	return 1;
1631}
1632__setup("nol1par", nol1parity);
1633static int __initdata l2parity = 1;
1634static int __init nol2parity(char *s)
1635{
1636	l2parity = 0;
1637	return 1;
1638}
1639__setup("nol2par", nol2parity);
1640
1641/*
1642 * Some MIPS CPUs can enable/disable for cache parity detection, but do
1643 * it different ways.
1644 */
1645static inline void parity_protection_init(void)
1646{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1647	switch (current_cpu_type()) {
1648	case CPU_24K:
1649	case CPU_34K:
1650	case CPU_74K:
1651	case CPU_1004K:
1652	case CPU_1074K:
1653	case CPU_INTERAPTIV:
1654	case CPU_PROAPTIV:
1655	case CPU_P5600:
1656	case CPU_QEMU_GENERIC:
1657	case CPU_I6400:
1658	case CPU_P6600:
1659		{
1660#define ERRCTL_PE	0x80000000
1661#define ERRCTL_L2P	0x00800000
1662			unsigned long errctl;
1663			unsigned int l1parity_present, l2parity_present;
1664
1665			errctl = read_c0_ecc();
1666			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
1667
1668			/* probe L1 parity support */
1669			write_c0_ecc(errctl | ERRCTL_PE);
1670			back_to_back_c0_hazard();
1671			l1parity_present = (read_c0_ecc() & ERRCTL_PE);
1672
1673			/* probe L2 parity support */
1674			write_c0_ecc(errctl|ERRCTL_L2P);
1675			back_to_back_c0_hazard();
1676			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
1677
1678			if (l1parity_present && l2parity_present) {
1679				if (l1parity)
1680					errctl |= ERRCTL_PE;
1681				if (l1parity ^ l2parity)
1682					errctl |= ERRCTL_L2P;
1683			} else if (l1parity_present) {
1684				if (l1parity)
1685					errctl |= ERRCTL_PE;
1686			} else if (l2parity_present) {
1687				if (l2parity)
1688					errctl |= ERRCTL_L2P;
1689			} else {
1690				/* No parity available */
1691			}
1692
1693			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
1694
1695			write_c0_ecc(errctl);
1696			back_to_back_c0_hazard();
1697			errctl = read_c0_ecc();
1698			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
1699
1700			if (l1parity_present)
1701				printk(KERN_INFO "Cache parity protection %sabled\n",
1702				       (errctl & ERRCTL_PE) ? "en" : "dis");
1703
1704			if (l2parity_present) {
1705				if (l1parity_present && l1parity)
1706					errctl ^= ERRCTL_L2P;
1707				printk(KERN_INFO "L2 cache parity protection %sabled\n",
1708				       (errctl & ERRCTL_L2P) ? "en" : "dis");
1709			}
1710		}
1711		break;
1712
1713	case CPU_5KC:
1714	case CPU_5KE:
1715	case CPU_LOONGSON1:
1716		write_c0_ecc(0x80000000);
1717		back_to_back_c0_hazard();
1718		/* Set the PE bit (bit 31) in the c0_errctl register. */
1719		printk(KERN_INFO "Cache parity protection %sabled\n",
1720		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
1721		break;
1722	case CPU_20KC:
1723	case CPU_25KF:
1724		/* Clear the DE bit (bit 16) in the c0_status register. */
1725		printk(KERN_INFO "Enable cache parity protection for "
1726		       "MIPS 20KC/25KF CPUs.\n");
1727		clear_c0_status(ST0_DE);
1728		break;
1729	default:
1730		break;
1731	}
1732}
1733
1734asmlinkage void cache_parity_error(void)
1735{
1736	const int field = 2 * sizeof(unsigned long);
1737	unsigned int reg_val;
1738
1739	/* For the moment, report the problem and hang. */
1740	printk("Cache error exception:\n");
1741	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1742	reg_val = read_c0_cacheerr();
1743	printk("c0_cacheerr == %08x\n", reg_val);
1744
1745	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1746	       reg_val & (1<<30) ? "secondary" : "primary",
1747	       reg_val & (1<<31) ? "data" : "insn");
1748	if ((cpu_has_mips_r2_r6) &&
1749	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1750		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1751			reg_val & (1<<29) ? "ED " : "",
1752			reg_val & (1<<28) ? "ET " : "",
1753			reg_val & (1<<27) ? "ES " : "",
1754			reg_val & (1<<26) ? "EE " : "",
1755			reg_val & (1<<25) ? "EB " : "",
1756			reg_val & (1<<24) ? "EI " : "",
1757			reg_val & (1<<23) ? "E1 " : "",
1758			reg_val & (1<<22) ? "E0 " : "");
1759	} else {
1760		pr_err("Error bits: %s%s%s%s%s%s%s\n",
1761			reg_val & (1<<29) ? "ED " : "",
1762			reg_val & (1<<28) ? "ET " : "",
1763			reg_val & (1<<26) ? "EE " : "",
1764			reg_val & (1<<25) ? "EB " : "",
1765			reg_val & (1<<24) ? "EI " : "",
1766			reg_val & (1<<23) ? "E1 " : "",
1767			reg_val & (1<<22) ? "E0 " : "");
1768	}
1769	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1770
1771#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1772	if (reg_val & (1<<22))
1773		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1774
1775	if (reg_val & (1<<23))
1776		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1777#endif
1778
1779	panic("Can't handle the cache error!");
1780}
1781
1782asmlinkage void do_ftlb(void)
1783{
1784	const int field = 2 * sizeof(unsigned long);
1785	unsigned int reg_val;
1786
1787	/* For the moment, report the problem and hang. */
1788	if ((cpu_has_mips_r2_r6) &&
1789	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
1790	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
1791		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1792		       read_c0_ecc());
1793		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1794		reg_val = read_c0_cacheerr();
1795		pr_err("c0_cacheerr == %08x\n", reg_val);
1796
1797		if ((reg_val & 0xc0000000) == 0xc0000000) {
1798			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
1799		} else {
1800			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1801			       reg_val & (1<<30) ? "secondary" : "primary",
1802			       reg_val & (1<<31) ? "data" : "insn");
1803		}
1804	} else {
1805		pr_err("FTLB error exception\n");
1806	}
1807	/* Just print the cacheerr bits for now */
1808	cache_parity_error();
1809}
1810
1811/*
1812 * SDBBP EJTAG debug exception handler.
1813 * We skip the instruction and return to the next instruction.
1814 */
1815void ejtag_exception_handler(struct pt_regs *regs)
1816{
1817	const int field = 2 * sizeof(unsigned long);
1818	unsigned long depc, old_epc, old_ra;
1819	unsigned int debug;
1820
1821	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1822	depc = read_c0_depc();
1823	debug = read_c0_debug();
1824	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1825	if (debug & 0x80000000) {
1826		/*
1827		 * In branch delay slot.
1828		 * We cheat a little bit here and use EPC to calculate the
1829		 * debug return address (DEPC). EPC is restored after the
1830		 * calculation.
1831		 */
1832		old_epc = regs->cp0_epc;
1833		old_ra = regs->regs[31];
1834		regs->cp0_epc = depc;
1835		compute_return_epc(regs);
1836		depc = regs->cp0_epc;
1837		regs->cp0_epc = old_epc;
1838		regs->regs[31] = old_ra;
1839	} else
1840		depc += 4;
1841	write_c0_depc(depc);
1842
1843#if 0
1844	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1845	write_c0_debug(debug | 0x100);
1846#endif
1847}
1848
1849/*
1850 * NMI exception handler.
1851 * No lock; only written during early bootup by CPU 0.
1852 */
1853static RAW_NOTIFIER_HEAD(nmi_chain);
1854
1855int register_nmi_notifier(struct notifier_block *nb)
1856{
1857	return raw_notifier_chain_register(&nmi_chain, nb);
1858}
1859
1860void __noreturn nmi_exception_handler(struct pt_regs *regs)
1861{
1862	char str[100];
1863
1864	nmi_enter();
1865	raw_notifier_call_chain(&nmi_chain, 0, regs);
1866	bust_spinlocks(1);
1867	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
1868		 smp_processor_id(), regs->cp0_epc);
1869	regs->cp0_epc = read_c0_errorepc();
1870	die(str, regs);
1871	nmi_exit();
1872}
1873
1874#define VECTORSPACING 0x100	/* for EI/VI mode */
1875
1876unsigned long ebase;
1877EXPORT_SYMBOL_GPL(ebase);
1878unsigned long exception_handlers[32];
1879unsigned long vi_handlers[64];
1880
1881void __init *set_except_vector(int n, void *addr)
1882{
1883	unsigned long handler = (unsigned long) addr;
1884	unsigned long old_handler;
1885
1886#ifdef CONFIG_CPU_MICROMIPS
1887	/*
1888	 * Only the TLB handlers are cache aligned with an even
1889	 * address. All other handlers are on an odd address and
1890	 * require no modification. Otherwise, MIPS32 mode will
1891	 * be entered when handling any TLB exceptions. That
1892	 * would be bad...since we must stay in microMIPS mode.
1893	 */
1894	if (!(handler & 0x1))
1895		handler |= 1;
1896#endif
1897	old_handler = xchg(&exception_handlers[n], handler);
1898
1899	if (n == 0 && cpu_has_divec) {
1900#ifdef CONFIG_CPU_MICROMIPS
1901		unsigned long jump_mask = ~((1 << 27) - 1);
1902#else
1903		unsigned long jump_mask = ~((1 << 28) - 1);
1904#endif
1905		u32 *buf = (u32 *)(ebase + 0x200);
1906		unsigned int k0 = 26;
1907		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
1908			uasm_i_j(&buf, handler & ~jump_mask);
1909			uasm_i_nop(&buf);
1910		} else {
1911			UASM_i_LA(&buf, k0, handler);
1912			uasm_i_jr(&buf, k0);
1913			uasm_i_nop(&buf);
1914		}
1915		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
1916	}
1917	return (void *)old_handler;
1918}
1919
1920static void do_default_vi(void)
1921{
1922	show_regs(get_irq_regs());
1923	panic("Caught unexpected vectored interrupt.");
1924}
1925
1926static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1927{
1928	unsigned long handler;
1929	unsigned long old_handler = vi_handlers[n];
1930	int srssets = current_cpu_data.srsets;
1931	u16 *h;
1932	unsigned char *b;
1933
1934	BUG_ON(!cpu_has_veic && !cpu_has_vint);
1935
1936	if (addr == NULL) {
1937		handler = (unsigned long) do_default_vi;
1938		srs = 0;
1939	} else
1940		handler = (unsigned long) addr;
1941	vi_handlers[n] = handler;
1942
1943	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1944
1945	if (srs >= srssets)
1946		panic("Shadow register set %d not supported", srs);
1947
1948	if (cpu_has_veic) {
1949		if (board_bind_eic_interrupt)
1950			board_bind_eic_interrupt(n, srs);
1951	} else if (cpu_has_vint) {
1952		/* SRSMap is only defined if shadow sets are implemented */
1953		if (srssets > 1)
1954			change_c0_srsmap(0xf << n*4, srs << n*4);
1955	}
1956
1957	if (srs == 0) {
1958		/*
1959		 * If no shadow set is selected then use the default handler
1960		 * that does normal register saving and standard interrupt exit
1961		 */
1962		extern char except_vec_vi, except_vec_vi_lui;
1963		extern char except_vec_vi_ori, except_vec_vi_end;
1964		extern char rollback_except_vec_vi;
1965		char *vec_start = using_rollback_handler() ?
1966			&rollback_except_vec_vi : &except_vec_vi;
1967#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1968		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
1969		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
1970#else
1971		const int lui_offset = &except_vec_vi_lui - vec_start;
1972		const int ori_offset = &except_vec_vi_ori - vec_start;
1973#endif
1974		const int handler_len = &except_vec_vi_end - vec_start;
1975
1976		if (handler_len > VECTORSPACING) {
1977			/*
1978			 * Sigh... panicing won't help as the console
1979			 * is probably not configured :(
1980			 */
1981			panic("VECTORSPACING too small");
1982		}
1983
1984		set_handler(((unsigned long)b - ebase), vec_start,
1985#ifdef CONFIG_CPU_MICROMIPS
1986				(handler_len - 1));
1987#else
1988				handler_len);
1989#endif
1990		h = (u16 *)(b + lui_offset);
1991		*h = (handler >> 16) & 0xffff;
1992		h = (u16 *)(b + ori_offset);
1993		*h = (handler & 0xffff);
1994		local_flush_icache_range((unsigned long)b,
1995					 (unsigned long)(b+handler_len));
1996	}
1997	else {
1998		/*
1999		 * In other cases jump directly to the interrupt handler. It
2000		 * is the handler's responsibility to save registers if required
2001		 * (eg hi/lo) and return from the exception using "eret".
2002		 */
2003		u32 insn;
2004
2005		h = (u16 *)b;
2006		/* j handler */
2007#ifdef CONFIG_CPU_MICROMIPS
2008		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
2009#else
2010		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
2011#endif
2012		h[0] = (insn >> 16) & 0xffff;
2013		h[1] = insn & 0xffff;
2014		h[2] = 0;
2015		h[3] = 0;
2016		local_flush_icache_range((unsigned long)b,
2017					 (unsigned long)(b+8));
2018	}
2019
2020	return (void *)old_handler;
2021}
2022
2023void *set_vi_handler(int n, vi_handler_t addr)
2024{
2025	return set_vi_srs_handler(n, addr, 0);
2026}
2027
2028extern void tlb_init(void);
2029
2030/*
2031 * Timer interrupt
2032 */
2033int cp0_compare_irq;
2034EXPORT_SYMBOL_GPL(cp0_compare_irq);
2035int cp0_compare_irq_shift;
2036
2037/*
2038 * Performance counter IRQ or -1 if shared with timer
2039 */
2040int cp0_perfcount_irq;
2041EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
2042
2043/*
2044 * Fast debug channel IRQ or -1 if not present
2045 */
2046int cp0_fdc_irq;
2047EXPORT_SYMBOL_GPL(cp0_fdc_irq);
2048
2049static int noulri;
2050
2051static int __init ulri_disable(char *s)
2052{
2053	pr_info("Disabling ulri\n");
2054	noulri = 1;
2055
2056	return 1;
2057}
2058__setup("noulri", ulri_disable);
2059
2060/* configure STATUS register */
2061static void configure_status(void)
2062{
2063	/*
2064	 * Disable coprocessors and select 32-bit or 64-bit addressing
2065	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
2066	 * flag that some firmware may have left set and the TS bit (for
2067	 * IP27).  Set XX for ISA IV code to work.
2068	 */
2069	unsigned int status_set = ST0_CU0;
2070#ifdef CONFIG_64BIT
2071	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
2072#endif
2073	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
2074		status_set |= ST0_XX;
2075	if (cpu_has_dsp)
2076		status_set |= ST0_MX;
2077
2078	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
2079			 status_set);
2080}
2081
2082unsigned int hwrena;
2083EXPORT_SYMBOL_GPL(hwrena);
2084
2085/* configure HWRENA register */
2086static void configure_hwrena(void)
2087{
2088	hwrena = cpu_hwrena_impl_bits;
2089
2090	if (cpu_has_mips_r2_r6)
2091		hwrena |= MIPS_HWRENA_CPUNUM |
2092			  MIPS_HWRENA_SYNCISTEP |
2093			  MIPS_HWRENA_CC |
2094			  MIPS_HWRENA_CCRES;
2095
2096	if (!noulri && cpu_has_userlocal)
2097		hwrena |= MIPS_HWRENA_ULR;
2098
2099	if (hwrena)
2100		write_c0_hwrena(hwrena);
2101}
2102
2103static void configure_exception_vector(void)
2104{
2105	if (cpu_has_veic || cpu_has_vint) {
2106		unsigned long sr = set_c0_status(ST0_BEV);
2107		/* If available, use WG to set top bits of EBASE */
2108		if (cpu_has_ebase_wg) {
2109#ifdef CONFIG_64BIT
2110			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
2111#else
2112			write_c0_ebase(ebase | MIPS_EBASE_WG);
2113#endif
2114		}
2115		write_c0_ebase(ebase);
2116		write_c0_status(sr);
2117		/* Setting vector spacing enables EI/VI mode  */
2118		change_c0_intctl(0x3e0, VECTORSPACING);
2119	}
2120	if (cpu_has_divec) {
2121		if (cpu_has_mipsmt) {
2122			unsigned int vpflags = dvpe();
2123			set_c0_cause(CAUSEF_IV);
2124			evpe(vpflags);
2125		} else
2126			set_c0_cause(CAUSEF_IV);
2127	}
2128}
2129
2130void per_cpu_trap_init(bool is_boot_cpu)
2131{
2132	unsigned int cpu = smp_processor_id();
2133
2134	configure_status();
2135	configure_hwrena();
2136
2137	configure_exception_vector();
2138
2139	/*
2140	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
2141	 *
2142	 *  o read IntCtl.IPTI to determine the timer interrupt
2143	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
2144	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
2145	 */
2146	if (cpu_has_mips_r2_r6) {
2147		/*
2148		 * We shouldn't trust a secondary core has a sane EBASE register
2149		 * so use the one calculated by the boot CPU.
2150		 */
2151		if (!is_boot_cpu) {
2152			/* If available, use WG to set top bits of EBASE */
2153			if (cpu_has_ebase_wg) {
2154#ifdef CONFIG_64BIT
2155				write_c0_ebase_64(ebase | MIPS_EBASE_WG);
2156#else
2157				write_c0_ebase(ebase | MIPS_EBASE_WG);
2158#endif
2159			}
2160			write_c0_ebase(ebase);
2161		}
2162
2163		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2164		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2165		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
2166		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
2167		if (!cp0_fdc_irq)
2168			cp0_fdc_irq = -1;
2169
2170	} else {
2171		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
2172		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
2173		cp0_perfcount_irq = -1;
2174		cp0_fdc_irq = -1;
2175	}
2176
2177	if (!cpu_data[cpu].asid_cache)
2178		cpu_data[cpu].asid_cache = asid_first_version(cpu);
2179
2180	atomic_inc(&init_mm.mm_count);
2181	current->active_mm = &init_mm;
2182	BUG_ON(current->mm);
2183	enter_lazy_tlb(&init_mm, current);
2184
2185	/* Boot CPU's cache setup in setup_arch(). */
2186	if (!is_boot_cpu)
2187		cpu_cache_init();
2188	tlb_init();
2189	TLBMISS_HANDLER_SETUP();
2190}
2191
2192/* Install CPU exception handler */
2193void set_handler(unsigned long offset, void *addr, unsigned long size)
2194{
2195#ifdef CONFIG_CPU_MICROMIPS
2196	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
2197#else
2198	memcpy((void *)(ebase + offset), addr, size);
2199#endif
2200	local_flush_icache_range(ebase + offset, ebase + offset + size);
2201}
2202
2203static char panic_null_cerr[] =
2204	"Trying to set NULL cache error exception handler";
2205
2206/*
2207 * Install uncached CPU exception handler.
2208 * This is suitable only for the cache error exception which is the only
2209 * exception handler that is being run uncached.
2210 */
2211void set_uncached_handler(unsigned long offset, void *addr,
2212	unsigned long size)
2213{
2214	unsigned long uncached_ebase = CKSEG1ADDR(ebase);
2215
2216	if (!addr)
2217		panic(panic_null_cerr);
2218
2219	memcpy((void *)(uncached_ebase + offset), addr, size);
2220}
2221
2222static int __initdata rdhwr_noopt;
2223static int __init set_rdhwr_noopt(char *str)
2224{
2225	rdhwr_noopt = 1;
2226	return 1;
2227}
2228
2229__setup("rdhwr_noopt", set_rdhwr_noopt);
2230
2231void __init trap_init(void)
2232{
2233	extern char except_vec3_generic;
2234	extern char except_vec4;
2235	extern char except_vec3_r4000;
2236	unsigned long i;
2237
2238	check_wait();
2239
2240	if (cpu_has_veic || cpu_has_vint) {
2241		unsigned long size = 0x200 + VECTORSPACING*64;
2242		phys_addr_t ebase_pa;
2243
2244		ebase = (unsigned long)
2245			__alloc_bootmem(size, 1 << fls(size), 0);
2246
2247		/*
2248		 * Try to ensure ebase resides in KSeg0 if possible.
2249		 *
2250		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
2251		 * hitting a poorly defined exception base for Cache Errors.
2252		 * The allocation is likely to be in the low 512MB of physical,
2253		 * in which case we should be able to convert to KSeg0.
2254		 *
2255		 * EVA is special though as it allows segments to be rearranged
2256		 * and to become uncached during cache error handling.
2257		 */
2258		ebase_pa = __pa(ebase);
2259		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
2260			ebase = CKSEG0ADDR(ebase_pa);
2261	} else {
2262		ebase = CAC_BASE;
2263
2264		if (cpu_has_mips_r2_r6) {
2265			if (cpu_has_ebase_wg) {
2266#ifdef CONFIG_64BIT
2267				ebase = (read_c0_ebase_64() & ~0xfff);
2268#else
2269				ebase = (read_c0_ebase() & ~0xfff);
2270#endif
2271			} else {
2272				ebase += (read_c0_ebase() & 0x3ffff000);
2273			}
2274		}
2275	}
2276
2277	if (cpu_has_mmips) {
2278		unsigned int config3 = read_c0_config3();
2279
2280		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
2281			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
2282		else
2283			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
2284	}
2285
2286	if (board_ebase_setup)
2287		board_ebase_setup();
2288	per_cpu_trap_init(true);
2289
2290	/*
2291	 * Copy the generic exception handlers to their final destination.
2292	 * This will be overridden later as suitable for a particular
2293	 * configuration.
2294	 */
2295	set_handler(0x180, &except_vec3_generic, 0x80);
2296
2297	/*
2298	 * Setup default vectors
2299	 */
2300	for (i = 0; i <= 31; i++)
2301		set_except_vector(i, handle_reserved);
2302
2303	/*
2304	 * Copy the EJTAG debug exception vector handler code to it's final
2305	 * destination.
2306	 */
2307	if (cpu_has_ejtag && board_ejtag_handler_setup)
2308		board_ejtag_handler_setup();
2309
2310	/*
2311	 * Only some CPUs have the watch exceptions.
2312	 */
2313	if (cpu_has_watch)
2314		set_except_vector(EXCCODE_WATCH, handle_watch);
2315
2316	/*
2317	 * Initialise interrupt handlers
2318	 */
2319	if (cpu_has_veic || cpu_has_vint) {
2320		int nvec = cpu_has_veic ? 64 : 8;
2321		for (i = 0; i < nvec; i++)
2322			set_vi_handler(i, NULL);
2323	}
2324	else if (cpu_has_divec)
2325		set_handler(0x200, &except_vec4, 0x8);
2326
2327	/*
2328	 * Some CPUs can enable/disable for cache parity detection, but does
2329	 * it different ways.
2330	 */
2331	parity_protection_init();
2332
2333	/*
2334	 * The Data Bus Errors / Instruction Bus Errors are signaled
2335	 * by external hardware.  Therefore these two exceptions
2336	 * may have board specific handlers.
2337	 */
2338	if (board_be_init)
2339		board_be_init();
2340
2341	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
2342					rollback_handle_int : handle_int);
2343	set_except_vector(EXCCODE_MOD, handle_tlbm);
2344	set_except_vector(EXCCODE_TLBL, handle_tlbl);
2345	set_except_vector(EXCCODE_TLBS, handle_tlbs);
2346
2347	set_except_vector(EXCCODE_ADEL, handle_adel);
2348	set_except_vector(EXCCODE_ADES, handle_ades);
2349
2350	set_except_vector(EXCCODE_IBE, handle_ibe);
2351	set_except_vector(EXCCODE_DBE, handle_dbe);
2352
2353	set_except_vector(EXCCODE_SYS, handle_sys);
2354	set_except_vector(EXCCODE_BP, handle_bp);
2355
2356	if (rdhwr_noopt)
2357		set_except_vector(EXCCODE_RI, handle_ri);
2358	else {
2359		if (cpu_has_vtag_icache)
2360			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2361		else if (current_cpu_type() == CPU_LOONGSON3)
2362			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2363		else
2364			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
2365	}
2366
2367	set_except_vector(EXCCODE_CPU, handle_cpu);
2368	set_except_vector(EXCCODE_OV, handle_ov);
2369	set_except_vector(EXCCODE_TR, handle_tr);
2370	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
2371
2372	if (current_cpu_type() == CPU_R6000 ||
2373	    current_cpu_type() == CPU_R6000A) {
2374		/*
2375		 * The R6000 is the only R-series CPU that features a machine
2376		 * check exception (similar to the R4000 cache error) and
2377		 * unaligned ldc1/sdc1 exception.  The handlers have not been
2378		 * written yet.	 Well, anyway there is no R6000 machine on the
2379		 * current list of targets for Linux/MIPS.
2380		 * (Duh, crap, there is someone with a triple R6k machine)
2381		 */
2382		//set_except_vector(14, handle_mc);
2383		//set_except_vector(15, handle_ndc);
2384	}
2385
2386
2387	if (board_nmi_handler_setup)
2388		board_nmi_handler_setup();
2389
2390	if (cpu_has_fpu && !cpu_has_nofpuex)
2391		set_except_vector(EXCCODE_FPE, handle_fpe);
2392
2393	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
2394
2395	if (cpu_has_rixiex) {
2396		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
2397		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
2398	}
2399
2400	set_except_vector(EXCCODE_MSADIS, handle_msa);
2401	set_except_vector(EXCCODE_MDMX, handle_mdmx);
2402
2403	if (cpu_has_mcheck)
2404		set_except_vector(EXCCODE_MCHECK, handle_mcheck);
2405
2406	if (cpu_has_mipsmt)
2407		set_except_vector(EXCCODE_THREAD, handle_mt);
2408
2409	set_except_vector(EXCCODE_DSPDIS, handle_dsp);
2410
2411	if (board_cache_error_setup)
2412		board_cache_error_setup();
2413
2414	if (cpu_has_vce)
2415		/* Special exception: R4[04]00 uses also the divec space. */
2416		set_handler(0x180, &except_vec3_r4000, 0x100);
2417	else if (cpu_has_4kex)
2418		set_handler(0x180, &except_vec3_generic, 0x80);
2419	else
2420		set_handler(0x080, &except_vec3_generic, 0x80);
2421
2422	local_flush_icache_range(ebase, ebase + 0x400);
2423
2424	sort_extable(__start___dbe_table, __stop___dbe_table);
2425
2426	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
2427}
2428
2429static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2430			    void *v)
2431{
2432	switch (cmd) {
2433	case CPU_PM_ENTER_FAILED:
2434	case CPU_PM_EXIT:
2435		configure_status();
2436		configure_hwrena();
2437		configure_exception_vector();
2438
2439		/* Restore register with CPU number for TLB handlers */
2440		TLBMISS_HANDLER_RESTORE();
2441
2442		break;
2443	}
2444
2445	return NOTIFY_OK;
2446}
2447
2448static struct notifier_block trap_pm_notifier_block = {
2449	.notifier_call = trap_pm_notifier,
2450};
2451
2452static int __init trap_pm_init(void)
2453{
2454	return cpu_pm_register_notifier(&trap_pm_notifier_block);
2455}
2456arch_initcall(trap_pm_init);