   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
   7 * Copyright (C) 1995, 1996 Paul M. Antoine
   8 * Copyright (C) 1998 Ulf Carlsson
   9 * Copyright (C) 1999 Silicon Graphics, Inc.
  10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11 * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
  12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  13 * Copyright (C) 2014, Imagination Technologies Ltd.
  14 */
  15#include <linux/bitops.h>
  16#include <linux/bug.h>
  17#include <linux/compiler.h>
  18#include <linux/context_tracking.h>
  19#include <linux/cpu_pm.h>
  20#include <linux/kexec.h>
  21#include <linux/init.h>
  22#include <linux/kernel.h>
  23#include <linux/module.h>
  24#include <linux/extable.h>
  25#include <linux/mm.h>
  26#include <linux/sched/mm.h>
  27#include <linux/sched/debug.h>
  28#include <linux/smp.h>
  29#include <linux/spinlock.h>
  30#include <linux/kallsyms.h>
  31#include <linux/memblock.h>
  32#include <linux/interrupt.h>
  33#include <linux/ptrace.h>
  34#include <linux/kgdb.h>
  35#include <linux/kdebug.h>
  36#include <linux/kprobes.h>
  37#include <linux/notifier.h>
  38#include <linux/kdb.h>
  39#include <linux/irq.h>
  40#include <linux/perf_event.h>
  41
  42#include <asm/addrspace.h>
  43#include <asm/bootinfo.h>
  44#include <asm/branch.h>
  45#include <asm/break.h>
  46#include <asm/cop2.h>
  47#include <asm/cpu.h>
  48#include <asm/cpu-type.h>
  49#include <asm/dsp.h>
  50#include <asm/fpu.h>
  51#include <asm/fpu_emulator.h>
  52#include <asm/idle.h>
  53#include <asm/isa-rev.h>
  54#include <asm/mips-cps.h>
  55#include <asm/mips-r2-to-r6-emul.h>
  56#include <asm/mipsregs.h>
  57#include <asm/mipsmtregs.h>
  58#include <asm/module.h>
  59#include <asm/msa.h>
  60#include <asm/pgtable.h>
  61#include <asm/ptrace.h>
  62#include <asm/sections.h>
  63#include <asm/siginfo.h>
  64#include <asm/tlbdebug.h>
  65#include <asm/traps.h>
  66#include <linux/uaccess.h>
  67#include <asm/watch.h>
  68#include <asm/mmu_context.h>
  69#include <asm/types.h>
  70#include <asm/stacktrace.h>
  71#include <asm/tlbex.h>
  72#include <asm/uasm.h>
  73
  74extern void check_wait(void);
  75extern asmlinkage void rollback_handle_int(void);
  76extern asmlinkage void handle_int(void);
  77extern asmlinkage void handle_adel(void);
  78extern asmlinkage void handle_ades(void);
  79extern asmlinkage void handle_ibe(void);
  80extern asmlinkage void handle_dbe(void);
  81extern asmlinkage void handle_sys(void);
  82extern asmlinkage void handle_bp(void);
  83extern asmlinkage void handle_ri(void);
  84extern asmlinkage void handle_ri_rdhwr_tlbp(void);
  85extern asmlinkage void handle_ri_rdhwr(void);
  86extern asmlinkage void handle_cpu(void);
  87extern asmlinkage void handle_ov(void);
  88extern asmlinkage void handle_tr(void);
  89extern asmlinkage void handle_msa_fpe(void);
  90extern asmlinkage void handle_fpe(void);
  91extern asmlinkage void handle_ftlb(void);
  92extern asmlinkage void handle_msa(void);
  93extern asmlinkage void handle_mdmx(void);
  94extern asmlinkage void handle_watch(void);
  95extern asmlinkage void handle_mt(void);
  96extern asmlinkage void handle_dsp(void);
  97extern asmlinkage void handle_mcheck(void);
  98extern asmlinkage void handle_reserved(void);
  99extern void tlb_do_page_fault_0(void);
 100
 101void (*board_be_init)(void);
 102int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 103void (*board_nmi_handler_setup)(void);
 104void (*board_ejtag_handler_setup)(void);
 105void (*board_bind_eic_interrupt)(int irq, int regset);
 106void (*board_ebase_setup)(void);
 107void(*board_cache_error_setup)(void);
 108
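/*
 * Illustrative sketch, not part of the original file: a board support
 * file typically installs one of the hooks above from its setup code.
 * my_board_be_handler() and its placement in plat_mem_setup() are
 * hypothetical; MIPS_BE_FIXUP/MIPS_BE_FATAL come from <asm/traps.h>.
 */
#if 0
static int my_board_be_handler(struct pt_regs *regs, int is_fixup)
{
	/* Claim accesses that have a fixup entry; anything else is fatal. */
	return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
}

void __init plat_mem_setup(void)
{
	board_be_handler = my_board_be_handler;
}
#endif
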
 109static void show_raw_backtrace(unsigned long reg29)
 110{
 111	unsigned long *sp = (unsigned long *)(reg29 & ~3);
 112	unsigned long addr;
 113
 114	printk("Call Trace:");
 115#ifdef CONFIG_KALLSYMS
 116	printk("\n");
 117#endif
 118	while (!kstack_end(sp)) {
 119		unsigned long __user *p =
 120			(unsigned long __user *)(unsigned long)sp++;
 121		if (__get_user(addr, p)) {
 122			printk(" (Bad stack address)");
 123			break;
 124		}
 125		if (__kernel_text_address(addr))
 126			print_ip_sym(addr);
 127	}
 128	printk("\n");
 129}
 130
 131#ifdef CONFIG_KALLSYMS
 132int raw_show_trace;
 133static int __init set_raw_show_trace(char *str)
 134{
 135	raw_show_trace = 1;
 136	return 1;
 137}
 138__setup("raw_show_trace", set_raw_show_trace);
 139#endif
 140
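/*
 * Usage note: booting with "raw_show_trace" on the kernel command line
 * sets raw_show_trace above, which makes show_backtrace() below fall
 * back to the raw stack scan even when CONFIG_KALLSYMS is enabled.
 */
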
 141static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
 142{
 143	unsigned long sp = regs->regs[29];
 144	unsigned long ra = regs->regs[31];
 145	unsigned long pc = regs->cp0_epc;
 146
 147	if (!task)
 148		task = current;
 149
 150	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
 151		show_raw_backtrace(sp);
 152		return;
 153	}
 154	printk("Call Trace:\n");
 155	do {
 156		print_ip_sym(pc);
 157		pc = unwind_stack(task, &sp, pc, &ra);
 158	} while (pc);
 159	pr_cont("\n");
 160}
 161
 162/*
 163 * This routine abuses get_user()/put_user() to reference pointers
 164 * with at least a bit of error checking ...
 165 */
 166static void show_stacktrace(struct task_struct *task,
 167	const struct pt_regs *regs)
 168{
 169	const int field = 2 * sizeof(unsigned long);
 170	long stackdata;
 171	int i;
 172	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
 173
 174	printk("Stack :");
 175	i = 0;
 176	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
 177		if (i && ((i % (64 / field)) == 0)) {
 178			pr_cont("\n");
 179			printk("       ");
 180		}
 181		if (i > 39) {
 182			pr_cont(" ...");
 183			break;
 184		}
 185
 186		if (__get_user(stackdata, sp++)) {
 187			pr_cont(" (Bad stack address)");
 188			break;
 189		}
 190
 191		pr_cont(" %0*lx", field, stackdata);
 192		i++;
 193	}
 194	pr_cont("\n");
 195	show_backtrace(task, regs);
 196}
 197
 198void show_stack(struct task_struct *task, unsigned long *sp)
 199{
 200	struct pt_regs regs;
 201	mm_segment_t old_fs = get_fs();
 202
 203	regs.cp0_status = KSU_KERNEL;
 204	if (sp) {
 205		regs.regs[29] = (unsigned long)sp;
 206		regs.regs[31] = 0;
 207		regs.cp0_epc = 0;
 208	} else {
 209		if (task && task != current) {
 210			regs.regs[29] = task->thread.reg29;
 211			regs.regs[31] = 0;
 212			regs.cp0_epc = task->thread.reg31;
 213#ifdef CONFIG_KGDB_KDB
 214		} else if (atomic_read(&kgdb_active) != -1 &&
 215			   kdb_current_regs) {
 216			memcpy(&regs, kdb_current_regs, sizeof(regs));
 217#endif /* CONFIG_KGDB_KDB */
 218		} else {
 219			prepare_frametrace(&regs);
 220		}
 221	}
 222	/*
 223	 * show_stack() deals exclusively with kernel mode, so be sure to access
 224	 * the stack in the kernel (not user) address space.
 225	 */
 226	set_fs(KERNEL_DS);
 227	show_stacktrace(task, &regs);
 228	set_fs(old_fs);
 229}
 230
 231static void show_code(unsigned int __user *pc)
 232{
 233	long i;
 234	unsigned short __user *pc16 = NULL;
 235
 236	printk("Code:");
 237
 238	if ((unsigned long)pc & 1)
 239		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
 240	for(i = -3 ; i < 6 ; i++) {
 241		unsigned int insn;
 242		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
 243			pr_cont(" (Bad address in epc)\n");
 244			break;
 245		}
 246		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
 247	}
 248	pr_cont("\n");
 249}
 250
 251static void __show_regs(const struct pt_regs *regs)
 252{
 253	const int field = 2 * sizeof(unsigned long);
 254	unsigned int cause = regs->cp0_cause;
 255	unsigned int exccode;
 256	int i;
 257
 258	show_regs_print_info(KERN_DEFAULT);
 259
 260	/*
 261	 * Saved main processor registers
 262	 */
 263	for (i = 0; i < 32; ) {
 264		if ((i % 4) == 0)
 265			printk("$%2d   :", i);
 266		if (i == 0)
 267			pr_cont(" %0*lx", field, 0UL);
 268		else if (i == 26 || i == 27)
 269			pr_cont(" %*s", field, "");
 270		else
 271			pr_cont(" %0*lx", field, regs->regs[i]);
 272
 273		i++;
 274		if ((i % 4) == 0)
 275			pr_cont("\n");
 276	}
 277
 278#ifdef CONFIG_CPU_HAS_SMARTMIPS
 279	printk("Acx    : %0*lx\n", field, regs->acx);
 280#endif
 281	if (MIPS_ISA_REV < 6) {
 282		printk("Hi    : %0*lx\n", field, regs->hi);
 283		printk("Lo    : %0*lx\n", field, regs->lo);
 284	}
 285
 286	/*
 287	 * Saved cp0 registers
 288	 */
 289	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
 290	       (void *) regs->cp0_epc);
 291	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
 292	       (void *) regs->regs[31]);
 293
 294	printk("Status: %08x	", (uint32_t) regs->cp0_status);
 295
 296	if (cpu_has_3kex) {
 297		if (regs->cp0_status & ST0_KUO)
 298			pr_cont("KUo ");
 299		if (regs->cp0_status & ST0_IEO)
 300			pr_cont("IEo ");
 301		if (regs->cp0_status & ST0_KUP)
 302			pr_cont("KUp ");
 303		if (regs->cp0_status & ST0_IEP)
 304			pr_cont("IEp ");
 305		if (regs->cp0_status & ST0_KUC)
 306			pr_cont("KUc ");
 307		if (regs->cp0_status & ST0_IEC)
 308			pr_cont("IEc ");
 309	} else if (cpu_has_4kex) {
 310		if (regs->cp0_status & ST0_KX)
 311			pr_cont("KX ");
 312		if (regs->cp0_status & ST0_SX)
 313			pr_cont("SX ");
 314		if (regs->cp0_status & ST0_UX)
 315			pr_cont("UX ");
 316		switch (regs->cp0_status & ST0_KSU) {
 317		case KSU_USER:
 318			pr_cont("USER ");
 319			break;
 320		case KSU_SUPERVISOR:
 321			pr_cont("SUPERVISOR ");
 322			break;
 323		case KSU_KERNEL:
 324			pr_cont("KERNEL ");
 325			break;
 326		default:
 327			pr_cont("BAD_MODE ");
 328			break;
 329		}
 330		if (regs->cp0_status & ST0_ERL)
 331			pr_cont("ERL ");
 332		if (regs->cp0_status & ST0_EXL)
 333			pr_cont("EXL ");
 334		if (regs->cp0_status & ST0_IE)
 335			pr_cont("IE ");
 336	}
 337	pr_cont("\n");
 338
 339	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
 340	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
 341
 342	if (1 <= exccode && exccode <= 5)
 343		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
 344
 345	printk("PrId  : %08x (%s)\n", read_c0_prid(),
 346	       cpu_name_string());
 347}
 348
 349/*
 350 * FIXME: really the generic show_regs should take a const pointer argument.
 351 */
 352void show_regs(struct pt_regs *regs)
 353{
 354	__show_regs(regs);
 355	dump_stack();
 356}
 357
 358void show_registers(struct pt_regs *regs)
 359{
 360	const int field = 2 * sizeof(unsigned long);
 361	mm_segment_t old_fs = get_fs();
 362
 363	__show_regs(regs);
 364	print_modules();
 365	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
 366	       current->comm, current->pid, current_thread_info(), current,
 367	      field, current_thread_info()->tp_value);
 368	if (cpu_has_userlocal) {
 369		unsigned long tls;
 370
 371		tls = read_c0_userlocal();
 372		if (tls != current_thread_info()->tp_value)
 373			printk("*HwTLS: %0*lx\n", field, tls);
 374	}
 375
 376	if (!user_mode(regs))
 377		/* Necessary for getting the correct stack content */
 378		set_fs(KERNEL_DS);
 379	show_stacktrace(current, regs);
 380	show_code((unsigned int __user *) regs->cp0_epc);
 381	printk("\n");
 382	set_fs(old_fs);
 383}
 384
 385static DEFINE_RAW_SPINLOCK(die_lock);
 386
 387void __noreturn die(const char *str, struct pt_regs *regs)
 388{
 389	static int die_counter;
 390	int sig = SIGSEGV;
 391
 392	oops_enter();
 393
 394	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
 395		       SIGSEGV) == NOTIFY_STOP)
 396		sig = 0;
 397
 398	console_verbose();
 399	raw_spin_lock_irq(&die_lock);
 400	bust_spinlocks(1);
 401
 402	printk("%s[#%d]:\n", str, ++die_counter);
 403	show_registers(regs);
 404	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 405	raw_spin_unlock_irq(&die_lock);
 406
 407	oops_exit();
 408
 409	if (in_interrupt())
 410		panic("Fatal exception in interrupt");
 411
 412	if (panic_on_oops)
 413		panic("Fatal exception");
 414
 415	if (regs && kexec_should_crash(current))
 416		crash_kexec(regs);
 417
 418	do_exit(sig);
 419}
 420
 421extern struct exception_table_entry __start___dbe_table[];
 422extern struct exception_table_entry __stop___dbe_table[];
 423
 424__asm__(
 425"	.section	__dbe_table, \"a\"\n"
 426"	.previous			\n");
 427
 428/* Given an address, look for it in the exception tables. */
 429static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
 430{
 431	const struct exception_table_entry *e;
 432
 433	e = search_extable(__start___dbe_table,
 434			   __stop___dbe_table - __start___dbe_table, addr);
 435	if (!e)
 436		e = search_module_dbetables(addr);
 437	return e;
 438}
 439
 440asmlinkage void do_be(struct pt_regs *regs)
 441{
 442	const int field = 2 * sizeof(unsigned long);
 443	const struct exception_table_entry *fixup = NULL;
 444	int data = regs->cp0_cause & 4;
 445	int action = MIPS_BE_FATAL;
 446	enum ctx_state prev_state;
 447
 448	prev_state = exception_enter();
 449	/* XXX For now.	 Fixme, this searches the wrong table ...  */
 450	if (data && !user_mode(regs))
 451		fixup = search_dbe_tables(exception_epc(regs));
 452
 453	if (fixup)
 454		action = MIPS_BE_FIXUP;
 455
 456	if (board_be_handler)
 457		action = board_be_handler(regs, fixup != NULL);
 458	else
 459		mips_cm_error_report();
 460
 461	switch (action) {
 462	case MIPS_BE_DISCARD:
 463		goto out;
 464	case MIPS_BE_FIXUP:
 465		if (fixup) {
 466			regs->cp0_epc = fixup->nextinsn;
 467			goto out;
 468		}
 469		break;
 470	default:
 471		break;
 472	}
 473
 474	/*
 475	 * Assume it would be too dangerous to continue ...
 476	 */
 477	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
 478	       data ? "Data" : "Instruction",
 479	       field, regs->cp0_epc, field, regs->regs[31]);
 480	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
 481		       SIGBUS) == NOTIFY_STOP)
 482		goto out;
 483
 484	die_if_kernel("Oops", regs);
 485	force_sig(SIGBUS);
 486
 487out:
 488	exception_exit(prev_state);
 489}
 490
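/*
 * Sketch of the consumer side (an assumption based on <asm/paccess.h>,
 * which some platforms use): a driver probing an address that may bus
 * error wraps the access in get_dbe(), which plants a fixup entry in
 * __dbe_table so the MIPS_BE_FIXUP path above can recover.
 * probe_word() is a hypothetical example.
 */
#if 0
#include <asm/paccess.h>

static int probe_word(volatile u32 *addr, u32 *val)
{
	/* Returns 0 on success, -EFAULT if the read bus-errored. */
	return get_dbe(*val, addr);
}
#endif
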
 491/*
 492 * ll/sc, rdhwr, sync emulation
 493 */
 494
 495#define OPCODE 0xfc000000
 496#define BASE   0x03e00000
 497#define RT     0x001f0000
 498#define OFFSET 0x0000ffff
 499#define LL     0xc0000000
 500#define SC     0xe0000000
 501#define SPEC0  0x00000000
 502#define SPEC3  0x7c000000
 503#define RD     0x0000f800
 504#define FUNC   0x0000003f
 505#define SYNC   0x0000000f
 506#define RDHWR  0x0000003b
 507
 508/*  microMIPS definitions   */
 509#define MM_POOL32A_FUNC 0xfc00ffff
 510#define MM_RDHWR        0x00006b3c
 511#define MM_RS           0x001f0000
 512#define MM_RT           0x03e00000
 513
 514/*
 515 * The ll_bit is cleared by r*_switch.S
 516 */
 517
 518unsigned int ll_bit;
 519struct task_struct *ll_task;
 520
 521static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
 522{
 523	unsigned long value, __user *vaddr;
 524	long offset;
 525
 526	/*
 527	 * Analyse the ll instruction that just caused an RI exception
 528	 * and put the referenced address into vaddr.
 529	 */
 530
 531	/* sign extend offset */
 532	offset = opcode & OFFSET;
 533	offset <<= 16;
 534	offset >>= 16;
 535
 536	vaddr = (unsigned long __user *)
 537		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 538
 539	if ((unsigned long)vaddr & 3)
 540		return SIGBUS;
 541	if (get_user(value, vaddr))
 542		return SIGSEGV;
 543
 544	preempt_disable();
 545
 546	if (ll_task == NULL || ll_task == current) {
 547		ll_bit = 1;
 548	} else {
 549		ll_bit = 0;
 550	}
 551	ll_task = current;
 552
 553	preempt_enable();
 554
 555	regs->regs[(opcode & RT) >> 16] = value;
 556
 557	return 0;
 558}
 559
 560static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
 561{
 562	unsigned long __user *vaddr;
 563	unsigned long reg;
 564	long offset;
 565
 566	/*
 567	 * Analyse the sc instruction that just caused an RI exception
 568	 * and put the referenced address into vaddr.
 569	 */
 570
 571	/* sign extend offset */
 572	offset = opcode & OFFSET;
 573	offset <<= 16;
 574	offset >>= 16;
 575
 576	vaddr = (unsigned long __user *)
 577		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 578	reg = (opcode & RT) >> 16;
 579
 580	if ((unsigned long)vaddr & 3)
 581		return SIGBUS;
 582
 583	preempt_disable();
 584
 585	if (ll_bit == 0 || ll_task != current) {
 586		regs->regs[reg] = 0;
 587		preempt_enable();
 588		return 0;
 589	}
 590
 591	preempt_enable();
 592
 593	if (put_user(regs->regs[reg], vaddr))
 594		return SIGSEGV;
 595
 596	regs->regs[reg] = 1;
 597
 598	return 0;
 599}
 600
 601/*
 602 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is, both
 603 * opcodes are supposed to result in coprocessor unusable exceptions if
 604 * executed on ll/sc-less processors.  That's the theory.  In practice a
 605 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 606 * instead, so we're doing the emulation thing in both exception handlers.
 607 */
 608static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 609{
 610	if ((opcode & OPCODE) == LL) {
 611		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 612				1, regs, 0);
 613		return simulate_ll(regs, opcode);
 614	}
 615	if ((opcode & OPCODE) == SC) {
 616		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 617				1, regs, 0);
 618		return simulate_sc(regs, opcode);
 619	}
 620
 621	return -1;			/* Must be something else ... */
 622}
 623
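/*
 * For reference, a minimal sketch (not from this file) of the userspace
 * sequence being emulated: an atomic increment built from ll/sc.  On
 * ll/sc-less processors each of these instructions traps and lands in
 * simulate_ll()/simulate_sc() above.
 */
#if 0
static inline void atomic_inc_emulated(int *p)
{
	int tmp;

	__asm__ __volatile__(
	"1:	ll	%0, %1		\n"	/* load-linked *p */
	"	addiu	%0, %0, 1	\n"	/* increment */
	"	sc	%0, %1		\n"	/* store-conditional */
	"	beqz	%0, 1b		\n"	/* retry if it failed */
	: "=&r" (tmp), "+m" (*p));
}
#endif
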
 624/*
 625 * Simulate trapping 'rdhwr' instructions to provide user accessible
 626 * registers not implemented in hardware.
 627 */
 628static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
 629{
 630	struct thread_info *ti = task_thread_info(current);
 631
 632	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 633			1, regs, 0);
 634	switch (rd) {
 635	case MIPS_HWR_CPUNUM:		/* CPU number */
 636		regs->regs[rt] = smp_processor_id();
 637		return 0;
 638	case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
 639		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
 640				     current_cpu_data.icache.linesz);
 641		return 0;
 642	case MIPS_HWR_CC:		/* Read count register */
 643		regs->regs[rt] = read_c0_count();
 644		return 0;
 645	case MIPS_HWR_CCRES:		/* Count register resolution */
 646		switch (current_cpu_type()) {
 647		case CPU_20KC:
 648		case CPU_25KF:
 649			regs->regs[rt] = 1;
 650			break;
 651		default:
 652			regs->regs[rt] = 2;
 653		}
 654		return 0;
 655	case MIPS_HWR_ULR:		/* Read UserLocal register */
 656		regs->regs[rt] = ti->tp_value;
 657		return 0;
 658	default:
 659		return -1;
 660	}
 661}
 662
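/*
 * Sketch of the userspace side (illustrative, not from this file): C
 * libraries read the thread pointer with RDHWR from UserLocal (hardware
 * register 29, MIPS_HWR_ULR above).  On CPUs lacking RDHWR or UserLocal
 * the instruction traps and is emulated by simulate_rdhwr() above.
 */
#if 0
static inline void *read_tls_pointer(void)
{
	void *tp;

	__asm__(".set	push\n\t"
		".set	mips32r2\n\t"
		"rdhwr	%0, $29\n\t"
		".set	pop"
		: "=r" (tp));
	return tp;
}
#endif
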
 663static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
 664{
 665	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
 666		int rd = (opcode & RD) >> 11;
 667		int rt = (opcode & RT) >> 16;
 668
 669		simulate_rdhwr(regs, rd, rt);
 670		return 0;
 671	}
 672
 673	/* Not ours.  */
 674	return -1;
 675}
 676
 677static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
 678{
 679	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
 680		int rd = (opcode & MM_RS) >> 16;
 681		int rt = (opcode & MM_RT) >> 21;
 682		simulate_rdhwr(regs, rd, rt);
 683		return 0;
 684	}
 685
 686	/* Not ours.  */
 687	return -1;
 688}
 689
 690static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 691{
 692	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
 693		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 694				1, regs, 0);
 695		return 0;
 696	}
 697
 698	return -1;			/* Must be something else ... */
 699}
 700
 701asmlinkage void do_ov(struct pt_regs *regs)
 702{
 703	enum ctx_state prev_state;
 704
 705	prev_state = exception_enter();
 706	die_if_kernel("Integer overflow", regs);
 707
 708	force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
 709	exception_exit(prev_state);
 710}
 711
 712#ifdef CONFIG_MIPS_FP_SUPPORT
 713
 714/*
 715 * Send SIGFPE according to FCSR Cause bits, which must have already
 716 * been masked against Enable bits.  This is important as Inexact can
 717 * happen together with Overflow or Underflow, and `ptrace' can set
 718 * any bits.
 719 */
 720void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
 721		     struct task_struct *tsk)
 722{
 723	int si_code = FPE_FLTUNK;
 724
 725	if (fcr31 & FPU_CSR_INV_X)
 726		si_code = FPE_FLTINV;
 727	else if (fcr31 & FPU_CSR_DIV_X)
 728		si_code = FPE_FLTDIV;
 729	else if (fcr31 & FPU_CSR_OVF_X)
 730		si_code = FPE_FLTOVF;
 731	else if (fcr31 & FPU_CSR_UDF_X)
 732		si_code = FPE_FLTUND;
 733	else if (fcr31 & FPU_CSR_INE_X)
 734		si_code = FPE_FLTRES;
 735
 736	force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
 737}
 738
 739int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 740{
 741	int si_code;
 742	struct vm_area_struct *vma;
 743
 744	switch (sig) {
 745	case 0:
 746		return 0;
 747
 748	case SIGFPE:
 749		force_fcr31_sig(fcr31, fault_addr, current);
 750		return 1;
 751
 752	case SIGBUS:
 753		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
 754		return 1;
 755
 756	case SIGSEGV:
 757		down_read(&current->mm->mmap_sem);
 758		vma = find_vma(current->mm, (unsigned long)fault_addr);
 759		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
 760			si_code = SEGV_ACCERR;
 761		else
 762			si_code = SEGV_MAPERR;
 763		up_read(&current->mm->mmap_sem);
 764		force_sig_fault(SIGSEGV, si_code, fault_addr);
 765		return 1;
 766
 767	default:
 768		force_sig(sig);
 769		return 1;
 770	}
 771}
 772
 773static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 774		       unsigned long old_epc, unsigned long old_ra)
 775{
 776	union mips_instruction inst = { .word = opcode };
 777	void __user *fault_addr;
 778	unsigned long fcr31;
 779	int sig;
 780
 781	/* If it's obviously not an FP instruction, skip it */
 782	switch (inst.i_format.opcode) {
 783	case cop1_op:
 784	case cop1x_op:
 785	case lwc1_op:
 786	case ldc1_op:
 787	case swc1_op:
 788	case sdc1_op:
 789		break;
 790
 791	default:
 792		return -1;
 793	}
 794
 795	/*
 796	 * do_ri skipped over the instruction via compute_return_epc, undo
 797	 * that for the FPU emulator.
 798	 */
 799	regs->cp0_epc = old_epc;
 800	regs->regs[31] = old_ra;
 801
 802	/* Run the emulator */
 803	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 804				       &fault_addr);
 805
 806	/*
 807	 * We can't allow the emulated instruction to leave any
 808	 * enabled Cause bits set in $fcr31.
 809	 */
 810	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
 811	current->thread.fpu.fcr31 &= ~fcr31;
 812
 813	/* Restore the hardware register state */
 814	own_fpu(1);
 815
 816	/* Send a signal if required.  */
 817	process_fpemu_return(sig, fault_addr, fcr31);
 818
 819	return 0;
 820}
 821
 822/*
 823 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 824 */
 825asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 826{
 827	enum ctx_state prev_state;
 828	void __user *fault_addr;
 829	int sig;
 830
 831	prev_state = exception_enter();
 832	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
 833		       SIGFPE) == NOTIFY_STOP)
 834		goto out;
 835
 836	/* Clear FCSR.Cause before enabling interrupts */
 837	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
 838	local_irq_enable();
 839
 840	die_if_kernel("FP exception in kernel code", regs);
 841
 842	if (fcr31 & FPU_CSR_UNI_X) {
 843		/*
 844		 * Unimplemented operation exception.  If we've got the full
 845		 * software emulator on-board, let's use it...
 846		 *
 847		 * Force FPU to dump state into task/thread context.  We're
 848		 * moving a lot of data here for what is probably a single
 849		 * instruction, but the alternative is to pre-decode the FP
 850		 * register operands before invoking the emulator, which seems
 851		 * a bit extreme for what should be an infrequent event.
 852		 */
 853
 854		/* Run the emulator */
 855		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 856					       &fault_addr);
 857
 858		/*
 859		 * We can't allow the emulated instruction to leave any
 860		 * enabled Cause bits set in $fcr31.
 861		 */
 862		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
 863		current->thread.fpu.fcr31 &= ~fcr31;
 864
 865		/* Restore the hardware register state */
 866		own_fpu(1);	/* Using the FPU again.	 */
 867	} else {
 868		sig = SIGFPE;
 869		fault_addr = (void __user *) regs->cp0_epc;
 870	}
 871
 872	/* Send a signal if required.  */
 873	process_fpemu_return(sig, fault_addr, fcr31);
 874
 875out:
 876	exception_exit(prev_state);
 877}
 878
 879/*
 880 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 881 * emulated more than some threshold number of instructions, force migration to
 882 * a "CPU" that has FP support.
 883 */
 884static void mt_ase_fp_affinity(void)
 885{
 886#ifdef CONFIG_MIPS_MT_FPAFF
 887	if (mt_fpemul_threshold > 0 &&
 888	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
 889		/*
 890		 * If there's no FPU present, or if the application has already
 891		 * restricted the allowed set to exclude any CPUs with FPUs,
 892		 * we'll skip the procedure.
 893		 */
 894		if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
 895			cpumask_t tmask;
 896
 897			current->thread.user_cpus_allowed
 898				= current->cpus_mask;
 899			cpumask_and(&tmask, &current->cpus_mask,
 900				    &mt_fpu_cpumask);
 901			set_cpus_allowed_ptr(current, &tmask);
 902			set_thread_flag(TIF_FPUBOUND);
 903		}
 904	}
 905#endif /* CONFIG_MIPS_MT_FPAFF */
 906}
 907
 908#else /* !CONFIG_MIPS_FP_SUPPORT */
 909
 910static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 911		       unsigned long old_epc, unsigned long old_ra)
 912{
 913	return -1;
 914}
 915
 916#endif /* !CONFIG_MIPS_FP_SUPPORT */
 917
 918void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
 919	const char *str)
 920{
 921	char b[40];
 922
 923#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 924	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 925			 SIGTRAP) == NOTIFY_STOP)
 926		return;
 927#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 928
 929	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 930		       SIGTRAP) == NOTIFY_STOP)
 931		return;
 932
 933	/*
 934	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
 935	 * insns, even for trap and break codes that indicate arithmetic
 936	 * failures.  Weird ...
 937	 * But should we continue the brokenness???  --macro
 938	 */
 939	switch (code) {
 940	case BRK_OVERFLOW:
 941	case BRK_DIVZERO:
 942		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 943		die_if_kernel(b, regs);
 944		force_sig_fault(SIGFPE,
 945				code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
 946				(void __user *) regs->cp0_epc);
 947		break;
 948	case BRK_BUG:
 949		die_if_kernel("Kernel bug detected", regs);
 950		force_sig(SIGTRAP);
 951		break;
 952	case BRK_MEMU:
 953		/*
 954		 * This breakpoint code is used by the FPU emulator to retake
 955		 * control of the CPU after executing the instruction from the
 956		 * delay slot of an emulated branch.
 957		 *
 958		 * Terminate if exception was recognized as a delay slot return
 959		 * otherwise handle as normal.
 960		 */
 961		if (do_dsemulret(regs))
 962			return;
 963
 964		die_if_kernel("Math emu break/trap", regs);
 965		force_sig(SIGTRAP);
 966		break;
 967	default:
 968		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 969		die_if_kernel(b, regs);
 970		if (si_code) {
 971			force_sig_fault(SIGTRAP, si_code, NULL);
 972		} else {
 973			force_sig(SIGTRAP);
 974		}
 975	}
 976}
 977
 978asmlinkage void do_bp(struct pt_regs *regs)
 979{
 980	unsigned long epc = msk_isa16_mode(exception_epc(regs));
 981	unsigned int opcode, bcode;
 982	enum ctx_state prev_state;
 983	mm_segment_t seg;
 984
 985	seg = get_fs();
 986	if (!user_mode(regs))
 987		set_fs(KERNEL_DS);
 988
 989	prev_state = exception_enter();
 990	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 991	if (get_isa16_mode(regs->cp0_epc)) {
 992		u16 instr[2];
 993
 994		if (__get_user(instr[0], (u16 __user *)epc))
 995			goto out_sigsegv;
 996
 997		if (!cpu_has_mmips) {
 998			/* MIPS16e mode */
 999			bcode = (instr[0] >> 5) & 0x3f;
1000		} else if (mm_insn_16bit(instr[0])) {
1001			/* 16-bit microMIPS BREAK */
1002			bcode = instr[0] & 0xf;
1003		} else {
1004			/* 32-bit microMIPS BREAK */
1005			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
1006				goto out_sigsegv;
1007			opcode = (instr[0] << 16) | instr[1];
1008			bcode = (opcode >> 6) & ((1 << 20) - 1);
1009		}
1010	} else {
1011		if (__get_user(opcode, (unsigned int __user *)epc))
1012			goto out_sigsegv;
1013		bcode = (opcode >> 6) & ((1 << 20) - 1);
1014	}
1015
1016	/*
1017	 * There is an ancient bug in MIPS assemblers: the break code
1018	 * starts at bit 16 instead of bit 6 in the opcode.
1019	 * Gas is bug-compatible, but not always, grrr...
1020	 * We handle both cases with a simple heuristic.  --macro
1021	 */
1022	if (bcode >= (1 << 10))
1023		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
1024
1025	/*
1026	 * notify the kprobe handlers, if instruction is likely to
1027	 * pertain to them.
1028	 */
1029	switch (bcode) {
1030	case BRK_UPROBE:
1031		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
1032			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1033			goto out;
1034		else
1035			break;
1036	case BRK_UPROBE_XOL:
1037		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
1038			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1039			goto out;
1040		else
1041			break;
1042	case BRK_KPROBE_BP:
1043		if (notify_die(DIE_BREAK, "debug", regs, bcode,
1044			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1045			goto out;
1046		else
1047			break;
1048	case BRK_KPROBE_SSTEPBP:
1049		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
1050			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1051			goto out;
1052		else
1053			break;
1054	default:
1055		break;
1056	}
1057
1058	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
1059
1060out:
1061	set_fs(seg);
1062	exception_exit(prev_state);
1063	return;
1064
1065out_sigsegv:
1066	force_sig(SIGSEGV);
1067	goto out;
1068}
1069
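/*
 * For reference, the classic MIPS BREAK encoding assumed by the bcode
 * extraction above (SPECIAL opcode, BREAK function field):
 *
 *    31      26 25                    6 5      0
 *   +----------+------------------------+--------+
 *   |  000000  |     code (20 bits)     | 001101 |
 *   +----------+------------------------+--------+
 *
 * hence bcode = (opcode >> 6) & ((1 << 20) - 1).  Old assemblers placed
 * a 10-bit code at bit 16 instead, which is what the swap heuristic
 * above compensates for.
 */
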
1070asmlinkage void do_tr(struct pt_regs *regs)
1071{
1072	u32 opcode, tcode = 0;
1073	enum ctx_state prev_state;
1074	u16 instr[2];
1075	mm_segment_t seg;
1076	unsigned long epc = msk_isa16_mode(exception_epc(regs));
1077
1078	seg = get_fs();
1079	if (!user_mode(regs))
1080		set_fs(KERNEL_DS);
1081
1082	prev_state = exception_enter();
1083	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1084	if (get_isa16_mode(regs->cp0_epc)) {
1085		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
1086		    __get_user(instr[1], (u16 __user *)(epc + 2)))
1087			goto out_sigsegv;
1088		opcode = (instr[0] << 16) | instr[1];
1089		/* Immediate versions don't provide a code.  */
1090		if (!(opcode & OPCODE))
1091			tcode = (opcode >> 12) & ((1 << 4) - 1);
1092	} else {
1093		if (__get_user(opcode, (u32 __user *)epc))
1094			goto out_sigsegv;
1095		/* Immediate versions don't provide a code.  */
1096		if (!(opcode & OPCODE))
1097			tcode = (opcode >> 6) & ((1 << 10) - 1);
1098	}
1099
1100	do_trap_or_bp(regs, tcode, 0, "Trap");
1101
1102out:
1103	set_fs(seg);
1104	exception_exit(prev_state);
1105	return;
1106
1107out_sigsegv:
1108	force_sig(SIGSEGV);
1109	goto out;
1110}
1111
1112asmlinkage void do_ri(struct pt_regs *regs)
1113{
1114	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1115	unsigned long old_epc = regs->cp0_epc;
1116	unsigned long old31 = regs->regs[31];
1117	enum ctx_state prev_state;
1118	unsigned int opcode = 0;
1119	int status = -1;
1120
1121	/*
1122	 * Avoid any kernel code. Just emulate the R2 instruction
1123	 * as quickly as possible.
1124	 */
1125	if (mipsr2_emulation && cpu_has_mips_r6 &&
1126	    likely(user_mode(regs)) &&
1127	    likely(get_user(opcode, epc) >= 0)) {
1128		unsigned long fcr31 = 0;
1129
1130		status = mipsr2_decoder(regs, opcode, &fcr31);
1131		switch (status) {
1132		case 0:
1133		case SIGEMT:
1134			return;
1135		case SIGILL:
1136			goto no_r2_instr;
1137		default:
1138			process_fpemu_return(status,
1139					     &current->thread.cp0_baduaddr,
1140					     fcr31);
1141			return;
1142		}
1143	}
1144
1145no_r2_instr:
1146
1147	prev_state = exception_enter();
1148	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1149
1150	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
1151		       SIGILL) == NOTIFY_STOP)
1152		goto out;
1153
1154	die_if_kernel("Reserved instruction in kernel code", regs);
1155
1156	if (unlikely(compute_return_epc(regs) < 0))
1157		goto out;
1158
1159	if (!get_isa16_mode(regs->cp0_epc)) {
1160		if (unlikely(get_user(opcode, epc) < 0))
1161			status = SIGSEGV;
1162
1163		if (!cpu_has_llsc && status < 0)
1164			status = simulate_llsc(regs, opcode);
1165
1166		if (status < 0)
1167			status = simulate_rdhwr_normal(regs, opcode);
1168
1169		if (status < 0)
1170			status = simulate_sync(regs, opcode);
1171
1172		if (status < 0)
1173			status = simulate_fp(regs, opcode, old_epc, old31);
1174	} else if (cpu_has_mmips) {
1175		unsigned short mmop[2] = { 0 };
1176
1177		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1178			status = SIGSEGV;
1179		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1180			status = SIGSEGV;
1181		opcode = mmop[0];
1182		opcode = (opcode << 16) | mmop[1];
1183
1184		if (status < 0)
1185			status = simulate_rdhwr_mm(regs, opcode);
1186	}
1187
1188	if (status < 0)
1189		status = SIGILL;
1190
1191	if (unlikely(status > 0)) {
1192		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
1193		regs->regs[31] = old31;
1194		force_sig(status);
1195	}
1196
1197out:
1198	exception_exit(prev_state);
1199}
1200
1201/*
1202 * No lock; only written during early bootup by CPU 0.
1203 */
1204static RAW_NOTIFIER_HEAD(cu2_chain);
1205
1206int __ref register_cu2_notifier(struct notifier_block *nb)
1207{
1208	return raw_notifier_chain_register(&cu2_chain, nb);
1209}
1210
1211int cu2_notifier_call_chain(unsigned long val, void *v)
1212{
1213	return raw_notifier_call_chain(&cu2_chain, val, v);
1214}
1215
1216static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1217	void *data)
1218{
1219	struct pt_regs *regs = data;
1220
1221	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
1222			      "instruction", regs);
1223	force_sig(SIGILL);
1224
1225	return NOTIFY_OK;
1226}
1227
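/*
 * Sketch (hypothetical, in the spirit of what Octeon's COP2 support
 * does): a platform with a usable coprocessor 2, e.g. a crypto engine,
 * registers its own notifier on the chain and stops it before the
 * default SIGILL handler below runs.
 */
#if 0
static int my_cu2_call(struct notifier_block *nfb, unsigned long action,
		       void *data)
{
	if (action != CU2_EXCEPTION)
		return NOTIFY_OK;

	/* Grant this task CP2 access; CP2 state would be restored here. */
	KSTK_STATUS(current) |= ST0_CU2;
	return NOTIFY_BAD;	/* Skip the default SIGILL notifier. */
}

static struct notifier_block my_cu2_notifier = {
	.notifier_call = my_cu2_call,
};

static int __init my_cu2_init(void)
{
	return register_cu2_notifier(&my_cu2_notifier);
}
#endif
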
1228#ifdef CONFIG_MIPS_FP_SUPPORT
1229
1230static int enable_restore_fp_context(int msa)
1231{
1232	int err, was_fpu_owner, prior_msa;
1233	bool first_fp;
1234
1235	/* Initialize context if it hasn't been used already */
1236	first_fp = init_fp_ctx(current);
1237
1238	if (first_fp) {
1239		preempt_disable();
1240		err = own_fpu_inatomic(1);
1241		if (msa && !err) {
1242			enable_msa();
1243			set_thread_flag(TIF_USEDMSA);
1244			set_thread_flag(TIF_MSA_CTX_LIVE);
1245		}
1246		preempt_enable();
1247		return err;
1248	}
1249
1250	/*
1251	 * This task has formerly used the FP context.
1252	 *
1253	 * If this thread has no live MSA vector context then we can simply
1254	 * restore the scalar FP context. If it has live MSA vector context
1255	 * (that is, it has or may have used MSA since last performing a
1256	 * function call) then we'll need to restore the vector context. This
1257	 * applies even if we're currently only executing a scalar FP
1258	 * instruction. This is because if we were to later execute an MSA
1259	 * instruction then we'd either have to:
1260	 *
1261	 *  - Restore the vector context & clobber any registers modified by
1262	 *    scalar FP instructions between now & then.
1263	 *
1264	 * or
1265	 *
1266	 *  - Not restore the vector context & lose the most significant bits
1267	 *    of all vector registers.
1268	 *
1269	 * Neither of those options is acceptable. We cannot restore the least
1270	 * significant bits of the registers now & only restore the most
1271	 * significant bits later because the most significant bits of any
1272	 * vector registers whose aliased FP register is modified now will have
1273	 * been zeroed. We'd have no way to know that when restoring the vector
1274	 * context & thus may load an outdated value for the most significant
1275	 * bits of a vector register.
1276	 */
1277	if (!msa && !thread_msa_context_live())
1278		return own_fpu(1);
1279
1280	/*
1281	 * This task is using or has previously used MSA. Thus we require
1282	 * that Status.FR == 1.
1283	 */
1284	preempt_disable();
1285	was_fpu_owner = is_fpu_owner();
1286	err = own_fpu_inatomic(0);
1287	if (err)
1288		goto out;
1289
1290	enable_msa();
1291	write_msa_csr(current->thread.fpu.msacsr);
1292	set_thread_flag(TIF_USEDMSA);
1293
1294	/*
1295	 * If this is the first time that the task is using MSA and it has
1296	 * previously used scalar FP in this time slice then we already have
1297	 * FP context which we shouldn't clobber. We do however need to clear
1298	 * the upper 64b of each vector register so that this task has no
1299	 * opportunity to see data left behind by another.
1300	 */
1301	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
1302	if (!prior_msa && was_fpu_owner) {
1303		init_msa_upper();
1304
1305		goto out;
1306	}
1307
1308	if (!prior_msa) {
1309		/*
1310		 * Restore the least significant 64b of each vector register
1311		 * from the existing scalar FP context.
1312		 */
1313		_restore_fp(current);
1314
1315		/*
1316		 * The task has not formerly used MSA, so clear the upper 64b
1317		 * of each vector register such that it cannot see data left
1318		 * behind by another task.
1319		 */
1320		init_msa_upper();
1321	} else {
1322		/* We need to restore the vector context. */
1323		restore_msa(current);
1324
1325		/* Restore the scalar FP control & status register */
1326		if (!was_fpu_owner)
1327			write_32bit_cp1_register(CP1_STATUS,
1328						 current->thread.fpu.fcr31);
1329	}
1330
1331out:
1332	preempt_enable();
1333
1334	return 0;
1335}
1336
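/*
 * For reference, the scalar FP/MSA register aliasing that the logic
 * above is careful about: with Status.FR=1, scalar register $fN is the
 * least significant 64 bits of the 128-bit vector register $wN:
 *
 *    127                 64 63                  0
 *   +---------------------+---------------------+
 *   |  $wN upper 64 bits  |         $fN         |
 *   +---------------------+---------------------+
 *
 * A scalar FP write clobbers the upper 64 bits of the aliased vector
 * register, which is why a scalar restore cannot simply be followed by
 * a vector restore later.
 */
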
1337#else /* !CONFIG_MIPS_FP_SUPPORT */
1338
1339static int enable_restore_fp_context(int msa)
1340{
1341	return SIGILL;
1342}
1343
1344#endif /* CONFIG_MIPS_FP_SUPPORT */
1345
1346asmlinkage void do_cpu(struct pt_regs *regs)
1347{
1348	enum ctx_state prev_state;
1349	unsigned int __user *epc;
1350	unsigned long old_epc, old31;
1351	unsigned int opcode;
1352	unsigned int cpid;
1353	int status;
1354
1355	prev_state = exception_enter();
1356	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1357
1358	if (cpid != 2)
1359		die_if_kernel("do_cpu invoked from kernel context!", regs);
1360
1361	switch (cpid) {
1362	case 0:
1363		epc = (unsigned int __user *)exception_epc(regs);
1364		old_epc = regs->cp0_epc;
1365		old31 = regs->regs[31];
1366		opcode = 0;
1367		status = -1;
1368
1369		if (unlikely(compute_return_epc(regs) < 0))
1370			break;
1371
1372		if (!get_isa16_mode(regs->cp0_epc)) {
1373			if (unlikely(get_user(opcode, epc) < 0))
1374				status = SIGSEGV;
1375
1376			if (!cpu_has_llsc && status < 0)
1377				status = simulate_llsc(regs, opcode);
1378		}
1379
1380		if (status < 0)
1381			status = SIGILL;
1382
1383		if (unlikely(status > 0)) {
1384			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
1385			regs->regs[31] = old31;
1386			force_sig(status);
1387		}
1388
1389		break;
1390
1391#ifdef CONFIG_MIPS_FP_SUPPORT
1392	case 3:
1393		/*
1394		 * The COP3 opcode space and consequently the CP0.Status.CU3
1395		 * bit and the CP0.Cause.CE=3 encoding have been removed as
1396		 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
1397		 * up the space has been reused for COP1X instructions, which
1398		 * are enabled by the CP0.Status.CU1 bit and consequently
1399		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
1400		 * exceptions.  Some FPU-less processors that implement one
1401		 * of these ISAs however use this code erroneously for COP1X
1402		 * instructions.  Therefore we redirect this trap to the FP
1403		 * emulator too.
1404		 */
1405		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
1406			force_sig(SIGILL);
1407			break;
1408		}
1409		/* Fall through.  */
1410
1411	case 1: {
1412		void __user *fault_addr;
1413		unsigned long fcr31;
1414		int err, sig;
1415
1416		err = enable_restore_fp_context(0);
1417
1418		if (raw_cpu_has_fpu && !err)
1419			break;
1420
1421		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1422					       &fault_addr);
1423
1424		/*
1425		 * We can't allow the emulated instruction to leave
1426		 * any enabled Cause bits set in $fcr31.
1427		 */
1428		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
1429		current->thread.fpu.fcr31 &= ~fcr31;
1430
1431		/* Send a signal if required.  */
1432		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
1433			mt_ase_fp_affinity();
1434
1435		break;
1436	}
1437#else /* CONFIG_MIPS_FP_SUPPORT */
1438	case 1:
1439	case 3:
1440		force_sig(SIGILL);
1441		break;
1442#endif /* CONFIG_MIPS_FP_SUPPORT */
1443
1444	case 2:
1445		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1446		break;
1447	}
1448
1449	exception_exit(prev_state);
1450}
1451
1452asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
1453{
1454	enum ctx_state prev_state;
1455
1456	prev_state = exception_enter();
1457	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1458	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
1459		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
1460		goto out;
1461
1462	/* Clear MSACSR.Cause before enabling interrupts */
1463	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
1464	local_irq_enable();
1465
1466	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1467	force_sig(SIGFPE);
1468out:
1469	exception_exit(prev_state);
1470}
1471
1472asmlinkage void do_msa(struct pt_regs *regs)
1473{
1474	enum ctx_state prev_state;
1475	int err;
1476
1477	prev_state = exception_enter();
1478
1479	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
1480		force_sig(SIGILL);
1481		goto out;
1482	}
1483
1484	die_if_kernel("do_msa invoked from kernel context!", regs);
1485
1486	err = enable_restore_fp_context(1);
1487	if (err)
1488		force_sig(SIGILL);
1489out:
1490	exception_exit(prev_state);
1491}
1492
1493asmlinkage void do_mdmx(struct pt_regs *regs)
1494{
1495	enum ctx_state prev_state;
1496
1497	prev_state = exception_enter();
1498	force_sig(SIGILL);
1499	exception_exit(prev_state);
1500}
1501
1502/*
1503 * Called with interrupts disabled.
1504 */
1505asmlinkage void do_watch(struct pt_regs *regs)
1506{
1507	enum ctx_state prev_state;
1508
1509	prev_state = exception_enter();
1510	/*
1511	 * Clear the WP bit (bit 22) of the cause register so we don't
1512	 * loop forever.
1513	 */
1514	clear_c0_cause(CAUSEF_WP);
1515
1516	/*
1517	 * If the current thread has the watch registers loaded, save
1518	 * their values and send SIGTRAP.  Otherwise another thread
1519	 * left the registers set, clear them and continue.
1520	 */
1521	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1522		mips_read_watch_registers();
1523		local_irq_enable();
1524		force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
1525	} else {
1526		mips_clear_watch_registers();
1527		local_irq_enable();
1528	}
1529	exception_exit(prev_state);
1530}
1531
1532asmlinkage void do_mcheck(struct pt_regs *regs)
1533{
1534	int multi_match = regs->cp0_status & ST0_TS;
1535	enum ctx_state prev_state;
1536	mm_segment_t old_fs = get_fs();
1537
1538	prev_state = exception_enter();
1539	show_regs(regs);
1540
1541	if (multi_match) {
1542		dump_tlb_regs();
1543		pr_info("\n");
1544		dump_tlb_all();
1545	}
1546
1547	if (!user_mode(regs))
1548		set_fs(KERNEL_DS);
1549
1550	show_code((unsigned int __user *) regs->cp0_epc);
1551
1552	set_fs(old_fs);
1553
1554	/*
1555	 * Some chips may have other causes of machine check (e.g. SB1
1556	 * graduation timer)
1557	 */
1558	panic("Caught Machine Check exception - %scaused by multiple "
1559	      "matching entries in the TLB.",
1560	      (multi_match) ? "" : "not ");
1561}
1562
1563asmlinkage void do_mt(struct pt_regs *regs)
1564{
1565	int subcode;
1566
1567	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1568			>> VPECONTROL_EXCPT_SHIFT;
1569	switch (subcode) {
1570	case 0:
1571		printk(KERN_DEBUG "Thread Underflow\n");
1572		break;
1573	case 1:
1574		printk(KERN_DEBUG "Thread Overflow\n");
1575		break;
1576	case 2:
1577		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1578		break;
1579	case 3:
1580		printk(KERN_DEBUG "Gating Storage Exception\n");
1581		break;
1582	case 4:
1583		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1584		break;
1585	case 5:
1586		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1587		break;
1588	default:
1589		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1590			subcode);
1591		break;
1592	}
1593	die_if_kernel("MIPS MT Thread exception in kernel", regs);
1594
1595	force_sig(SIGILL);
1596}
1597
1598
1599asmlinkage void do_dsp(struct pt_regs *regs)
1600{
1601	if (cpu_has_dsp)
1602		panic("Unexpected DSP exception");
1603
1604	force_sig(SIGILL);
1605}
1606
1607asmlinkage void do_reserved(struct pt_regs *regs)
1608{
1609	/*
1610	 * Game over - no way to handle this if it ever occurs.  Most probably
1611	 * caused by a new unknown CPU type or by another deadly
1612	 * hardware/software error.
1613	 */
1614	show_regs(regs);
1615	panic("Caught reserved exception %ld - should not happen.",
1616	      (regs->cp0_cause & 0x7f) >> 2);
1617}
1618
1619static int __initdata l1parity = 1;
1620static int __init nol1parity(char *s)
1621{
1622	l1parity = 0;
1623	return 1;
1624}
1625__setup("nol1par", nol1parity);
1626static int __initdata l2parity = 1;
1627static int __init nol2parity(char *s)
1628{
1629	l2parity = 0;
1630	return 1;
1631}
1632__setup("nol2par", nol2parity);
1633
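/*
 * Usage note: booting with "nol1par" and/or "nol2par" on the kernel
 * command line clears the flags above and disables the corresponding
 * cache parity/ECC checking; on CM3 systems the two options are tied
 * together, see parity_protection_init() below.
 */
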
1634/*
1635 * Some MIPS CPUs can enable/disable cache parity detection, but they
1636 * do it in different ways.
1637 */
1638static inline void parity_protection_init(void)
1639{
1640#define ERRCTL_PE	0x80000000
1641#define ERRCTL_L2P	0x00800000
1642
1643	if (mips_cm_revision() >= CM_REV_CM3) {
1644		ulong gcr_ectl, cp0_ectl;
1645
1646		/*
1647		 * With CM3 systems we need to ensure that the L1 & L2
1648		 * parity enables are set to the same value, since this
1649		 * is presumed by the hardware engineers.
1650		 *
1651		 * If the user disabled either of L1 or L2 ECC checking,
1652		 * disable both.
1653		 */
1654		l1parity &= l2parity;
1655		l2parity &= l1parity;
1656
1657		/* Probe L1 ECC support */
1658		cp0_ectl = read_c0_ecc();
1659		write_c0_ecc(cp0_ectl | ERRCTL_PE);
1660		back_to_back_c0_hazard();
1661		cp0_ectl = read_c0_ecc();
1662
1663		/* Probe L2 ECC support */
1664		gcr_ectl = read_gcr_err_control();
1665
1666		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
1667		    !(cp0_ectl & ERRCTL_PE)) {
1668			/*
1669			 * One of L1 or L2 ECC checking isn't supported,
1670			 * so we cannot enable either.
1671			 */
1672			l1parity = l2parity = 0;
1673		}
1674
1675		/* Configure L1 ECC checking */
1676		if (l1parity)
1677			cp0_ectl |= ERRCTL_PE;
1678		else
1679			cp0_ectl &= ~ERRCTL_PE;
1680		write_c0_ecc(cp0_ectl);
1681		back_to_back_c0_hazard();
1682		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);
1683
1684		/* Configure L2 ECC checking */
1685		if (l2parity)
1686			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
1687		else
1688			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
1689		write_gcr_err_control(gcr_ectl);
1690		gcr_ectl = read_gcr_err_control();
1691		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
1692		WARN_ON(!!gcr_ectl != l2parity);
1693
1694		pr_info("Cache parity protection %sabled\n",
1695			l1parity ? "en" : "dis");
1696		return;
1697	}
1698
1699	switch (current_cpu_type()) {
1700	case CPU_24K:
1701	case CPU_34K:
1702	case CPU_74K:
1703	case CPU_1004K:
1704	case CPU_1074K:
1705	case CPU_INTERAPTIV:
1706	case CPU_PROAPTIV:
1707	case CPU_P5600:
1708	case CPU_QEMU_GENERIC:
1709	case CPU_P6600:
1710		{
1711			unsigned long errctl;
1712			unsigned int l1parity_present, l2parity_present;
1713
1714			errctl = read_c0_ecc();
1715			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
1716
1717			/* probe L1 parity support */
1718			write_c0_ecc(errctl | ERRCTL_PE);
1719			back_to_back_c0_hazard();
1720			l1parity_present = (read_c0_ecc() & ERRCTL_PE);
1721
1722			/* probe L2 parity support */
1723			write_c0_ecc(errctl|ERRCTL_L2P);
1724			back_to_back_c0_hazard();
1725			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
1726
1727			if (l1parity_present && l2parity_present) {
1728				if (l1parity)
1729					errctl |= ERRCTL_PE;
1730				if (l1parity ^ l2parity)
1731					errctl |= ERRCTL_L2P;
1732			} else if (l1parity_present) {
1733				if (l1parity)
1734					errctl |= ERRCTL_PE;
1735			} else if (l2parity_present) {
1736				if (l2parity)
1737					errctl |= ERRCTL_L2P;
1738			} else {
1739				/* No parity available */
1740			}
1741
1742			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
1743
1744			write_c0_ecc(errctl);
1745			back_to_back_c0_hazard();
1746			errctl = read_c0_ecc();
1747			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
1748
1749			if (l1parity_present)
1750				printk(KERN_INFO "Cache parity protection %sabled\n",
1751				       (errctl & ERRCTL_PE) ? "en" : "dis");
1752
1753			if (l2parity_present) {
1754				if (l1parity_present && l1parity)
1755					errctl ^= ERRCTL_L2P;
1756				printk(KERN_INFO "L2 cache parity protection %sabled\n",
1757				       (errctl & ERRCTL_L2P) ? "en" : "dis");
1758			}
1759		}
1760		break;
1761
1762	case CPU_5KC:
1763	case CPU_5KE:
1764	case CPU_LOONGSON1:
1765		write_c0_ecc(0x80000000);
1766		back_to_back_c0_hazard();
1767		/* Set the PE bit (bit 31) in the c0_errctl register. */
1768		printk(KERN_INFO "Cache parity protection %sabled\n",
1769		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
1770		break;
1771	case CPU_20KC:
1772	case CPU_25KF:
1773		/* Clear the DE bit (bit 16) in the c0_status register. */
1774		printk(KERN_INFO "Enable cache parity protection for "
1775		       "MIPS 20KC/25KF CPUs.\n");
1776		clear_c0_status(ST0_DE);
1777		break;
1778	default:
1779		break;
1780	}
1781}
1782
1783asmlinkage void cache_parity_error(void)
1784{
1785	const int field = 2 * sizeof(unsigned long);
1786	unsigned int reg_val;
1787
1788	/* For the moment, report the problem and hang. */
1789	printk("Cache error exception:\n");
1790	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1791	reg_val = read_c0_cacheerr();
1792	printk("c0_cacheerr == %08x\n", reg_val);
1793
1794	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1795	       reg_val & (1<<30) ? "secondary" : "primary",
1796	       reg_val & (1<<31) ? "data" : "insn");
1797	if ((cpu_has_mips_r2_r6) &&
1798	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1799		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1800			reg_val & (1<<29) ? "ED " : "",
1801			reg_val & (1<<28) ? "ET " : "",
1802			reg_val & (1<<27) ? "ES " : "",
1803			reg_val & (1<<26) ? "EE " : "",
1804			reg_val & (1<<25) ? "EB " : "",
1805			reg_val & (1<<24) ? "EI " : "",
1806			reg_val & (1<<23) ? "E1 " : "",
1807			reg_val & (1<<22) ? "E0 " : "");
1808	} else {
1809		pr_err("Error bits: %s%s%s%s%s%s%s\n",
1810			reg_val & (1<<29) ? "ED " : "",
1811			reg_val & (1<<28) ? "ET " : "",
1812			reg_val & (1<<26) ? "EE " : "",
1813			reg_val & (1<<25) ? "EB " : "",
1814			reg_val & (1<<24) ? "EI " : "",
1815			reg_val & (1<<23) ? "E1 " : "",
1816			reg_val & (1<<22) ? "E0 " : "");
1817	}
1818	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1819
1820#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1821	if (reg_val & (1<<22))
1822		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1823
1824	if (reg_val & (1<<23))
1825		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1826#endif
1827
1828	panic("Can't handle the cache error!");
1829}
1830
1831asmlinkage void do_ftlb(void)
1832{
1833	const int field = 2 * sizeof(unsigned long);
1834	unsigned int reg_val;
1835
1836	/* For the moment, report the problem and hang. */
1837	if ((cpu_has_mips_r2_r6) &&
1838	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
1839	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
1840		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1841		       read_c0_ecc());
1842		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1843		reg_val = read_c0_cacheerr();
1844		pr_err("c0_cacheerr == %08x\n", reg_val);
1845
1846		if ((reg_val & 0xc0000000) == 0xc0000000) {
1847			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
1848		} else {
1849			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1850			       reg_val & (1<<30) ? "secondary" : "primary",
1851			       reg_val & (1<<31) ? "data" : "insn");
1852		}
1853	} else {
1854		pr_err("FTLB error exception\n");
1855	}
1856	/* Just print the cacheerr bits for now */
1857	cache_parity_error();
1858}
1859
1860/*
1861 * SDBBP EJTAG debug exception handler.
1862 * We skip the instruction and return to the next one.
1863 */
1864void ejtag_exception_handler(struct pt_regs *regs)
1865{
1866	const int field = 2 * sizeof(unsigned long);
1867	unsigned long depc, old_epc, old_ra;
1868	unsigned int debug;
1869
1870	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1871	depc = read_c0_depc();
1872	debug = read_c0_debug();
1873	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1874	if (debug & 0x80000000) {
1875		/*
1876		 * In branch delay slot.
1877		 * We cheat a little bit here and use EPC to calculate the
1878		 * debug return address (DEPC). EPC is restored after the
1879		 * calculation.
1880		 */
1881		old_epc = regs->cp0_epc;
1882		old_ra = regs->regs[31];
1883		regs->cp0_epc = depc;
1884		compute_return_epc(regs);
1885		depc = regs->cp0_epc;
1886		regs->cp0_epc = old_epc;
1887		regs->regs[31] = old_ra;
1888	} else
1889		depc += 4;
1890	write_c0_depc(depc);
1891
1892#if 0
1893	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1894	write_c0_debug(debug | 0x100);
1895#endif
1896}
1897
1898/*
1899 * NMI exception handler.
1900 * No lock; only written during early bootup by CPU 0.
1901 */
1902static RAW_NOTIFIER_HEAD(nmi_chain);
1903
1904int register_nmi_notifier(struct notifier_block *nb)
1905{
1906	return raw_notifier_chain_register(&nmi_chain, nb);
1907}
1908
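/*
 * Sketch (hypothetical): platform code can hook the NMI path, e.g. to
 * dump device state before nmi_exception_handler() below takes the
 * machine down.
 */
#if 0
static int my_nmi_call(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	/* Called from nmi_exception_handler() with the saved regs. */
	pr_emerg("board: NMI observed, dumping state\n");
	return NOTIFY_OK;
}

static struct notifier_block my_nmi_notifier = {
	.notifier_call = my_nmi_call,
};

static int __init my_nmi_init(void)
{
	return register_nmi_notifier(&my_nmi_notifier);
}
#endif
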
1909void __noreturn nmi_exception_handler(struct pt_regs *regs)
1910{
1911	char str[100];
1912
1913	nmi_enter();
1914	raw_notifier_call_chain(&nmi_chain, 0, regs);
1915	bust_spinlocks(1);
1916	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
1917		 smp_processor_id(), regs->cp0_epc);
1918	regs->cp0_epc = read_c0_errorepc();
1919	die(str, regs);
1920	nmi_exit();
1921}
1922
1923#define VECTORSPACING 0x100	/* for EI/VI mode */
1924
1925unsigned long ebase;
1926EXPORT_SYMBOL_GPL(ebase);
1927unsigned long exception_handlers[32];
1928unsigned long vi_handlers[64];
1929
1930void __init *set_except_vector(int n, void *addr)
1931{
1932	unsigned long handler = (unsigned long) addr;
1933	unsigned long old_handler;
1934
1935#ifdef CONFIG_CPU_MICROMIPS
1936	/*
1937	 * Only the TLB handlers are cache aligned with an even
1938	 * address. All other handlers are on an odd address and
1939	 * require no modification. Otherwise, MIPS32 mode will
1940	 * be entered when handling any TLB exceptions. That
1941	 * would be bad...since we must stay in microMIPS mode.
1942	 */
1943	if (!(handler & 0x1))
1944		handler |= 1;
1945#endif
1946	old_handler = xchg(&exception_handlers[n], handler);
1947
1948	if (n == 0 && cpu_has_divec) {
1949#ifdef CONFIG_CPU_MICROMIPS
1950		unsigned long jump_mask = ~((1 << 27) - 1);
1951#else
1952		unsigned long jump_mask = ~((1 << 28) - 1);
1953#endif
1954		u32 *buf = (u32 *)(ebase + 0x200);
1955		unsigned int k0 = 26;
1956		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
1957			uasm_i_j(&buf, handler & ~jump_mask);
1958			uasm_i_nop(&buf);
1959		} else {
1960			UASM_i_LA(&buf, k0, handler);
1961			uasm_i_jr(&buf, k0);
1962			uasm_i_nop(&buf);
1963		}
1964		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
1965	}
1966	return (void *)old_handler;
1967}
1968
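/*
 * Usage sketch: exception handlers are installed per ExcCode, as done
 * from trap_init() (not part of this excerpt), e.g.:
 *
 *	set_except_vector(EXCCODE_OV, handle_ov);
 *	set_except_vector(EXCCODE_TR, handle_tr);
 *
 * The return value is the previously installed handler.
 */
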
1969static void do_default_vi(void)
1970{
1971	show_regs(get_irq_regs());
1972	panic("Caught unexpected vectored interrupt.");
1973}
1974
1975static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1976{
1977	unsigned long handler;
1978	unsigned long old_handler = vi_handlers[n];
1979	int srssets = current_cpu_data.srsets;
1980	u16 *h;
1981	unsigned char *b;
1982
1983	BUG_ON(!cpu_has_veic && !cpu_has_vint);
1984
1985	if (addr == NULL) {
1986		handler = (unsigned long) do_default_vi;
1987		srs = 0;
1988	} else
1989		handler = (unsigned long) addr;
1990	vi_handlers[n] = handler;
1991
1992	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1993
1994	if (srs >= srssets)
1995		panic("Shadow register set %d not supported", srs);
1996
1997	if (cpu_has_veic) {
1998		if (board_bind_eic_interrupt)
1999			board_bind_eic_interrupt(n, srs);
2000	} else if (cpu_has_vint) {
2001		/* SRSMap is only defined if shadow sets are implemented */
2002		if (srssets > 1)
2003			change_c0_srsmap(0xf << n*4, srs << n*4);
2004	}
2005
2006	if (srs == 0) {
2007		/*
2008		 * If no shadow register set is selected then use the default
2009		 * handler, which does normal register saving and a standard interrupt exit.
2010		 */
2011		extern char except_vec_vi, except_vec_vi_lui;
2012		extern char except_vec_vi_ori, except_vec_vi_end;
2013		extern char rollback_except_vec_vi;
2014		char *vec_start = using_rollback_handler() ?
2015			&rollback_except_vec_vi : &except_vec_vi;
2016#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
2017		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
2018		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
2019#else
2020		const int lui_offset = &except_vec_vi_lui - vec_start;
2021		const int ori_offset = &except_vec_vi_ori - vec_start;
2022#endif
2023		const int handler_len = &except_vec_vi_end - vec_start;
2024
2025		if (handler_len > VECTORSPACING) {
2026			/*
2027			 * Sigh... panicking won't help as the console
2028			 * is probably not configured :(
2029			 */
2030			panic("VECTORSPACING too small");
2031		}
2032
2033		set_handler(((unsigned long)b - ebase), vec_start,
2034#ifdef CONFIG_CPU_MICROMIPS
2035				(handler_len - 1));
2036#else
2037				handler_len);
2038#endif
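		/*
		 * Patch the 16-bit immediates of the LUI/ORI pair in the
		 * copied template so the vector loads the real handler
		 * address; the +2 in the offsets above accounts for the
		 * immediate being the second halfword of the instruction
		 * on big-endian and microMIPS builds.
		 */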
2039		h = (u16 *)(b + lui_offset);
2040		*h = (handler >> 16) & 0xffff;
2041		h = (u16 *)(b + ori_offset);
2042		*h = (handler & 0xffff);
2043		local_flush_icache_range((unsigned long)b,
2044					 (unsigned long)(b+handler_len));
2045	} else {
2047		/*
2048		 * In other cases jump directly to the interrupt handler. It
2049		 * is the handler's responsibility to save registers if required
2050		 * (e.g. hi/lo) and return from the exception using "eret".
2051		 */
2052		u32 insn;
2053
2054		h = (u16 *)b;
2055		/* j handler */
2056#ifdef CONFIG_CPU_MICROMIPS
2057		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
2058#else
2059		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
2060#endif
2061		h[0] = (insn >> 16) & 0xffff;
2062		h[1] = insn & 0xffff;
2063		h[2] = 0;
2064		h[3] = 0;
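		/*
		 * The all-zero word just written pads the vector and serves
		 * as the NOP in the jump's delay slot.
		 */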
2065		local_flush_icache_range((unsigned long)b,
2066					 (unsigned long)(b+8));
2067	}
2068
2069	return (void *)old_handler;
2070}
2071
2072void *set_vi_handler(int n, vi_handler_t addr)
2073{
2074	return set_vi_srs_handler(n, addr, 0);
2075}
2076
2077extern void tlb_init(void);
2078
2079/*
2080 * Timer interrupt
2081 */
2082int cp0_compare_irq;
2083EXPORT_SYMBOL_GPL(cp0_compare_irq);
2084int cp0_compare_irq_shift;
2085
2086/*
2087 * Performance counter IRQ or -1 if shared with timer
2088 */
2089int cp0_perfcount_irq;
2090EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
2091
2092/*
2093 * Fast debug channel IRQ or -1 if not present
2094 */
2095int cp0_fdc_irq;
2096EXPORT_SYMBOL_GPL(cp0_fdc_irq);
2097
2098static int noulri;
2099
2100static int __init ulri_disable(char *s)
2101{
2102	pr_info("Disabling ulri\n");
2103	noulri = 1;
2104
2105	return 1;
2106}
2107__setup("noulri", ulri_disable);
2108
2109/* configure STATUS register */
2110static void configure_status(void)
2111{
2112	/*
2113	 * Disable coprocessors and select 32-bit or 64-bit addressing
2114	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
2115	 * flag that some firmware may have left set and the TS bit (for
2116	 * IP27).  Set XX for ISA IV code to work.
2117	 */
2118	unsigned int status_set = ST0_CU0;
2119#ifdef CONFIG_64BIT
2120	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
2121#endif
2122	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
2123		status_set |= ST0_XX;
2124	if (cpu_has_dsp)
2125		status_set |= ST0_MX;
2126
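	/*
	 * change_c0_status(mask, val) does a read-modify-write of CP0 Status:
	 * every bit in 'mask' is cleared and then set again only if it is set
	 * in 'val'; bits outside 'mask' are left untouched.
	 */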
2127	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
2128			 status_set);
2129}
2130
2131unsigned int hwrena;
2132EXPORT_SYMBOL_GPL(hwrena);
2133
2134/* configure HWRENA register */
2135static void configure_hwrena(void)
2136{
2137	hwrena = cpu_hwrena_impl_bits;
2138
2139	if (cpu_has_mips_r2_r6)
2140		hwrena |= MIPS_HWRENA_CPUNUM |
2141			  MIPS_HWRENA_SYNCISTEP |
2142			  MIPS_HWRENA_CC |
2143			  MIPS_HWRENA_CCRES;
2144
2145	if (!noulri && cpu_has_userlocal)
2146		hwrena |= MIPS_HWRENA_ULR;
2147
2148	if (hwrena)
2149		write_c0_hwrena(hwrena);
2150}
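/*
 * An illustrative sketch (not part of this file) of what HWREna buys us:
 * with MIPS_HWRENA_ULR set, user space can read the UserLocal (TLS)
 * register directly instead of trapping, e.g.
 *
 *	unsigned long tls;
 *	asm volatile("rdhwr %0, $29" : "=r" (tls));
 *
 * With the bit clear the same RDHWR raises a Reserved Instruction
 * exception and is emulated via the handle_ri_rdhwr* handlers installed
 * in trap_init() below.
 */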
2151
2152static void configure_exception_vector(void)
2153{
2154	if (cpu_has_mips_r2_r6) {
2155		unsigned long sr = set_c0_status(ST0_BEV);
2156		/* If available, use WG to set top bits of EBASE */
2157		if (cpu_has_ebase_wg) {
2158#ifdef CONFIG_64BIT
2159			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
2160#else
2161			write_c0_ebase(ebase | MIPS_EBASE_WG);
2162#endif
2163		}
2164		write_c0_ebase(ebase);
2165		write_c0_status(sr);
2166	}
2167	if (cpu_has_veic || cpu_has_vint) {
2168		/* Setting vector spacing enables EI/VI mode  */
2169		change_c0_intctl(0x3e0, VECTORSPACING);
2170	}
2171	if (cpu_has_divec) {
2172		if (cpu_has_mipsmt) {
2173			unsigned int vpflags = dvpe();
2174			set_c0_cause(CAUSEF_IV);
2175			evpe(vpflags);
2176		} else
2177			set_c0_cause(CAUSEF_IV);
2178	}
2179}
2180
2181void per_cpu_trap_init(bool is_boot_cpu)
2182{
2183	unsigned int cpu = smp_processor_id();
2184
2185	configure_status();
2186	configure_hwrena();
2187
2188	configure_exception_vector();
2189
2190	/*
2191	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
2192	 *
2193	 *  o read IntCtl.IPTI to determine the timer interrupt
2194	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
2195	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
2196	 */
2197	if (cpu_has_mips_r2_r6) {
2198		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2199		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2200		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
2201		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
2202		if (!cp0_fdc_irq)
2203			cp0_fdc_irq = -1;
2204
2205	} else {
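		/*
		 * Pre-R2 cores hardwire the timer interrupt to IP7 and have
		 * no IntCtl register to probe; the performance counter, if
		 * present, shares that line (hence cp0_perfcount_irq = -1).
		 */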
2206		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
2207		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
2208		cp0_perfcount_irq = -1;
2209		cp0_fdc_irq = -1;
2210	}
2211
2212	if (cpu_has_mmid)
2213		cpu_data[cpu].asid_cache = 0;
2214	else if (!cpu_data[cpu].asid_cache)
2215		cpu_data[cpu].asid_cache = asid_first_version(cpu);
2216
2217	mmgrab(&init_mm);
2218	current->active_mm = &init_mm;
2219	BUG_ON(current->mm);
2220	enter_lazy_tlb(&init_mm, current);
2221
2222	/* Boot CPU's cache setup in setup_arch(). */
2223	if (!is_boot_cpu)
2224		cpu_cache_init();
2225	tlb_init();
2226	TLBMISS_HANDLER_SETUP();
2227}
2228
2229/* Install CPU exception handler */
2230void set_handler(unsigned long offset, void *addr, unsigned long size)
2231{
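	/*
	 * microMIPS handler symbols have the ISA mode bit set in bit 0, so
	 * step back one byte to copy from the real start of the code.
	 */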
2232#ifdef CONFIG_CPU_MICROMIPS
2233	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
2234#else
2235	memcpy((void *)(ebase + offset), addr, size);
2236#endif
2237	local_flush_icache_range(ebase + offset, ebase + offset + size);
2238}
2239
2240static const char panic_null_cerr[] =
2241	"Trying to set NULL cache error exception handler\n";
2242
2243/*
2244 * Install uncached CPU exception handler.
2245 * This is suitable only for the cache error exception, which is the only
2246 * exception handler that runs uncached.
2247 */
2248void set_uncached_handler(unsigned long offset, void *addr,
2249	unsigned long size)
2250{
2251	unsigned long uncached_ebase = CKSEG1ADDR(ebase);
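	/*
	 * CKSEG1 is the unmapped, uncached alias of the low 512MB of physical
	 * memory, so this writes the same bytes that 'ebase' maps, just
	 * bypassing the caches.
	 */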
2252
2253	if (!addr)
2254		panic(panic_null_cerr);
2255
2256	memcpy((void *)(uncached_ebase + offset), addr, size);
2257}
2258
2259static int __initdata rdhwr_noopt;
2260static int __init set_rdhwr_noopt(char *str)
2261{
2262	rdhwr_noopt = 1;
2263	return 1;
2264}
2265
2266__setup("rdhwr_noopt", set_rdhwr_noopt);
2267
2268void __init trap_init(void)
2269{
2270	extern char except_vec3_generic;
2271	extern char except_vec4;
2272	extern char except_vec3_r4000;
2273	unsigned long i, vec_size;
2274	phys_addr_t ebase_pa;
2275
2276	check_wait();
2277
2278	if (!cpu_has_mips_r2_r6) {
2279		ebase = CAC_BASE;
2280		ebase_pa = virt_to_phys((void *)ebase);
2281		vec_size = 0x400;
2282
2283		memblock_reserve(ebase_pa, vec_size);
2284	} else {
2285		if (cpu_has_veic || cpu_has_vint)
2286			vec_size = 0x200 + VECTORSPACING*64;
2287		else
2288			vec_size = PAGE_SIZE;
2289
2290		ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
2291		if (!ebase_pa)
2292			panic("%s: Failed to allocate %lu bytes align=0x%x\n",
2293			      __func__, vec_size, 1 << fls(vec_size));
2294
2295		/*
2296		 * Try to ensure ebase resides in KSeg0 if possible.
2297		 *
2298		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
2299		 * hitting a poorly defined exception base for Cache Errors.
2300		 * The allocation is likely to be in the low 512MB of physical memory,
2301		 * in which case we should be able to convert to KSeg0.
2302		 *
2303		 * EVA is special though as it allows segments to be rearranged
2304		 * and to become uncached during cache error handling.
2305		 */
2306		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
2307			ebase = CKSEG0ADDR(ebase_pa);
2308		else
2309			ebase = (unsigned long)phys_to_virt(ebase_pa);
2310	}
2311
2312	if (cpu_has_mmips) {
2313		unsigned int config3 = read_c0_config3();
2314
2315		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
2316			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
2317		else
2318			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
2319	}
2320
2321	if (board_ebase_setup)
2322		board_ebase_setup();
2323	per_cpu_trap_init(true);
2324	memblock_set_bottom_up(false);
2325
2326	/*
2327	 * Copy the generic exception handlers to their final destination.
2328	 * This will be overridden later as suitable for a particular
2329	 * configuration.
2330	 */
2331	set_handler(0x180, &except_vec3_generic, 0x80);
2332
2333	/*
2334	 * Set up default vectors
2335	 */
2336	for (i = 0; i <= 31; i++)
2337		set_except_vector(i, handle_reserved);
2338
2339	/*
2340	 * Copy the EJTAG debug exception vector handler code to its final
2341	 * destination.
2342	 */
2343	if (cpu_has_ejtag && board_ejtag_handler_setup)
2344		board_ejtag_handler_setup();
2345
2346	/*
2347	 * Only some CPUs have the watch exceptions.
2348	 */
2349	if (cpu_has_watch)
2350		set_except_vector(EXCCODE_WATCH, handle_watch);
2351
2352	/*
2353	 * Initialise interrupt handlers
2354	 */
2355	if (cpu_has_veic || cpu_has_vint) {
2356		int nvec = cpu_has_veic ? 64 : 8;
2357		for (i = 0; i < nvec; i++)
2358			set_vi_handler(i, NULL);
2359	}
2360	else if (cpu_has_divec)
2361		set_handler(0x200, &except_vec4, 0x8);
2362
2363	/*
2364	 * Some CPUs can enable/disable cache parity detection, but do
2365	 * it in different ways.
2366	 */
2367	parity_protection_init();
2368
2369	/*
2370	 * The Data Bus Errors / Instruction Bus Errors are signaled
2371	 * by external hardware.  Therefore these two exceptions
2372	 * may have board-specific handlers.
2373	 */
2374	if (board_be_init)
2375		board_be_init();
2376
2377	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
2378					rollback_handle_int : handle_int);
2379	set_except_vector(EXCCODE_MOD, handle_tlbm);
2380	set_except_vector(EXCCODE_TLBL, handle_tlbl);
2381	set_except_vector(EXCCODE_TLBS, handle_tlbs);
2382
2383	set_except_vector(EXCCODE_ADEL, handle_adel);
2384	set_except_vector(EXCCODE_ADES, handle_ades);
2385
2386	set_except_vector(EXCCODE_IBE, handle_ibe);
2387	set_except_vector(EXCCODE_DBE, handle_dbe);
2388
2389	set_except_vector(EXCCODE_SYS, handle_sys);
2390	set_except_vector(EXCCODE_BP, handle_bp);
2391
2392	if (rdhwr_noopt)
2393		set_except_vector(EXCCODE_RI, handle_ri);
2394	else {
2395		if (cpu_has_vtag_icache)
2396			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2397		else if (current_cpu_type() == CPU_LOONGSON3)
2398			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2399		else
2400			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
2401	}
2402
2403	set_except_vector(EXCCODE_CPU, handle_cpu);
2404	set_except_vector(EXCCODE_OV, handle_ov);
2405	set_except_vector(EXCCODE_TR, handle_tr);
2406	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
2407
2408	if (board_nmi_handler_setup)
2409		board_nmi_handler_setup();
2410
2411	if (cpu_has_fpu && !cpu_has_nofpuex)
2412		set_except_vector(EXCCODE_FPE, handle_fpe);
2413
2414	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
2415
2416	if (cpu_has_rixiex) {
2417		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
2418		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
2419	}
2420
2421	set_except_vector(EXCCODE_MSADIS, handle_msa);
2422	set_except_vector(EXCCODE_MDMX, handle_mdmx);
2423
2424	if (cpu_has_mcheck)
2425		set_except_vector(EXCCODE_MCHECK, handle_mcheck);
2426
2427	if (cpu_has_mipsmt)
2428		set_except_vector(EXCCODE_THREAD, handle_mt);
2429
2430	set_except_vector(EXCCODE_DSPDIS, handle_dsp);
2431
2432	if (board_cache_error_setup)
2433		board_cache_error_setup();
2434
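	/*
	 * Choose the general exception vector: R4[04]00 needs the larger
	 * VCE-aware vector, other R4000-style (4kex) CPUs take general
	 * exceptions at offset 0x180, and R3000-style CPUs at offset 0x080.
	 */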
2435	if (cpu_has_vce)
2436		/* Special exception: R4[04]00 also uses the divec space. */
2437		set_handler(0x180, &except_vec3_r4000, 0x100);
2438	else if (cpu_has_4kex)
2439		set_handler(0x180, &except_vec3_generic, 0x80);
2440	else
2441		set_handler(0x080, &except_vec3_generic, 0x80);
2442
2443	local_flush_icache_range(ebase, ebase + vec_size);
2444
2445	sort_extable(__start___dbe_table, __stop___dbe_table);
2446
2447	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
2448}
2449
2450static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2451			    void *v)
2452{
2453	switch (cmd) {
2454	case CPU_PM_ENTER_FAILED:
2455	case CPU_PM_EXIT:
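		/*
		 * CP0 state (Status, HWREna, EBase, Cause.IV) may have been
		 * lost across the low-power state, so reprogram it exactly
		 * as per_cpu_trap_init() did at boot.
		 */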
2456		configure_status();
2457		configure_hwrena();
2458		configure_exception_vector();
2459
2460		/* Restore register with CPU number for TLB handlers */
2461		TLBMISS_HANDLER_RESTORE();
2462
2463		break;
2464	}
2465
2466	return NOTIFY_OK;
2467}
2468
2469static struct notifier_block trap_pm_notifier_block = {
2470	.notifier_call = trap_pm_notifier,
2471};
2472
2473static int __init trap_pm_init(void)
2474{
2475	return cpu_pm_register_notifier(&trap_pm_notifier_block);
2476}
2477arch_initcall(trap_pm_init);
v4.10.11
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
   7 * Copyright (C) 1995, 1996 Paul M. Antoine
   8 * Copyright (C) 1998 Ulf Carlsson
   9 * Copyright (C) 1999 Silicon Graphics, Inc.
  10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11 * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
  12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  13 * Copyright (C) 2014, Imagination Technologies Ltd.
  14 */
  15#include <linux/bitops.h>
  16#include <linux/bug.h>
  17#include <linux/compiler.h>
  18#include <linux/context_tracking.h>
  19#include <linux/cpu_pm.h>
  20#include <linux/kexec.h>
  21#include <linux/init.h>
  22#include <linux/kernel.h>
  23#include <linux/module.h>
  24#include <linux/extable.h>
  25#include <linux/mm.h>
  26#include <linux/sched.h>
 
  27#include <linux/smp.h>
  28#include <linux/spinlock.h>
  29#include <linux/kallsyms.h>
  30#include <linux/bootmem.h>
  31#include <linux/interrupt.h>
  32#include <linux/ptrace.h>
  33#include <linux/kgdb.h>
  34#include <linux/kdebug.h>
  35#include <linux/kprobes.h>
  36#include <linux/notifier.h>
  37#include <linux/kdb.h>
  38#include <linux/irq.h>
  39#include <linux/perf_event.h>
  40
  41#include <asm/addrspace.h>
  42#include <asm/bootinfo.h>
  43#include <asm/branch.h>
  44#include <asm/break.h>
  45#include <asm/cop2.h>
  46#include <asm/cpu.h>
  47#include <asm/cpu-type.h>
  48#include <asm/dsp.h>
  49#include <asm/fpu.h>
  50#include <asm/fpu_emulator.h>
  51#include <asm/idle.h>
  52#include <asm/mips-cm.h>
 
  53#include <asm/mips-r2-to-r6-emul.h>
  54#include <asm/mipsregs.h>
  55#include <asm/mipsmtregs.h>
  56#include <asm/module.h>
  57#include <asm/msa.h>
  58#include <asm/pgtable.h>
  59#include <asm/ptrace.h>
  60#include <asm/sections.h>
  61#include <asm/siginfo.h>
  62#include <asm/tlbdebug.h>
  63#include <asm/traps.h>
  64#include <linux/uaccess.h>
  65#include <asm/watch.h>
  66#include <asm/mmu_context.h>
  67#include <asm/types.h>
  68#include <asm/stacktrace.h>
 
  69#include <asm/uasm.h>
  70
  71extern void check_wait(void);
  72extern asmlinkage void rollback_handle_int(void);
  73extern asmlinkage void handle_int(void);
  74extern u32 handle_tlbl[];
  75extern u32 handle_tlbs[];
  76extern u32 handle_tlbm[];
  77extern asmlinkage void handle_adel(void);
  78extern asmlinkage void handle_ades(void);
  79extern asmlinkage void handle_ibe(void);
  80extern asmlinkage void handle_dbe(void);
  81extern asmlinkage void handle_sys(void);
  82extern asmlinkage void handle_bp(void);
  83extern asmlinkage void handle_ri(void);
  84extern asmlinkage void handle_ri_rdhwr_tlbp(void);
  85extern asmlinkage void handle_ri_rdhwr(void);
  86extern asmlinkage void handle_cpu(void);
  87extern asmlinkage void handle_ov(void);
  88extern asmlinkage void handle_tr(void);
  89extern asmlinkage void handle_msa_fpe(void);
  90extern asmlinkage void handle_fpe(void);
  91extern asmlinkage void handle_ftlb(void);
  92extern asmlinkage void handle_msa(void);
  93extern asmlinkage void handle_mdmx(void);
  94extern asmlinkage void handle_watch(void);
  95extern asmlinkage void handle_mt(void);
  96extern asmlinkage void handle_dsp(void);
  97extern asmlinkage void handle_mcheck(void);
  98extern asmlinkage void handle_reserved(void);
  99extern void tlb_do_page_fault_0(void);
 100
 101void (*board_be_init)(void);
 102int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 103void (*board_nmi_handler_setup)(void);
 104void (*board_ejtag_handler_setup)(void);
 105void (*board_bind_eic_interrupt)(int irq, int regset);
 106void (*board_ebase_setup)(void);
 107void(*board_cache_error_setup)(void);
 108
 109static void show_raw_backtrace(unsigned long reg29)
 110{
 111	unsigned long *sp = (unsigned long *)(reg29 & ~3);
 112	unsigned long addr;
 113
 114	printk("Call Trace:");
 115#ifdef CONFIG_KALLSYMS
 116	printk("\n");
 117#endif
 118	while (!kstack_end(sp)) {
 119		unsigned long __user *p =
 120			(unsigned long __user *)(unsigned long)sp++;
 121		if (__get_user(addr, p)) {
 122			printk(" (Bad stack address)");
 123			break;
 124		}
 125		if (__kernel_text_address(addr))
 126			print_ip_sym(addr);
 127	}
 128	printk("\n");
 129}
 130
 131#ifdef CONFIG_KALLSYMS
 132int raw_show_trace;
 133static int __init set_raw_show_trace(char *str)
 134{
 135	raw_show_trace = 1;
 136	return 1;
 137}
 138__setup("raw_show_trace", set_raw_show_trace);
 139#endif
 140
 141static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
 142{
 143	unsigned long sp = regs->regs[29];
 144	unsigned long ra = regs->regs[31];
 145	unsigned long pc = regs->cp0_epc;
 146
 147	if (!task)
 148		task = current;
 149
 150	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
 151		show_raw_backtrace(sp);
 152		return;
 153	}
 154	printk("Call Trace:\n");
 155	do {
 156		print_ip_sym(pc);
 157		pc = unwind_stack(task, &sp, pc, &ra);
 158	} while (pc);
 159	pr_cont("\n");
 160}
 161
 162/*
 163 * This routine abuses get_user()/put_user() to reference pointers
 164 * with at least a bit of error checking ...
 165 */
 166static void show_stacktrace(struct task_struct *task,
 167	const struct pt_regs *regs)
 168{
 169	const int field = 2 * sizeof(unsigned long);
 170	long stackdata;
 171	int i;
 172	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
 173
 174	printk("Stack :");
 175	i = 0;
 176	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
 177		if (i && ((i % (64 / field)) == 0)) {
 178			pr_cont("\n");
 179			printk("       ");
 180		}
 181		if (i > 39) {
 182			pr_cont(" ...");
 183			break;
 184		}
 185
 186		if (__get_user(stackdata, sp++)) {
 187			pr_cont(" (Bad stack address)");
 188			break;
 189		}
 190
 191		pr_cont(" %0*lx", field, stackdata);
 192		i++;
 193	}
 194	pr_cont("\n");
 195	show_backtrace(task, regs);
 196}
 197
 198void show_stack(struct task_struct *task, unsigned long *sp)
 199{
 200	struct pt_regs regs;
 201	mm_segment_t old_fs = get_fs();
 
 
 202	if (sp) {
 203		regs.regs[29] = (unsigned long)sp;
 204		regs.regs[31] = 0;
 205		regs.cp0_epc = 0;
 206	} else {
 207		if (task && task != current) {
 208			regs.regs[29] = task->thread.reg29;
 209			regs.regs[31] = 0;
 210			regs.cp0_epc = task->thread.reg31;
 211#ifdef CONFIG_KGDB_KDB
 212		} else if (atomic_read(&kgdb_active) != -1 &&
 213			   kdb_current_regs) {
 214			memcpy(&regs, kdb_current_regs, sizeof(regs));
 215#endif /* CONFIG_KGDB_KDB */
 216		} else {
 217			prepare_frametrace(&regs);
 218		}
 219	}
 220	/*
 221	 * show_stack() deals exclusively with kernel mode, so be sure to access
 222	 * the stack in the kernel (not user) address space.
 223	 */
 224	set_fs(KERNEL_DS);
 225	show_stacktrace(task, &regs);
 226	set_fs(old_fs);
 227}
 228
 229static void show_code(unsigned int __user *pc)
 230{
 231	long i;
 232	unsigned short __user *pc16 = NULL;
 233
 234	printk("Code:");
 235
 236	if ((unsigned long)pc & 1)
 237		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
 238	for(i = -3 ; i < 6 ; i++) {
 239		unsigned int insn;
 240		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
 241			pr_cont(" (Bad address in epc)\n");
 242			break;
 243		}
 244		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
 245	}
 246	pr_cont("\n");
 247}
 248
 249static void __show_regs(const struct pt_regs *regs)
 250{
 251	const int field = 2 * sizeof(unsigned long);
 252	unsigned int cause = regs->cp0_cause;
 253	unsigned int exccode;
 254	int i;
 255
 256	show_regs_print_info(KERN_DEFAULT);
 257
 258	/*
 259	 * Saved main processor registers
 260	 */
 261	for (i = 0; i < 32; ) {
 262		if ((i % 4) == 0)
 263			printk("$%2d   :", i);
 264		if (i == 0)
 265			pr_cont(" %0*lx", field, 0UL);
 266		else if (i == 26 || i == 27)
 267			pr_cont(" %*s", field, "");
 268		else
 269			pr_cont(" %0*lx", field, regs->regs[i]);
 270
 271		i++;
 272		if ((i % 4) == 0)
 273			pr_cont("\n");
 274	}
 275
 276#ifdef CONFIG_CPU_HAS_SMARTMIPS
 277	printk("Acx    : %0*lx\n", field, regs->acx);
 278#endif
 279	printk("Hi    : %0*lx\n", field, regs->hi);
 280	printk("Lo    : %0*lx\n", field, regs->lo);
 
 
 281
 282	/*
 283	 * Saved cp0 registers
 284	 */
 285	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
 286	       (void *) regs->cp0_epc);
 287	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
 288	       (void *) regs->regs[31]);
 289
 290	printk("Status: %08x	", (uint32_t) regs->cp0_status);
 291
 292	if (cpu_has_3kex) {
 293		if (regs->cp0_status & ST0_KUO)
 294			pr_cont("KUo ");
 295		if (regs->cp0_status & ST0_IEO)
 296			pr_cont("IEo ");
 297		if (regs->cp0_status & ST0_KUP)
 298			pr_cont("KUp ");
 299		if (regs->cp0_status & ST0_IEP)
 300			pr_cont("IEp ");
 301		if (regs->cp0_status & ST0_KUC)
 302			pr_cont("KUc ");
 303		if (regs->cp0_status & ST0_IEC)
 304			pr_cont("IEc ");
 305	} else if (cpu_has_4kex) {
 306		if (regs->cp0_status & ST0_KX)
 307			pr_cont("KX ");
 308		if (regs->cp0_status & ST0_SX)
 309			pr_cont("SX ");
 310		if (regs->cp0_status & ST0_UX)
 311			pr_cont("UX ");
 312		switch (regs->cp0_status & ST0_KSU) {
 313		case KSU_USER:
 314			pr_cont("USER ");
 315			break;
 316		case KSU_SUPERVISOR:
 317			pr_cont("SUPERVISOR ");
 318			break;
 319		case KSU_KERNEL:
 320			pr_cont("KERNEL ");
 321			break;
 322		default:
 323			pr_cont("BAD_MODE ");
 324			break;
 325		}
 326		if (regs->cp0_status & ST0_ERL)
 327			pr_cont("ERL ");
 328		if (regs->cp0_status & ST0_EXL)
 329			pr_cont("EXL ");
 330		if (regs->cp0_status & ST0_IE)
 331			pr_cont("IE ");
 332	}
 333	pr_cont("\n");
 334
 335	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
 336	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
 337
 338	if (1 <= exccode && exccode <= 5)
 339		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
 340
 341	printk("PrId  : %08x (%s)\n", read_c0_prid(),
 342	       cpu_name_string());
 343}
 344
 345/*
 346 * FIXME: really the generic show_regs should take a const pointer argument.
 347 */
 348void show_regs(struct pt_regs *regs)
 349{
 350	__show_regs((struct pt_regs *)regs);
 
 351}
 352
 353void show_registers(struct pt_regs *regs)
 354{
 355	const int field = 2 * sizeof(unsigned long);
 356	mm_segment_t old_fs = get_fs();
 357
 358	__show_regs(regs);
 359	print_modules();
 360	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
 361	       current->comm, current->pid, current_thread_info(), current,
 362	      field, current_thread_info()->tp_value);
 363	if (cpu_has_userlocal) {
 364		unsigned long tls;
 365
 366		tls = read_c0_userlocal();
 367		if (tls != current_thread_info()->tp_value)
 368			printk("*HwTLS: %0*lx\n", field, tls);
 369	}
 370
 371	if (!user_mode(regs))
 372		/* Necessary for getting the correct stack content */
 373		set_fs(KERNEL_DS);
 374	show_stacktrace(current, regs);
 375	show_code((unsigned int __user *) regs->cp0_epc);
 376	printk("\n");
 377	set_fs(old_fs);
 378}
 379
 380static DEFINE_RAW_SPINLOCK(die_lock);
 381
 382void __noreturn die(const char *str, struct pt_regs *regs)
 383{
 384	static int die_counter;
 385	int sig = SIGSEGV;
 386
 387	oops_enter();
 388
 389	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
 390		       SIGSEGV) == NOTIFY_STOP)
 391		sig = 0;
 392
 393	console_verbose();
 394	raw_spin_lock_irq(&die_lock);
 395	bust_spinlocks(1);
 396
 397	printk("%s[#%d]:\n", str, ++die_counter);
 398	show_registers(regs);
 399	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 400	raw_spin_unlock_irq(&die_lock);
 401
 402	oops_exit();
 403
 404	if (in_interrupt())
 405		panic("Fatal exception in interrupt");
 406
 407	if (panic_on_oops)
 408		panic("Fatal exception");
 409
 410	if (regs && kexec_should_crash(current))
 411		crash_kexec(regs);
 412
 413	do_exit(sig);
 414}
 415
 416extern struct exception_table_entry __start___dbe_table[];
 417extern struct exception_table_entry __stop___dbe_table[];
 418
 419__asm__(
 420"	.section	__dbe_table, \"a\"\n"
 421"	.previous			\n");
 422
 423/* Given an address, look for it in the exception tables. */
 424static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
 425{
 426	const struct exception_table_entry *e;
 427
 428	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
 
 429	if (!e)
 430		e = search_module_dbetables(addr);
 431	return e;
 432}
 433
 434asmlinkage void do_be(struct pt_regs *regs)
 435{
 436	const int field = 2 * sizeof(unsigned long);
 437	const struct exception_table_entry *fixup = NULL;
 438	int data = regs->cp0_cause & 4;
 439	int action = MIPS_BE_FATAL;
 440	enum ctx_state prev_state;
 441
 442	prev_state = exception_enter();
 443	/* XXX For now.	 Fixme, this searches the wrong table ...  */
 444	if (data && !user_mode(regs))
 445		fixup = search_dbe_tables(exception_epc(regs));
 446
 447	if (fixup)
 448		action = MIPS_BE_FIXUP;
 449
 450	if (board_be_handler)
 451		action = board_be_handler(regs, fixup != NULL);
 452	else
 453		mips_cm_error_report();
 454
 455	switch (action) {
 456	case MIPS_BE_DISCARD:
 457		goto out;
 458	case MIPS_BE_FIXUP:
 459		if (fixup) {
 460			regs->cp0_epc = fixup->nextinsn;
 461			goto out;
 462		}
 463		break;
 464	default:
 465		break;
 466	}
 467
 468	/*
 469	 * Assume it would be too dangerous to continue ...
 470	 */
 471	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
 472	       data ? "Data" : "Instruction",
 473	       field, regs->cp0_epc, field, regs->regs[31]);
 474	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
 475		       SIGBUS) == NOTIFY_STOP)
 476		goto out;
 477
 478	die_if_kernel("Oops", regs);
 479	force_sig(SIGBUS, current);
 480
 481out:
 482	exception_exit(prev_state);
 483}
 484
 485/*
 486 * ll/sc, rdhwr, sync emulation
 487 */
 488
 489#define OPCODE 0xfc000000
 490#define BASE   0x03e00000
 491#define RT     0x001f0000
 492#define OFFSET 0x0000ffff
 493#define LL     0xc0000000
 494#define SC     0xe0000000
 495#define SPEC0  0x00000000
 496#define SPEC3  0x7c000000
 497#define RD     0x0000f800
 498#define FUNC   0x0000003f
 499#define SYNC   0x0000000f
 500#define RDHWR  0x0000003b
 501
 502/*  microMIPS definitions   */
 503#define MM_POOL32A_FUNC 0xfc00ffff
 504#define MM_RDHWR        0x00006b3c
 505#define MM_RS           0x001f0000
 506#define MM_RT           0x03e00000
 507
 508/*
 509 * The ll_bit is cleared by r*_switch.S
 510 */
 511
 512unsigned int ll_bit;
 513struct task_struct *ll_task;
 514
 515static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
 516{
 517	unsigned long value, __user *vaddr;
 518	long offset;
 519
 520	/*
 521	 * analyse the ll instruction that just caused a ri exception
 522	 * and put the referenced address to addr.
 523	 */
 524
 525	/* sign extend offset */
 526	offset = opcode & OFFSET;
 527	offset <<= 16;
 528	offset >>= 16;
 529
 530	vaddr = (unsigned long __user *)
 531		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 532
 533	if ((unsigned long)vaddr & 3)
 534		return SIGBUS;
 535	if (get_user(value, vaddr))
 536		return SIGSEGV;
 537
 538	preempt_disable();
 539
 540	if (ll_task == NULL || ll_task == current) {
 541		ll_bit = 1;
 542	} else {
 543		ll_bit = 0;
 544	}
 545	ll_task = current;
 546
 547	preempt_enable();
 548
 549	regs->regs[(opcode & RT) >> 16] = value;
 550
 551	return 0;
 552}
 553
 554static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
 555{
 556	unsigned long __user *vaddr;
 557	unsigned long reg;
 558	long offset;
 559
 560	/*
 561	 * analyse the sc instruction that just caused a ri exception
 562	 * and put the referenced address to addr.
 563	 */
 564
 565	/* sign extend offset */
 566	offset = opcode & OFFSET;
 567	offset <<= 16;
 568	offset >>= 16;
 569
 570	vaddr = (unsigned long __user *)
 571		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 572	reg = (opcode & RT) >> 16;
 573
 574	if ((unsigned long)vaddr & 3)
 575		return SIGBUS;
 576
 577	preempt_disable();
 578
 579	if (ll_bit == 0 || ll_task != current) {
 580		regs->regs[reg] = 0;
 581		preempt_enable();
 582		return 0;
 583	}
 584
 585	preempt_enable();
 586
 587	if (put_user(regs->regs[reg], vaddr))
 588		return SIGSEGV;
 589
 590	regs->regs[reg] = 1;
 591
 592	return 0;
 593}
 594
 595/*
 596 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 597 * opcodes are supposed to result in coprocessor unusable exceptions if
 598 * executed on ll/sc-less processors.  That's the theory.  In practice a
 599 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 600 * instead, so we're doing the emulation thing in both exception handlers.
 601 */
 602static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 603{
 604	if ((opcode & OPCODE) == LL) {
 605		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 606				1, regs, 0);
 607		return simulate_ll(regs, opcode);
 608	}
 609	if ((opcode & OPCODE) == SC) {
 610		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 611				1, regs, 0);
 612		return simulate_sc(regs, opcode);
 613	}
 614
 615	return -1;			/* Must be something else ... */
 616}
 617
 618/*
 619 * Simulate trapping 'rdhwr' instructions to provide user accessible
 620 * registers not implemented in hardware.
 621 */
 622static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
 623{
 624	struct thread_info *ti = task_thread_info(current);
 625
 626	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 627			1, regs, 0);
 628	switch (rd) {
 629	case MIPS_HWR_CPUNUM:		/* CPU number */
 630		regs->regs[rt] = smp_processor_id();
 631		return 0;
 632	case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
 633		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
 634				     current_cpu_data.icache.linesz);
 635		return 0;
 636	case MIPS_HWR_CC:		/* Read count register */
 637		regs->regs[rt] = read_c0_count();
 638		return 0;
 639	case MIPS_HWR_CCRES:		/* Count register resolution */
 640		switch (current_cpu_type()) {
 641		case CPU_20KC:
 642		case CPU_25KF:
 643			regs->regs[rt] = 1;
 644			break;
 645		default:
 646			regs->regs[rt] = 2;
 647		}
 648		return 0;
 649	case MIPS_HWR_ULR:		/* Read UserLocal register */
 650		regs->regs[rt] = ti->tp_value;
 651		return 0;
 652	default:
 653		return -1;
 654	}
 655}
 656
 657static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
 658{
 659	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
 660		int rd = (opcode & RD) >> 11;
 661		int rt = (opcode & RT) >> 16;
 662
 663		simulate_rdhwr(regs, rd, rt);
 664		return 0;
 665	}
 666
 667	/* Not ours.  */
 668	return -1;
 669}
 670
 671static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
 672{
 673	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
 674		int rd = (opcode & MM_RS) >> 16;
 675		int rt = (opcode & MM_RT) >> 21;
 676		simulate_rdhwr(regs, rd, rt);
 677		return 0;
 678	}
 679
 680	/* Not ours.  */
 681	return -1;
 682}
 683
 684static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 685{
 686	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
 687		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 688				1, regs, 0);
 689		return 0;
 690	}
 691
 692	return -1;			/* Must be something else ... */
 693}
 694
 695asmlinkage void do_ov(struct pt_regs *regs)
 696{
 697	enum ctx_state prev_state;
 698	siginfo_t info = {
 699		.si_signo = SIGFPE,
 700		.si_code = FPE_INTOVF,
 701		.si_addr = (void __user *)regs->cp0_epc,
 702	};
 703
 704	prev_state = exception_enter();
 705	die_if_kernel("Integer overflow", regs);
 706
 707	force_sig_info(SIGFPE, &info, current);
 708	exception_exit(prev_state);
 709}
 710
 
 
 711/*
 712 * Send SIGFPE according to FCSR Cause bits, which must have already
 713 * been masked against Enable bits.  This is impotant as Inexact can
 714 * happen together with Overflow or Underflow, and `ptrace' can set
 715 * any bits.
 716 */
 717void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
 718		     struct task_struct *tsk)
 719{
 720	struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
 721
 722	if (fcr31 & FPU_CSR_INV_X)
 723		si.si_code = FPE_FLTINV;
 724	else if (fcr31 & FPU_CSR_DIV_X)
 725		si.si_code = FPE_FLTDIV;
 726	else if (fcr31 & FPU_CSR_OVF_X)
 727		si.si_code = FPE_FLTOVF;
 728	else if (fcr31 & FPU_CSR_UDF_X)
 729		si.si_code = FPE_FLTUND;
 730	else if (fcr31 & FPU_CSR_INE_X)
 731		si.si_code = FPE_FLTRES;
 732	else
 733		si.si_code = __SI_FAULT;
 734	force_sig_info(SIGFPE, &si, tsk);
 735}
 736
 737int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 738{
 739	struct siginfo si = { 0 };
 740	struct vm_area_struct *vma;
 741
 742	switch (sig) {
 743	case 0:
 744		return 0;
 745
 746	case SIGFPE:
 747		force_fcr31_sig(fcr31, fault_addr, current);
 748		return 1;
 749
 750	case SIGBUS:
 751		si.si_addr = fault_addr;
 752		si.si_signo = sig;
 753		si.si_code = BUS_ADRERR;
 754		force_sig_info(sig, &si, current);
 755		return 1;
 756
 757	case SIGSEGV:
 758		si.si_addr = fault_addr;
 759		si.si_signo = sig;
 760		down_read(&current->mm->mmap_sem);
 761		vma = find_vma(current->mm, (unsigned long)fault_addr);
 762		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
 763			si.si_code = SEGV_ACCERR;
 764		else
 765			si.si_code = SEGV_MAPERR;
 766		up_read(&current->mm->mmap_sem);
 767		force_sig_info(sig, &si, current);
 768		return 1;
 769
 770	default:
 771		force_sig(sig, current);
 772		return 1;
 773	}
 774}
 775
 776static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 777		       unsigned long old_epc, unsigned long old_ra)
 778{
 779	union mips_instruction inst = { .word = opcode };
 780	void __user *fault_addr;
 781	unsigned long fcr31;
 782	int sig;
 783
 784	/* If it's obviously not an FP instruction, skip it */
 785	switch (inst.i_format.opcode) {
 786	case cop1_op:
 787	case cop1x_op:
 788	case lwc1_op:
 789	case ldc1_op:
 790	case swc1_op:
 791	case sdc1_op:
 792		break;
 793
 794	default:
 795		return -1;
 796	}
 797
 798	/*
 799	 * do_ri skipped over the instruction via compute_return_epc, undo
 800	 * that for the FPU emulator.
 801	 */
 802	regs->cp0_epc = old_epc;
 803	regs->regs[31] = old_ra;
 804
 805	/* Save the FP context to struct thread_struct */
 806	lose_fpu(1);
 807
 808	/* Run the emulator */
 809	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 810				       &fault_addr);
 811
 812	/*
 813	 * We can't allow the emulated instruction to leave any
 814	 * enabled Cause bits set in $fcr31.
 815	 */
 816	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
 817	current->thread.fpu.fcr31 &= ~fcr31;
 818
 819	/* Restore the hardware register state */
 820	own_fpu(1);
 821
 822	/* Send a signal if required.  */
 823	process_fpemu_return(sig, fault_addr, fcr31);
 824
 825	return 0;
 826}
 827
 828/*
 829 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 830 */
 831asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 832{
 833	enum ctx_state prev_state;
 834	void __user *fault_addr;
 835	int sig;
 836
 837	prev_state = exception_enter();
 838	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
 839		       SIGFPE) == NOTIFY_STOP)
 840		goto out;
 841
 842	/* Clear FCSR.Cause before enabling interrupts */
 843	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
 844	local_irq_enable();
 845
 846	die_if_kernel("FP exception in kernel code", regs);
 847
 848	if (fcr31 & FPU_CSR_UNI_X) {
 849		/*
 850		 * Unimplemented operation exception.  If we've got the full
 851		 * software emulator on-board, let's use it...
 852		 *
 853		 * Force FPU to dump state into task/thread context.  We're
 854		 * moving a lot of data here for what is probably a single
 855		 * instruction, but the alternative is to pre-decode the FP
 856		 * register operands before invoking the emulator, which seems
 857		 * a bit extreme for what should be an infrequent event.
 858		 */
 859		/* Ensure 'resume' not overwrite saved fp context again. */
 860		lose_fpu(1);
 861
 862		/* Run the emulator */
 863		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 864					       &fault_addr);
 865
 866		/*
 867		 * We can't allow the emulated instruction to leave any
 868		 * enabled Cause bits set in $fcr31.
 869		 */
 870		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
 871		current->thread.fpu.fcr31 &= ~fcr31;
 872
 873		/* Restore the hardware register state */
 874		own_fpu(1);	/* Using the FPU again.	 */
 875	} else {
 876		sig = SIGFPE;
 877		fault_addr = (void __user *) regs->cp0_epc;
 878	}
 879
 880	/* Send a signal if required.  */
 881	process_fpemu_return(sig, fault_addr, fcr31);
 882
 883out:
 884	exception_exit(prev_state);
 885}
 886
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 887void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
 888	const char *str)
 889{
 890	siginfo_t info = { 0 };
 891	char b[40];
 892
 893#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 894	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 895			 SIGTRAP) == NOTIFY_STOP)
 896		return;
 897#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 898
 899	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 900		       SIGTRAP) == NOTIFY_STOP)
 901		return;
 902
 903	/*
 904	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
 905	 * insns, even for trap and break codes that indicate arithmetic
 906	 * failures.  Weird ...
 907	 * But should we continue the brokenness???  --macro
 908	 */
 909	switch (code) {
 910	case BRK_OVERFLOW:
 911	case BRK_DIVZERO:
 912		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 913		die_if_kernel(b, regs);
 914		if (code == BRK_DIVZERO)
 915			info.si_code = FPE_INTDIV;
 916		else
 917			info.si_code = FPE_INTOVF;
 918		info.si_signo = SIGFPE;
 919		info.si_addr = (void __user *) regs->cp0_epc;
 920		force_sig_info(SIGFPE, &info, current);
 921		break;
 922	case BRK_BUG:
 923		die_if_kernel("Kernel bug detected", regs);
 924		force_sig(SIGTRAP, current);
 925		break;
 926	case BRK_MEMU:
 927		/*
 928		 * This breakpoint code is used by the FPU emulator to retake
 929		 * control of the CPU after executing the instruction from the
 930		 * delay slot of an emulated branch.
 931		 *
 932		 * Terminate if exception was recognized as a delay slot return
 933		 * otherwise handle as normal.
 934		 */
 935		if (do_dsemulret(regs))
 936			return;
 937
 938		die_if_kernel("Math emu break/trap", regs);
 939		force_sig(SIGTRAP, current);
 940		break;
 941	default:
 942		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 943		die_if_kernel(b, regs);
 944		if (si_code) {
 945			info.si_signo = SIGTRAP;
 946			info.si_code = si_code;
 947			force_sig_info(SIGTRAP, &info, current);
 948		} else {
 949			force_sig(SIGTRAP, current);
 950		}
 951	}
 952}
 953
 954asmlinkage void do_bp(struct pt_regs *regs)
 955{
 956	unsigned long epc = msk_isa16_mode(exception_epc(regs));
 957	unsigned int opcode, bcode;
 958	enum ctx_state prev_state;
 959	mm_segment_t seg;
 960
 961	seg = get_fs();
 962	if (!user_mode(regs))
 963		set_fs(KERNEL_DS);
 964
 965	prev_state = exception_enter();
 966	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 967	if (get_isa16_mode(regs->cp0_epc)) {
 968		u16 instr[2];
 969
 970		if (__get_user(instr[0], (u16 __user *)epc))
 971			goto out_sigsegv;
 972
 973		if (!cpu_has_mmips) {
 974			/* MIPS16e mode */
 975			bcode = (instr[0] >> 5) & 0x3f;
 976		} else if (mm_insn_16bit(instr[0])) {
 977			/* 16-bit microMIPS BREAK */
 978			bcode = instr[0] & 0xf;
 979		} else {
 980			/* 32-bit microMIPS BREAK */
 981			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
 982				goto out_sigsegv;
 983			opcode = (instr[0] << 16) | instr[1];
 984			bcode = (opcode >> 6) & ((1 << 20) - 1);
 985		}
 986	} else {
 987		if (__get_user(opcode, (unsigned int __user *)epc))
 988			goto out_sigsegv;
 989		bcode = (opcode >> 6) & ((1 << 20) - 1);
 990	}
 991
 992	/*
 993	 * There is the ancient bug in the MIPS assemblers that the break
 994	 * code starts left to bit 16 instead to bit 6 in the opcode.
 995	 * Gas is bug-compatible, but not always, grrr...
 996	 * We handle both cases with a simple heuristics.  --macro
 997	 */
 998	if (bcode >= (1 << 10))
 999		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
1000
1001	/*
1002	 * notify the kprobe handlers, if instruction is likely to
1003	 * pertain to them.
1004	 */
1005	switch (bcode) {
1006	case BRK_UPROBE:
1007		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
1008			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1009			goto out;
1010		else
1011			break;
1012	case BRK_UPROBE_XOL:
1013		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
1014			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1015			goto out;
1016		else
1017			break;
1018	case BRK_KPROBE_BP:
1019		if (notify_die(DIE_BREAK, "debug", regs, bcode,
1020			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1021			goto out;
1022		else
1023			break;
1024	case BRK_KPROBE_SSTEPBP:
1025		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
1026			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1027			goto out;
1028		else
1029			break;
1030	default:
1031		break;
1032	}
1033
1034	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
1035
1036out:
1037	set_fs(seg);
1038	exception_exit(prev_state);
1039	return;
1040
1041out_sigsegv:
1042	force_sig(SIGSEGV, current);
1043	goto out;
1044}
1045
1046asmlinkage void do_tr(struct pt_regs *regs)
1047{
1048	u32 opcode, tcode = 0;
1049	enum ctx_state prev_state;
1050	u16 instr[2];
1051	mm_segment_t seg;
1052	unsigned long epc = msk_isa16_mode(exception_epc(regs));
1053
1054	seg = get_fs();
1055	if (!user_mode(regs))
1056		set_fs(get_ds());
1057
1058	prev_state = exception_enter();
1059	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1060	if (get_isa16_mode(regs->cp0_epc)) {
1061		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
1062		    __get_user(instr[1], (u16 __user *)(epc + 2)))
1063			goto out_sigsegv;
1064		opcode = (instr[0] << 16) | instr[1];
1065		/* Immediate versions don't provide a code.  */
1066		if (!(opcode & OPCODE))
1067			tcode = (opcode >> 12) & ((1 << 4) - 1);
1068	} else {
1069		if (__get_user(opcode, (u32 __user *)epc))
1070			goto out_sigsegv;
1071		/* Immediate versions don't provide a code.  */
1072		if (!(opcode & OPCODE))
1073			tcode = (opcode >> 6) & ((1 << 10) - 1);
1074	}
1075
1076	do_trap_or_bp(regs, tcode, 0, "Trap");
1077
1078out:
1079	set_fs(seg);
1080	exception_exit(prev_state);
1081	return;
1082
1083out_sigsegv:
1084	force_sig(SIGSEGV, current);
1085	goto out;
1086}
1087
1088asmlinkage void do_ri(struct pt_regs *regs)
1089{
1090	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1091	unsigned long old_epc = regs->cp0_epc;
1092	unsigned long old31 = regs->regs[31];
1093	enum ctx_state prev_state;
1094	unsigned int opcode = 0;
1095	int status = -1;
1096
1097	/*
1098	 * Avoid any kernel code. Just emulate the R2 instruction
1099	 * as quickly as possible.
1100	 */
1101	if (mipsr2_emulation && cpu_has_mips_r6 &&
1102	    likely(user_mode(regs)) &&
1103	    likely(get_user(opcode, epc) >= 0)) {
1104		unsigned long fcr31 = 0;
1105
1106		status = mipsr2_decoder(regs, opcode, &fcr31);
1107		switch (status) {
1108		case 0:
1109		case SIGEMT:
1110			task_thread_info(current)->r2_emul_return = 1;
1111			return;
1112		case SIGILL:
1113			goto no_r2_instr;
1114		default:
1115			process_fpemu_return(status,
1116					     &current->thread.cp0_baduaddr,
1117					     fcr31);
1118			task_thread_info(current)->r2_emul_return = 1;
1119			return;
1120		}
1121	}
1122
1123no_r2_instr:
1124
1125	prev_state = exception_enter();
1126	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1127
1128	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
1129		       SIGILL) == NOTIFY_STOP)
1130		goto out;
1131
1132	die_if_kernel("Reserved instruction in kernel code", regs);
1133
1134	if (unlikely(compute_return_epc(regs) < 0))
1135		goto out;
1136
1137	if (!get_isa16_mode(regs->cp0_epc)) {
1138		if (unlikely(get_user(opcode, epc) < 0))
1139			status = SIGSEGV;
1140
1141		if (!cpu_has_llsc && status < 0)
1142			status = simulate_llsc(regs, opcode);
1143
1144		if (status < 0)
1145			status = simulate_rdhwr_normal(regs, opcode);
1146
1147		if (status < 0)
1148			status = simulate_sync(regs, opcode);
1149
1150		if (status < 0)
1151			status = simulate_fp(regs, opcode, old_epc, old31);
1152	} else if (cpu_has_mmips) {
1153		unsigned short mmop[2] = { 0 };
1154
1155		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1156			status = SIGSEGV;
1157		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1158			status = SIGSEGV;
1159		opcode = mmop[0];
1160		opcode = (opcode << 16) | mmop[1];
1161
1162		if (status < 0)
1163			status = simulate_rdhwr_mm(regs, opcode);
1164	}
1165
1166	if (status < 0)
1167		status = SIGILL;
1168
1169	if (unlikely(status > 0)) {
1170		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
1171		regs->regs[31] = old31;
1172		force_sig(status, current);
1173	}
1174
1175out:
1176	exception_exit(prev_state);
1177}
1178
1179/*
1180 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
1181 * emulated more than some threshold number of instructions, force migration to
1182 * a "CPU" that has FP support.
1183 */
1184static void mt_ase_fp_affinity(void)
1185{
1186#ifdef CONFIG_MIPS_MT_FPAFF
1187	if (mt_fpemul_threshold > 0 &&
1188	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
1189		/*
1190		 * If there's no FPU present, or if the application has already
1191		 * restricted the allowed set to exclude any CPUs with FPUs,
1192		 * we'll skip the procedure.
1193		 */
1194		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
1195			cpumask_t tmask;
1196
1197			current->thread.user_cpus_allowed
1198				= current->cpus_allowed;
1199			cpumask_and(&tmask, &current->cpus_allowed,
1200				    &mt_fpu_cpumask);
1201			set_cpus_allowed_ptr(current, &tmask);
1202			set_thread_flag(TIF_FPUBOUND);
1203		}
1204	}
1205#endif /* CONFIG_MIPS_MT_FPAFF */
1206}
1207
1208/*
1209 * No lock; only written during early bootup by CPU 0.
1210 */
1211static RAW_NOTIFIER_HEAD(cu2_chain);
1212
1213int __ref register_cu2_notifier(struct notifier_block *nb)
1214{
1215	return raw_notifier_chain_register(&cu2_chain, nb);
1216}
1217
1218int cu2_notifier_call_chain(unsigned long val, void *v)
1219{
1220	return raw_notifier_call_chain(&cu2_chain, val, v);
1221}
1222
1223static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1224	void *data)
1225{
1226	struct pt_regs *regs = data;
1227
1228	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
1229			      "instruction", regs);
1230	force_sig(SIGILL, current);
1231
1232	return NOTIFY_OK;
1233}
1234
1235static int wait_on_fp_mode_switch(atomic_t *p)
1236{
1237	/*
1238	 * The FP mode for this task is currently being switched. That may
1239	 * involve modifications to the format of this tasks FP context which
1240	 * make it unsafe to proceed with execution for the moment. Instead,
1241	 * schedule some other task.
1242	 */
1243	schedule();
1244	return 0;
1245}
1246
1247static int enable_restore_fp_context(int msa)
1248{
1249	int err, was_fpu_owner, prior_msa;
 
1250
1251	/*
1252	 * If an FP mode switch is currently underway, wait for it to
1253	 * complete before proceeding.
1254	 */
1255	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
1256			 wait_on_fp_mode_switch, TASK_KILLABLE);
1257
1258	if (!used_math()) {
1259		/* First time FP context user. */
1260		preempt_disable();
1261		err = init_fpu();
1262		if (msa && !err) {
1263			enable_msa();
1264			init_msa_upper();
1265			set_thread_flag(TIF_USEDMSA);
1266			set_thread_flag(TIF_MSA_CTX_LIVE);
1267		}
1268		preempt_enable();
1269		if (!err)
1270			set_used_math();
1271		return err;
1272	}
1273
1274	/*
1275	 * This task has formerly used the FP context.
1276	 *
1277	 * If this thread has no live MSA vector context then we can simply
1278	 * restore the scalar FP context. If it has live MSA vector context
1279	 * (that is, it has or may have used MSA since last performing a
1280	 * function call) then we'll need to restore the vector context. This
1281	 * applies even if we're currently only executing a scalar FP
1282	 * instruction. This is because if we were to later execute an MSA
1283	 * instruction then we'd either have to:
1284	 *
1285	 *  - Restore the vector context & clobber any registers modified by
1286	 *    scalar FP instructions between now & then.
1287	 *
1288	 * or
1289	 *
1290	 *  - Not restore the vector context & lose the most significant bits
1291	 *    of all vector registers.
1292	 *
1293	 * Neither of those options is acceptable. We cannot restore the least
1294	 * significant bits of the registers now & only restore the most
1295	 * significant bits later because the most significant bits of any
1296	 * vector registers whose aliased FP register is modified now will have
1297	 * been zeroed. We'd have no way to know that when restoring the vector
1298	 * context & thus may load an outdated value for the most significant
1299	 * bits of a vector register.
1300	 */
1301	if (!msa && !thread_msa_context_live())
1302		return own_fpu(1);
1303
1304	/*
1305	 * This task is using or has previously used MSA. Thus we require
1306	 * that Status.FR == 1.
1307	 */
1308	preempt_disable();
1309	was_fpu_owner = is_fpu_owner();
1310	err = own_fpu_inatomic(0);
1311	if (err)
1312		goto out;
1313
1314	enable_msa();
1315	write_msa_csr(current->thread.fpu.msacsr);
1316	set_thread_flag(TIF_USEDMSA);
1317
1318	/*
1319	 * If this is the first time that the task is using MSA and it has
1320	 * previously used scalar FP in this time slice then we already nave
1321	 * FP context which we shouldn't clobber. We do however need to clear
1322	 * the upper 64b of each vector register so that this task has no
1323	 * opportunity to see data left behind by another.
1324	 */
1325	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
1326	if (!prior_msa && was_fpu_owner) {
1327		init_msa_upper();
1328
1329		goto out;
1330	}
1331
1332	if (!prior_msa) {
1333		/*
1334		 * Restore the least significant 64b of each vector register
1335		 * from the existing scalar FP context.
1336		 */
1337		_restore_fp(current);
1338
1339		/*
1340		 * The task has not formerly used MSA, so clear the upper 64b
1341		 * of each vector register such that it cannot see data left
1342		 * behind by another task.
1343		 */
1344		init_msa_upper();
1345	} else {
1346		/* We need to restore the vector context. */
1347		restore_msa(current);
1348
1349		/* Restore the scalar FP control & status register */
1350		if (!was_fpu_owner)
1351			write_32bit_cp1_register(CP1_STATUS,
1352						 current->thread.fpu.fcr31);
1353	}
1354
1355out:
1356	preempt_enable();
1357
1358	return 0;
1359}
1360
 
 
 
 
 
 
 
 
 
1361asmlinkage void do_cpu(struct pt_regs *regs)
1362{
1363	enum ctx_state prev_state;
1364	unsigned int __user *epc;
1365	unsigned long old_epc, old31;
1366	void __user *fault_addr;
1367	unsigned int opcode;
1368	unsigned long fcr31;
1369	unsigned int cpid;
1370	int status, err;
1371	int sig;
1372
1373	prev_state = exception_enter();
1374	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1375
1376	if (cpid != 2)
1377		die_if_kernel("do_cpu invoked from kernel context!", regs);
1378
1379	switch (cpid) {
1380	case 0:
1381		epc = (unsigned int __user *)exception_epc(regs);
1382		old_epc = regs->cp0_epc;
1383		old31 = regs->regs[31];
1384		opcode = 0;
1385		status = -1;
1386
1387		if (unlikely(compute_return_epc(regs) < 0))
1388			break;
1389
1390		if (!get_isa16_mode(regs->cp0_epc)) {
1391			if (unlikely(get_user(opcode, epc) < 0))
1392				status = SIGSEGV;
1393
1394			if (!cpu_has_llsc && status < 0)
1395				status = simulate_llsc(regs, opcode);
1396		}
1397
1398		if (status < 0)
1399			status = SIGILL;
1400
1401		if (unlikely(status > 0)) {
1402			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
1403			regs->regs[31] = old31;
1404			force_sig(status, current);
1405		}
1406
1407		break;
1408
 
1409	case 3:
1410		/*
1411		 * The COP3 opcode space and consequently the CP0.Status.CU3
1412		 * bit and the CP0.Cause.CE=3 encoding have been removed as
1413		 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
1414		 * up the space has been reused for COP1X instructions, that
1415		 * are enabled by the CP0.Status.CU1 bit and consequently
1416		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
1417		 * exceptions.  Some FPU-less processors that implement one
1418		 * of these ISAs however use this code erroneously for COP1X
1419		 * instructions.  Therefore we redirect this trap to the FP
1420		 * emulator too.
1421		 */
1422		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
1423			force_sig(SIGILL, current);
1424			break;
1425		}
1426		/* Fall through.  */
1427
1428	case 1:
 
 
 
 
1429		err = enable_restore_fp_context(0);
1430
1431		if (raw_cpu_has_fpu && !err)
1432			break;
1433
1434		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1435					       &fault_addr);
1436
1437		/*
1438		 * We can't allow the emulated instruction to leave
1439		 * any enabled Cause bits set in $fcr31.
1440		 */
1441		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
1442		current->thread.fpu.fcr31 &= ~fcr31;
1443
1444		/* Send a signal if required.  */
1445		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
1446			mt_ase_fp_affinity();
1447
1448		break;
 
 
 
 
 
 
 
1449
1450	case 2:
1451		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1452		break;
1453	}
1454
1455	exception_exit(prev_state);
1456}
1457
1458asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
1459{
1460	enum ctx_state prev_state;
1461
1462	prev_state = exception_enter();
1463	current->thread.trap_nr = (regs->cp0_cause >> CAUSEB_EXCCODE) & 0x1f;
1464	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
1465		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
1466		goto out;
1467
1468	/* Clear MSACSR.Cause before enabling interrupts */
1469	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
1470	local_irq_enable();
1471
1472	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1473	force_sig(SIGFPE, current);
1474out:
1475	exception_exit(prev_state);
1476}
1477
1478asmlinkage void do_msa(struct pt_regs *regs)
1479{
1480	enum ctx_state prev_state;
1481	int err;
1482
1483	prev_state = exception_enter();
1484
1485	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
1486		force_sig(SIGILL, current);
1487		goto out;
1488	}
1489
1490	die_if_kernel("do_msa invoked from kernel context!", regs);
1491
1492	err = enable_restore_fp_context(1);
1493	if (err)
1494		force_sig(SIGILL, current);
1495out:
1496	exception_exit(prev_state);
1497}
1498
1499asmlinkage void do_mdmx(struct pt_regs *regs)
1500{
1501	enum ctx_state prev_state;
1502
1503	prev_state = exception_enter();
1504	force_sig(SIGILL, current);
1505	exception_exit(prev_state);
1506}
1507
1508/*
1509 * Called with interrupts disabled.
1510 */
1511asmlinkage void do_watch(struct pt_regs *regs)
1512{
1513	siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
1514	enum ctx_state prev_state;
1515
1516	prev_state = exception_enter();
1517	/*
1518	 * Clear the WP bit (bit 22) of the Cause register so we don't
1519	 * loop forever.
1520	 */
1521	clear_c0_cause(CAUSEF_WP);
1522
1523	/*
1524	 * If the current thread has the watch registers loaded, save
1525	 * their values and send SIGTRAP.  Otherwise another thread
1526	 * left the registers set, clear them and continue.
1527	 */
1528	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1529		mips_read_watch_registers();
1530		local_irq_enable();
1531		force_sig_info(SIGTRAP, &info, current);
1532	} else {
1533		mips_clear_watch_registers();
1534		local_irq_enable();
1535	}
1536	exception_exit(prev_state);
1537}
1538
1539asmlinkage void do_mcheck(struct pt_regs *regs)
1540{
1541	int multi_match = regs->cp0_status & ST0_TS;
1542	enum ctx_state prev_state;
1543	mm_segment_t old_fs = get_fs();
1544
1545	prev_state = exception_enter();
1546	show_regs(regs);
1547
1548	if (multi_match) {
1549		dump_tlb_regs();
1550		pr_info("\n");
1551		dump_tlb_all();
1552	}
1553
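	/*
	 * If the machine check came from kernel mode, cp0_epc is a kernel
	 * pointer, so temporarily widen the address limit to let the user
	 * accessors in show_code() read it.
	 */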
1554	if (!user_mode(regs))
1555		set_fs(KERNEL_DS);
1556
1557	show_code((unsigned int __user *) regs->cp0_epc);
1558
1559	set_fs(old_fs);
1560
1561	/*
1562	 * Some chips may have other causes of machine check (e.g. SB1
1563	 * graduation timer)
1564	 */
1565	panic("Caught Machine Check exception - %scaused by multiple "
1566	      "matching entries in the TLB.",
1567	      (multi_match) ? "" : "not ");
1568}
1569
1570asmlinkage void do_mt(struct pt_regs *regs)
1571{
1572	int subcode;
1573
1574	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1575			>> VPECONTROL_EXCPT_SHIFT;
1576	switch (subcode) {
1577	case 0:
1578		printk(KERN_DEBUG "Thread Underflow\n");
1579		break;
1580	case 1:
1581		printk(KERN_DEBUG "Thread Overflow\n");
1582		break;
1583	case 2:
1584		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1585		break;
1586	case 3:
1587		printk(KERN_DEBUG "Gating Storage Exception\n");
1588		break;
1589	case 4:
1590		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1591		break;
1592	case 5:
1593		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1594		break;
1595	default:
1596		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1597			subcode);
1598		break;
1599	}
1600	die_if_kernel("MIPS MT Thread exception in kernel", regs);
1601
1602	force_sig(SIGILL, current);
1603}
1604
1605
1606asmlinkage void do_dsp(struct pt_regs *regs)
1607{
1608	if (cpu_has_dsp)
1609		panic("Unexpected DSP exception");
1610
1611	force_sig(SIGILL, current);
1612}
1613
1614asmlinkage void do_reserved(struct pt_regs *regs)
1615{
1616	/*
1617	 * Game over - no way to handle this if it ever occurs.  Most probably
1618	 * caused by a new, unknown CPU type or by another deadly
1619	 * hardware/software error.
1620	 */
1621	show_regs(regs);
1622	panic("Caught reserved exception %ld - should not happen.",
1623	      (regs->cp0_cause & 0x7f) >> 2);
1624}
1625
1626static int __initdata l1parity = 1;
1627static int __init nol1parity(char *s)
1628{
1629	l1parity = 0;
1630	return 1;
1631}
1632__setup("nol1par", nol1parity);
1633static int __initdata l2parity = 1;
1634static int __init nol2parity(char *s)
1635{
1636	l2parity = 0;
1637	return 1;
1638}
1639__setup("nol2par", nol2parity);
1640
1641/*
1642 * Some MIPS CPUs can enable/disable cache parity detection, but they
1643 * do it in different ways.
1644 */
1645static inline void parity_protection_init(void)
1646{
1647	switch (current_cpu_type()) {
1648	case CPU_24K:
1649	case CPU_34K:
1650	case CPU_74K:
1651	case CPU_1004K:
1652	case CPU_1074K:
1653	case CPU_INTERAPTIV:
1654	case CPU_PROAPTIV:
1655	case CPU_P5600:
1656	case CPU_QEMU_GENERIC:
1657	case CPU_I6400:
1658	case CPU_P6600:
1659		{
1660#define ERRCTL_PE	0x80000000
1661#define ERRCTL_L2P	0x00800000
1662			unsigned long errctl;
1663			unsigned int l1parity_present, l2parity_present;
1664
1665			errctl = read_c0_ecc();
1666			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
1667
1668			/* probe L1 parity support */
1669			write_c0_ecc(errctl | ERRCTL_PE);
1670			back_to_back_c0_hazard();
1671			l1parity_present = (read_c0_ecc() & ERRCTL_PE);
1672
1673			/* probe L2 parity support */
1674			write_c0_ecc(errctl|ERRCTL_L2P);
1675			back_to_back_c0_hazard();
1676			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
1677
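			/*
			 * On these cores L1 parity is enabled by ErrCtl.PE,
			 * while the effective L2 enable appears to be PE xor
			 * L2P (note the errctl ^= ERRCTL_L2P when reporting
			 * below), so L2P is set only when the requested L1
			 * and L2 states differ.
			 */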
1678			if (l1parity_present && l2parity_present) {
1679				if (l1parity)
1680					errctl |= ERRCTL_PE;
1681				if (l1parity ^ l2parity)
1682					errctl |= ERRCTL_L2P;
1683			} else if (l1parity_present) {
1684				if (l1parity)
1685					errctl |= ERRCTL_PE;
1686			} else if (l2parity_present) {
1687				if (l2parity)
1688					errctl |= ERRCTL_L2P;
1689			} else {
1690				/* No parity available */
1691			}
1692
1693			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
1694
1695			write_c0_ecc(errctl);
1696			back_to_back_c0_hazard();
1697			errctl = read_c0_ecc();
1698			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
1699
1700			if (l1parity_present)
1701				printk(KERN_INFO "Cache parity protection %sabled\n",
1702				       (errctl & ERRCTL_PE) ? "en" : "dis");
1703
1704			if (l2parity_present) {
1705				if (l1parity_present && l1parity)
1706					errctl ^= ERRCTL_L2P;
1707				printk(KERN_INFO "L2 cache parity protection %sabled\n",
1708				       (errctl & ERRCTL_L2P) ? "en" : "dis");
1709			}
1710		}
1711		break;
1712
1713	case CPU_5KC:
1714	case CPU_5KE:
1715	case CPU_LOONGSON1:
1716		write_c0_ecc(0x80000000);
1717		back_to_back_c0_hazard();
1718		/* Set the PE bit (bit 31) in the c0_errctl register. */
1719		printk(KERN_INFO "Cache parity protection %sabled\n",
1720		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
1721		break;
1722	case CPU_20KC:
1723	case CPU_25KF:
1724		/* Clear the DE bit (bit 16) in the c0_status register. */
1725		printk(KERN_INFO "Enabling cache parity protection for "
1726		       "MIPS 20KC/25KF CPUs.\n");
1727		clear_c0_status(ST0_DE);
1728		break;
1729	default:
1730		break;
1731	}
1732}
1733
1734asmlinkage void cache_parity_error(void)
1735{
1736	const int field = 2 * sizeof(unsigned long);
1737	unsigned int reg_val;
1738
1739	/* For the moment, report the problem and hang. */
1740	printk("Cache error exception:\n");
1741	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1742	reg_val = read_c0_cacheerr();
1743	printk("c0_cacheerr == %08x\n", reg_val);
1744
1745	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1746	       reg_val & (1<<30) ? "secondary" : "primary",
1747	       reg_val & (1<<31) ? "data" : "insn");
1748	if ((cpu_has_mips_r2_r6) &&
1749	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1750		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1751			reg_val & (1<<29) ? "ED " : "",
1752			reg_val & (1<<28) ? "ET " : "",
1753			reg_val & (1<<27) ? "ES " : "",
1754			reg_val & (1<<26) ? "EE " : "",
1755			reg_val & (1<<25) ? "EB " : "",
1756			reg_val & (1<<24) ? "EI " : "",
1757			reg_val & (1<<23) ? "E1 " : "",
1758			reg_val & (1<<22) ? "E0 " : "");
1759	} else {
1760		pr_err("Error bits: %s%s%s%s%s%s%s\n",
1761			reg_val & (1<<29) ? "ED " : "",
1762			reg_val & (1<<28) ? "ET " : "",
1763			reg_val & (1<<26) ? "EE " : "",
1764			reg_val & (1<<25) ? "EB " : "",
1765			reg_val & (1<<24) ? "EI " : "",
1766			reg_val & (1<<23) ? "E1 " : "",
1767			reg_val & (1<<22) ? "E0 " : "");
1768	}
1769	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1770
1771#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1772	if (reg_val & (1<<22))
1773		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1774
1775	if (reg_val & (1<<23))
1776		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1777#endif
1778
1779	panic("Can't handle the cache error!");
1780}
1781
1782asmlinkage void do_ftlb(void)
1783{
1784	const int field = 2 * sizeof(unsigned long);
1785	unsigned int reg_val;
1786
1787	/* For the moment, report the problem and hang. */
1788	if ((cpu_has_mips_r2_r6) &&
1789	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
1790	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
1791		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1792		       read_c0_ecc());
1793		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1794		reg_val = read_c0_cacheerr();
1795		pr_err("c0_cacheerr == %08x\n", reg_val);
1796
1797		if ((reg_val & 0xc0000000) == 0xc0000000) {
1798			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
1799		} else {
1800			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1801			       reg_val & (1<<30) ? "secondary" : "primary",
1802			       reg_val & (1<<31) ? "data" : "insn");
1803		}
1804	} else {
1805		pr_err("FTLB error exception\n");
1806	}
1807	/* Just print the cacheerr bits for now */
1808	cache_parity_error();
1809}
1810
1811/*
1812 * SDBBP EJTAG debug exception handler.
1813 * We skip the trapping instruction and continue at the next one.
1814 */
1815void ejtag_exception_handler(struct pt_regs *regs)
1816{
1817	const int field = 2 * sizeof(unsigned long);
1818	unsigned long depc, old_epc, old_ra;
1819	unsigned int debug;
1820
1821	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1822	depc = read_c0_depc();
1823	debug = read_c0_debug();
1824	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1825	if (debug & 0x80000000) {
1826		/*
1827		 * In branch delay slot.
1828		 * We cheat a little bit here and use EPC to calculate the
1829		 * debug return address (DEPC). EPC is restored after the
1830		 * calculation.
1831		 */
1832		old_epc = regs->cp0_epc;
1833		old_ra = regs->regs[31];
1834		regs->cp0_epc = depc;
1835		compute_return_epc(regs);
1836		depc = regs->cp0_epc;
1837		regs->cp0_epc = old_epc;
1838		regs->regs[31] = old_ra;
1839	} else
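		/* Not in a delay slot: just step over the 32-bit SDBBP. */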
1840		depc += 4;
1841	write_c0_depc(depc);
1842
1843#if 0
1844	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1845	write_c0_debug(debug | 0x100);
1846#endif
1847}
1848
1849/*
1850 * NMI exception handler.
1851 * No lock; only written during early bootup by CPU 0.
1852 */
1853static RAW_NOTIFIER_HEAD(nmi_chain);
1854
1855int register_nmi_notifier(struct notifier_block *nb)
1856{
1857	return raw_notifier_chain_register(&nmi_chain, nb);
1858}
1859
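/*
 * For illustration only (not part of this file): a board might hook NMIs
 * roughly like the sketch below, where my_board_nmi() is a hypothetical
 * callback.
 *
 *	static int my_board_nmi(struct notifier_block *nb,
 *				unsigned long action, void *data)
 *	{
 *		struct pt_regs *regs = data;
 *
 *		pr_crit("board NMI at EPC %lx\n", regs->cp0_epc);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_board_nmi_nb = {
 *		.notifier_call = my_board_nmi,
 *	};
 *
 *	register_nmi_notifier(&my_board_nmi_nb);
 */
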
1860void __noreturn nmi_exception_handler(struct pt_regs *regs)
1861{
1862	char str[100];
1863
1864	nmi_enter();
1865	raw_notifier_call_chain(&nmi_chain, 0, regs);
1866	bust_spinlocks(1);
1867	snprintf(str, sizeof(str), "CPU%d NMI taken, CP0_EPC=%lx\n",
1868		 smp_processor_id(), regs->cp0_epc);
1869	regs->cp0_epc = read_c0_errorepc();
1870	die(str, regs);
1871	nmi_exit();
1872}
1873
1874#define VECTORSPACING 0x100	/* for EI/VI mode */
1875
1876unsigned long ebase;
1877EXPORT_SYMBOL_GPL(ebase);
1878unsigned long exception_handlers[32];
1879unsigned long vi_handlers[64];
1880
1881void __init *set_except_vector(int n, void *addr)
1882{
1883	unsigned long handler = (unsigned long) addr;
1884	unsigned long old_handler;
1885
1886#ifdef CONFIG_CPU_MICROMIPS
1887	/*
1888	 * Only the TLB handlers are cache aligned with an even
1889	 * address. All other handlers are on an odd address and
1890	 * require no modification. Otherwise, MIPS32 mode will
1891	 * be entered when handling any TLB exceptions. That
1892	 * would be bad...since we must stay in microMIPS mode.
1893	 */
1894	if (!(handler & 0x1))
1895		handler |= 1;
1896#endif
1897	old_handler = xchg(&exception_handlers[n], handler);
1898
1899	if (n == 0 && cpu_has_divec) {
1900#ifdef CONFIG_CPU_MICROMIPS
1901		unsigned long jump_mask = ~((1 << 27) - 1);
1902#else
1903		unsigned long jump_mask = ~((1 << 28) - 1);
1904#endif
1905		u32 *buf = (u32 *)(ebase + 0x200);
1906		unsigned int k0 = 26;
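		/*
		 * A j instruction can only reach within the current 256MB
		 * (microMIPS: 128MB) naturally aligned region, so emit one
		 * only when the handler lies in the same region as the
		 * vector at ebase + 0x200; otherwise load the full address
		 * into k0 ($26) and use jr.
		 */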
1907		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
1908			uasm_i_j(&buf, handler & ~jump_mask);
1909			uasm_i_nop(&buf);
1910		} else {
1911			UASM_i_LA(&buf, k0, handler);
1912			uasm_i_jr(&buf, k0);
1913			uasm_i_nop(&buf);
1914		}
1915		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
1916	}
1917	return (void *)old_handler;
1918}
1919
1920static void do_default_vi(void)
1921{
1922	show_regs(get_irq_regs());
1923	panic("Caught unexpected vectored interrupt.");
1924}
1925
1926static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1927{
1928	unsigned long handler;
1929	unsigned long old_handler = vi_handlers[n];
1930	int srssets = current_cpu_data.srsets;
1931	u16 *h;
1932	unsigned char *b;
1933
1934	BUG_ON(!cpu_has_veic && !cpu_has_vint);
1935
1936	if (addr == NULL) {
1937		handler = (unsigned long) do_default_vi;
1938		srs = 0;
1939	} else
1940		handler = (unsigned long) addr;
1941	vi_handlers[n] = handler;
1942
1943	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1944
1945	if (srs >= srssets)
1946		panic("Shadow register set %d not supported", srs);
1947
1948	if (cpu_has_veic) {
1949		if (board_bind_eic_interrupt)
1950			board_bind_eic_interrupt(n, srs);
1951	} else if (cpu_has_vint) {
1952		/* SRSMap is only defined if shadow sets are implemented */
1953		if (srssets > 1)
1954			change_c0_srsmap(0xf << n*4, srs << n*4);
1955	}
1956
1957	if (srs == 0) {
1958		/*
1959		 * If no shadow set is selected then use the default handler
1960		 * that does normal register saving and standard interrupt exit
1961		 */
1962		extern char except_vec_vi, except_vec_vi_lui;
1963		extern char except_vec_vi_ori, except_vec_vi_end;
1964		extern char rollback_except_vec_vi;
1965		char *vec_start = using_rollback_handler() ?
1966			&rollback_except_vec_vi : &except_vec_vi;
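		/*
		 * The 16-bit immediates patched below sit in the second
		 * halfword of the lui/ori instruction words on big-endian
		 * (and in the microMIPS 32-bit encodings), hence the
		 * additional offset of 2.
		 */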
1967#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1968		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
1969		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
1970#else
1971		const int lui_offset = &except_vec_vi_lui - vec_start;
1972		const int ori_offset = &except_vec_vi_ori - vec_start;
1973#endif
1974		const int handler_len = &except_vec_vi_end - vec_start;
1975
1976		if (handler_len > VECTORSPACING) {
1977			/*
1978			 * Sigh... panicking won't help as the console
1979			 * is probably not configured :(
1980			 */
1981			panic("VECTORSPACING too small");
1982		}
1983
1984		set_handler(((unsigned long)b - ebase), vec_start,
1985#ifdef CONFIG_CPU_MICROMIPS
1986				(handler_len - 1));
1987#else
1988				handler_len);
1989#endif
1990		h = (u16 *)(b + lui_offset);
1991		*h = (handler >> 16) & 0xffff;
1992		h = (u16 *)(b + ori_offset);
1993		*h = (handler & 0xffff);
1994		local_flush_icache_range((unsigned long)b,
1995					 (unsigned long)(b+handler_len));
1996	} else {
1998		/*
1999		 * In other cases jump directly to the interrupt handler. It
2000		 * is the handler's responsibility to save registers if required
2001		 * (e.g. hi/lo) and return from the exception using "eret".
2002		 */
2003		u32 insn;
2004
2005		h = (u16 *)b;
2006		/* j handler */
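		/*
		 * Encode the jump by hand: 0x08000000 is the MIPS32 j
		 * opcode with a 26-bit word-index target; the microMIPS
		 * encoding takes a halfword index instead.
		 */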
2007#ifdef CONFIG_CPU_MICROMIPS
2008		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
2009#else
2010		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
2011#endif
2012		h[0] = (insn >> 16) & 0xffff;
2013		h[1] = insn & 0xffff;
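		/* Fill the delay slot: an all-zero word decodes as a nop. */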
2014		h[2] = 0;
2015		h[3] = 0;
2016		local_flush_icache_range((unsigned long)b,
2017					 (unsigned long)(b+8));
2018	}
2019
2020	return (void *)old_handler;
2021}
2022
2023void *set_vi_handler(int n, vi_handler_t addr)
2024{
2025	return set_vi_srs_handler(n, addr, 0);
2026}
2027
2028extern void tlb_init(void);
2029
2030/*
2031 * Timer interrupt
2032 */
2033int cp0_compare_irq;
2034EXPORT_SYMBOL_GPL(cp0_compare_irq);
2035int cp0_compare_irq_shift;
2036
2037/*
2038 * Performance counter IRQ or -1 if shared with timer
2039 */
2040int cp0_perfcount_irq;
2041EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
2042
2043/*
2044 * Fast debug channel IRQ or -1 if not present
2045 */
2046int cp0_fdc_irq;
2047EXPORT_SYMBOL_GPL(cp0_fdc_irq);
2048
2049static int noulri;
2050
2051static int __init ulri_disable(char *s)
2052{
2053	pr_info("Disabling ulri\n");
2054	noulri = 1;
2055
2056	return 1;
2057}
2058__setup("noulri", ulri_disable);
2059
2060/* configure STATUS register */
2061static void configure_status(void)
2062{
2063	/*
2064	 * Disable coprocessors and select 32-bit or 64-bit addressing
2065	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
2066	 * flag that some firmware may have left set and the TS bit (for
2067	 * IP27).  Set XX for ISA IV code to work.
2068	 */
2069	unsigned int status_set = ST0_CU0;
2070#ifdef CONFIG_64BIT
2071	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
2072#endif
2073	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
2074		status_set |= ST0_XX;
2075	if (cpu_has_dsp)
2076		status_set |= ST0_MX;
2077
2078	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
2079			 status_set);
2080}
2081
2082unsigned int hwrena;
2083EXPORT_SYMBOL_GPL(hwrena);
2084
2085	/* Configure the HWRENA register, which gates what the RDHWR
	 * instruction may read from user mode: the CPU number, the SYNCI
	 * step size, the cycle counter and its resolution, and (if
	 * implemented) the UserLocal (TLS) register.
	 */
2086static void configure_hwrena(void)
2087{
2088	hwrena = cpu_hwrena_impl_bits;
2089
2090	if (cpu_has_mips_r2_r6)
2091		hwrena |= MIPS_HWRENA_CPUNUM |
2092			  MIPS_HWRENA_SYNCISTEP |
2093			  MIPS_HWRENA_CC |
2094			  MIPS_HWRENA_CCRES;
2095
2096	if (!noulri && cpu_has_userlocal)
2097		hwrena |= MIPS_HWRENA_ULR;
2098
2099	if (hwrena)
2100		write_c0_hwrena(hwrena);
2101}
2102
2103static void configure_exception_vector(void)
2104{
2105	if (cpu_has_veic || cpu_has_vint) {
2106		unsigned long sr = set_c0_status(ST0_BEV);
2107		/* If available, use WG to set top bits of EBASE */
2108		if (cpu_has_ebase_wg) {
2109#ifdef CONFIG_64BIT
2110			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
2111#else
2112			write_c0_ebase(ebase | MIPS_EBASE_WG);
2113#endif
2114		}
2115		write_c0_ebase(ebase);
2116		write_c0_status(sr);
2117		/* Setting a non-zero spacing in IntCtl.VS (bits 9:5) enables EI/VI mode */
2118		change_c0_intctl(0x3e0, VECTORSPACING);
2119	}
2120	if (cpu_has_divec) {
2121		if (cpu_has_mipsmt) {
2122			unsigned int vpflags = dvpe();
2123			set_c0_cause(CAUSEF_IV);
2124			evpe(vpflags);
2125		} else
2126			set_c0_cause(CAUSEF_IV);
2127	}
2128}
2129
2130void per_cpu_trap_init(bool is_boot_cpu)
2131{
2132	unsigned int cpu = smp_processor_id();
2133
2134	configure_status();
2135	configure_hwrena();
2136
2137	configure_exception_vector();
2138
2139	/*
2140	 * Before R2 these interrupt numbers were fixed at 7, so on R2 only:
2141	 *
2142	 *  o read IntCtl.IPTI to determine the timer interrupt
2143	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
2144	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
2145	 */
2146	if (cpu_has_mips_r2_r6) {
2147		/*
2148		 * We shouldn't trust that a secondary core has a sane EBASE
2149		 * register, so use the one calculated by the boot CPU.
2150		 */
2151		if (!is_boot_cpu) {
2152			/* If available, use WG to set top bits of EBASE */
2153			if (cpu_has_ebase_wg) {
2154#ifdef CONFIG_64BIT
2155				write_c0_ebase_64(ebase | MIPS_EBASE_WG);
2156#else
2157				write_c0_ebase(ebase | MIPS_EBASE_WG);
2158#endif
2159			}
2160			write_c0_ebase(ebase);
2161		}
2162
2163		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2164		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2165		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
2166		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
2167		if (!cp0_fdc_irq)
2168			cp0_fdc_irq = -1;
2169
2170	} else {
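		/*
		 * Pre-R2 cores fix the timer interrupt at hardware
		 * interrupt 7 and share it with the performance counter.
		 */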
2171		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
2172		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
2173		cp0_perfcount_irq = -1;
2174		cp0_fdc_irq = -1;
2175	}
2176
2177	if (!cpu_data[cpu].asid_cache)
2178		cpu_data[cpu].asid_cache = asid_first_version(cpu);
2179
2180	atomic_inc(&init_mm.mm_count);
2181	current->active_mm = &init_mm;
2182	BUG_ON(current->mm);
2183	enter_lazy_tlb(&init_mm, current);
2184
2185	/* Boot CPU's cache setup in setup_arch(). */
2186	if (!is_boot_cpu)
2187		cpu_cache_init();
2188	tlb_init();
2189	TLBMISS_HANDLER_SETUP();
2190}
2191
2192/* Install CPU exception handler */
2193void set_handler(unsigned long offset, void *addr, unsigned long size)
2194{
2195#ifdef CONFIG_CPU_MICROMIPS
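	/*
	 * microMIPS handler symbols have bit 0 (the ISA mode bit) set;
	 * subtract it off to copy from the actual code address.
	 */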
2196	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
2197#else
2198	memcpy((void *)(ebase + offset), addr, size);
2199#endif
2200	local_flush_icache_range(ebase + offset, ebase + offset + size);
2201}
2202
2203static char panic_null_cerr[] =
2204	"Trying to set NULL cache error exception handler";
2205
2206/*
2207 * Install uncached CPU exception handler.
2208 * This is suitable only for the cache error exception, which is the only
2209 * exception handler that runs uncached.
2210 */
2211void set_uncached_handler(unsigned long offset, void *addr,
2212	unsigned long size)
2213{
2214	unsigned long uncached_ebase = CKSEG1ADDR(ebase);
2215
2216	if (!addr)
2217		panic(panic_null_cerr);
2218
2219	memcpy((void *)(uncached_ebase + offset), addr, size);
2220}
2221
2222static int __initdata rdhwr_noopt;
2223static int __init set_rdhwr_noopt(char *str)
2224{
2225	rdhwr_noopt = 1;
2226	return 1;
2227}
2228
2229__setup("rdhwr_noopt", set_rdhwr_noopt);
2230
2231void __init trap_init(void)
2232{
2233	extern char except_vec3_generic;
2234	extern char except_vec4;
2235	extern char except_vec3_r4000;
2236	unsigned long i;
2237
2238	check_wait();
2239
2240	if (cpu_has_veic || cpu_has_vint) {
2241		unsigned long size = 0x200 + VECTORSPACING*64;
2242		phys_addr_t ebase_pa;
2243
2244		ebase = (unsigned long)
2245			__alloc_bootmem(size, 1 << fls(size), 0);
2246
2247		/*
2248		 * Try to ensure ebase resides in KSeg0 if possible.
2249		 *
2250		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
2251		 * hitting a poorly defined exception base for Cache Errors.
2252		 * The allocation is likely to be in the low 512MB of physical
2253		 * memory, in which case we should be able to convert to KSeg0.
2254		 *
2255		 * EVA is special though as it allows segments to be rearranged
2256		 * and to become uncached during cache error handling.
2257		 */
2258		ebase_pa = __pa(ebase);
2259		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
2260			ebase = CKSEG0ADDR(ebase_pa);
2261	} else {
2262		ebase = CAC_BASE;
2263
2264		if (cpu_has_mips_r2_r6) {
2265			if (cpu_has_ebase_wg) {
2266#ifdef CONFIG_64BIT
2267				ebase = (read_c0_ebase_64() & ~0xfff);
2268#else
2269				ebase = (read_c0_ebase() & ~0xfff);
2270#endif
2271			} else {
2272				ebase += (read_c0_ebase() & 0x3ffff000);
2273			}
2274		}
2275	}
2276
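	/*
	 * Config3.ISAOnExc selects which ISA mode (classic MIPS32 or
	 * microMIPS) the CPU enters when an exception is taken; make it
	 * match the ISA the kernel's exception handlers were built for.
	 */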
2277	if (cpu_has_mmips) {
2278		unsigned int config3 = read_c0_config3();
2279
2280		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
2281			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
2282		else
2283			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
2284	}
2285
2286	if (board_ebase_setup)
2287		board_ebase_setup();
2288	per_cpu_trap_init(true);
2289
2290	/*
2291	 * Copy the generic exception handlers to their final destination.
2292	 * This will be overridden later as suitable for a particular
2293	 * configuration.
2294	 */
2295	set_handler(0x180, &except_vec3_generic, 0x80);
2296
2297	/*
2298	 * Set up default vectors
2299	 */
2300	for (i = 0; i <= 31; i++)
2301		set_except_vector(i, handle_reserved);
2302
2303	/*
2304	 * Copy the EJTAG debug exception vector handler code to its final
2305	 * destination.
2306	 */
2307	if (cpu_has_ejtag && board_ejtag_handler_setup)
2308		board_ejtag_handler_setup();
2309
2310	/*
2311	 * Only some CPUs have the watch exceptions.
2312	 */
2313	if (cpu_has_watch)
2314		set_except_vector(EXCCODE_WATCH, handle_watch);
2315
2316	/*
2317	 * Initialise interrupt handlers
2318	 */
2319	if (cpu_has_veic || cpu_has_vint) {
2320		int nvec = cpu_has_veic ? 64 : 8;
2321		for (i = 0; i < nvec; i++)
2322			set_vi_handler(i, NULL);
2323	}
2324	else if (cpu_has_divec)
2325		set_handler(0x200, &except_vec4, 0x8);
2326
2327	/*
2328	 * Some CPUs can enable/disable cache parity detection, but they do
2329	 * it in different ways.
2330	 */
2331	parity_protection_init();
2332
2333	/*
2334	 * The Data Bus Errors / Instruction Bus Errors are signaled
2335	 * by external hardware.  Therefore these two exceptions
2336	 * may have board specific handlers.
2337	 */
2338	if (board_be_init)
2339		board_be_init();
2340
2341	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
2342					rollback_handle_int : handle_int);
2343	set_except_vector(EXCCODE_MOD, handle_tlbm);
2344	set_except_vector(EXCCODE_TLBL, handle_tlbl);
2345	set_except_vector(EXCCODE_TLBS, handle_tlbs);
2346
2347	set_except_vector(EXCCODE_ADEL, handle_adel);
2348	set_except_vector(EXCCODE_ADES, handle_ades);
2349
2350	set_except_vector(EXCCODE_IBE, handle_ibe);
2351	set_except_vector(EXCCODE_DBE, handle_dbe);
2352
2353	set_except_vector(EXCCODE_SYS, handle_sys);
2354	set_except_vector(EXCCODE_BP, handle_bp);
2355
2356	if (rdhwr_noopt)
2357		set_except_vector(EXCCODE_RI, handle_ri);
2358	else {
2359		if (cpu_has_vtag_icache ||
2360		    current_cpu_type() == CPU_LOONGSON3)
2361			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2363		else
2364			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
2365	}
2366
2367	set_except_vector(EXCCODE_CPU, handle_cpu);
2368	set_except_vector(EXCCODE_OV, handle_ov);
2369	set_except_vector(EXCCODE_TR, handle_tr);
2370	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
2371
2372	if (current_cpu_type() == CPU_R6000 ||
2373	    current_cpu_type() == CPU_R6000A) {
2374		/*
2375		 * The R6000 is the only R-series CPU that features a machine
2376		 * check exception (similar to the R4000 cache error) and
2377		 * unaligned ldc1/sdc1 exception.  The handlers have not been
2378		 * written yet.  Well, anyway there is no R6000 machine on the
2379		 * current list of targets for Linux/MIPS.
2380		 * (Duh, crap, there is someone with a triple R6k machine)
2381		 */
2382		//set_except_vector(14, handle_mc);
2383		//set_except_vector(15, handle_ndc);
2384	}
2385
2386
2387	if (board_nmi_handler_setup)
2388		board_nmi_handler_setup();
2389
2390	if (cpu_has_fpu && !cpu_has_nofpuex)
2391		set_except_vector(EXCCODE_FPE, handle_fpe);
2392
2393	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
2394
2395	if (cpu_has_rixiex) {
2396		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
2397		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
2398	}
2399
2400	set_except_vector(EXCCODE_MSADIS, handle_msa);
2401	set_except_vector(EXCCODE_MDMX, handle_mdmx);
2402
2403	if (cpu_has_mcheck)
2404		set_except_vector(EXCCODE_MCHECK, handle_mcheck);
2405
2406	if (cpu_has_mipsmt)
2407		set_except_vector(EXCCODE_THREAD, handle_mt);
2408
2409	set_except_vector(EXCCODE_DSPDIS, handle_dsp);
2410
2411	if (board_cache_error_setup)
2412		board_cache_error_setup();
2413
2414	if (cpu_has_vce)
2415		/* Special exception: R4[04]00 also uses the divec space. */
2416		set_handler(0x180, &except_vec3_r4000, 0x100);
2417	else if (cpu_has_4kex)
2418		set_handler(0x180, &except_vec3_generic, 0x80);
2419	else
2420		set_handler(0x080, &except_vec3_generic, 0x80);
2421
2422	local_flush_icache_range(ebase, ebase + 0x400);
2423
2424	sort_extable(__start___dbe_table, __stop___dbe_table);
2425
2426	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
2427}
2428
2429static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2430			    void *v)
2431{
2432	switch (cmd) {
2433	case CPU_PM_ENTER_FAILED:
2434	case CPU_PM_EXIT:
2435		configure_status();
2436		configure_hwrena();
2437		configure_exception_vector();
2438
2439		/* Restore register with CPU number for TLB handlers */
2440		TLBMISS_HANDLER_RESTORE();
2441
2442		break;
2443	}
2444
2445	return NOTIFY_OK;
2446}
2447
2448static struct notifier_block trap_pm_notifier_block = {
2449	.notifier_call = trap_pm_notifier,
2450};
2451
2452static int __init trap_pm_init(void)
2453{
2454	return cpu_pm_register_notifier(&trap_pm_notifier_block);
2455}
2456arch_initcall(trap_pm_init);