Linux Audio

Check our new training course

Loading...
v6.2
   1/*
   2 * Handle unaligned accesses by emulation.
   3 *
   4 * This file is subject to the terms and conditions of the GNU General Public
   5 * License.  See the file "COPYING" in the main directory of this archive
   6 * for more details.
   7 *
   8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
   9 * Copyright (C) 1999 Silicon Graphics, Inc.
  10 * Copyright (C) 2014 Imagination Technologies Ltd.
  11 *
  12 * This file contains exception handler for address error exception with the
  13 * special capability to execute faulting instructions in software.  The
  14 * handler does not try to handle the case when the program counter points
  15 * to an address not aligned to a word boundary.
  16 *
  17 * Putting data to unaligned addresses is a bad practice even on Intel where
  18 * only the performance is affected.  Much worse is that such code is non-
  19 * portable.  Due to several programs that die on MIPS due to alignment
  20 * problems I decided to implement this handler anyway though I originally
  21 * didn't intend to do this at all for user code.
  22 *
  23 * For now I enable fixing of address errors by default to make life easier.
  24 * I however intend to disable this somewhen in the future when the alignment
  25 * problems with user programs have been fixed.	 For programmers this is the
  26 * right way to go.
  27 *
  28 * Fixing address errors is a per process option.  The option is inherited
  29 * across fork(2) and execve(2) calls.	If you really want to use the
  30 * option in your user programs - I discourage the use of the software
  31 * emulation strongly - use the following code in your userland stuff:
  32 *
  33 * #include <sys/sysmips.h>
  34 *
  35 * ...
  36 * sysmips(MIPS_FIXADE, x);
  37 * ...
  38 *
  39 * The argument x is 0 for disabling software emulation, enabled otherwise.
  40 *
  41 * Below a little program to play around with this feature.
  42 *
  43 * #include <stdio.h>
  44 * #include <sys/sysmips.h>
  45 *
  46 * struct foo {
  47 *	   unsigned char bar[8];
  48 * };
  49 *
  50 * main(int argc, char *argv[])
  51 * {
  52 *	   struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
  53 *	   unsigned int *p = (unsigned int *) (x.bar + 3);
  54 *	   int i;
  55 *
  56 *	   if (argc > 1)
  57 *		   sysmips(MIPS_FIXADE, atoi(argv[1]));
  58 *
  59 *	   printf("*p = %08lx\n", *p);
  60 *
  61 *	   *p = 0xdeadface;
  62 *
  63 *	   for(i = 0; i <= 7; i++)
  64 *	   printf("%02x ", x.bar[i]);
  65 *	   printf("\n");
  66 * }
  67 *
  68 * Coprocessor loads are not supported; I think this case is unimportant
  69 * in the practice.
  70 *
  71 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
  72 *	 exception for the R6000.
  73 *	 A store crossing a page boundary might be executed only partially.
  74 *	 Undo the partial store in this case.
  75 */
  76#include <linux/context_tracking.h>
  77#include <linux/mm.h>
  78#include <linux/signal.h>
  79#include <linux/smp.h>
  80#include <linux/sched.h>
  81#include <linux/debugfs.h>
  82#include <linux/perf_event.h>
  83
  84#include <asm/asm.h>
  85#include <asm/branch.h>
  86#include <asm/byteorder.h>
  87#include <asm/cop2.h>
  88#include <asm/debug.h>
  89#include <asm/fpu.h>
  90#include <asm/fpu_emulator.h>
  91#include <asm/inst.h>
  92#include <asm/unaligned-emul.h>
  93#include <asm/mmu_context.h>
 
  94#include <linux/uaccess.h>
  95
  96#include "access-helper.h"
  97
/*
 * What to do when a process triggers an unaligned access.
 * QUIET is 0, so it is the default for the zero-initialized
 * unaligned_action below.
 */
enum {
	UNALIGNED_ACTION_QUIET,
	UNALIGNED_ACTION_SIGNAL,
	UNALIGNED_ACTION_SHOW,
};
#ifdef CONFIG_DEBUG_FS
/* Count of emulated unaligned accesses; bumped on every successful fixup. */
static u32 unaligned_instructions;
/* Selected action; presumably exposed via debugfs elsewhere in this file. */
static u32 unaligned_action;
#else
/* Without debugfs there is no knob, so the action is compiled-in QUIET. */
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);
 110
/*
 * Emulate a single unaligned load/store encoded as a classic 32-bit
 * MIPS instruction.
 *
 * @regs: saved register state at the time of the address error exception
 * @addr: the misaligned data address the access faulted on
 * @pc:   address of the faulting instruction
 *
 * On successful emulation the destination register or memory is updated
 * and the saved epc is advanced past the instruction (compute_return_epc()
 * also handles the branch/delay-slot case).  On any failure cp0_epc and
 * $31 are first rolled back to their pre-branch values, then either an
 * exception fixup runs (kernel mode) or a signal is raised: SIGSEGV for a
 * faulting access, SIGBUS for unsupported accesses, SIGILL for unknown
 * instructions.
 *
 * The Load*()/Store*() helpers come from <asm/unaligned-emul.h>; by the
 * usage below they store a non-zero value into 'res' when the access
 * itself faults.
 */
static void emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int *pc)
{
	unsigned long origpc, orig31, value;
	union mips_instruction insn;
	unsigned int res;
	bool user = user_mode(regs);

	/* Remember state to roll back to if the emulated access faults. */
	origpc = (unsigned long)pc;
	orig31 = regs->regs[31];

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	/*
	 * This load never faults.
	 */
	__get_inst32(&insn.word, pc, user);

	switch (insn.i_format.opcode) {
		/*
		 * These are instructions that a compiler doesn't generate.  We
		 * can assume therefore that the code is MIPS-aware and
		 * really buggy.  Emulating these instructions would break the
		 * semantics anyway.
		 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

		/*
		 * For these instructions the only way to create an address
		 * error is an attempted access to kernel/supervisor address
		 * space.
		 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

		/*
		 * The remaining opcodes are the ones that are really of
		 * interest.
		 */
	case spec3_op:
		/* DSP ASE indexed loads (lwx/lhx). */
		if (insn.dsp_format.func == lx_op) {
			switch (insn.dsp_format.op) {
			case lwx_op:
				if (user && !access_ok(addr, 4))
					goto sigbus;
				LoadW(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			case lhx_op:
				if (user && !access_ok(addr, 2))
					goto sigbus;
				LoadHW(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			default:
				goto sigill;
			}
		}
#ifdef CONFIG_EVA
		else {
			/*
			 * we can land here only from kernel accessing user
			 * memory, so we need to "switch" the address limit to
			 * user space, so that address check can work properly.
			 */
			switch (insn.spec3_format.func) {
			case lhe_op:
				if (!access_ok(addr, 2))
					goto sigbus;
				LoadHWE(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case lwe_op:
				if (!access_ok(addr, 4))
					goto sigbus;
				LoadWE(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case lhue_op:
				if (!access_ok(addr, 2))
					goto sigbus;
				LoadHWUE(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case she_op:
				if (!access_ok(addr, 2))
					goto sigbus;
				compute_return_epc(regs);
				value = regs->regs[insn.spec3_format.rt];
				StoreHWE(addr, value, res);
				if (res)
					goto fault;
				break;
			case swe_op:
				if (!access_ok(addr, 4))
					goto sigbus;
				compute_return_epc(regs);
				value = regs->regs[insn.spec3_format.rt];
				StoreWE(addr, value, res);
				if (res)
					goto fault;
				break;
			default:
				goto sigill;
			}
		}
#endif
		break;
	case lh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA) && user)
			LoadHWE(addr, value, res);
		else
			LoadHW(addr, value, res);

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lw_op:
		if (user && !access_ok(addr, 4))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA) && user)
			LoadWE(addr, value, res);
		else
			LoadW(addr, value, res);

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lhu_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA) && user)
			LoadHWUE(addr, value, res);
		else
			LoadHWU(addr, value, res);

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case sh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		/*
		 * For stores, the epc is advanced before the access: a fault
		 * rolls both epc and $31 back at the 'fault' label anyway.
		 */
		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (IS_ENABLED(CONFIG_EVA) && user)
			StoreHWE(addr, value, res);
		else
			StoreHW(addr, value, res);

		if (res)
			goto fault;
		break;

	case sw_op:
		if (user && !access_ok(addr, 4))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (IS_ENABLED(CONFIG_EVA) && user)
			StoreWE(addr, value, res);
		else
			StoreW(addr, value, res);

		if (res)
			goto fault;
		break;

	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

#ifdef CONFIG_MIPS_FP_SUPPORT

	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
	case cop1x_op: {
		/* Unaligned FP access: hand the instruction to the FPU emulator. */
		void __user *fault_addr = NULL;

		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());

		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* Restore FPU state. */

		/* Signal if something went wrong. */
		process_fpemu_return(res, fault_addr, 0);

		if (res == 0)
			break;
		/* Emulator already delivered the signal; skip the stats below. */
		return;
	}
#endif /* CONFIG_MIPS_FP_SUPPORT */

#ifdef CONFIG_CPU_HAS_MSA

	case msa_op: {
		unsigned int wd, preempted;
		enum msa_2b_fmt df;
		union fpureg *fpr;

		if (!cpu_has_msa)
			goto sigill;

		/*
		 * If we've reached this point then userland should have taken
		 * the MSA disabled exception & initialised vector context at
		 * some point in the past.
		 */
		BUG_ON(!thread_msa_context_live());

		df = insn.msa_mi10_format.df;
		wd = insn.msa_mi10_format.wd;
		fpr = &current->thread.fpu.fpr[wd];

		switch (insn.msa_mi10_format.func) {
		case msa_ld_op:
			if (!access_ok(addr, sizeof(*fpr)))
				goto sigbus;

			do {
				/*
				 * If we have live MSA context keep track of
				 * whether we get preempted in order to avoid
				 * the register context we load being clobbered
				 * by the live context as it's saved during
				 * preemption. If we don't have live context
				 * then it can't be saved to clobber the value
				 * we load.
				 */
				preempted = test_thread_flag(TIF_USEDMSA);

				res = __copy_from_user_inatomic(fpr, addr,
								sizeof(*fpr));
				if (res)
					goto fault;

				/*
				 * Update the hardware register if it is in use
				 * by the task in this quantum, in order to
				 * avoid having to save & restore the whole
				 * vector context.
				 */
				preempt_disable();
				if (test_thread_flag(TIF_USEDMSA)) {
					write_msa_wr(wd, fpr, df);
					preempted = 0;
				}
				preempt_enable();
			} while (preempted);
			break;

		case msa_st_op:
			if (!access_ok(addr, sizeof(*fpr)))
				goto sigbus;

			/*
			 * Update from the hardware register if it is in use by
			 * the task in this quantum, in order to avoid having to
			 * save & restore the whole vector context.
			 */
			preempt_disable();
			if (test_thread_flag(TIF_USEDMSA))
				read_msa_wr(wd, fpr, df);
			preempt_enable();

			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
			if (res)
				goto fault;
			break;

		default:
			goto sigbus;
		}

		compute_return_epc(regs);
		break;
	}
#endif /* CONFIG_CPU_HAS_MSA */

#ifndef CONFIG_CPU_MIPSR6
	/*
	 * COP2 is available to implementor for application specific use.
	 * It's up to applications to register a notifier chain and do
	 * whatever they have to do, including possible sending of signals.
	 *
	 * This instruction has been reallocated in Release 6
	 */
	case lwc2_op:
		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
		break;

	case ldc2_op:
		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
		break;

	case swc2_op:
		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
		break;

	case sdc2_op:
		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
		break;
#endif
	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL);
}
 566
/*
 * Recode table from 16-bit register notation to 32-bit GPR.
 * Non-static: also used by the MIPS16e handler later in this file.
 */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

/*
 * Recode table from 16-bit STORE register notation to 32-bit GPR.
 * Differs from reg16to32 only in slot 0 ($zero instead of $s0).
 */
static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
 572
/*
 * Emulate a single unaligned load/store encoded as a microMIPS (16- or
 * 32-bit) instruction.
 *
 * @regs: saved register state at the time of the address error exception
 * @addr: the misaligned data address the access faulted on
 *
 * Decoding reads up to two halfwords for the faulting instruction and up
 * to two more for the following one, so that mm_isBranchInstr() can tell
 * whether we faulted in a delay slot and compute the continuation pc
 * ('contpc').  On success cp0_epc is set to contpc at the 'success'
 * label; on failure cp0_epc/$31 are rolled back and SIGSEGV/SIGBUS/
 * SIGILL is raised (or the kernel dies without a fixup entry).
 */
static void emulate_load_store_microMIPS(struct pt_regs *regs,
					 void __user *addr)
{
	unsigned long value;
	unsigned int res;		/* non-zero when a Load*/StoreW-style helper faults */
	int i;
	unsigned int reg = 0, rvar;
	unsigned long orig31;
	u16 __user *pc16;
	u16 halfword;
	unsigned int word;
	unsigned long origpc, contpc;
	union mips_instruction insn;
	struct mm_decoded_insn mminsn;
	bool user = user_mode(regs);

	/* Remember state to roll back to if the emulated access faults. */
	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];

	mminsn.micro_mips_mode = 1;

	/*
	 * This load never faults.
	 */
	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
	__get_user(halfword, pc16);
	pc16++;
	contpc = regs->cp0_epc + 2;
	word = ((unsigned int)halfword << 16);
	mminsn.pc_inc = 2;

	/* A 32-bit encoding needs the second halfword as well. */
	if (!mm_insn_16bit(halfword)) {
		__get_user(halfword, pc16);
		pc16++;
		contpc = regs->cp0_epc + 4;
		mminsn.pc_inc = 4;
		word |= halfword;
	}
	mminsn.insn = word;

	/* Fetch the NEXT instruction too, for delay-slot/branch analysis. */
	if (get_user(halfword, pc16))
		goto fault;
	mminsn.next_pc_inc = 2;
	word = ((unsigned int)halfword << 16);

	if (!mm_insn_16bit(halfword)) {
		pc16++;
		if (get_user(halfword, pc16))
			goto fault;
		mminsn.next_pc_inc = 4;
		word |= halfword;
	}
	mminsn.next_insn = word;

	insn = (union mips_instruction)(mminsn.insn);
	/* If we faulted on a branch, the delay-slot instruction is the one to emulate. */
	if (mm_isBranchInstr(regs, mminsn, &contpc))
		insn = (union mips_instruction)(mminsn.next_insn);

	/*  Parse instruction to find what to do */

	switch (insn.mm_i_format.opcode) {

	case mm_pool32a_op:
		switch (insn.mm_x_format.func) {
		case mm_lwxs_op:
			reg = insn.mm_x_format.rd;
			goto loadW;
		}

		goto sigbus;

	case mm_pool32b_op:
		switch (insn.mm_m_format.func) {
		case mm_lwp_func:
			/* Load word pair into reg and reg+1. */
			reg = insn.mm_m_format.rd;
			if (reg == 31)
				goto sigbus;

			if (user && !access_ok(addr, 8))
				goto sigbus;

			LoadW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg] = value;
			addr += 4;
			LoadW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg + 1] = value;
			goto success;

		case mm_swp_func:
			/* Store word pair from reg and reg+1. */
			reg = insn.mm_m_format.rd;
			if (reg == 31)
				goto sigbus;

			if (user && !access_ok(addr, 8))
				goto sigbus;

			value = regs->regs[reg];
			StoreW(addr, value, res);
			if (res)
				goto fault;
			addr += 4;
			value = regs->regs[reg + 1];
			StoreW(addr, value, res);
			if (res)
				goto fault;
			goto success;

		case mm_ldp_func:
#ifdef CONFIG_64BIT
			/* Load doubleword pair into reg and reg+1. */
			reg = insn.mm_m_format.rd;
			if (reg == 31)
				goto sigbus;

			if (user && !access_ok(addr, 16))
				goto sigbus;

			LoadDW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg] = value;
			addr += 8;
			LoadDW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg + 1] = value;
			goto success;
#endif /* CONFIG_64BIT */

			goto sigill;

		case mm_sdp_func:
#ifdef CONFIG_64BIT
			/* Store doubleword pair from reg and reg+1. */
			reg = insn.mm_m_format.rd;
			if (reg == 31)
				goto sigbus;

			if (user && !access_ok(addr, 16))
				goto sigbus;

			value = regs->regs[reg];
			StoreDW(addr, value, res);
			if (res)
				goto fault;
			addr += 8;
			value = regs->regs[reg + 1];
			StoreDW(addr, value, res);
			if (res)
				goto fault;
			goto success;
#endif /* CONFIG_64BIT */

			goto sigill;

		case mm_lwm32_func:
			/*
			 * Load multiple words: rd low nibble encodes how many
			 * of $16.. to load (9 means $16-$23 plus $30), bit 4
			 * adds $31.
			 */
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (user && !access_ok(addr, 4 * (rvar + 1)))
					goto sigbus;
			} else {
				if (user && !access_ok(addr, 4 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;
			for (i = 16; rvar; rvar--, i++) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
				regs->regs[i] = value;
			}
			if ((reg & 0xf) == 9) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
				regs->regs[30] = value;
			}
			if (reg & 0x10) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				regs->regs[31] = value;
			}
			goto success;

		case mm_swm32_func:
			/* Store multiple words; mirror of mm_lwm32_func. */
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (user && !access_ok(addr, 4 * (rvar + 1)))
					goto sigbus;
			} else {
				if (user && !access_ok(addr, 4 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;
			for (i = 16; rvar; rvar--, i++) {
				value = regs->regs[i];
				StoreW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
			}
			if ((reg & 0xf) == 9) {
				value = regs->regs[30];
				StoreW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
			}
			if (reg & 0x10) {
				value = regs->regs[31];
				StoreW(addr, value, res);
				if (res)
					goto fault;
			}
			goto success;

		case mm_ldm_func:
#ifdef CONFIG_64BIT
			/* Load multiple doublewords (64-bit kernels only). */
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (user && !access_ok(addr, 8 * (rvar + 1)))
					goto sigbus;
			} else {
				if (user && !access_ok(addr, 8 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;

			for (i = 16; rvar; rvar--, i++) {
				LoadDW(addr, value, res);
				if (res)
					goto fault;
				/*
				 * NOTE(review): addr advances by 4 between
				 * 64-bit loads here, while mm_sdm_func below
				 * (and the $30 case just after) advance by 8.
				 * Looks like it should be 8 — confirm against
				 * the microMIPS LDM definition.
				 */
				addr += 4;
				regs->regs[i] = value;
			}
			if ((reg & 0xf) == 9) {
				LoadDW(addr, value, res);
				if (res)
					goto fault;
				addr += 8;
				regs->regs[30] = value;
			}
			if (reg & 0x10) {
				LoadDW(addr, value, res);
				if (res)
					goto fault;
				regs->regs[31] = value;
			}
			goto success;
#endif /* CONFIG_64BIT */

			goto sigill;

		case mm_sdm_func:
#ifdef CONFIG_64BIT
			/* Store multiple doublewords (64-bit kernels only). */
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (user && !access_ok(addr, 8 * (rvar + 1)))
					goto sigbus;
			} else {
				if (user && !access_ok(addr, 8 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;

			for (i = 16; rvar; rvar--, i++) {
				value = regs->regs[i];
				StoreDW(addr, value, res);
				if (res)
					goto fault;
				addr += 8;
			}
			if ((reg & 0xf) == 9) {
				value = regs->regs[30];
				StoreDW(addr, value, res);
				if (res)
					goto fault;
				addr += 8;
			}
			if (reg & 0x10) {
				value = regs->regs[31];
				StoreDW(addr, value, res);
				if (res)
					goto fault;
			}
			goto success;
#endif /* CONFIG_64BIT */

			goto sigill;

			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
		}

		goto sigbus;

	case mm_pool32c_op:
		switch (insn.mm_m_format.func) {
		case mm_lwu_func:
			reg = insn.mm_m_format.rd;
			goto loadWU;
		}

		/*  LL,SC,LLD,SCD are not serviced */
		goto sigbus;

#ifdef CONFIG_MIPS_FP_SUPPORT
	case mm_pool32f_op:
		switch (insn.mm_x_format.func) {
		case mm_lwxc1_func:
		case mm_swxc1_func:
		case mm_ldxc1_func:
		case mm_sdxc1_func:
			goto fpu_emul;
		}

		goto sigbus;

	case mm_ldc132_op:
	case mm_sdc132_op:
	case mm_lwc132_op:
	case mm_swc132_op: {
		/* Unaligned FP access: hand the instruction to the FPU emulator. */
		void __user *fault_addr = NULL;

fpu_emul:
		/* roll back jump/branch */
		regs->cp0_epc = origpc;
		regs->regs[31] = orig31;

		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());
		BUG_ON(!is_fpu_owner());

		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* restore FPU state */

		/* If something went wrong, signal */
		process_fpemu_return(res, fault_addr, 0);

		if (res == 0)
			goto success;
		return;
	}
#endif /* CONFIG_MIPS_FP_SUPPORT */

	case mm_lh32_op:
		reg = insn.mm_i_format.rt;
		goto loadHW;

	case mm_lhu32_op:
		reg = insn.mm_i_format.rt;
		goto loadHWU;

	case mm_lw32_op:
		reg = insn.mm_i_format.rt;
		goto loadW;

	case mm_sh32_op:
		reg = insn.mm_i_format.rt;
		goto storeHW;

	case mm_sw32_op:
		reg = insn.mm_i_format.rt;
		goto storeW;

	case mm_ld32_op:
		reg = insn.mm_i_format.rt;
		goto loadDW;

	case mm_sd32_op:
		reg = insn.mm_i_format.rt;
		goto storeDW;

	case mm_pool16c_op:
		switch (insn.mm16_m_format.func) {
		case mm_lwm16_op:
			/* 16-bit LWM: rlist+1 registers from $16, plus $31. */
			reg = insn.mm16_m_format.rlist;
			rvar = reg + 1;
			/*
			 * NOTE(review): rvar words are loaded in the loop AND
			 * one more for $31 below, but access_ok only covers
			 * 4 * rvar bytes — looks one word short; confirm
			 * against the LWM16 encoding.
			 */
			if (user && !access_ok(addr, 4 * rvar))
				goto sigbus;

			for (i = 16; rvar; rvar--, i++) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
				regs->regs[i] = value;
			}
			LoadW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[31] = value;

			goto success;

		case mm_swm16_op:
			/* 16-bit SWM: mirror of mm_lwm16_op (same access_ok caveat). */
			reg = insn.mm16_m_format.rlist;
			rvar = reg + 1;
			if (user && !access_ok(addr, 4 * rvar))
				goto sigbus;

			for (i = 16; rvar; rvar--, i++) {
				value = regs->regs[i];
				StoreW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
			}
			value = regs->regs[31];
			StoreW(addr, value, res);
			if (res)
				goto fault;

			goto success;

		}

		goto sigbus;

	case mm_lhu16_op:
		reg = reg16to32[insn.mm16_rb_format.rt];
		goto loadHWU;

	case mm_lw16_op:
		reg = reg16to32[insn.mm16_rb_format.rt];
		goto loadW;

	case mm_sh16_op:
		reg = reg16to32st[insn.mm16_rb_format.rt];
		goto storeHW;

	case mm_sw16_op:
		reg = reg16to32st[insn.mm16_rb_format.rt];
		goto storeW;

	case mm_lwsp16_op:
		reg = insn.mm16_r5_format.rt;
		goto loadW;

	case mm_swsp16_op:
		reg = insn.mm16_r5_format.rt;
		goto storeW;

	case mm_lwgp16_op:
		reg = reg16to32[insn.mm16_r3_format.rt];
		goto loadW;

	default:
		goto sigill;
	}

	/* Shared tails: 'reg' selects the GPR, 'addr' the data address. */
loadHW:
	if (user && !access_ok(addr, 2))
		goto sigbus;

	LoadHW(addr, value, res);
	if (res)
		goto fault;
	regs->regs[reg] = value;
	goto success;

loadHWU:
	if (user && !access_ok(addr, 2))
		goto sigbus;

	LoadHWU(addr, value, res);
	if (res)
		goto fault;
	regs->regs[reg] = value;
	goto success;

loadW:
	if (user && !access_ok(addr, 4))
		goto sigbus;

	LoadW(addr, value, res);
	if (res)
		goto fault;
	regs->regs[reg] = value;
	goto success;

loadWU:
#ifdef CONFIG_64BIT
	/*
	 * A 32-bit kernel might be running on a 64-bit processor.  But
	 * if we're on a 32-bit processor and an i-cache incoherency
	 * or race makes us see a 64-bit instruction here the sdl/sdr
	 * would blow up, so for now we don't handle unaligned 64-bit
	 * instructions on 32-bit kernels.
	 */
	if (user && !access_ok(addr, 4))
		goto sigbus;

	LoadWU(addr, value, res);
	if (res)
		goto fault;
	regs->regs[reg] = value;
	goto success;
#endif /* CONFIG_64BIT */

	/* Cannot handle 64-bit instructions in 32-bit kernel */
	goto sigill;

loadDW:
#ifdef CONFIG_64BIT
	/*
	 * A 32-bit kernel might be running on a 64-bit processor.  But
	 * if we're on a 32-bit processor and an i-cache incoherency
	 * or race makes us see a 64-bit instruction here the sdl/sdr
	 * would blow up, so for now we don't handle unaligned 64-bit
	 * instructions on 32-bit kernels.
	 */
	if (user && !access_ok(addr, 8))
		goto sigbus;

	LoadDW(addr, value, res);
	if (res)
		goto fault;
	regs->regs[reg] = value;
	goto success;
#endif /* CONFIG_64BIT */

	/* Cannot handle 64-bit instructions in 32-bit kernel */
	goto sigill;

storeHW:
	if (user && !access_ok(addr, 2))
		goto sigbus;

	value = regs->regs[reg];
	StoreHW(addr, value, res);
	if (res)
		goto fault;
	goto success;

storeW:
	if (user && !access_ok(addr, 4))
		goto sigbus;

	value = regs->regs[reg];
	StoreW(addr, value, res);
	if (res)
		goto fault;
	goto success;

storeDW:
#ifdef CONFIG_64BIT
	/*
	 * A 32-bit kernel might be running on a 64-bit processor.  But
	 * if we're on a 32-bit processor and an i-cache incoherency
	 * or race makes us see a 64-bit instruction here the sdl/sdr
	 * would blow up, so for now we don't handle unaligned 64-bit
	 * instructions on 32-bit kernels.
	 */
	if (user && !access_ok(addr, 8))
		goto sigbus;

	value = regs->regs[reg];
	StoreDW(addr, value, res);
	if (res)
		goto fault;
	goto success;
#endif /* CONFIG_64BIT */

	/* Cannot handle 64-bit instructions in 32-bit kernel */
	goto sigill;

success:
	regs->cp0_epc = contpc;	/* advance or branch */

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif
	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL);
}
1193
/*
 * emulate_load_store_MIPS16e() - emulate one unaligned MIPS16e access.
 * @regs: exception-time register file; the destination GPR and EPC are
 *	  updated on success and rolled back on an access fault.
 * @addr: the unaligned data address (taken from CP0 BadVAddr by do_ade()).
 *
 * Decodes the faulting MIPS16e instruction - skipping a leading EXTEND
 * prefix, or the preceding jump when the access sits in a delay slot -
 * then re-executes the access in software with the byte-wise LoadW()/
 * StoreW() helper family and advances the EPC. Accesses that cannot
 * legitimately address-error get SIGBUS, unknown encodings SIGILL; a
 * fault during the emulated access restores EPC/$31 and consults the
 * kernel exception table.
 */
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
{
	unsigned long value;
	unsigned int res;
	int reg;
	unsigned long orig31;
	u16 __user *pc16;
	unsigned long origpc;
	union mips16e_instruction mips16inst, oldinst;
	unsigned int opcode;
	int extended = 0;
	bool user = user_mode(regs);

	/* Saved so the fault path can undo the EPC/link-register update. */
	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];
	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
	/*
	 * This load never faults.
	 */
	__get_user(mips16inst.full, pc16);
	oldinst = mips16inst;	/* keep the first halfword for EPC computation */

	/* skip EXTEND instruction */
	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
		extended = 1;
		pc16++;
		__get_user(mips16inst.full, pc16);
	} else if (delay_slot(regs)) {
		/*  skip jump instructions */
		/*  JAL/JALX are 32 bits but have OPCODE in first short int */
		if (mips16inst.ri.opcode == MIPS16e_jal_op)
			pc16++;
		pc16++;
		if (get_user(mips16inst.full, pc16))
			goto sigbus;
	}

	/*
	 * First pass: work out which GPR the instruction names, and fold
	 * the EXTENDed SWSP/LWSP encodings of the MIPS16e2 ASE down to
	 * plain sh/lh/lhu opcodes for the second pass.
	 */
	opcode = mips16inst.ri.opcode;
	switch (opcode) {
	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
		case MIPS16e_ldpc_func:
		case MIPS16e_ldsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto loadDW;

		case MIPS16e_sdsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto writeDW;

		case MIPS16e_sdrasp_func:
			reg = 29;	/* GPRSP */
			goto writeDW;
		}

		goto sigbus;

	case MIPS16e_swsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
			case 0:		/* SWSP */
			case 1:		/* SWGP */
				break;
			case 2:		/* SHGP */
				opcode = MIPS16e_sh_op;
				break;
			default:
				goto sigbus;
			}
		break;

	case MIPS16e_lwpc_op:
		reg = reg16to32[mips16inst.ri.rx];
		break;

	case MIPS16e_lwsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
			case 0:		/* LWSP */
			case 1:		/* LWGP */
				break;
			case 2:		/* LHGP */
				opcode = MIPS16e_lh_op;
				break;
			case 4:		/* LHUGP */
				opcode = MIPS16e_lhu_op;
				break;
			default:
				goto sigbus;
			}
		break;

	case MIPS16e_i8_op:
		if (mips16inst.i8.func != MIPS16e_swrasp_func)
			goto sigbus;
		reg = 29;	/* GPRSP */
		break;

	default:
		reg = reg16to32[mips16inst.rri.ry];
		break;
	}

	/* Second pass: perform the access itself. */
	switch (opcode) {

	case MIPS16e_lb_op:
	case MIPS16e_lbu_op:
	case MIPS16e_sb_op:
		/* Byte accesses can't address-error; this must be bad code. */
		goto sigbus;

	case MIPS16e_lh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lhu_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lw_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		if (user && !access_ok(addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_ld_op:
loadDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_sh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		/* EPC is advanced first; the fault path rolls it back. */
		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreHW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sw_op:
	case MIPS16e_swsp_op:
	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
		if (user && !access_ok(addr, 4))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sd_op:
writeDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL);
}
1474
1475asmlinkage void do_ade(struct pt_regs *regs)
1476{
1477	enum ctx_state prev_state;
1478	unsigned int *pc;
1479
1480	prev_state = exception_enter();
1481	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1482			1, regs, regs->cp0_badvaddr);
1483
1484#ifdef CONFIG_64BIT
1485	/*
1486	 * check, if we are hitting space between CPU implemented maximum
1487	 * virtual user address and 64bit maximum virtual user address
1488	 * and do exception handling to get EFAULTs for get_user/put_user
1489	 */
1490	if ((regs->cp0_badvaddr >= (1UL << cpu_vmbits)) &&
1491	    (regs->cp0_badvaddr < XKSSEG)) {
1492		if (fixup_exception(regs)) {
1493			current->thread.cp0_baduaddr = regs->cp0_badvaddr;
1494			return;
1495		}
1496		goto sigbus;
1497	}
1498#endif
1499
1500	/*
1501	 * Did we catch a fault trying to load an instruction?
1502	 */
1503	if (regs->cp0_badvaddr == regs->cp0_epc)
1504		goto sigbus;
1505
1506	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
1507		goto sigbus;
1508	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
1509		goto sigbus;
1510
1511	/*
1512	 * Do branch emulation only if we didn't forward the exception.
1513	 * This is all so but ugly ...
1514	 */
1515
1516	/*
1517	 * Are we running in microMIPS mode?
1518	 */
1519	if (get_isa16_mode(regs->cp0_epc)) {
1520		/*
1521		 * Did we catch a fault trying to load an instruction in
1522		 * 16-bit mode?
1523		 */
1524		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
1525			goto sigbus;
1526		if (unaligned_action == UNALIGNED_ACTION_SHOW)
1527			show_registers(regs);
1528
1529		if (cpu_has_mmips) {
1530			emulate_load_store_microMIPS(regs,
1531				(void __user *)regs->cp0_badvaddr);
1532			return;
1533		}
1534
1535		if (cpu_has_mips16) {
1536			emulate_load_store_MIPS16e(regs,
1537				(void __user *)regs->cp0_badvaddr);
1538			return;
1539		}
1540
1541		goto sigbus;
1542	}
1543
1544	if (unaligned_action == UNALIGNED_ACTION_SHOW)
1545		show_registers(regs);
1546	pc = (unsigned int *)exception_epc(regs);
1547
1548	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
1549
1550	return;
1551
1552sigbus:
1553	die_if_kernel("Kernel unaligned instruction access", regs);
1554	force_sig(SIGBUS);
1555
1556	/*
1557	 * XXX On return from the signal handler we should advance the epc
1558	 */
1559	exception_exit(prev_state);
1560}
1561
1562#ifdef CONFIG_DEBUG_FS
1563static int __init debugfs_unaligned(void)
1564{
1565	debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
1566			   &unaligned_instructions);
1567	debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
1568			   mips_debugfs_dir, &unaligned_action);
1569	return 0;
1570}
1571arch_initcall(debugfs_unaligned);
1572#endif
v6.8
   1/*
   2 * Handle unaligned accesses by emulation.
   3 *
   4 * This file is subject to the terms and conditions of the GNU General Public
   5 * License.  See the file "COPYING" in the main directory of this archive
   6 * for more details.
   7 *
   8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
   9 * Copyright (C) 1999 Silicon Graphics, Inc.
  10 * Copyright (C) 2014 Imagination Technologies Ltd.
  11 *
  12 * This file contains exception handler for address error exception with the
  13 * special capability to execute faulting instructions in software.  The
  14 * handler does not try to handle the case when the program counter points
  15 * to an address not aligned to a word boundary.
  16 *
  17 * Putting data to unaligned addresses is a bad practice even on Intel where
  18 * only the performance is affected.  Much worse is that such code is non-
  19 * portable.  Due to several programs that die on MIPS due to alignment
  20 * problems I decided to implement this handler anyway though I originally
  21 * didn't intend to do this at all for user code.
  22 *
  23 * For now I enable fixing of address errors by default to make life easier.
  24 * I however intend to disable this somewhen in the future when the alignment
  25 * problems with user programs have been fixed.	 For programmers this is the
  26 * right way to go.
  27 *
  28 * Fixing address errors is a per process option.  The option is inherited
  29 * across fork(2) and execve(2) calls.	If you really want to use the
  30 * option in your user programs - I discourage the use of the software
  31 * emulation strongly - use the following code in your userland stuff:
  32 *
  33 * #include <sys/sysmips.h>
  34 *
  35 * ...
  36 * sysmips(MIPS_FIXADE, x);
  37 * ...
  38 *
  39 * The argument x is 0 for disabling software emulation, enabled otherwise.
  40 *
  41 * Below a little program to play around with this feature.
  42 *
  43 * #include <stdio.h>
  44 * #include <sys/sysmips.h>
  45 *
  46 * struct foo {
  47 *	   unsigned char bar[8];
  48 * };
  49 *
  50 * main(int argc, char *argv[])
  51 * {
  52 *	   struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
  53 *	   unsigned int *p = (unsigned int *) (x.bar + 3);
  54 *	   int i;
  55 *
  56 *	   if (argc > 1)
  57 *		   sysmips(MIPS_FIXADE, atoi(argv[1]));
  58 *
  59 *	   printf("*p = %08lx\n", *p);
  60 *
  61 *	   *p = 0xdeadface;
  62 *
  63 *	   for(i = 0; i <= 7; i++)
  64 *	   printf("%02x ", x.bar[i]);
  65 *	   printf("\n");
  66 * }
  67 *
  68 * Coprocessor loads are not supported; I think this case is unimportant
  69 * in the practice.
  70 *
  71 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
  72 *	 exception for the R6000.
  73 *	 A store crossing a page boundary might be executed only partially.
  74 *	 Undo the partial store in this case.
  75 */
  76#include <linux/context_tracking.h>
  77#include <linux/mm.h>
  78#include <linux/signal.h>
  79#include <linux/smp.h>
  80#include <linux/sched.h>
  81#include <linux/debugfs.h>
  82#include <linux/perf_event.h>
  83
  84#include <asm/asm.h>
  85#include <asm/branch.h>
  86#include <asm/byteorder.h>
  87#include <asm/cop2.h>
  88#include <asm/debug.h>
  89#include <asm/fpu.h>
  90#include <asm/fpu_emulator.h>
  91#include <asm/inst.h>
  92#include <asm/unaligned-emul.h>
  93#include <asm/mmu_context.h>
  94#include <asm/traps.h>
  95#include <linux/uaccess.h>
  96
  97#include "access-helper.h"
  98
/*
 * Policy for unaligned accesses that reach the emulator; settable at
 * runtime through the debugfs "unaligned_action" file when
 * CONFIG_DEBUG_FS=y, otherwise fixed at QUIET.
 */
enum {
	UNALIGNED_ACTION_QUIET,		/* emulate silently (default, 0) */
	UNALIGNED_ACTION_SIGNAL,	/* don't emulate, deliver SIGBUS */
	UNALIGNED_ACTION_SHOW,		/* dump registers, then emulate */
};
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;	/* count of emulated accesses */
static u32 unaligned_action;
#else
/* Without debugfs there is no knob; hardwire the default policy. */
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);
 111
/*
 * emulate_load_store_insn() - emulate one unaligned classic-MIPS access.
 * @regs: exception-time register file; updated on success, rolled back
 *	  on an access fault.
 * @addr: the unaligned data address (CP0 BadVAddr).
 * @pc:   address of the faulting 32-bit instruction.
 *
 * Decodes the instruction at @pc and re-executes the access in software
 * with the byte-wise LoadW()/StoreW() helper family, then advances the
 * EPC past the instruction (or past the branch it sat in the delay slot
 * of, via compute_return_epc()). Instructions that cannot legitimately
 * address-error get SIGBUS, unknown encodings SIGILL; FPU and MSA
 * accesses are delegated to their respective emulators, and COP2
 * accesses to the registered CU2 notifier chain.
 */
static void emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int *pc)
{
	unsigned long origpc, orig31, value;
	union mips_instruction insn;
	unsigned int res;
	bool user = user_mode(regs);

	/* Saved so the fault path can undo compute_return_epc(). */
	origpc = (unsigned long)pc;
	orig31 = regs->regs[31];

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	/*
	 * This load never faults.
	 */
	__get_inst32(&insn.word, pc, user);

	switch (insn.i_format.opcode) {
		/*
		 * These are instructions that a compiler doesn't generate.  We
		 * can assume therefore that the code is MIPS-aware and
		 * really buggy.  Emulating these instructions would break the
		 * semantics anyway.
		 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

		/*
		 * For these instructions the only way to create an address
		 * error is an attempted access to kernel/supervisor address
		 * space.
		 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

		/*
		 * The remaining opcodes are the ones that are really of
		 * interest.
		 */
#ifdef CONFIG_MACH_INGENIC
	/* Ingenic MXU ASE: only the LXW/LXH/LXHU indexed loads are emulated. */
	case spec2_op:
		if (insn.mxu_lx_format.func != mxu_lx_op)
			goto sigbus; /* other MXU instructions we don't care */

		switch (insn.mxu_lx_format.op) {
		case mxu_lxw_op:
			if (user && !access_ok(addr, 4))
				goto sigbus;
			LoadW(addr, value, res);
			if (res)
				goto fault;
			compute_return_epc(regs);
			regs->regs[insn.mxu_lx_format.rd] = value;
			break;
		case mxu_lxh_op:
			if (user && !access_ok(addr, 2))
				goto sigbus;
			LoadHW(addr, value, res);
			if (res)
				goto fault;
			compute_return_epc(regs);
			regs->regs[insn.dsp_format.rd] = value;
			break;
		case mxu_lxhu_op:
			if (user && !access_ok(addr, 2))
				goto sigbus;
			LoadHWU(addr, value, res);
			if (res)
				goto fault;
			compute_return_epc(regs);
			regs->regs[insn.dsp_format.rd] = value;
			break;
		case mxu_lxb_op:
		case mxu_lxbu_op:
			goto sigbus;
		default:
			goto sigill;
		}
		break;
#endif
	case spec3_op:
		/* DSP ASE indexed loads (lwx/lhx) ... */
		if (insn.dsp_format.func == lx_op) {
			switch (insn.dsp_format.op) {
			case lwx_op:
				if (user && !access_ok(addr, 4))
					goto sigbus;
				LoadW(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			case lhx_op:
				if (user && !access_ok(addr, 2))
					goto sigbus;
				LoadHW(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			default:
				goto sigill;
			}
		}
#ifdef CONFIG_EVA
		/* ... or the EVA lhe/lwe/lhue/she/swe user-view accessors. */
		else {
			/*
			 * we can land here only from kernel accessing user
			 * memory, so we need to "switch" the address limit to
			 * user space, so that address check can work properly.
			 */
			switch (insn.spec3_format.func) {
			case lhe_op:
				if (!access_ok(addr, 2))
					goto sigbus;
				LoadHWE(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case lwe_op:
				if (!access_ok(addr, 4))
					goto sigbus;
				LoadWE(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case lhue_op:
				if (!access_ok(addr, 2))
					goto sigbus;
				LoadHWUE(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case she_op:
				if (!access_ok(addr, 2))
					goto sigbus;
				compute_return_epc(regs);
				value = regs->regs[insn.spec3_format.rt];
				StoreHWE(addr, value, res);
				if (res)
					goto fault;
				break;
			case swe_op:
				if (!access_ok(addr, 4))
					goto sigbus;
				compute_return_epc(regs);
				value = regs->regs[insn.spec3_format.rt];
				StoreWE(addr, value, res);
				if (res)
					goto fault;
				break;
			default:
				goto sigill;
			}
		}
#endif
		break;
	case lh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA) && user)
			LoadHWE(addr, value, res);
		else
			LoadHW(addr, value, res);

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lw_op:
		if (user && !access_ok(addr, 4))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA) && user)
			LoadWE(addr, value, res);
		else
			LoadW(addr, value, res);

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lhu_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA) && user)
			LoadHWUE(addr, value, res);
		else
			LoadHWU(addr, value, res);

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case sh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		/* EPC is advanced first; the fault path rolls it back. */
		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (IS_ENABLED(CONFIG_EVA) && user)
			StoreHWE(addr, value, res);
		else
			StoreHW(addr, value, res);

		if (res)
			goto fault;
		break;

	case sw_op:
		if (user && !access_ok(addr, 4))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (IS_ENABLED(CONFIG_EVA) && user)
			StoreWE(addr, value, res);
		else
			StoreW(addr, value, res);

		if (res)
			goto fault;
		break;

	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

#ifdef CONFIG_MIPS_FP_SUPPORT

	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
	case cop1x_op: {
		/* FP accesses are handed to the full FPU emulator. */
		void __user *fault_addr = NULL;

		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());

		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* Restore FPU state. */

		/* Signal if something went wrong. */
		process_fpemu_return(res, fault_addr, 0);

		if (res == 0)
			break;
		return;
	}
#endif /* CONFIG_MIPS_FP_SUPPORT */

#ifdef CONFIG_CPU_HAS_MSA

	case msa_op: {
		unsigned int wd, preempted;
		enum msa_2b_fmt df;
		union fpureg *fpr;

		if (!cpu_has_msa)
			goto sigill;

		/*
		 * If we've reached this point then userland should have taken
		 * the MSA disabled exception & initialised vector context at
		 * some point in the past.
		 */
		BUG_ON(!thread_msa_context_live());

		df = insn.msa_mi10_format.df;
		wd = insn.msa_mi10_format.wd;
		fpr = &current->thread.fpu.fpr[wd];

		switch (insn.msa_mi10_format.func) {
		case msa_ld_op:
			if (!access_ok(addr, sizeof(*fpr)))
				goto sigbus;

			do {
				/*
				 * If we have live MSA context keep track of
				 * whether we get preempted in order to avoid
				 * the register context we load being clobbered
				 * by the live context as it's saved during
				 * preemption. If we don't have live context
				 * then it can't be saved to clobber the value
				 * we load.
				 */
				preempted = test_thread_flag(TIF_USEDMSA);

				res = __copy_from_user_inatomic(fpr, addr,
								sizeof(*fpr));
				if (res)
					goto fault;

				/*
				 * Update the hardware register if it is in use
				 * by the task in this quantum, in order to
				 * avoid having to save & restore the whole
				 * vector context.
				 */
				preempt_disable();
				if (test_thread_flag(TIF_USEDMSA)) {
					write_msa_wr(wd, fpr, df);
					preempted = 0;
				}
				preempt_enable();
			} while (preempted);
			break;

		case msa_st_op:
			if (!access_ok(addr, sizeof(*fpr)))
				goto sigbus;

			/*
			 * Update from the hardware register if it is in use by
			 * the task in this quantum, in order to avoid having to
			 * save & restore the whole vector context.
			 */
			preempt_disable();
			if (test_thread_flag(TIF_USEDMSA))
				read_msa_wr(wd, fpr, df);
			preempt_enable();

			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
			if (res)
				goto fault;
			break;

		default:
			goto sigbus;
		}

		compute_return_epc(regs);
		break;
	}
#endif /* CONFIG_CPU_HAS_MSA */

#ifndef CONFIG_CPU_MIPSR6
	/*
	 * COP2 is available to implementor for application specific use.
	 * It's up to applications to register a notifier chain and do
	 * whatever they have to do, including possible sending of signals.
	 *
	 * This instruction has been reallocated in Release 6
	 */
	case lwc2_op:
		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
		break;

	case ldc2_op:
		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
		break;

	case swc2_op:
		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
		break;

	case sdc2_op:
		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
		break;
#endif
	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL);
}
 608
/*
 * Recode table from 16-bit register notation to 32-bit GPR.
 * MIPS16e 3-bit rx/ry fields 0-7 select $16, $17, $2..$7.
 */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

/*
 * Recode table from 16-bit STORE register notation to 32-bit GPR.
 * Identical to reg16to32 except that encoding 0 names $0 instead of $16.
 */
static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
 614
 615static void emulate_load_store_microMIPS(struct pt_regs *regs,
 616					 void __user *addr)
 617{
 618	unsigned long value;
 619	unsigned int res;
 620	int i;
 621	unsigned int reg = 0, rvar;
 622	unsigned long orig31;
 623	u16 __user *pc16;
 624	u16 halfword;
 625	unsigned int word;
 626	unsigned long origpc, contpc;
 627	union mips_instruction insn;
 628	struct mm_decoded_insn mminsn;
 629	bool user = user_mode(regs);
 630
 631	origpc = regs->cp0_epc;
 632	orig31 = regs->regs[31];
 633
 634	mminsn.micro_mips_mode = 1;
 635
 636	/*
 637	 * This load never faults.
 638	 */
 639	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
 640	__get_user(halfword, pc16);
 641	pc16++;
 642	contpc = regs->cp0_epc + 2;
 643	word = ((unsigned int)halfword << 16);
 644	mminsn.pc_inc = 2;
 645
 646	if (!mm_insn_16bit(halfword)) {
 647		__get_user(halfword, pc16);
 648		pc16++;
 649		contpc = regs->cp0_epc + 4;
 650		mminsn.pc_inc = 4;
 651		word |= halfword;
 652	}
 653	mminsn.insn = word;
 654
 655	if (get_user(halfword, pc16))
 656		goto fault;
 657	mminsn.next_pc_inc = 2;
 658	word = ((unsigned int)halfword << 16);
 659
 660	if (!mm_insn_16bit(halfword)) {
 661		pc16++;
 662		if (get_user(halfword, pc16))
 663			goto fault;
 664		mminsn.next_pc_inc = 4;
 665		word |= halfword;
 666	}
 667	mminsn.next_insn = word;
 668
 669	insn = (union mips_instruction)(mminsn.insn);
 670	if (mm_isBranchInstr(regs, mminsn, &contpc))
 671		insn = (union mips_instruction)(mminsn.next_insn);
 672
 673	/*  Parse instruction to find what to do */
 674
 675	switch (insn.mm_i_format.opcode) {
 676
 677	case mm_pool32a_op:
 678		switch (insn.mm_x_format.func) {
 679		case mm_lwxs_op:
 680			reg = insn.mm_x_format.rd;
 681			goto loadW;
 682		}
 683
 684		goto sigbus;
 685
 686	case mm_pool32b_op:
 687		switch (insn.mm_m_format.func) {
 688		case mm_lwp_func:
 689			reg = insn.mm_m_format.rd;
 690			if (reg == 31)
 691				goto sigbus;
 692
 693			if (user && !access_ok(addr, 8))
 694				goto sigbus;
 695
 696			LoadW(addr, value, res);
 697			if (res)
 698				goto fault;
 699			regs->regs[reg] = value;
 700			addr += 4;
 701			LoadW(addr, value, res);
 702			if (res)
 703				goto fault;
 704			regs->regs[reg + 1] = value;
 705			goto success;
 706
 707		case mm_swp_func:
 708			reg = insn.mm_m_format.rd;
 709			if (reg == 31)
 710				goto sigbus;
 711
 712			if (user && !access_ok(addr, 8))
 713				goto sigbus;
 714
 715			value = regs->regs[reg];
 716			StoreW(addr, value, res);
 717			if (res)
 718				goto fault;
 719			addr += 4;
 720			value = regs->regs[reg + 1];
 721			StoreW(addr, value, res);
 722			if (res)
 723				goto fault;
 724			goto success;
 725
 726		case mm_ldp_func:
 727#ifdef CONFIG_64BIT
 728			reg = insn.mm_m_format.rd;
 729			if (reg == 31)
 730				goto sigbus;
 731
 732			if (user && !access_ok(addr, 16))
 733				goto sigbus;
 734
 735			LoadDW(addr, value, res);
 736			if (res)
 737				goto fault;
 738			regs->regs[reg] = value;
 739			addr += 8;
 740			LoadDW(addr, value, res);
 741			if (res)
 742				goto fault;
 743			regs->regs[reg + 1] = value;
 744			goto success;
 745#endif /* CONFIG_64BIT */
 746
 747			goto sigill;
 748
 749		case mm_sdp_func:
 750#ifdef CONFIG_64BIT
 751			reg = insn.mm_m_format.rd;
 752			if (reg == 31)
 753				goto sigbus;
 754
 755			if (user && !access_ok(addr, 16))
 756				goto sigbus;
 757
 758			value = regs->regs[reg];
 759			StoreDW(addr, value, res);
 760			if (res)
 761				goto fault;
 762			addr += 8;
 763			value = regs->regs[reg + 1];
 764			StoreDW(addr, value, res);
 765			if (res)
 766				goto fault;
 767			goto success;
 768#endif /* CONFIG_64BIT */
 769
 770			goto sigill;
 771
 772		case mm_lwm32_func:
 773			reg = insn.mm_m_format.rd;
 774			rvar = reg & 0xf;
 775			if ((rvar > 9) || !reg)
 776				goto sigill;
 777			if (reg & 0x10) {
 778				if (user && !access_ok(addr, 4 * (rvar + 1)))
 779					goto sigbus;
 780			} else {
 781				if (user && !access_ok(addr, 4 * rvar))
 782					goto sigbus;
 783			}
 784			if (rvar == 9)
 785				rvar = 8;
 786			for (i = 16; rvar; rvar--, i++) {
 787				LoadW(addr, value, res);
 788				if (res)
 789					goto fault;
 790				addr += 4;
 791				regs->regs[i] = value;
 792			}
 793			if ((reg & 0xf) == 9) {
 794				LoadW(addr, value, res);
 795				if (res)
 796					goto fault;
 797				addr += 4;
 798				regs->regs[30] = value;
 799			}
 800			if (reg & 0x10) {
 801				LoadW(addr, value, res);
 802				if (res)
 803					goto fault;
 804				regs->regs[31] = value;
 805			}
 806			goto success;
 807
 808		case mm_swm32_func:
 809			reg = insn.mm_m_format.rd;
 810			rvar = reg & 0xf;
 811			if ((rvar > 9) || !reg)
 812				goto sigill;
 813			if (reg & 0x10) {
 814				if (user && !access_ok(addr, 4 * (rvar + 1)))
 815					goto sigbus;
 816			} else {
 817				if (user && !access_ok(addr, 4 * rvar))
 818					goto sigbus;
 819			}
 820			if (rvar == 9)
 821				rvar = 8;
 822			for (i = 16; rvar; rvar--, i++) {
 823				value = regs->regs[i];
 824				StoreW(addr, value, res);
 825				if (res)
 826					goto fault;
 827				addr += 4;
 828			}
 829			if ((reg & 0xf) == 9) {
 830				value = regs->regs[30];
 831				StoreW(addr, value, res);
 832				if (res)
 833					goto fault;
 834				addr += 4;
 835			}
 836			if (reg & 0x10) {
 837				value = regs->regs[31];
 838				StoreW(addr, value, res);
 839				if (res)
 840					goto fault;
 841			}
 842			goto success;
 843
 844		case mm_ldm_func:
 845#ifdef CONFIG_64BIT
 846			reg = insn.mm_m_format.rd;
 847			rvar = reg & 0xf;
 848			if ((rvar > 9) || !reg)
 849				goto sigill;
 850			if (reg & 0x10) {
 851				if (user && !access_ok(addr, 8 * (rvar + 1)))
 852					goto sigbus;
 853			} else {
 854				if (user && !access_ok(addr, 8 * rvar))
 855					goto sigbus;
 856			}
 857			if (rvar == 9)
 858				rvar = 8;
 859
 860			for (i = 16; rvar; rvar--, i++) {
 861				LoadDW(addr, value, res);
 862				if (res)
 863					goto fault;
 864				addr += 4;
 865				regs->regs[i] = value;
 866			}
 867			if ((reg & 0xf) == 9) {
 868				LoadDW(addr, value, res);
 869				if (res)
 870					goto fault;
 871				addr += 8;
 872				regs->regs[30] = value;
 873			}
 874			if (reg & 0x10) {
 875				LoadDW(addr, value, res);
 876				if (res)
 877					goto fault;
 878				regs->regs[31] = value;
 879			}
 880			goto success;
 881#endif /* CONFIG_64BIT */
 882
 883			goto sigill;
 884
 885		case mm_sdm_func:
 886#ifdef CONFIG_64BIT
 887			reg = insn.mm_m_format.rd;
 888			rvar = reg & 0xf;
 889			if ((rvar > 9) || !reg)
 890				goto sigill;
 891			if (reg & 0x10) {
 892				if (user && !access_ok(addr, 8 * (rvar + 1)))
 893					goto sigbus;
 894			} else {
 895				if (user && !access_ok(addr, 8 * rvar))
 896					goto sigbus;
 897			}
 898			if (rvar == 9)
 899				rvar = 8;
 900
 901			for (i = 16; rvar; rvar--, i++) {
 902				value = regs->regs[i];
 903				StoreDW(addr, value, res);
 904				if (res)
 905					goto fault;
 906				addr += 8;
 907			}
 908			if ((reg & 0xf) == 9) {
 909				value = regs->regs[30];
 910				StoreDW(addr, value, res);
 911				if (res)
 912					goto fault;
 913				addr += 8;
 914			}
 915			if (reg & 0x10) {
 916				value = regs->regs[31];
 917				StoreDW(addr, value, res);
 918				if (res)
 919					goto fault;
 920			}
 921			goto success;
 922#endif /* CONFIG_64BIT */
 923
 924			goto sigill;
 925
 926			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
 927		}
 928
 929		goto sigbus;
 930
 931	case mm_pool32c_op:
 932		switch (insn.mm_m_format.func) {
 933		case mm_lwu_func:
 934			reg = insn.mm_m_format.rd;
 935			goto loadWU;
 936		}
 937
 938		/*  LL,SC,LLD,SCD are not serviced */
 939		goto sigbus;
 940
 941#ifdef CONFIG_MIPS_FP_SUPPORT
 942	case mm_pool32f_op:
 943		switch (insn.mm_x_format.func) {
 944		case mm_lwxc1_func:
 945		case mm_swxc1_func:
 946		case mm_ldxc1_func:
 947		case mm_sdxc1_func:
 948			goto fpu_emul;
 949		}
 950
 951		goto sigbus;
 952
 953	case mm_ldc132_op:
 954	case mm_sdc132_op:
 955	case mm_lwc132_op:
 956	case mm_swc132_op: {
 957		void __user *fault_addr = NULL;
 958
 959fpu_emul:
 960		/* roll back jump/branch */
 961		regs->cp0_epc = origpc;
 962		regs->regs[31] = orig31;
 963
 964		die_if_kernel("Unaligned FP access in kernel code", regs);
 965		BUG_ON(!used_math());
 966		BUG_ON(!is_fpu_owner());
 967
 968		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 969					       &fault_addr);
 970		own_fpu(1);	/* restore FPU state */
 971
 972		/* If something went wrong, signal */
 973		process_fpemu_return(res, fault_addr, 0);
 974
 975		if (res == 0)
 976			goto success;
 977		return;
 978	}
 979#endif /* CONFIG_MIPS_FP_SUPPORT */
 980
 981	case mm_lh32_op:
 982		reg = insn.mm_i_format.rt;
 983		goto loadHW;
 984
 985	case mm_lhu32_op:
 986		reg = insn.mm_i_format.rt;
 987		goto loadHWU;
 988
 989	case mm_lw32_op:
 990		reg = insn.mm_i_format.rt;
 991		goto loadW;
 992
 993	case mm_sh32_op:
 994		reg = insn.mm_i_format.rt;
 995		goto storeHW;
 996
 997	case mm_sw32_op:
 998		reg = insn.mm_i_format.rt;
 999		goto storeW;
1000
1001	case mm_ld32_op:
1002		reg = insn.mm_i_format.rt;
1003		goto loadDW;
1004
1005	case mm_sd32_op:
1006		reg = insn.mm_i_format.rt;
1007		goto storeDW;
1008
1009	case mm_pool16c_op:
1010		switch (insn.mm16_m_format.func) {
1011		case mm_lwm16_op:
1012			reg = insn.mm16_m_format.rlist;
1013			rvar = reg + 1;
1014			if (user && !access_ok(addr, 4 * rvar))
1015				goto sigbus;
1016
1017			for (i = 16; rvar; rvar--, i++) {
1018				LoadW(addr, value, res);
1019				if (res)
1020					goto fault;
1021				addr += 4;
1022				regs->regs[i] = value;
1023			}
1024			LoadW(addr, value, res);
1025			if (res)
1026				goto fault;
1027			regs->regs[31] = value;
1028
1029			goto success;
1030
1031		case mm_swm16_op:
1032			reg = insn.mm16_m_format.rlist;
1033			rvar = reg + 1;
1034			if (user && !access_ok(addr, 4 * rvar))
1035				goto sigbus;
1036
1037			for (i = 16; rvar; rvar--, i++) {
1038				value = regs->regs[i];
1039				StoreW(addr, value, res);
1040				if (res)
1041					goto fault;
1042				addr += 4;
1043			}
1044			value = regs->regs[31];
1045			StoreW(addr, value, res);
1046			if (res)
1047				goto fault;
1048
1049			goto success;
1050
1051		}
1052
1053		goto sigbus;
1054
1055	case mm_lhu16_op:
1056		reg = reg16to32[insn.mm16_rb_format.rt];
1057		goto loadHWU;
1058
1059	case mm_lw16_op:
1060		reg = reg16to32[insn.mm16_rb_format.rt];
1061		goto loadW;
1062
1063	case mm_sh16_op:
1064		reg = reg16to32st[insn.mm16_rb_format.rt];
1065		goto storeHW;
1066
1067	case mm_sw16_op:
1068		reg = reg16to32st[insn.mm16_rb_format.rt];
1069		goto storeW;
1070
1071	case mm_lwsp16_op:
1072		reg = insn.mm16_r5_format.rt;
1073		goto loadW;
1074
1075	case mm_swsp16_op:
1076		reg = insn.mm16_r5_format.rt;
1077		goto storeW;
1078
1079	case mm_lwgp16_op:
1080		reg = reg16to32[insn.mm16_r3_format.rt];
1081		goto loadW;
1082
1083	default:
1084		goto sigill;
1085	}
1086
1087loadHW:
1088	if (user && !access_ok(addr, 2))
1089		goto sigbus;
1090
1091	LoadHW(addr, value, res);
1092	if (res)
1093		goto fault;
1094	regs->regs[reg] = value;
1095	goto success;
1096
1097loadHWU:
1098	if (user && !access_ok(addr, 2))
1099		goto sigbus;
1100
1101	LoadHWU(addr, value, res);
1102	if (res)
1103		goto fault;
1104	regs->regs[reg] = value;
1105	goto success;
1106
1107loadW:
1108	if (user && !access_ok(addr, 4))
1109		goto sigbus;
1110
1111	LoadW(addr, value, res);
1112	if (res)
1113		goto fault;
1114	regs->regs[reg] = value;
1115	goto success;
1116
1117loadWU:
1118#ifdef CONFIG_64BIT
1119	/*
1120	 * A 32-bit kernel might be running on a 64-bit processor.  But
1121	 * if we're on a 32-bit processor and an i-cache incoherency
1122	 * or race makes us see a 64-bit instruction here the sdl/sdr
1123	 * would blow up, so for now we don't handle unaligned 64-bit
1124	 * instructions on 32-bit kernels.
1125	 */
1126	if (user && !access_ok(addr, 4))
1127		goto sigbus;
1128
1129	LoadWU(addr, value, res);
1130	if (res)
1131		goto fault;
1132	regs->regs[reg] = value;
1133	goto success;
1134#endif /* CONFIG_64BIT */
1135
1136	/* Cannot handle 64-bit instructions in 32-bit kernel */
1137	goto sigill;
1138
1139loadDW:
1140#ifdef CONFIG_64BIT
1141	/*
1142	 * A 32-bit kernel might be running on a 64-bit processor.  But
1143	 * if we're on a 32-bit processor and an i-cache incoherency
1144	 * or race makes us see a 64-bit instruction here the sdl/sdr
1145	 * would blow up, so for now we don't handle unaligned 64-bit
1146	 * instructions on 32-bit kernels.
1147	 */
1148	if (user && !access_ok(addr, 8))
1149		goto sigbus;
1150
1151	LoadDW(addr, value, res);
1152	if (res)
1153		goto fault;
1154	regs->regs[reg] = value;
1155	goto success;
1156#endif /* CONFIG_64BIT */
1157
1158	/* Cannot handle 64-bit instructions in 32-bit kernel */
1159	goto sigill;
1160
1161storeHW:
1162	if (user && !access_ok(addr, 2))
1163		goto sigbus;
1164
1165	value = regs->regs[reg];
1166	StoreHW(addr, value, res);
1167	if (res)
1168		goto fault;
1169	goto success;
1170
1171storeW:
1172	if (user && !access_ok(addr, 4))
1173		goto sigbus;
1174
1175	value = regs->regs[reg];
1176	StoreW(addr, value, res);
1177	if (res)
1178		goto fault;
1179	goto success;
1180
1181storeDW:
1182#ifdef CONFIG_64BIT
1183	/*
1184	 * A 32-bit kernel might be running on a 64-bit processor.  But
1185	 * if we're on a 32-bit processor and an i-cache incoherency
1186	 * or race makes us see a 64-bit instruction here the sdl/sdr
1187	 * would blow up, so for now we don't handle unaligned 64-bit
1188	 * instructions on 32-bit kernels.
1189	 */
1190	if (user && !access_ok(addr, 8))
1191		goto sigbus;
1192
1193	value = regs->regs[reg];
1194	StoreDW(addr, value, res);
1195	if (res)
1196		goto fault;
1197	goto success;
1198#endif /* CONFIG_64BIT */
1199
1200	/* Cannot handle 64-bit instructions in 32-bit kernel */
1201	goto sigill;
1202
1203success:
1204	regs->cp0_epc = contpc;	/* advance or branch */
1205
1206#ifdef CONFIG_DEBUG_FS
1207	unaligned_instructions++;
1208#endif
1209	return;
1210
1211fault:
1212	/* roll back jump/branch */
1213	regs->cp0_epc = origpc;
1214	regs->regs[31] = orig31;
1215	/* Did we have an exception handler installed? */
1216	if (fixup_exception(regs))
1217		return;
1218
1219	die_if_kernel("Unhandled kernel unaligned access", regs);
1220	force_sig(SIGSEGV);
1221
1222	return;
1223
1224sigbus:
1225	die_if_kernel("Unhandled kernel unaligned access", regs);
1226	force_sig(SIGBUS);
1227
1228	return;
1229
1230sigill:
1231	die_if_kernel
1232	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1233	force_sig(SIGILL);
1234}
1235
/*
 * emulate_load_store_MIPS16e - emulate an unaligned MIPS16e load/store.
 * @regs: exception-time register state; EPC and $31 are rolled back on fault
 * @addr: the unaligned data address (cp0_badvaddr) being accessed
 *
 * Fetches the MIPS16e instruction at EPC (skipping an EXTEND prefix, or
 * the jump when the faulting instruction sits in a delay slot), decodes
 * the destination/source GPR, performs the access with the byte-wise
 * LoadHW/StoreW-style helpers, and advances EPC.  On any failure the
 * saved EPC/$31 are restored and SIGSEGV/SIGBUS/SIGILL is raised.
 */
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
{
	unsigned long value;
	unsigned int res;
	int reg;
	unsigned long orig31;
	u16 __user *pc16;
	unsigned long origpc;
	union mips16e_instruction mips16inst, oldinst;
	unsigned int opcode;
	int extended = 0;
	bool user = user_mode(regs);

	/* Save PC and $31 so a faulting access can be rolled back. */
	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];
	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
	/*
	 * This load never faults.
	 */
	__get_user(mips16inst.full, pc16);
	oldinst = mips16inst;

	/* skip EXTEND instruction */
	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
		extended = 1;
		pc16++;
		__get_user(mips16inst.full, pc16);
	} else if (delay_slot(regs)) {
		/*  skip jump instructions */
		/*  JAL/JALX are 32 bits but have OPCODE in first short int */
		if (mips16inst.ri.opcode == MIPS16e_jal_op)
			pc16++;
		pc16++;
		if (get_user(mips16inst.full, pc16))
			goto sigbus;
	}

	/*
	 * First pass: derive the GPR number ('reg') from the instruction
	 * format, and for MIPS16e2 extended encodings rewrite 'opcode' to
	 * the operation actually encoded in the immediate's top bits.
	 */
	opcode = mips16inst.ri.opcode;
	switch (opcode) {
	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
		case MIPS16e_ldpc_func:
		case MIPS16e_ldsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto loadDW;

		case MIPS16e_sdsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto writeDW;

		case MIPS16e_sdrasp_func:
			reg = 29;	/* GPRSP */
			goto writeDW;
		}

		goto sigbus;

	case MIPS16e_swsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
			case 0:		/* SWSP */
			case 1:		/* SWGP */
				break;
			case 2:		/* SHGP */
				opcode = MIPS16e_sh_op;
				break;
			default:
				goto sigbus;
			}
		break;

	case MIPS16e_lwpc_op:
		reg = reg16to32[mips16inst.ri.rx];
		break;

	case MIPS16e_lwsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
			case 0:		/* LWSP */
			case 1:		/* LWGP */
				break;
			case 2:		/* LHGP */
				opcode = MIPS16e_lh_op;
				break;
			case 4:		/* LHUGP */
				opcode = MIPS16e_lhu_op;
				break;
			default:
				goto sigbus;
			}
		break;

	case MIPS16e_i8_op:
		if (mips16inst.i8.func != MIPS16e_swrasp_func)
			goto sigbus;
		reg = 29;	/* GPRSP */
		break;

	default:
		reg = reg16to32[mips16inst.rri.ry];
		break;
	}

	/*
	 * Second pass: perform the (possibly rewritten) access.  For loads
	 * EPC is advanced only after the access succeeded; for stores it is
	 * advanced before, then rolled back at 'fault' if the store fails.
	 */
	switch (opcode) {

	case MIPS16e_lb_op:
	case MIPS16e_lbu_op:
	case MIPS16e_sb_op:
		/* Byte accesses are always aligned - an AdE here is bogus. */
		goto sigbus;

	case MIPS16e_lh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lhu_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lw_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		if (user && !access_ok(addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_ld_op:
loadDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_sh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreHW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sw_op:
	case MIPS16e_swsp_op:
	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
		if (user && !access_ok(addr, 4))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sd_op:
writeDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* The emulated access itself faulted (e.g. bad user pointer). */
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL);
}
1516
/*
 * do_ade - Address Error exception (AdEL/AdES) handler.
 * @regs: exception-time register state; cp0_badvaddr holds the bad address
 *
 * Dispatches an unaligned data access to the matching software emulator
 * (classic MIPS, microMIPS or MIPS16e) unless policy forbids fixups, in
 * which case the task gets a SIGBUS.
 */
asmlinkage void do_ade(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int *pc;

	prev_state = exception_enter();
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
			1, regs, regs->cp0_badvaddr);

#ifdef CONFIG_64BIT
	/*
	 * check, if we are hitting space between CPU implemented maximum
	 * virtual user address and 64bit maximum virtual user address
	 * and do exception handling to get EFAULTs for get_user/put_user
	 */
	if ((regs->cp0_badvaddr >= (1UL << cpu_vmbits)) &&
	    (regs->cp0_badvaddr < XKSSEG)) {
		if (fixup_exception(regs)) {
			current->thread.cp0_baduaddr = regs->cp0_badvaddr;
			/*
			 * NOTE(review): this return - like the emulate_*
			 * returns below - skips exception_exit(prev_state);
			 * confirm the context-tracking imbalance is intended.
			 */
			return;
		}
		goto sigbus;
	}
#endif

	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->cp0_badvaddr == regs->cp0_epc)
		goto sigbus;

	/* Userland must have opted in via sysmips(MIPS_FIXADE). */
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	/* Global policy may force a signal instead of emulation. */
	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
		goto sigbus;

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all so but ugly ...
	 */

	/*
	 * Are we running in microMIPS mode?
	 */
	if (get_isa16_mode(regs->cp0_epc)) {
		/*
		 * Did we catch a fault trying to load an instruction in
		 * 16-bit mode?
		 */
		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
			goto sigbus;
		if (unaligned_action == UNALIGNED_ACTION_SHOW)
			show_registers(regs);

		if (cpu_has_mmips) {
			emulate_load_store_microMIPS(regs,
				(void __user *)regs->cp0_badvaddr);
			return;
		}

		if (cpu_has_mips16) {
			emulate_load_store_MIPS16e(regs,
				(void __user *)regs->cp0_badvaddr);
			return;
		}

		goto sigbus;
	}

	if (unaligned_action == UNALIGNED_ACTION_SHOW)
		show_registers(regs);
	pc = (unsigned int *)exception_epc(regs);

	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);

	return;

sigbus:
	die_if_kernel("Kernel unaligned instruction access", regs);
	force_sig(SIGBUS);

	/*
	 * XXX On return from the signal handler we should advance the epc
	 */
	exception_exit(prev_state);
}
1603
1604#ifdef CONFIG_DEBUG_FS
1605static int __init debugfs_unaligned(void)
1606{
1607	debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
1608			   &unaligned_instructions);
1609	debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
1610			   mips_debugfs_dir, &unaligned_action);
1611	return 0;
1612}
1613arch_initcall(debugfs_unaligned);
1614#endif