v6.2
   1/*
   2 * Handle unaligned accesses by emulation.
   3 *
   4 * This file is subject to the terms and conditions of the GNU General Public
   5 * License.  See the file "COPYING" in the main directory of this archive
   6 * for more details.
   7 *
   8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
   9 * Copyright (C) 1999 Silicon Graphics, Inc.
  10 * Copyright (C) 2014 Imagination Technologies Ltd.
  11 *
   12 * This file contains the exception handler for address error exceptions,
   13 * with the special capability to execute faulting instructions in software.
   14 * The handler does not try to handle the case when the program counter
   15 * points to an address that is not aligned to a word boundary.
  16 *
   17 * Storing data at unaligned addresses is a bad practice even on Intel,
   18 * where only performance is affected.  Much worse is that such code is
   19 * non-portable.  Because several programs die on MIPS due to alignment
   20 * problems, I decided to implement this handler anyway, though I originally
   21 * didn't intend to do this at all for user code.
  22 *
  23 * For now I enable fixing of address errors by default to make life easier.
   24 * However, I intend to disable this at some point in the future, once the
   25 * alignment problems in user programs have been fixed.  For programmers
   26 * this is the right way to go.
  27 *
   28 * Fixing address errors is a per-process option.  The option is inherited
   29 * across fork(2) and execve(2) calls.  If you really want to use this
   30 * option in your user programs - I strongly discourage relying on the
   31 * software emulation - use the following code in your userland stuff:
  32 *
  33 * #include <sys/sysmips.h>
  34 *
  35 * ...
  36 * sysmips(MIPS_FIXADE, x);
  37 * ...
  38 *
   39 * The argument x is 0 to disable software emulation; any other value enables it.
  40 *
   41 * Below is a little program to play around with this feature.
  42 *
   43 * #include <stdio.h>
   44 * #include <stdlib.h>
   45 * #include <sys/sysmips.h>
   46 *
   47 * struct foo {
   48 *	   unsigned char bar[8];
   49 * };
   50 *
   51 * int main(int argc, char *argv[])
   52 * {
   53 *	   struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
   54 *	   unsigned int *p = (unsigned int *) (x.bar + 3);
   55 *	   int i;
   56 *
   57 *	   if (argc > 1)
   58 *		   sysmips(MIPS_FIXADE, atoi(argv[1]));
   59 *
   60 *	   printf("*p = %08x\n", *p);
   61 *	   *p = 0xdeadface;
   62 *
   63 *	   for (i = 0; i <= 7; i++)
   64 *		   printf("%02x ", x.bar[i]);
   65 *	   printf("\n");
   66 *	   return 0;
   67 * }
  68 * Coprocessor loads are not supported; I think this case is unimportant
   69 * in practice.
  70 *
  71 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
  72 *	 exception for the R6000.
  73 *	 A store crossing a page boundary might be executed only partially.
  74 *	 Undo the partial store in this case.
  75 */
  76#include <linux/context_tracking.h>
  77#include <linux/mm.h>
  78#include <linux/signal.h>
  79#include <linux/smp.h>
  80#include <linux/sched.h>
  81#include <linux/debugfs.h>
  82#include <linux/perf_event.h>
  83
  84#include <asm/asm.h>
  85#include <asm/branch.h>
  86#include <asm/byteorder.h>
  87#include <asm/cop2.h>
  88#include <asm/debug.h>
  89#include <asm/fpu.h>
  90#include <asm/fpu_emulator.h>
  91#include <asm/inst.h>
  92#include <asm/unaligned-emul.h>
  93#include <asm/mmu_context.h>
  94#include <linux/uaccess.h>
  95
  96#include "access-helper.h"
  97
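/*
 * unaligned_action selects how do_ade() responds to an unaligned access:
 * UNALIGNED_ACTION_QUIET silently emulates the access, UNALIGNED_ACTION_SIGNAL
 * delivers the signal instead of emulating, and UNALIGNED_ACTION_SHOW dumps
 * the registers before emulating.  With CONFIG_DEBUG_FS the value can be
 * changed at run time through the "unaligned_action" file created in
 * debugfs_unaligned() below.
 */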
  98enum {
  99	UNALIGNED_ACTION_QUIET,
 100	UNALIGNED_ACTION_SIGNAL,
 101	UNALIGNED_ACTION_SHOW,
 102};
 103#ifdef CONFIG_DEBUG_FS
 104static u32 unaligned_instructions;
 105static u32 unaligned_action;
 106#else
 107#define unaligned_action UNALIGNED_ACTION_QUIET
 108#endif
 109extern void show_registers(struct pt_regs *regs);
 110
 111static void emulate_load_store_insn(struct pt_regs *regs,
 112	void __user *addr, unsigned int *pc)
 113{
 114	unsigned long origpc, orig31, value;
 115	union mips_instruction insn;
 116	unsigned int res;
 117	bool user = user_mode(regs);
 118
 119	origpc = (unsigned long)pc;
 120	orig31 = regs->regs[31];
 121
 122	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 123
 124	/*
 125	 * This load never faults.
 126	 */
 127	__get_inst32(&insn.word, pc, user);
 128
 129	switch (insn.i_format.opcode) {
 130		/*
 131		 * These are instructions that a compiler doesn't generate.  We
 132		 * can assume therefore that the code is MIPS-aware and
 133		 * really buggy.  Emulating these instructions would break the
 134		 * semantics anyway.
 135		 */
 136	case ll_op:
 137	case lld_op:
 138	case sc_op:
 139	case scd_op:
 140
 141		/*
 142		 * For these instructions the only way to create an address
 143		 * error is an attempted access to kernel/supervisor address
 144		 * space.
 145		 */
 146	case ldl_op:
 147	case ldr_op:
 148	case lwl_op:
 149	case lwr_op:
 150	case sdl_op:
 151	case sdr_op:
 152	case swl_op:
 153	case swr_op:
 154	case lb_op:
 155	case lbu_op:
 156	case sb_op:
 157		goto sigbus;
 158
 159		/*
 160		 * The remaining opcodes are the ones that are really of
 161		 * interest.
 162		 */
 163	case spec3_op:
 164		if (insn.dsp_format.func == lx_op) {
 165			switch (insn.dsp_format.op) {
 166			case lwx_op:
 167				if (user && !access_ok(addr, 4))
 168					goto sigbus;
 169				LoadW(addr, value, res);
 170				if (res)
 171					goto fault;
 172				compute_return_epc(regs);
 173				regs->regs[insn.dsp_format.rd] = value;
 174				break;
 175			case lhx_op:
 176				if (user && !access_ok(addr, 2))
 177					goto sigbus;
 178				LoadHW(addr, value, res);
 179				if (res)
 180					goto fault;
 181				compute_return_epc(regs);
 182				regs->regs[insn.dsp_format.rd] = value;
 183				break;
 184			default:
 185				goto sigill;
 186			}
 187		}
 188#ifdef CONFIG_EVA
 189		else {
 190			/*
  191			 * We can land here only from the kernel accessing user
  192			 * memory, so we need to "switch" the address limit to
  193			 * user space so that the address check can work properly.
 194			 */
 195			switch (insn.spec3_format.func) {
 196			case lhe_op:
 197				if (!access_ok(addr, 2))
 198					goto sigbus;
 199				LoadHWE(addr, value, res);
 200				if (res)
 201					goto fault;
 202				compute_return_epc(regs);
 203				regs->regs[insn.spec3_format.rt] = value;
 204				break;
 205			case lwe_op:
 206				if (!access_ok(addr, 4))
 207					goto sigbus;
 208				LoadWE(addr, value, res);
 209				if (res)
 210					goto fault;
 211				compute_return_epc(regs);
 212				regs->regs[insn.spec3_format.rt] = value;
 213				break;
 214			case lhue_op:
 215				if (!access_ok(addr, 2))
 216					goto sigbus;
 217				LoadHWUE(addr, value, res);
 218				if (res)
 219					goto fault;
 220				compute_return_epc(regs);
 221				regs->regs[insn.spec3_format.rt] = value;
 222				break;
 223			case she_op:
 224				if (!access_ok(addr, 2))
 225					goto sigbus;
 226				compute_return_epc(regs);
 227				value = regs->regs[insn.spec3_format.rt];
 228				StoreHWE(addr, value, res);
 229				if (res)
 230					goto fault;
 231				break;
 232			case swe_op:
 233				if (!access_ok(addr, 4))
 234					goto sigbus;
 235				compute_return_epc(regs);
 236				value = regs->regs[insn.spec3_format.rt];
 237				StoreWE(addr, value, res);
 238				if (res)
 239					goto fault;
 240				break;
 241			default:
 242				goto sigill;
 243			}
 244		}
 245#endif
 246		break;
 247	case lh_op:
 248		if (user && !access_ok(addr, 2))
 249			goto sigbus;
 250
 251		if (IS_ENABLED(CONFIG_EVA) && user)
 252			LoadHWE(addr, value, res);
 253		else
 254			LoadHW(addr, value, res);
 255
 256		if (res)
 257			goto fault;
 258		compute_return_epc(regs);
 259		regs->regs[insn.i_format.rt] = value;
 260		break;
 261
 262	case lw_op:
 263		if (user && !access_ok(addr, 4))
 264			goto sigbus;
 265
 266		if (IS_ENABLED(CONFIG_EVA) && user)
 267			LoadWE(addr, value, res);
 268		else
 269			LoadW(addr, value, res);
 270
 271		if (res)
 272			goto fault;
 273		compute_return_epc(regs);
 274		regs->regs[insn.i_format.rt] = value;
 275		break;
 276
 277	case lhu_op:
 278		if (user && !access_ok(addr, 2))
 279			goto sigbus;
 280
 281		if (IS_ENABLED(CONFIG_EVA) && user)
 282			LoadHWUE(addr, value, res);
 283		else
 284			LoadHWU(addr, value, res);
 285
 286		if (res)
 287			goto fault;
 288		compute_return_epc(regs);
 289		regs->regs[insn.i_format.rt] = value;
 290		break;
 291
 292	case lwu_op:
 293#ifdef CONFIG_64BIT
 294		/*
 295		 * A 32-bit kernel might be running on a 64-bit processor.  But
 296		 * if we're on a 32-bit processor and an i-cache incoherency
 297		 * or race makes us see a 64-bit instruction here the sdl/sdr
 298		 * would blow up, so for now we don't handle unaligned 64-bit
 299		 * instructions on 32-bit kernels.
 300		 */
 301		if (user && !access_ok(addr, 4))
 302			goto sigbus;
 303
 304		LoadWU(addr, value, res);
 305		if (res)
 306			goto fault;
 307		compute_return_epc(regs);
 308		regs->regs[insn.i_format.rt] = value;
 309		break;
 310#endif /* CONFIG_64BIT */
 311
 312		/* Cannot handle 64-bit instructions in 32-bit kernel */
 313		goto sigill;
 314
 315	case ld_op:
 316#ifdef CONFIG_64BIT
 317		/*
 318		 * A 32-bit kernel might be running on a 64-bit processor.  But
 319		 * if we're on a 32-bit processor and an i-cache incoherency
 320		 * or race makes us see a 64-bit instruction here the sdl/sdr
 321		 * would blow up, so for now we don't handle unaligned 64-bit
 322		 * instructions on 32-bit kernels.
 323		 */
 324		if (user && !access_ok(addr, 8))
 325			goto sigbus;
 326
 327		LoadDW(addr, value, res);
 328		if (res)
 329			goto fault;
 330		compute_return_epc(regs);
 331		regs->regs[insn.i_format.rt] = value;
 332		break;
 333#endif /* CONFIG_64BIT */
 334
 335		/* Cannot handle 64-bit instructions in 32-bit kernel */
 336		goto sigill;
 337
 338	case sh_op:
 339		if (user && !access_ok(addr, 2))
 340			goto sigbus;
 341
 342		compute_return_epc(regs);
 343		value = regs->regs[insn.i_format.rt];
 344
 345		if (IS_ENABLED(CONFIG_EVA) && user)
 346			StoreHWE(addr, value, res);
 347		else
 348			StoreHW(addr, value, res);
 349
 350		if (res)
 351			goto fault;
 352		break;
 353
 354	case sw_op:
 355		if (user && !access_ok(addr, 4))
 356			goto sigbus;
 357
 358		compute_return_epc(regs);
 359		value = regs->regs[insn.i_format.rt];
 360
 361		if (IS_ENABLED(CONFIG_EVA) && user)
 362			StoreWE(addr, value, res);
 363		else
 364			StoreW(addr, value, res);
 365
 366		if (res)
 367			goto fault;
 368		break;
 369
 370	case sd_op:
 371#ifdef CONFIG_64BIT
 372		/*
 373		 * A 32-bit kernel might be running on a 64-bit processor.  But
 374		 * if we're on a 32-bit processor and an i-cache incoherency
 375		 * or race makes us see a 64-bit instruction here the sdl/sdr
 376		 * would blow up, so for now we don't handle unaligned 64-bit
 377		 * instructions on 32-bit kernels.
 378		 */
 379		if (user && !access_ok(addr, 8))
 380			goto sigbus;
 381
 382		compute_return_epc(regs);
 383		value = regs->regs[insn.i_format.rt];
 384		StoreDW(addr, value, res);
 385		if (res)
 386			goto fault;
 387		break;
 388#endif /* CONFIG_64BIT */
 389
 390		/* Cannot handle 64-bit instructions in 32-bit kernel */
 391		goto sigill;
 392
 393#ifdef CONFIG_MIPS_FP_SUPPORT
 394
 395	case lwc1_op:
 396	case ldc1_op:
 397	case swc1_op:
 398	case sdc1_op:
 399	case cop1x_op: {
 400		void __user *fault_addr = NULL;
 401
 402		die_if_kernel("Unaligned FP access in kernel code", regs);
 403		BUG_ON(!used_math());
 404
 405		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 406					       &fault_addr);
 407		own_fpu(1);	/* Restore FPU state. */
 408
 409		/* Signal if something went wrong. */
 410		process_fpemu_return(res, fault_addr, 0);
 411
 412		if (res == 0)
 413			break;
 414		return;
 415	}
 416#endif /* CONFIG_MIPS_FP_SUPPORT */
 417
 418#ifdef CONFIG_CPU_HAS_MSA
 419
 420	case msa_op: {
 421		unsigned int wd, preempted;
 422		enum msa_2b_fmt df;
 423		union fpureg *fpr;
 424
 425		if (!cpu_has_msa)
 426			goto sigill;
 427
 428		/*
 429		 * If we've reached this point then userland should have taken
 430		 * the MSA disabled exception & initialised vector context at
 431		 * some point in the past.
 432		 */
 433		BUG_ON(!thread_msa_context_live());
 434
 435		df = insn.msa_mi10_format.df;
 436		wd = insn.msa_mi10_format.wd;
 437		fpr = &current->thread.fpu.fpr[wd];
 438
 439		switch (insn.msa_mi10_format.func) {
 440		case msa_ld_op:
 441			if (!access_ok(addr, sizeof(*fpr)))
 442				goto sigbus;
 443
 444			do {
 445				/*
 446				 * If we have live MSA context keep track of
 447				 * whether we get preempted in order to avoid
 448				 * the register context we load being clobbered
 449				 * by the live context as it's saved during
 450				 * preemption. If we don't have live context
 451				 * then it can't be saved to clobber the value
 452				 * we load.
 453				 */
 454				preempted = test_thread_flag(TIF_USEDMSA);
 455
 456				res = __copy_from_user_inatomic(fpr, addr,
 457								sizeof(*fpr));
 458				if (res)
 459					goto fault;
 460
 461				/*
 462				 * Update the hardware register if it is in use
 463				 * by the task in this quantum, in order to
 464				 * avoid having to save & restore the whole
 465				 * vector context.
 466				 */
 467				preempt_disable();
 468				if (test_thread_flag(TIF_USEDMSA)) {
 469					write_msa_wr(wd, fpr, df);
 470					preempted = 0;
 471				}
 472				preempt_enable();
 473			} while (preempted);
 474			break;
 475
 476		case msa_st_op:
 477			if (!access_ok(addr, sizeof(*fpr)))
 478				goto sigbus;
 479
 480			/*
 481			 * Update from the hardware register if it is in use by
 482			 * the task in this quantum, in order to avoid having to
 483			 * save & restore the whole vector context.
 484			 */
 485			preempt_disable();
 486			if (test_thread_flag(TIF_USEDMSA))
 487				read_msa_wr(wd, fpr, df);
 488			preempt_enable();
 489
 490			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
 491			if (res)
 492				goto fault;
 493			break;
 494
 495		default:
 496			goto sigbus;
 497		}
 498
 499		compute_return_epc(regs);
 500		break;
 501	}
 502#endif /* CONFIG_CPU_HAS_MSA */
 503
 504#ifndef CONFIG_CPU_MIPSR6
 505	/*
  506	 * COP2 is available to the implementor for application-specific use.
  507	 * It's up to applications to register a notifier chain and do
  508	 * whatever they have to do, including possibly sending signals.
  509	 *
  510	 * These opcodes have been reallocated in Release 6.
 511	 */
 512	case lwc2_op:
 513		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
 514		break;
 515
 516	case ldc2_op:
 517		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
 518		break;
 519
 520	case swc2_op:
 521		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
 522		break;
 523
 524	case sdc2_op:
 525		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
 526		break;
 527#endif
 528	default:
 529		/*
  530		 * Pheeee...  We encountered an as-yet unknown instruction or a
  531		 * cache coherence problem.  Die sucker, die ...
 532		 */
 533		goto sigill;
 534	}
 535
 536#ifdef CONFIG_DEBUG_FS
 537	unaligned_instructions++;
 538#endif
 539
 540	return;
 541
 542fault:
 543	/* roll back jump/branch */
 544	regs->cp0_epc = origpc;
 545	regs->regs[31] = orig31;
 546	/* Did we have an exception handler installed? */
 547	if (fixup_exception(regs))
 548		return;
 549
 550	die_if_kernel("Unhandled kernel unaligned access", regs);
 551	force_sig(SIGSEGV);
 552
 553	return;
 554
 555sigbus:
 556	die_if_kernel("Unhandled kernel unaligned access", regs);
 557	force_sig(SIGBUS);
 558
 559	return;
 560
 561sigill:
 562	die_if_kernel
 563	    ("Unhandled kernel unaligned access or invalid instruction", regs);
 564	force_sig(SIGILL);
 565}
 566
 567/* Recode table from 16-bit register notation to 32-bit GPR. */
 568const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
 569
 570/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
 571static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
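/*
 * The 3-bit register fields of the 16-bit instruction encodings map the
 * values 0-7 onto $16, $17 and $2-$7 (s0, s1, v0, v1, a0-a3); the store
 * variant maps 0 onto $0 (zero) instead, hence the separate table.
 */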
 572
 573static void emulate_load_store_microMIPS(struct pt_regs *regs,
 574					 void __user *addr)
 575{
 576	unsigned long value;
 577	unsigned int res;
 578	int i;
 579	unsigned int reg = 0, rvar;
 580	unsigned long orig31;
 581	u16 __user *pc16;
 582	u16 halfword;
 583	unsigned int word;
 584	unsigned long origpc, contpc;
 585	union mips_instruction insn;
 586	struct mm_decoded_insn mminsn;
 587	bool user = user_mode(regs);
 588
 589	origpc = regs->cp0_epc;
 590	orig31 = regs->regs[31];
 591
 592	mminsn.micro_mips_mode = 1;
 593
 594	/*
 595	 * This load never faults.
 596	 */
 597	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
 598	__get_user(halfword, pc16);
 599	pc16++;
 600	contpc = regs->cp0_epc + 2;
 601	word = ((unsigned int)halfword << 16);
 602	mminsn.pc_inc = 2;
 603
 604	if (!mm_insn_16bit(halfword)) {
 605		__get_user(halfword, pc16);
 606		pc16++;
 607		contpc = regs->cp0_epc + 4;
 608		mminsn.pc_inc = 4;
 609		word |= halfword;
 610	}
 611	mminsn.insn = word;
 612
 613	if (get_user(halfword, pc16))
 614		goto fault;
 615	mminsn.next_pc_inc = 2;
 616	word = ((unsigned int)halfword << 16);
 617
 618	if (!mm_insn_16bit(halfword)) {
 619		pc16++;
 620		if (get_user(halfword, pc16))
 621			goto fault;
 622		mminsn.next_pc_inc = 4;
 623		word |= halfword;
 624	}
 625	mminsn.next_insn = word;
 626
 627	insn = (union mips_instruction)(mminsn.insn);
 628	if (mm_isBranchInstr(regs, mminsn, &contpc))
 629		insn = (union mips_instruction)(mminsn.next_insn);
 630
 631	/*  Parse instruction to find what to do */
 632
 633	switch (insn.mm_i_format.opcode) {
 634
 635	case mm_pool32a_op:
 636		switch (insn.mm_x_format.func) {
 637		case mm_lwxs_op:
 638			reg = insn.mm_x_format.rd;
 639			goto loadW;
 640		}
 641
 642		goto sigbus;
 643
 644	case mm_pool32b_op:
 645		switch (insn.mm_m_format.func) {
 646		case mm_lwp_func:
 647			reg = insn.mm_m_format.rd;
 648			if (reg == 31)
 649				goto sigbus;
 650
 651			if (user && !access_ok(addr, 8))
 652				goto sigbus;
 653
 654			LoadW(addr, value, res);
 655			if (res)
 656				goto fault;
 657			regs->regs[reg] = value;
 658			addr += 4;
 659			LoadW(addr, value, res);
 660			if (res)
 661				goto fault;
 662			regs->regs[reg + 1] = value;
 663			goto success;
 664
 665		case mm_swp_func:
 666			reg = insn.mm_m_format.rd;
 667			if (reg == 31)
 668				goto sigbus;
 669
 670			if (user && !access_ok(addr, 8))
 671				goto sigbus;
 672
 673			value = regs->regs[reg];
 674			StoreW(addr, value, res);
 675			if (res)
 676				goto fault;
 677			addr += 4;
 678			value = regs->regs[reg + 1];
 679			StoreW(addr, value, res);
 680			if (res)
 681				goto fault;
 682			goto success;
 683
 684		case mm_ldp_func:
 685#ifdef CONFIG_64BIT
 686			reg = insn.mm_m_format.rd;
 687			if (reg == 31)
 688				goto sigbus;
 689
 690			if (user && !access_ok(addr, 16))
 691				goto sigbus;
 692
 693			LoadDW(addr, value, res);
 694			if (res)
 695				goto fault;
 696			regs->regs[reg] = value;
 697			addr += 8;
 698			LoadDW(addr, value, res);
 699			if (res)
 700				goto fault;
 701			regs->regs[reg + 1] = value;
 702			goto success;
 703#endif /* CONFIG_64BIT */
 704
 705			goto sigill;
 706
 707		case mm_sdp_func:
 708#ifdef CONFIG_64BIT
 709			reg = insn.mm_m_format.rd;
 710			if (reg == 31)
 711				goto sigbus;
 712
 713			if (user && !access_ok(addr, 16))
 714				goto sigbus;
 715
 716			value = regs->regs[reg];
 717			StoreDW(addr, value, res);
 718			if (res)
 719				goto fault;
 720			addr += 8;
 721			value = regs->regs[reg + 1];
 722			StoreDW(addr, value, res);
 723			if (res)
 724				goto fault;
 725			goto success;
 726#endif /* CONFIG_64BIT */
 727
 728			goto sigill;
 729
 730		case mm_lwm32_func:
 731			reg = insn.mm_m_format.rd;
 732			rvar = reg & 0xf;
 733			if ((rvar > 9) || !reg)
 734				goto sigill;
 735			if (reg & 0x10) {
 736				if (user && !access_ok(addr, 4 * (rvar + 1)))
 737					goto sigbus;
 738			} else {
 739				if (user && !access_ok(addr, 4 * rvar))
 740					goto sigbus;
 741			}
 742			if (rvar == 9)
 743				rvar = 8;
 744			for (i = 16; rvar; rvar--, i++) {
 745				LoadW(addr, value, res);
 746				if (res)
 747					goto fault;
 748				addr += 4;
 749				regs->regs[i] = value;
 750			}
 751			if ((reg & 0xf) == 9) {
 752				LoadW(addr, value, res);
 753				if (res)
 754					goto fault;
 755				addr += 4;
 756				regs->regs[30] = value;
 757			}
 758			if (reg & 0x10) {
 759				LoadW(addr, value, res);
 760				if (res)
 761					goto fault;
 762				regs->regs[31] = value;
 763			}
 764			goto success;
 765
 766		case mm_swm32_func:
 767			reg = insn.mm_m_format.rd;
 768			rvar = reg & 0xf;
 769			if ((rvar > 9) || !reg)
 770				goto sigill;
 771			if (reg & 0x10) {
 772				if (user && !access_ok(addr, 4 * (rvar + 1)))
 773					goto sigbus;
 774			} else {
 775				if (user && !access_ok(addr, 4 * rvar))
 776					goto sigbus;
 777			}
 778			if (rvar == 9)
 779				rvar = 8;
 780			for (i = 16; rvar; rvar--, i++) {
 781				value = regs->regs[i];
 782				StoreW(addr, value, res);
 783				if (res)
 784					goto fault;
 785				addr += 4;
 786			}
 787			if ((reg & 0xf) == 9) {
 788				value = regs->regs[30];
 789				StoreW(addr, value, res);
 790				if (res)
 791					goto fault;
 792				addr += 4;
 793			}
 794			if (reg & 0x10) {
 795				value = regs->regs[31];
 796				StoreW(addr, value, res);
 797				if (res)
 798					goto fault;
 799			}
 800			goto success;
 801
 802		case mm_ldm_func:
 803#ifdef CONFIG_64BIT
 804			reg = insn.mm_m_format.rd;
 805			rvar = reg & 0xf;
 806			if ((rvar > 9) || !reg)
 807				goto sigill;
 808			if (reg & 0x10) {
 809				if (user && !access_ok(addr, 8 * (rvar + 1)))
 810					goto sigbus;
 811			} else {
 812				if (user && !access_ok(addr, 8 * rvar))
 813					goto sigbus;
 814			}
 815			if (rvar == 9)
 816				rvar = 8;
 817
 818			for (i = 16; rvar; rvar--, i++) {
 819				LoadDW(addr, value, res);
 820				if (res)
 821					goto fault;
 822				addr += 4;
 823				regs->regs[i] = value;
 824			}
 825			if ((reg & 0xf) == 9) {
 826				LoadDW(addr, value, res);
 827				if (res)
 828					goto fault;
 829				addr += 8;
 830				regs->regs[30] = value;
 831			}
 832			if (reg & 0x10) {
 833				LoadDW(addr, value, res);
 834				if (res)
 835					goto fault;
 836				regs->regs[31] = value;
 837			}
 838			goto success;
 839#endif /* CONFIG_64BIT */
 840
 841			goto sigill;
 842
 843		case mm_sdm_func:
 844#ifdef CONFIG_64BIT
 845			reg = insn.mm_m_format.rd;
 846			rvar = reg & 0xf;
 847			if ((rvar > 9) || !reg)
 848				goto sigill;
 849			if (reg & 0x10) {
 850				if (user && !access_ok(addr, 8 * (rvar + 1)))
 851					goto sigbus;
 852			} else {
 853				if (user && !access_ok(addr, 8 * rvar))
 854					goto sigbus;
 855			}
 856			if (rvar == 9)
 857				rvar = 8;
 858
 859			for (i = 16; rvar; rvar--, i++) {
 860				value = regs->regs[i];
 861				StoreDW(addr, value, res);
 862				if (res)
 863					goto fault;
 864				addr += 8;
 865			}
 866			if ((reg & 0xf) == 9) {
 867				value = regs->regs[30];
 868				StoreDW(addr, value, res);
 869				if (res)
 870					goto fault;
 871				addr += 8;
 872			}
 873			if (reg & 0x10) {
 874				value = regs->regs[31];
 875				StoreDW(addr, value, res);
 876				if (res)
 877					goto fault;
 878			}
 879			goto success;
 880#endif /* CONFIG_64BIT */
 881
 882			goto sigill;
 883
 884			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
 885		}
 886
 887		goto sigbus;
 888
 889	case mm_pool32c_op:
 890		switch (insn.mm_m_format.func) {
 891		case mm_lwu_func:
 892			reg = insn.mm_m_format.rd;
 893			goto loadWU;
 894		}
 895
 896		/*  LL,SC,LLD,SCD are not serviced */
 897		goto sigbus;
 898
 899#ifdef CONFIG_MIPS_FP_SUPPORT
 900	case mm_pool32f_op:
 901		switch (insn.mm_x_format.func) {
 902		case mm_lwxc1_func:
 903		case mm_swxc1_func:
 904		case mm_ldxc1_func:
 905		case mm_sdxc1_func:
 906			goto fpu_emul;
 907		}
 908
 909		goto sigbus;
 910
 911	case mm_ldc132_op:
 912	case mm_sdc132_op:
 913	case mm_lwc132_op:
 914	case mm_swc132_op: {
 915		void __user *fault_addr = NULL;
 916
 917fpu_emul:
 918		/* roll back jump/branch */
 919		regs->cp0_epc = origpc;
 920		regs->regs[31] = orig31;
 921
 922		die_if_kernel("Unaligned FP access in kernel code", regs);
 923		BUG_ON(!used_math());
 924		BUG_ON(!is_fpu_owner());
 925
 926		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 927					       &fault_addr);
 928		own_fpu(1);	/* restore FPU state */
 929
 930		/* If something went wrong, signal */
 931		process_fpemu_return(res, fault_addr, 0);
 932
 933		if (res == 0)
 934			goto success;
 935		return;
 936	}
 937#endif /* CONFIG_MIPS_FP_SUPPORT */
 938
 939	case mm_lh32_op:
 940		reg = insn.mm_i_format.rt;
 941		goto loadHW;
 942
 943	case mm_lhu32_op:
 944		reg = insn.mm_i_format.rt;
 945		goto loadHWU;
 946
 947	case mm_lw32_op:
 948		reg = insn.mm_i_format.rt;
 949		goto loadW;
 950
 951	case mm_sh32_op:
 952		reg = insn.mm_i_format.rt;
 953		goto storeHW;
 954
 955	case mm_sw32_op:
 956		reg = insn.mm_i_format.rt;
 957		goto storeW;
 958
 959	case mm_ld32_op:
 960		reg = insn.mm_i_format.rt;
 961		goto loadDW;
 962
 963	case mm_sd32_op:
 964		reg = insn.mm_i_format.rt;
 965		goto storeDW;
 966
 967	case mm_pool16c_op:
 968		switch (insn.mm16_m_format.func) {
 969		case mm_lwm16_op:
 970			reg = insn.mm16_m_format.rlist;
 971			rvar = reg + 1;
 972			if (user && !access_ok(addr, 4 * rvar))
 973				goto sigbus;
 974
 975			for (i = 16; rvar; rvar--, i++) {
 976				LoadW(addr, value, res);
 977				if (res)
 978					goto fault;
 979				addr += 4;
 980				regs->regs[i] = value;
 981			}
 982			LoadW(addr, value, res);
 983			if (res)
 984				goto fault;
 985			regs->regs[31] = value;
 986
 987			goto success;
 988
 989		case mm_swm16_op:
 990			reg = insn.mm16_m_format.rlist;
 991			rvar = reg + 1;
 992			if (user && !access_ok(addr, 4 * rvar))
 993				goto sigbus;
 994
 995			for (i = 16; rvar; rvar--, i++) {
 996				value = regs->regs[i];
 997				StoreW(addr, value, res);
 998				if (res)
 999					goto fault;
1000				addr += 4;
1001			}
1002			value = regs->regs[31];
1003			StoreW(addr, value, res);
1004			if (res)
1005				goto fault;
1006
1007			goto success;
1008
1009		}
1010
1011		goto sigbus;
1012
1013	case mm_lhu16_op:
1014		reg = reg16to32[insn.mm16_rb_format.rt];
1015		goto loadHWU;
1016
1017	case mm_lw16_op:
1018		reg = reg16to32[insn.mm16_rb_format.rt];
1019		goto loadW;
1020
1021	case mm_sh16_op:
1022		reg = reg16to32st[insn.mm16_rb_format.rt];
1023		goto storeHW;
1024
1025	case mm_sw16_op:
1026		reg = reg16to32st[insn.mm16_rb_format.rt];
1027		goto storeW;
1028
1029	case mm_lwsp16_op:
1030		reg = insn.mm16_r5_format.rt;
1031		goto loadW;
1032
1033	case mm_swsp16_op:
1034		reg = insn.mm16_r5_format.rt;
1035		goto storeW;
1036
1037	case mm_lwgp16_op:
1038		reg = reg16to32[insn.mm16_r3_format.rt];
1039		goto loadW;
1040
1041	default:
1042		goto sigill;
1043	}
1044
1045loadHW:
1046	if (user && !access_ok(addr, 2))
1047		goto sigbus;
1048
1049	LoadHW(addr, value, res);
1050	if (res)
1051		goto fault;
1052	regs->regs[reg] = value;
1053	goto success;
1054
1055loadHWU:
1056	if (user && !access_ok(addr, 2))
1057		goto sigbus;
1058
1059	LoadHWU(addr, value, res);
1060	if (res)
1061		goto fault;
1062	regs->regs[reg] = value;
1063	goto success;
1064
1065loadW:
1066	if (user && !access_ok(addr, 4))
1067		goto sigbus;
1068
1069	LoadW(addr, value, res);
1070	if (res)
1071		goto fault;
1072	regs->regs[reg] = value;
1073	goto success;
1074
1075loadWU:
1076#ifdef CONFIG_64BIT
1077	/*
1078	 * A 32-bit kernel might be running on a 64-bit processor.  But
1079	 * if we're on a 32-bit processor and an i-cache incoherency
1080	 * or race makes us see a 64-bit instruction here the sdl/sdr
1081	 * would blow up, so for now we don't handle unaligned 64-bit
1082	 * instructions on 32-bit kernels.
1083	 */
1084	if (user && !access_ok(addr, 4))
1085		goto sigbus;
1086
1087	LoadWU(addr, value, res);
1088	if (res)
1089		goto fault;
1090	regs->regs[reg] = value;
1091	goto success;
1092#endif /* CONFIG_64BIT */
1093
1094	/* Cannot handle 64-bit instructions in 32-bit kernel */
1095	goto sigill;
1096
1097loadDW:
1098#ifdef CONFIG_64BIT
1099	/*
1100	 * A 32-bit kernel might be running on a 64-bit processor.  But
1101	 * if we're on a 32-bit processor and an i-cache incoherency
1102	 * or race makes us see a 64-bit instruction here the sdl/sdr
1103	 * would blow up, so for now we don't handle unaligned 64-bit
1104	 * instructions on 32-bit kernels.
1105	 */
1106	if (user && !access_ok(addr, 8))
1107		goto sigbus;
1108
1109	LoadDW(addr, value, res);
1110	if (res)
1111		goto fault;
1112	regs->regs[reg] = value;
1113	goto success;
1114#endif /* CONFIG_64BIT */
1115
1116	/* Cannot handle 64-bit instructions in 32-bit kernel */
1117	goto sigill;
1118
1119storeHW:
1120	if (user && !access_ok(addr, 2))
1121		goto sigbus;
1122
1123	value = regs->regs[reg];
1124	StoreHW(addr, value, res);
1125	if (res)
1126		goto fault;
1127	goto success;
1128
1129storeW:
1130	if (user && !access_ok(addr, 4))
1131		goto sigbus;
1132
1133	value = regs->regs[reg];
1134	StoreW(addr, value, res);
1135	if (res)
1136		goto fault;
1137	goto success;
1138
1139storeDW:
1140#ifdef CONFIG_64BIT
1141	/*
1142	 * A 32-bit kernel might be running on a 64-bit processor.  But
1143	 * if we're on a 32-bit processor and an i-cache incoherency
1144	 * or race makes us see a 64-bit instruction here the sdl/sdr
1145	 * would blow up, so for now we don't handle unaligned 64-bit
1146	 * instructions on 32-bit kernels.
1147	 */
1148	if (user && !access_ok(addr, 8))
1149		goto sigbus;
1150
1151	value = regs->regs[reg];
1152	StoreDW(addr, value, res);
1153	if (res)
1154		goto fault;
1155	goto success;
1156#endif /* CONFIG_64BIT */
1157
1158	/* Cannot handle 64-bit instructions in 32-bit kernel */
1159	goto sigill;
1160
1161success:
1162	regs->cp0_epc = contpc;	/* advance or branch */
1163
1164#ifdef CONFIG_DEBUG_FS
1165	unaligned_instructions++;
1166#endif
1167	return;
1168
1169fault:
1170	/* roll back jump/branch */
1171	regs->cp0_epc = origpc;
1172	regs->regs[31] = orig31;
1173	/* Did we have an exception handler installed? */
1174	if (fixup_exception(regs))
1175		return;
1176
1177	die_if_kernel("Unhandled kernel unaligned access", regs);
1178	force_sig(SIGSEGV);
1179
1180	return;
1181
1182sigbus:
1183	die_if_kernel("Unhandled kernel unaligned access", regs);
1184	force_sig(SIGBUS);
1185
1186	return;
1187
1188sigill:
1189	die_if_kernel
1190	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1191	force_sig(SIGILL);
1192}
1193
1194static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user *addr)
1195{
1196	unsigned long value;
1197	unsigned int res;
1198	int reg;
1199	unsigned long orig31;
1200	u16 __user *pc16;
1201	unsigned long origpc;
1202	union mips16e_instruction mips16inst, oldinst;
1203	unsigned int opcode;
1204	int extended = 0;
1205	bool user = user_mode(regs);
1206
1207	origpc = regs->cp0_epc;
1208	orig31 = regs->regs[31];
1209	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1210	/*
1211	 * This load never faults.
1212	 */
1213	__get_user(mips16inst.full, pc16);
1214	oldinst = mips16inst;
1215
1216	/* skip EXTEND instruction */
1217	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1218		extended = 1;
1219		pc16++;
1220		__get_user(mips16inst.full, pc16);
1221	} else if (delay_slot(regs)) {
1222		/*  skip jump instructions */
1223		/*  JAL/JALX are 32 bits but have OPCODE in first short int */
1224		if (mips16inst.ri.opcode == MIPS16e_jal_op)
1225			pc16++;
1226		pc16++;
1227		if (get_user(mips16inst.full, pc16))
1228			goto sigbus;
1229	}
1230
1231	opcode = mips16inst.ri.opcode;
1232	switch (opcode) {
1233	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
1234		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
1235		case MIPS16e_ldpc_func:
1236		case MIPS16e_ldsp_func:
1237			reg = reg16to32[mips16inst.ri64.ry];
1238			goto loadDW;
1239
1240		case MIPS16e_sdsp_func:
1241			reg = reg16to32[mips16inst.ri64.ry];
1242			goto writeDW;
1243
1244		case MIPS16e_sdrasp_func:
1245			reg = 29;	/* GPRSP */
1246			goto writeDW;
1247		}
1248
1249		goto sigbus;
1250
1251	case MIPS16e_swsp_op:
1252		reg = reg16to32[mips16inst.ri.rx];
1253		if (extended && cpu_has_mips16e2)
1254			switch (mips16inst.ri.imm >> 5) {
1255			case 0:		/* SWSP */
1256			case 1:		/* SWGP */
1257				break;
1258			case 2:		/* SHGP */
1259				opcode = MIPS16e_sh_op;
1260				break;
1261			default:
1262				goto sigbus;
1263			}
1264		break;
1265
1266	case MIPS16e_lwpc_op:
1267		reg = reg16to32[mips16inst.ri.rx];
1268		break;
1269
1270	case MIPS16e_lwsp_op:
1271		reg = reg16to32[mips16inst.ri.rx];
1272		if (extended && cpu_has_mips16e2)
1273			switch (mips16inst.ri.imm >> 5) {
1274			case 0:		/* LWSP */
1275			case 1:		/* LWGP */
1276				break;
1277			case 2:		/* LHGP */
1278				opcode = MIPS16e_lh_op;
1279				break;
1280			case 4:		/* LHUGP */
1281				opcode = MIPS16e_lhu_op;
1282				break;
1283			default:
1284				goto sigbus;
1285			}
1286		break;
1287
1288	case MIPS16e_i8_op:
1289		if (mips16inst.i8.func != MIPS16e_swrasp_func)
1290			goto sigbus;
1291		reg = 29;	/* GPRSP */
1292		break;
1293
1294	default:
1295		reg = reg16to32[mips16inst.rri.ry];
1296		break;
1297	}
1298
1299	switch (opcode) {
1300
1301	case MIPS16e_lb_op:
1302	case MIPS16e_lbu_op:
1303	case MIPS16e_sb_op:
1304		goto sigbus;
1305
1306	case MIPS16e_lh_op:
1307		if (user && !access_ok(addr, 2))
1308			goto sigbus;
1309
1310		LoadHW(addr, value, res);
1311		if (res)
1312			goto fault;
1313		MIPS16e_compute_return_epc(regs, &oldinst);
1314		regs->regs[reg] = value;
1315		break;
1316
1317	case MIPS16e_lhu_op:
1318		if (user && !access_ok(addr, 2))
1319			goto sigbus;
1320
1321		LoadHWU(addr, value, res);
1322		if (res)
1323			goto fault;
1324		MIPS16e_compute_return_epc(regs, &oldinst);
1325		regs->regs[reg] = value;
1326		break;
1327
1328	case MIPS16e_lw_op:
1329	case MIPS16e_lwpc_op:
1330	case MIPS16e_lwsp_op:
1331		if (user && !access_ok(addr, 4))
1332			goto sigbus;
1333
1334		LoadW(addr, value, res);
1335		if (res)
1336			goto fault;
1337		MIPS16e_compute_return_epc(regs, &oldinst);
1338		regs->regs[reg] = value;
1339		break;
1340
1341	case MIPS16e_lwu_op:
1342#ifdef CONFIG_64BIT
1343		/*
1344		 * A 32-bit kernel might be running on a 64-bit processor.  But
1345		 * if we're on a 32-bit processor and an i-cache incoherency
1346		 * or race makes us see a 64-bit instruction here the sdl/sdr
1347		 * would blow up, so for now we don't handle unaligned 64-bit
1348		 * instructions on 32-bit kernels.
1349		 */
1350		if (user && !access_ok(addr, 4))
1351			goto sigbus;
1352
1353		LoadWU(addr, value, res);
1354		if (res)
1355			goto fault;
1356		MIPS16e_compute_return_epc(regs, &oldinst);
1357		regs->regs[reg] = value;
1358		break;
1359#endif /* CONFIG_64BIT */
1360
1361		/* Cannot handle 64-bit instructions in 32-bit kernel */
1362		goto sigill;
1363
1364	case MIPS16e_ld_op:
1365loadDW:
1366#ifdef CONFIG_64BIT
1367		/*
1368		 * A 32-bit kernel might be running on a 64-bit processor.  But
1369		 * if we're on a 32-bit processor and an i-cache incoherency
1370		 * or race makes us see a 64-bit instruction here the sdl/sdr
1371		 * would blow up, so for now we don't handle unaligned 64-bit
1372		 * instructions on 32-bit kernels.
1373		 */
1374		if (user && !access_ok(addr, 8))
1375			goto sigbus;
1376
1377		LoadDW(addr, value, res);
1378		if (res)
1379			goto fault;
1380		MIPS16e_compute_return_epc(regs, &oldinst);
1381		regs->regs[reg] = value;
1382		break;
1383#endif /* CONFIG_64BIT */
1384
1385		/* Cannot handle 64-bit instructions in 32-bit kernel */
1386		goto sigill;
1387
1388	case MIPS16e_sh_op:
1389		if (user && !access_ok(addr, 2))
1390			goto sigbus;
1391
1392		MIPS16e_compute_return_epc(regs, &oldinst);
1393		value = regs->regs[reg];
1394		StoreHW(addr, value, res);
1395		if (res)
1396			goto fault;
1397		break;
1398
1399	case MIPS16e_sw_op:
1400	case MIPS16e_swsp_op:
1401	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
1402		if (user && !access_ok(addr, 4))
1403			goto sigbus;
1404
1405		MIPS16e_compute_return_epc(regs, &oldinst);
1406		value = regs->regs[reg];
1407		StoreW(addr, value, res);
1408		if (res)
1409			goto fault;
1410		break;
1411
1412	case MIPS16e_sd_op:
1413writeDW:
1414#ifdef CONFIG_64BIT
1415		/*
1416		 * A 32-bit kernel might be running on a 64-bit processor.  But
1417		 * if we're on a 32-bit processor and an i-cache incoherency
1418		 * or race makes us see a 64-bit instruction here the sdl/sdr
1419		 * would blow up, so for now we don't handle unaligned 64-bit
1420		 * instructions on 32-bit kernels.
1421		 */
1422		if (user && !access_ok(addr, 8))
1423			goto sigbus;
1424
1425		MIPS16e_compute_return_epc(regs, &oldinst);
1426		value = regs->regs[reg];
1427		StoreDW(addr, value, res);
1428		if (res)
1429			goto fault;
1430		break;
1431#endif /* CONFIG_64BIT */
1432
1433		/* Cannot handle 64-bit instructions in 32-bit kernel */
1434		goto sigill;
1435
1436	default:
1437		/*
1438		 * Pheeee...  We encountered an as-yet unknown instruction or a
1439		 * cache coherence problem.  Die sucker, die ...
1440		 */
1441		goto sigill;
1442	}
1443
1444#ifdef CONFIG_DEBUG_FS
1445	unaligned_instructions++;
1446#endif
1447
1448	return;
1449
1450fault:
1451	/* roll back jump/branch */
1452	regs->cp0_epc = origpc;
1453	regs->regs[31] = orig31;
1454	/* Did we have an exception handler installed? */
1455	if (fixup_exception(regs))
1456		return;
1457
1458	die_if_kernel("Unhandled kernel unaligned access", regs);
1459	force_sig(SIGSEGV);
1460
1461	return;
1462
1463sigbus:
1464	die_if_kernel("Unhandled kernel unaligned access", regs);
1465	force_sig(SIGBUS);
1466
1467	return;
1468
1469sigill:
1470	die_if_kernel
1471	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1472	force_sig(SIGILL);
1473}
1474
1475asmlinkage void do_ade(struct pt_regs *regs)
1476{
1477	enum ctx_state prev_state;
1478	unsigned int *pc;
1479
1480	prev_state = exception_enter();
1481	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1482			1, regs, regs->cp0_badvaddr);
1483
1484#ifdef CONFIG_64BIT
1485	/*
1486	 * Check whether we are hitting the gap between the CPU-implemented
1487	 * maximum virtual user address and the 64-bit maximum virtual user
1488	 * address, and do exception handling to get EFAULTs for get_user/put_user.
1489	 */
1490	if ((regs->cp0_badvaddr >= (1UL << cpu_vmbits)) &&
1491	    (regs->cp0_badvaddr < XKSSEG)) {
1492		if (fixup_exception(regs)) {
1493			current->thread.cp0_baduaddr = regs->cp0_badvaddr;
1494			return;
1495		}
1496		goto sigbus;
1497	}
1498#endif
1499
1500	/*
1501	 * Did we catch a fault trying to load an instruction?
1502	 */
1503	if (regs->cp0_badvaddr == regs->cp0_epc)
1504		goto sigbus;
1505
1506	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
1507		goto sigbus;
1508	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
1509		goto sigbus;
1510
1511	/*
1512	 * Do branch emulation only if we didn't forward the exception.
1513	 * This is all so very ugly ...
1514	 */
1515
1516	/*
1517	 * Are we running in microMIPS mode?
1518	 */
1519	if (get_isa16_mode(regs->cp0_epc)) {
1520		/*
1521		 * Did we catch a fault trying to load an instruction in
1522		 * 16-bit mode?
1523		 */
1524		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
1525			goto sigbus;
1526		if (unaligned_action == UNALIGNED_ACTION_SHOW)
1527			show_registers(regs);
1528
1529		if (cpu_has_mmips) {
1530			emulate_load_store_microMIPS(regs,
1531				(void __user *)regs->cp0_badvaddr);
1532			return;
1533		}
1534
1535		if (cpu_has_mips16) {
1536			emulate_load_store_MIPS16e(regs,
1537				(void __user *)regs->cp0_badvaddr);
1538			return;
1539		}
1540
1541		goto sigbus;
1542	}
1543
1544	if (unaligned_action == UNALIGNED_ACTION_SHOW)
1545		show_registers(regs);
1546	pc = (unsigned int *)exception_epc(regs);
1547
1548	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
1549
1550	return;
1551
1552sigbus:
1553	die_if_kernel("Kernel unaligned instruction access", regs);
1554	force_sig(SIGBUS);
1555
1556	/*
1557	 * XXX On return from the signal handler we should advance the epc
1558	 */
1559	exception_exit(prev_state);
1560}
1561
1562#ifdef CONFIG_DEBUG_FS
1563static int __init debugfs_unaligned(void)
1564{
1565	debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
1566			   &unaligned_instructions);
1567	debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
1568			   mips_debugfs_dir, &unaligned_action);
1569	return 0;
1570}
1571arch_initcall(debugfs_unaligned);
1572#endif
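
The two debugfs files registered above can be read (and unaligned_action also
written) from userland once debugfs is mounted.  A minimal sketch, assuming
debugfs is mounted at /sys/kernel/debug and mips_debugfs_dir corresponds to
the "mips" subdirectory there:

#include <stdio.h>

int main(void)
{
	char buf[32];
	/* Hypothetical path; adjust to wherever debugfs is mounted. */
	FILE *f = fopen("/sys/kernel/debug/mips/unaligned_instructions", "r");

	if (!f) {
		perror("unaligned_instructions");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("unaligned instructions emulated: %s", buf);
	fclose(f);
	return 0;
}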
v4.10.11
  94#define STR(x)	__STR(x)
  95#define __STR(x)  #x
  96
 110#ifdef __BIG_ENDIAN
 111#define     _LoadHW(addr, value, res, type)  \
 112do {                                                        \
 113		__asm__ __volatile__ (".set\tnoat\n"        \
 114			"1:\t"type##_lb("%0", "0(%2)")"\n"  \
 115			"2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
 116			"sll\t%0, 0x8\n\t"                  \
 117			"or\t%0, $1\n\t"                    \
 118			"li\t%1, 0\n"                       \
 119			"3:\t.set\tat\n\t"                  \
 120			".insn\n\t"                         \
 121			".section\t.fixup,\"ax\"\n\t"       \
 122			"4:\tli\t%1, %3\n\t"                \
 123			"j\t3b\n\t"                         \
 124			".previous\n\t"                     \
 125			".section\t__ex_table,\"a\"\n\t"    \
 126			STR(PTR)"\t1b, 4b\n\t"              \
 127			STR(PTR)"\t2b, 4b\n\t"              \
 128			".previous"                         \
 129			: "=&r" (value), "=r" (res)         \
 130			: "r" (addr), "i" (-EFAULT));       \
 131} while(0)
 132
 133#ifndef CONFIG_CPU_MIPSR6
 134#define     _LoadW(addr, value, res, type)   \
 135do {                                                        \
 136		__asm__ __volatile__ (                      \
 137			"1:\t"type##_lwl("%0", "(%2)")"\n"   \
 138			"2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
 139			"li\t%1, 0\n"                       \
 140			"3:\n\t"                            \
 141			".insn\n\t"                         \
 142			".section\t.fixup,\"ax\"\n\t"       \
 143			"4:\tli\t%1, %3\n\t"                \
 144			"j\t3b\n\t"                         \
 145			".previous\n\t"                     \
 146			".section\t__ex_table,\"a\"\n\t"    \
 147			STR(PTR)"\t1b, 4b\n\t"              \
 148			STR(PTR)"\t2b, 4b\n\t"              \
 149			".previous"                         \
 150			: "=&r" (value), "=r" (res)         \
 151			: "r" (addr), "i" (-EFAULT));       \
 152} while(0)
 153
 154#else
 155/* MIPSR6 has no lwl instruction */
 156#define     _LoadW(addr, value, res, type) \
 157do {                                                        \
 158		__asm__ __volatile__ (			    \
 159			".set\tpush\n"			    \
 160			".set\tnoat\n\t"		    \
 161			"1:"type##_lb("%0", "0(%2)")"\n\t"  \
 162			"2:"type##_lbu("$1", "1(%2)")"\n\t" \
 163			"sll\t%0, 0x8\n\t"		    \
 164			"or\t%0, $1\n\t"		    \
 165			"3:"type##_lbu("$1", "2(%2)")"\n\t" \
 166			"sll\t%0, 0x8\n\t"		    \
 167			"or\t%0, $1\n\t"		    \
 168			"4:"type##_lbu("$1", "3(%2)")"\n\t" \
 169			"sll\t%0, 0x8\n\t"		    \
 170			"or\t%0, $1\n\t"		    \
 171			"li\t%1, 0\n"			    \
 172			".set\tpop\n"			    \
 173			"10:\n\t"			    \
 174			".insn\n\t"			    \
 175			".section\t.fixup,\"ax\"\n\t"	    \
 176			"11:\tli\t%1, %3\n\t"		    \
 177			"j\t10b\n\t"			    \
 178			".previous\n\t"			    \
 179			".section\t__ex_table,\"a\"\n\t"    \
 180			STR(PTR)"\t1b, 11b\n\t"		    \
 181			STR(PTR)"\t2b, 11b\n\t"		    \
 182			STR(PTR)"\t3b, 11b\n\t"		    \
 183			STR(PTR)"\t4b, 11b\n\t"		    \
 184			".previous"			    \
 185			: "=&r" (value), "=r" (res)	    \
 186			: "r" (addr), "i" (-EFAULT));       \
 187} while(0)
 188
 189#endif /* CONFIG_CPU_MIPSR6 */
 190
 191#define     _LoadHWU(addr, value, res, type) \
 192do {                                                        \
 193		__asm__ __volatile__ (                      \
 194			".set\tnoat\n"                      \
 195			"1:\t"type##_lbu("%0", "0(%2)")"\n" \
 196			"2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
 197			"sll\t%0, 0x8\n\t"                  \
 198			"or\t%0, $1\n\t"                    \
 199			"li\t%1, 0\n"                       \
 200			"3:\n\t"                            \
 201			".insn\n\t"                         \
 202			".set\tat\n\t"                      \
 203			".section\t.fixup,\"ax\"\n\t"       \
 204			"4:\tli\t%1, %3\n\t"                \
 205			"j\t3b\n\t"                         \
 206			".previous\n\t"                     \
 207			".section\t__ex_table,\"a\"\n\t"    \
 208			STR(PTR)"\t1b, 4b\n\t"              \
 209			STR(PTR)"\t2b, 4b\n\t"              \
 210			".previous"                         \
 211			: "=&r" (value), "=r" (res)         \
 212			: "r" (addr), "i" (-EFAULT));       \
 213} while(0)
 214
 215#ifndef CONFIG_CPU_MIPSR6
 216#define     _LoadWU(addr, value, res, type)  \
 217do {                                                        \
 218		__asm__ __volatile__ (                      \
 219			"1:\t"type##_lwl("%0", "(%2)")"\n"  \
 220			"2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
 221			"dsll\t%0, %0, 32\n\t"              \
 222			"dsrl\t%0, %0, 32\n\t"              \
 223			"li\t%1, 0\n"                       \
 224			"3:\n\t"                            \
 225			".insn\n\t"                         \
 226			"\t.section\t.fixup,\"ax\"\n\t"     \
 227			"4:\tli\t%1, %3\n\t"                \
 228			"j\t3b\n\t"                         \
 229			".previous\n\t"                     \
 230			".section\t__ex_table,\"a\"\n\t"    \
 231			STR(PTR)"\t1b, 4b\n\t"              \
 232			STR(PTR)"\t2b, 4b\n\t"              \
 233			".previous"                         \
 234			: "=&r" (value), "=r" (res)         \
 235			: "r" (addr), "i" (-EFAULT));       \
 236} while(0)
 237
 238#define     _LoadDW(addr, value, res)  \
 239do {                                                        \
 240		__asm__ __volatile__ (                      \
 241			"1:\tldl\t%0, (%2)\n"               \
 242			"2:\tldr\t%0, 7(%2)\n\t"            \
 243			"li\t%1, 0\n"                       \
 244			"3:\n\t"                            \
 245			".insn\n\t"                         \
 246			"\t.section\t.fixup,\"ax\"\n\t"     \
 247			"4:\tli\t%1, %3\n\t"                \
 248			"j\t3b\n\t"                         \
 249			".previous\n\t"                     \
 250			".section\t__ex_table,\"a\"\n\t"    \
 251			STR(PTR)"\t1b, 4b\n\t"              \
 252			STR(PTR)"\t2b, 4b\n\t"              \
 253			".previous"                         \
 254			: "=&r" (value), "=r" (res)         \
 255			: "r" (addr), "i" (-EFAULT));       \
 256} while(0)
 257
 258#else
  259/* MIPSR6 has no lwl and ldl instructions */
 260#define	    _LoadWU(addr, value, res, type) \
 261do {                                                        \
 262		__asm__ __volatile__ (			    \
 263			".set\tpush\n\t"		    \
 264			".set\tnoat\n\t"		    \
 265			"1:"type##_lbu("%0", "0(%2)")"\n\t" \
 266			"2:"type##_lbu("$1", "1(%2)")"\n\t" \
 267			"sll\t%0, 0x8\n\t"		    \
 268			"or\t%0, $1\n\t"		    \
 269			"3:"type##_lbu("$1", "2(%2)")"\n\t" \
 270			"sll\t%0, 0x8\n\t"		    \
 271			"or\t%0, $1\n\t"		    \
 272			"4:"type##_lbu("$1", "3(%2)")"\n\t" \
 273			"sll\t%0, 0x8\n\t"		    \
 274			"or\t%0, $1\n\t"		    \
 275			"li\t%1, 0\n"			    \
 276			".set\tpop\n"			    \
 277			"10:\n\t"			    \
 278			".insn\n\t"			    \
 279			".section\t.fixup,\"ax\"\n\t"	    \
 280			"11:\tli\t%1, %3\n\t"		    \
 281			"j\t10b\n\t"			    \
 282			".previous\n\t"			    \
 283			".section\t__ex_table,\"a\"\n\t"    \
 284			STR(PTR)"\t1b, 11b\n\t"		    \
 285			STR(PTR)"\t2b, 11b\n\t"		    \
 286			STR(PTR)"\t3b, 11b\n\t"		    \
 287			STR(PTR)"\t4b, 11b\n\t"		    \
 288			".previous"			    \
 289			: "=&r" (value), "=r" (res)	    \
 290			: "r" (addr), "i" (-EFAULT));       \
 291} while(0)
 292
 293#define     _LoadDW(addr, value, res)  \
 294do {                                                        \
 295		__asm__ __volatile__ (			    \
 296			".set\tpush\n\t"		    \
 297			".set\tnoat\n\t"		    \
 298			"1:lb\t%0, 0(%2)\n\t"    	    \
 299			"2:lbu\t $1, 1(%2)\n\t"   	    \
 300			"dsll\t%0, 0x8\n\t"		    \
 301			"or\t%0, $1\n\t"		    \
 302			"3:lbu\t$1, 2(%2)\n\t"   	    \
 303			"dsll\t%0, 0x8\n\t"		    \
 304			"or\t%0, $1\n\t"		    \
 305			"4:lbu\t$1, 3(%2)\n\t"   	    \
 306			"dsll\t%0, 0x8\n\t"		    \
 307			"or\t%0, $1\n\t"		    \
 308			"5:lbu\t$1, 4(%2)\n\t"   	    \
 309			"dsll\t%0, 0x8\n\t"		    \
 310			"or\t%0, $1\n\t"		    \
 311			"6:lbu\t$1, 5(%2)\n\t"   	    \
 312			"dsll\t%0, 0x8\n\t"		    \
 313			"or\t%0, $1\n\t"		    \
 314			"7:lbu\t$1, 6(%2)\n\t"   	    \
 315			"dsll\t%0, 0x8\n\t"		    \
 316			"or\t%0, $1\n\t"		    \
 317			"8:lbu\t$1, 7(%2)\n\t"   	    \
 318			"dsll\t%0, 0x8\n\t"		    \
 319			"or\t%0, $1\n\t"		    \
 320			"li\t%1, 0\n"			    \
 321			".set\tpop\n\t"			    \
 322			"10:\n\t"			    \
 323			".insn\n\t"			    \
 324			".section\t.fixup,\"ax\"\n\t"	    \
 325			"11:\tli\t%1, %3\n\t"		    \
 326			"j\t10b\n\t"			    \
 327			".previous\n\t"			    \
 328			".section\t__ex_table,\"a\"\n\t"    \
 329			STR(PTR)"\t1b, 11b\n\t"		    \
 330			STR(PTR)"\t2b, 11b\n\t"		    \
 331			STR(PTR)"\t3b, 11b\n\t"		    \
 332			STR(PTR)"\t4b, 11b\n\t"		    \
 333			STR(PTR)"\t5b, 11b\n\t"		    \
 334			STR(PTR)"\t6b, 11b\n\t"		    \
 335			STR(PTR)"\t7b, 11b\n\t"		    \
 336			STR(PTR)"\t8b, 11b\n\t"		    \
 337			".previous"			    \
 338			: "=&r" (value), "=r" (res)	    \
 339			: "r" (addr), "i" (-EFAULT));       \
 340} while(0)
 341
 342#endif /* CONFIG_CPU_MIPSR6 */
 343
 344
 345#define     _StoreHW(addr, value, res, type) \
 346do {                                                        \
 347		__asm__ __volatile__ (                      \
 348			".set\tnoat\n"                      \
 349			"1:\t"type##_sb("%1", "1(%2)")"\n"  \
 350			"srl\t$1, %1, 0x8\n"                \
 351			"2:\t"type##_sb("$1", "0(%2)")"\n"  \
 352			".set\tat\n\t"                      \
 353			"li\t%0, 0\n"                       \
 354			"3:\n\t"                            \
 355			".insn\n\t"                         \
 356			".section\t.fixup,\"ax\"\n\t"       \
 357			"4:\tli\t%0, %3\n\t"                \
 358			"j\t3b\n\t"                         \
 359			".previous\n\t"                     \
 360			".section\t__ex_table,\"a\"\n\t"    \
 361			STR(PTR)"\t1b, 4b\n\t"              \
 362			STR(PTR)"\t2b, 4b\n\t"              \
 363			".previous"                         \
 364			: "=r" (res)                        \
 365			: "r" (value), "r" (addr), "i" (-EFAULT));\
 366} while(0)
 367
 368#ifndef CONFIG_CPU_MIPSR6
 369#define     _StoreW(addr, value, res, type)  \
 370do {                                                        \
 371		__asm__ __volatile__ (                      \
 372			"1:\t"type##_swl("%1", "(%2)")"\n"  \
 373			"2:\t"type##_swr("%1", "3(%2)")"\n\t"\
 374			"li\t%0, 0\n"                       \
 375			"3:\n\t"                            \
 376			".insn\n\t"                         \
 377			".section\t.fixup,\"ax\"\n\t"       \
 378			"4:\tli\t%0, %3\n\t"                \
 379			"j\t3b\n\t"                         \
 380			".previous\n\t"                     \
 381			".section\t__ex_table,\"a\"\n\t"    \
 382			STR(PTR)"\t1b, 4b\n\t"              \
 383			STR(PTR)"\t2b, 4b\n\t"              \
 384			".previous"                         \
 385		: "=r" (res)                                \
 386		: "r" (value), "r" (addr), "i" (-EFAULT));  \
 387} while(0)
 388
 389#define     _StoreDW(addr, value, res) \
 390do {                                                        \
 391		__asm__ __volatile__ (                      \
 392			"1:\tsdl\t%1,(%2)\n"                \
 393			"2:\tsdr\t%1, 7(%2)\n\t"            \
 394			"li\t%0, 0\n"                       \
 395			"3:\n\t"                            \
 396			".insn\n\t"                         \
 397			".section\t.fixup,\"ax\"\n\t"       \
 398			"4:\tli\t%0, %3\n\t"                \
 399			"j\t3b\n\t"                         \
 400			".previous\n\t"                     \
 401			".section\t__ex_table,\"a\"\n\t"    \
 402			STR(PTR)"\t1b, 4b\n\t"              \
 403			STR(PTR)"\t2b, 4b\n\t"              \
 404			".previous"                         \
 405		: "=r" (res)                                \
 406		: "r" (value), "r" (addr), "i" (-EFAULT));  \
 407} while(0)
 408
 409#else
 410/* MIPSR6 has no swl and sdl instructions */
 411#define     _StoreW(addr, value, res, type)  \
 412do {                                                        \
 413		__asm__ __volatile__ (                      \
 414			".set\tpush\n\t"		    \
 415			".set\tnoat\n\t"		    \
 416			"1:"type##_sb("%1", "3(%2)")"\n\t"  \
 417			"srl\t$1, %1, 0x8\n\t"		    \
 418			"2:"type##_sb("$1", "2(%2)")"\n\t"  \
 419			"srl\t$1, $1,  0x8\n\t"		    \
 420			"3:"type##_sb("$1", "1(%2)")"\n\t"  \
 421			"srl\t$1, $1, 0x8\n\t"		    \
 422			"4:"type##_sb("$1", "0(%2)")"\n\t"  \
 423			".set\tpop\n\t"			    \
 424			"li\t%0, 0\n"			    \
 425			"10:\n\t"			    \
 426			".insn\n\t"			    \
 427			".section\t.fixup,\"ax\"\n\t"	    \
 428			"11:\tli\t%0, %3\n\t"		    \
 429			"j\t10b\n\t"			    \
 430			".previous\n\t"			    \
 431			".section\t__ex_table,\"a\"\n\t"    \
 432			STR(PTR)"\t1b, 11b\n\t"		    \
 433			STR(PTR)"\t2b, 11b\n\t"		    \
 434			STR(PTR)"\t3b, 11b\n\t"		    \
 435			STR(PTR)"\t4b, 11b\n\t"		    \
 436			".previous"			    \
 437		: "=&r" (res)			    	    \
 438		: "r" (value), "r" (addr), "i" (-EFAULT)    \
 439		: "memory");                                \
 440} while(0)
 441
 442#define     _StoreDW(addr, value, res) \
 443do {                                                        \
 444		__asm__ __volatile__ (                      \
 445			".set\tpush\n\t"		    \
 446			".set\tnoat\n\t"		    \
 447			"1:sb\t%1, 7(%2)\n\t"    	    \
 448			"dsrl\t$1, %1, 0x8\n\t"		    \
 449			"2:sb\t$1, 6(%2)\n\t"    	    \
 450			"dsrl\t$1, $1, 0x8\n\t"		    \
 451			"3:sb\t$1, 5(%2)\n\t"    	    \
 452			"dsrl\t$1, $1, 0x8\n\t"		    \
 453			"4:sb\t$1, 4(%2)\n\t"    	    \
 454			"dsrl\t$1, $1, 0x8\n\t"		    \
 455			"5:sb\t$1, 3(%2)\n\t"    	    \
 456			"dsrl\t$1, $1, 0x8\n\t"		    \
 457			"6:sb\t$1, 2(%2)\n\t"    	    \
 458			"dsrl\t$1, $1, 0x8\n\t"		    \
 459			"7:sb\t$1, 1(%2)\n\t"    	    \
 460			"dsrl\t$1, $1, 0x8\n\t"		    \
 461			"8:sb\t$1, 0(%2)\n\t"    	    \
 462			"dsrl\t$1, $1, 0x8\n\t"		    \
 463			".set\tpop\n\t"			    \
 464			"li\t%0, 0\n"			    \
 465			"10:\n\t"			    \
 466			".insn\n\t"			    \
 467			".section\t.fixup,\"ax\"\n\t"	    \
 468			"11:\tli\t%0, %3\n\t"		    \
 469			"j\t10b\n\t"			    \
 470			".previous\n\t"			    \
 471			".section\t__ex_table,\"a\"\n\t"    \
 472			STR(PTR)"\t1b, 11b\n\t"		    \
 473			STR(PTR)"\t2b, 11b\n\t"		    \
 474			STR(PTR)"\t3b, 11b\n\t"		    \
 475			STR(PTR)"\t4b, 11b\n\t"		    \
 476			STR(PTR)"\t5b, 11b\n\t"		    \
 477			STR(PTR)"\t6b, 11b\n\t"		    \
 478			STR(PTR)"\t7b, 11b\n\t"		    \
 479			STR(PTR)"\t8b, 11b\n\t"		    \
 480			".previous"			    \
 481		: "=&r" (res)			    	    \
 482		: "r" (value), "r" (addr), "i" (-EFAULT)    \
 483		: "memory");                                \
 484} while(0)
 485
 486#endif /* CONFIG_CPU_MIPSR6 */
 487
 488#else /* __BIG_ENDIAN */
 489
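    /*
     * Little-endian variants of the same helpers: identical structure, but
     * the byte offsets are mirrored so that the least significant byte is
     * taken from the lowest address.
     */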
 490#define     _LoadHW(addr, value, res, type)  \
 491do {                                                        \
 492		__asm__ __volatile__ (".set\tnoat\n"        \
 493			"1:\t"type##_lb("%0", "1(%2)")"\n"  \
 494			"2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
 495			"sll\t%0, 0x8\n\t"                  \
 496			"or\t%0, $1\n\t"                    \
 497			"li\t%1, 0\n"                       \
 498			"3:\t.set\tat\n\t"                  \
 499			".insn\n\t"                         \
 500			".section\t.fixup,\"ax\"\n\t"       \
 501			"4:\tli\t%1, %3\n\t"                \
 502			"j\t3b\n\t"                         \
 503			".previous\n\t"                     \
 504			".section\t__ex_table,\"a\"\n\t"    \
 505			STR(PTR)"\t1b, 4b\n\t"              \
 506			STR(PTR)"\t2b, 4b\n\t"              \
 507			".previous"                         \
 508			: "=&r" (value), "=r" (res)         \
 509			: "r" (addr), "i" (-EFAULT));       \
 510} while(0)
 511
 512#ifndef CONFIG_CPU_MIPSR6
 513#define     _LoadW(addr, value, res, type)   \
 514do {                                                        \
 515		__asm__ __volatile__ (                      \
 516			"1:\t"type##_lwl("%0", "3(%2)")"\n" \
 517			"2:\t"type##_lwr("%0", "(%2)")"\n\t"\
 518			"li\t%1, 0\n"                       \
 519			"3:\n\t"                            \
 520			".insn\n\t"                         \
 521			".section\t.fixup,\"ax\"\n\t"       \
 522			"4:\tli\t%1, %3\n\t"                \
 523			"j\t3b\n\t"                         \
 524			".previous\n\t"                     \
 525			".section\t__ex_table,\"a\"\n\t"    \
 526			STR(PTR)"\t1b, 4b\n\t"              \
 527			STR(PTR)"\t2b, 4b\n\t"              \
 528			".previous"                         \
 529			: "=&r" (value), "=r" (res)         \
 530			: "r" (addr), "i" (-EFAULT));       \
 531} while(0)
 532
 533#else
 534/* MIPSR6 has no lwl instruction */
 535#define     _LoadW(addr, value, res, type) \
 536do {                                                        \
 537		__asm__ __volatile__ (			    \
 538			".set\tpush\n"			    \
 539			".set\tnoat\n\t"		    \
 540			"1:"type##_lb("%0", "3(%2)")"\n\t"  \
 541			"2:"type##_lbu("$1", "2(%2)")"\n\t" \
 542			"sll\t%0, 0x8\n\t"		    \
 543			"or\t%0, $1\n\t"		    \
 544			"3:"type##_lbu("$1", "1(%2)")"\n\t" \
 545			"sll\t%0, 0x8\n\t"		    \
 546			"or\t%0, $1\n\t"		    \
 547			"4:"type##_lbu("$1", "0(%2)")"\n\t" \
 548			"sll\t%0, 0x8\n\t"		    \
 549			"or\t%0, $1\n\t"		    \
 550			"li\t%1, 0\n"			    \
 551			".set\tpop\n"			    \
 552			"10:\n\t"			    \
 553			".insn\n\t"			    \
 554			".section\t.fixup,\"ax\"\n\t"	    \
 555			"11:\tli\t%1, %3\n\t"		    \
 556			"j\t10b\n\t"			    \
 557			".previous\n\t"			    \
 558			".section\t__ex_table,\"a\"\n\t"    \
 559			STR(PTR)"\t1b, 11b\n\t"		    \
 560			STR(PTR)"\t2b, 11b\n\t"		    \
 561			STR(PTR)"\t3b, 11b\n\t"		    \
 562			STR(PTR)"\t4b, 11b\n\t"		    \
 563			".previous"			    \
 564			: "=&r" (value), "=r" (res)	    \
 565			: "r" (addr), "i" (-EFAULT));       \
 566} while(0)
 567
 568#endif /* CONFIG_CPU_MIPSR6 */
 569
 570
 571#define     _LoadHWU(addr, value, res, type) \
 572do {                                                        \
 573		__asm__ __volatile__ (                      \
 574			".set\tnoat\n"                      \
 575			"1:\t"type##_lbu("%0", "1(%2)")"\n" \
 576			"2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
 577			"sll\t%0, 0x8\n\t"                  \
 578			"or\t%0, $1\n\t"                    \
 579			"li\t%1, 0\n"                       \
 580			"3:\n\t"                            \
 581			".insn\n\t"                         \
 582			".set\tat\n\t"                      \
 583			".section\t.fixup,\"ax\"\n\t"       \
 584			"4:\tli\t%1, %3\n\t"                \
 585			"j\t3b\n\t"                         \
 586			".previous\n\t"                     \
 587			".section\t__ex_table,\"a\"\n\t"    \
 588			STR(PTR)"\t1b, 4b\n\t"              \
 589			STR(PTR)"\t2b, 4b\n\t"              \
 590			".previous"                         \
 591			: "=&r" (value), "=r" (res)         \
 592			: "r" (addr), "i" (-EFAULT));       \
 593} while(0)
 594
 595#ifndef CONFIG_CPU_MIPSR6
 596#define     _LoadWU(addr, value, res, type)  \
 597do {                                                        \
 598		__asm__ __volatile__ (                      \
 599			"1:\t"type##_lwl("%0", "3(%2)")"\n" \
 600			"2:\t"type##_lwr("%0", "(%2)")"\n\t"\
 601			"dsll\t%0, %0, 32\n\t"              \
 602			"dsrl\t%0, %0, 32\n\t"              \
 603			"li\t%1, 0\n"                       \
 604			"3:\n\t"                            \
 605			".insn\n\t"                         \
 606			"\t.section\t.fixup,\"ax\"\n\t"     \
 607			"4:\tli\t%1, %3\n\t"                \
 608			"j\t3b\n\t"                         \
 609			".previous\n\t"                     \
 610			".section\t__ex_table,\"a\"\n\t"    \
 611			STR(PTR)"\t1b, 4b\n\t"              \
 612			STR(PTR)"\t2b, 4b\n\t"              \
 613			".previous"                         \
 614			: "=&r" (value), "=r" (res)         \
 615			: "r" (addr), "i" (-EFAULT));       \
 616} while(0)
 617
 618#define     _LoadDW(addr, value, res)  \
 619do {                                                        \
 620		__asm__ __volatile__ (                      \
 621			"1:\tldl\t%0, 7(%2)\n"              \
 622			"2:\tldr\t%0, (%2)\n\t"             \
 623			"li\t%1, 0\n"                       \
 624			"3:\n\t"                            \
 625			".insn\n\t"                         \
 626			"\t.section\t.fixup,\"ax\"\n\t"     \
 627			"4:\tli\t%1, %3\n\t"                \
 628			"j\t3b\n\t"                         \
 629			".previous\n\t"                     \
 630			".section\t__ex_table,\"a\"\n\t"    \
 631			STR(PTR)"\t1b, 4b\n\t"              \
 632			STR(PTR)"\t2b, 4b\n\t"              \
 633			".previous"                         \
 634			: "=&r" (value), "=r" (res)         \
 635			: "r" (addr), "i" (-EFAULT));       \
 636} while(0)
 637
 638#else
 639/* MIPSR6 has no lwl and ldl instructions */
 640#define	    _LoadWU(addr, value, res, type) \
 641do {                                                        \
 642		__asm__ __volatile__ (			    \
 643			".set\tpush\n\t"		    \
 644			".set\tnoat\n\t"		    \
 645			"1:"type##_lbu("%0", "3(%2)")"\n\t" \
 646			"2:"type##_lbu("$1", "2(%2)")"\n\t" \
 647			"sll\t%0, 0x8\n\t"		    \
 648			"or\t%0, $1\n\t"		    \
 649			"3:"type##_lbu("$1", "1(%2)")"\n\t" \
 650			"sll\t%0, 0x8\n\t"		    \
 651			"or\t%0, $1\n\t"		    \
 652			"4:"type##_lbu("$1", "0(%2)")"\n\t" \
 653			"sll\t%0, 0x8\n\t"		    \
 654			"or\t%0, $1\n\t"		    \
 655			"li\t%1, 0\n"			    \
 656			".set\tpop\n"			    \
 657			"10:\n\t"			    \
 658			".insn\n\t"			    \
 659			".section\t.fixup,\"ax\"\n\t"	    \
 660			"11:\tli\t%1, %3\n\t"		    \
 661			"j\t10b\n\t"			    \
 662			".previous\n\t"			    \
 663			".section\t__ex_table,\"a\"\n\t"    \
 664			STR(PTR)"\t1b, 11b\n\t"		    \
 665			STR(PTR)"\t2b, 11b\n\t"		    \
 666			STR(PTR)"\t3b, 11b\n\t"		    \
 667			STR(PTR)"\t4b, 11b\n\t"		    \
 668			".previous"			    \
 669			: "=&r" (value), "=r" (res)	    \
 670			: "r" (addr), "i" (-EFAULT));       \
 671} while(0)
 672
 673#define     _LoadDW(addr, value, res)  \
 674do {                                                        \
 675		__asm__ __volatile__ (			    \
 676			".set\tpush\n\t"		    \
 677			".set\tnoat\n\t"		    \
 678			"1:lb\t%0, 7(%2)\n\t"    	    \
 679			"2:lbu\t$1, 6(%2)\n\t"   	    \
 680			"dsll\t%0, 0x8\n\t"		    \
 681			"or\t%0, $1\n\t"		    \
 682			"3:lbu\t$1, 5(%2)\n\t"   	    \
 683			"dsll\t%0, 0x8\n\t"		    \
 684			"or\t%0, $1\n\t"		    \
 685			"4:lbu\t$1, 4(%2)\n\t"   	    \
 686			"dsll\t%0, 0x8\n\t"		    \
 687			"or\t%0, $1\n\t"		    \
 688			"5:lbu\t$1, 3(%2)\n\t"   	    \
 689			"dsll\t%0, 0x8\n\t"		    \
 690			"or\t%0, $1\n\t"		    \
 691			"6:lbu\t$1, 2(%2)\n\t"   	    \
 692			"dsll\t%0, 0x8\n\t"		    \
 693			"or\t%0, $1\n\t"		    \
 694			"7:lbu\t$1, 1(%2)\n\t"   	    \
 695			"dsll\t%0, 0x8\n\t"		    \
 696			"or\t%0, $1\n\t"		    \
 697			"8:lbu\t$1, 0(%2)\n\t"   	    \
 698			"dsll\t%0, 0x8\n\t"		    \
 699			"or\t%0, $1\n\t"		    \
 700			"li\t%1, 0\n"			    \
 701			".set\tpop\n\t"			    \
 702			"10:\n\t"			    \
 703			".insn\n\t"			    \
 704			".section\t.fixup,\"ax\"\n\t"	    \
 705			"11:\tli\t%1, %3\n\t"		    \
 706			"j\t10b\n\t"			    \
 707			".previous\n\t"			    \
 708			".section\t__ex_table,\"a\"\n\t"    \
 709			STR(PTR)"\t1b, 11b\n\t"		    \
 710			STR(PTR)"\t2b, 11b\n\t"		    \
 711			STR(PTR)"\t3b, 11b\n\t"		    \
 712			STR(PTR)"\t4b, 11b\n\t"		    \
 713			STR(PTR)"\t5b, 11b\n\t"		    \
 714			STR(PTR)"\t6b, 11b\n\t"		    \
 715			STR(PTR)"\t7b, 11b\n\t"		    \
 716			STR(PTR)"\t8b, 11b\n\t"		    \
 717			".previous"			    \
 718			: "=&r" (value), "=r" (res)	    \
 719			: "r" (addr), "i" (-EFAULT));       \
 720} while(0)
 721#endif /* CONFIG_CPU_MIPSR6 */
 722
 723#define     _StoreHW(addr, value, res, type) \
 724do {                                                        \
 725		__asm__ __volatile__ (                      \
 726			".set\tnoat\n"                      \
 727			"1:\t"type##_sb("%1", "0(%2)")"\n"  \
 728			"srl\t$1,%1, 0x8\n"                 \
 729			"2:\t"type##_sb("$1", "1(%2)")"\n"  \
 730			".set\tat\n\t"                      \
 731			"li\t%0, 0\n"                       \
 732			"3:\n\t"                            \
 733			".insn\n\t"                         \
 734			".section\t.fixup,\"ax\"\n\t"       \
 735			"4:\tli\t%0, %3\n\t"                \
 736			"j\t3b\n\t"                         \
 737			".previous\n\t"                     \
 738			".section\t__ex_table,\"a\"\n\t"    \
 739			STR(PTR)"\t1b, 4b\n\t"              \
 740			STR(PTR)"\t2b, 4b\n\t"              \
 741			".previous"                         \
 742			: "=r" (res)                        \
 743			: "r" (value), "r" (addr), "i" (-EFAULT));\
 744} while(0)
 745
 746#ifndef CONFIG_CPU_MIPSR6
 747#define     _StoreW(addr, value, res, type)  \
 748do {                                                        \
 749		__asm__ __volatile__ (                      \
 750			"1:\t"type##_swl("%1", "3(%2)")"\n" \
 751			"2:\t"type##_swr("%1", "(%2)")"\n\t"\
 752			"li\t%0, 0\n"                       \
 753			"3:\n\t"                            \
 754			".insn\n\t"                         \
 755			".section\t.fixup,\"ax\"\n\t"       \
 756			"4:\tli\t%0, %3\n\t"                \
 757			"j\t3b\n\t"                         \
 758			".previous\n\t"                     \
 759			".section\t__ex_table,\"a\"\n\t"    \
 760			STR(PTR)"\t1b, 4b\n\t"              \
 761			STR(PTR)"\t2b, 4b\n\t"              \
 762			".previous"                         \
 763		: "=r" (res)                                \
 764		: "r" (value), "r" (addr), "i" (-EFAULT));  \
 765} while(0)
 766
 767#define     _StoreDW(addr, value, res) \
 768do {                                                        \
 769		__asm__ __volatile__ (                      \
 770			"1:\tsdl\t%1, 7(%2)\n"              \
 771			"2:\tsdr\t%1, (%2)\n\t"             \
 772			"li\t%0, 0\n"                       \
 773			"3:\n\t"                            \
 774			".insn\n\t"                         \
 775			".section\t.fixup,\"ax\"\n\t"       \
 776			"4:\tli\t%0, %3\n\t"                \
 777			"j\t3b\n\t"                         \
 778			".previous\n\t"                     \
 779			".section\t__ex_table,\"a\"\n\t"    \
 780			STR(PTR)"\t1b, 4b\n\t"              \
 781			STR(PTR)"\t2b, 4b\n\t"              \
 782			".previous"                         \
 783		: "=r" (res)                                \
 784		: "r" (value), "r" (addr), "i" (-EFAULT));  \
 785} while(0)
 786
 787#else
 788/* MIPSR6 has no swl and sdl instructions */
 789#define     _StoreW(addr, value, res, type)  \
 790do {                                                        \
 791		__asm__ __volatile__ (                      \
 792			".set\tpush\n\t"		    \
 793			".set\tnoat\n\t"		    \
 794			"1:"type##_sb("%1", "0(%2)")"\n\t"  \
 795			"srl\t$1, %1, 0x8\n\t"		    \
 796			"2:"type##_sb("$1", "1(%2)")"\n\t"  \
 797			"srl\t$1, $1,  0x8\n\t"		    \
 798			"3:"type##_sb("$1", "2(%2)")"\n\t"  \
 799			"srl\t$1, $1, 0x8\n\t"		    \
 800			"4:"type##_sb("$1", "3(%2)")"\n\t"  \
 801			".set\tpop\n\t"			    \
 802			"li\t%0, 0\n"			    \
 803			"10:\n\t"			    \
 804			".insn\n\t"			    \
 805			".section\t.fixup,\"ax\"\n\t"	    \
 806			"11:\tli\t%0, %3\n\t"		    \
 807			"j\t10b\n\t"			    \
 808			".previous\n\t"			    \
 809			".section\t__ex_table,\"a\"\n\t"    \
 810			STR(PTR)"\t1b, 11b\n\t"		    \
 811			STR(PTR)"\t2b, 11b\n\t"		    \
 812			STR(PTR)"\t3b, 11b\n\t"		    \
 813			STR(PTR)"\t4b, 11b\n\t"		    \
 814			".previous"			    \
 815		: "=&r" (res)			    	    \
 816		: "r" (value), "r" (addr), "i" (-EFAULT)    \
 817		: "memory");                                \
 818} while(0)
 819
 820#define     _StoreDW(addr, value, res) \
 821do {                                                        \
 822		__asm__ __volatile__ (                      \
 823			".set\tpush\n\t"		    \
 824			".set\tnoat\n\t"		    \
 825			"1:sb\t%1, 0(%2)\n\t"    	    \
 826			"dsrl\t$1, %1, 0x8\n\t"		    \
 827			"2:sb\t$1, 1(%2)\n\t"    	    \
 828			"dsrl\t$1, $1, 0x8\n\t"		    \
 829			"3:sb\t$1, 2(%2)\n\t"    	    \
 830			"dsrl\t$1, $1, 0x8\n\t"		    \
 831			"4:sb\t$1, 3(%2)\n\t"    	    \
 832			"dsrl\t$1, $1, 0x8\n\t"		    \
 833			"5:sb\t$1, 4(%2)\n\t"    	    \
 834			"dsrl\t$1, $1, 0x8\n\t"		    \
 835			"6:sb\t$1, 5(%2)\n\t"    	    \
 836			"dsrl\t$1, $1, 0x8\n\t"		    \
 837			"7:sb\t$1, 6(%2)\n\t"    	    \
 838			"dsrl\t$1, $1, 0x8\n\t"		    \
 839			"8:sb\t$1, 7(%2)\n\t"    	    \
 840			"dsrl\t$1, $1, 0x8\n\t"		    \
 841			".set\tpop\n\t"			    \
 842			"li\t%0, 0\n"			    \
 843			"10:\n\t"			    \
 844			".insn\n\t"			    \
 845			".section\t.fixup,\"ax\"\n\t"	    \
 846			"11:\tli\t%0, %3\n\t"		    \
 847			"j\t10b\n\t"			    \
 848			".previous\n\t"			    \
 849			".section\t__ex_table,\"a\"\n\t"    \
 850			STR(PTR)"\t1b, 11b\n\t"		    \
 851			STR(PTR)"\t2b, 11b\n\t"		    \
 852			STR(PTR)"\t3b, 11b\n\t"		    \
 853			STR(PTR)"\t4b, 11b\n\t"		    \
 854			STR(PTR)"\t5b, 11b\n\t"		    \
 855			STR(PTR)"\t6b, 11b\n\t"		    \
 856			STR(PTR)"\t7b, 11b\n\t"		    \
 857			STR(PTR)"\t8b, 11b\n\t"		    \
 858			".previous"			    \
 859		: "=&r" (res)			    	    \
 860		: "r" (value), "r" (addr), "i" (-EFAULT)    \
 861		: "memory");                                \
 862} while(0)
 863
 864#endif /* CONFIG_CPU_MIPSR6 */
 865#endif
 866
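    /*
     * Convenience wrappers for the emulation code below: the plain forms
     * use the kernel_* accessors, while the "E" forms pass the user_*
     * accessors, which select the EVA user-mode load/store variants when
     * CONFIG_EVA is enabled.
     */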
 867#define LoadHWU(addr, value, res)	_LoadHWU(addr, value, res, kernel)
 868#define LoadHWUE(addr, value, res)	_LoadHWU(addr, value, res, user)
 869#define LoadWU(addr, value, res)	_LoadWU(addr, value, res, kernel)
 870#define LoadWUE(addr, value, res)	_LoadWU(addr, value, res, user)
 871#define LoadHW(addr, value, res)	_LoadHW(addr, value, res, kernel)
 872#define LoadHWE(addr, value, res)	_LoadHW(addr, value, res, user)
 873#define LoadW(addr, value, res)		_LoadW(addr, value, res, kernel)
 874#define LoadWE(addr, value, res)	_LoadW(addr, value, res, user)
 875#define LoadDW(addr, value, res)	_LoadDW(addr, value, res)
 876
 877#define StoreHW(addr, value, res)	_StoreHW(addr, value, res, kernel)
 878#define StoreHWE(addr, value, res)	_StoreHW(addr, value, res, user)
 879#define StoreW(addr, value, res)	_StoreW(addr, value, res, kernel)
 880#define StoreWE(addr, value, res)	_StoreW(addr, value, res, user)
 881#define StoreDW(addr, value, res)	_StoreDW(addr, value, res)
 882
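    /*
     * Emulate an unaligned load/store for the standard MIPS ISA: decode the
     * instruction at @pc, perform the access to @addr in software with the
     * helpers above, and advance EPC via compute_return_epc(), which also
     * handles branch delay slots.  On failure we fall through to the
     * fault/sigbus/sigill paths at the bottom.
     */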
 883static void emulate_load_store_insn(struct pt_regs *regs,
 884	void __user *addr, unsigned int __user *pc)
 885{
 886	union mips_instruction insn;
 887	unsigned long value;
 888	unsigned int res, preempted;
 889	unsigned long origpc;
 890	unsigned long orig31;
 891	void __user *fault_addr = NULL;
 892#ifdef	CONFIG_EVA
 893	mm_segment_t seg;
 894#endif
 895	union fpureg *fpr;
 896	enum msa_2b_fmt df;
 897	unsigned int wd;
 898	origpc = (unsigned long)pc;
 899	orig31 = regs->regs[31];
 900
 901	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 902
 903	/*
 904	 * This load never faults.
 905	 */
 906	__get_user(insn.word, pc);
 907
 908	switch (insn.i_format.opcode) {
 909		/*
 910		 * These are instructions that a compiler doesn't generate.  We
 911		 * can assume therefore that the code is MIPS-aware and
 912		 * really buggy.  Emulating these instructions would break the
 913		 * semantics anyway.
 914		 */
 915	case ll_op:
 916	case lld_op:
 917	case sc_op:
 918	case scd_op:
 919
 920		/*
 921		 * For these instructions the only way to create an address
 922		 * error is an attempted access to kernel/supervisor address
 923		 * space.
 924		 */
 925	case ldl_op:
 926	case ldr_op:
 927	case lwl_op:
 928	case lwr_op:
 929	case sdl_op:
 930	case sdr_op:
 931	case swl_op:
 932	case swr_op:
 933	case lb_op:
 934	case lbu_op:
 935	case sb_op:
 936		goto sigbus;
 937
 938		/*
 939		 * The remaining opcodes are the ones that are really of
 940		 * interest.
 941		 */
 942#ifdef CONFIG_EVA
 943	case spec3_op:
 944		/*
 945		 * We can land here only from the kernel accessing user memory,
 946		 * so we need to "switch" the address limit to user space so
 947		 * that the address check can work properly.
 948		 */
 949		seg = get_fs();
 950		set_fs(USER_DS);
 951		switch (insn.spec3_format.func) {
 952		case lhe_op:
 953			if (!access_ok(VERIFY_READ, addr, 2)) {
 954				set_fs(seg);
 955				goto sigbus;
 956			}
 957			LoadHWE(addr, value, res);
 958			if (res) {
 959				set_fs(seg);
 960				goto fault;
 961			}
 962			compute_return_epc(regs);
 963			regs->regs[insn.spec3_format.rt] = value;
 964			break;
 965		case lwe_op:
 966			if (!access_ok(VERIFY_READ, addr, 4)) {
 967				set_fs(seg);
 968				goto sigbus;
 969			}
 970			LoadWE(addr, value, res);
 971			if (res) {
 972				set_fs(seg);
 973				goto fault;
 974			}
 975			compute_return_epc(regs);
 976			regs->regs[insn.spec3_format.rt] = value;
 977			break;
 978		case lhue_op:
 979			if (!access_ok(VERIFY_READ, addr, 2)) {
 980				set_fs(seg);
 981				goto sigbus;
 982			}
 983			LoadHWUE(addr, value, res);
 984			if (res) {
 985				set_fs(seg);
 986				goto fault;
 987			}
 988			compute_return_epc(regs);
 989			regs->regs[insn.spec3_format.rt] = value;
 990			break;
 991		case she_op:
 992			if (!access_ok(VERIFY_WRITE, addr, 2)) {
 993				set_fs(seg);
 994				goto sigbus;
 995			}
 996			compute_return_epc(regs);
 997			value = regs->regs[insn.spec3_format.rt];
 998			StoreHWE(addr, value, res);
 999			if (res) {
1000				set_fs(seg);
1001				goto fault;
1002			}
1003			break;
1004		case swe_op:
1005			if (!access_ok(VERIFY_WRITE, addr, 4)) {
1006				set_fs(seg);
1007				goto sigbus;
1008			}
1009			compute_return_epc(regs);
1010			value = regs->regs[insn.spec3_format.rt];
1011			StoreWE(addr, value, res);
1012			if (res) {
1013				set_fs(seg);
1014				goto fault;
1015			}
1016			break;
1017		default:
1018			set_fs(seg);
1019			goto sigill;
1020		}
1021		set_fs(seg);
1022		break;
1023#endif
1024	case lh_op:
1025		if (!access_ok(VERIFY_READ, addr, 2))
1026			goto sigbus;
1027
1028		if (IS_ENABLED(CONFIG_EVA)) {
1029			if (segment_eq(get_fs(), get_ds()))
1030				LoadHW(addr, value, res);
1031			else
1032				LoadHWE(addr, value, res);
1033		} else {
1034			LoadHW(addr, value, res);
1035		}
1036
1037		if (res)
1038			goto fault;
1039		compute_return_epc(regs);
1040		regs->regs[insn.i_format.rt] = value;
1041		break;
1042
1043	case lw_op:
1044		if (!access_ok(VERIFY_READ, addr, 4))
1045			goto sigbus;
1046
1047		if (IS_ENABLED(CONFIG_EVA)) {
1048			if (segment_eq(get_fs(), get_ds()))
1049				LoadW(addr, value, res);
1050			else
1051				LoadWE(addr, value, res);
1052		} else {
1053			LoadW(addr, value, res);
1054		}
1055
1056		if (res)
1057			goto fault;
1058		compute_return_epc(regs);
1059		regs->regs[insn.i_format.rt] = value;
1060		break;
1061
1062	case lhu_op:
1063		if (!access_ok(VERIFY_READ, addr, 2))
1064			goto sigbus;
1065
1066		if (IS_ENABLED(CONFIG_EVA)) {
1067			if (segment_eq(get_fs(), get_ds()))
1068				LoadHWU(addr, value, res);
1069			else
1070				LoadHWUE(addr, value, res);
1071		} else {
1072			LoadHWU(addr, value, res);
1073		}
1074
1075		if (res)
1076			goto fault;
1077		compute_return_epc(regs);
1078		regs->regs[insn.i_format.rt] = value;
1079		break;
1080
1081	case lwu_op:
1082#ifdef CONFIG_64BIT
1083		/*
1084		 * A 32-bit kernel might be running on a 64-bit processor.  But
1085		 * if we're on a 32-bit processor and an i-cache incoherency
1086		 * or race makes us see a 64-bit instruction here the sdl/sdr
1087		 * would blow up, so for now we don't handle unaligned 64-bit
1088		 * instructions on 32-bit kernels.
1089		 */
1090		if (!access_ok(VERIFY_READ, addr, 4))
1091			goto sigbus;
1092
1093		LoadWU(addr, value, res);
1094		if (res)
1095			goto fault;
1096		compute_return_epc(regs);
1097		regs->regs[insn.i_format.rt] = value;
1098		break;
1099#endif /* CONFIG_64BIT */
1100
1101		/* Cannot handle 64-bit instructions in 32-bit kernel */
1102		goto sigill;
1103
1104	case ld_op:
1105#ifdef CONFIG_64BIT
1106		/*
1107		 * A 32-bit kernel might be running on a 64-bit processor.  But
1108		 * if we're on a 32-bit processor and an i-cache incoherency
1109		 * or race makes us see a 64-bit instruction here the sdl/sdr
1110		 * would blow up, so for now we don't handle unaligned 64-bit
1111		 * instructions on 32-bit kernels.
1112		 */
1113		if (!access_ok(VERIFY_READ, addr, 8))
1114			goto sigbus;
1115
1116		LoadDW(addr, value, res);
1117		if (res)
1118			goto fault;
1119		compute_return_epc(regs);
1120		regs->regs[insn.i_format.rt] = value;
1121		break;
1122#endif /* CONFIG_64BIT */
1123
1124		/* Cannot handle 64-bit instructions in 32-bit kernel */
1125		goto sigill;
1126
1127	case sh_op:
1128		if (!access_ok(VERIFY_WRITE, addr, 2))
1129			goto sigbus;
1130
1131		compute_return_epc(regs);
1132		value = regs->regs[insn.i_format.rt];
1133
1134		if (IS_ENABLED(CONFIG_EVA)) {
1135			if (segment_eq(get_fs(), get_ds()))
1136				StoreHW(addr, value, res);
1137			else
1138				StoreHWE(addr, value, res);
1139		} else {
1140			StoreHW(addr, value, res);
1141		}
1142
1143		if (res)
1144			goto fault;
1145		break;
1146
1147	case sw_op:
1148		if (!access_ok(VERIFY_WRITE, addr, 4))
1149			goto sigbus;
1150
1151		compute_return_epc(regs);
1152		value = regs->regs[insn.i_format.rt];
1153
1154		if (IS_ENABLED(CONFIG_EVA)) {
1155			if (segment_eq(get_fs(), get_ds()))
1156				StoreW(addr, value, res);
1157			else
1158				StoreWE(addr, value, res);
1159		} else {
1160			StoreW(addr, value, res);
1161		}
1162
1163		if (res)
1164			goto fault;
1165		break;
1166
1167	case sd_op:
1168#ifdef CONFIG_64BIT
1169		/*
1170		 * A 32-bit kernel might be running on a 64-bit processor.  But
1171		 * if we're on a 32-bit processor and an i-cache incoherency
1172		 * or race makes us see a 64-bit instruction here the sdl/sdr
1173		 * would blow up, so for now we don't handle unaligned 64-bit
1174		 * instructions on 32-bit kernels.
1175		 */
1176		if (!access_ok(VERIFY_WRITE, addr, 8))
1177			goto sigbus;
1178
1179		compute_return_epc(regs);
1180		value = regs->regs[insn.i_format.rt];
1181		StoreDW(addr, value, res);
1182		if (res)
1183			goto fault;
1184		break;
1185#endif /* CONFIG_64BIT */
1186
1187		/* Cannot handle 64-bit instructions in 32-bit kernel */
1188		goto sigill;
1189
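    	/*
    	 * Unaligned FP accesses are handed off to the full FPU emulator,
    	 * which performs the access in software.
    	 */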
1190	case lwc1_op:
1191	case ldc1_op:
1192	case swc1_op:
1193	case sdc1_op:
1194	case cop1x_op:
1195		die_if_kernel("Unaligned FP access in kernel code", regs);
1196		BUG_ON(!used_math());
1197
1198		lose_fpu(1);	/* Save FPU state for the emulator. */
1199		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1200					       &fault_addr);
1201		own_fpu(1);	/* Restore FPU state. */
1202
1203		/* Signal if something went wrong. */
1204		process_fpemu_return(res, fault_addr, 0);
1205
1206		if (res == 0)
1207			break;
1208		return;
1209
1210	case msa_op:
1211		if (!cpu_has_msa)
1212			goto sigill;
1213
1214		/*
1215		 * If we've reached this point then userland should have taken
1216		 * the MSA disabled exception & initialised vector context at
1217		 * some point in the past.
1218		 */
1219		BUG_ON(!thread_msa_context_live());
1220
1221		df = insn.msa_mi10_format.df;
1222		wd = insn.msa_mi10_format.wd;
1223		fpr = &current->thread.fpu.fpr[wd];
1224
1225		switch (insn.msa_mi10_format.func) {
1226		case msa_ld_op:
1227			if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
1228				goto sigbus;
1229
1230			do {
1231				/*
1232				 * If we have live MSA context keep track of
1233				 * whether we get preempted in order to avoid
1234				 * the register context we load being clobbered
1235				 * by the live context as it's saved during
1236				 * preemption. If we don't have live context
1237				 * then it can't be saved to clobber the value
1238				 * we load.
1239				 */
1240				preempted = test_thread_flag(TIF_USEDMSA);
1241
1242				res = __copy_from_user_inatomic(fpr, addr,
1243								sizeof(*fpr));
1244				if (res)
1245					goto fault;
1246
1247				/*
1248				 * Update the hardware register if it is in use
1249				 * by the task in this quantum, in order to
1250				 * avoid having to save & restore the whole
1251				 * vector context.
1252				 */
1253				preempt_disable();
1254				if (test_thread_flag(TIF_USEDMSA)) {
1255					write_msa_wr(wd, fpr, df);
1256					preempted = 0;
1257				}
1258				preempt_enable();
1259			} while (preempted);
1260			break;
1261
1262		case msa_st_op:
1263			if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr)))
1264				goto sigbus;
1265
1266			/*
1267			 * Update from the hardware register if it is in use by
1268			 * the task in this quantum, in order to avoid having to
1269			 * save & restore the whole vector context.
1270			 */
1271			preempt_disable();
1272			if (test_thread_flag(TIF_USEDMSA))
1273				read_msa_wr(wd, fpr, df);
1274			preempt_enable();
1275
1276			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
1277			if (res)
1278				goto fault;
1279			break;
1280
1281		default:
1282			goto sigbus;
1283		}
1284
1285		compute_return_epc(regs);
1286		break;
1287
1288#ifndef CONFIG_CPU_MIPSR6
1289	/*
1290	 * COP2 is available to the implementor for application-specific use.
1291	 * It's up to applications to register a notifier chain and do
1292	 * whatever they have to do, including possibly sending signals.
1293	 *
1294	 * These opcodes have been reallocated in Release 6.
1295	 */
1296	case lwc2_op:
1297		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
1298		break;
1299
1300	case ldc2_op:
1301		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
1302		break;
1303
1304	case swc2_op:
1305		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
1306		break;
1307
1308	case sdc2_op:
1309		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
1310		break;
1311#endif
1312	default:
1313		/*
1314		 * Pheeee...  We encountered an as yet unknown instruction or
1315		 * cache coherence problem.  Die sucker, die ...
1316		 */
1317		goto sigill;
1318	}
1319
1320#ifdef CONFIG_DEBUG_FS
1321	unaligned_instructions++;
1322#endif
1323
1324	return;
1325
1326fault:
1327	/* roll back jump/branch */
1328	regs->cp0_epc = origpc;
1329	regs->regs[31] = orig31;
1330	/* Did we have an exception handler installed? */
1331	if (fixup_exception(regs))
1332		return;
1333
1334	die_if_kernel("Unhandled kernel unaligned access", regs);
1335	force_sig(SIGSEGV, current);
1336
1337	return;
1338
1339sigbus:
1340	die_if_kernel("Unhandled kernel unaligned access", regs);
1341	force_sig(SIGBUS, current);
1342
1343	return;
1344
1345sigill:
1346	die_if_kernel
1347	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1348	force_sig(SIGILL, current);
1349}
1350
1351/* Recode table from 16-bit register notation to 32-bit GPR. */
1352const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
1353
1354/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
1355const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
1356
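    /*
     * microMIPS counterpart of emulate_load_store_insn().  The instruction
     * is fetched as one or two halfwords; if the instruction at EPC is a
     * branch, the faulting access is in its delay slot, so the following
     * instruction is decoded instead and contpc is set to where execution
     * should resume.
     */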
1357static void emulate_load_store_microMIPS(struct pt_regs *regs,
1358					 void __user *addr)
1359{
1360	unsigned long value;
1361	unsigned int res;
1362	int i;
1363	unsigned int reg = 0, rvar;
1364	unsigned long orig31;
1365	u16 __user *pc16;
1366	u16 halfword;
1367	unsigned int word;
1368	unsigned long origpc, contpc;
1369	union mips_instruction insn;
1370	struct mm_decoded_insn mminsn;
1371	void __user *fault_addr = NULL;
1372
1373	origpc = regs->cp0_epc;
1374	orig31 = regs->regs[31];
1375
1376	mminsn.micro_mips_mode = 1;
1377
1378	/*
1379	 * This load never faults.
1380	 */
1381	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
1382	__get_user(halfword, pc16);
1383	pc16++;
1384	contpc = regs->cp0_epc + 2;
1385	word = ((unsigned int)halfword << 16);
1386	mminsn.pc_inc = 2;
1387
1388	if (!mm_insn_16bit(halfword)) {
1389		__get_user(halfword, pc16);
1390		pc16++;
1391		contpc = regs->cp0_epc + 4;
1392		mminsn.pc_inc = 4;
1393		word |= halfword;
1394	}
1395	mminsn.insn = word;
1396
1397	if (get_user(halfword, pc16))
1398		goto fault;
1399	mminsn.next_pc_inc = 2;
1400	word = ((unsigned int)halfword << 16);
1401
1402	if (!mm_insn_16bit(halfword)) {
1403		pc16++;
1404		if (get_user(halfword, pc16))
1405			goto fault;
1406		mminsn.next_pc_inc = 4;
1407		word |= halfword;
1408	}
1409	mminsn.next_insn = word;
1410
1411	insn = (union mips_instruction)(mminsn.insn);
1412	if (mm_isBranchInstr(regs, mminsn, &contpc))
1413		insn = (union mips_instruction)(mminsn.next_insn);
1414
1415	/*  Parse instruction to find what to do */
1416
1417	switch (insn.mm_i_format.opcode) {
1418
1419	case mm_pool32a_op:
1420		switch (insn.mm_x_format.func) {
1421		case mm_lwxs_op:
1422			reg = insn.mm_x_format.rd;
1423			goto loadW;
1424		}
1425
1426		goto sigbus;
1427
1428	case mm_pool32b_op:
1429		switch (insn.mm_m_format.func) {
1430		case mm_lwp_func:
1431			reg = insn.mm_m_format.rd;
1432			if (reg == 31)
1433				goto sigbus;
1434
1435			if (!access_ok(VERIFY_READ, addr, 8))
1436				goto sigbus;
1437
1438			LoadW(addr, value, res);
1439			if (res)
1440				goto fault;
1441			regs->regs[reg] = value;
1442			addr += 4;
1443			LoadW(addr, value, res);
1444			if (res)
1445				goto fault;
1446			regs->regs[reg + 1] = value;
1447			goto success;
1448
1449		case mm_swp_func:
1450			reg = insn.mm_m_format.rd;
1451			if (reg == 31)
1452				goto sigbus;
1453
1454			if (!access_ok(VERIFY_WRITE, addr, 8))
1455				goto sigbus;
1456
1457			value = regs->regs[reg];
1458			StoreW(addr, value, res);
1459			if (res)
1460				goto fault;
1461			addr += 4;
1462			value = regs->regs[reg + 1];
1463			StoreW(addr, value, res);
1464			if (res)
1465				goto fault;
1466			goto success;
1467
1468		case mm_ldp_func:
1469#ifdef CONFIG_64BIT
1470			reg = insn.mm_m_format.rd;
1471			if (reg == 31)
1472				goto sigbus;
1473
1474			if (!access_ok(VERIFY_READ, addr, 16))
1475				goto sigbus;
1476
1477			LoadDW(addr, value, res);
1478			if (res)
1479				goto fault;
1480			regs->regs[reg] = value;
1481			addr += 8;
1482			LoadDW(addr, value, res);
1483			if (res)
1484				goto fault;
1485			regs->regs[reg + 1] = value;
1486			goto success;
1487#endif /* CONFIG_64BIT */
1488
1489			goto sigill;
1490
1491		case mm_sdp_func:
1492#ifdef CONFIG_64BIT
1493			reg = insn.mm_m_format.rd;
1494			if (reg == 31)
1495				goto sigbus;
1496
1497			if (!access_ok(VERIFY_WRITE, addr, 16))
1498				goto sigbus;
1499
1500			value = regs->regs[reg];
1501			StoreDW(addr, value, res);
1502			if (res)
1503				goto fault;
1504			addr += 8;
1505			value = regs->regs[reg + 1];
1506			StoreDW(addr, value, res);
1507			if (res)
1508				goto fault;
1509			goto success;
1510#endif /* CONFIG_64BIT */
1511
1512			goto sigill;
1513
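    		/*
    		 * LWM32/SWM32: rd encodes a register list - the low four
    		 * bits give the number of registers transferred starting at
    		 * $16 (a count of 9 means $16-$23 plus $30), and bit 4 adds
    		 * $31 (ra).
    		 */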
1514		case mm_lwm32_func:
1515			reg = insn.mm_m_format.rd;
1516			rvar = reg & 0xf;
1517			if ((rvar > 9) || !reg)
1518				goto sigill;
1519			if (reg & 0x10) {
1520				if (!access_ok
1521				    (VERIFY_READ, addr, 4 * (rvar + 1)))
1522					goto sigbus;
1523			} else {
1524				if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1525					goto sigbus;
1526			}
1527			if (rvar == 9)
1528				rvar = 8;
1529			for (i = 16; rvar; rvar--, i++) {
1530				LoadW(addr, value, res);
1531				if (res)
1532					goto fault;
1533				addr += 4;
1534				regs->regs[i] = value;
1535			}
1536			if ((reg & 0xf) == 9) {
1537				LoadW(addr, value, res);
1538				if (res)
1539					goto fault;
1540				addr += 4;
1541				regs->regs[30] = value;
1542			}
1543			if (reg & 0x10) {
1544				LoadW(addr, value, res);
1545				if (res)
1546					goto fault;
1547				regs->regs[31] = value;
1548			}
1549			goto success;
1550
1551		case mm_swm32_func:
1552			reg = insn.mm_m_format.rd;
1553			rvar = reg & 0xf;
1554			if ((rvar > 9) || !reg)
1555				goto sigill;
1556			if (reg & 0x10) {
1557				if (!access_ok
1558				    (VERIFY_WRITE, addr, 4 * (rvar + 1)))
1559					goto sigbus;
1560			} else {
1561				if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1562					goto sigbus;
1563			}
1564			if (rvar == 9)
1565				rvar = 8;
1566			for (i = 16; rvar; rvar--, i++) {
1567				value = regs->regs[i];
1568				StoreW(addr, value, res);
1569				if (res)
1570					goto fault;
1571				addr += 4;
1572			}
1573			if ((reg & 0xf) == 9) {
1574				value = regs->regs[30];
1575				StoreW(addr, value, res);
1576				if (res)
1577					goto fault;
1578				addr += 4;
1579			}
1580			if (reg & 0x10) {
1581				value = regs->regs[31];
1582				StoreW(addr, value, res);
1583				if (res)
1584					goto fault;
1585			}
1586			goto success;
1587
1588		case mm_ldm_func:
1589#ifdef CONFIG_64BIT
1590			reg = insn.mm_m_format.rd;
1591			rvar = reg & 0xf;
1592			if ((rvar > 9) || !reg)
1593				goto sigill;
1594			if (reg & 0x10) {
1595				if (!access_ok
1596				    (VERIFY_READ, addr, 8 * (rvar + 1)))
1597					goto sigbus;
1598			} else {
1599				if (!access_ok(VERIFY_READ, addr, 8 * rvar))
1600					goto sigbus;
1601			}
1602			if (rvar == 9)
1603				rvar = 8;
1604
1605			for (i = 16; rvar; rvar--, i++) {
1606				LoadDW(addr, value, res);
1607				if (res)
1608					goto fault;
1609				addr += 8;
1610				regs->regs[i] = value;
1611			}
1612			if ((reg & 0xf) == 9) {
1613				LoadDW(addr, value, res);
1614				if (res)
1615					goto fault;
1616				addr += 8;
1617				regs->regs[30] = value;
1618			}
1619			if (reg & 0x10) {
1620				LoadDW(addr, value, res);
1621				if (res)
1622					goto fault;
1623				regs->regs[31] = value;
1624			}
1625			goto success;
1626#endif /* CONFIG_64BIT */
1627
1628			goto sigill;
1629
1630		case mm_sdm_func:
1631#ifdef CONFIG_64BIT
1632			reg = insn.mm_m_format.rd;
1633			rvar = reg & 0xf;
1634			if ((rvar > 9) || !reg)
1635				goto sigill;
1636			if (reg & 0x10) {
1637				if (!access_ok
1638				    (VERIFY_WRITE, addr, 8 * (rvar + 1)))
1639					goto sigbus;
1640			} else {
1641				if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
1642					goto sigbus;
1643			}
1644			if (rvar == 9)
1645				rvar = 8;
1646
1647			for (i = 16; rvar; rvar--, i++) {
1648				value = regs->regs[i];
1649				StoreDW(addr, value, res);
1650				if (res)
1651					goto fault;
1652				addr += 8;
1653			}
1654			if ((reg & 0xf) == 9) {
1655				value = regs->regs[30];
1656				StoreDW(addr, value, res);
1657				if (res)
1658					goto fault;
1659				addr += 8;
1660			}
1661			if (reg & 0x10) {
1662				value = regs->regs[31];
1663				StoreDW(addr, value, res);
1664				if (res)
1665					goto fault;
1666			}
1667			goto success;
1668#endif /* CONFIG_64BIT */
1669
1670			goto sigill;
1671
1672			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
1673		}
1674
1675		goto sigbus;
1676
1677	case mm_pool32c_op:
1678		switch (insn.mm_m_format.func) {
1679		case mm_lwu_func:
1680			reg = insn.mm_m_format.rd;
1681			goto loadWU;
1682		}
1683
1684		/*  LL,SC,LLD,SCD are not serviced */
1685		goto sigbus;
1686
1687	case mm_pool32f_op:
1688		switch (insn.mm_x_format.func) {
1689		case mm_lwxc1_func:
1690		case mm_swxc1_func:
1691		case mm_ldxc1_func:
1692		case mm_sdxc1_func:
1693			goto fpu_emul;
1694		}
1695
1696		goto sigbus;
1697
1698	case mm_ldc132_op:
1699	case mm_sdc132_op:
1700	case mm_lwc132_op:
1701	case mm_swc132_op:
1702fpu_emul:
1703		/* roll back jump/branch */
1704		regs->cp0_epc = origpc;
1705		regs->regs[31] = orig31;
1706
1707		die_if_kernel("Unaligned FP access in kernel code", regs);
1708		BUG_ON(!used_math());
1709		BUG_ON(!is_fpu_owner());
1710
1711		lose_fpu(1);	/* save the FPU state for the emulator */
1712		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1713					       &fault_addr);
1714		own_fpu(1);	/* restore FPU state */
1715
1716		/* If something went wrong, signal */
1717		process_fpemu_return(res, fault_addr, 0);
1718
1719		if (res == 0)
1720			goto success;
1721		return;
1722
1723	case mm_lh32_op:
1724		reg = insn.mm_i_format.rt;
1725		goto loadHW;
1726
1727	case mm_lhu32_op:
1728		reg = insn.mm_i_format.rt;
1729		goto loadHWU;
1730
1731	case mm_lw32_op:
1732		reg = insn.mm_i_format.rt;
1733		goto loadW;
1734
1735	case mm_sh32_op:
1736		reg = insn.mm_i_format.rt;
1737		goto storeHW;
1738
1739	case mm_sw32_op:
1740		reg = insn.mm_i_format.rt;
1741		goto storeW;
1742
1743	case mm_ld32_op:
1744		reg = insn.mm_i_format.rt;
1745		goto loadDW;
1746
1747	case mm_sd32_op:
1748		reg = insn.mm_i_format.rt;
1749		goto storeDW;
1750
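    	/*
    	 * 16-bit LWM/SWM: rlist selects a run of registers starting at
    	 * $16, with $31 (ra) always transferred as well.
    	 */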
1751	case mm_pool16c_op:
1752		switch (insn.mm16_m_format.func) {
1753		case mm_lwm16_op:
1754			reg = insn.mm16_m_format.rlist;
1755			rvar = reg + 1;
1756			if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1757				goto sigbus;
1758
1759			for (i = 16; rvar; rvar--, i++) {
1760				LoadW(addr, value, res);
1761				if (res)
1762					goto fault;
1763				addr += 4;
1764				regs->regs[i] = value;
1765			}
1766			LoadW(addr, value, res);
1767			if (res)
1768				goto fault;
1769			regs->regs[31] = value;
1770
1771			goto success;
1772
1773		case mm_swm16_op:
1774			reg = insn.mm16_m_format.rlist;
1775			rvar = reg + 1;
1776			if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1777				goto sigbus;
1778
1779			for (i = 16; rvar; rvar--, i++) {
1780				value = regs->regs[i];
1781				StoreW(addr, value, res);
1782				if (res)
1783					goto fault;
1784				addr += 4;
1785			}
1786			value = regs->regs[31];
1787			StoreW(addr, value, res);
1788			if (res)
1789				goto fault;
1790
1791			goto success;
1792
1793		}
1794
1795		goto sigbus;
1796
1797	case mm_lhu16_op:
1798		reg = reg16to32[insn.mm16_rb_format.rt];
1799		goto loadHWU;
1800
1801	case mm_lw16_op:
1802		reg = reg16to32[insn.mm16_rb_format.rt];
1803		goto loadW;
1804
1805	case mm_sh16_op:
1806		reg = reg16to32st[insn.mm16_rb_format.rt];
1807		goto storeHW;
1808
1809	case mm_sw16_op:
1810		reg = reg16to32st[insn.mm16_rb_format.rt];
1811		goto storeW;
1812
1813	case mm_lwsp16_op:
1814		reg = insn.mm16_r5_format.rt;
1815		goto loadW;
1816
1817	case mm_swsp16_op:
1818		reg = insn.mm16_r5_format.rt;
1819		goto storeW;
1820
1821	case mm_lwgp16_op:
1822		reg = reg16to32[insn.mm16_r3_format.rt];
1823		goto loadW;
1824
1825	default:
1826		goto sigill;
1827	}
1828
1829loadHW:
1830	if (!access_ok(VERIFY_READ, addr, 2))
1831		goto sigbus;
1832
1833	LoadHW(addr, value, res);
1834	if (res)
1835		goto fault;
1836	regs->regs[reg] = value;
1837	goto success;
1838
1839loadHWU:
1840	if (!access_ok(VERIFY_READ, addr, 2))
1841		goto sigbus;
1842
1843	LoadHWU(addr, value, res);
1844	if (res)
1845		goto fault;
1846	regs->regs[reg] = value;
1847	goto success;
1848
1849loadW:
1850	if (!access_ok(VERIFY_READ, addr, 4))
1851		goto sigbus;
1852
1853	LoadW(addr, value, res);
1854	if (res)
1855		goto fault;
1856	regs->regs[reg] = value;
1857	goto success;
1858
1859loadWU:
1860#ifdef CONFIG_64BIT
1861	/*
1862	 * A 32-bit kernel might be running on a 64-bit processor.  But
1863	 * if we're on a 32-bit processor and an i-cache incoherency
1864	 * or race makes us see a 64-bit instruction here the sdl/sdr
1865	 * would blow up, so for now we don't handle unaligned 64-bit
1866	 * instructions on 32-bit kernels.
1867	 */
1868	if (!access_ok(VERIFY_READ, addr, 4))
1869		goto sigbus;
1870
1871	LoadWU(addr, value, res);
1872	if (res)
1873		goto fault;
1874	regs->regs[reg] = value;
1875	goto success;
1876#endif /* CONFIG_64BIT */
1877
1878	/* Cannot handle 64-bit instructions in 32-bit kernel */
1879	goto sigill;
1880
1881loadDW:
1882#ifdef CONFIG_64BIT
1883	/*
1884	 * A 32-bit kernel might be running on a 64-bit processor.  But
1885	 * if we're on a 32-bit processor and an i-cache incoherency
1886	 * or race makes us see a 64-bit instruction here the sdl/sdr
1887	 * would blow up, so for now we don't handle unaligned 64-bit
1888	 * instructions on 32-bit kernels.
1889	 */
1890	if (!access_ok(VERIFY_READ, addr, 8))
1891		goto sigbus;
1892
1893	LoadDW(addr, value, res);
1894	if (res)
1895		goto fault;
1896	regs->regs[reg] = value;
1897	goto success;
1898#endif /* CONFIG_64BIT */
1899
1900	/* Cannot handle 64-bit instructions in 32-bit kernel */
1901	goto sigill;
1902
1903storeHW:
1904	if (!access_ok(VERIFY_WRITE, addr, 2))
1905		goto sigbus;
1906
1907	value = regs->regs[reg];
1908	StoreHW(addr, value, res);
1909	if (res)
1910		goto fault;
1911	goto success;
1912
1913storeW:
1914	if (!access_ok(VERIFY_WRITE, addr, 4))
1915		goto sigbus;
1916
1917	value = regs->regs[reg];
1918	StoreW(addr, value, res);
1919	if (res)
1920		goto fault;
1921	goto success;
1922
1923storeDW:
1924#ifdef CONFIG_64BIT
1925	/*
1926	 * A 32-bit kernel might be running on a 64-bit processor.  But
1927	 * if we're on a 32-bit processor and an i-cache incoherency
1928	 * or race makes us see a 64-bit instruction here the sdl/sdr
1929	 * would blow up, so for now we don't handle unaligned 64-bit
1930	 * instructions on 32-bit kernels.
1931	 */
1932	if (!access_ok(VERIFY_WRITE, addr, 8))
1933		goto sigbus;
1934
1935	value = regs->regs[reg];
1936	StoreDW(addr, value, res);
1937	if (res)
1938		goto fault;
1939	goto success;
1940#endif /* CONFIG_64BIT */
1941
1942	/* Cannot handle 64-bit instructions in 32-bit kernel */
1943	goto sigill;
1944
1945success:
1946	regs->cp0_epc = contpc;	/* advance or branch */
1947
1948#ifdef CONFIG_DEBUG_FS
1949	unaligned_instructions++;
1950#endif
1951	return;
1952
1953fault:
1954	/* roll back jump/branch */
1955	regs->cp0_epc = origpc;
1956	regs->regs[31] = orig31;
1957	/* Did we have an exception handler installed? */
1958	if (fixup_exception(regs))
1959		return;
1960
1961	die_if_kernel("Unhandled kernel unaligned access", regs);
1962	force_sig(SIGSEGV, current);
1963
1964	return;
1965
1966sigbus:
1967	die_if_kernel("Unhandled kernel unaligned access", regs);
1968	force_sig(SIGBUS, current);
1969
1970	return;
1971
1972sigill:
1973	die_if_kernel
1974	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1975	force_sig(SIGILL, current);
1976}
1977
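    /*
     * MIPS16e counterpart: the 16-bit instruction at EPC is decoded (skipping
     * an EXTEND prefix, or the preceding jump when the access sits in a
     * delay slot), and the access is then emulated with the same helpers as
     * above.
     */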
1978static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
1979{
1980	unsigned long value;
1981	unsigned int res;
1982	int reg;
1983	unsigned long orig31;
1984	u16 __user *pc16;
1985	unsigned long origpc;
1986	union mips16e_instruction mips16inst, oldinst;
1987
1988	origpc = regs->cp0_epc;
1989	orig31 = regs->regs[31];
1990	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1991	/*
1992	 * This load never faults.
1993	 */
1994	__get_user(mips16inst.full, pc16);
1995	oldinst = mips16inst;
1996
1997	/* skip EXTEND instruction */
1998	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1999		pc16++;
2000		__get_user(mips16inst.full, pc16);
2001	} else if (delay_slot(regs)) {
2002		/*  skip jump instructions */
2003		/*  JAL/JALX are 32 bits but have OPCODE in first short int */
2004		if (mips16inst.ri.opcode == MIPS16e_jal_op)
2005			pc16++;
2006		pc16++;
2007		if (get_user(mips16inst.full, pc16))
2008			goto sigbus;
2009	}
2010
2011	switch (mips16inst.ri.opcode) {
2012	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
2013		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
2014		case MIPS16e_ldpc_func:
2015		case MIPS16e_ldsp_func:
2016			reg = reg16to32[mips16inst.ri64.ry];
2017			goto loadDW;
2018
2019		case MIPS16e_sdsp_func:
2020			reg = reg16to32[mips16inst.ri64.ry];
2021			goto writeDW;
2022
2023		case MIPS16e_sdrasp_func:
2024			reg = 29;	/* GPRSP */
2025			goto writeDW;
2026		}
2027
2028		goto sigbus;
2029
2030	case MIPS16e_swsp_op:
2031	case MIPS16e_lwpc_op:
2032	case MIPS16e_lwsp_op:
2033		reg = reg16to32[mips16inst.ri.rx];
2034		break;
2035
2036	case MIPS16e_i8_op:
2037		if (mips16inst.i8.func != MIPS16e_swrasp_func)
2038			goto sigbus;
2039		reg = 29;	/* GPRSP */
2040		break;
2041
2042	default:
2043		reg = reg16to32[mips16inst.rri.ry];
2044		break;
2045	}
2046
2047	switch (mips16inst.ri.opcode) {
2048
2049	case MIPS16e_lb_op:
2050	case MIPS16e_lbu_op:
2051	case MIPS16e_sb_op:
2052		goto sigbus;
2053
2054	case MIPS16e_lh_op:
2055		if (!access_ok(VERIFY_READ, addr, 2))
2056			goto sigbus;
2057
2058		LoadHW(addr, value, res);
2059		if (res)
2060			goto fault;
2061		MIPS16e_compute_return_epc(regs, &oldinst);
2062		regs->regs[reg] = value;
2063		break;
2064
2065	case MIPS16e_lhu_op:
2066		if (!access_ok(VERIFY_READ, addr, 2))
2067			goto sigbus;
2068
2069		LoadHWU(addr, value, res);
2070		if (res)
2071			goto fault;
2072		MIPS16e_compute_return_epc(regs, &oldinst);
2073		regs->regs[reg] = value;
2074		break;
2075
2076	case MIPS16e_lw_op:
2077	case MIPS16e_lwpc_op:
2078	case MIPS16e_lwsp_op:
2079		if (!access_ok(VERIFY_READ, addr, 4))
2080			goto sigbus;
2081
2082		LoadW(addr, value, res);
2083		if (res)
2084			goto fault;
2085		MIPS16e_compute_return_epc(regs, &oldinst);
2086		regs->regs[reg] = value;
2087		break;
2088
2089	case MIPS16e_lwu_op:
2090#ifdef CONFIG_64BIT
2091		/*
2092		 * A 32-bit kernel might be running on a 64-bit processor.  But
2093		 * if we're on a 32-bit processor and an i-cache incoherency
2094		 * or race makes us see a 64-bit instruction here the sdl/sdr
2095		 * would blow up, so for now we don't handle unaligned 64-bit
2096		 * instructions on 32-bit kernels.
2097		 */
2098		if (!access_ok(VERIFY_READ, addr, 4))
2099			goto sigbus;
2100
2101		LoadWU(addr, value, res);
2102		if (res)
2103			goto fault;
2104		MIPS16e_compute_return_epc(regs, &oldinst);
2105		regs->regs[reg] = value;
2106		break;
2107#endif /* CONFIG_64BIT */
2108
2109		/* Cannot handle 64-bit instructions in 32-bit kernel */
2110		goto sigill;
2111
2112	case MIPS16e_ld_op:
2113loadDW:
2114#ifdef CONFIG_64BIT
2115		/*
2116		 * A 32-bit kernel might be running on a 64-bit processor.  But
2117		 * if we're on a 32-bit processor and an i-cache incoherency
2118		 * or race makes us see a 64-bit instruction here the sdl/sdr
2119		 * would blow up, so for now we don't handle unaligned 64-bit
2120		 * instructions on 32-bit kernels.
2121		 */
2122		if (!access_ok(VERIFY_READ, addr, 8))
2123			goto sigbus;
2124
2125		LoadDW(addr, value, res);
2126		if (res)
2127			goto fault;
2128		MIPS16e_compute_return_epc(regs, &oldinst);
2129		regs->regs[reg] = value;
2130		break;
2131#endif /* CONFIG_64BIT */
2132
2133		/* Cannot handle 64-bit instructions in 32-bit kernel */
2134		goto sigill;
2135
2136	case MIPS16e_sh_op:
2137		if (!access_ok(VERIFY_WRITE, addr, 2))
2138			goto sigbus;
2139
2140		MIPS16e_compute_return_epc(regs, &oldinst);
2141		value = regs->regs[reg];
2142		StoreHW(addr, value, res);
2143		if (res)
2144			goto fault;
2145		break;
2146
2147	case MIPS16e_sw_op:
2148	case MIPS16e_swsp_op:
2149	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
2150		if (!access_ok(VERIFY_WRITE, addr, 4))
2151			goto sigbus;
2152
2153		MIPS16e_compute_return_epc(regs, &oldinst);
2154		value = regs->regs[reg];
2155		StoreW(addr, value, res);
2156		if (res)
2157			goto fault;
2158		break;
2159
2160	case MIPS16e_sd_op:
2161writeDW:
2162#ifdef CONFIG_64BIT
2163		/*
2164		 * A 32-bit kernel might be running on a 64-bit processor.  But
2165		 * if we're on a 32-bit processor and an i-cache incoherency
2166		 * or race makes us see a 64-bit instruction here the sdl/sdr
2167		 * would blow up, so for now we don't handle unaligned 64-bit
2168		 * instructions on 32-bit kernels.
2169		 */
2170		if (!access_ok(VERIFY_WRITE, addr, 8))
2171			goto sigbus;
2172
2173		MIPS16e_compute_return_epc(regs, &oldinst);
2174		value = regs->regs[reg];
2175		StoreDW(addr, value, res);
2176		if (res)
2177			goto fault;
2178		break;
2179#endif /* CONFIG_64BIT */
2180
2181		/* Cannot handle 64-bit instructions in 32-bit kernel */
2182		goto sigill;
2183
2184	default:
2185		/*
2186		 * Pheeee...  We encountered an as yet unknown instruction or
2187		 * cache coherence problem.  Die sucker, die ...
2188		 */
2189		goto sigill;
2190	}
2191
2192#ifdef CONFIG_DEBUG_FS
2193	unaligned_instructions++;
2194#endif
2195
2196	return;
2197
2198fault:
2199	/* roll back jump/branch */
2200	regs->cp0_epc = origpc;
2201	regs->regs[31] = orig31;
2202	/* Did we have an exception handler installed? */
2203	if (fixup_exception(regs))
2204		return;
2205
2206	die_if_kernel("Unhandled kernel unaligned access", regs);
2207	force_sig(SIGSEGV, current);
2208
2209	return;
2210
2211sigbus:
2212	die_if_kernel("Unhandled kernel unaligned access", regs);
2213	force_sig(SIGBUS, current);
2214
2215	return;
2216
2217sigill:
2218	die_if_kernel
2219	    ("Unhandled kernel unaligned access or invalid instruction", regs);
2220	force_sig(SIGILL, current);
2221}
2222
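    /*
     * Address error exception handler.  After the usual checks (fault on the
     * instruction fetch itself, TIF_FIXADE, unaligned_action) this dispatches
     * to the microMIPS, MIPS16e or standard MIPS emulator above, depending on
     * the ISA mode of the faulting EPC.
     */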
2223asmlinkage void do_ade(struct pt_regs *regs)
2224{
2225	enum ctx_state prev_state;
2226	unsigned int __user *pc;
2227	mm_segment_t seg;
2228
2229	prev_state = exception_enter();
2230	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
2231			1, regs, regs->cp0_badvaddr);
2232	/*
2233	 * Did we catch a fault trying to load an instruction?
2234	 */
2235	if (regs->cp0_badvaddr == regs->cp0_epc)
2236		goto sigbus;
2237
2238	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
2239		goto sigbus;
2240	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
2241		goto sigbus;
2242
2243	/*
2244	 * Do branch emulation only if we didn't forward the exception.
2245	 * This is all so ugly ...
2246	 */
2247
2248	/*
2249	 * Are we running in microMIPS mode?
2250	 */
2251	if (get_isa16_mode(regs->cp0_epc)) {
2252		/*
2253		 * Did we catch a fault trying to load an instruction in
2254		 * 16-bit mode?
2255		 */
2256		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
2257			goto sigbus;
2258		if (unaligned_action == UNALIGNED_ACTION_SHOW)
2259			show_registers(regs);
2260
2261		if (cpu_has_mmips) {
2262			seg = get_fs();
2263			if (!user_mode(regs))
2264				set_fs(KERNEL_DS);
2265			emulate_load_store_microMIPS(regs,
2266				(void __user *)regs->cp0_badvaddr);
2267			set_fs(seg);
2268
2269			return;
2270		}
2271
2272		if (cpu_has_mips16) {
2273			seg = get_fs();
2274			if (!user_mode(regs))
2275				set_fs(KERNEL_DS);
2276			emulate_load_store_MIPS16e(regs,
2277				(void __user *)regs->cp0_badvaddr);
2278			set_fs(seg);
2279
2280			return;
2281		}
2282
2283		goto sigbus;
2284	}
2285
2286	if (unaligned_action == UNALIGNED_ACTION_SHOW)
2287		show_registers(regs);
2288	pc = (unsigned int __user *)exception_epc(regs);
2289
2290	seg = get_fs();
2291	if (!user_mode(regs))
2292		set_fs(KERNEL_DS);
2293	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
2294	set_fs(seg);
2295
2296	return;
2297
2298sigbus:
2299	die_if_kernel("Kernel unaligned instruction access", regs);
2300	force_sig(SIGBUS, current);
2301
2302	/*
2303	 * XXX On return from the signal handler we should advance the epc
2304	 */
2305	exception_exit(prev_state);
2306}
2307
2308#ifdef CONFIG_DEBUG_FS
2309static int __init debugfs_unaligned(void)
2310{
2311	struct dentry *d;
2312
2313	if (!mips_debugfs_dir)
2314		return -ENODEV;
2315	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
2316			       mips_debugfs_dir, &unaligned_instructions);
2317	if (!d)
2318		return -ENOMEM;
2319	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
2320			       mips_debugfs_dir, &unaligned_action);
2321	if (!d)
2322		return -ENOMEM;
2323	return 0;
2324}
2325arch_initcall(debugfs_unaligned);
2326#endif