   1/*
   2 * Handle unaligned accesses by emulation.
   3 *
   4 * This file is subject to the terms and conditions of the GNU General Public
   5 * License.  See the file "COPYING" in the main directory of this archive
   6 * for more details.
   7 *
   8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
   9 * Copyright (C) 1999 Silicon Graphics, Inc.
  10 * Copyright (C) 2014 Imagination Technologies Ltd.
  11 *
  12 * This file contains the exception handler for the address error exception,
  13 * with the special capability to execute faulting instructions in software.
  14 * The handler does not try to handle the case where the program counter
  15 * points to an address not aligned to a word boundary.
  16 *
  17 * Writing data to unaligned addresses is bad practice even on Intel, where
  18 * only performance is affected.  Much worse is that such code is non-
  19 * portable.  Because several programs die on MIPS due to alignment
  20 * problems, I decided to implement this handler anyway, though I originally
  21 * didn't intend to do this at all for user code.
  22 *
  23 * For now I enable fixing of address errors by default to make life easier.
  24 * However, I intend to disable this sometime in the future, once the
  25 * alignment problems in user programs have been fixed.  For programmers this
  26 * is the right way to go.
  27 *
  28 * Fixing address errors is a per-process option.  The option is inherited
  29 * across fork(2) and execve(2) calls.  If you really want to use the
  30 * option in your user programs - I strongly discourage the use of the
  31 * software emulation - use the following code in your userland stuff:
  32 *
  33 * #include <sys/sysmips.h>
  34 *
  35 * ...
  36 * sysmips(MIPS_FIXADE, x);
  37 * ...
  38 *
  39 * The argument x is 0 to disable software emulation; any other value enables it.
  40 *
  41 * Below is a little program to play around with this feature.
  42 *
  43 * #include <stdio.h>
  44 * #include <stdlib.h>
  45 * #include <sys/sysmips.h>
  46 * struct foo {
  47 *	   unsigned char bar[8];
  48 * };
  49 *
  50 * int main(int argc, char *argv[])
  51 * {
  52 *	   struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
  53 *	   unsigned int *p = (unsigned int *) (x.bar + 3);
  54 *	   int i;
  55 *
  56 *	   if (argc > 1)
  57 *		   sysmips(MIPS_FIXADE, atoi(argv[1]));
  58 *
  59 *	   printf("*p = %08x\n", *p);
  60 *
  61 *	   *p = 0xdeadface;
  62 *
  63 *	   for (i = 0; i <= 7; i++)
  64 *		   printf("%02x ", x.bar[i]);
  65 *	   printf("\n");
  66 * }
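 *
 * If your C library does not ship the sysmips() wrapper, the same toggle can
 * usually be reached through syscall(2).  A minimal sketch (set_fixade() is
 * just an illustrative name), assuming __NR_sysmips from <sys/syscall.h> and
 * MIPS_FIXADE from <sys/sysmips.h>:
 *
 * #include <sys/syscall.h>
 * #include <sys/sysmips.h>
 * #include <unistd.h>
 *
 * static int set_fixade(int enable)
 * {
 *	   return syscall(__NR_sysmips, MIPS_FIXADE, enable);
 * }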
  67 *
  68 * Coprocessor loads are not supported; I think this case is unimportant
  69 * in practice.
  70 *
  71 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
  72 *	 exception for the R6000.
  73 *	 A store crossing a page boundary might be executed only partially.
  74 *	 Undo the partial store in this case.
  75 */
  76#include <linux/context_tracking.h>
  77#include <linux/mm.h>
  78#include <linux/signal.h>
  79#include <linux/smp.h>
  80#include <linux/sched.h>
  81#include <linux/debugfs.h>
  82#include <linux/perf_event.h>
  83
  84#include <asm/asm.h>
  85#include <asm/branch.h>
  86#include <asm/byteorder.h>
  87#include <asm/cop2.h>
  88#include <asm/debug.h>
  89#include <asm/fpu.h>
  90#include <asm/fpu_emulator.h>
  91#include <asm/inst.h>
  92#include <linux/uaccess.h>
  93
  94#define STR(x)	__STR(x)
  95#define __STR(x)  #x
  96
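/*
 * What to do when an unaligned access is emulated: fix it up silently
 * (the default), deliver a signal instead of fixing it up, or fix it up
 * and also dump the offending state.  With CONFIG_DEBUG_FS the chosen
 * action and a counter of emulated instructions live in the variables
 * below; without it the action is hard-wired to "quiet".
 */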
  97enum {
  98	UNALIGNED_ACTION_QUIET,
  99	UNALIGNED_ACTION_SIGNAL,
 100	UNALIGNED_ACTION_SHOW,
 101};
 102#ifdef CONFIG_DEBUG_FS
 103static u32 unaligned_instructions;
 104static u32 unaligned_action;
 105#else
 106#define unaligned_action UNALIGNED_ACTION_QUIET
 107#endif
 108extern void show_registers(struct pt_regs *regs);
 109
 110#ifdef __BIG_ENDIAN
 111#define     _LoadHW(addr, value, res, type)  \
 112do {                                                        \
 113		__asm__ __volatile__ (".set\tnoat\n"        \
 114			"1:\t"type##_lb("%0", "0(%2)")"\n"  \
 115			"2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
 116			"sll\t%0, 0x8\n\t"                  \
 117			"or\t%0, $1\n\t"                    \
 118			"li\t%1, 0\n"                       \
 119			"3:\t.set\tat\n\t"                  \
 120			".insn\n\t"                         \
 121			".section\t.fixup,\"ax\"\n\t"       \
 122			"4:\tli\t%1, %3\n\t"                \
 123			"j\t3b\n\t"                         \
 124			".previous\n\t"                     \
 125			".section\t__ex_table,\"a\"\n\t"    \
 126			STR(PTR)"\t1b, 4b\n\t"              \
 127			STR(PTR)"\t2b, 4b\n\t"              \
 128			".previous"                         \
 129			: "=&r" (value), "=r" (res)         \
 130			: "r" (addr), "i" (-EFAULT));       \
 131} while(0)
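
/*
 * A rough C-level sketch of what the big-endian _LoadHW() above does, with
 * u8 *p = addr; the .fixup/__ex_table plumbing turns a faulting byte access
 * into "res = -EFAULT" instead of an oops:
 *
 *	value = ((s8)p[0] << 8) | p[1];		sign-extending halfword load
 *	res = 0;				0 on success, -EFAULT on fault
 *
 * The word and doubleword helpers below follow the same byte-at-a-time
 * pattern, or use the lwl/lwr and ldl/ldr pairs where the ISA provides them.
 */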
 132
 133#ifndef CONFIG_CPU_MIPSR6
 134#define     _LoadW(addr, value, res, type)   \
 135do {                                                        \
 136		__asm__ __volatile__ (                      \
 137			"1:\t"type##_lwl("%0", "(%2)")"\n"   \
 138			"2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
 139			"li\t%1, 0\n"                       \
 140			"3:\n\t"                            \
 141			".insn\n\t"                         \
 142			".section\t.fixup,\"ax\"\n\t"       \
 143			"4:\tli\t%1, %3\n\t"                \
 144			"j\t3b\n\t"                         \
 145			".previous\n\t"                     \
 146			".section\t__ex_table,\"a\"\n\t"    \
 147			STR(PTR)"\t1b, 4b\n\t"              \
 148			STR(PTR)"\t2b, 4b\n\t"              \
 149			".previous"                         \
 150			: "=&r" (value), "=r" (res)         \
 151			: "r" (addr), "i" (-EFAULT));       \
 152} while(0)
 153
 154#else
 155/* MIPSR6 has no lwl instruction */
 156#define     _LoadW(addr, value, res, type) \
 157do {                                                        \
 158		__asm__ __volatile__ (			    \
 159			".set\tpush\n"			    \
 160			".set\tnoat\n\t"		    \
 161			"1:"type##_lb("%0", "0(%2)")"\n\t"  \
 162			"2:"type##_lbu("$1", "1(%2)")"\n\t" \
 163			"sll\t%0, 0x8\n\t"		    \
 164			"or\t%0, $1\n\t"		    \
 165			"3:"type##_lbu("$1", "2(%2)")"\n\t" \
 166			"sll\t%0, 0x8\n\t"		    \
 167			"or\t%0, $1\n\t"		    \
 168			"4:"type##_lbu("$1", "3(%2)")"\n\t" \
 169			"sll\t%0, 0x8\n\t"		    \
 170			"or\t%0, $1\n\t"		    \
 171			"li\t%1, 0\n"			    \
 172			".set\tpop\n"			    \
 173			"10:\n\t"			    \
 174			".insn\n\t"			    \
 175			".section\t.fixup,\"ax\"\n\t"	    \
 176			"11:\tli\t%1, %3\n\t"		    \
 177			"j\t10b\n\t"			    \
 178			".previous\n\t"			    \
 179			".section\t__ex_table,\"a\"\n\t"    \
 180			STR(PTR)"\t1b, 11b\n\t"		    \
 181			STR(PTR)"\t2b, 11b\n\t"		    \
 182			STR(PTR)"\t3b, 11b\n\t"		    \
 183			STR(PTR)"\t4b, 11b\n\t"		    \
 184			".previous"			    \
 185			: "=&r" (value), "=r" (res)	    \
 186			: "r" (addr), "i" (-EFAULT));       \
 187} while(0)
 188
 189#endif /* CONFIG_CPU_MIPSR6 */
 190
 191#define     _LoadHWU(addr, value, res, type) \
 192do {                                                        \
 193		__asm__ __volatile__ (                      \
 194			".set\tnoat\n"                      \
 195			"1:\t"type##_lbu("%0", "0(%2)")"\n" \
 196			"2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
 197			"sll\t%0, 0x8\n\t"                  \
 198			"or\t%0, $1\n\t"                    \
 199			"li\t%1, 0\n"                       \
 200			"3:\n\t"                            \
 201			".insn\n\t"                         \
 202			".set\tat\n\t"                      \
 203			".section\t.fixup,\"ax\"\n\t"       \
 204			"4:\tli\t%1, %3\n\t"                \
 205			"j\t3b\n\t"                         \
 206			".previous\n\t"                     \
 207			".section\t__ex_table,\"a\"\n\t"    \
 208			STR(PTR)"\t1b, 4b\n\t"              \
 209			STR(PTR)"\t2b, 4b\n\t"              \
 210			".previous"                         \
 211			: "=&r" (value), "=r" (res)         \
 212			: "r" (addr), "i" (-EFAULT));       \
 213} while(0)
 214
 215#ifndef CONFIG_CPU_MIPSR6
 216#define     _LoadWU(addr, value, res, type)  \
 217do {                                                        \
 218		__asm__ __volatile__ (                      \
 219			"1:\t"type##_lwl("%0", "(%2)")"\n"  \
 220			"2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
 221			"dsll\t%0, %0, 32\n\t"              \
 222			"dsrl\t%0, %0, 32\n\t"              \
 223			"li\t%1, 0\n"                       \
 224			"3:\n\t"                            \
 225			".insn\n\t"                         \
 226			"\t.section\t.fixup,\"ax\"\n\t"     \
 227			"4:\tli\t%1, %3\n\t"                \
 228			"j\t3b\n\t"                         \
 229			".previous\n\t"                     \
 230			".section\t__ex_table,\"a\"\n\t"    \
 231			STR(PTR)"\t1b, 4b\n\t"              \
 232			STR(PTR)"\t2b, 4b\n\t"              \
 233			".previous"                         \
 234			: "=&r" (value), "=r" (res)         \
 235			: "r" (addr), "i" (-EFAULT));       \
 236} while(0)
 237
 238#define     _LoadDW(addr, value, res)  \
 239do {                                                        \
 240		__asm__ __volatile__ (                      \
 241			"1:\tldl\t%0, (%2)\n"               \
 242			"2:\tldr\t%0, 7(%2)\n\t"            \
 243			"li\t%1, 0\n"                       \
 244			"3:\n\t"                            \
 245			".insn\n\t"                         \
 246			"\t.section\t.fixup,\"ax\"\n\t"     \
 247			"4:\tli\t%1, %3\n\t"                \
 248			"j\t3b\n\t"                         \
 249			".previous\n\t"                     \
 250			".section\t__ex_table,\"a\"\n\t"    \
 251			STR(PTR)"\t1b, 4b\n\t"              \
 252			STR(PTR)"\t2b, 4b\n\t"              \
 253			".previous"                         \
 254			: "=&r" (value), "=r" (res)         \
 255			: "r" (addr), "i" (-EFAULT));       \
 256} while(0)
 257
 258#else
 259/* MIPSR6 has no lwl and ldl instructions */
 260#define	    _LoadWU(addr, value, res, type) \
 261do {                                                        \
 262		__asm__ __volatile__ (			    \
 263			".set\tpush\n\t"		    \
 264			".set\tnoat\n\t"		    \
 265			"1:"type##_lbu("%0", "0(%2)")"\n\t" \
 266			"2:"type##_lbu("$1", "1(%2)")"\n\t" \
 267			"sll\t%0, 0x8\n\t"		    \
 268			"or\t%0, $1\n\t"		    \
 269			"3:"type##_lbu("$1", "2(%2)")"\n\t" \
 270			"sll\t%0, 0x8\n\t"		    \
 271			"or\t%0, $1\n\t"		    \
 272			"4:"type##_lbu("$1", "3(%2)")"\n\t" \
 273			"sll\t%0, 0x8\n\t"		    \
 274			"or\t%0, $1\n\t"		    \
 275			"li\t%1, 0\n"			    \
 276			".set\tpop\n"			    \
 277			"10:\n\t"			    \
 278			".insn\n\t"			    \
 279			".section\t.fixup,\"ax\"\n\t"	    \
 280			"11:\tli\t%1, %3\n\t"		    \
 281			"j\t10b\n\t"			    \
 282			".previous\n\t"			    \
 283			".section\t__ex_table,\"a\"\n\t"    \
 284			STR(PTR)"\t1b, 11b\n\t"		    \
 285			STR(PTR)"\t2b, 11b\n\t"		    \
 286			STR(PTR)"\t3b, 11b\n\t"		    \
 287			STR(PTR)"\t4b, 11b\n\t"		    \
 288			".previous"			    \
 289			: "=&r" (value), "=r" (res)	    \
 290			: "r" (addr), "i" (-EFAULT));       \
 291} while(0)
 292
 293#define     _LoadDW(addr, value, res)  \
 294do {                                                        \
 295		__asm__ __volatile__ (			    \
 296			".set\tpush\n\t"		    \
 297			".set\tnoat\n\t"		    \
 298			"1:lb\t%0, 0(%2)\n\t"    	    \
 299			"2:lbu\t $1, 1(%2)\n\t"   	    \
 300			"dsll\t%0, 0x8\n\t"		    \
 301			"or\t%0, $1\n\t"		    \
 302			"3:lbu\t$1, 2(%2)\n\t"   	    \
 303			"dsll\t%0, 0x8\n\t"		    \
 304			"or\t%0, $1\n\t"		    \
 305			"4:lbu\t$1, 3(%2)\n\t"   	    \
 306			"dsll\t%0, 0x8\n\t"		    \
 307			"or\t%0, $1\n\t"		    \
 308			"5:lbu\t$1, 4(%2)\n\t"   	    \
 309			"dsll\t%0, 0x8\n\t"		    \
 310			"or\t%0, $1\n\t"		    \
 311			"6:lbu\t$1, 5(%2)\n\t"   	    \
 312			"dsll\t%0, 0x8\n\t"		    \
 313			"or\t%0, $1\n\t"		    \
 314			"7:lbu\t$1, 6(%2)\n\t"   	    \
 315			"dsll\t%0, 0x8\n\t"		    \
 316			"or\t%0, $1\n\t"		    \
 317			"8:lbu\t$1, 7(%2)\n\t"   	    \
 318			"dsll\t%0, 0x8\n\t"		    \
 319			"or\t%0, $1\n\t"		    \
 320			"li\t%1, 0\n"			    \
 321			".set\tpop\n\t"			    \
 322			"10:\n\t"			    \
 323			".insn\n\t"			    \
 324			".section\t.fixup,\"ax\"\n\t"	    \
 325			"11:\tli\t%1, %3\n\t"		    \
 326			"j\t10b\n\t"			    \
 327			".previous\n\t"			    \
 328			".section\t__ex_table,\"a\"\n\t"    \
 329			STR(PTR)"\t1b, 11b\n\t"		    \
 330			STR(PTR)"\t2b, 11b\n\t"		    \
 331			STR(PTR)"\t3b, 11b\n\t"		    \
 332			STR(PTR)"\t4b, 11b\n\t"		    \
 333			STR(PTR)"\t5b, 11b\n\t"		    \
 334			STR(PTR)"\t6b, 11b\n\t"		    \
 335			STR(PTR)"\t7b, 11b\n\t"		    \
 336			STR(PTR)"\t8b, 11b\n\t"		    \
 337			".previous"			    \
 338			: "=&r" (value), "=r" (res)	    \
 339			: "r" (addr), "i" (-EFAULT));       \
 340} while(0)
 341
 342#endif /* CONFIG_CPU_MIPSR6 */
 343
 344
 345#define     _StoreHW(addr, value, res, type) \
 346do {                                                        \
 347		__asm__ __volatile__ (                      \
 348			".set\tnoat\n"                      \
 349			"1:\t"type##_sb("%1", "1(%2)")"\n"  \
 350			"srl\t$1, %1, 0x8\n"                \
 351			"2:\t"type##_sb("$1", "0(%2)")"\n"  \
 352			".set\tat\n\t"                      \
 353			"li\t%0, 0\n"                       \
 354			"3:\n\t"                            \
 355			".insn\n\t"                         \
 356			".section\t.fixup,\"ax\"\n\t"       \
 357			"4:\tli\t%0, %3\n\t"                \
 358			"j\t3b\n\t"                         \
 359			".previous\n\t"                     \
 360			".section\t__ex_table,\"a\"\n\t"    \
 361			STR(PTR)"\t1b, 4b\n\t"              \
 362			STR(PTR)"\t2b, 4b\n\t"              \
 363			".previous"                         \
 364			: "=r" (res)                        \
 365			: "r" (value), "r" (addr), "i" (-EFAULT));\
 366} while(0)
 367
 368#ifndef CONFIG_CPU_MIPSR6
 369#define     _StoreW(addr, value, res, type)  \
 370do {                                                        \
 371		__asm__ __volatile__ (                      \
 372			"1:\t"type##_swl("%1", "(%2)")"\n"  \
 373			"2:\t"type##_swr("%1", "3(%2)")"\n\t"\
 374			"li\t%0, 0\n"                       \
 375			"3:\n\t"                            \
 376			".insn\n\t"                         \
 377			".section\t.fixup,\"ax\"\n\t"       \
 378			"4:\tli\t%0, %3\n\t"                \
 379			"j\t3b\n\t"                         \
 380			".previous\n\t"                     \
 381			".section\t__ex_table,\"a\"\n\t"    \
 382			STR(PTR)"\t1b, 4b\n\t"              \
 383			STR(PTR)"\t2b, 4b\n\t"              \
 384			".previous"                         \
 385		: "=r" (res)                                \
 386		: "r" (value), "r" (addr), "i" (-EFAULT));  \
 387} while(0)
 388
 389#define     _StoreDW(addr, value, res) \
 390do {                                                        \
 391		__asm__ __volatile__ (                      \
 392			"1:\tsdl\t%1,(%2)\n"                \
 393			"2:\tsdr\t%1, 7(%2)\n\t"            \
 394			"li\t%0, 0\n"                       \
 395			"3:\n\t"                            \
 396			".insn\n\t"                         \
 397			".section\t.fixup,\"ax\"\n\t"       \
 398			"4:\tli\t%0, %3\n\t"                \
 399			"j\t3b\n\t"                         \
 400			".previous\n\t"                     \
 401			".section\t__ex_table,\"a\"\n\t"    \
 402			STR(PTR)"\t1b, 4b\n\t"              \
 403			STR(PTR)"\t2b, 4b\n\t"              \
 404			".previous"                         \
 405		: "=r" (res)                                \
 406		: "r" (value), "r" (addr), "i" (-EFAULT));  \
 407} while(0)
 408
 409#else
 410/* MIPSR6 has no swl and sdl instructions */
 411#define     _StoreW(addr, value, res, type)  \
 412do {                                                        \
 413		__asm__ __volatile__ (                      \
 414			".set\tpush\n\t"		    \
 415			".set\tnoat\n\t"		    \
 416			"1:"type##_sb("%1", "3(%2)")"\n\t"  \
 417			"srl\t$1, %1, 0x8\n\t"		    \
 418			"2:"type##_sb("$1", "2(%2)")"\n\t"  \
 419			"srl\t$1, $1,  0x8\n\t"		    \
 420			"3:"type##_sb("$1", "1(%2)")"\n\t"  \
 421			"srl\t$1, $1, 0x8\n\t"		    \
 422			"4:"type##_sb("$1", "0(%2)")"\n\t"  \
 423			".set\tpop\n\t"			    \
 424			"li\t%0, 0\n"			    \
 425			"10:\n\t"			    \
 426			".insn\n\t"			    \
 427			".section\t.fixup,\"ax\"\n\t"	    \
 428			"11:\tli\t%0, %3\n\t"		    \
 429			"j\t10b\n\t"			    \
 430			".previous\n\t"			    \
 431			".section\t__ex_table,\"a\"\n\t"    \
 432			STR(PTR)"\t1b, 11b\n\t"		    \
 433			STR(PTR)"\t2b, 11b\n\t"		    \
 434			STR(PTR)"\t3b, 11b\n\t"		    \
 435			STR(PTR)"\t4b, 11b\n\t"		    \
 436			".previous"			    \
 437		: "=&r" (res)			    	    \
 438		: "r" (value), "r" (addr), "i" (-EFAULT)    \
 439		: "memory");                                \
 440} while(0)
 441
 442#define     _StoreDW(addr, value, res) \
 443do {                                                        \
 444		__asm__ __volatile__ (                      \
 445			".set\tpush\n\t"		    \
 446			".set\tnoat\n\t"		    \
 447			"1:sb\t%1, 7(%2)\n\t"    	    \
 448			"dsrl\t$1, %1, 0x8\n\t"		    \
 449			"2:sb\t$1, 6(%2)\n\t"    	    \
 450			"dsrl\t$1, $1, 0x8\n\t"		    \
 451			"3:sb\t$1, 5(%2)\n\t"    	    \
 452			"dsrl\t$1, $1, 0x8\n\t"		    \
 453			"4:sb\t$1, 4(%2)\n\t"    	    \
 454			"dsrl\t$1, $1, 0x8\n\t"		    \
 455			"5:sb\t$1, 3(%2)\n\t"    	    \
 456			"dsrl\t$1, $1, 0x8\n\t"		    \
 457			"6:sb\t$1, 2(%2)\n\t"    	    \
 458			"dsrl\t$1, $1, 0x8\n\t"		    \
 459			"7:sb\t$1, 1(%2)\n\t"    	    \
 460			"dsrl\t$1, $1, 0x8\n\t"		    \
 461			"8:sb\t$1, 0(%2)\n\t"    	    \
 462			"dsrl\t$1, $1, 0x8\n\t"		    \
 463			".set\tpop\n\t"			    \
 464			"li\t%0, 0\n"			    \
 465			"10:\n\t"			    \
 466			".insn\n\t"			    \
 467			".section\t.fixup,\"ax\"\n\t"	    \
 468			"11:\tli\t%0, %3\n\t"		    \
 469			"j\t10b\n\t"			    \
 470			".previous\n\t"			    \
 471			".section\t__ex_table,\"a\"\n\t"    \
 472			STR(PTR)"\t1b, 11b\n\t"		    \
 473			STR(PTR)"\t2b, 11b\n\t"		    \
 474			STR(PTR)"\t3b, 11b\n\t"		    \
 475			STR(PTR)"\t4b, 11b\n\t"		    \
 476			STR(PTR)"\t5b, 11b\n\t"		    \
 477			STR(PTR)"\t6b, 11b\n\t"		    \
 478			STR(PTR)"\t7b, 11b\n\t"		    \
 479			STR(PTR)"\t8b, 11b\n\t"		    \
 480			".previous"			    \
 481		: "=&r" (res)			    	    \
 482		: "r" (value), "r" (addr), "i" (-EFAULT)    \
 483		: "memory");                                \
 484} while(0)
 485
 486#endif /* CONFIG_CPU_MIPSR6 */
 487
 488#else /* __BIG_ENDIAN */
 489
 490#define     _LoadHW(addr, value, res, type)  \
 491do {                                                        \
 492		__asm__ __volatile__ (".set\tnoat\n"        \
 493			"1:\t"type##_lb("%0", "1(%2)")"\n"  \
 494			"2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
 495			"sll\t%0, 0x8\n\t"                  \
 496			"or\t%0, $1\n\t"                    \
 497			"li\t%1, 0\n"                       \
 498			"3:\t.set\tat\n\t"                  \
 499			".insn\n\t"                         \
 500			".section\t.fixup,\"ax\"\n\t"       \
 501			"4:\tli\t%1, %3\n\t"                \
 502			"j\t3b\n\t"                         \
 503			".previous\n\t"                     \
 504			".section\t__ex_table,\"a\"\n\t"    \
 505			STR(PTR)"\t1b, 4b\n\t"              \
 506			STR(PTR)"\t2b, 4b\n\t"              \
 507			".previous"                         \
 508			: "=&r" (value), "=r" (res)         \
 509			: "r" (addr), "i" (-EFAULT));       \
 510} while(0)
 511
 512#ifndef CONFIG_CPU_MIPSR6
 513#define     _LoadW(addr, value, res, type)   \
 514do {                                                        \
 515		__asm__ __volatile__ (                      \
 516			"1:\t"type##_lwl("%0", "3(%2)")"\n" \
 517			"2:\t"type##_lwr("%0", "(%2)")"\n\t"\
 518			"li\t%1, 0\n"                       \
 519			"3:\n\t"                            \
 520			".insn\n\t"                         \
 521			".section\t.fixup,\"ax\"\n\t"       \
 522			"4:\tli\t%1, %3\n\t"                \
 523			"j\t3b\n\t"                         \
 524			".previous\n\t"                     \
 525			".section\t__ex_table,\"a\"\n\t"    \
 526			STR(PTR)"\t1b, 4b\n\t"              \
 527			STR(PTR)"\t2b, 4b\n\t"              \
 528			".previous"                         \
 529			: "=&r" (value), "=r" (res)         \
 530			: "r" (addr), "i" (-EFAULT));       \
 531} while(0)
 532
 533#else
 534/* MIPSR6 has no lwl instruction */
 535#define     _LoadW(addr, value, res, type) \
 536do {                                                        \
 537		__asm__ __volatile__ (			    \
 538			".set\tpush\n"			    \
 539			".set\tnoat\n\t"		    \
 540			"1:"type##_lb("%0", "3(%2)")"\n\t"  \
 541			"2:"type##_lbu("$1", "2(%2)")"\n\t" \
 542			"sll\t%0, 0x8\n\t"		    \
 543			"or\t%0, $1\n\t"		    \
 544			"3:"type##_lbu("$1", "1(%2)")"\n\t" \
 545			"sll\t%0, 0x8\n\t"		    \
 546			"or\t%0, $1\n\t"		    \
 547			"4:"type##_lbu("$1", "0(%2)")"\n\t" \
 548			"sll\t%0, 0x8\n\t"		    \
 549			"or\t%0, $1\n\t"		    \
 550			"li\t%1, 0\n"			    \
 551			".set\tpop\n"			    \
 552			"10:\n\t"			    \
 553			".insn\n\t"			    \
 554			".section\t.fixup,\"ax\"\n\t"	    \
 555			"11:\tli\t%1, %3\n\t"		    \
 556			"j\t10b\n\t"			    \
 557			".previous\n\t"			    \
 558			".section\t__ex_table,\"a\"\n\t"    \
 559			STR(PTR)"\t1b, 11b\n\t"		    \
 560			STR(PTR)"\t2b, 11b\n\t"		    \
 561			STR(PTR)"\t3b, 11b\n\t"		    \
 562			STR(PTR)"\t4b, 11b\n\t"		    \
 563			".previous"			    \
 564			: "=&r" (value), "=r" (res)	    \
 565			: "r" (addr), "i" (-EFAULT));       \
 566} while(0)
 567
 568#endif /* CONFIG_CPU_MIPSR6 */
 569
 570
 571#define     _LoadHWU(addr, value, res, type) \
 572do {                                                        \
 573		__asm__ __volatile__ (                      \
 574			".set\tnoat\n"                      \
 575			"1:\t"type##_lbu("%0", "1(%2)")"\n" \
 576			"2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
 577			"sll\t%0, 0x8\n\t"                  \
 578			"or\t%0, $1\n\t"                    \
 579			"li\t%1, 0\n"                       \
 580			"3:\n\t"                            \
 581			".insn\n\t"                         \
 582			".set\tat\n\t"                      \
 583			".section\t.fixup,\"ax\"\n\t"       \
 584			"4:\tli\t%1, %3\n\t"                \
 585			"j\t3b\n\t"                         \
 586			".previous\n\t"                     \
 587			".section\t__ex_table,\"a\"\n\t"    \
 588			STR(PTR)"\t1b, 4b\n\t"              \
 589			STR(PTR)"\t2b, 4b\n\t"              \
 590			".previous"                         \
 591			: "=&r" (value), "=r" (res)         \
 592			: "r" (addr), "i" (-EFAULT));       \
 593} while(0)
 594
 595#ifndef CONFIG_CPU_MIPSR6
 596#define     _LoadWU(addr, value, res, type)  \
 597do {                                                        \
 598		__asm__ __volatile__ (                      \
 599			"1:\t"type##_lwl("%0", "3(%2)")"\n" \
 600			"2:\t"type##_lwr("%0", "(%2)")"\n\t"\
 601			"dsll\t%0, %0, 32\n\t"              \
 602			"dsrl\t%0, %0, 32\n\t"              \
 603			"li\t%1, 0\n"                       \
 604			"3:\n\t"                            \
 605			".insn\n\t"                         \
 606			"\t.section\t.fixup,\"ax\"\n\t"     \
 607			"4:\tli\t%1, %3\n\t"                \
 608			"j\t3b\n\t"                         \
 609			".previous\n\t"                     \
 610			".section\t__ex_table,\"a\"\n\t"    \
 611			STR(PTR)"\t1b, 4b\n\t"              \
 612			STR(PTR)"\t2b, 4b\n\t"              \
 613			".previous"                         \
 614			: "=&r" (value), "=r" (res)         \
 615			: "r" (addr), "i" (-EFAULT));       \
 616} while(0)
 617
 618#define     _LoadDW(addr, value, res)  \
 619do {                                                        \
 620		__asm__ __volatile__ (                      \
 621			"1:\tldl\t%0, 7(%2)\n"              \
 622			"2:\tldr\t%0, (%2)\n\t"             \
 623			"li\t%1, 0\n"                       \
 624			"3:\n\t"                            \
 625			".insn\n\t"                         \
 626			"\t.section\t.fixup,\"ax\"\n\t"     \
 627			"4:\tli\t%1, %3\n\t"                \
 628			"j\t3b\n\t"                         \
 629			".previous\n\t"                     \
 630			".section\t__ex_table,\"a\"\n\t"    \
 631			STR(PTR)"\t1b, 4b\n\t"              \
 632			STR(PTR)"\t2b, 4b\n\t"              \
 633			".previous"                         \
 634			: "=&r" (value), "=r" (res)         \
 635			: "r" (addr), "i" (-EFAULT));       \
 636} while(0)
 637
 638#else
 639/* MIPSR6 has no lwl and ldl instructions */
 640#define	    _LoadWU(addr, value, res, type) \
 641do {                                                        \
 642		__asm__ __volatile__ (			    \
 643			".set\tpush\n\t"		    \
 644			".set\tnoat\n\t"		    \
 645			"1:"type##_lbu("%0", "3(%2)")"\n\t" \
 646			"2:"type##_lbu("$1", "2(%2)")"\n\t" \
 647			"sll\t%0, 0x8\n\t"		    \
 648			"or\t%0, $1\n\t"		    \
 649			"3:"type##_lbu("$1", "1(%2)")"\n\t" \
 650			"sll\t%0, 0x8\n\t"		    \
 651			"or\t%0, $1\n\t"		    \
 652			"4:"type##_lbu("$1", "0(%2)")"\n\t" \
 653			"sll\t%0, 0x8\n\t"		    \
 654			"or\t%0, $1\n\t"		    \
 655			"li\t%1, 0\n"			    \
 656			".set\tpop\n"			    \
 657			"10:\n\t"			    \
 658			".insn\n\t"			    \
 659			".section\t.fixup,\"ax\"\n\t"	    \
 660			"11:\tli\t%1, %3\n\t"		    \
 661			"j\t10b\n\t"			    \
 662			".previous\n\t"			    \
 663			".section\t__ex_table,\"a\"\n\t"    \
 664			STR(PTR)"\t1b, 11b\n\t"		    \
 665			STR(PTR)"\t2b, 11b\n\t"		    \
 666			STR(PTR)"\t3b, 11b\n\t"		    \
 667			STR(PTR)"\t4b, 11b\n\t"		    \
 668			".previous"			    \
 669			: "=&r" (value), "=r" (res)	    \
 670			: "r" (addr), "i" (-EFAULT));       \
 671} while(0)
 672
 673#define     _LoadDW(addr, value, res)  \
 674do {                                                        \
 675		__asm__ __volatile__ (			    \
 676			".set\tpush\n\t"		    \
 677			".set\tnoat\n\t"		    \
 678			"1:lb\t%0, 7(%2)\n\t"    	    \
 679			"2:lbu\t$1, 6(%2)\n\t"   	    \
 680			"dsll\t%0, 0x8\n\t"		    \
 681			"or\t%0, $1\n\t"		    \
 682			"3:lbu\t$1, 5(%2)\n\t"   	    \
 683			"dsll\t%0, 0x8\n\t"		    \
 684			"or\t%0, $1\n\t"		    \
 685			"4:lbu\t$1, 4(%2)\n\t"   	    \
 686			"dsll\t%0, 0x8\n\t"		    \
 687			"or\t%0, $1\n\t"		    \
 688			"5:lbu\t$1, 3(%2)\n\t"   	    \
 689			"dsll\t%0, 0x8\n\t"		    \
 690			"or\t%0, $1\n\t"		    \
 691			"6:lbu\t$1, 2(%2)\n\t"   	    \
 692			"dsll\t%0, 0x8\n\t"		    \
 693			"or\t%0, $1\n\t"		    \
 694			"7:lbu\t$1, 1(%2)\n\t"   	    \
 695			"dsll\t%0, 0x8\n\t"		    \
 696			"or\t%0, $1\n\t"		    \
 697			"8:lbu\t$1, 0(%2)\n\t"   	    \
 698			"dsll\t%0, 0x8\n\t"		    \
 699			"or\t%0, $1\n\t"		    \
 700			"li\t%1, 0\n"			    \
 701			".set\tpop\n\t"			    \
 702			"10:\n\t"			    \
 703			".insn\n\t"			    \
 704			".section\t.fixup,\"ax\"\n\t"	    \
 705			"11:\tli\t%1, %3\n\t"		    \
 706			"j\t10b\n\t"			    \
 707			".previous\n\t"			    \
 708			".section\t__ex_table,\"a\"\n\t"    \
 709			STR(PTR)"\t1b, 11b\n\t"		    \
 710			STR(PTR)"\t2b, 11b\n\t"		    \
 711			STR(PTR)"\t3b, 11b\n\t"		    \
 712			STR(PTR)"\t4b, 11b\n\t"		    \
 713			STR(PTR)"\t5b, 11b\n\t"		    \
 714			STR(PTR)"\t6b, 11b\n\t"		    \
 715			STR(PTR)"\t7b, 11b\n\t"		    \
 716			STR(PTR)"\t8b, 11b\n\t"		    \
 717			".previous"			    \
 718			: "=&r" (value), "=r" (res)	    \
 719			: "r" (addr), "i" (-EFAULT));       \
 720} while(0)
 721#endif /* CONFIG_CPU_MIPSR6 */
 722
 723#define     _StoreHW(addr, value, res, type) \
 724do {                                                        \
 725		__asm__ __volatile__ (                      \
 726			".set\tnoat\n"                      \
 727			"1:\t"type##_sb("%1", "0(%2)")"\n"  \
 728			"srl\t$1,%1, 0x8\n"                 \
 729			"2:\t"type##_sb("$1", "1(%2)")"\n"  \
 730			".set\tat\n\t"                      \
 731			"li\t%0, 0\n"                       \
 732			"3:\n\t"                            \
 733			".insn\n\t"                         \
 734			".section\t.fixup,\"ax\"\n\t"       \
 735			"4:\tli\t%0, %3\n\t"                \
 736			"j\t3b\n\t"                         \
 737			".previous\n\t"                     \
 738			".section\t__ex_table,\"a\"\n\t"    \
 739			STR(PTR)"\t1b, 4b\n\t"              \
 740			STR(PTR)"\t2b, 4b\n\t"              \
 741			".previous"                         \
 742			: "=r" (res)                        \
 743			: "r" (value), "r" (addr), "i" (-EFAULT));\
 744} while(0)
 745
 746#ifndef CONFIG_CPU_MIPSR6
 747#define     _StoreW(addr, value, res, type)  \
 748do {                                                        \
 749		__asm__ __volatile__ (                      \
 750			"1:\t"type##_swl("%1", "3(%2)")"\n" \
 751			"2:\t"type##_swr("%1", "(%2)")"\n\t"\
 752			"li\t%0, 0\n"                       \
 753			"3:\n\t"                            \
 754			".insn\n\t"                         \
 755			".section\t.fixup,\"ax\"\n\t"       \
 756			"4:\tli\t%0, %3\n\t"                \
 757			"j\t3b\n\t"                         \
 758			".previous\n\t"                     \
 759			".section\t__ex_table,\"a\"\n\t"    \
 760			STR(PTR)"\t1b, 4b\n\t"              \
 761			STR(PTR)"\t2b, 4b\n\t"              \
 762			".previous"                         \
 763		: "=r" (res)                                \
 764		: "r" (value), "r" (addr), "i" (-EFAULT));  \
 765} while(0)
 766
 767#define     _StoreDW(addr, value, res) \
 768do {                                                        \
 769		__asm__ __volatile__ (                      \
 770			"1:\tsdl\t%1, 7(%2)\n"              \
 771			"2:\tsdr\t%1, (%2)\n\t"             \
 772			"li\t%0, 0\n"                       \
 773			"3:\n\t"                            \
 774			".insn\n\t"                         \
 775			".section\t.fixup,\"ax\"\n\t"       \
 776			"4:\tli\t%0, %3\n\t"                \
 777			"j\t3b\n\t"                         \
 778			".previous\n\t"                     \
 779			".section\t__ex_table,\"a\"\n\t"    \
 780			STR(PTR)"\t1b, 4b\n\t"              \
 781			STR(PTR)"\t2b, 4b\n\t"              \
 782			".previous"                         \
 783		: "=r" (res)                                \
 784		: "r" (value), "r" (addr), "i" (-EFAULT));  \
 785} while(0)
 786
 787#else
 788/* MIPSR6 has no swl and sdl instructions */
 789#define     _StoreW(addr, value, res, type)  \
 790do {                                                        \
 791		__asm__ __volatile__ (                      \
 792			".set\tpush\n\t"		    \
 793			".set\tnoat\n\t"		    \
 794			"1:"type##_sb("%1", "0(%2)")"\n\t"  \
 795			"srl\t$1, %1, 0x8\n\t"		    \
 796			"2:"type##_sb("$1", "1(%2)")"\n\t"  \
 797			"srl\t$1, $1,  0x8\n\t"		    \
 798			"3:"type##_sb("$1", "2(%2)")"\n\t"  \
 799			"srl\t$1, $1, 0x8\n\t"		    \
 800			"4:"type##_sb("$1", "3(%2)")"\n\t"  \
 801			".set\tpop\n\t"			    \
 802			"li\t%0, 0\n"			    \
 803			"10:\n\t"			    \
 804			".insn\n\t"			    \
 805			".section\t.fixup,\"ax\"\n\t"	    \
 806			"11:\tli\t%0, %3\n\t"		    \
 807			"j\t10b\n\t"			    \
 808			".previous\n\t"			    \
 809			".section\t__ex_table,\"a\"\n\t"    \
 810			STR(PTR)"\t1b, 11b\n\t"		    \
 811			STR(PTR)"\t2b, 11b\n\t"		    \
 812			STR(PTR)"\t3b, 11b\n\t"		    \
 813			STR(PTR)"\t4b, 11b\n\t"		    \
 814			".previous"			    \
 815		: "=&r" (res)			    	    \
 816		: "r" (value), "r" (addr), "i" (-EFAULT)    \
 817		: "memory");                                \
 818} while(0)
 819
 820#define     _StoreDW(addr, value, res) \
 821do {                                                        \
 822		__asm__ __volatile__ (                      \
 823			".set\tpush\n\t"		    \
 824			".set\tnoat\n\t"		    \
 825			"1:sb\t%1, 0(%2)\n\t"    	    \
 826			"dsrl\t$1, %1, 0x8\n\t"		    \
 827			"2:sb\t$1, 1(%2)\n\t"    	    \
 828			"dsrl\t$1, $1, 0x8\n\t"		    \
 829			"3:sb\t$1, 2(%2)\n\t"    	    \
 830			"dsrl\t$1, $1, 0x8\n\t"		    \
 831			"4:sb\t$1, 3(%2)\n\t"    	    \
 832			"dsrl\t$1, $1, 0x8\n\t"		    \
 833			"5:sb\t$1, 4(%2)\n\t"    	    \
 834			"dsrl\t$1, $1, 0x8\n\t"		    \
 835			"6:sb\t$1, 5(%2)\n\t"    	    \
 836			"dsrl\t$1, $1, 0x8\n\t"		    \
 837			"7:sb\t$1, 6(%2)\n\t"    	    \
 838			"dsrl\t$1, $1, 0x8\n\t"		    \
 839			"8:sb\t$1, 7(%2)\n\t"    	    \
 840			"dsrl\t$1, $1, 0x8\n\t"		    \
 841			".set\tpop\n\t"			    \
 842			"li\t%0, 0\n"			    \
 843			"10:\n\t"			    \
 844			".insn\n\t"			    \
 845			".section\t.fixup,\"ax\"\n\t"	    \
 846			"11:\tli\t%0, %3\n\t"		    \
 847			"j\t10b\n\t"			    \
 848			".previous\n\t"			    \
 849			".section\t__ex_table,\"a\"\n\t"    \
 850			STR(PTR)"\t1b, 11b\n\t"		    \
 851			STR(PTR)"\t2b, 11b\n\t"		    \
 852			STR(PTR)"\t3b, 11b\n\t"		    \
 853			STR(PTR)"\t4b, 11b\n\t"		    \
 854			STR(PTR)"\t5b, 11b\n\t"		    \
 855			STR(PTR)"\t6b, 11b\n\t"		    \
 856			STR(PTR)"\t7b, 11b\n\t"		    \
 857			STR(PTR)"\t8b, 11b\n\t"		    \
 858			".previous"			    \
 859		: "=&r" (res)			    	    \
 860		: "r" (value), "r" (addr), "i" (-EFAULT)    \
 861		: "memory");                                \
 862} while(0)
 863
 864#endif /* CONFIG_CPU_MIPSR6 */
 865#endif /* __BIG_ENDIAN */
 866
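/*
 * Convenience wrappers around the helpers above.  The plain forms expand to
 * kernel-mode accesses; the ...E forms pass "user" so that, on EVA kernels,
 * the user-mode (EVA) load/store variants are emitted instead.
 */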
 867#define LoadHWU(addr, value, res)	_LoadHWU(addr, value, res, kernel)
 868#define LoadHWUE(addr, value, res)	_LoadHWU(addr, value, res, user)
 869#define LoadWU(addr, value, res)	_LoadWU(addr, value, res, kernel)
 870#define LoadWUE(addr, value, res)	_LoadWU(addr, value, res, user)
 871#define LoadHW(addr, value, res)	_LoadHW(addr, value, res, kernel)
 872#define LoadHWE(addr, value, res)	_LoadHW(addr, value, res, user)
 873#define LoadW(addr, value, res)		_LoadW(addr, value, res, kernel)
 874#define LoadWE(addr, value, res)	_LoadW(addr, value, res, user)
 875#define LoadDW(addr, value, res)	_LoadDW(addr, value, res)
 876
 877#define StoreHW(addr, value, res)	_StoreHW(addr, value, res, kernel)
 878#define StoreHWE(addr, value, res)	_StoreHW(addr, value, res, user)
 879#define StoreW(addr, value, res)	_StoreW(addr, value, res, kernel)
 880#define StoreWE(addr, value, res)	_StoreW(addr, value, res, user)
 881#define StoreDW(addr, value, res)	_StoreDW(addr, value, res)
 882
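/*
 * Emulate a single load/store instruction that took an address error
 * exception: decode the instruction word at @pc, perform the access to
 * @addr with the exception-protected helpers above, update the target
 * register and EPC, and fall back to SIGBUS/SIGSEGV/SIGILL when the
 * access or the instruction cannot be handled.
 */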
 883static void emulate_load_store_insn(struct pt_regs *regs,
 884	void __user *addr, unsigned int __user *pc)
 885{
 886	union mips_instruction insn;
 887	unsigned long value;
 888	unsigned int res, preempted;
 889	unsigned long origpc;
 890	unsigned long orig31;
 891	void __user *fault_addr = NULL;
 892#ifdef	CONFIG_EVA
 893	mm_segment_t seg;
 894#endif
 895	union fpureg *fpr;
 896	enum msa_2b_fmt df;
 897	unsigned int wd;
 898	origpc = (unsigned long)pc;
 899	orig31 = regs->regs[31];
 900
 901	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 902
 903	/*
 904	 * This load never faults.
 905	 */
 906	__get_user(insn.word, pc);
 907
 908	switch (insn.i_format.opcode) {
 909		/*
 910		 * These are instructions that a compiler doesn't generate.  We
 911		 * can therefore assume that the code is MIPS-aware and
 912		 * really buggy.  Emulating these instructions would break the
 913		 * semantics anyway.
 914		 */
 915	case ll_op:
 916	case lld_op:
 917	case sc_op:
 918	case scd_op:
 919
 920		/*
 921		 * For these instructions the only way to create an address
 922		 * error is an attempted access to kernel/supervisor address
 923		 * space.
 924		 */
 925	case ldl_op:
 926	case ldr_op:
 927	case lwl_op:
 928	case lwr_op:
 929	case sdl_op:
 930	case sdr_op:
 931	case swl_op:
 932	case swr_op:
 933	case lb_op:
 934	case lbu_op:
 935	case sb_op:
 936		goto sigbus;
 937
 938		/*
 939		 * The remaining opcodes are the ones that are really of
 940		 * interest.
 941		 */
 942	case spec3_op:
 943		if (insn.dsp_format.func == lx_op) {
 944			switch (insn.dsp_format.op) {
 945			case lwx_op:
 946				if (!access_ok(VERIFY_READ, addr, 4))
 947					goto sigbus;
 948				LoadW(addr, value, res);
 949				if (res)
 950					goto fault;
 951				compute_return_epc(regs);
 952				regs->regs[insn.dsp_format.rd] = value;
 953				break;
 954			case lhx_op:
 955				if (!access_ok(VERIFY_READ, addr, 2))
 956					goto sigbus;
 957				LoadHW(addr, value, res);
 958				if (res)
 959					goto fault;
 960				compute_return_epc(regs);
 961				regs->regs[insn.dsp_format.rd] = value;
 962				break;
 963			default:
 964				goto sigill;
 965			}
 966		}
 967#ifdef CONFIG_EVA
 968		else {
 969			/*
 970			 * We can land here only from the kernel accessing user
 971			 * memory, so we need to "switch" the address limit to
 972			 * user space so that the address check can work properly.
 973			 */
 974			seg = get_fs();
 975			set_fs(USER_DS);
 976			switch (insn.spec3_format.func) {
 977			case lhe_op:
 978				if (!access_ok(VERIFY_READ, addr, 2)) {
 979					set_fs(seg);
 980					goto sigbus;
 981				}
 982				LoadHWE(addr, value, res);
 983				if (res) {
 984					set_fs(seg);
 985					goto fault;
 986				}
 987				compute_return_epc(regs);
 988				regs->regs[insn.spec3_format.rt] = value;
 989				break;
 990			case lwe_op:
 991				if (!access_ok(VERIFY_READ, addr, 4)) {
 992					set_fs(seg);
 993					goto sigbus;
 994				}
 995				LoadWE(addr, value, res);
 996				if (res) {
 997					set_fs(seg);
 998					goto fault;
 999				}
1000				compute_return_epc(regs);
1001				regs->regs[insn.spec3_format.rt] = value;
1002				break;
1003			case lhue_op:
1004				if (!access_ok(VERIFY_READ, addr, 2)) {
1005					set_fs(seg);
1006					goto sigbus;
1007				}
1008				LoadHWUE(addr, value, res);
1009				if (res) {
1010					set_fs(seg);
1011					goto fault;
1012				}
1013				compute_return_epc(regs);
1014				regs->regs[insn.spec3_format.rt] = value;
1015				break;
1016			case she_op:
1017				if (!access_ok(VERIFY_WRITE, addr, 2)) {
1018					set_fs(seg);
1019					goto sigbus;
1020				}
1021				compute_return_epc(regs);
1022				value = regs->regs[insn.spec3_format.rt];
1023				StoreHWE(addr, value, res);
1024				if (res) {
1025					set_fs(seg);
1026					goto fault;
1027				}
1028				break;
1029			case swe_op:
1030				if (!access_ok(VERIFY_WRITE, addr, 4)) {
1031					set_fs(seg);
1032					goto sigbus;
1033				}
1034				compute_return_epc(regs);
1035				value = regs->regs[insn.spec3_format.rt];
1036				StoreWE(addr, value, res);
1037				if (res) {
1038					set_fs(seg);
1039					goto fault;
1040				}
1041				break;
1042			default:
1043				set_fs(seg);
1044				goto sigill;
1045			}
1046			set_fs(seg);
1047		}
1048#endif
1049		break;
1050	case lh_op:
1051		if (!access_ok(VERIFY_READ, addr, 2))
1052			goto sigbus;
1053
1054		if (IS_ENABLED(CONFIG_EVA)) {
1055			if (uaccess_kernel())
1056				LoadHW(addr, value, res);
1057			else
1058				LoadHWE(addr, value, res);
1059		} else {
1060			LoadHW(addr, value, res);
1061		}
1062
1063		if (res)
1064			goto fault;
1065		compute_return_epc(regs);
1066		regs->regs[insn.i_format.rt] = value;
1067		break;
1068
1069	case lw_op:
1070		if (!access_ok(VERIFY_READ, addr, 4))
1071			goto sigbus;
1072
1073		if (IS_ENABLED(CONFIG_EVA)) {
1074			if (uaccess_kernel())
1075				LoadW(addr, value, res);
1076			else
1077				LoadWE(addr, value, res);
1078		} else {
1079			LoadW(addr, value, res);
1080		}
1081
1082		if (res)
1083			goto fault;
1084		compute_return_epc(regs);
1085		regs->regs[insn.i_format.rt] = value;
1086		break;
1087
1088	case lhu_op:
1089		if (!access_ok(VERIFY_READ, addr, 2))
1090			goto sigbus;
1091
1092		if (IS_ENABLED(CONFIG_EVA)) {
1093			if (uaccess_kernel())
1094				LoadHWU(addr, value, res);
1095			else
1096				LoadHWUE(addr, value, res);
1097		} else {
1098			LoadHWU(addr, value, res);
1099		}
1100
1101		if (res)
1102			goto fault;
1103		compute_return_epc(regs);
1104		regs->regs[insn.i_format.rt] = value;
1105		break;
1106
1107	case lwu_op:
1108#ifdef CONFIG_64BIT
1109		/*
1110		 * A 32-bit kernel might be running on a 64-bit processor.  But
1111		 * if we're on a 32-bit processor and an i-cache incoherency
1112		 * or race makes us see a 64-bit instruction here the sdl/sdr
1113		 * would blow up, so for now we don't handle unaligned 64-bit
1114		 * instructions on 32-bit kernels.
1115		 */
1116		if (!access_ok(VERIFY_READ, addr, 4))
1117			goto sigbus;
1118
1119		LoadWU(addr, value, res);
1120		if (res)
1121			goto fault;
1122		compute_return_epc(regs);
1123		regs->regs[insn.i_format.rt] = value;
1124		break;
1125#endif /* CONFIG_64BIT */
1126
1127		/* Cannot handle 64-bit instructions in 32-bit kernel */
1128		goto sigill;
1129
1130	case ld_op:
1131#ifdef CONFIG_64BIT
1132		/*
1133		 * A 32-bit kernel might be running on a 64-bit processor.  But
1134		 * if we're on a 32-bit processor and an i-cache incoherency
1135		 * or race makes us see a 64-bit instruction here the sdl/sdr
1136		 * would blow up, so for now we don't handle unaligned 64-bit
1137		 * instructions on 32-bit kernels.
1138		 */
1139		if (!access_ok(VERIFY_READ, addr, 8))
1140			goto sigbus;
1141
1142		LoadDW(addr, value, res);
1143		if (res)
1144			goto fault;
1145		compute_return_epc(regs);
1146		regs->regs[insn.i_format.rt] = value;
1147		break;
1148#endif /* CONFIG_64BIT */
1149
1150		/* Cannot handle 64-bit instructions in 32-bit kernel */
1151		goto sigill;
1152
1153	case sh_op:
1154		if (!access_ok(VERIFY_WRITE, addr, 2))
1155			goto sigbus;
1156
1157		compute_return_epc(regs);
1158		value = regs->regs[insn.i_format.rt];
1159
1160		if (IS_ENABLED(CONFIG_EVA)) {
1161			if (uaccess_kernel())
1162				StoreHW(addr, value, res);
1163			else
1164				StoreHWE(addr, value, res);
1165		} else {
1166			StoreHW(addr, value, res);
1167		}
1168
1169		if (res)
1170			goto fault;
1171		break;
1172
1173	case sw_op:
1174		if (!access_ok(VERIFY_WRITE, addr, 4))
1175			goto sigbus;
1176
1177		compute_return_epc(regs);
1178		value = regs->regs[insn.i_format.rt];
1179
1180		if (IS_ENABLED(CONFIG_EVA)) {
1181			if (uaccess_kernel())
1182				StoreW(addr, value, res);
1183			else
1184				StoreWE(addr, value, res);
1185		} else {
1186			StoreW(addr, value, res);
1187		}
1188
1189		if (res)
1190			goto fault;
1191		break;
1192
1193	case sd_op:
1194#ifdef CONFIG_64BIT
1195		/*
1196		 * A 32-bit kernel might be running on a 64-bit processor.  But
1197		 * if we're on a 32-bit processor and an i-cache incoherency
1198		 * or race makes us see a 64-bit instruction here the sdl/sdr
1199		 * would blow up, so for now we don't handle unaligned 64-bit
1200		 * instructions on 32-bit kernels.
1201		 */
1202		if (!access_ok(VERIFY_WRITE, addr, 8))
1203			goto sigbus;
1204
1205		compute_return_epc(regs);
1206		value = regs->regs[insn.i_format.rt];
1207		StoreDW(addr, value, res);
1208		if (res)
1209			goto fault;
1210		break;
1211#endif /* CONFIG_64BIT */
1212
1213		/* Cannot handle 64-bit instructions in 32-bit kernel */
1214		goto sigill;
1215
1216	case lwc1_op:
1217	case ldc1_op:
1218	case swc1_op:
1219	case sdc1_op:
1220	case cop1x_op:
1221		die_if_kernel("Unaligned FP access in kernel code", regs);
1222		BUG_ON(!used_math());
1223
1224		lose_fpu(1);	/* Save FPU state for the emulator. */
1225		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1226					       &fault_addr);
1227		own_fpu(1);	/* Restore FPU state. */
1228
1229		/* Signal if something went wrong. */
1230		process_fpemu_return(res, fault_addr, 0);
1231
1232		if (res == 0)
1233			break;
1234		return;
1235
1236	case msa_op:
1237		if (!cpu_has_msa)
1238			goto sigill;
1239
1240		/*
1241		 * If we've reached this point then userland should have taken
1242		 * the MSA disabled exception & initialised vector context at
1243		 * some point in the past.
1244		 */
1245		BUG_ON(!thread_msa_context_live());
1246
1247		df = insn.msa_mi10_format.df;
1248		wd = insn.msa_mi10_format.wd;
1249		fpr = &current->thread.fpu.fpr[wd];
1250
1251		switch (insn.msa_mi10_format.func) {
1252		case msa_ld_op:
1253			if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
1254				goto sigbus;
1255
1256			do {
1257				/*
1258				 * If we have live MSA context, keep track of
1259				 * whether we get preempted, in order to avoid
1260				 * the register context we load being clobbered
1261				 * by the live context as it's saved during
1262				 * preemption.  If we don't have live context
1263				 * then it can't be saved, and so can't clobber
1264				 * the value we load.
1265				 */
1266				preempted = test_thread_flag(TIF_USEDMSA);
1267
1268				res = __copy_from_user_inatomic(fpr, addr,
1269								sizeof(*fpr));
1270				if (res)
1271					goto fault;
1272
1273				/*
1274				 * Update the hardware register if it is in use
1275				 * by the task in this quantum, in order to
1276				 * avoid having to save & restore the whole
1277				 * vector context.
1278				 */
1279				preempt_disable();
1280				if (test_thread_flag(TIF_USEDMSA)) {
1281					write_msa_wr(wd, fpr, df);
1282					preempted = 0;
1283				}
1284				preempt_enable();
1285			} while (preempted);
1286			break;
1287
1288		case msa_st_op:
1289			if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr)))
1290				goto sigbus;
1291
1292			/*
1293			 * Update from the hardware register if it is in use by
1294			 * the task in this quantum, in order to avoid having to
1295			 * save & restore the whole vector context.
1296			 */
1297			preempt_disable();
1298			if (test_thread_flag(TIF_USEDMSA))
1299				read_msa_wr(wd, fpr, df);
1300			preempt_enable();
1301
1302			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
1303			if (res)
1304				goto fault;
1305			break;
1306
1307		default:
1308			goto sigbus;
1309		}
1310
1311		compute_return_epc(regs);
1312		break;
1313
1314#ifndef CONFIG_CPU_MIPSR6
1315	/*
1316	 * COP2 is available to the implementor for application-specific use.
1317	 * It's up to applications to register a notifier chain and do
1318	 * whatever they have to do, including possibly sending signals.
1319	 *
1320	 * These opcodes have been reallocated in Release 6.
1321	 */
1322	case lwc2_op:
1323		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
1324		break;
1325
1326	case ldc2_op:
1327		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
1328		break;
1329
1330	case swc2_op:
1331		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
1332		break;
1333
1334	case sdc2_op:
1335		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
1336		break;
1337#endif
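
	/*
	 * A consumer of the LWC2/SWC2/LDC2/SDC2 notifications above would
	 * hook in roughly like this (a sketch only, assuming the
	 * register_cu2_notifier() helper and CU2_*_OP actions declared in
	 * <asm/cop2.h>; the my_* names are purely illustrative):
	 *
	 *	static int my_cu2_call(struct notifier_block *nb,
	 *			       unsigned long action, void *data)
	 *	{
	 *		struct pt_regs *regs = data;
	 *
	 *		switch (action) {
	 *		case CU2_LWC2_OP:
	 *			my_emulate_lwc2(regs);
	 *			return NOTIFY_STOP;
	 *		default:
	 *			return NOTIFY_OK;
	 *		}
	 *	}
	 *
	 *	static struct notifier_block my_cu2_nb = {
	 *		.notifier_call = my_cu2_call,
	 *	};
	 *
	 *	register_cu2_notifier(&my_cu2_nb);
	 */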
1338	default:
1339		/*
1340		 * Pheeee...  We encountered a yet unknown instruction or
1341		 * cache coherence problem.  Die sucker, die ...
1342		 */
1343		goto sigill;
1344	}
1345
1346#ifdef CONFIG_DEBUG_FS
1347	unaligned_instructions++;
1348#endif
1349
1350	return;
1351
1352fault:
1353	/* roll back jump/branch */
1354	regs->cp0_epc = origpc;
1355	regs->regs[31] = orig31;
1356	/* Did we have an exception handler installed? */
1357	if (fixup_exception(regs))
1358		return;
1359
1360	die_if_kernel("Unhandled kernel unaligned access", regs);
1361	force_sig(SIGSEGV, current);
1362
1363	return;
1364
1365sigbus:
1366	die_if_kernel("Unhandled kernel unaligned access", regs);
1367	force_sig(SIGBUS, current);
1368
1369	return;
1370
1371sigill:
1372	die_if_kernel
1373	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1374	force_sig(SIGILL, current);
1375}
1376
1377/* Recode table from 16-bit register notation to 32-bit GPR. */
1378const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
1379
1380/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
1381static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
1382
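/*
 * Same job as emulate_load_store_insn(), but for microMIPS encodings: fetch
 * one or two halfwords at EPC, decode the (possibly 16-bit) instruction,
 * emulate the unaligned access - including the LWM/SWM/LWP/SWP/LDM/SDM
 * register-list and pair forms - and let mm_isBranchInstr() resolve the
 * continuation PC when the faulting instruction sits in a delay slot.
 */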
1383static void emulate_load_store_microMIPS(struct pt_regs *regs,
1384					 void __user *addr)
1385{
1386	unsigned long value;
1387	unsigned int res;
1388	int i;
1389	unsigned int reg = 0, rvar;
1390	unsigned long orig31;
1391	u16 __user *pc16;
1392	u16 halfword;
1393	unsigned int word;
1394	unsigned long origpc, contpc;
1395	union mips_instruction insn;
1396	struct mm_decoded_insn mminsn;
1397	void __user *fault_addr = NULL;
1398
1399	origpc = regs->cp0_epc;
1400	orig31 = regs->regs[31];
1401
1402	mminsn.micro_mips_mode = 1;
1403
1404	/*
1405	 * This load never faults.
1406	 */
1407	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
1408	__get_user(halfword, pc16);
1409	pc16++;
1410	contpc = regs->cp0_epc + 2;
1411	word = ((unsigned int)halfword << 16);
1412	mminsn.pc_inc = 2;
1413
1414	if (!mm_insn_16bit(halfword)) {
1415		__get_user(halfword, pc16);
1416		pc16++;
1417		contpc = regs->cp0_epc + 4;
1418		mminsn.pc_inc = 4;
1419		word |= halfword;
1420	}
1421	mminsn.insn = word;
1422
1423	if (get_user(halfword, pc16))
1424		goto fault;
1425	mminsn.next_pc_inc = 2;
1426	word = ((unsigned int)halfword << 16);
1427
1428	if (!mm_insn_16bit(halfword)) {
1429		pc16++;
1430		if (get_user(halfword, pc16))
1431			goto fault;
1432		mminsn.next_pc_inc = 4;
1433		word |= halfword;
1434	}
1435	mminsn.next_insn = word;
1436
1437	insn = (union mips_instruction)(mminsn.insn);
1438	if (mm_isBranchInstr(regs, mminsn, &contpc))
1439		insn = (union mips_instruction)(mminsn.next_insn);
1440
1441	/*  Parse instruction to find what to do */
1442
1443	switch (insn.mm_i_format.opcode) {
1444
1445	case mm_pool32a_op:
1446		switch (insn.mm_x_format.func) {
1447		case mm_lwxs_op:
1448			reg = insn.mm_x_format.rd;
1449			goto loadW;
1450		}
1451
1452		goto sigbus;
1453
1454	case mm_pool32b_op:
1455		switch (insn.mm_m_format.func) {
1456		case mm_lwp_func:
1457			reg = insn.mm_m_format.rd;
1458			if (reg == 31)
1459				goto sigbus;
1460
1461			if (!access_ok(VERIFY_READ, addr, 8))
1462				goto sigbus;
1463
1464			LoadW(addr, value, res);
1465			if (res)
1466				goto fault;
1467			regs->regs[reg] = value;
1468			addr += 4;
1469			LoadW(addr, value, res);
1470			if (res)
1471				goto fault;
1472			regs->regs[reg + 1] = value;
1473			goto success;
1474
1475		case mm_swp_func:
1476			reg = insn.mm_m_format.rd;
1477			if (reg == 31)
1478				goto sigbus;
1479
1480			if (!access_ok(VERIFY_WRITE, addr, 8))
1481				goto sigbus;
1482
1483			value = regs->regs[reg];
1484			StoreW(addr, value, res);
1485			if (res)
1486				goto fault;
1487			addr += 4;
1488			value = regs->regs[reg + 1];
1489			StoreW(addr, value, res);
1490			if (res)
1491				goto fault;
1492			goto success;
1493
1494		case mm_ldp_func:
1495#ifdef CONFIG_64BIT
1496			reg = insn.mm_m_format.rd;
1497			if (reg == 31)
1498				goto sigbus;
1499
1500			if (!access_ok(VERIFY_READ, addr, 16))
1501				goto sigbus;
1502
1503			LoadDW(addr, value, res);
1504			if (res)
1505				goto fault;
1506			regs->regs[reg] = value;
1507			addr += 8;
1508			LoadDW(addr, value, res);
1509			if (res)
1510				goto fault;
1511			regs->regs[reg + 1] = value;
1512			goto success;
1513#endif /* CONFIG_64BIT */
1514
1515			goto sigill;
1516
1517		case mm_sdp_func:
1518#ifdef CONFIG_64BIT
1519			reg = insn.mm_m_format.rd;
1520			if (reg == 31)
1521				goto sigbus;
1522
1523			if (!access_ok(VERIFY_WRITE, addr, 16))
1524				goto sigbus;
1525
1526			value = regs->regs[reg];
1527			StoreDW(addr, value, res);
1528			if (res)
1529				goto fault;
1530			addr += 8;
1531			value = regs->regs[reg + 1];
1532			StoreDW(addr, value, res);
1533			if (res)
1534				goto fault;
1535			goto success;
1536#endif /* CONFIG_64BIT */
1537
1538			goto sigill;
1539
1540		case mm_lwm32_func:
1541			reg = insn.mm_m_format.rd;
1542			rvar = reg & 0xf;
1543			if ((rvar > 9) || !reg)
1544				goto sigill;
1545			if (reg & 0x10) {
1546				if (!access_ok
1547				    (VERIFY_READ, addr, 4 * (rvar + 1)))
1548					goto sigbus;
1549			} else {
1550				if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1551					goto sigbus;
1552			}
1553			if (rvar == 9)
1554				rvar = 8;
1555			for (i = 16; rvar; rvar--, i++) {
1556				LoadW(addr, value, res);
1557				if (res)
1558					goto fault;
1559				addr += 4;
1560				regs->regs[i] = value;
1561			}
1562			if ((reg & 0xf) == 9) {
1563				LoadW(addr, value, res);
1564				if (res)
1565					goto fault;
1566				addr += 4;
1567				regs->regs[30] = value;
1568			}
1569			if (reg & 0x10) {
1570				LoadW(addr, value, res);
1571				if (res)
1572					goto fault;
1573				regs->regs[31] = value;
1574			}
1575			goto success;
1576
1577		case mm_swm32_func:
1578			reg = insn.mm_m_format.rd;
1579			rvar = reg & 0xf;
1580			if ((rvar > 9) || !reg)
1581				goto sigill;
1582			if (reg & 0x10) {
1583				if (!access_ok
1584				    (VERIFY_WRITE, addr, 4 * (rvar + 1)))
1585					goto sigbus;
1586			} else {
1587				if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1588					goto sigbus;
1589			}
1590			if (rvar == 9)
1591				rvar = 8;
1592			for (i = 16; rvar; rvar--, i++) {
1593				value = regs->regs[i];
1594				StoreW(addr, value, res);
1595				if (res)
1596					goto fault;
1597				addr += 4;
1598			}
1599			if ((reg & 0xf) == 9) {
1600				value = regs->regs[30];
1601				StoreW(addr, value, res);
1602				if (res)
1603					goto fault;
1604				addr += 4;
1605			}
1606			if (reg & 0x10) {
1607				value = regs->regs[31];
1608				StoreW(addr, value, res);
1609				if (res)
1610					goto fault;
1611			}
1612			goto success;
1613
1614		case mm_ldm_func:
1615#ifdef CONFIG_64BIT
1616			reg = insn.mm_m_format.rd;
1617			rvar = reg & 0xf;
1618			if ((rvar > 9) || !reg)
1619				goto sigill;
1620			if (reg & 0x10) {
1621				if (!access_ok
1622				    (VERIFY_READ, addr, 8 * (rvar + 1)))
1623					goto sigbus;
1624			} else {
1625				if (!access_ok(VERIFY_READ, addr, 8 * rvar))
1626					goto sigbus;
1627			}
1628			if (rvar == 9)
1629				rvar = 8;
1630
1631			for (i = 16; rvar; rvar--, i++) {
1632				LoadDW(addr, value, res);
1633				if (res)
1634					goto fault;
1635				addr += 4;
1636				regs->regs[i] = value;
1637			}
1638			if ((reg & 0xf) == 9) {
1639				LoadDW(addr, value, res);
1640				if (res)
1641					goto fault;
1642				addr += 8;
1643				regs->regs[30] = value;
1644			}
1645			if (reg & 0x10) {
1646				LoadDW(addr, value, res);
1647				if (res)
1648					goto fault;
1649				regs->regs[31] = value;
1650			}
1651			goto success;
1652#endif /* CONFIG_64BIT */
1653
1654			goto sigill;
1655
1656		case mm_sdm_func:
1657#ifdef CONFIG_64BIT
1658			reg = insn.mm_m_format.rd;
1659			rvar = reg & 0xf;
1660			if ((rvar > 9) || !reg)
1661				goto sigill;
1662			if (reg & 0x10) {
1663				if (!access_ok
1664				    (VERIFY_WRITE, addr, 8 * (rvar + 1)))
1665					goto sigbus;
1666			} else {
1667				if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
1668					goto sigbus;
1669			}
1670			if (rvar == 9)
1671				rvar = 8;
1672
1673			for (i = 16; rvar; rvar--, i++) {
1674				value = regs->regs[i];
1675				StoreDW(addr, value, res);
1676				if (res)
1677					goto fault;
1678				addr += 8;
1679			}
1680			if ((reg & 0xf) == 9) {
1681				value = regs->regs[30];
1682				StoreDW(addr, value, res);
1683				if (res)
1684					goto fault;
1685				addr += 8;
1686			}
1687			if (reg & 0x10) {
1688				value = regs->regs[31];
1689				StoreDW(addr, value, res);
1690				if (res)
1691					goto fault;
1692			}
1693			goto success;
1694#endif /* CONFIG_64BIT */
1695
1696			goto sigill;
1697
1698			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
1699		}
1700
1701		goto sigbus;
1702
1703	case mm_pool32c_op:
1704		switch (insn.mm_m_format.func) {
1705		case mm_lwu_func:
1706			reg = insn.mm_m_format.rd;
1707			goto loadWU;
1708		}
1709
1710		/*  LL,SC,LLD,SCD are not serviced */
1711		goto sigbus;
1712
1713	case mm_pool32f_op:
1714		switch (insn.mm_x_format.func) {
1715		case mm_lwxc1_func:
1716		case mm_swxc1_func:
1717		case mm_ldxc1_func:
1718		case mm_sdxc1_func:
1719			goto fpu_emul;
1720		}
1721
1722		goto sigbus;
1723
1724	case mm_ldc132_op:
1725	case mm_sdc132_op:
1726	case mm_lwc132_op:
1727	case mm_swc132_op:
1728fpu_emul:
1729		/* roll back jump/branch */
1730		regs->cp0_epc = origpc;
1731		regs->regs[31] = orig31;
1732
1733		die_if_kernel("Unaligned FP access in kernel code", regs);
1734		BUG_ON(!used_math());
1735		BUG_ON(!is_fpu_owner());
1736
1737		lose_fpu(1);	/* save the FPU state for the emulator */
1738		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1739					       &fault_addr);
1740		own_fpu(1);	/* restore FPU state */
1741
1742		/* If something went wrong, signal */
1743		process_fpemu_return(res, fault_addr, 0);
1744
1745		if (res == 0)
1746			goto success;
1747		return;
1748
1749	case mm_lh32_op:
1750		reg = insn.mm_i_format.rt;
1751		goto loadHW;
1752
1753	case mm_lhu32_op:
1754		reg = insn.mm_i_format.rt;
1755		goto loadHWU;
1756
1757	case mm_lw32_op:
1758		reg = insn.mm_i_format.rt;
1759		goto loadW;
1760
1761	case mm_sh32_op:
1762		reg = insn.mm_i_format.rt;
1763		goto storeHW;
1764
1765	case mm_sw32_op:
1766		reg = insn.mm_i_format.rt;
1767		goto storeW;
1768
1769	case mm_ld32_op:
1770		reg = insn.mm_i_format.rt;
1771		goto loadDW;
1772
1773	case mm_sd32_op:
1774		reg = insn.mm_i_format.rt;
1775		goto storeDW;
1776
1777	case mm_pool16c_op:
1778		switch (insn.mm16_m_format.func) {
1779		case mm_lwm16_op:
1780			reg = insn.mm16_m_format.rlist;
1781			rvar = reg + 1;
1782			if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1783				goto sigbus;
1784
1785			for (i = 16; rvar; rvar--, i++) {
1786				LoadW(addr, value, res);
1787				if (res)
1788					goto fault;
1789				addr += 4;
1790				regs->regs[i] = value;
1791			}
1792			LoadW(addr, value, res);
1793			if (res)
1794				goto fault;
1795			regs->regs[31] = value;
1796
1797			goto success;
1798
1799		case mm_swm16_op:
1800			reg = insn.mm16_m_format.rlist;
1801			rvar = reg + 1;
1802			if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1803				goto sigbus;
1804
1805			for (i = 16; rvar; rvar--, i++) {
1806				value = regs->regs[i];
1807				StoreW(addr, value, res);
1808				if (res)
1809					goto fault;
1810				addr += 4;
1811			}
1812			value = regs->regs[31];
1813			StoreW(addr, value, res);
1814			if (res)
1815				goto fault;
1816
1817			goto success;
1818
1819		}
1820
1821		goto sigbus;
1822
1823	case mm_lhu16_op:
1824		reg = reg16to32[insn.mm16_rb_format.rt];
1825		goto loadHWU;
1826
1827	case mm_lw16_op:
1828		reg = reg16to32[insn.mm16_rb_format.rt];
1829		goto loadW;
1830
1831	case mm_sh16_op:
1832		reg = reg16to32st[insn.mm16_rb_format.rt];
1833		goto storeHW;
1834
1835	case mm_sw16_op:
1836		reg = reg16to32st[insn.mm16_rb_format.rt];
1837		goto storeW;
1838
1839	case mm_lwsp16_op:
1840		reg = insn.mm16_r5_format.rt;
1841		goto loadW;
1842
1843	case mm_swsp16_op:
1844		reg = insn.mm16_r5_format.rt;
1845		goto storeW;
1846
1847	case mm_lwgp16_op:
1848		reg = reg16to32[insn.mm16_r3_format.rt];
1849		goto loadW;
1850
1851	default:
1852		goto sigill;
1853	}
1854
1855loadHW:
1856	if (!access_ok(VERIFY_READ, addr, 2))
1857		goto sigbus;
1858
1859	LoadHW(addr, value, res);
1860	if (res)
1861		goto fault;
1862	regs->regs[reg] = value;
1863	goto success;
1864
1865loadHWU:
1866	if (!access_ok(VERIFY_READ, addr, 2))
1867		goto sigbus;
1868
1869	LoadHWU(addr, value, res);
1870	if (res)
1871		goto fault;
1872	regs->regs[reg] = value;
1873	goto success;
1874
1875loadW:
1876	if (!access_ok(VERIFY_READ, addr, 4))
1877		goto sigbus;
1878
1879	LoadW(addr, value, res);
1880	if (res)
1881		goto fault;
1882	regs->regs[reg] = value;
1883	goto success;
1884
1885loadWU:
1886#ifdef CONFIG_64BIT
1887	/*
1888	 * A 32-bit kernel might be running on a 64-bit processor.  But
1889	 * if we're on a 32-bit processor and an i-cache incoherency
1890	 * or race makes us see a 64-bit instruction here the sdl/sdr
1891	 * would blow up, so for now we don't handle unaligned 64-bit
1892	 * instructions on 32-bit kernels.
1893	 */
1894	if (!access_ok(VERIFY_READ, addr, 4))
1895		goto sigbus;
1896
1897	LoadWU(addr, value, res);
1898	if (res)
1899		goto fault;
1900	regs->regs[reg] = value;
1901	goto success;
1902#endif /* CONFIG_64BIT */
1903
1904	/* Cannot handle 64-bit instructions in 32-bit kernel */
1905	goto sigill;
1906
1907loadDW:
1908#ifdef CONFIG_64BIT
1909	/*
1910	 * A 32-bit kernel might be running on a 64-bit processor.  But
1911	 * if we're on a 32-bit processor and an i-cache incoherency
1912	 * or race makes us see a 64-bit instruction here the sdl/sdr
1913	 * would blow up, so for now we don't handle unaligned 64-bit
1914	 * instructions on 32-bit kernels.
1915	 */
1916	if (!access_ok(VERIFY_READ, addr, 8))
1917		goto sigbus;
1918
1919	LoadDW(addr, value, res);
1920	if (res)
1921		goto fault;
1922	regs->regs[reg] = value;
1923	goto success;
1924#endif /* CONFIG_64BIT */
1925
1926	/* Cannot handle 64-bit instructions in 32-bit kernel */
1927	goto sigill;
1928
1929storeHW:
1930	if (!access_ok(VERIFY_WRITE, addr, 2))
1931		goto sigbus;
1932
1933	value = regs->regs[reg];
1934	StoreHW(addr, value, res);
1935	if (res)
1936		goto fault;
1937	goto success;
1938
1939storeW:
1940	if (!access_ok(VERIFY_WRITE, addr, 4))
1941		goto sigbus;
1942
1943	value = regs->regs[reg];
1944	StoreW(addr, value, res);
1945	if (res)
1946		goto fault;
1947	goto success;
1948
1949storeDW:
1950#ifdef CONFIG_64BIT
1951	/*
1952	 * A 32-bit kernel might be running on a 64-bit processor.  But
1953	 * if we're on a 32-bit processor and an i-cache incoherency
1954	 * or race makes us see a 64-bit instruction here the sdl/sdr
1955	 * would blow up, so for now we don't handle unaligned 64-bit
1956	 * instructions on 32-bit kernels.
1957	 */
1958	if (!access_ok(VERIFY_WRITE, addr, 8))
1959		goto sigbus;
1960
1961	value = regs->regs[reg];
1962	StoreDW(addr, value, res);
1963	if (res)
1964		goto fault;
1965	goto success;
1966#endif /* CONFIG_64BIT */
1967
1968	/* Cannot handle 64-bit instructions in 32-bit kernel */
1969	goto sigill;
1970
1971success:
1972	regs->cp0_epc = contpc;	/* advance or branch */
1973
1974#ifdef CONFIG_DEBUG_FS
1975	unaligned_instructions++;
1976#endif
1977	return;
1978
1979fault:
1980	/* roll back jump/branch */
1981	regs->cp0_epc = origpc;
1982	regs->regs[31] = orig31;
1983	/* Did we have an exception handler installed? */
1984	if (fixup_exception(regs))
1985		return;
1986
1987	die_if_kernel("Unhandled kernel unaligned access", regs);
1988	force_sig(SIGSEGV, current);
1989
1990	return;
1991
1992sigbus:
1993	die_if_kernel("Unhandled kernel unaligned access", regs);
1994	force_sig(SIGBUS, current);
1995
1996	return;
1997
1998sigill:
1999	die_if_kernel
2000	    ("Unhandled kernel unaligned access or invalid instruction", regs);
2001	force_sig(SIGILL, current);
2002}
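
The LWM32/SWM32/LDM/SDM cases above all decode the same microMIPS register-list field: the low four bits give a count of registers starting at $16, the special value 9 additionally selects $30, and bit 4 adds $31. The stand-alone helper below is only an illustrative sketch of that decode, written against the checks visible in the cases above; it is not part of this file.

/* Illustrative sketch: expand a microMIPS LWM/SWM register-list field. */
static int mm_expand_rlist_sketch(unsigned int reg, int gpr[10])
{
	unsigned int rvar = reg & 0xf;
	int i, n = 0;

	if (rvar > 9 || !reg)
		return -1;			/* reserved encoding */
	for (i = 0; i < (int)(rvar == 9 ? 8 : rvar); i++)
		gpr[n++] = 16 + i;		/* $16 .. $23 */
	if (rvar == 9)
		gpr[n++] = 30;			/* frame pointer */
	if (reg & 0x10)
		gpr[n++] = 31;			/* return address */
	return n;				/* number of GPRs transferred */
}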
2003
2004static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
2005{
2006	unsigned long value;
2007	unsigned int res;
2008	int reg;
2009	unsigned long orig31;
2010	u16 __user *pc16;
2011	unsigned long origpc;
2012	union mips16e_instruction mips16inst, oldinst;
2013	unsigned int opcode;
2014	int extended = 0;
2015
2016	origpc = regs->cp0_epc;
2017	orig31 = regs->regs[31];
2018	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
2019	/*
2020	 * This load never faults.
2021	 */
2022	__get_user(mips16inst.full, pc16);
2023	oldinst = mips16inst;
2024
2025	/* skip EXTEND instruction */
2026	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
2027		extended = 1;
2028		pc16++;
2029		__get_user(mips16inst.full, pc16);
2030	} else if (delay_slot(regs)) {
2031		/*  skip jump instructions */
2032		/*  JAL/JALX are 32 bits but have OPCODE in first short int */
2033		if (mips16inst.ri.opcode == MIPS16e_jal_op)
2034			pc16++;
2035		pc16++;
2036		if (get_user(mips16inst.full, pc16))
2037			goto sigbus;
2038	}
2039
2040	opcode = mips16inst.ri.opcode;
2041	switch (opcode) {
2042	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
2043		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
2044		case MIPS16e_ldpc_func:
2045		case MIPS16e_ldsp_func:
2046			reg = reg16to32[mips16inst.ri64.ry];
2047			goto loadDW;
2048
2049		case MIPS16e_sdsp_func:
2050			reg = reg16to32[mips16inst.ri64.ry];
2051			goto writeDW;
2052
2053		case MIPS16e_sdrasp_func:
2054			reg = 29;	/* GPRSP */
2055			goto writeDW;
2056		}
2057
2058		goto sigbus;
2059
2060	case MIPS16e_swsp_op:
2061		reg = reg16to32[mips16inst.ri.rx];
2062		if (extended && cpu_has_mips16e2)
2063			switch (mips16inst.ri.imm >> 5) {
2064			case 0:		/* SWSP */
2065			case 1:		/* SWGP */
2066				break;
2067			case 2:		/* SHGP */
2068				opcode = MIPS16e_sh_op;
2069				break;
2070			default:
2071				goto sigbus;
2072			}
2073		break;
2074
2075	case MIPS16e_lwpc_op:
2076		reg = reg16to32[mips16inst.ri.rx];
2077		break;
2078
2079	case MIPS16e_lwsp_op:
2080		reg = reg16to32[mips16inst.ri.rx];
2081		if (extended && cpu_has_mips16e2)
2082			switch (mips16inst.ri.imm >> 5) {
2083			case 0:		/* LWSP */
2084			case 1:		/* LWGP */
2085				break;
2086			case 2:		/* LHGP */
2087				opcode = MIPS16e_lh_op;
2088				break;
2089			case 4:		/* LHUGP */
2090				opcode = MIPS16e_lhu_op;
2091				break;
2092			default:
2093				goto sigbus;
2094			}
2095		break;
2096
2097	case MIPS16e_i8_op:
2098		if (mips16inst.i8.func != MIPS16e_swrasp_func)
2099			goto sigbus;
2100		reg = 29;	/* GPRSP */
2101		break;
2102
2103	default:
2104		reg = reg16to32[mips16inst.rri.ry];
2105		break;
2106	}
2107
2108	switch (opcode) {
2109
2110	case MIPS16e_lb_op:
2111	case MIPS16e_lbu_op:
2112	case MIPS16e_sb_op:
2113		goto sigbus;
2114
2115	case MIPS16e_lh_op:
2116		if (!access_ok(VERIFY_READ, addr, 2))
2117			goto sigbus;
2118
2119		LoadHW(addr, value, res);
2120		if (res)
2121			goto fault;
2122		MIPS16e_compute_return_epc(regs, &oldinst);
2123		regs->regs[reg] = value;
2124		break;
2125
2126	case MIPS16e_lhu_op:
2127		if (!access_ok(VERIFY_READ, addr, 2))
2128			goto sigbus;
2129
2130		LoadHWU(addr, value, res);
2131		if (res)
2132			goto fault;
2133		MIPS16e_compute_return_epc(regs, &oldinst);
2134		regs->regs[reg] = value;
2135		break;
2136
2137	case MIPS16e_lw_op:
2138	case MIPS16e_lwpc_op:
2139	case MIPS16e_lwsp_op:
2140		if (!access_ok(VERIFY_READ, addr, 4))
2141			goto sigbus;
2142
2143		LoadW(addr, value, res);
2144		if (res)
2145			goto fault;
2146		MIPS16e_compute_return_epc(regs, &oldinst);
2147		regs->regs[reg] = value;
2148		break;
2149
2150	case MIPS16e_lwu_op:
2151#ifdef CONFIG_64BIT
2152		/*
2153		 * A 32-bit kernel might be running on a 64-bit processor.  But
2154		 * if we're on a 32-bit processor and an i-cache incoherency
2155		 * or race makes us see a 64-bit instruction here the sdl/sdr
2156		 * would blow up, so for now we don't handle unaligned 64-bit
2157		 * instructions on 32-bit kernels.
2158		 */
2159		if (!access_ok(VERIFY_READ, addr, 4))
2160			goto sigbus;
2161
2162		LoadWU(addr, value, res);
2163		if (res)
2164			goto fault;
2165		MIPS16e_compute_return_epc(regs, &oldinst);
2166		regs->regs[reg] = value;
2167		break;
2168#endif /* CONFIG_64BIT */
2169
2170		/* Cannot handle 64-bit instructions in 32-bit kernel */
2171		goto sigill;
2172
2173	case MIPS16e_ld_op:
2174loadDW:
2175#ifdef CONFIG_64BIT
2176		/*
2177		 * A 32-bit kernel might be running on a 64-bit processor.  But
2178		 * if we're on a 32-bit processor and an i-cache incoherency
2179		 * or race makes us see a 64-bit instruction here the sdl/sdr
2180		 * would blow up, so for now we don't handle unaligned 64-bit
2181		 * instructions on 32-bit kernels.
2182		 */
2183		if (!access_ok(VERIFY_READ, addr, 8))
2184			goto sigbus;
2185
2186		LoadDW(addr, value, res);
2187		if (res)
2188			goto fault;
2189		MIPS16e_compute_return_epc(regs, &oldinst);
2190		regs->regs[reg] = value;
2191		break;
2192#endif /* CONFIG_64BIT */
2193
2194		/* Cannot handle 64-bit instructions in 32-bit kernel */
2195		goto sigill;
2196
2197	case MIPS16e_sh_op:
2198		if (!access_ok(VERIFY_WRITE, addr, 2))
2199			goto sigbus;
2200
2201		MIPS16e_compute_return_epc(regs, &oldinst);
2202		value = regs->regs[reg];
2203		StoreHW(addr, value, res);
2204		if (res)
2205			goto fault;
2206		break;
2207
2208	case MIPS16e_sw_op:
2209	case MIPS16e_swsp_op:
2210	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
2211		if (!access_ok(VERIFY_WRITE, addr, 4))
2212			goto sigbus;
2213
2214		MIPS16e_compute_return_epc(regs, &oldinst);
2215		value = regs->regs[reg];
2216		StoreW(addr, value, res);
2217		if (res)
2218			goto fault;
2219		break;
2220
2221	case MIPS16e_sd_op:
2222writeDW:
2223#ifdef CONFIG_64BIT
2224		/*
2225		 * A 32-bit kernel might be running on a 64-bit processor.  But
2226		 * if we're on a 32-bit processor and an i-cache incoherency
2227		 * or race makes us see a 64-bit instruction here the sdl/sdr
2228		 * would blow up, so for now we don't handle unaligned 64-bit
2229		 * instructions on 32-bit kernels.
2230		 */
2231		if (!access_ok(VERIFY_WRITE, addr, 8))
2232			goto sigbus;
2233
2234		MIPS16e_compute_return_epc(regs, &oldinst);
2235		value = regs->regs[reg];
2236		StoreDW(addr, value, res);
2237		if (res)
2238			goto fault;
2239		break;
2240#endif /* CONFIG_64BIT */
2241
2242		/* Cannot handle 64-bit instructions in 32-bit kernel */
2243		goto sigill;
2244
2245	default:
2246		/*
2247		 * Pheeee...  We encountered an as yet unknown instruction or
2248		 * cache coherence problem.  Die sucker, die ...
2249		 */
2250		goto sigill;
2251	}
2252
2253#ifdef CONFIG_DEBUG_FS
2254	unaligned_instructions++;
2255#endif
2256
2257	return;
2258
2259fault:
2260	/* roll back jump/branch */
2261	regs->cp0_epc = origpc;
2262	regs->regs[31] = orig31;
2263	/* Did we have an exception handler installed? */
2264	if (fixup_exception(regs))
2265		return;
2266
2267	die_if_kernel("Unhandled kernel unaligned access", regs);
2268	force_sig(SIGSEGV, current);
2269
2270	return;
2271
2272sigbus:
2273	die_if_kernel("Unhandled kernel unaligned access", regs);
2274	force_sig(SIGBUS, current);
2275
2276	return;
2277
2278sigill:
2279	die_if_kernel
2280	    ("Unhandled kernel unaligned access or invalid instruction", regs);
2281	force_sig(SIGILL, current);
2282}
2283
2284asmlinkage void do_ade(struct pt_regs *regs)
2285{
2286	enum ctx_state prev_state;
2287	unsigned int __user *pc;
2288	mm_segment_t seg;
2289
2290	prev_state = exception_enter();
2291	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
2292			1, regs, regs->cp0_badvaddr);
2293	/*
2294	 * Did we catch a fault trying to load an instruction?
2295	 */
2296	if (regs->cp0_badvaddr == regs->cp0_epc)
2297		goto sigbus;
2298
2299	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
2300		goto sigbus;
2301	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
2302		goto sigbus;
2303
2304	/*
2305	 * Do branch emulation only if we didn't forward the exception.
2306	 * This is all rather ugly ...
2307	 */
2308
2309	/*
2310	 * Are we running in microMIPS mode?
2311	 */
2312	if (get_isa16_mode(regs->cp0_epc)) {
2313		/*
2314		 * Did we catch a fault trying to load an instruction in
2315		 * 16-bit mode?
2316		 */
2317		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
2318			goto sigbus;
2319		if (unaligned_action == UNALIGNED_ACTION_SHOW)
2320			show_registers(regs);
2321
2322		if (cpu_has_mmips) {
2323			seg = get_fs();
2324			if (!user_mode(regs))
2325				set_fs(KERNEL_DS);
2326			emulate_load_store_microMIPS(regs,
2327				(void __user *)regs->cp0_badvaddr);
2328			set_fs(seg);
2329
2330			return;
2331		}
2332
2333		if (cpu_has_mips16) {
2334			seg = get_fs();
2335			if (!user_mode(regs))
2336				set_fs(KERNEL_DS);
2337			emulate_load_store_MIPS16e(regs,
2338				(void __user *)regs->cp0_badvaddr);
2339			set_fs(seg);
2340
2341			return;
2342		}
2343
2344		goto sigbus;
2345	}
2346
2347	if (unaligned_action == UNALIGNED_ACTION_SHOW)
2348		show_registers(regs);
2349	pc = (unsigned int __user *)exception_epc(regs);
2350
2351	seg = get_fs();
2352	if (!user_mode(regs))
2353		set_fs(KERNEL_DS);
2354	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
2355	set_fs(seg);
2356
2357	return;
2358
2359sigbus:
2360	die_if_kernel("Kernel unaligned instruction access", regs);
2361	force_sig(SIGBUS, current);
2362
2363	/*
2364	 * XXX On return from the signal handler we should advance the epc
2365	 */
2366	exception_exit(prev_state);
2367}
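
do_ade() decides between the microMIPS/MIPS16e decoders and the classic one by testing get_isa16_mode(regs->cp0_epc): on MIPS the 16-bit ISA mode is carried in bit 0 of the program counter, which is also why msk_isa16_mode() is applied before instruction halfwords are fetched. The two helpers below are only an illustrative sketch of that convention; the real macros are provided by the asm headers, not by this file.

/* Illustrative sketch of the ISA-mode bit convention used by do_ade(). */
#define SKETCH_ISA16_BIT	0x1UL

static inline int sketch_get_isa16_mode(unsigned long epc)
{
	return (epc & SKETCH_ISA16_BIT) != 0;	/* non-zero: 16-bit ISA */
}

static inline unsigned long sketch_msk_isa16_mode(unsigned long epc)
{
	return epc & ~SKETCH_ISA16_BIT;		/* true instruction address */
}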
2368
2369#ifdef CONFIG_DEBUG_FS
2370static int __init debugfs_unaligned(void)
2371{
2372	struct dentry *d;
2373
2374	if (!mips_debugfs_dir)
2375		return -ENODEV;
2376	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
2377			       mips_debugfs_dir, &unaligned_instructions);
2378	if (!d)
2379		return -ENOMEM;
2380	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
2381			       mips_debugfs_dir, &unaligned_action);
2382	if (!d)
2383		return -ENOMEM;
2384	return 0;
2385}
2386arch_initcall(debugfs_unaligned);
2387#endif
v6.13.7
   1/*
   2 * Handle unaligned accesses by emulation.
   3 *
   4 * This file is subject to the terms and conditions of the GNU General Public
   5 * License.  See the file "COPYING" in the main directory of this archive
   6 * for more details.
   7 *
   8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
   9 * Copyright (C) 1999 Silicon Graphics, Inc.
  10 * Copyright (C) 2014 Imagination Technologies Ltd.
  11 *
  12 * This file contains exception handler for address error exception with the
  13 * special capability to execute faulting instructions in software.  The
  14 * handler does not try to handle the case when the program counter points
  15 * to an address not aligned to a word boundary.
  16 *
  17 * Putting data to unaligned addresses is a bad practice even on Intel where
  18 * only the performance is affected.  Much worse is that such code is non-
  19 * portable.  Due to several programs that die on MIPS due to alignment
  20 * problems I decided to implement this handler anyway though I originally
  21 * didn't intend to do this at all for user code.
  22 *
  23 * For now I enable fixing of address errors by default to make life easier.
  24 * I however intend to disable this somewhen in the future when the alignment
  25 * problems with user programs have been fixed.	 For programmers this is the
  26 * right way to go.
  27 *
  28 * Fixing address errors is a per process option.  The option is inherited
  29 * across fork(2) and execve(2) calls.	If you really want to use the
  30 * option in your user programs - I discourage the use of the software
  31 * emulation strongly - use the following code in your userland stuff:
  32 *
  33 * #include <sys/sysmips.h>
  34 *
  35 * ...
  36 * sysmips(MIPS_FIXADE, x);
  37 * ...
  38 *
  39 * The argument x is 0 for disabling software emulation, enabled otherwise.
  40 *
  41 * Below a little program to play around with this feature.
  42 *
  43 * #include <stdio.h>
  44 * #include <sys/sysmips.h>
  45 *
  46 * struct foo {
  47 *	   unsigned char bar[8];
  48 * };
  49 *
  50 * main(int argc, char *argv[])
  51 * {
  52 *	   struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
  53 *	   unsigned int *p = (unsigned int *) (x.bar + 3);
  54 *	   int i;
  55 *
  56 *	   if (argc > 1)
  57 *		   sysmips(MIPS_FIXADE, atoi(argv[1]));
  58 *
  59 *	   printf("*p = %08lx\n", *p);
  60 *
  61 *	   *p = 0xdeadface;
  62 *
  63 *	   for(i = 0; i <= 7; i++)
  64 *	   printf("%02x ", x.bar[i]);
  65 *	   printf("\n");
  66 * }
  67 *
  68 * Coprocessor loads are not supported; I think this case is unimportant
  69 * in the practice.
  70 *
  71 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
  72 *	 exception for the R6000.
  73 *	 A store crossing a page boundary might be executed only partially.
  74 *	 Undo the partial store in this case.
  75 */
  76#include <linux/context_tracking.h>
  77#include <linux/mm.h>
  78#include <linux/signal.h>
  79#include <linux/smp.h>
  80#include <linux/sched.h>
  81#include <linux/debugfs.h>
  82#include <linux/perf_event.h>
  83
  84#include <asm/asm.h>
  85#include <asm/branch.h>
  86#include <asm/byteorder.h>
  87#include <asm/cop2.h>
  88#include <asm/debug.h>
  89#include <asm/fpu.h>
  90#include <asm/fpu_emulator.h>
  91#include <asm/inst.h>
  92#include <asm/unaligned-emul.h>
  93#include <asm/mmu_context.h>
  94#include <asm/traps.h>
  95#include <linux/uaccess.h>
  96
  97#include "access-helper.h"
  98
  99enum {
 100	UNALIGNED_ACTION_QUIET,
 101	UNALIGNED_ACTION_SIGNAL,
 102	UNALIGNED_ACTION_SHOW,
 103};
 104#ifdef CONFIG_DEBUG_FS
 105static u32 unaligned_instructions;
 106static u32 unaligned_action;
 107#else
 108#define unaligned_action UNALIGNED_ACTION_QUIET
 109#endif
 110extern void show_registers(struct pt_regs *regs);
 111
 112static void emulate_load_store_insn(struct pt_regs *regs,
 113	void __user *addr, unsigned int *pc)
 114{
 115	unsigned long origpc, orig31, value;
 116	union mips_instruction insn;
 117	unsigned int res;
 118	bool user = user_mode(regs);
 119
 120	origpc = (unsigned long)pc;
 121	orig31 = regs->regs[31];
 122
 123	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 124
 125	/*
 126	 * This load never faults.
 127	 */
 128	__get_inst32(&insn.word, pc, user);
 129
 130	switch (insn.i_format.opcode) {
 131		/*
 132		 * These are instructions that a compiler doesn't generate.  We
 133		 * can assume therefore that the code is MIPS-aware and
 134		 * really buggy.  Emulating these instructions would break the
 135		 * semantics anyway.
 136		 */
 137	case ll_op:
 138	case lld_op:
 139	case sc_op:
 140	case scd_op:
 141
 142		/*
 143		 * For these instructions the only way to create an address
 144		 * error is an attempted access to kernel/supervisor address
 145		 * space.
 146		 */
 147	case ldl_op:
 148	case ldr_op:
 149	case lwl_op:
 150	case lwr_op:
 151	case sdl_op:
 152	case sdr_op:
 153	case swl_op:
 154	case swr_op:
 155	case lb_op:
 156	case lbu_op:
 157	case sb_op:
 158		goto sigbus;
 159
 160		/*
 161		 * The remaining opcodes are the ones that are really of
 162		 * interest.
 163		 */
 164#ifdef CONFIG_MACH_INGENIC
 165	case spec2_op:
 166		if (insn.mxu_lx_format.func != mxu_lx_op)
  167			goto sigbus; /* other MXU instructions we don't care about */
 168
 169		switch (insn.mxu_lx_format.op) {
 170		case mxu_lxw_op:
 171			if (user && !access_ok(addr, 4))
 172				goto sigbus;
 173			LoadW(addr, value, res);
 174			if (res)
 175				goto fault;
 176			compute_return_epc(regs);
 177			regs->regs[insn.mxu_lx_format.rd] = value;
 178			break;
 179		case mxu_lxh_op:
 180			if (user && !access_ok(addr, 2))
 181				goto sigbus;
 182			LoadHW(addr, value, res);
 183			if (res)
 184				goto fault;
 185			compute_return_epc(regs);
 186			regs->regs[insn.dsp_format.rd] = value;
 187			break;
 188		case mxu_lxhu_op:
 189			if (user && !access_ok(addr, 2))
 190				goto sigbus;
 191			LoadHWU(addr, value, res);
 192			if (res)
 193				goto fault;
 194			compute_return_epc(regs);
 195			regs->regs[insn.dsp_format.rd] = value;
 196			break;
 197		case mxu_lxb_op:
 198		case mxu_lxbu_op:
 199			goto sigbus;
 200		default:
 201			goto sigill;
 202		}
 203		break;
 204#endif
 205	case spec3_op:
 206		if (insn.dsp_format.func == lx_op) {
 207			switch (insn.dsp_format.op) {
 208			case lwx_op:
 209				if (user && !access_ok(addr, 4))
 210					goto sigbus;
 211				LoadW(addr, value, res);
 212				if (res)
 213					goto fault;
 214				compute_return_epc(regs);
 215				regs->regs[insn.dsp_format.rd] = value;
 216				break;
 217			case lhx_op:
 218				if (user && !access_ok(addr, 2))
 219					goto sigbus;
 220				LoadHW(addr, value, res);
 221				if (res)
 222					goto fault;
 223				compute_return_epc(regs);
 224				regs->regs[insn.dsp_format.rd] = value;
 225				break;
 226			default:
 227				goto sigill;
 228			}
 229		}
 230#ifdef CONFIG_EVA
 231		else {
 232			/*
  233			 * We can land here only from the kernel accessing user
  234			 * memory, so we need to "switch" the address limit to
  235			 * user space so that the address check works properly.
 236			 */
 237			switch (insn.spec3_format.func) {
 238			case lhe_op:
 239				if (!access_ok(addr, 2))
 240					goto sigbus;
 241				LoadHWE(addr, value, res);
 242				if (res)
 243					goto fault;
 244				compute_return_epc(regs);
 245				regs->regs[insn.spec3_format.rt] = value;
 246				break;
 247			case lwe_op:
 248				if (!access_ok(addr, 4))
 249					goto sigbus;
 250				LoadWE(addr, value, res);
 251				if (res)
 252					goto fault;
 253				compute_return_epc(regs);
 254				regs->regs[insn.spec3_format.rt] = value;
 255				break;
 256			case lhue_op:
 257				if (!access_ok(addr, 2))
 258					goto sigbus;
 259				LoadHWUE(addr, value, res);
 260				if (res)
 261					goto fault;
 262				compute_return_epc(regs);
 263				regs->regs[insn.spec3_format.rt] = value;
 264				break;
 265			case she_op:
 266				if (!access_ok(addr, 2))
 267					goto sigbus;
 268				compute_return_epc(regs);
 269				value = regs->regs[insn.spec3_format.rt];
 270				StoreHWE(addr, value, res);
 271				if (res)
 272					goto fault;
 273				break;
 274			case swe_op:
 275				if (!access_ok(addr, 4))
 276					goto sigbus;
 277				compute_return_epc(regs);
 278				value = regs->regs[insn.spec3_format.rt];
 279				StoreWE(addr, value, res);
 280				if (res)
 281					goto fault;
 282				break;
 283			default:
 284				goto sigill;
 285			}
 286		}
 287#endif
 288		break;
 289	case lh_op:
 290		if (user && !access_ok(addr, 2))
 291			goto sigbus;
 292
 293		if (IS_ENABLED(CONFIG_EVA) && user)
 294			LoadHWE(addr, value, res);
 295		else
 296			LoadHW(addr, value, res);
 297
 298		if (res)
 299			goto fault;
 300		compute_return_epc(regs);
 301		regs->regs[insn.i_format.rt] = value;
 302		break;
 303
 304	case lw_op:
 305		if (user && !access_ok(addr, 4))
 306			goto sigbus;
 307
 308		if (IS_ENABLED(CONFIG_EVA) && user)
 309			LoadWE(addr, value, res);
 310		else
 311			LoadW(addr, value, res);
 312
 313		if (res)
 314			goto fault;
 315		compute_return_epc(regs);
 316		regs->regs[insn.i_format.rt] = value;
 317		break;
 318
 319	case lhu_op:
 320		if (user && !access_ok(addr, 2))
 321			goto sigbus;
 322
 323		if (IS_ENABLED(CONFIG_EVA) && user)
 324			LoadHWUE(addr, value, res);
 325		else
 326			LoadHWU(addr, value, res);
 327
 328		if (res)
 329			goto fault;
 330		compute_return_epc(regs);
 331		regs->regs[insn.i_format.rt] = value;
 332		break;
 333
 334	case lwu_op:
 335#ifdef CONFIG_64BIT
 336		/*
 337		 * A 32-bit kernel might be running on a 64-bit processor.  But
 338		 * if we're on a 32-bit processor and an i-cache incoherency
 339		 * or race makes us see a 64-bit instruction here the sdl/sdr
 340		 * would blow up, so for now we don't handle unaligned 64-bit
 341		 * instructions on 32-bit kernels.
 342		 */
 343		if (user && !access_ok(addr, 4))
 344			goto sigbus;
 345
 346		LoadWU(addr, value, res);
 347		if (res)
 348			goto fault;
 349		compute_return_epc(regs);
 350		regs->regs[insn.i_format.rt] = value;
 351		break;
 352#endif /* CONFIG_64BIT */
 353
 354		/* Cannot handle 64-bit instructions in 32-bit kernel */
 355		goto sigill;
 356
 357	case ld_op:
 358#ifdef CONFIG_64BIT
 359		/*
 360		 * A 32-bit kernel might be running on a 64-bit processor.  But
 361		 * if we're on a 32-bit processor and an i-cache incoherency
 362		 * or race makes us see a 64-bit instruction here the sdl/sdr
 363		 * would blow up, so for now we don't handle unaligned 64-bit
 364		 * instructions on 32-bit kernels.
 365		 */
 366		if (user && !access_ok(addr, 8))
 367			goto sigbus;
 368
 369		LoadDW(addr, value, res);
 370		if (res)
 371			goto fault;
 372		compute_return_epc(regs);
 373		regs->regs[insn.i_format.rt] = value;
 374		break;
 375#endif /* CONFIG_64BIT */
 376
 377		/* Cannot handle 64-bit instructions in 32-bit kernel */
 378		goto sigill;
 379
 380	case sh_op:
 381		if (user && !access_ok(addr, 2))
 382			goto sigbus;
 383
 384		compute_return_epc(regs);
 385		value = regs->regs[insn.i_format.rt];
 386
 387		if (IS_ENABLED(CONFIG_EVA) && user)
 388			StoreHWE(addr, value, res);
 389		else
 390			StoreHW(addr, value, res);
 391
 392		if (res)
 393			goto fault;
 394		break;
 395
 396	case sw_op:
 397		if (user && !access_ok(addr, 4))
 398			goto sigbus;
 399
 400		compute_return_epc(regs);
 401		value = regs->regs[insn.i_format.rt];
 402
 403		if (IS_ENABLED(CONFIG_EVA) && user)
 404			StoreWE(addr, value, res);
 405		else
 406			StoreW(addr, value, res);
 407
 408		if (res)
 409			goto fault;
 410		break;
 411
 412	case sd_op:
 413#ifdef CONFIG_64BIT
 414		/*
 415		 * A 32-bit kernel might be running on a 64-bit processor.  But
 416		 * if we're on a 32-bit processor and an i-cache incoherency
 417		 * or race makes us see a 64-bit instruction here the sdl/sdr
 418		 * would blow up, so for now we don't handle unaligned 64-bit
 419		 * instructions on 32-bit kernels.
 420		 */
 421		if (user && !access_ok(addr, 8))
 422			goto sigbus;
 423
 424		compute_return_epc(regs);
 425		value = regs->regs[insn.i_format.rt];
 426		StoreDW(addr, value, res);
 427		if (res)
 428			goto fault;
 429		break;
 430#endif /* CONFIG_64BIT */
 431
 432		/* Cannot handle 64-bit instructions in 32-bit kernel */
 433		goto sigill;
 434
 435#ifdef CONFIG_MIPS_FP_SUPPORT
 436
 437	case lwc1_op:
 438	case ldc1_op:
 439	case swc1_op:
 440	case sdc1_op:
 441	case cop1x_op: {
 442		void __user *fault_addr = NULL;
 443
 444		die_if_kernel("Unaligned FP access in kernel code", regs);
 445		BUG_ON(!used_math());
 446
 447		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 448					       &fault_addr);
 449		own_fpu(1);	/* Restore FPU state. */
 450
 451		/* Signal if something went wrong. */
 452		process_fpemu_return(res, fault_addr, 0);
 453
 454		if (res == 0)
 455			break;
 456		return;
 457	}
 458#endif /* CONFIG_MIPS_FP_SUPPORT */
 459
 460#ifdef CONFIG_CPU_HAS_MSA
 461
 462	case msa_op: {
 463		unsigned int wd, preempted;
 464		enum msa_2b_fmt df;
 465		union fpureg *fpr;
 466
 467		if (!cpu_has_msa)
 468			goto sigill;
 469
 470		/*
 471		 * If we've reached this point then userland should have taken
 472		 * the MSA disabled exception & initialised vector context at
 473		 * some point in the past.
 474		 */
 475		BUG_ON(!thread_msa_context_live());
 476
 477		df = insn.msa_mi10_format.df;
 478		wd = insn.msa_mi10_format.wd;
 479		fpr = &current->thread.fpu.fpr[wd];
 480
 481		switch (insn.msa_mi10_format.func) {
 482		case msa_ld_op:
 483			if (!access_ok(addr, sizeof(*fpr)))
 484				goto sigbus;
 485
 486			do {
 487				/*
 488				 * If we have live MSA context keep track of
 489				 * whether we get preempted in order to avoid
 490				 * the register context we load being clobbered
 491				 * by the live context as it's saved during
 492				 * preemption. If we don't have live context
 493				 * then it can't be saved to clobber the value
 494				 * we load.
 495				 */
 496				preempted = test_thread_flag(TIF_USEDMSA);
 497
 498				res = __copy_from_user_inatomic(fpr, addr,
 499								sizeof(*fpr));
 500				if (res)
 501					goto fault;
 502
 503				/*
 504				 * Update the hardware register if it is in use
 505				 * by the task in this quantum, in order to
 506				 * avoid having to save & restore the whole
 507				 * vector context.
 508				 */
 509				preempt_disable();
 510				if (test_thread_flag(TIF_USEDMSA)) {
 511					write_msa_wr(wd, fpr, df);
 512					preempted = 0;
 513				}
 514				preempt_enable();
 515			} while (preempted);
 516			break;
 517
 518		case msa_st_op:
 519			if (!access_ok(addr, sizeof(*fpr)))
 520				goto sigbus;
 521
 522			/*
 523			 * Update from the hardware register if it is in use by
 524			 * the task in this quantum, in order to avoid having to
 525			 * save & restore the whole vector context.
 526			 */
 527			preempt_disable();
 528			if (test_thread_flag(TIF_USEDMSA))
 529				read_msa_wr(wd, fpr, df);
 530			preempt_enable();
 531
 532			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
 533			if (res)
 534				goto fault;
 535			break;
 536
 537		default:
 538			goto sigbus;
 539		}
 540
 541		compute_return_epc(regs);
 542		break;
 543	}
 544#endif /* CONFIG_CPU_HAS_MSA */
 545
 546#ifndef CONFIG_CPU_MIPSR6
 547	/*
 548	 * COP2 is available to implementor for application specific use.
 549	 * It's up to applications to register a notifier chain and do
 550	 * whatever they have to do, including possible sending of signals.
 551	 *
 552	 * This instruction has been reallocated in Release 6
 553	 */
 554	case lwc2_op:
 555		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
 556		break;
 557
 558	case ldc2_op:
 559		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
 560		break;
 561
 562	case swc2_op:
 563		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
 564		break;
 565
 566	case sdc2_op:
 567		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
 568		break;
 569#endif
 570	default:
 571		/*
  572		 * Pheeee...  We encountered an as yet unknown instruction or
 573		 * cache coherence problem.  Die sucker, die ...
 574		 */
 575		goto sigill;
 576	}
 577
 578#ifdef CONFIG_DEBUG_FS
 579	unaligned_instructions++;
 580#endif
 581
 582	return;
 583
 584fault:
 585	/* roll back jump/branch */
 586	regs->cp0_epc = origpc;
 587	regs->regs[31] = orig31;
 588	/* Did we have an exception handler installed? */
 589	if (fixup_exception(regs))
 590		return;
 591
 592	die_if_kernel("Unhandled kernel unaligned access", regs);
 593	force_sig(SIGSEGV);
 594
 595	return;
 596
 597sigbus:
 598	die_if_kernel("Unhandled kernel unaligned access", regs);
 599	force_sig(SIGBUS);
 600
 601	return;
 602
 603sigill:
 604	die_if_kernel
 605	    ("Unhandled kernel unaligned access or invalid instruction", regs);
 606	force_sig(SIGILL);
 607}
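
All of the memory traffic in the decoder above goes through the LoadW/LoadHW/StoreW/... macros pulled in from <asm/unaligned-emul.h>: they perform the access in alignment-safe pieces and report a fault through their last argument instead of re-taking the address error exception. The helper below is only a C-level sketch of what LoadW() accomplishes for a user address; the real macros use inline assembly and differ between MIPS ISA revisions, so every detail here should be read as an assumption.

/* Illustrative sketch only: assemble a word from individual byte loads. */
static int sketch_emulated_lw(const unsigned char __user *addr,
			      unsigned long *value)
{
	unsigned char b[4];
	u32 w;
	int i;

	for (i = 0; i < 4; i++)
		if (__get_user(b[i], addr + i))
			return -EFAULT;		/* caller would goto fault */
#ifdef __MIPSEB__
	w = ((u32)b[0] << 24) | ((u32)b[1] << 16) | ((u32)b[2] << 8) | b[3];
#else
	w = ((u32)b[3] << 24) | ((u32)b[2] << 16) | ((u32)b[1] << 8) | b[0];
#endif
	*value = (long)(s32)w;			/* lw sign-extends on 64-bit */
	return 0;
}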
 608
 609/* Recode table from 16-bit register notation to 32-bit GPR. */
 610const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
 611
 612/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
 613static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
 614
 615static void emulate_load_store_microMIPS(struct pt_regs *regs,
 616					 void __user *addr)
 617{
 618	unsigned long value;
 619	unsigned int res;
 620	int i;
 621	unsigned int reg = 0, rvar;
 622	unsigned long orig31;
 623	u16 __user *pc16;
 624	u16 halfword;
 625	unsigned int word;
 626	unsigned long origpc, contpc;
 627	union mips_instruction insn;
 628	struct mm_decoded_insn mminsn;
 629	bool user = user_mode(regs);
 630
 631	origpc = regs->cp0_epc;
 632	orig31 = regs->regs[31];
 633
 634	mminsn.micro_mips_mode = 1;
 635
 636	/*
 637	 * This load never faults.
 638	 */
 639	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
 640	__get_user(halfword, pc16);
 641	pc16++;
 642	contpc = regs->cp0_epc + 2;
 643	word = ((unsigned int)halfword << 16);
 644	mminsn.pc_inc = 2;
 645
 646	if (!mm_insn_16bit(halfword)) {
 647		__get_user(halfword, pc16);
 648		pc16++;
 649		contpc = regs->cp0_epc + 4;
 650		mminsn.pc_inc = 4;
 651		word |= halfword;
 652	}
 653	mminsn.insn = word;
 654
 655	if (get_user(halfword, pc16))
 656		goto fault;
 657	mminsn.next_pc_inc = 2;
 658	word = ((unsigned int)halfword << 16);
 659
 660	if (!mm_insn_16bit(halfword)) {
 661		pc16++;
 662		if (get_user(halfword, pc16))
 663			goto fault;
 664		mminsn.next_pc_inc = 4;
 665		word |= halfword;
 666	}
 667	mminsn.next_insn = word;
 668
 669	insn = (union mips_instruction)(mminsn.insn);
 670	if (mm_isBranchInstr(regs, mminsn, &contpc))
 671		insn = (union mips_instruction)(mminsn.next_insn);
 672
 673	/*  Parse instruction to find what to do */
 674
 675	switch (insn.mm_i_format.opcode) {
 676
 677	case mm_pool32a_op:
 678		switch (insn.mm_x_format.func) {
 679		case mm_lwxs_op:
 680			reg = insn.mm_x_format.rd;
 681			goto loadW;
 682		}
 683
 684		goto sigbus;
 685
 686	case mm_pool32b_op:
 687		switch (insn.mm_m_format.func) {
 688		case mm_lwp_func:
 689			reg = insn.mm_m_format.rd;
 690			if (reg == 31)
 691				goto sigbus;
 692
 693			if (user && !access_ok(addr, 8))
 694				goto sigbus;
 695
 696			LoadW(addr, value, res);
 697			if (res)
 698				goto fault;
 699			regs->regs[reg] = value;
 700			addr += 4;
 701			LoadW(addr, value, res);
 702			if (res)
 703				goto fault;
 704			regs->regs[reg + 1] = value;
 705			goto success;
 706
 707		case mm_swp_func:
 708			reg = insn.mm_m_format.rd;
 709			if (reg == 31)
 710				goto sigbus;
 711
 712			if (user && !access_ok(addr, 8))
 713				goto sigbus;
 714
 715			value = regs->regs[reg];
 716			StoreW(addr, value, res);
 717			if (res)
 718				goto fault;
 719			addr += 4;
 720			value = regs->regs[reg + 1];
 721			StoreW(addr, value, res);
 722			if (res)
 723				goto fault;
 724			goto success;
 725
 726		case mm_ldp_func:
 727#ifdef CONFIG_64BIT
 728			reg = insn.mm_m_format.rd;
 729			if (reg == 31)
 730				goto sigbus;
 731
 732			if (user && !access_ok(addr, 16))
 733				goto sigbus;
 734
 735			LoadDW(addr, value, res);
 736			if (res)
 737				goto fault;
 738			regs->regs[reg] = value;
 739			addr += 8;
 740			LoadDW(addr, value, res);
 741			if (res)
 742				goto fault;
 743			regs->regs[reg + 1] = value;
 744			goto success;
 745#endif /* CONFIG_64BIT */
 746
 747			goto sigill;
 748
 749		case mm_sdp_func:
 750#ifdef CONFIG_64BIT
 751			reg = insn.mm_m_format.rd;
 752			if (reg == 31)
 753				goto sigbus;
 754
 755			if (user && !access_ok(addr, 16))
 756				goto sigbus;
 757
 758			value = regs->regs[reg];
 759			StoreDW(addr, value, res);
 760			if (res)
 761				goto fault;
 762			addr += 8;
 763			value = regs->regs[reg + 1];
 764			StoreDW(addr, value, res);
 765			if (res)
 766				goto fault;
 767			goto success;
 768#endif /* CONFIG_64BIT */
 769
 770			goto sigill;
 771
 772		case mm_lwm32_func:
 773			reg = insn.mm_m_format.rd;
 774			rvar = reg & 0xf;
 775			if ((rvar > 9) || !reg)
 776				goto sigill;
 777			if (reg & 0x10) {
 778				if (user && !access_ok(addr, 4 * (rvar + 1)))
 779					goto sigbus;
 780			} else {
 781				if (user && !access_ok(addr, 4 * rvar))
 782					goto sigbus;
 783			}
 784			if (rvar == 9)
 785				rvar = 8;
 786			for (i = 16; rvar; rvar--, i++) {
 787				LoadW(addr, value, res);
 788				if (res)
 789					goto fault;
 790				addr += 4;
 791				regs->regs[i] = value;
 792			}
 793			if ((reg & 0xf) == 9) {
 794				LoadW(addr, value, res);
 795				if (res)
 796					goto fault;
 797				addr += 4;
 798				regs->regs[30] = value;
 799			}
 800			if (reg & 0x10) {
 801				LoadW(addr, value, res);
 802				if (res)
 803					goto fault;
 804				regs->regs[31] = value;
 805			}
 806			goto success;
 807
 808		case mm_swm32_func:
 809			reg = insn.mm_m_format.rd;
 810			rvar = reg & 0xf;
 811			if ((rvar > 9) || !reg)
 812				goto sigill;
 813			if (reg & 0x10) {
 814				if (user && !access_ok(addr, 4 * (rvar + 1)))
 815					goto sigbus;
 816			} else {
 817				if (user && !access_ok(addr, 4 * rvar))
 818					goto sigbus;
 819			}
 820			if (rvar == 9)
 821				rvar = 8;
 822			for (i = 16; rvar; rvar--, i++) {
 823				value = regs->regs[i];
 824				StoreW(addr, value, res);
 825				if (res)
 826					goto fault;
 827				addr += 4;
 828			}
 829			if ((reg & 0xf) == 9) {
 830				value = regs->regs[30];
 831				StoreW(addr, value, res);
 832				if (res)
 833					goto fault;
 834				addr += 4;
 835			}
 836			if (reg & 0x10) {
 837				value = regs->regs[31];
 838				StoreW(addr, value, res);
 839				if (res)
 840					goto fault;
 841			}
 842			goto success;
 843
 844		case mm_ldm_func:
 845#ifdef CONFIG_64BIT
 846			reg = insn.mm_m_format.rd;
 847			rvar = reg & 0xf;
 848			if ((rvar > 9) || !reg)
 849				goto sigill;
 850			if (reg & 0x10) {
 851				if (user && !access_ok(addr, 8 * (rvar + 1)))
 852					goto sigbus;
 853			} else {
 854				if (user && !access_ok(addr, 8 * rvar))
 855					goto sigbus;
 856			}
 857			if (rvar == 9)
 858				rvar = 8;
 859
 860			for (i = 16; rvar; rvar--, i++) {
 861				LoadDW(addr, value, res);
 862				if (res)
 863					goto fault;
  864				addr += 8;	/* LDM transfers doublewords */
 865				regs->regs[i] = value;
 866			}
 867			if ((reg & 0xf) == 9) {
 868				LoadDW(addr, value, res);
 869				if (res)
 870					goto fault;
 871				addr += 8;
 872				regs->regs[30] = value;
 873			}
 874			if (reg & 0x10) {
 875				LoadDW(addr, value, res);
 876				if (res)
 877					goto fault;
 878				regs->regs[31] = value;
 879			}
 880			goto success;
 881#endif /* CONFIG_64BIT */
 882
 883			goto sigill;
 884
 885		case mm_sdm_func:
 886#ifdef CONFIG_64BIT
 887			reg = insn.mm_m_format.rd;
 888			rvar = reg & 0xf;
 889			if ((rvar > 9) || !reg)
 890				goto sigill;
 891			if (reg & 0x10) {
 892				if (user && !access_ok(addr, 8 * (rvar + 1)))
 893					goto sigbus;
 894			} else {
 895				if (user && !access_ok(addr, 8 * rvar))
 896					goto sigbus;
 897			}
 898			if (rvar == 9)
 899				rvar = 8;
 900
 901			for (i = 16; rvar; rvar--, i++) {
 902				value = regs->regs[i];
 903				StoreDW(addr, value, res);
 904				if (res)
 905					goto fault;
 906				addr += 8;
 907			}
 908			if ((reg & 0xf) == 9) {
 909				value = regs->regs[30];
 910				StoreDW(addr, value, res);
 911				if (res)
 912					goto fault;
 913				addr += 8;
 914			}
 915			if (reg & 0x10) {
 916				value = regs->regs[31];
 917				StoreDW(addr, value, res);
 918				if (res)
 919					goto fault;
 920			}
 921			goto success;
 922#endif /* CONFIG_64BIT */
 923
 924			goto sigill;
 925
 926			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
 927		}
 928
 929		goto sigbus;
 930
 931	case mm_pool32c_op:
 932		switch (insn.mm_m_format.func) {
 933		case mm_lwu_func:
 934			reg = insn.mm_m_format.rd;
 935			goto loadWU;
 936		}
 937
 938		/*  LL,SC,LLD,SCD are not serviced */
 939		goto sigbus;
 940
 941#ifdef CONFIG_MIPS_FP_SUPPORT
 942	case mm_pool32f_op:
 943		switch (insn.mm_x_format.func) {
 944		case mm_lwxc1_func:
 945		case mm_swxc1_func:
 946		case mm_ldxc1_func:
 947		case mm_sdxc1_func:
 948			goto fpu_emul;
 949		}
 950
 951		goto sigbus;
 952
 953	case mm_ldc132_op:
 954	case mm_sdc132_op:
 955	case mm_lwc132_op:
 956	case mm_swc132_op: {
 957		void __user *fault_addr = NULL;
 958
 959fpu_emul:
 960		/* roll back jump/branch */
 961		regs->cp0_epc = origpc;
 962		regs->regs[31] = orig31;
 963
 964		die_if_kernel("Unaligned FP access in kernel code", regs);
 965		BUG_ON(!used_math());
 966		BUG_ON(!is_fpu_owner());
 967
 968		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 969					       &fault_addr);
 970		own_fpu(1);	/* restore FPU state */
 971
 972		/* If something went wrong, signal */
 973		process_fpemu_return(res, fault_addr, 0);
 974
 975		if (res == 0)
 976			goto success;
 977		return;
 978	}
 979#endif /* CONFIG_MIPS_FP_SUPPORT */
 980
 981	case mm_lh32_op:
 982		reg = insn.mm_i_format.rt;
 983		goto loadHW;
 984
 985	case mm_lhu32_op:
 986		reg = insn.mm_i_format.rt;
 987		goto loadHWU;
 988
 989	case mm_lw32_op:
 990		reg = insn.mm_i_format.rt;
 991		goto loadW;
 992
 993	case mm_sh32_op:
 994		reg = insn.mm_i_format.rt;
 995		goto storeHW;
 996
 997	case mm_sw32_op:
 998		reg = insn.mm_i_format.rt;
 999		goto storeW;
1000
1001	case mm_ld32_op:
1002		reg = insn.mm_i_format.rt;
1003		goto loadDW;
1004
1005	case mm_sd32_op:
1006		reg = insn.mm_i_format.rt;
1007		goto storeDW;
1008
1009	case mm_pool16c_op:
1010		switch (insn.mm16_m_format.func) {
1011		case mm_lwm16_op:
1012			reg = insn.mm16_m_format.rlist;
1013			rvar = reg + 1;
1014			if (user && !access_ok(addr, 4 * rvar))
1015				goto sigbus;
1016
1017			for (i = 16; rvar; rvar--, i++) {
1018				LoadW(addr, value, res);
1019				if (res)
1020					goto fault;
1021				addr += 4;
1022				regs->regs[i] = value;
1023			}
1024			LoadW(addr, value, res);
1025			if (res)
1026				goto fault;
1027			regs->regs[31] = value;
1028
1029			goto success;
1030
1031		case mm_swm16_op:
1032			reg = insn.mm16_m_format.rlist;
1033			rvar = reg + 1;
1034			if (user && !access_ok(addr, 4 * rvar))
1035				goto sigbus;
1036
1037			for (i = 16; rvar; rvar--, i++) {
1038				value = regs->regs[i];
1039				StoreW(addr, value, res);
1040				if (res)
1041					goto fault;
1042				addr += 4;
1043			}
1044			value = regs->regs[31];
1045			StoreW(addr, value, res);
1046			if (res)
1047				goto fault;
1048
1049			goto success;
1050
1051		}
1052
1053		goto sigbus;
1054
1055	case mm_lhu16_op:
1056		reg = reg16to32[insn.mm16_rb_format.rt];
1057		goto loadHWU;
1058
1059	case mm_lw16_op:
1060		reg = reg16to32[insn.mm16_rb_format.rt];
1061		goto loadW;
1062
1063	case mm_sh16_op:
1064		reg = reg16to32st[insn.mm16_rb_format.rt];
1065		goto storeHW;
1066
1067	case mm_sw16_op:
1068		reg = reg16to32st[insn.mm16_rb_format.rt];
1069		goto storeW;
1070
1071	case mm_lwsp16_op:
1072		reg = insn.mm16_r5_format.rt;
1073		goto loadW;
1074
1075	case mm_swsp16_op:
1076		reg = insn.mm16_r5_format.rt;
1077		goto storeW;
1078
1079	case mm_lwgp16_op:
1080		reg = reg16to32[insn.mm16_r3_format.rt];
1081		goto loadW;
1082
1083	default:
1084		goto sigill;
1085	}
1086
1087loadHW:
1088	if (user && !access_ok(addr, 2))
1089		goto sigbus;
1090
1091	LoadHW(addr, value, res);
1092	if (res)
1093		goto fault;
1094	regs->regs[reg] = value;
1095	goto success;
1096
1097loadHWU:
1098	if (user && !access_ok(addr, 2))
1099		goto sigbus;
1100
1101	LoadHWU(addr, value, res);
1102	if (res)
1103		goto fault;
1104	regs->regs[reg] = value;
1105	goto success;
1106
1107loadW:
1108	if (user && !access_ok(addr, 4))
1109		goto sigbus;
1110
1111	LoadW(addr, value, res);
1112	if (res)
1113		goto fault;
1114	regs->regs[reg] = value;
1115	goto success;
1116
1117loadWU:
1118#ifdef CONFIG_64BIT
1119	/*
1120	 * A 32-bit kernel might be running on a 64-bit processor.  But
1121	 * if we're on a 32-bit processor and an i-cache incoherency
1122	 * or race makes us see a 64-bit instruction here the sdl/sdr
1123	 * would blow up, so for now we don't handle unaligned 64-bit
1124	 * instructions on 32-bit kernels.
1125	 */
1126	if (user && !access_ok(addr, 4))
1127		goto sigbus;
1128
1129	LoadWU(addr, value, res);
1130	if (res)
1131		goto fault;
1132	regs->regs[reg] = value;
1133	goto success;
1134#endif /* CONFIG_64BIT */
1135
1136	/* Cannot handle 64-bit instructions in 32-bit kernel */
1137	goto sigill;
1138
1139loadDW:
1140#ifdef CONFIG_64BIT
1141	/*
1142	 * A 32-bit kernel might be running on a 64-bit processor.  But
1143	 * if we're on a 32-bit processor and an i-cache incoherency
1144	 * or race makes us see a 64-bit instruction here the sdl/sdr
1145	 * would blow up, so for now we don't handle unaligned 64-bit
1146	 * instructions on 32-bit kernels.
1147	 */
1148	if (user && !access_ok(addr, 8))
1149		goto sigbus;
1150
1151	LoadDW(addr, value, res);
1152	if (res)
1153		goto fault;
1154	regs->regs[reg] = value;
1155	goto success;
1156#endif /* CONFIG_64BIT */
1157
1158	/* Cannot handle 64-bit instructions in 32-bit kernel */
1159	goto sigill;
1160
1161storeHW:
1162	if (user && !access_ok(addr, 2))
1163		goto sigbus;
1164
1165	value = regs->regs[reg];
1166	StoreHW(addr, value, res);
1167	if (res)
1168		goto fault;
1169	goto success;
1170
1171storeW:
1172	if (user && !access_ok(addr, 4))
1173		goto sigbus;
1174
1175	value = regs->regs[reg];
1176	StoreW(addr, value, res);
1177	if (res)
1178		goto fault;
1179	goto success;
1180
1181storeDW:
1182#ifdef CONFIG_64BIT
1183	/*
1184	 * A 32-bit kernel might be running on a 64-bit processor.  But
1185	 * if we're on a 32-bit processor and an i-cache incoherency
1186	 * or race makes us see a 64-bit instruction here the sdl/sdr
1187	 * would blow up, so for now we don't handle unaligned 64-bit
1188	 * instructions on 32-bit kernels.
1189	 */
1190	if (user && !access_ok(addr, 8))
1191		goto sigbus;
1192
1193	value = regs->regs[reg];
1194	StoreDW(addr, value, res);
1195	if (res)
1196		goto fault;
1197	goto success;
1198#endif /* CONFIG_64BIT */
1199
1200	/* Cannot handle 64-bit instructions in 32-bit kernel */
1201	goto sigill;
1202
1203success:
1204	regs->cp0_epc = contpc;	/* advance or branch */
1205
1206#ifdef CONFIG_DEBUG_FS
1207	unaligned_instructions++;
1208#endif
1209	return;
1210
1211fault:
1212	/* roll back jump/branch */
1213	regs->cp0_epc = origpc;
1214	regs->regs[31] = orig31;
1215	/* Did we have an exception handler installed? */
1216	if (fixup_exception(regs))
1217		return;
1218
1219	die_if_kernel("Unhandled kernel unaligned access", regs);
1220	force_sig(SIGSEGV);
1221
1222	return;
1223
1224sigbus:
1225	die_if_kernel("Unhandled kernel unaligned access", regs);
1226	force_sig(SIGBUS);
1227
1228	return;
1229
1230sigill:
1231	die_if_kernel
1232	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1233	force_sig(SIGILL);
1234}
1235
1236static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
1237{
1238	unsigned long value;
1239	unsigned int res;
1240	int reg;
1241	unsigned long orig31;
1242	u16 __user *pc16;
1243	unsigned long origpc;
1244	union mips16e_instruction mips16inst, oldinst;
1245	unsigned int opcode;
1246	int extended = 0;
1247	bool user = user_mode(regs);
1248
1249	origpc = regs->cp0_epc;
1250	orig31 = regs->regs[31];
1251	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1252	/*
1253	 * This load never faults.
1254	 */
1255	__get_user(mips16inst.full, pc16);
1256	oldinst = mips16inst;
1257
1258	/* skip EXTEND instruction */
1259	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1260		extended = 1;
1261		pc16++;
1262		__get_user(mips16inst.full, pc16);
1263	} else if (delay_slot(regs)) {
1264		/*  skip jump instructions */
1265		/*  JAL/JALX are 32 bits but have OPCODE in first short int */
1266		if (mips16inst.ri.opcode == MIPS16e_jal_op)
1267			pc16++;
1268		pc16++;
1269		if (get_user(mips16inst.full, pc16))
1270			goto sigbus;
1271	}
1272
1273	opcode = mips16inst.ri.opcode;
1274	switch (opcode) {
1275	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
1276		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
1277		case MIPS16e_ldpc_func:
1278		case MIPS16e_ldsp_func:
1279			reg = reg16to32[mips16inst.ri64.ry];
1280			goto loadDW;
1281
1282		case MIPS16e_sdsp_func:
1283			reg = reg16to32[mips16inst.ri64.ry];
1284			goto writeDW;
1285
1286		case MIPS16e_sdrasp_func:
1287			reg = 29;	/* GPRSP */
1288			goto writeDW;
1289		}
1290
1291		goto sigbus;
1292
1293	case MIPS16e_swsp_op:
1294		reg = reg16to32[mips16inst.ri.rx];
1295		if (extended && cpu_has_mips16e2)
1296			switch (mips16inst.ri.imm >> 5) {
1297			case 0:		/* SWSP */
1298			case 1:		/* SWGP */
1299				break;
1300			case 2:		/* SHGP */
1301				opcode = MIPS16e_sh_op;
1302				break;
1303			default:
1304				goto sigbus;
1305			}
1306		break;
1307
1308	case MIPS16e_lwpc_op:
1309		reg = reg16to32[mips16inst.ri.rx];
1310		break;
1311
1312	case MIPS16e_lwsp_op:
1313		reg = reg16to32[mips16inst.ri.rx];
1314		if (extended && cpu_has_mips16e2)
1315			switch (mips16inst.ri.imm >> 5) {
1316			case 0:		/* LWSP */
1317			case 1:		/* LWGP */
1318				break;
1319			case 2:		/* LHGP */
1320				opcode = MIPS16e_lh_op;
1321				break;
1322			case 4:		/* LHUGP */
1323				opcode = MIPS16e_lhu_op;
1324				break;
1325			default:
1326				goto sigbus;
1327			}
1328		break;
1329
1330	case MIPS16e_i8_op:
1331		if (mips16inst.i8.func != MIPS16e_swrasp_func)
1332			goto sigbus;
1333		reg = 29;	/* GPRSP */
1334		break;
1335
1336	default:
1337		reg = reg16to32[mips16inst.rri.ry];
1338		break;
1339	}
1340
1341	switch (opcode) {
1342
1343	case MIPS16e_lb_op:
1344	case MIPS16e_lbu_op:
1345	case MIPS16e_sb_op:
1346		goto sigbus;
1347
1348	case MIPS16e_lh_op:
1349		if (user && !access_ok(addr, 2))
1350			goto sigbus;
1351
1352		LoadHW(addr, value, res);
1353		if (res)
1354			goto fault;
1355		MIPS16e_compute_return_epc(regs, &oldinst);
1356		regs->regs[reg] = value;
1357		break;
1358
1359	case MIPS16e_lhu_op:
1360		if (user && !access_ok(addr, 2))
1361			goto sigbus;
1362
1363		LoadHWU(addr, value, res);
1364		if (res)
1365			goto fault;
1366		MIPS16e_compute_return_epc(regs, &oldinst);
1367		regs->regs[reg] = value;
1368		break;
1369
1370	case MIPS16e_lw_op:
1371	case MIPS16e_lwpc_op:
1372	case MIPS16e_lwsp_op:
1373		if (user && !access_ok(addr, 4))
1374			goto sigbus;
1375
1376		LoadW(addr, value, res);
1377		if (res)
1378			goto fault;
1379		MIPS16e_compute_return_epc(regs, &oldinst);
1380		regs->regs[reg] = value;
1381		break;
1382
1383	case MIPS16e_lwu_op:
1384#ifdef CONFIG_64BIT
1385		/*
1386		 * A 32-bit kernel might be running on a 64-bit processor.  But
1387		 * if we're on a 32-bit processor and an i-cache incoherency
1388		 * or race makes us see a 64-bit instruction here the sdl/sdr
1389		 * would blow up, so for now we don't handle unaligned 64-bit
1390		 * instructions on 32-bit kernels.
1391		 */
1392		if (user && !access_ok(addr, 4))
1393			goto sigbus;
1394
1395		LoadWU(addr, value, res);
1396		if (res)
1397			goto fault;
1398		MIPS16e_compute_return_epc(regs, &oldinst);
1399		regs->regs[reg] = value;
1400		break;
1401#endif /* CONFIG_64BIT */
1402
1403		/* Cannot handle 64-bit instructions in 32-bit kernel */
1404		goto sigill;
1405
1406	case MIPS16e_ld_op:
1407loadDW:
1408#ifdef CONFIG_64BIT
1409		/*
1410		 * A 32-bit kernel might be running on a 64-bit processor.  But
1411		 * if we're on a 32-bit processor and an i-cache incoherency
1412		 * or race makes us see a 64-bit instruction here the sdl/sdr
1413		 * would blow up, so for now we don't handle unaligned 64-bit
1414		 * instructions on 32-bit kernels.
1415		 */
1416		if (user && !access_ok(addr, 8))
1417			goto sigbus;
1418
1419		LoadDW(addr, value, res);
1420		if (res)
1421			goto fault;
1422		MIPS16e_compute_return_epc(regs, &oldinst);
1423		regs->regs[reg] = value;
1424		break;
1425#endif /* CONFIG_64BIT */
1426
1427		/* Cannot handle 64-bit instructions in 32-bit kernel */
1428		goto sigill;
1429
1430	case MIPS16e_sh_op:
1431		if (user && !access_ok(addr, 2))
1432			goto sigbus;
1433
1434		MIPS16e_compute_return_epc(regs, &oldinst);
1435		value = regs->regs[reg];
1436		StoreHW(addr, value, res);
1437		if (res)
1438			goto fault;
1439		break;
1440
1441	case MIPS16e_sw_op:
1442	case MIPS16e_swsp_op:
1443	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
1444		if (user && !access_ok(addr, 4))
1445			goto sigbus;
1446
1447		MIPS16e_compute_return_epc(regs, &oldinst);
1448		value = regs->regs[reg];
1449		StoreW(addr, value, res);
1450		if (res)
1451			goto fault;
1452		break;
1453
1454	case MIPS16e_sd_op:
1455writeDW:
1456#ifdef CONFIG_64BIT
1457		/*
1458		 * A 32-bit kernel might be running on a 64-bit processor.  But
1459		 * if we're on a 32-bit processor and an i-cache incoherency
1460		 * or race makes us see a 64-bit instruction here the sdl/sdr
1461		 * would blow up, so for now we don't handle unaligned 64-bit
1462		 * instructions on 32-bit kernels.
1463		 */
1464		if (user && !access_ok(addr, 8))
1465			goto sigbus;
1466
1467		MIPS16e_compute_return_epc(regs, &oldinst);
1468		value = regs->regs[reg];
1469		StoreDW(addr, value, res);
1470		if (res)
1471			goto fault;
1472		break;
1473#endif /* CONFIG_64BIT */
1474
1475		/* Cannot handle 64-bit instructions in 32-bit kernel */
1476		goto sigill;
1477
1478	default:
1479		/*
1480		 * Pheeee...  We encountered an yet unknown instruction or
1481		 * cache coherence problem.  Die sucker, die ...
1482		 */
1483		goto sigill;
1484	}
1485
1486#ifdef CONFIG_DEBUG_FS
1487	unaligned_instructions++;
1488#endif
1489
1490	return;
1491
1492fault:
1493	/* roll back jump/branch */
1494	regs->cp0_epc = origpc;
1495	regs->regs[31] = orig31;
1496	/* Did we have an exception handler installed? */
1497	if (fixup_exception(regs))
1498		return;
1499
1500	die_if_kernel("Unhandled kernel unaligned access", regs);
1501	force_sig(SIGSEGV);
1502
1503	return;
1504
1505sigbus:
1506	die_if_kernel("Unhandled kernel unaligned access", regs);
1507	force_sig(SIGBUS);
1508
1509	return;
1510
1511sigill:
1512	die_if_kernel
1513	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1514	force_sig(SIGILL);
1515}
1516
1517asmlinkage void do_ade(struct pt_regs *regs)
1518{
1519	enum ctx_state prev_state;
1520	unsigned int *pc;
1521
1522	prev_state = exception_enter();
1523	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1524			1, regs, regs->cp0_badvaddr);
1525
1526#ifdef CONFIG_64BIT
1527	/*
 1528	 * Check whether we are hitting the space between the CPU-implemented
 1529	 * maximum virtual user address and the 64-bit maximum virtual user
 1530	 * address, and do exception handling to get EFAULTs for get_user/put_user.
1531	 */
1532	if ((regs->cp0_badvaddr >= (1UL << cpu_vmbits)) &&
1533	    (regs->cp0_badvaddr < XKSSEG)) {
1534		if (fixup_exception(regs)) {
1535			current->thread.cp0_baduaddr = regs->cp0_badvaddr;
1536			return;
1537		}
1538		goto sigbus;
1539	}
1540#endif
1541
1542	/*
1543	 * Did we catch a fault trying to load an instruction?
1544	 */
1545	if (regs->cp0_badvaddr == regs->cp0_epc)
1546		goto sigbus;
1547
1548	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
1549		goto sigbus;
1550	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
1551		goto sigbus;
1552
1553	/*
1554	 * Do branch emulation only if we didn't forward the exception.
 1555	 * This is all rather ugly ...
1556	 */
1557
1558	/*
1559	 * Are we running in microMIPS mode?
1560	 */
1561	if (get_isa16_mode(regs->cp0_epc)) {
1562		/*
1563		 * Did we catch a fault trying to load an instruction in
1564		 * 16-bit mode?
1565		 */
1566		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
1567			goto sigbus;
1568		if (unaligned_action == UNALIGNED_ACTION_SHOW)
1569			show_registers(regs);
1570
1571		if (cpu_has_mmips) {
1572			emulate_load_store_microMIPS(regs,
1573				(void __user *)regs->cp0_badvaddr);
1574			return;
1575		}
1576
1577		if (cpu_has_mips16) {
1578			emulate_load_store_MIPS16e(regs,
1579				(void __user *)regs->cp0_badvaddr);
1580			return;
1581		}
1582
1583		goto sigbus;
1584	}
1585
1586	if (unaligned_action == UNALIGNED_ACTION_SHOW)
1587		show_registers(regs);
1588	pc = (unsigned int *)exception_epc(regs);
1589
1590	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
1591
1592	return;
1593
1594sigbus:
1595	die_if_kernel("Kernel unaligned instruction access", regs);
1596	force_sig(SIGBUS);
1597
1598	/*
1599	 * XXX On return from the signal handler we should advance the epc
1600	 */
1601	exception_exit(prev_state);
1602}
1603
1604#ifdef CONFIG_DEBUG_FS
1605static int __init debugfs_unaligned(void)
1606{
1607	debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
1608			   &unaligned_instructions);
1609	debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
1610			   mips_debugfs_dir, &unaligned_action);
1611	return 0;
1612}
1613arch_initcall(debugfs_unaligned);
1614#endif
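
When CONFIG_DEBUG_FS is enabled, the two counters registered above appear under the mips directory of debugfs, so the emulation rate can be watched (and unaligned_action tuned) from user space. The reader below is only an illustrative user-space sketch; the /sys/kernel/debug mount point and the mips/unaligned_instructions path are assumptions derived from the debugfs_create_u32() calls above, not something this file guarantees.

/* Illustrative user-space sketch: read the unaligned-emulation counter. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/mips/unaligned_instructions", "r");
	unsigned int count;

	if (!f) {
		perror("unaligned_instructions");
		return 1;
	}
	if (fscanf(f, "%u", &count) != 1)
		count = 0;
	fclose(f);
	printf("unaligned accesses emulated: %u\n", count);
	return 0;
}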