   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (c) 2014 Imagination Technologies Ltd.
   7 * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
   8 * Author: Markos Chandras <markos.chandras@imgtec.com>
   9 *
  10 *      MIPS R2 user space instruction emulator for MIPS R6
  11 *
  12 */
  13#include <linux/bug.h>
  14#include <linux/compiler.h>
  15#include <linux/debugfs.h>
  16#include <linux/init.h>
  17#include <linux/kernel.h>
  18#include <linux/ptrace.h>
  19#include <linux/seq_file.h>
  20
  21#include <asm/asm.h>
  22#include <asm/branch.h>
  23#include <asm/break.h>
  24#include <asm/debug.h>
  25#include <asm/fpu.h>
  26#include <asm/fpu_emulator.h>
  27#include <asm/inst.h>
  28#include <asm/mips-r2-to-r6-emul.h>
  29#include <asm/local.h>
  30#include <asm/mipsregs.h>
  31#include <asm/ptrace.h>
  32#include <linux/uaccess.h>
  33
  34#ifdef CONFIG_64BIT
  35#define ADDIU	"daddiu "
  36#define INS	"dins "
  37#define EXT	"dext "
  38#else
  39#define ADDIU	"addiu "
  40#define INS	"ins "
  41#define EXT	"ext "
  42#endif /* CONFIG_64BIT */
  43
  44#define SB	"sb "
  45#define LB	"lb "
  46#define LL	"ll "
  47#define SC	"sc "
  48
  49#ifdef CONFIG_DEBUG_FS
  50static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
  51static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
  52static DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
  53#endif
  54
  55extern const unsigned int fpucondbit[8];
  56
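/*
 * Cap on how many R2 instructions are emulated back to back before control
 * returns to userland (see the pass loop at the end of mipsr2_decoder()).
 */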
  57#define MIPS_R2_EMUL_TOTAL_PASS	10
  58
  59int mipsr2_emulation = 0;
  60
  61static int __init mipsr2emu_enable(char *s)
  62{
  63	mipsr2_emulation = 1;
  64
  65	pr_info("MIPS R2-to-R6 Emulator Enabled!");
  66
  67	return 1;
  68}
  69__setup("mipsr2emu", mipsr2emu_enable);
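
/*
 * The emulator is off by default (mipsr2_emulation == 0) and is switched on
 * by booting with the bare "mipsr2emu" kernel command-line parameter, for
 * example (illustrative command line only):
 *
 *	console=ttyS0,115200 root=/dev/sda1 mipsr2emu
 */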
  70
  71/**
  72 * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in a delay
  73 * slot, for performance, instead of the traditional but rather slow
  74 * stack-trampoline approach.
  75 * @regs: Process register set
  76 * @ir: Instruction
  77 */
  78static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
  79{
  80	switch (MIPSInst_OPCODE(ir)) {
  81	case addiu_op:
  82		if (MIPSInst_RT(ir))
  83			regs->regs[MIPSInst_RT(ir)] =
  84				(s32)regs->regs[MIPSInst_RS(ir)] +
  85				(s32)MIPSInst_SIMM(ir);
  86		return 0;
  87	case daddiu_op:
  88		if (IS_ENABLED(CONFIG_32BIT))
  89			break;
  90
  91		if (MIPSInst_RT(ir))
  92			regs->regs[MIPSInst_RT(ir)] =
  93				(s64)regs->regs[MIPSInst_RS(ir)] +
  94				(s64)MIPSInst_SIMM(ir);
  95		return 0;
  96	case lwc1_op:
  97	case swc1_op:
  98	case cop1_op:
  99	case cop1x_op:
 100		/* FPU instructions in delay slot */
 101		return -SIGFPE;
 102	case spec_op:
 103		switch (MIPSInst_FUNC(ir)) {
 104		case or_op:
 105			if (MIPSInst_RD(ir))
 106				regs->regs[MIPSInst_RD(ir)] =
 107					regs->regs[MIPSInst_RS(ir)] |
 108					regs->regs[MIPSInst_RT(ir)];
 109			return 0;
 110		case sll_op:
 111			if (MIPSInst_RS(ir))
 112				break;
 113
 114			if (MIPSInst_RD(ir))
 115				regs->regs[MIPSInst_RD(ir)] =
 116					(s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
 117						MIPSInst_FD(ir));
 118			return 0;
 119		case srl_op:
 120			if (MIPSInst_RS(ir))
 121				break;
 122
 123			if (MIPSInst_RD(ir))
 124				regs->regs[MIPSInst_RD(ir)] =
 125					(s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
 126						MIPSInst_FD(ir));
 127			return 0;
 128		case addu_op:
 129			if (MIPSInst_FD(ir))
 130				break;
 131
 132			if (MIPSInst_RD(ir))
 133				regs->regs[MIPSInst_RD(ir)] =
 134					(s32)((u32)regs->regs[MIPSInst_RS(ir)] +
 135					      (u32)regs->regs[MIPSInst_RT(ir)]);
 136			return 0;
 137		case subu_op:
 138			if (MIPSInst_FD(ir))
 139				break;
 140
 141			if (MIPSInst_RD(ir))
 142				regs->regs[MIPSInst_RD(ir)] =
 143					(s32)((u32)regs->regs[MIPSInst_RS(ir)] -
 144					      (u32)regs->regs[MIPSInst_RT(ir)]);
 145			return 0;
 146		case dsll_op:
 147			if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
 148				break;
 149
 150			if (MIPSInst_RD(ir))
 151				regs->regs[MIPSInst_RD(ir)] =
 152					(s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
 153						MIPSInst_FD(ir));
 154			return 0;
 155		case dsrl_op:
 156			if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
 157				break;
 158
 159			if (MIPSInst_RD(ir))
 160				regs->regs[MIPSInst_RD(ir)] =
 161					(s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
 162						MIPSInst_FD(ir));
 163			return 0;
 164		case daddu_op:
 165			if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
 166				break;
 167
 168			if (MIPSInst_RD(ir))
 169				regs->regs[MIPSInst_RD(ir)] =
 170					(u64)regs->regs[MIPSInst_RS(ir)] +
 171					(u64)regs->regs[MIPSInst_RT(ir)];
 172			return 0;
 173		case dsubu_op:
 174			if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
 175				break;
 176
 177			if (MIPSInst_RD(ir))
 178				regs->regs[MIPSInst_RD(ir)] =
 179					(s64)((u64)regs->regs[MIPSInst_RS(ir)] -
 180					      (u64)regs->regs[MIPSInst_RT(ir)]);
 181			return 0;
 182		}
 183		break;
 184	default:
 185		pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
 186			 ir, MIPSInst_OPCODE(ir));
 187	}
 188
 189	return SIGILL;
 190}
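
/*
 * Return convention used by the callers below: 0 means the delay-slot
 * instruction was emulated here, a negative value (-SIGFPE) means an FPU
 * instruction that must go to the FPU emulator, and SIGILL means there is
 * no fastpath and the caller falls back to mips_dsemul().
 */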
 191
 192/**
 193 * movf_func - Emulate a MOVF instruction
 194 * @regs: Process register set
 195 * @ir: Instruction
 196 *
 197 * Returns 0 since it always succeeds.
 198 */
 199static int movf_func(struct pt_regs *regs, u32 ir)
 200{
 201	u32 csr;
 202	u32 cond;
 203
 204	csr = current->thread.fpu.fcr31;
 205	cond = fpucondbit[MIPSInst_RT(ir) >> 2];
 206
 207	if (((csr & cond) == 0) && MIPSInst_RD(ir))
 208		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
 209
 210	MIPS_R2_STATS(movs);
 211
 212	return 0;
 213}
 214
 215/**
 216 * movt_func - Emulate a MOVT instruction
 217 * @regs: Process register set
 218 * @ir: Instruction
 219 *
 220 * Returns 0 since it always succeeds.
 221 */
 222static int movt_func(struct pt_regs *regs, u32 ir)
 223{
 224	u32 csr;
 225	u32 cond;
 226
 227	csr = current->thread.fpu.fcr31;
 228	cond = fpucondbit[MIPSInst_RT(ir) >> 2];
 229
 230	if (((csr & cond) != 0) && MIPSInst_RD(ir))
 231		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
 232
 233	MIPS_R2_STATS(movs);
 234
 235	return 0;
 236}
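
/*
 * Both MOVF and MOVT test one of the eight FPU condition codes: the cc
 * number is held in bits 20:18 of the instruction (hence the RT >> 2 above)
 * and fpucondbit[] maps it to the corresponding FCSR condition bit.
 */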
 237
 238/**
 239 * jr_func - Emulate a JR instruction.
 240 * @regs: Process register set
 241 * @ir: Instruction
 242 *
 243 * Returns SIGILL if the JR was in a delay slot, SIGEMT if we
 244 * can't compute the EPC, SIGSEGV if we can't access the
 245 * userland instruction, or 0 on success.
 246 */
 247static int jr_func(struct pt_regs *regs, u32 ir)
 248{
 249	int err;
 250	unsigned long cepc, epc, nepc;
 251	u32 nir;
 252
 253	if (delay_slot(regs))
 254		return SIGILL;
 255
 256	/* EPC after the RI/JR instruction */
 257	nepc = regs->cp0_epc;
 258	/* Roll back to the reserved R2 JR instruction */
 259	regs->cp0_epc -= 4;
 260	epc = regs->cp0_epc;
 261	err = __compute_return_epc(regs);
 262
 263	if (err < 0)
 264		return SIGEMT;
 265
 266
 267	/* Computed EPC */
 268	cepc = regs->cp0_epc;
 269
 270	/* Get DS instruction */
 271	err = __get_user(nir, (u32 __user *)nepc);
 272	if (err)
 273		return SIGSEGV;
 274
 275	MIPS_R2BR_STATS(jrs);
 276
 277	/* If nir == 0(NOP), then nothing else to do */
 278	if (nir) {
 279		/*
 280		 * A negative err means an FPU instruction in the BD-slot,
 281		 * a zero err means 'BD-slot emulation done'.
 282		 * For anything else we go back to trampoline emulation.
 283		 */
 284		err = mipsr6_emul(regs, nir);
 285		if (err > 0) {
 286			regs->cp0_epc = nepc;
 287			err = mips_dsemul(regs, nir, epc, cepc);
 288			if (err == SIGILL)
 289				err = SIGEMT;
 290			MIPS_R2_STATS(dsemul);
 291		}
 292	}
 293
 294	return err;
 295}
 296
 297/**
 298 * movz_func - Emulate a MOVZ instruction
 299 * @regs: Process register set
 300 * @ir: Instruction
 301 *
 302 * Returns 0 since it always succeeds.
 303 */
 304static int movz_func(struct pt_regs *regs, u32 ir)
 305{
 306	if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
 307		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
 308	MIPS_R2_STATS(movs);
 309
 310	return 0;
 311}
 312
 313/**
 314 * movn_func - Emulate a MOVN instruction
 315 * @regs: Process register set
 316 * @ir: Instruction
 317 *
 318 * Returns 0 since it always succeeds.
 319 */
 320static int movn_func(struct pt_regs *regs, u32 ir)
 321{
 322	if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
 323		regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
 324	MIPS_R2_STATS(movs);
 325
 326	return 0;
 327}
 328
 329/**
 330 * mfhi_func - Emulate a MFHI instruction
 331 * @regs: Process register set
 332 * @ir: Instruction
 333 *
 334 * Returns 0 since it always succeeds.
 335 */
 336static int mfhi_func(struct pt_regs *regs, u32 ir)
 337{
 338	if (MIPSInst_RD(ir))
 339		regs->regs[MIPSInst_RD(ir)] = regs->hi;
 340
 341	MIPS_R2_STATS(hilo);
 342
 343	return 0;
 344}
 345
 346/**
 347 * mthi_func - Emulate a MTHI instruction
 348 * @regs: Process register set
 349 * @ir: Instruction
 350 *
 351 * Returns 0 since it always succeeds.
 352 */
 353static int mthi_func(struct pt_regs *regs, u32 ir)
 354{
 355	regs->hi = regs->regs[MIPSInst_RS(ir)];
 356
 357	MIPS_R2_STATS(hilo);
 358
 359	return 0;
 360}
 361
 362/**
 363 * mflo_func - Emulate a MFLO instruction
 364 * @regs: Process register set
 365 * @ir: Instruction
 366 *
 367 * Returns 0 since it always succeeds.
 368 */
 369static int mflo_func(struct pt_regs *regs, u32 ir)
 370{
 371	if (MIPSInst_RD(ir))
 372		regs->regs[MIPSInst_RD(ir)] = regs->lo;
 373
 374	MIPS_R2_STATS(hilo);
 375
 376	return 0;
 377}
 378
 379/**
 380 * mtlo_func - Emulate a MTLO instruction
 381 * @regs: Process register set
 382 * @ir: Instruction
 383 *
 384 * Returns 0 since it always succeeds.
 385 */
 386static int mtlo_func(struct pt_regs *regs, u32 ir)
 387{
 388	regs->lo = regs->regs[MIPSInst_RS(ir)];
 389
 390	MIPS_R2_STATS(hilo);
 391
 392	return 0;
 393}
 394
 395/**
 396 * mult_func - Emulate a MULT instruction
 397 * @regs: Process register set
 398 * @ir: Instruction
 399 *
 400 * Returns 0 since it always succeeds.
 401 */
 402static int mult_func(struct pt_regs *regs, u32 ir)
 403{
 404	s64 res;
 405	s32 rt, rs;
 406
 407	rt = regs->regs[MIPSInst_RT(ir)];
 408	rs = regs->regs[MIPSInst_RS(ir)];
 409	res = (s64)rt * (s64)rs;
 410
 411	rs = res;
 412	regs->lo = (s64)rs;
 413	rt = res >> 32;
 414	res = (s64)rt;
 415	regs->hi = res;
 416
 417	MIPS_R2_STATS(muls);
 418
 419	return 0;
 420}
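
/*
 * Worked example for the HI/LO split above (values for illustration only):
 * with rs = rt = 0x00010000 the signed 64-bit product is 0x0000000100000000,
 * so LO ends up as 0 and HI as 1, each sign-extended to the register width.
 */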
 421
 422/**
 423 * multu_func - Emulate a MULTU instruction
 424 * @regs: Process register set
 425 * @ir: Instruction
 426 *
 427 * Returns 0 since it always succeeds.
 428 */
 429static int multu_func(struct pt_regs *regs, u32 ir)
 430{
 431	u64 res;
 432	u32 rt, rs;
 433
 434	rt = regs->regs[MIPSInst_RT(ir)];
 435	rs = regs->regs[MIPSInst_RS(ir)];
 436	res = (u64)rt * (u64)rs;
 437	rt = res;
 438	regs->lo = (s64)(s32)rt;
 439	regs->hi = (s64)(s32)(res >> 32);
 440
 441	MIPS_R2_STATS(muls);
 442
 443	return 0;
 444}
 445
 446/**
 447 * div_func - Emulate a DIV instruction
 448 * @regs: Process register set
 449 * @ir: Instruction
 450 *
 451 * Returns 0 since it always succeeds.
 452 */
 453static int div_func(struct pt_regs *regs, u32 ir)
 454{
 455	s32 rt, rs;
 456
 457	rt = regs->regs[MIPSInst_RT(ir)];
 458	rs = regs->regs[MIPSInst_RS(ir)];
 459
 460	regs->lo = (s64)(rs / rt);
 461	regs->hi = (s64)(rs % rt);
 462
 463	MIPS_R2_STATS(divs);
 464
 465	return 0;
 466}
 467
 468/**
 469 * divu_func - Emulate a DIVU instruction
 470 * @regs: Process register set
 471 * @ir: Instruction
 472 *
 473 * Returns 0 since it always succeeds.
 474 */
 475static int divu_func(struct pt_regs *regs, u32 ir)
 476{
 477	u32 rt, rs;
 478
 479	rt = regs->regs[MIPSInst_RT(ir)];
 480	rs = regs->regs[MIPSInst_RS(ir)];
 481
 482	regs->lo = (s64)(rs / rt);
 483	regs->hi = (s64)(rs % rt);
 484
 485	MIPS_R2_STATS(divs);
 486
 487	return 0;
 488}
 489
 490/**
 491 * dmult_func - Emulate a DMULT instruction
 492 * @regs: Process register set
 493 * @ir: Instruction
 494 *
 495 * Returns 0 on success or SIGILL for 32-bit kernels.
 496 */
 497static int dmult_func(struct pt_regs *regs, u32 ir)
 498{
 499	s64 res;
 500	s64 rt, rs;
 501
 502	if (IS_ENABLED(CONFIG_32BIT))
 503		return SIGILL;
 504
 505	rt = regs->regs[MIPSInst_RT(ir)];
 506	rs = regs->regs[MIPSInst_RS(ir)];
 507	res = rt * rs;
 508
 509	regs->lo = res;
 510	__asm__ __volatile__(
 511		"dmuh %0, %1, %2\t\n"
 512		: "=r"(res)
 513		: "r"(rt), "r"(rs));
 514
 515	regs->hi = res;
 516
 517	MIPS_R2_STATS(muls);
 518
 519	return 0;
 520}
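
/*
 * Note that the low 64 bits of the product above are computed in C while the
 * high 64 bits come from the R6 DMUH instruction, which is always available
 * here since this emulator only runs on MIPS R6 hardware.
 */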
 521
 522/**
 523 * dmultu_func - Emulate a DMULTU instruction
 524 * @regs: Process register set
 525 * @ir: Instruction
 526 *
 527 * Returns 0 on success or SIGILL for 32-bit kernels.
 528 */
 529static int dmultu_func(struct pt_regs *regs, u32 ir)
 530{
 531	u64 res;
 532	u64 rt, rs;
 533
 534	if (IS_ENABLED(CONFIG_32BIT))
 535		return SIGILL;
 536
 537	rt = regs->regs[MIPSInst_RT(ir)];
 538	rs = regs->regs[MIPSInst_RS(ir)];
 539	res = rt * rs;
 540
 541	regs->lo = res;
 542	__asm__ __volatile__(
 543		"dmuhu %0, %1, %2\t\n"
 544		: "=r"(res)
 545		: "r"(rt), "r"(rs));
 546
 547	regs->hi = res;
 548
 549	MIPS_R2_STATS(muls);
 550
 551	return 0;
 552}
 553
 554/**
 555 * ddiv_func - Emulate a DDIV instruction
 556 * @regs: Process register set
 557 * @ir: Instruction
 558 *
 559 * Returns 0 on success or SIGILL for 32-bit kernels.
 560 */
 561static int ddiv_func(struct pt_regs *regs, u32 ir)
 562{
 563	s64 rt, rs;
 564
 565	if (IS_ENABLED(CONFIG_32BIT))
 566		return SIGILL;
 567
 568	rt = regs->regs[MIPSInst_RT(ir)];
 569	rs = regs->regs[MIPSInst_RS(ir)];
 570
 571	regs->lo = rs / rt;
 572	regs->hi = rs % rt;
 573
 574	MIPS_R2_STATS(divs);
 575
 576	return 0;
 577}
 578
 579/**
 580 * ddivu_func - Emulate a DDIVU instruction
 581 * @regs: Process register set
 582 * @ir: Instruction
 583 *
 584 * Returns 0 on success or SIGILL for 32-bit kernels.
 585 */
 586static int ddivu_func(struct pt_regs *regs, u32 ir)
 587{
 588	u64 rt, rs;
 589
 590	if (IS_ENABLED(CONFIG_32BIT))
 591		return SIGILL;
 592
 593	rt = regs->regs[MIPSInst_RT(ir)];
 594	rs = regs->regs[MIPSInst_RS(ir)];
 595
 596	regs->lo = rs / rt;
 597	regs->hi = rs % rt;
 598
 599	MIPS_R2_STATS(divs);
 600
 601	return 0;
 602}
 603
 604/* R6 removed instructions for the SPECIAL opcode */
 605static const struct r2_decoder_table spec_op_table[] = {
 606	{ 0xfc1ff83f, 0x00000008, jr_func },
 607	{ 0xfc00ffff, 0x00000018, mult_func },
 608	{ 0xfc00ffff, 0x00000019, multu_func },
 609	{ 0xfc00ffff, 0x0000001c, dmult_func },
 610	{ 0xfc00ffff, 0x0000001d, dmultu_func },
 611	{ 0xffff07ff, 0x00000010, mfhi_func },
 612	{ 0xfc1fffff, 0x00000011, mthi_func },
 613	{ 0xffff07ff, 0x00000012, mflo_func },
 614	{ 0xfc1fffff, 0x00000013, mtlo_func },
 615	{ 0xfc0307ff, 0x00000001, movf_func },
 616	{ 0xfc0307ff, 0x00010001, movt_func },
 617	{ 0xfc0007ff, 0x0000000a, movz_func },
 618	{ 0xfc0007ff, 0x0000000b, movn_func },
 619	{ 0xfc00ffff, 0x0000001a, div_func },
 620	{ 0xfc00ffff, 0x0000001b, divu_func },
 621	{ 0xfc00ffff, 0x0000001e, ddiv_func },
 622	{ 0xfc00ffff, 0x0000001f, ddivu_func },
 623	{}
 624};
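
/*
 * Each entry above is matched in mipsr2_find_op_func() as
 * (inst & mask) == code.  For example, 0x00850018 ("mult a0, a1": SPECIAL
 * opcode, rs = 4, rt = 5, rd = sa = 0, function 0x18) satisfies
 * 0x00850018 & 0xfc00ffff == 0x00000018 and is dispatched to mult_func().
 */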
 625
 626/**
 627 * madd_func - Emulate a MADD instruction
 628 * @regs: Process register set
 629 * @ir: Instruction
 630 *
 631 * Returns 0 since it always succeeds.
 632 */
 633static int madd_func(struct pt_regs *regs, u32 ir)
 634{
 635	s64 res;
 636	s32 rt, rs;
 637
 638	rt = regs->regs[MIPSInst_RT(ir)];
 639	rs = regs->regs[MIPSInst_RS(ir)];
 640	res = (s64)rt * (s64)rs;
 641	rt = regs->hi;
 642	rs = regs->lo;
 643	res += ((((s64)rt) << 32) | (u32)rs);
 644
 645	rt = res;
 646	regs->lo = (s64)rt;
 647	rs = res >> 32;
 648	regs->hi = (s64)rs;
 649
 650	MIPS_R2_STATS(dsps);
 651
 652	return 0;
 653}
 654
 655/**
 656 * maddu_func - Emulate a MADDU instruction
 657 * @regs: Process register set
 658 * @ir: Instruction
 659 *
 660 * Returns 0 since it always succeeds.
 661 */
 662static int maddu_func(struct pt_regs *regs, u32 ir)
 663{
 664	u64 res;
 665	u32 rt, rs;
 666
 667	rt = regs->regs[MIPSInst_RT(ir)];
 668	rs = regs->regs[MIPSInst_RS(ir)];
 669	res = (u64)rt * (u64)rs;
 670	rt = regs->hi;
 671	rs = regs->lo;
 672	res += ((((s64)rt) << 32) | (u32)rs);
 673
 674	rt = res;
 675	regs->lo = (s64)(s32)rt;
 676	rs = res >> 32;
 677	regs->hi = (s64)(s32)rs;
 678
 679	MIPS_R2_STATS(dsps);
 680
 681	return 0;
 682}
 683
 684/**
 685 * msub_func - Emulate a MSUB instruction
 686 * @regs: Process register set
 687 * @ir: Instruction
 688 *
 689 * Returns 0 since it always succeeds.
 690 */
 691static int msub_func(struct pt_regs *regs, u32 ir)
 692{
 693	s64 res;
 694	s32 rt, rs;
 695
 696	rt = regs->regs[MIPSInst_RT(ir)];
 697	rs = regs->regs[MIPSInst_RS(ir)];
 698	res = (s64)rt * (s64)rs;
 699	rt = regs->hi;
 700	rs = regs->lo;
 701	res = ((((s64)rt) << 32) | (u32)rs) - res;
 702
 703	rt = res;
 704	regs->lo = (s64)rt;
 705	rs = res >> 32;
 706	regs->hi = (s64)rs;
 707
 708	MIPS_R2_STATS(dsps);
 709
 710	return 0;
 711}
 712
 713/**
 714 * msubu_func - Emulate a MSUBU instruction
 715 * @regs: Process register set
 716 * @ir: Instruction
 717 *
 718 * Returns 0 since it always succeeds.
 719 */
 720static int msubu_func(struct pt_regs *regs, u32 ir)
 721{
 722	u64 res;
 723	u32 rt, rs;
 724
 725	rt = regs->regs[MIPSInst_RT(ir)];
 726	rs = regs->regs[MIPSInst_RS(ir)];
 727	res = (u64)rt * (u64)rs;
 728	rt = regs->hi;
 729	rs = regs->lo;
 730	res = ((((s64)rt) << 32) | (u32)rs) - res;
 731
 732	rt = res;
 733	regs->lo = (s64)(s32)rt;
 734	rs = res >> 32;
 735	regs->hi = (s64)(s32)rs;
 736
 737	MIPS_R2_STATS(dsps);
 738
 739	return 0;
 740}
 741
 742/**
 743 * mul_func - Emulate a MUL instruction
 744 * @regs: Process register set
 745 * @ir: Instruction
 746 *
 747 * Returns 0 since it always succeeds.
 748 */
 749static int mul_func(struct pt_regs *regs, u32 ir)
 750{
 751	s64 res;
 752	s32 rt, rs;
 753
 754	if (!MIPSInst_RD(ir))
 755		return 0;
 756	rt = regs->regs[MIPSInst_RT(ir)];
 757	rs = regs->regs[MIPSInst_RS(ir)];
 758	res = (s64)rt * (s64)rs;
 759
 760	rs = res;
 761	regs->regs[MIPSInst_RD(ir)] = (s64)rs;
 762
 763	MIPS_R2_STATS(muls);
 764
 765	return 0;
 766}
 767
 768/**
 769 * clz_func - Emulate a CLZ instruction
 770 * @regs: Process register set
 771 * @ir: Instruction
 772 *
 773 * Returns 0 since it always succeeds.
 774 */
 775static int clz_func(struct pt_regs *regs, u32 ir)
 776{
 777	u32 res;
 778	u32 rs;
 779
 780	if (!MIPSInst_RD(ir))
 781		return 0;
 782
 783	rs = regs->regs[MIPSInst_RS(ir)];
 784	__asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
 785	regs->regs[MIPSInst_RD(ir)] = res;
 786
 787	MIPS_R2_STATS(bops);
 788
 789	return 0;
 790}
 791
 792/**
 793 * clo_func - Emulate a CLO instruction
 794 * @regs: Process register set
 795 * @ir: Instruction
 796 *
 797 * Returns 0 since it always succeeds.
 798 */
 799
 800static int clo_func(struct pt_regs *regs, u32 ir)
 801{
 802	u32 res;
 803	u32 rs;
 804
 805	if (!MIPSInst_RD(ir))
 806		return 0;
 807
 808	rs = regs->regs[MIPSInst_RS(ir)];
 809	__asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
 810	regs->regs[MIPSInst_RD(ir)] = res;
 811
 812	MIPS_R2_STATS(bops);
 813
 814	return 0;
 815}
 816
 817/**
 818 * dclz_func - Emulate a DCLZ instruction
 819 * @regs: Process register set
 820 * @ir: Instruction
 821 *
 822 * Returns 0 since it always succeeds.
 823 */
 824static int dclz_func(struct pt_regs *regs, u32 ir)
 825{
 826	u64 res;
 827	u64 rs;
 828
 829	if (IS_ENABLED(CONFIG_32BIT))
 830		return SIGILL;
 831
 832	if (!MIPSInst_RD(ir))
 833		return 0;
 834
 835	rs = regs->regs[MIPSInst_RS(ir)];
 836	__asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
 837	regs->regs[MIPSInst_RD(ir)] = res;
 838
 839	MIPS_R2_STATS(bops);
 840
 841	return 0;
 842}
 843
 844/**
 845 * dclo_func - Emulate a DCLO instruction
 846 * @regs: Process register set
 847 * @ir: Instruction
 848 *
 849 * Returns 0 since it always succeeds.
 850 */
 851static int dclo_func(struct pt_regs *regs, u32 ir)
 852{
 853	u64 res;
 854	u64 rs;
 855
 856	if (IS_ENABLED(CONFIG_32BIT))
 857		return SIGILL;
 858
 859	if (!MIPSInst_RD(ir))
 860		return 0;
 861
 862	rs = regs->regs[MIPSInst_RS(ir)];
 863	__asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
 864	regs->regs[MIPSInst_RD(ir)] = res;
 865
 866	MIPS_R2_STATS(bops);
 867
 868	return 0;
 869}
 870
 871/* R6 removed instructions for the SPECIAL2 opcode */
 872static const struct r2_decoder_table spec2_op_table[] = {
 873	{ 0xfc00ffff, 0x70000000, madd_func },
 874	{ 0xfc00ffff, 0x70000001, maddu_func },
 875	{ 0xfc0007ff, 0x70000002, mul_func },
 876	{ 0xfc00ffff, 0x70000004, msub_func },
 877	{ 0xfc00ffff, 0x70000005, msubu_func },
 878	{ 0xfc0007ff, 0x70000020, clz_func },
 879	{ 0xfc0007ff, 0x70000021, clo_func },
 880	{ 0xfc0007ff, 0x70000024, dclz_func },
 881	{ 0xfc0007ff, 0x70000025, dclo_func },
 882	{ }
 883};
 884
 885static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
 886				      const struct r2_decoder_table *table)
 887{
 888	const struct r2_decoder_table *p;
 889	int err;
 890
 891	for (p = table; p->func; p++) {
 892		if ((inst & p->mask) == p->code) {
 893			err = (p->func)(regs, inst);
 894			return err;
 895		}
 896	}
 897	return SIGILL;
 898}
 899
 900/**
 901 * mipsr2_decoder: Decode and emulate a MIPS R2 instruction
 902 * @regs: Process register set
 903 * @inst: Instruction to decode and emulate
 904 * @fcr31: Floating Point Control and Status Register Cause bits returned
 905 */
 906int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
 907{
 908	int err = 0;
 909	unsigned long vaddr;
 910	u32 nir;
 911	unsigned long cpc, epc, nepc, r31, res, rs, rt;
 912
 913	void __user *fault_addr = NULL;
 914	int pass = 0;
 915
 916repeat:
 917	r31 = regs->regs[31];
 918	epc = regs->cp0_epc;
 919	err = compute_return_epc(regs);
 920	if (err < 0) {
 921		BUG();
 922		return SIGEMT;
 923	}
 924	pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d)\n",
 925		 inst, epc, pass);
 926
 927	switch (MIPSInst_OPCODE(inst)) {
 928	case spec_op:
 929		err = mipsr2_find_op_func(regs, inst, spec_op_table);
 930		if (err < 0) {
 931			/* FPU instruction under JR */
 932			regs->cp0_cause |= CAUSEF_BD;
 933			goto fpu_emul;
 934		}
 935		break;
 936	case spec2_op:
 937		err = mipsr2_find_op_func(regs, inst, spec2_op_table);
 938		break;
 939	case bcond_op:
 940		rt = MIPSInst_RT(inst);
 941		rs = MIPSInst_RS(inst);
 942		switch (rt) {
 943		case tgei_op:
 944			if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
 945				do_trap_or_bp(regs, 0, 0, "TGEI");
 946
 947			MIPS_R2_STATS(traps);
 948
 949			break;
 950		case tgeiu_op:
 951			if (regs->regs[rs] >= MIPSInst_UIMM(inst))
 952				do_trap_or_bp(regs, 0, 0, "TGEIU");
 953
 954			MIPS_R2_STATS(traps);
 955
 956			break;
 957		case tlti_op:
 958			if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
 959				do_trap_or_bp(regs, 0, 0, "TLTI");
 960
 961			MIPS_R2_STATS(traps);
 962
 963			break;
 964		case tltiu_op:
 965			if (regs->regs[rs] < MIPSInst_UIMM(inst))
 966				do_trap_or_bp(regs, 0, 0, "TLTIU");
 967
 968			MIPS_R2_STATS(traps);
 969
 970			break;
 971		case teqi_op:
 972			if (regs->regs[rs] == MIPSInst_SIMM(inst))
 973				do_trap_or_bp(regs, 0, 0, "TEQI");
 974
 975			MIPS_R2_STATS(traps);
 976
 977			break;
 978		case tnei_op:
 979			if (regs->regs[rs] != MIPSInst_SIMM(inst))
 980				do_trap_or_bp(regs, 0, 0, "TNEI");
 981
 982			MIPS_R2_STATS(traps);
 983
 984			break;
 985		case bltzl_op:
 986		case bgezl_op:
 987		case bltzall_op:
 988		case bgezall_op:
 989			if (delay_slot(regs)) {
 990				err = SIGILL;
 991				break;
 992			}
 993			regs->regs[31] = r31;
 994			regs->cp0_epc = epc;
 995			err = __compute_return_epc(regs);
 996			if (err < 0)
 997				return SIGEMT;
 998			if (err != BRANCH_LIKELY_TAKEN)
 999				break;
1000			cpc = regs->cp0_epc;
1001			nepc = epc + 4;
1002			err = __get_user(nir, (u32 __user *)nepc);
1003			if (err) {
1004				err = SIGSEGV;
1005				break;
1006			}
1007			/*
1008			 * This will probably be optimized away when
1009			 * CONFIG_DEBUG_FS is not enabled
1010			 */
1011			switch (rt) {
1012			case bltzl_op:
1013				MIPS_R2BR_STATS(bltzl);
1014				break;
1015			case bgezl_op:
1016				MIPS_R2BR_STATS(bgezl);
1017				break;
1018			case bltzall_op:
1019				MIPS_R2BR_STATS(bltzall);
1020				break;
1021			case bgezall_op:
1022				MIPS_R2BR_STATS(bgezall);
1023				break;
1024			}
1025
1026			switch (MIPSInst_OPCODE(nir)) {
1027			case cop1_op:
1028			case cop1x_op:
1029			case lwc1_op:
1030			case swc1_op:
1031				regs->cp0_cause |= CAUSEF_BD;
1032				goto fpu_emul;
1033			}
1034			if (nir) {
1035				err = mipsr6_emul(regs, nir);
1036				if (err > 0) {
1037					err = mips_dsemul(regs, nir, epc, cpc);
1038					if (err == SIGILL)
1039						err = SIGEMT;
1040					MIPS_R2_STATS(dsemul);
1041				}
1042			}
1043			break;
1044		case bltzal_op:
1045		case bgezal_op:
1046			if (delay_slot(regs)) {
1047				err = SIGILL;
1048				break;
1049			}
1050			regs->regs[31] = r31;
1051			regs->cp0_epc = epc;
1052			err = __compute_return_epc(regs);
1053			if (err < 0)
1054				return SIGEMT;
1055			cpc = regs->cp0_epc;
1056			nepc = epc + 4;
1057			err = __get_user(nir, (u32 __user *)nepc);
1058			if (err) {
1059				err = SIGSEGV;
1060				break;
1061			}
1062			/*
1063			 * This will probably be optimized away when
1064			 * CONFIG_DEBUG_FS is not enabled
1065			 */
1066			switch (rt) {
1067			case bltzal_op:
1068				MIPS_R2BR_STATS(bltzal);
1069				break;
1070			case bgezal_op:
1071				MIPS_R2BR_STATS(bgezal);
1072				break;
1073			}
1074
1075			switch (MIPSInst_OPCODE(nir)) {
1076			case cop1_op:
1077			case cop1x_op:
1078			case lwc1_op:
1079			case swc1_op:
1080				regs->cp0_cause |= CAUSEF_BD;
1081				goto fpu_emul;
1082			}
1083			if (nir) {
1084				err = mipsr6_emul(regs, nir);
1085				if (err > 0) {
1086					err = mips_dsemul(regs, nir, epc, cpc);
1087					if (err == SIGILL)
1088						err = SIGEMT;
1089					MIPS_R2_STATS(dsemul);
1090				}
1091			}
1092			break;
1093		default:
1094			regs->regs[31] = r31;
1095			regs->cp0_epc = epc;
1096			err = SIGILL;
1097			break;
1098		}
1099		break;
1100
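	/*
	 * Reminder on branch-likely semantics: the delay-slot instruction is
	 * executed only when the branch is taken, which is why the
	 * BRANCH_LIKELY_TAKEN checks in this function skip delay-slot
	 * emulation for a not-taken branch.
	 */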
1101	case blezl_op:
1102	case bgtzl_op:
1103		/*
1104		 * For BLEZL and BGTZL, rt field must be set to 0. If this
1105		 * is not the case, this may be an encoding of a MIPS R6
1106		 * instruction, so return to CPU execution if this occurs
1107		 */
1108		if (MIPSInst_RT(inst)) {
1109			err = SIGILL;
1110			break;
1111		}
1112		fallthrough;
1113	case beql_op:
1114	case bnel_op:
1115		if (delay_slot(regs)) {
1116			err = SIGILL;
1117			break;
1118		}
1119		regs->regs[31] = r31;
1120		regs->cp0_epc = epc;
1121		err = __compute_return_epc(regs);
1122		if (err < 0)
1123			return SIGEMT;
1124		if (err != BRANCH_LIKELY_TAKEN)
1125			break;
1126		cpc = regs->cp0_epc;
1127		nepc = epc + 4;
1128		err = __get_user(nir, (u32 __user *)nepc);
1129		if (err) {
1130			err = SIGSEGV;
1131			break;
1132		}
1133		/*
1134		 * This will probably be optimized away when
1135		 * CONFIG_DEBUG_FS is not enabled
1136		 */
1137		switch (MIPSInst_OPCODE(inst)) {
1138		case beql_op:
1139			MIPS_R2BR_STATS(beql);
1140			break;
1141		case bnel_op:
1142			MIPS_R2BR_STATS(bnel);
1143			break;
1144		case blezl_op:
1145			MIPS_R2BR_STATS(blezl);
1146			break;
1147		case bgtzl_op:
1148			MIPS_R2BR_STATS(bgtzl);
1149			break;
1150		}
1151
1152		switch (MIPSInst_OPCODE(nir)) {
1153		case cop1_op:
1154		case cop1x_op:
1155		case lwc1_op:
1156		case swc1_op:
1157			regs->cp0_cause |= CAUSEF_BD;
1158			goto fpu_emul;
1159		}
1160		if (nir) {
1161			err = mipsr6_emul(regs, nir);
1162			if (err > 0) {
1163				err = mips_dsemul(regs, nir, epc, cpc);
1164				if (err == SIGILL)
1165					err = SIGEMT;
1166				MIPS_R2_STATS(dsemul);
1167			}
1168		}
1169		break;
1170	case lwc1_op:
1171	case swc1_op:
1172	case cop1_op:
1173	case cop1x_op:
1174fpu_emul:
1175		regs->regs[31] = r31;
1176		regs->cp0_epc = epc;
1177
1178		err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1179					       &fault_addr);
1180
1181		/*
1182		 * We can't allow the emulated instruction to leave any
1183		 * enabled Cause bits set in $fcr31.
1184		 */
1185		*fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
1186		current->thread.fpu.fcr31 &= ~res;
1187
1188		/*
1189		 * This is a tricky issue - lose_fpu() uses LL/SC atomics if
1190		 * the FPU is owned, which effectively cancels user-level LL/SC.
1191		 * So it might seem logical not to restore FPU ownership here.
1192		 * But a sequence of several FPU instructions is much more
1193		 * common than LL-FPU-SC, so we prefer to loop here until the
1194		 * next scheduler cycle cancels FPU ownership anyway.
1195		 */
1196		own_fpu(1);	/* Restore FPU state. */
1197
1198		if (err)
1199			current->thread.cp0_baduaddr = (unsigned long)fault_addr;
1200
1201		MIPS_R2_STATS(fpus);
1202
1203		break;
1204
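	/*
	 * LWL/LWR, SWL/SWR, LDL/LDR and SDL/SDR below were removed in MIPS R6.
	 * R2 code typically emits them in pairs for unaligned accesses, e.g.
	 * the classic unaligned 32-bit load on a little-endian CPU
	 * (illustrative pairing; it is reversed on big-endian):
	 *
	 *	lwr	t0, 0(a0)
	 *	lwl	t0, 3(a0)
	 *
	 * Each instruction of such a pair traps separately and is emulated
	 * byte by byte by the inline assembly in the cases that follow.
	 */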
1205	case lwl_op:
1206		rt = regs->regs[MIPSInst_RT(inst)];
1207		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1208		if (!access_ok((void __user *)vaddr, 4)) {
1209			current->thread.cp0_baduaddr = vaddr;
1210			err = SIGSEGV;
1211			break;
1212		}
1213		__asm__ __volatile__(
1214			"	.set	push\n"
1215			"	.set	reorder\n"
1216#ifdef CONFIG_CPU_LITTLE_ENDIAN
1217			"1:"	LB	"%1, 0(%2)\n"
1218				INS	"%0, %1, 24, 8\n"
1219			"	andi	%1, %2, 0x3\n"
1220			"	beq	$0, %1, 9f\n"
1221				ADDIU	"%2, %2, -1\n"
1222			"2:"	LB	"%1, 0(%2)\n"
1223				INS	"%0, %1, 16, 8\n"
1224			"	andi	%1, %2, 0x3\n"
1225			"	beq	$0, %1, 9f\n"
1226				ADDIU	"%2, %2, -1\n"
1227			"3:"	LB	"%1, 0(%2)\n"
1228				INS	"%0, %1, 8, 8\n"
1229			"	andi	%1, %2, 0x3\n"
1230			"	beq	$0, %1, 9f\n"
1231				ADDIU	"%2, %2, -1\n"
1232			"4:"	LB	"%1, 0(%2)\n"
1233				INS	"%0, %1, 0, 8\n"
1234#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1235			"1:"	LB	"%1, 0(%2)\n"
1236				INS	"%0, %1, 24, 8\n"
1237				ADDIU	"%2, %2, 1\n"
1238			"	andi	%1, %2, 0x3\n"
1239			"	beq	$0, %1, 9f\n"
1240			"2:"	LB	"%1, 0(%2)\n"
1241				INS	"%0, %1, 16, 8\n"
1242				ADDIU	"%2, %2, 1\n"
1243			"	andi	%1, %2, 0x3\n"
1244			"	beq	$0, %1, 9f\n"
1245			"3:"	LB	"%1, 0(%2)\n"
1246				INS	"%0, %1, 8, 8\n"
1247				ADDIU	"%2, %2, 1\n"
1248			"	andi	%1, %2, 0x3\n"
1249			"	beq	$0, %1, 9f\n"
1250			"4:"	LB	"%1, 0(%2)\n"
1251				INS	"%0, %1, 0, 8\n"
1252#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1253			"9:	sll	%0, %0, 0\n"
1254			"10:\n"
1255			"	.insn\n"
1256			"	.section	.fixup,\"ax\"\n"
1257			"8:	li	%3,%4\n"
1258			"	j	10b\n"
1259			"	.previous\n"
1260			"	.section	__ex_table,\"a\"\n"
1261			STR(PTR_WD) " 1b,8b\n"
1262			STR(PTR_WD) " 2b,8b\n"
1263			STR(PTR_WD) " 3b,8b\n"
1264			STR(PTR_WD) " 4b,8b\n"
1265			"	.previous\n"
1266			"	.set	pop\n"
1267			: "+&r"(rt), "=&r"(rs),
1268			  "+&r"(vaddr), "+&r"(err)
1269			: "i"(SIGSEGV));
1270
1271		if (MIPSInst_RT(inst) && !err)
1272			regs->regs[MIPSInst_RT(inst)] = rt;
1273
1274		MIPS_R2_STATS(loads);
1275
1276		break;
1277
1278	case lwr_op:
1279		rt = regs->regs[MIPSInst_RT(inst)];
1280		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1281		if (!access_ok((void __user *)vaddr, 4)) {
1282			current->thread.cp0_baduaddr = vaddr;
1283			err = SIGSEGV;
1284			break;
1285		}
1286		__asm__ __volatile__(
1287			"       .set	push\n"
1288			"       .set	reorder\n"
1289#ifdef CONFIG_CPU_LITTLE_ENDIAN
1290			"1:"    LB	"%1, 0(%2)\n"
1291				INS	"%0, %1, 0, 8\n"
1292				ADDIU	"%2, %2, 1\n"
1293			"       andi	%1, %2, 0x3\n"
1294			"       beq	$0, %1, 9f\n"
1295			"2:"    LB	"%1, 0(%2)\n"
1296				INS	"%0, %1, 8, 8\n"
1297				ADDIU	"%2, %2, 1\n"
1298			"       andi	%1, %2, 0x3\n"
1299			"       beq	$0, %1, 9f\n"
1300			"3:"    LB	"%1, 0(%2)\n"
1301				INS	"%0, %1, 16, 8\n"
1302				ADDIU	"%2, %2, 1\n"
1303			"       andi	%1, %2, 0x3\n"
1304			"       beq	$0, %1, 9f\n"
1305			"4:"    LB	"%1, 0(%2)\n"
1306				INS	"%0, %1, 24, 8\n"
1307			"       sll	%0, %0, 0\n"
1308#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1309			"1:"    LB	"%1, 0(%2)\n"
1310				INS	"%0, %1, 0, 8\n"
1311			"       andi	%1, %2, 0x3\n"
1312			"       beq	$0, %1, 9f\n"
1313				ADDIU	"%2, %2, -1\n"
1314			"2:"    LB	"%1, 0(%2)\n"
1315				INS	"%0, %1, 8, 8\n"
1316			"       andi	%1, %2, 0x3\n"
1317			"       beq	$0, %1, 9f\n"
1318				ADDIU	"%2, %2, -1\n"
1319			"3:"    LB	"%1, 0(%2)\n"
1320				INS	"%0, %1, 16, 8\n"
1321			"       andi	%1, %2, 0x3\n"
1322			"       beq	$0, %1, 9f\n"
1323				ADDIU	"%2, %2, -1\n"
1324			"4:"    LB	"%1, 0(%2)\n"
1325				INS	"%0, %1, 24, 8\n"
1326			"       sll	%0, %0, 0\n"
1327#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1328			"9:\n"
1329			"10:\n"
1330			"	.insn\n"
1331			"	.section	.fixup,\"ax\"\n"
1332			"8:	li	%3,%4\n"
1333			"	j	10b\n"
1334			"       .previous\n"
1335			"	.section	__ex_table,\"a\"\n"
1336			STR(PTR_WD) " 1b,8b\n"
1337			STR(PTR_WD) " 2b,8b\n"
1338			STR(PTR_WD) " 3b,8b\n"
1339			STR(PTR_WD) " 4b,8b\n"
1340			"	.previous\n"
1341			"	.set	pop\n"
1342			: "+&r"(rt), "=&r"(rs),
1343			  "+&r"(vaddr), "+&r"(err)
1344			: "i"(SIGSEGV));
1345		if (MIPSInst_RT(inst) && !err)
1346			regs->regs[MIPSInst_RT(inst)] = rt;
1347
1348		MIPS_R2_STATS(loads);
1349
1350		break;
1351
1352	case swl_op:
1353		rt = regs->regs[MIPSInst_RT(inst)];
1354		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1355		if (!access_ok((void __user *)vaddr, 4)) {
1356			current->thread.cp0_baduaddr = vaddr;
1357			err = SIGSEGV;
1358			break;
1359		}
1360		__asm__ __volatile__(
1361			"	.set	push\n"
1362			"	.set	reorder\n"
1363#ifdef CONFIG_CPU_LITTLE_ENDIAN
1364				EXT	"%1, %0, 24, 8\n"
1365			"1:"	SB	"%1, 0(%2)\n"
1366			"	andi	%1, %2, 0x3\n"
1367			"	beq	$0, %1, 9f\n"
1368				ADDIU	"%2, %2, -1\n"
1369				EXT	"%1, %0, 16, 8\n"
1370			"2:"	SB	"%1, 0(%2)\n"
1371			"	andi	%1, %2, 0x3\n"
1372			"	beq	$0, %1, 9f\n"
1373				ADDIU	"%2, %2, -1\n"
1374				EXT	"%1, %0, 8, 8\n"
1375			"3:"	SB	"%1, 0(%2)\n"
1376			"	andi	%1, %2, 0x3\n"
1377			"	beq	$0, %1, 9f\n"
1378				ADDIU	"%2, %2, -1\n"
1379				EXT	"%1, %0, 0, 8\n"
1380			"4:"	SB	"%1, 0(%2)\n"
1381#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1382				EXT	"%1, %0, 24, 8\n"
1383			"1:"	SB	"%1, 0(%2)\n"
1384				ADDIU	"%2, %2, 1\n"
1385			"	andi	%1, %2, 0x3\n"
1386			"	beq	$0, %1, 9f\n"
1387				EXT	"%1, %0, 16, 8\n"
1388			"2:"	SB	"%1, 0(%2)\n"
1389				ADDIU	"%2, %2, 1\n"
1390			"	andi	%1, %2, 0x3\n"
1391			"	beq	$0, %1, 9f\n"
1392				EXT	"%1, %0, 8, 8\n"
1393			"3:"	SB	"%1, 0(%2)\n"
1394				ADDIU	"%2, %2, 1\n"
1395			"	andi	%1, %2, 0x3\n"
1396			"	beq	$0, %1, 9f\n"
1397				EXT	"%1, %0, 0, 8\n"
1398			"4:"	SB	"%1, 0(%2)\n"
1399#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1400			"9:\n"
1401			"	.insn\n"
1402			"       .section        .fixup,\"ax\"\n"
1403			"8:	li	%3,%4\n"
1404			"	j	9b\n"
1405			"	.previous\n"
1406			"	.section        __ex_table,\"a\"\n"
1407			STR(PTR_WD) " 1b,8b\n"
1408			STR(PTR_WD) " 2b,8b\n"
1409			STR(PTR_WD) " 3b,8b\n"
1410			STR(PTR_WD) " 4b,8b\n"
1411			"	.previous\n"
1412			"	.set	pop\n"
1413			: "+&r"(rt), "=&r"(rs),
1414			  "+&r"(vaddr), "+&r"(err)
1415			: "i"(SIGSEGV)
1416			: "memory");
1417
1418		MIPS_R2_STATS(stores);
1419
1420		break;
1421
1422	case swr_op:
1423		rt = regs->regs[MIPSInst_RT(inst)];
1424		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1425		if (!access_ok((void __user *)vaddr, 4)) {
1426			current->thread.cp0_baduaddr = vaddr;
1427			err = SIGSEGV;
1428			break;
1429		}
1430		__asm__ __volatile__(
1431			"	.set	push\n"
1432			"	.set	reorder\n"
1433#ifdef CONFIG_CPU_LITTLE_ENDIAN
1434				EXT	"%1, %0, 0, 8\n"
1435			"1:"	SB	"%1, 0(%2)\n"
1436				ADDIU	"%2, %2, 1\n"
1437			"	andi	%1, %2, 0x3\n"
1438			"	beq	$0, %1, 9f\n"
1439				EXT	"%1, %0, 8, 8\n"
1440			"2:"	SB	"%1, 0(%2)\n"
1441				ADDIU	"%2, %2, 1\n"
1442			"	andi	%1, %2, 0x3\n"
1443			"	beq	$0, %1, 9f\n"
1444				EXT	"%1, %0, 16, 8\n"
1445			"3:"	SB	"%1, 0(%2)\n"
1446				ADDIU	"%2, %2, 1\n"
1447			"	andi	%1, %2, 0x3\n"
1448			"	beq	$0, %1, 9f\n"
1449				EXT	"%1, %0, 24, 8\n"
1450			"4:"	SB	"%1, 0(%2)\n"
1451#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1452				EXT	"%1, %0, 0, 8\n"
1453			"1:"	SB	"%1, 0(%2)\n"
1454			"	andi	%1, %2, 0x3\n"
1455			"	beq	$0, %1, 9f\n"
1456				ADDIU	"%2, %2, -1\n"
1457				EXT	"%1, %0, 8, 8\n"
1458			"2:"	SB	"%1, 0(%2)\n"
1459			"	andi	%1, %2, 0x3\n"
1460			"	beq	$0, %1, 9f\n"
1461				ADDIU	"%2, %2, -1\n"
1462				EXT	"%1, %0, 16, 8\n"
1463			"3:"	SB	"%1, 0(%2)\n"
1464			"	andi	%1, %2, 0x3\n"
1465			"	beq	$0, %1, 9f\n"
1466				ADDIU	"%2, %2, -1\n"
1467				EXT	"%1, %0, 24, 8\n"
1468			"4:"	SB	"%1, 0(%2)\n"
1469#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1470			"9:\n"
1471			"	.insn\n"
1472			"	.section        .fixup,\"ax\"\n"
1473			"8:	li	%3,%4\n"
1474			"	j	9b\n"
1475			"	.previous\n"
1476			"	.section        __ex_table,\"a\"\n"
1477			STR(PTR_WD) " 1b,8b\n"
1478			STR(PTR_WD) " 2b,8b\n"
1479			STR(PTR_WD) " 3b,8b\n"
1480			STR(PTR_WD) " 4b,8b\n"
1481			"	.previous\n"
1482			"	.set	pop\n"
1483			: "+&r"(rt), "=&r"(rs),
1484			  "+&r"(vaddr), "+&r"(err)
1485			: "i"(SIGSEGV)
1486			: "memory");
1487
1488		MIPS_R2_STATS(stores);
1489
1490		break;
1491
1492	case ldl_op:
1493		if (IS_ENABLED(CONFIG_32BIT)) {
1494		    err = SIGILL;
1495		    break;
1496		}
1497
1498		rt = regs->regs[MIPSInst_RT(inst)];
1499		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1500		if (!access_ok((void __user *)vaddr, 8)) {
1501			current->thread.cp0_baduaddr = vaddr;
1502			err = SIGSEGV;
1503			break;
1504		}
1505		__asm__ __volatile__(
1506			"	.set    push\n"
1507			"	.set    reorder\n"
1508#ifdef CONFIG_CPU_LITTLE_ENDIAN
1509			"1:	lb	%1, 0(%2)\n"
1510			"	dinsu	%0, %1, 56, 8\n"
1511			"	andi	%1, %2, 0x7\n"
1512			"	beq	$0, %1, 9f\n"
1513			"	daddiu	%2, %2, -1\n"
1514			"2:	lb	%1, 0(%2)\n"
1515			"	dinsu	%0, %1, 48, 8\n"
1516			"	andi	%1, %2, 0x7\n"
1517			"	beq	$0, %1, 9f\n"
1518			"	daddiu	%2, %2, -1\n"
1519			"3:	lb	%1, 0(%2)\n"
1520			"	dinsu	%0, %1, 40, 8\n"
1521			"	andi	%1, %2, 0x7\n"
1522			"	beq	$0, %1, 9f\n"
1523			"	daddiu	%2, %2, -1\n"
1524			"4:	lb	%1, 0(%2)\n"
1525			"	dinsu	%0, %1, 32, 8\n"
1526			"	andi	%1, %2, 0x7\n"
1527			"	beq	$0, %1, 9f\n"
1528			"	daddiu	%2, %2, -1\n"
1529			"5:	lb	%1, 0(%2)\n"
1530			"	dins	%0, %1, 24, 8\n"
1531			"	andi	%1, %2, 0x7\n"
1532			"	beq	$0, %1, 9f\n"
1533			"	daddiu	%2, %2, -1\n"
1534			"6:	lb	%1, 0(%2)\n"
1535			"	dins	%0, %1, 16, 8\n"
1536			"	andi	%1, %2, 0x7\n"
1537			"	beq	$0, %1, 9f\n"
1538			"	daddiu	%2, %2, -1\n"
1539			"7:	lb	%1, 0(%2)\n"
1540			"	dins	%0, %1, 8, 8\n"
1541			"	andi	%1, %2, 0x7\n"
1542			"	beq	$0, %1, 9f\n"
1543			"	daddiu	%2, %2, -1\n"
1544			"0:	lb	%1, 0(%2)\n"
1545			"	dins	%0, %1, 0, 8\n"
1546#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1547			"1:	lb	%1, 0(%2)\n"
1548			"	dinsu	%0, %1, 56, 8\n"
1549			"	daddiu	%2, %2, 1\n"
1550			"	andi	%1, %2, 0x7\n"
1551			"	beq	$0, %1, 9f\n"
1552			"2:	lb	%1, 0(%2)\n"
1553			"	dinsu	%0, %1, 48, 8\n"
1554			"	daddiu	%2, %2, 1\n"
1555			"	andi	%1, %2, 0x7\n"
1556			"	beq	$0, %1, 9f\n"
1557			"3:	lb	%1, 0(%2)\n"
1558			"	dinsu	%0, %1, 40, 8\n"
1559			"	daddiu  %2, %2, 1\n"
1560			"	andi	%1, %2, 0x7\n"
1561			"	beq	$0, %1, 9f\n"
1562			"4:	lb	%1, 0(%2)\n"
1563			"	dinsu	%0, %1, 32, 8\n"
1564			"	daddiu	%2, %2, 1\n"
1565			"	andi	%1, %2, 0x7\n"
1566			"	beq	$0, %1, 9f\n"
1567			"5:	lb	%1, 0(%2)\n"
1568			"	dins	%0, %1, 24, 8\n"
1569			"	daddiu	%2, %2, 1\n"
1570			"	andi	%1, %2, 0x7\n"
1571			"	beq	$0, %1, 9f\n"
1572			"6:	lb	%1, 0(%2)\n"
1573			"	dins	%0, %1, 16, 8\n"
1574			"	daddiu	%2, %2, 1\n"
1575			"	andi	%1, %2, 0x7\n"
1576			"	beq	$0, %1, 9f\n"
1577			"7:	lb	%1, 0(%2)\n"
1578			"	dins	%0, %1, 8, 8\n"
1579			"	daddiu	%2, %2, 1\n"
1580			"	andi	%1, %2, 0x7\n"
1581			"	beq	$0, %1, 9f\n"
1582			"0:	lb	%1, 0(%2)\n"
1583			"	dins	%0, %1, 0, 8\n"
1584#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1585			"9:\n"
1586			"	.insn\n"
1587			"	.section        .fixup,\"ax\"\n"
1588			"8:	li	%3,%4\n"
1589			"	j	9b\n"
1590			"	.previous\n"
1591			"	.section        __ex_table,\"a\"\n"
1592			STR(PTR_WD) " 1b,8b\n"
1593			STR(PTR_WD) " 2b,8b\n"
1594			STR(PTR_WD) " 3b,8b\n"
1595			STR(PTR_WD) " 4b,8b\n"
1596			STR(PTR_WD) " 5b,8b\n"
1597			STR(PTR_WD) " 6b,8b\n"
1598			STR(PTR_WD) " 7b,8b\n"
1599			STR(PTR_WD) " 0b,8b\n"
1600			"	.previous\n"
1601			"	.set	pop\n"
1602			: "+&r"(rt), "=&r"(rs),
1603			  "+&r"(vaddr), "+&r"(err)
1604			: "i"(SIGSEGV));
1605		if (MIPSInst_RT(inst) && !err)
1606			regs->regs[MIPSInst_RT(inst)] = rt;
1607
1608		MIPS_R2_STATS(loads);
1609		break;
1610
1611	case ldr_op:
1612		if (IS_ENABLED(CONFIG_32BIT)) {
1613		    err = SIGILL;
1614		    break;
1615		}
1616
1617		rt = regs->regs[MIPSInst_RT(inst)];
1618		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1619		if (!access_ok((void __user *)vaddr, 8)) {
1620			current->thread.cp0_baduaddr = vaddr;
1621			err = SIGSEGV;
1622			break;
1623		}
1624		__asm__ __volatile__(
1625			"	.set    push\n"
1626			"	.set    reorder\n"
1627#ifdef CONFIG_CPU_LITTLE_ENDIAN
1628			"1:	lb      %1, 0(%2)\n"
1629			"	dins   %0, %1, 0, 8\n"
1630			"	daddiu  %2, %2, 1\n"
1631			"	andi    %1, %2, 0x7\n"
1632			"	beq     $0, %1, 9f\n"
1633			"2:	lb      %1, 0(%2)\n"
1634			"	dins   %0, %1, 8, 8\n"
1635			"	daddiu  %2, %2, 1\n"
1636			"	andi    %1, %2, 0x7\n"
1637			"	beq     $0, %1, 9f\n"
1638			"3:	lb      %1, 0(%2)\n"
1639			"	dins   %0, %1, 16, 8\n"
1640			"	daddiu  %2, %2, 1\n"
1641			"	andi    %1, %2, 0x7\n"
1642			"	beq     $0, %1, 9f\n"
1643			"4:	lb      %1, 0(%2)\n"
1644			"	dins   %0, %1, 24, 8\n"
1645			"	daddiu  %2, %2, 1\n"
1646			"	andi    %1, %2, 0x7\n"
1647			"	beq     $0, %1, 9f\n"
1648			"5:	lb      %1, 0(%2)\n"
1649			"	dinsu    %0, %1, 32, 8\n"
1650			"	daddiu  %2, %2, 1\n"
1651			"	andi    %1, %2, 0x7\n"
1652			"	beq     $0, %1, 9f\n"
1653			"6:	lb      %1, 0(%2)\n"
1654			"	dinsu    %0, %1, 40, 8\n"
1655			"	daddiu  %2, %2, 1\n"
1656			"	andi    %1, %2, 0x7\n"
1657			"	beq     $0, %1, 9f\n"
1658			"7:	lb      %1, 0(%2)\n"
1659			"	dinsu    %0, %1, 48, 8\n"
1660			"	daddiu  %2, %2, 1\n"
1661			"	andi    %1, %2, 0x7\n"
1662			"	beq     $0, %1, 9f\n"
1663			"0:	lb      %1, 0(%2)\n"
1664			"	dinsu    %0, %1, 56, 8\n"
1665#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1666			"1:	lb      %1, 0(%2)\n"
1667			"	dins   %0, %1, 0, 8\n"
1668			"	andi    %1, %2, 0x7\n"
1669			"	beq     $0, %1, 9f\n"
1670			"	daddiu  %2, %2, -1\n"
1671			"2:	lb      %1, 0(%2)\n"
1672			"	dins   %0, %1, 8, 8\n"
1673			"	andi    %1, %2, 0x7\n"
1674			"	beq     $0, %1, 9f\n"
1675			"	daddiu  %2, %2, -1\n"
1676			"3:	lb      %1, 0(%2)\n"
1677			"	dins   %0, %1, 16, 8\n"
1678			"	andi    %1, %2, 0x7\n"
1679			"	beq     $0, %1, 9f\n"
1680			"	daddiu  %2, %2, -1\n"
1681			"4:	lb      %1, 0(%2)\n"
1682			"	dins   %0, %1, 24, 8\n"
1683			"	andi    %1, %2, 0x7\n"
1684			"	beq     $0, %1, 9f\n"
1685			"	daddiu  %2, %2, -1\n"
1686			"5:	lb      %1, 0(%2)\n"
1687			"	dinsu    %0, %1, 32, 8\n"
1688			"	andi    %1, %2, 0x7\n"
1689			"	beq     $0, %1, 9f\n"
1690			"	daddiu  %2, %2, -1\n"
1691			"6:	lb      %1, 0(%2)\n"
1692			"	dinsu    %0, %1, 40, 8\n"
1693			"	andi    %1, %2, 0x7\n"
1694			"	beq     $0, %1, 9f\n"
1695			"	daddiu  %2, %2, -1\n"
1696			"7:	lb      %1, 0(%2)\n"
1697			"	dinsu    %0, %1, 48, 8\n"
1698			"	andi    %1, %2, 0x7\n"
1699			"	beq     $0, %1, 9f\n"
1700			"	daddiu  %2, %2, -1\n"
1701			"0:	lb      %1, 0(%2)\n"
1702			"	dinsu    %0, %1, 56, 8\n"
1703#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1704			"9:\n"
1705			"	.insn\n"
1706			"	.section        .fixup,\"ax\"\n"
1707			"8:	li     %3,%4\n"
1708			"	j      9b\n"
1709			"	.previous\n"
1710			"	.section        __ex_table,\"a\"\n"
1711			STR(PTR_WD) " 1b,8b\n"
1712			STR(PTR_WD) " 2b,8b\n"
1713			STR(PTR_WD) " 3b,8b\n"
1714			STR(PTR_WD) " 4b,8b\n"
1715			STR(PTR_WD) " 5b,8b\n"
1716			STR(PTR_WD) " 6b,8b\n"
1717			STR(PTR_WD) " 7b,8b\n"
1718			STR(PTR_WD) " 0b,8b\n"
1719			"	.previous\n"
1720			"	.set    pop\n"
1721			: "+&r"(rt), "=&r"(rs),
1722			  "+&r"(vaddr), "+&r"(err)
1723			: "i"(SIGSEGV));
1724		if (MIPSInst_RT(inst) && !err)
1725			regs->regs[MIPSInst_RT(inst)] = rt;
1726
1727		MIPS_R2_STATS(loads);
1728		break;
1729
1730	case sdl_op:
1731		if (IS_ENABLED(CONFIG_32BIT)) {
1732		    err = SIGILL;
1733		    break;
1734		}
1735
1736		rt = regs->regs[MIPSInst_RT(inst)];
1737		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1738		if (!access_ok((void __user *)vaddr, 8)) {
1739			current->thread.cp0_baduaddr = vaddr;
1740			err = SIGSEGV;
1741			break;
1742		}
1743		__asm__ __volatile__(
1744			"	.set	push\n"
1745			"	.set	reorder\n"
1746#ifdef CONFIG_CPU_LITTLE_ENDIAN
1747			"	dextu	%1, %0, 56, 8\n"
1748			"1:	sb	%1, 0(%2)\n"
1749			"	andi	%1, %2, 0x7\n"
1750			"	beq	$0, %1, 9f\n"
1751			"	daddiu	%2, %2, -1\n"
1752			"	dextu	%1, %0, 48, 8\n"
1753			"2:	sb	%1, 0(%2)\n"
1754			"	andi	%1, %2, 0x7\n"
1755			"	beq	$0, %1, 9f\n"
1756			"	daddiu	%2, %2, -1\n"
1757			"	dextu	%1, %0, 40, 8\n"
1758			"3:	sb	%1, 0(%2)\n"
1759			"	andi	%1, %2, 0x7\n"
1760			"	beq	$0, %1, 9f\n"
1761			"	daddiu	%2, %2, -1\n"
1762			"	dextu	%1, %0, 32, 8\n"
1763			"4:	sb	%1, 0(%2)\n"
1764			"	andi	%1, %2, 0x7\n"
1765			"	beq	$0, %1, 9f\n"
1766			"	daddiu	%2, %2, -1\n"
1767			"	dext	%1, %0, 24, 8\n"
1768			"5:	sb	%1, 0(%2)\n"
1769			"	andi	%1, %2, 0x7\n"
1770			"	beq	$0, %1, 9f\n"
1771			"	daddiu	%2, %2, -1\n"
1772			"	dext	%1, %0, 16, 8\n"
1773			"6:	sb	%1, 0(%2)\n"
1774			"	andi	%1, %2, 0x7\n"
1775			"	beq	$0, %1, 9f\n"
1776			"	daddiu	%2, %2, -1\n"
1777			"	dext	%1, %0, 8, 8\n"
1778			"7:	sb	%1, 0(%2)\n"
1779			"	andi	%1, %2, 0x7\n"
1780			"	beq	$0, %1, 9f\n"
1781			"	daddiu	%2, %2, -1\n"
1782			"	dext	%1, %0, 0, 8\n"
1783			"0:	sb	%1, 0(%2)\n"
1784#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1785			"	dextu	%1, %0, 56, 8\n"
1786			"1:	sb	%1, 0(%2)\n"
1787			"	daddiu	%2, %2, 1\n"
1788			"	andi	%1, %2, 0x7\n"
1789			"	beq	$0, %1, 9f\n"
1790			"	dextu	%1, %0, 48, 8\n"
1791			"2:	sb	%1, 0(%2)\n"
1792			"	daddiu	%2, %2, 1\n"
1793			"	andi	%1, %2, 0x7\n"
1794			"	beq	$0, %1, 9f\n"
1795			"	dextu	%1, %0, 40, 8\n"
1796			"3:	sb	%1, 0(%2)\n"
1797			"	daddiu	%2, %2, 1\n"
1798			"	andi	%1, %2, 0x7\n"
1799			"	beq	$0, %1, 9f\n"
1800			"	dextu	%1, %0, 32, 8\n"
1801			"4:	sb	%1, 0(%2)\n"
1802			"	daddiu	%2, %2, 1\n"
1803			"	andi	%1, %2, 0x7\n"
1804			"	beq	$0, %1, 9f\n"
1805			"	dext	%1, %0, 24, 8\n"
1806			"5:	sb	%1, 0(%2)\n"
1807			"	daddiu	%2, %2, 1\n"
1808			"	andi	%1, %2, 0x7\n"
1809			"	beq	$0, %1, 9f\n"
1810			"	dext	%1, %0, 16, 8\n"
1811			"6:	sb	%1, 0(%2)\n"
1812			"	daddiu	%2, %2, 1\n"
1813			"	andi	%1, %2, 0x7\n"
1814			"	beq	$0, %1, 9f\n"
1815			"	dext	%1, %0, 8, 8\n"
1816			"7:	sb	%1, 0(%2)\n"
1817			"	daddiu	%2, %2, 1\n"
1818			"	andi	%1, %2, 0x7\n"
1819			"	beq	$0, %1, 9f\n"
1820			"	dext	%1, %0, 0, 8\n"
1821			"0:	sb	%1, 0(%2)\n"
1822#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1823			"9:\n"
1824			"	.insn\n"
1825			"	.section        .fixup,\"ax\"\n"
1826			"8:	li	%3,%4\n"
1827			"	j	9b\n"
1828			"	.previous\n"
1829			"	.section        __ex_table,\"a\"\n"
1830			STR(PTR_WD) " 1b,8b\n"
1831			STR(PTR_WD) " 2b,8b\n"
1832			STR(PTR_WD) " 3b,8b\n"
1833			STR(PTR_WD) " 4b,8b\n"
1834			STR(PTR_WD) " 5b,8b\n"
1835			STR(PTR_WD) " 6b,8b\n"
1836			STR(PTR_WD) " 7b,8b\n"
1837			STR(PTR_WD) " 0b,8b\n"
1838			"	.previous\n"
1839			"	.set	pop\n"
1840			: "+&r"(rt), "=&r"(rs),
1841			  "+&r"(vaddr), "+&r"(err)
1842			: "i"(SIGSEGV)
1843			: "memory");
1844
1845		MIPS_R2_STATS(stores);
1846		break;
1847
1848	case sdr_op:
1849		if (IS_ENABLED(CONFIG_32BIT)) {
1850		    err = SIGILL;
1851		    break;
1852		}
1853
1854		rt = regs->regs[MIPSInst_RT(inst)];
1855		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1856		if (!access_ok((void __user *)vaddr, 8)) {
1857			current->thread.cp0_baduaddr = vaddr;
1858			err = SIGSEGV;
1859			break;
1860		}
1861		__asm__ __volatile__(
1862			"       .set	push\n"
1863			"       .set	reorder\n"
1864#ifdef CONFIG_CPU_LITTLE_ENDIAN
1865			"       dext	%1, %0, 0, 8\n"
1866			"1:     sb	%1, 0(%2)\n"
1867			"       daddiu	%2, %2, 1\n"
1868			"       andi	%1, %2, 0x7\n"
1869			"       beq	$0, %1, 9f\n"
1870			"       dext	%1, %0, 8, 8\n"
1871			"2:     sb	%1, 0(%2)\n"
1872			"       daddiu	%2, %2, 1\n"
1873			"       andi	%1, %2, 0x7\n"
1874			"       beq	$0, %1, 9f\n"
1875			"       dext	%1, %0, 16, 8\n"
1876			"3:     sb	%1, 0(%2)\n"
1877			"       daddiu	%2, %2, 1\n"
1878			"       andi	%1, %2, 0x7\n"
1879			"       beq	$0, %1, 9f\n"
1880			"       dext	%1, %0, 24, 8\n"
1881			"4:     sb	%1, 0(%2)\n"
1882			"       daddiu	%2, %2, 1\n"
1883			"       andi	%1, %2, 0x7\n"
1884			"       beq	$0, %1, 9f\n"
1885			"       dextu	%1, %0, 32, 8\n"
1886			"5:     sb	%1, 0(%2)\n"
1887			"       daddiu	%2, %2, 1\n"
1888			"       andi	%1, %2, 0x7\n"
1889			"       beq	$0, %1, 9f\n"
1890			"       dextu	%1, %0, 40, 8\n"
1891			"6:     sb	%1, 0(%2)\n"
1892			"       daddiu	%2, %2, 1\n"
1893			"       andi	%1, %2, 0x7\n"
1894			"       beq	$0, %1, 9f\n"
1895			"       dextu	%1, %0, 48, 8\n"
1896			"7:     sb	%1, 0(%2)\n"
1897			"       daddiu	%2, %2, 1\n"
1898			"       andi	%1, %2, 0x7\n"
1899			"       beq	$0, %1, 9f\n"
1900			"       dextu	%1, %0, 56, 8\n"
1901			"0:     sb	%1, 0(%2)\n"
1902#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1903			"       dext	%1, %0, 0, 8\n"
1904			"1:     sb	%1, 0(%2)\n"
1905			"       andi	%1, %2, 0x7\n"
1906			"       beq	$0, %1, 9f\n"
1907			"       daddiu	%2, %2, -1\n"
1908			"       dext	%1, %0, 8, 8\n"
1909			"2:     sb	%1, 0(%2)\n"
1910			"       andi	%1, %2, 0x7\n"
1911			"       beq	$0, %1, 9f\n"
1912			"       daddiu	%2, %2, -1\n"
1913			"       dext	%1, %0, 16, 8\n"
1914			"3:     sb	%1, 0(%2)\n"
1915			"       andi	%1, %2, 0x7\n"
1916			"       beq	$0, %1, 9f\n"
1917			"       daddiu	%2, %2, -1\n"
1918			"       dext	%1, %0, 24, 8\n"
1919			"4:     sb	%1, 0(%2)\n"
1920			"       andi	%1, %2, 0x7\n"
1921			"       beq	$0, %1, 9f\n"
1922			"       daddiu	%2, %2, -1\n"
1923			"       dextu	%1, %0, 32, 8\n"
1924			"5:     sb	%1, 0(%2)\n"
1925			"       andi	%1, %2, 0x7\n"
1926			"       beq	$0, %1, 9f\n"
1927			"       daddiu	%2, %2, -1\n"
1928			"       dextu	%1, %0, 40, 8\n"
1929			"6:     sb	%1, 0(%2)\n"
1930			"       andi	%1, %2, 0x7\n"
1931			"       beq	$0, %1, 9f\n"
1932			"       daddiu	%2, %2, -1\n"
1933			"       dextu	%1, %0, 48, 8\n"
1934			"7:     sb	%1, 0(%2)\n"
1935			"       andi	%1, %2, 0x7\n"
1936			"       beq	$0, %1, 9f\n"
1937			"       daddiu	%2, %2, -1\n"
1938			"       dextu	%1, %0, 56, 8\n"
1939			"0:     sb	%1, 0(%2)\n"
1940#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1941			"9:\n"
1942			"       .insn\n"
1943			"       .section        .fixup,\"ax\"\n"
1944			"8:     li	%3,%4\n"
1945			"       j	9b\n"
1946			"       .previous\n"
1947			"       .section        __ex_table,\"a\"\n"
1948			STR(PTR_WD) " 1b,8b\n"
1949			STR(PTR_WD) " 2b,8b\n"
1950			STR(PTR_WD) " 3b,8b\n"
1951			STR(PTR_WD) " 4b,8b\n"
1952			STR(PTR_WD) " 5b,8b\n"
1953			STR(PTR_WD) " 6b,8b\n"
1954			STR(PTR_WD) " 7b,8b\n"
1955			STR(PTR_WD) " 0b,8b\n"
1956			"       .previous\n"
1957			"       .set	pop\n"
1958			: "+&r"(rt), "=&r"(rs),
1959			  "+&r"(vaddr), "+&r"(err)
1960			: "i"(SIGSEGV)
1961			: "memory");
1962
1963		MIPS_R2_STATS(stores);
1964
1965		break;
1966	case ll_op:
1967		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1968		if (vaddr & 0x3) {
1969			current->thread.cp0_baduaddr = vaddr;
1970			err = SIGBUS;
1971			break;
1972		}
1973		if (!access_ok((void __user *)vaddr, 4)) {
1974			current->thread.cp0_baduaddr = vaddr;
1975			err = SIGBUS;
1976			break;
1977		}
1978
1979		if (!cpu_has_rw_llb) {
1980			/*
1981			 * An LL/SC block can't be safely emulated unless
1982			 * Config5/LLB is available. So it's probably time to
1983			 * kill our process before things get any worse. This is
1984			 * because Config5/LLB allows us to use ERETNC so that
1985			 * the LLAddr/LLB bit is not cleared when we return from
1986			 * an exception. MIPS R2 LL/SC instructions trap with an
1987			 * RI exception so once we emulate them here, we return
1988			 * back to userland with ERETNC. That preserves the
1989			 * LLAddr/LLB so the subsequent SC instruction will
1990			 * succeed preserving the atomic semantics of the LL/SC
1991			 * block. Without that, there is no safe way to emulate
1992			 * an LL/SC block in MIPSR2 userland.
1993			 */
1994			pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
1995			err = SIGKILL;
1996			break;
1997		}
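
		/*
		 * For reference, the kind of R2 user-space sequence that ends
		 * up being emulated here one trapped instruction at a time
		 * (illustrative only):
		 *
		 *	1:	ll	t0, 0(a0)
		 *		addiu	t0, t0, 1
		 *		sc	t0, 0(a0)
		 *		beqz	t0, 1b	# retry if the link was broken
		 */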
1998
1999		__asm__ __volatile__(
2000			"1:\n"
2001			"ll	%0, 0(%2)\n"
2002			"2:\n"
2003			".insn\n"
2004			".section        .fixup,\"ax\"\n"
2005			"3:\n"
2006			"li	%1, %3\n"
2007			"j	2b\n"
2008			".previous\n"
2009			".section        __ex_table,\"a\"\n"
2010			STR(PTR_WD) " 1b,3b\n"
2011			".previous\n"
2012			: "=&r"(res), "+&r"(err)
2013			: "r"(vaddr), "i"(SIGSEGV)
2014			: "memory");
2015
2016		if (MIPSInst_RT(inst) && !err)
2017			regs->regs[MIPSInst_RT(inst)] = res;
2018		MIPS_R2_STATS(llsc);
2019
2020		break;
2021
2022	case sc_op:
2023		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
2024		if (vaddr & 0x3) {
2025			current->thread.cp0_baduaddr = vaddr;
2026			err = SIGBUS;
2027			break;
2028		}
2029		if (!access_ok((void __user *)vaddr, 4)) {
2030			current->thread.cp0_baduaddr = vaddr;
2031			err = SIGBUS;
2032			break;
2033		}
2034
2035		if (!cpu_has_rw_llb) {
2036			/*
2037			 * An LL/SC block can't be safely emulated unless
2038			 * Config5/LLB is available. So it's probably time to
2039			 * kill our process before things get any worse. This is
2040			 * because Config5/LLB allows us to use ERETNC so that
2041			 * the LLAddr/LLB bit is not cleared when we return from
2042			 * an exception. MIPS R2 LL/SC instructions trap with an
2043			 * RI exception so once we emulate them here, we return
2044			 * back to userland with ERETNC. That preserves the
2045			 * LLAddr/LLB so the subsequent SC instruction will
2046			 * succeed preserving the atomic semantics of the LL/SC
2047			 * block. Without that, there is no safe way to emulate
2048			 * an LL/SC block in MIPSR2 userland.
2049			 */
2050			pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
2051			err = SIGKILL;
2052			break;
2053		}
2054
2055		res = regs->regs[MIPSInst_RT(inst)];
2056
2057		__asm__ __volatile__(
2058			"1:\n"
2059			"sc	%0, 0(%2)\n"
2060			"2:\n"
2061			".insn\n"
2062			".section        .fixup,\"ax\"\n"
2063			"3:\n"
2064			"li	%1, %3\n"
2065			"j	2b\n"
2066			".previous\n"
2067			".section        __ex_table,\"a\"\n"
2068			STR(PTR_WD) " 1b,3b\n"
2069			".previous\n"
2070			: "+&r"(res), "+&r"(err)
2071			: "r"(vaddr), "i"(SIGSEGV));
2072
2073		if (MIPSInst_RT(inst) && !err)
2074			regs->regs[MIPSInst_RT(inst)] = res;
2075
2076		MIPS_R2_STATS(llsc);
2077
2078		break;
2079
2080	case lld_op:
2081		if (IS_ENABLED(CONFIG_32BIT)) {
2082		    err = SIGILL;
2083		    break;
2084		}
2085
2086		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
2087		if (vaddr & 0x7) {
2088			current->thread.cp0_baduaddr = vaddr;
2089			err = SIGBUS;
2090			break;
2091		}
2092		if (!access_ok((void __user *)vaddr, 8)) {
2093			current->thread.cp0_baduaddr = vaddr;
2094			err = SIGBUS;
2095			break;
2096		}
2097
2098		if (!cpu_has_rw_llb) {
2099			/*
2100			 * An LL/SC block can't be safely emulated unless
2101			 * Config5/LLB is available. So it's probably time to
2102			 * kill our process before things get any worse. This is
2103			 * because Config5/LLB allows us to use ERETNC so that
2104			 * the LLAddr/LLB bit is not cleared when we return from
2105			 * an exception. MIPS R2 LL/SC instructions trap with an
2106			 * RI exception so once we emulate them here, we return
2107			 * back to userland with ERETNC. That preserves the
2108			 * LLAddr/LLB so the subsequent SC instruction will
2109			 * succeed preserving the atomic semantics of the LL/SC
2110			 * block. Without that, there is no safe way to emulate
2111			 * an LL/SC block in MIPSR2 userland.
2112			 */
2113			pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
2114			err = SIGKILL;
2115			break;
2116		}
2117
2118		__asm__ __volatile__(
2119			"1:\n"
2120			"lld	%0, 0(%2)\n"
2121			"2:\n"
2122			".insn\n"
2123			".section        .fixup,\"ax\"\n"
2124			"3:\n"
2125			"li	%1, %3\n"
2126			"j	2b\n"
2127			".previous\n"
2128			".section        __ex_table,\"a\"\n"
2129			STR(PTR_WD) " 1b,3b\n"
2130			".previous\n"
2131			: "=&r"(res), "+&r"(err)
2132			: "r"(vaddr), "i"(SIGSEGV)
2133			: "memory");
2134		if (MIPSInst_RT(inst) && !err)
2135			regs->regs[MIPSInst_RT(inst)] = res;
2136
2137		MIPS_R2_STATS(llsc);
2138
2139		break;
2140
2141	case scd_op:
2142		if (IS_ENABLED(CONFIG_32BIT)) {
2143			err = SIGILL;
2144			break;
2145		}
2146
2147		vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
2148		if (vaddr & 0x7) {
2149			current->thread.cp0_baduaddr = vaddr;
2150			err = SIGBUS;
2151			break;
2152		}
2153		if (!access_ok((void __user *)vaddr, 8)) {
2154			current->thread.cp0_baduaddr = vaddr;
2155			err = SIGBUS;
2156			break;
2157		}
2158
2159		if (!cpu_has_rw_llb) {
2160			/*
2161			 * An LL/SC block can't be safely emulated without
2162			 * Config5/LLB being available. So it's probably time to
2163			 * kill our process before things get any worse. This is
2164			 * because Config5/LLB allows us to use ERETNC so that
2165			 * the LLAddr/LLB bit is not cleared when we return from
2166			 * an exception. MIPS R2 LL/SC instructions trap with an
2167			 * RI exception so once we emulate them here, we return
2168			 * back to userland with ERETNC. That preserves the
2169			 * LLAddr/LLB so the subsequent SC instruction will
2170			 * succeed preserving the atomic semantics of the LL/SC
2171			 * block. Without that, there is no safe way to emulate
2172			 * an LL/SC block in MIPSR2 userland.
2173			 */
2174			pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
2175			err = SIGKILL;
2176			break;
2177		}
2178
2179		res = regs->regs[MIPSInst_RT(inst)];
2180
2181		__asm__ __volatile__(
2182			"1:\n"
2183			"scd	%0, 0(%2)\n"
2184			"2:\n"
2185			".insn\n"
2186			".section        .fixup,\"ax\"\n"
2187			"3:\n"
2188			"li	%1, %3\n"
2189			"j	2b\n"
2190			".previous\n"
2191			".section        __ex_table,\"a\"\n"
2192			STR(PTR_WD) " 1b,3b\n"
2193			".previous\n"
2194			: "+&r"(res), "+&r"(err)
2195			: "r"(vaddr), "i"(SIGSEGV));
2196
2197		if (MIPSInst_RT(inst) && !err)
2198			regs->regs[MIPSInst_RT(inst)] = res;
2199
2200		MIPS_R2_STATS(llsc);
2201
2202		break;
2203	case pref_op:
2204		/* skip it */
2205		break;
2206	default:
2207		err = SIGILL;
2208	}
2209
2210	/*
2211	 * Let's not return to userland just yet. It's costly and
2212	 * it's likely we have more R2 instructions to emulate.
2213	 */
2214	if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
2215		regs->cp0_cause &= ~CAUSEF_BD;
2216		err = get_user(inst, (u32 __user *)regs->cp0_epc);
2217		if (!err)
2218			goto repeat;
2219
2220		if (err < 0)
2221			err = SIGSEGV;
2222	}
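	/*
	 * Illustrative example (hypothetical user code): with the loop above,
	 * a short run of instructions that R6 removed, e.g.
	 *
	 *	mult	$a0, $a1
	 *	mflo	$v0
	 *	mfhi	$v1
	 *
	 * is handled in a single trap: after each successfully emulated
	 * instruction the word at the updated EPC is fetched and control
	 * goes back to "repeat", for at most MIPS_R2_EMUL_TOTAL_PASS
	 * instructions per exception.
	 */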
2223
2224	if (err && (err != SIGEMT)) {
2225		regs->regs[31] = r31;
2226		regs->cp0_epc = epc;
2227	}
2228
2229	/* Likely a MIPS R6-compatible instruction */
2230	if (pass && (err == SIGILL))
2231		err = 0;
2232
2233	return err;
2234}
2235
2236#ifdef CONFIG_DEBUG_FS
2237
2238static int mipsr2_emul_show(struct seq_file *s, void *unused)
2239{
2240
2241	seq_puts(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
2242	seq_printf(s, "movs\t\t%ld\t%ld\n",
2243		   (unsigned long)__this_cpu_read(mipsr2emustats.movs),
2244		   (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
2245	seq_printf(s, "hilo\t\t%ld\t%ld\n",
2246		   (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
2247		   (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
2248	seq_printf(s, "muls\t\t%ld\t%ld\n",
2249		   (unsigned long)__this_cpu_read(mipsr2emustats.muls),
2250		   (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
2251	seq_printf(s, "divs\t\t%ld\t%ld\n",
2252		   (unsigned long)__this_cpu_read(mipsr2emustats.divs),
2253		   (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
2254	seq_printf(s, "dsps\t\t%ld\t%ld\n",
2255		   (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
2256		   (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
2257	seq_printf(s, "bops\t\t%ld\t%ld\n",
2258		   (unsigned long)__this_cpu_read(mipsr2emustats.bops),
2259		   (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
2260	seq_printf(s, "traps\t\t%ld\t%ld\n",
2261		   (unsigned long)__this_cpu_read(mipsr2emustats.traps),
2262		   (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
2263	seq_printf(s, "fpus\t\t%ld\t%ld\n",
2264		   (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
2265		   (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
2266	seq_printf(s, "loads\t\t%ld\t%ld\n",
2267		   (unsigned long)__this_cpu_read(mipsr2emustats.loads),
2268		   (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
2269	seq_printf(s, "stores\t\t%ld\t%ld\n",
2270		   (unsigned long)__this_cpu_read(mipsr2emustats.stores),
2271		   (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
2272	seq_printf(s, "llsc\t\t%ld\t%ld\n",
2273		   (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
2274		   (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
2275	seq_printf(s, "dsemul\t\t%ld\t%ld\n",
2276		   (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
2277		   (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
2278	seq_printf(s, "jr\t\t%ld\n",
2279		   (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
2280	seq_printf(s, "bltzl\t\t%ld\n",
2281		   (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
2282	seq_printf(s, "bgezl\t\t%ld\n",
2283		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
2284	seq_printf(s, "bltzll\t\t%ld\n",
2285		   (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
2286	seq_printf(s, "bgezll\t\t%ld\n",
2287		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
2288	seq_printf(s, "bltzal\t\t%ld\n",
2289		   (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
2290	seq_printf(s, "bgezal\t\t%ld\n",
2291		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
2292	seq_printf(s, "beql\t\t%ld\n",
2293		   (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
2294	seq_printf(s, "bnel\t\t%ld\n",
2295		   (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
2296	seq_printf(s, "blezl\t\t%ld\n",
2297		   (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
2298	seq_printf(s, "bgtzl\t\t%ld\n",
2299		   (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));
2300
2301	return 0;
2302}
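/*
 * Sample of the seq_file output generated above, with made-up counts shown
 * purely for illustration.  The counters are per-CPU, so a read reports the
 * statistics of whichever CPU services it:
 *
 *	Instruction	Total	BDslot
 *	------------------------------
 *	movs		12	0
 *	hilo		7	1
 *	llsc		3	0
 *	...
 *	jr		5
 *	bltzl		0
 */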
2303
2304static int mipsr2_clear_show(struct seq_file *s, void *unused)
2305{
2306	mipsr2_emul_show(s, unused);
2307
2308	__this_cpu_write((mipsr2emustats).movs, 0);
2309	__this_cpu_write((mipsr2bdemustats).movs, 0);
2310	__this_cpu_write((mipsr2emustats).hilo, 0);
2311	__this_cpu_write((mipsr2bdemustats).hilo, 0);
2312	__this_cpu_write((mipsr2emustats).muls, 0);
2313	__this_cpu_write((mipsr2bdemustats).muls, 0);
2314	__this_cpu_write((mipsr2emustats).divs, 0);
2315	__this_cpu_write((mipsr2bdemustats).divs, 0);
2316	__this_cpu_write((mipsr2emustats).dsps, 0);
2317	__this_cpu_write((mipsr2bdemustats).dsps, 0);
2318	__this_cpu_write((mipsr2emustats).bops, 0);
2319	__this_cpu_write((mipsr2bdemustats).bops, 0);
2320	__this_cpu_write((mipsr2emustats).traps, 0);
2321	__this_cpu_write((mipsr2bdemustats).traps, 0);
2322	__this_cpu_write((mipsr2emustats).fpus, 0);
2323	__this_cpu_write((mipsr2bdemustats).fpus, 0);
2324	__this_cpu_write((mipsr2emustats).loads, 0);
2325	__this_cpu_write((mipsr2bdemustats).loads, 0);
2326	__this_cpu_write((mipsr2emustats).stores, 0);
2327	__this_cpu_write((mipsr2bdemustats).stores, 0);
2328	__this_cpu_write((mipsr2emustats).llsc, 0);
2329	__this_cpu_write((mipsr2bdemustats).llsc, 0);
2330	__this_cpu_write((mipsr2emustats).dsemul, 0);
2331	__this_cpu_write((mipsr2bdemustats).dsemul, 0);
2332	__this_cpu_write((mipsr2bremustats).jrs, 0);
2333	__this_cpu_write((mipsr2bremustats).bltzl, 0);
2334	__this_cpu_write((mipsr2bremustats).bgezl, 0);
2335	__this_cpu_write((mipsr2bremustats).bltzll, 0);
2336	__this_cpu_write((mipsr2bremustats).bgezll, 0);
2337	__this_cpu_write((mipsr2bremustats).bltzall, 0);
2338	__this_cpu_write((mipsr2bremustats).bgezall, 0);
2339	__this_cpu_write((mipsr2bremustats).bltzal, 0);
2340	__this_cpu_write((mipsr2bremustats).bgezal, 0);
2341	__this_cpu_write((mipsr2bremustats).beql, 0);
2342	__this_cpu_write((mipsr2bremustats).bnel, 0);
2343	__this_cpu_write((mipsr2bremustats).blezl, 0);
2344	__this_cpu_write((mipsr2bremustats).bgtzl, 0);
2345
2346	return 0;
2347}
2348
2349DEFINE_SHOW_ATTRIBUTE(mipsr2_emul);
2350DEFINE_SHOW_ATTRIBUTE(mipsr2_clear);
2351
2352static int __init mipsr2_init_debugfs(void)
2353{
2354	debugfs_create_file("r2_emul_stats", S_IRUGO, mips_debugfs_dir, NULL,
2355			    &mipsr2_emul_fops);
2356	debugfs_create_file("r2_emul_stats_clear", S_IRUGO, mips_debugfs_dir,
2357			    NULL, &mipsr2_clear_fops);
2358	return 0;
2359}
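/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and
 * mips_debugfs_dir is the usual "mips" directory there): reading the clear
 * file dumps the current counters and then zeroes them, so an interval can
 * be measured with two reads around a test program, e.g.
 *
 *	cat /sys/kernel/debug/mips/r2_emul_stats_clear	# snapshot + reset
 *	./some-r2-test-binary				# hypothetical workload
 *	cat /sys/kernel/debug/mips/r2_emul_stats	# counts since the reset
 */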
2360
2361device_initcall(mipsr2_init_debugfs);
2362
2363#endif /* CONFIG_DEBUG_FS */