v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Single-step support.
   4 *
   5 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
   6 */
   7#include <linux/kernel.h>
   8#include <linux/kprobes.h>
   9#include <linux/ptrace.h>
  10#include <linux/prefetch.h>
  11#include <asm/sstep.h>
  12#include <asm/processor.h>
  13#include <linux/uaccess.h>
  14#include <asm/cpu_has_feature.h>
  15#include <asm/cputable.h>
  16#include <asm/disassemble.h>
  17
  18#ifdef CONFIG_PPC64
  19/* Bits in SRR1 that are copied from MSR */
  20#define MSR_MASK	0xffffffff87c0ffffUL
  21#else
  22#define MSR_MASK	0x87c0ffff
  23#endif
  24
  25/* Bits in XER */
  26#define XER_SO		0x80000000U
  27#define XER_OV		0x40000000U
  28#define XER_CA		0x20000000U
  29#define XER_OV32	0x00080000U
  30#define XER_CA32	0x00040000U
  31
  32#ifdef CONFIG_VSX
  33#define VSX_REGISTER_XTP(rd)   ((((rd) & 1) << 5) | ((rd) & 0xfe))
  34#endif
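/*
 * Worked example for VSX_REGISTER_XTP above: a field value of 5
 * (0b00101) maps to VSR 36, since the low bit selects the upper bank
 * of 32 VSRs (+32) and the remaining bits give the even base of the
 * register pair (4).
 */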
  35
  36#ifdef CONFIG_PPC_FPU
  37/*
  38 * Functions in ldstfp.S
  39 */
  40extern void get_fpr(int rn, double *p);
  41extern void put_fpr(int rn, const double *p);
  42extern void get_vr(int rn, __vector128 *p);
  43extern void put_vr(int rn, __vector128 *p);
  44extern void load_vsrn(int vsr, const void *p);
  45extern void store_vsrn(int vsr, void *p);
  46extern void conv_sp_to_dp(const float *sp, double *dp);
  47extern void conv_dp_to_sp(const double *dp, float *sp);
  48#endif
  49
  50#ifdef __powerpc64__
  51/*
  52 * Functions in quad.S
  53 */
  54extern int do_lq(unsigned long ea, unsigned long *regs);
  55extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
  56extern int do_lqarx(unsigned long ea, unsigned long *regs);
  57extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
  58		    unsigned int *crp);
  59#endif
  60
  61#ifdef __LITTLE_ENDIAN__
  62#define IS_LE	1
  63#define IS_BE	0
  64#else
  65#define IS_LE	0
  66#define IS_BE	1
  67#endif
  68
  69/*
  70 * Emulate the truncation of 64 bit values in 32-bit mode.
  71 */
  72static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
  73							unsigned long val)
  74{
  75	if ((msr & MSR_64BIT) == 0)
  76		val &= 0xffffffffUL;
  77	return val;
  78}
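/*
 * Example: with MSR_64BIT clear, truncate_if_32bit(msr, 0x123456789aUL)
 * returns 0x3456789a, matching the 32-bit wrap-around of effective
 * addresses and branch targets in 32-bit mode.
 */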
  79
  80/*
  81 * Determine whether a conditional branch instruction would branch.
  82 */
  83static nokprobe_inline int branch_taken(unsigned int instr,
  84					const struct pt_regs *regs,
  85					struct instruction_op *op)
  86{
  87	unsigned int bo = (instr >> 21) & 0x1f;
  88	unsigned int bi;
  89
  90	if ((bo & 4) == 0) {
  91		/* decrement counter */
  92		op->type |= DECCTR;
  93		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
  94			return 0;
  95	}
  96	if ((bo & 0x10) == 0) {
  97		/* check bit from CR */
  98		bi = (instr >> 16) & 0x1f;
  99		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
 100			return 0;
 101	}
 102	return 1;
 103}
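/*
 * BO field summary for branch_taken(): bit 0x4 set means "don't
 * decrement CTR", bit 0x10 set means "don't test a CR bit".  For
 * example, bdnz has BO = 0b10000, so CTR is decremented and the branch
 * is taken while the decremented CTR would be non-zero, with no CR
 * bit tested.
 */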
 104
 105static nokprobe_inline long address_ok(struct pt_regs *regs,
 106				       unsigned long ea, int nb)
 107{
 108	if (!user_mode(regs))
 109		return 1;
 110	if (access_ok((void __user *)ea, nb))
 111		return 1;
 112	if (access_ok((void __user *)ea, 1))
 113		/* Access overlaps the end of the user region */
 114		regs->dar = TASK_SIZE_MAX - 1;
 115	else
 116		regs->dar = ea;
 117	return 0;
 118}
 119
 120/*
 121 * Calculate effective address for a D-form instruction
 122 */
 123static nokprobe_inline unsigned long dform_ea(unsigned int instr,
 124					      const struct pt_regs *regs)
 125{
 126	int ra;
 127	unsigned long ea;
 128
 129	ra = (instr >> 16) & 0x1f;
 130	ea = (signed short) instr;		/* sign-extend */
 131	if (ra)
 132		ea += regs->gpr[ra];
 133
 134	return ea;
 135}
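/*
 * Example: for lwz r3,-8(r4) the D field is 0xfff8, so dform_ea()
 * returns GPR[r4] - 8; with RA = 0 the sign-extended displacement
 * alone is the EA.
 */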
 136
 137#ifdef __powerpc64__
 138/*
 139 * Calculate effective address for a DS-form instruction
 140 */
 141static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
 142					       const struct pt_regs *regs)
 143{
 144	int ra;
 145	unsigned long ea;
 146
 147	ra = (instr >> 16) & 0x1f;
 148	ea = (signed short) (instr & ~3);	/* sign-extend */
 149	if (ra)
 150		ea += regs->gpr[ra];
 151
 152	return ea;
 153}
 154
 155/*
 156 * Calculate effective address for a DQ-form instruction
 157 */
 158static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
 159					       const struct pt_regs *regs)
 160{
 161	int ra;
 162	unsigned long ea;
 163
 164	ra = (instr >> 16) & 0x1f;
 165	ea = (signed short) (instr & ~0xf);	/* sign-extend */
 166	if (ra)
 167		ea += regs->gpr[ra];
 168
 169	return ea;
 170}
 171#endif /* __powerpc64 */
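/*
 * In the DS and DQ forms above, the low 2 (resp. 4) bits of the
 * displacement halfword encode sub-opcodes or are reserved, so they
 * are masked off before sign-extension: DS displacements are always
 * multiples of 4 and DQ displacements multiples of 16.
 */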
 172
 173/*
 174 * Calculate effective address for an X-form instruction
 175 */
 176static nokprobe_inline unsigned long xform_ea(unsigned int instr,
 177					      const struct pt_regs *regs)
 178{
 179	int ra, rb;
 180	unsigned long ea;
 181
 182	ra = (instr >> 16) & 0x1f;
 183	rb = (instr >> 11) & 0x1f;
 184	ea = regs->gpr[rb];
 185	if (ra)
 186		ea += regs->gpr[ra];
 187
 188	return ea;
 189}
 190
 191/*
 192 * Calculate effective address for a MLS:D-form / 8LS:D-form
 193 * prefixed instruction
 194 */
 195static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
 196						  unsigned int suffix,
 197						  const struct pt_regs *regs)
 198{
 199	int ra, prefix_r;
 200	unsigned int  dd;
 201	unsigned long ea, d0, d1, d;
 202
 203	prefix_r = GET_PREFIX_R(instr);
 204	ra = GET_PREFIX_RA(suffix);
 205
 206	d0 = instr & 0x3ffff;
 207	d1 = suffix & 0xffff;
 208	d = (d0 << 16) | d1;
 209
 210	/*
 211	 * sign extend a 34 bit number
 212	 */
 213	dd = (unsigned int)(d >> 2);
 214	ea = (signed int)dd;
 215	ea = (ea << 2) | (d & 0x3);
 216
 217	if (!prefix_r && ra)
 218		ea += regs->gpr[ra];
 219	else if (!prefix_r && !ra)
 220		; /* Leave ea as is */
 221	else if (prefix_r)
 222		ea += regs->nip;
 223
 224	/*
 225	 * (prefix_r && ra) is an invalid form. Should already be
 226	 * checked for by caller!
 227	 */
 228
 229	return ea;
 230}
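/*
 * The shift dance above sign-extends bit 33 of the 34-bit displacement:
 * e.g. d = 0x200000000 (only the sign bit set) gives dd = 0x80000000,
 * so ea becomes 0xfffffffe00000000, i.e. -2^33, before the base
 * register or NIP is added.
 */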
 231
 232/*
 233 * Return the largest power of 2, not greater than sizeof(unsigned long),
 234 * such that x is a multiple of it.
 235 */
 236static nokprobe_inline unsigned long max_align(unsigned long x)
 237{
 238	x |= sizeof(unsigned long);
 239	return x & -x;		/* isolates rightmost bit */
 240}
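/*
 * Examples: max_align(0x1006) = 2 (the EA is only 2-byte aligned),
 * while max_align(0x1000) = sizeof(unsigned long), since OR-ing in the
 * word size caps the result at the largest access the copy loops use.
 */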
 241
 242static nokprobe_inline unsigned long byterev_2(unsigned long x)
 243{
 244	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
 245}
 246
 247static nokprobe_inline unsigned long byterev_4(unsigned long x)
 248{
 249	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
 250		((x & 0xff00) << 8) | ((x & 0xff) << 24);
 251}
 252
 253#ifdef __powerpc64__
 254static nokprobe_inline unsigned long byterev_8(unsigned long x)
 255{
 256	return (byterev_4(x) << 32) | byterev_4(x >> 32);
 257}
 258#endif
 259
 260static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
 261{
 262	switch (nb) {
 263	case 2:
 264		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
 265		break;
 266	case 4:
 267		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
 268		break;
 269#ifdef __powerpc64__
 270	case 8:
 271		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
 272		break;
 273	case 16: {
 274		unsigned long *up = (unsigned long *)ptr;
 275		unsigned long tmp;
 276		tmp = byterev_8(up[0]);
 277		up[0] = byterev_8(up[1]);
 278		up[1] = tmp;
 279		break;
 280	}
 281	case 32: {
 282		unsigned long *up = (unsigned long *)ptr;
 283		unsigned long tmp;
 284
 285		tmp = byterev_8(up[0]);
 286		up[0] = byterev_8(up[3]);
 287		up[3] = tmp;
 288		tmp = byterev_8(up[2]);
 289		up[2] = byterev_8(up[1]);
 290		up[1] = tmp;
 291		break;
 292	}
 293
 294#endif
 295	default:
 296		WARN_ON_ONCE(1);
 297	}
 298}
 299
 300static __always_inline int
 301__read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
 302{
 303	unsigned long x = 0;
 304
 305	switch (nb) {
 306	case 1:
 307		unsafe_get_user(x, (unsigned char __user *)ea, Efault);
 308		break;
 309	case 2:
 310		unsafe_get_user(x, (unsigned short __user *)ea, Efault);
 311		break;
 312	case 4:
 313		unsafe_get_user(x, (unsigned int __user *)ea, Efault);
 314		break;
 315#ifdef __powerpc64__
 316	case 8:
 317		unsafe_get_user(x, (unsigned long __user *)ea, Efault);
 318		break;
 319#endif
 320	}
 321	*dest = x;
 322	return 0;
 323
 324Efault:
 325	regs->dar = ea;
 326	return -EFAULT;
 327}
 328
 329static nokprobe_inline int
 330read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
 331{
 332	int err;
 333
 334	if (is_kernel_addr(ea))
 335		return __read_mem_aligned(dest, ea, nb, regs);
 336
 337	if (user_read_access_begin((void __user *)ea, nb)) {
 338		err = __read_mem_aligned(dest, ea, nb, regs);
 339		user_read_access_end();
 340	} else {
 341		err = -EFAULT;
 342		regs->dar = ea;
 343	}
 344
 345	return err;
 346}
 347
 348/*
 349 * Copy from userspace to a buffer, using the largest possible
 350 * aligned accesses, up to sizeof(long).
 351 */
 352static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
 353{
 354	int c;
 355
 356	for (; nb > 0; nb -= c) {
 357		c = max_align(ea);
 358		if (c > nb)
 359			c = max_align(nb);
 360		switch (c) {
 361		case 1:
 362			unsafe_get_user(*dest, (u8 __user *)ea, Efault);
 363			break;
 364		case 2:
 365			unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault);
 366			break;
 367		case 4:
 368			unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault);
 369			break;
 370#ifdef __powerpc64__
 371		case 8:
 372			unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault);
 373			break;
 374#endif
 375		}
 376		dest += c;
 377		ea += c;
 378	}
 379	return 0;
 380
 381Efault:
 382	regs->dar = ea;
 383	return -EFAULT;
 384}
 385
 386static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
 387{
 388	int err;
 389
 390	if (is_kernel_addr(ea))
 391		return __copy_mem_in(dest, ea, nb, regs);
 392
 393	if (user_read_access_begin((void __user *)ea, nb)) {
 394		err = __copy_mem_in(dest, ea, nb, regs);
 395		user_read_access_end();
 396	} else {
 397		err = -EFAULT;
 398		regs->dar = ea;
 399	}
 400
 401	return err;
 402}
 403
 404static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
 405					      unsigned long ea, int nb,
 406					      struct pt_regs *regs)
 407{
 408	union {
 409		unsigned long ul;
 410		u8 b[sizeof(unsigned long)];
 411	} u;
 412	int i;
 413	int err;
 414
 415	u.ul = 0;
 416	i = IS_BE ? sizeof(unsigned long) - nb : 0;
 417	err = copy_mem_in(&u.b[i], ea, nb, regs);
 418	if (!err)
 419		*dest = u.ul;
 420	return err;
 421}
 422
 423/*
 424 * Read memory at address ea for nb bytes, return 0 for success
 425 * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
 426 * If nb < sizeof(long), the result is right-justified on BE systems.
 427 */
 428static int read_mem(unsigned long *dest, unsigned long ea, int nb,
 429			      struct pt_regs *regs)
 430{
 431	if (!address_ok(regs, ea, nb))
 432		return -EFAULT;
 433	if ((ea & (nb - 1)) == 0)
 434		return read_mem_aligned(dest, ea, nb, regs);
 435	return read_mem_unaligned(dest, ea, nb, regs);
 436}
 437NOKPROBE_SYMBOL(read_mem);
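/*
 * read_mem() takes the single-access fast path only for a naturally
 * aligned EA; e.g. a 4-byte read at ea = 0x1002 goes through
 * read_mem_unaligned(), which assembles the value from two 2-byte
 * accesses chosen by max_align().
 */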
 438
 439static __always_inline int
 440__write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
 441{
 442	switch (nb) {
 443	case 1:
 444		unsafe_put_user(val, (unsigned char __user *)ea, Efault);
 445		break;
 446	case 2:
 447		unsafe_put_user(val, (unsigned short __user *)ea, Efault);
 448		break;
 449	case 4:
 450		unsafe_put_user(val, (unsigned int __user *)ea, Efault);
 451		break;
 452#ifdef __powerpc64__
 453	case 8:
 454		unsafe_put_user(val, (unsigned long __user *)ea, Efault);
 455		break;
 456#endif
 457	}
 458	return 0;
 459
 460Efault:
 461	regs->dar = ea;
 462	return -EFAULT;
 463}
 464
 465static nokprobe_inline int
 466write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
 467{
 468	int err;
 469
 470	if (is_kernel_addr(ea))
 471		return __write_mem_aligned(val, ea, nb, regs);
 472
 473	if (user_write_access_begin((void __user *)ea, nb)) {
 474		err = __write_mem_aligned(val, ea, nb, regs);
 475		user_write_access_end();
 476	} else {
 477		err = -EFAULT;
 478		regs->dar = ea;
 479	}
 480
 481	return err;
 482}
 483
 484/*
 485 * Copy from a buffer to userspace, using the largest possible
 486 * aligned accesses, up to sizeof(long).
 487 */
 488static __always_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
 489{
 490	int c;
 491
 492	for (; nb > 0; nb -= c) {
 493		c = max_align(ea);
 494		if (c > nb)
 495			c = max_align(nb);
 496		switch (c) {
 497		case 1:
 498			unsafe_put_user(*dest, (u8 __user *)ea, Efault);
 499			break;
 500		case 2:
 501			unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault);
 502			break;
 503		case 4:
 504			unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault);
 505			break;
 506#ifdef __powerpc64__
 507		case 8:
 508			unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault);
 509			break;
 510#endif
 511		}
 512		dest += c;
 513		ea += c;
 514	}
 515	return 0;
 516
 517Efault:
 518	regs->dar = ea;
 519	return -EFAULT;
 520}
 521
 522static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
 523{
 524	int err;
 525
 526	if (is_kernel_addr(ea))
 527		return __copy_mem_out(dest, ea, nb, regs);
 528
 529	if (user_write_access_begin((void __user *)ea, nb)) {
 530		err = __copy_mem_out(dest, ea, nb, regs);
 531		user_write_access_end();
 532	} else {
 533		err = -EFAULT;
 534		regs->dar = ea;
 535	}
 536
 537	return err;
 538}
 539
 540static nokprobe_inline int write_mem_unaligned(unsigned long val,
 541					       unsigned long ea, int nb,
 542					       struct pt_regs *regs)
 543{
 544	union {
 545		unsigned long ul;
 546		u8 b[sizeof(unsigned long)];
 547	} u;
 548	int i;
 549
 550	u.ul = val;
 551	i = IS_BE ? sizeof(unsigned long) - nb : 0;
 552	return copy_mem_out(&u.b[i], ea, nb, regs);
 553}
 554
 555/*
 556 * Write memory at address ea for nb bytes, return 0 for success
 557 * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
 558 */
 559static int write_mem(unsigned long val, unsigned long ea, int nb,
 560			       struct pt_regs *regs)
 561{
 562	if (!address_ok(regs, ea, nb))
 563		return -EFAULT;
 564	if ((ea & (nb - 1)) == 0)
 565		return write_mem_aligned(val, ea, nb, regs);
 566	return write_mem_unaligned(val, ea, nb, regs);
 567}
 568NOKPROBE_SYMBOL(write_mem);
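/*
 * As with reads, the union in write_mem_unaligned() keeps a sub-word
 * value right-justified on big-endian: for nb = 2 the payload sits in
 * b[6..7] of the 8-byte image, so the bytes stored at ea are the
 * low-order bytes of val.
 */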
 569
 570#ifdef CONFIG_PPC_FPU
 571/*
 572 * These access either the real FP register or the image in the
 573 * thread_struct, depending on regs->msr & MSR_FP.
 574 */
 575static int do_fp_load(struct instruction_op *op, unsigned long ea,
 576		      struct pt_regs *regs, bool cross_endian)
 577{
 578	int err, rn, nb;
 579	union {
 580		int i;
 581		unsigned int u;
 582		float f;
 583		double d[2];
 584		unsigned long l[2];
 585		u8 b[2 * sizeof(double)];
 586	} u;
 587
 588	nb = GETSIZE(op->type);
 589	if (nb > sizeof(u))
 590		return -EINVAL;
 591	if (!address_ok(regs, ea, nb))
 592		return -EFAULT;
 593	rn = op->reg;
 594	err = copy_mem_in(u.b, ea, nb, regs);
 595	if (err)
 596		return err;
 597	if (unlikely(cross_endian)) {
 598		do_byte_reverse(u.b, min(nb, 8));
 599		if (nb == 16)
 600			do_byte_reverse(&u.b[8], 8);
 601	}
 602	preempt_disable();
 603	if (nb == 4) {
 604		if (op->type & FPCONV)
 605			conv_sp_to_dp(&u.f, &u.d[0]);
 606		else if (op->type & SIGNEXT)
 607			u.l[0] = u.i;
 608		else
 609			u.l[0] = u.u;
 610	}
 611	if (regs->msr & MSR_FP)
 612		put_fpr(rn, &u.d[0]);
 613	else
 614		current->thread.TS_FPR(rn) = u.l[0];
 615	if (nb == 16) {
 616		/* lfdp */
 617		rn |= 1;
 618		if (regs->msr & MSR_FP)
 619			put_fpr(rn, &u.d[1]);
 620		else
 621			current->thread.TS_FPR(rn) = u.l[1];
 622	}
 623	preempt_enable();
 624	return 0;
 625}
 626NOKPROBE_SYMBOL(do_fp_load);
 627
 628static int do_fp_store(struct instruction_op *op, unsigned long ea,
 629		       struct pt_regs *regs, bool cross_endian)
 630{
 631	int rn, nb;
 632	union {
 633		unsigned int u;
 634		float f;
 635		double d[2];
 636		unsigned long l[2];
 637		u8 b[2 * sizeof(double)];
 638	} u;
 639
 640	nb = GETSIZE(op->type);
 641	if (nb > sizeof(u))
 642		return -EINVAL;
 643	if (!address_ok(regs, ea, nb))
 644		return -EFAULT;
 645	rn = op->reg;
 646	preempt_disable();
 647	if (regs->msr & MSR_FP)
 648		get_fpr(rn, &u.d[0]);
 649	else
 650		u.l[0] = current->thread.TS_FPR(rn);
 651	if (nb == 4) {
 652		if (op->type & FPCONV)
 653			conv_dp_to_sp(&u.d[0], &u.f);
 654		else
 655			u.u = u.l[0];
 656	}
 657	if (nb == 16) {
 658		rn |= 1;
 659		if (regs->msr & MSR_FP)
 660			get_fpr(rn, &u.d[1]);
 661		else
 662			u.l[1] = current->thread.TS_FPR(rn);
 663	}
 664	preempt_enable();
 665	if (unlikely(cross_endian)) {
 666		do_byte_reverse(u.b, min(nb, 8));
 667		if (nb == 16)
 668			do_byte_reverse(&u.b[8], 8);
 669	}
 670	return copy_mem_out(u.b, ea, nb, regs);
 671}
 672NOKPROBE_SYMBOL(do_fp_store);
 673#endif
 674
 675#ifdef CONFIG_ALTIVEC
 676/* For Altivec/VMX, no need to worry about alignment */
 677static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
 678				       int size, struct pt_regs *regs,
 679				       bool cross_endian)
 680{
 681	int err;
 682	union {
 683		__vector128 v;
 684		u8 b[sizeof(__vector128)];
 685	} u = {};
 686
 687	if (size > sizeof(u))
 688		return -EINVAL;
 689
 690	if (!address_ok(regs, ea & ~0xfUL, 16))
 691		return -EFAULT;
 692	/* align to multiple of size */
 693	ea &= ~(size - 1);
 694	err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
 695	if (err)
 696		return err;
 697	if (unlikely(cross_endian))
 698		do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u)));
 699	preempt_disable();
 700	if (regs->msr & MSR_VEC)
 701		put_vr(rn, &u.v);
 702	else
 703		current->thread.vr_state.vr[rn] = u.v;
 704	preempt_enable();
 705	return 0;
 706}
 707
 708static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
 709					int size, struct pt_regs *regs,
 710					bool cross_endian)
 711{
 712	union {
 713		__vector128 v;
 714		u8 b[sizeof(__vector128)];
 715	} u;
 716
 717	if (size > sizeof(u))
 718		return -EINVAL;
 719
 720	if (!address_ok(regs, ea & ~0xfUL, 16))
 721		return -EFAULT;
 722	/* align to multiple of size */
 723	ea &= ~(size - 1);
 724
 725	preempt_disable();
 726	if (regs->msr & MSR_VEC)
 727		get_vr(rn, &u.v);
 728	else
 729		u.v = current->thread.vr_state.vr[rn];
 730	preempt_enable();
 731	if (unlikely(cross_endian))
 732		do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u)));
 733	return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
 734}
 735#endif /* CONFIG_ALTIVEC */
 736
 737#ifdef __powerpc64__
 738static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
 739				      int reg, bool cross_endian)
 740{
 741	int err;
 742
 743	if (!address_ok(regs, ea, 16))
 744		return -EFAULT;
 745	/* if aligned, should be atomic */
 746	if ((ea & 0xf) == 0) {
 747		err = do_lq(ea, &regs->gpr[reg]);
 748	} else {
 749		err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
 750		if (!err)
 751			err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
 752	}
 753	if (!err && unlikely(cross_endian))
 754		do_byte_reverse(&regs->gpr[reg], 16);
 755	return err;
 756}
 757
 758static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
 759				       int reg, bool cross_endian)
 760{
 761	int err;
 762	unsigned long vals[2];
 763
 764	if (!address_ok(regs, ea, 16))
 765		return -EFAULT;
 766	vals[0] = regs->gpr[reg];
 767	vals[1] = regs->gpr[reg + 1];
 768	if (unlikely(cross_endian))
 769		do_byte_reverse(vals, 16);
 770
 771	/* if aligned, should be atomic */
 772	if ((ea & 0xf) == 0)
 773		return do_stq(ea, vals[0], vals[1]);
 774
 775	err = write_mem(vals[IS_LE], ea, 8, regs);
 776	if (!err)
 777		err = write_mem(vals[IS_BE], ea + 8, 8, regs);
 778	return err;
 779}
 780#endif /* __powerpc64 */
 781
 782#ifdef CONFIG_VSX
 783void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
 784		      const void *mem, bool rev)
 785{
 786	int size, read_size;
 787	int i, j;
 788	const unsigned int *wp;
 789	const unsigned short *hp;
 790	const unsigned char *bp;
 791
 792	size = GETSIZE(op->type);
 793	reg->d[0] = reg->d[1] = 0;
 794
 795	switch (op->element_size) {
 796	case 32:
 797		/* [p]lxvp[x] */
 798	case 16:
 799		/* whole vector; lxv[x] or lxvl[l] */
 800		if (size == 0)
 801			break;
 802		memcpy(reg, mem, size);
 803		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
 804			rev = !rev;
 805		if (rev)
 806			do_byte_reverse(reg, size);
 807		break;
 808	case 8:
 809		/* scalar loads, lxvd2x, lxvdsx */
 810		read_size = (size >= 8) ? 8 : size;
 811		i = IS_LE ? 8 : 8 - read_size;
 812		memcpy(&reg->b[i], mem, read_size);
 813		if (rev)
 814			do_byte_reverse(&reg->b[i], 8);
 815		if (size < 8) {
 816			if (op->type & SIGNEXT) {
 817				/* size == 4 is the only case here */
 818				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
 819			} else if (op->vsx_flags & VSX_FPCONV) {
 820				preempt_disable();
 821				conv_sp_to_dp(&reg->fp[1 + IS_LE],
 822					      &reg->dp[IS_LE]);
 823				preempt_enable();
 824			}
 825		} else {
 826			if (size == 16) {
 827				unsigned long v = *(unsigned long *)(mem + 8);
 828				reg->d[IS_BE] = !rev ? v : byterev_8(v);
 829			} else if (op->vsx_flags & VSX_SPLAT)
 830				reg->d[IS_BE] = reg->d[IS_LE];
 831		}
 832		break;
 833	case 4:
 834		/* lxvw4x, lxvwsx */
 835		wp = mem;
 836		for (j = 0; j < size / 4; ++j) {
 837			i = IS_LE ? 3 - j : j;
 838			reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
 839		}
 840		if (op->vsx_flags & VSX_SPLAT) {
 841			u32 val = reg->w[IS_LE ? 3 : 0];
 842			for (; j < 4; ++j) {
 843				i = IS_LE ? 3 - j : j;
 844				reg->w[i] = val;
 845			}
 846		}
 847		break;
 848	case 2:
 849		/* lxvh8x */
 850		hp = mem;
 851		for (j = 0; j < size / 2; ++j) {
 852			i = IS_LE ? 7 - j : j;
 853			reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
 854		}
 855		break;
 856	case 1:
 857		/* lxvb16x */
 858		bp = mem;
 859		for (j = 0; j < size; ++j) {
 860			i = IS_LE ? 15 - j : j;
 861			reg->b[i] = *bp++;
 862		}
 863		break;
 864	}
 865}
 866EXPORT_SYMBOL_GPL(emulate_vsx_load);
 867NOKPROBE_SYMBOL(emulate_vsx_load);
 868
 869void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
 870		       void *mem, bool rev)
 871{
 872	int size, write_size;
 873	int i, j;
 874	union vsx_reg buf;
 875	unsigned int *wp;
 876	unsigned short *hp;
 877	unsigned char *bp;
 878
 879	size = GETSIZE(op->type);
 880
 881	switch (op->element_size) {
 882	case 32:
 883		/* [p]stxvp[x] */
 884		if (size == 0)
 885			break;
 886		if (rev) {
 887			/* reverse 32 bytes */
 888			union vsx_reg buf32[2];
 889			buf32[0].d[0] = byterev_8(reg[1].d[1]);
 890			buf32[0].d[1] = byterev_8(reg[1].d[0]);
 891			buf32[1].d[0] = byterev_8(reg[0].d[1]);
 892			buf32[1].d[1] = byterev_8(reg[0].d[0]);
 893			memcpy(mem, buf32, size);
 894		} else {
 895			memcpy(mem, reg, size);
 896		}
 897		break;
 898	case 16:
 899		/* stxv, stxvx, stxvl, stxvll */
 900		if (size == 0)
 901			break;
 902		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
 903			rev = !rev;
 904		if (rev) {
 905			/* reverse 16 bytes */
 906			buf.d[0] = byterev_8(reg->d[1]);
 907			buf.d[1] = byterev_8(reg->d[0]);
 908			reg = &buf;
 909		}
 910		memcpy(mem, reg, size);
 911		break;
 912	case 8:
 913		/* scalar stores, stxvd2x */
 914		write_size = (size >= 8) ? 8 : size;
 915		i = IS_LE ? 8 : 8 - write_size;
 916		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
 917			buf.d[0] = buf.d[1] = 0;
 918			preempt_disable();
 919			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
 920			preempt_enable();
 921			reg = &buf;
 922		}
 923		memcpy(mem, &reg->b[i], write_size);
 924		if (size == 16)
 925			memcpy(mem + 8, &reg->d[IS_BE], 8);
 926		if (unlikely(rev)) {
 927			do_byte_reverse(mem, write_size);
 928			if (size == 16)
 929				do_byte_reverse(mem + 8, 8);
 930		}
 931		break;
 932	case 4:
 933		/* stxvw4x */
 934		wp = mem;
 935		for (j = 0; j < size / 4; ++j) {
 936			i = IS_LE ? 3 - j : j;
 937			*wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
 938		}
 939		break;
 940	case 2:
 941		/* stxvh8x */
 942		hp = mem;
 943		for (j = 0; j < size / 2; ++j) {
 944			i = IS_LE ? 7 - j : j;
 945			*hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
 946		}
 947		break;
 948	case 1:
  949		/* stxvb16x */
 950		bp = mem;
 951		for (j = 0; j < size; ++j) {
 952			i = IS_LE ? 15 - j : j;
 953			*bp++ = reg->b[i];
 954		}
 955		break;
 956	}
 957}
 958EXPORT_SYMBOL_GPL(emulate_vsx_store);
 959NOKPROBE_SYMBOL(emulate_vsx_store);
 960
 961static nokprobe_inline int do_vsx_load(struct instruction_op *op,
 962				       unsigned long ea, struct pt_regs *regs,
 963				       bool cross_endian)
 964{
 965	int reg = op->reg;
 966	int i, j, nr_vsx_regs;
 967	u8 mem[32];
 968	union vsx_reg buf[2];
 969	int size = GETSIZE(op->type);
 970
 971	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
 972		return -EFAULT;
 973
 974	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
 975	emulate_vsx_load(op, buf, mem, cross_endian);
 976	preempt_disable();
 977	if (reg < 32) {
 978		/* FP regs + extensions */
 979		if (regs->msr & MSR_FP) {
 980			for (i = 0; i < nr_vsx_regs; i++) {
 981				j = IS_LE ? nr_vsx_regs - i - 1 : i;
 982				load_vsrn(reg + i, &buf[j].v);
 983			}
 984		} else {
 985			for (i = 0; i < nr_vsx_regs; i++) {
 986				j = IS_LE ? nr_vsx_regs - i - 1 : i;
 987				current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
 988				current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
 989			}
 990		}
 991	} else {
 992		if (regs->msr & MSR_VEC) {
 993			for (i = 0; i < nr_vsx_regs; i++) {
 994				j = IS_LE ? nr_vsx_regs - i - 1 : i;
 995				load_vsrn(reg + i, &buf[j].v);
 996			}
 997		} else {
 998			for (i = 0; i < nr_vsx_regs; i++) {
 999				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1000				current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
1001			}
1002		}
1003	}
1004	preempt_enable();
1005	return 0;
1006}
1007
1008static nokprobe_inline int do_vsx_store(struct instruction_op *op,
1009					unsigned long ea, struct pt_regs *regs,
1010					bool cross_endian)
1011{
1012	int reg = op->reg;
1013	int i, j, nr_vsx_regs;
1014	u8 mem[32];
1015	union vsx_reg buf[2];
1016	int size = GETSIZE(op->type);
1017
1018	if (!address_ok(regs, ea, size))
1019		return -EFAULT;
1020
1021	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
1022	preempt_disable();
1023	if (reg < 32) {
1024		/* FP regs + extensions */
1025		if (regs->msr & MSR_FP) {
1026			for (i = 0; i < nr_vsx_regs; i++) {
1027				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1028				store_vsrn(reg + i, &buf[j].v);
1029			}
1030		} else {
1031			for (i = 0; i < nr_vsx_regs; i++) {
1032				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1033				buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
1034				buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
1035			}
1036		}
1037	} else {
1038		if (regs->msr & MSR_VEC) {
1039			for (i = 0; i < nr_vsx_regs; i++) {
1040				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1041				store_vsrn(reg + i, &buf[j].v);
1042			}
1043		} else {
1044			for (i = 0; i < nr_vsx_regs; i++) {
1045				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1046				buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
1047			}
1048		}
1049	}
1050	preempt_enable();
1051	emulate_vsx_store(op, buf, mem, cross_endian);
1052	return  copy_mem_out(mem, ea, size, regs);
1053}
1054#endif /* CONFIG_VSX */
1055
1056static __always_inline int __emulate_dcbz(unsigned long ea)
1057{
1058	unsigned long i;
1059	unsigned long size = l1_dcache_bytes();
1060
1061	for (i = 0; i < size; i += sizeof(long))
1062		unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault);
1063
1064	return 0;
1065
1066Efault:
1067	return -EFAULT;
1068}
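/*
 * dcbz zeroes one whole L1 cache block, so emulate_dcbz() below rounds
 * the EA down to a block boundary first: with 128-byte blocks, dcbz on
 * ea = 0x1064 clears 0x1000..0x107f.
 */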
1069
1070int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
1071{
1072	int err;
1073	unsigned long size = l1_dcache_bytes();
1074
1075	ea = truncate_if_32bit(regs->msr, ea);
1076	ea &= ~(size - 1);
1077	if (!address_ok(regs, ea, size))
1078		return -EFAULT;
1079
1080	if (is_kernel_addr(ea)) {
1081		err = __emulate_dcbz(ea);
1082	} else if (user_write_access_begin((void __user *)ea, size)) {
1083		err = __emulate_dcbz(ea);
1084		user_write_access_end();
1085	} else {
1086		err = -EFAULT;
1087	}
1088
1089	if (err)
1090		regs->dar = ea;
1091
1092
1093	return err;
1094}
1095NOKPROBE_SYMBOL(emulate_dcbz);
1096
1097#define __put_user_asmx(x, addr, err, op, cr)		\
1098	__asm__ __volatile__(				\
1099		".machine push\n"			\
1100		".machine power8\n"			\
1101		"1:	" op " %2,0,%3\n"		\
1102		".machine pop\n"			\
1103		"	mfcr	%1\n"			\
1104		"2:\n"					\
1105		".section .fixup,\"ax\"\n"		\
1106		"3:	li	%0,%4\n"		\
1107		"	b	2b\n"			\
1108		".previous\n"				\
1109		EX_TABLE(1b, 3b)			\
1110		: "=r" (err), "=r" (cr)			\
1111		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
1112
1113#define __get_user_asmx(x, addr, err, op)		\
1114	__asm__ __volatile__(				\
1115		".machine push\n"			\
1116		".machine power8\n"			\
1117		"1:	"op" %1,0,%2\n"			\
1118		".machine pop\n"			\
1119		"2:\n"					\
1120		".section .fixup,\"ax\"\n"		\
1121		"3:	li	%0,%3\n"		\
1122		"	b	2b\n"			\
1123		".previous\n"				\
1124		EX_TABLE(1b, 3b)			\
1125		: "=r" (err), "=r" (x)			\
1126		: "r" (addr), "i" (-EFAULT), "0" (err))
1127
1128#define __cacheop_user_asmx(addr, err, op)		\
1129	__asm__ __volatile__(				\
1130		"1:	"op" 0,%1\n"			\
1131		"2:\n"					\
1132		".section .fixup,\"ax\"\n"		\
1133		"3:	li	%0,%3\n"		\
1134		"	b	2b\n"			\
1135		".previous\n"				\
1136		EX_TABLE(1b, 3b)			\
1137		: "=r" (err)				\
1138		: "r" (addr), "i" (-EFAULT), "0" (err))
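/*
 * The asm macros above use the usual extable pattern: label 1 is the
 * faulting access, the .fixup stub at label 3 loads -EFAULT into err
 * and branches back to label 2, and EX_TABLE(1b, 3b) registers the
 * fault redirection.
 */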
1139
1140static nokprobe_inline void set_cr0(const struct pt_regs *regs,
1141				    struct instruction_op *op)
1142{
1143	long val = op->val;
1144
1145	op->type |= SETCC;
1146	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
1147	if (!(regs->msr & MSR_64BIT))
1148		val = (int) val;
1149	if (val < 0)
1150		op->ccval |= 0x80000000;
1151	else if (val > 0)
1152		op->ccval |= 0x40000000;
1153	else
1154		op->ccval |= 0x20000000;
1155}
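/*
 * CR0 layout produced above: 0x80000000 = LT, 0x40000000 = GT,
 * 0x20000000 = EQ, and the bit copied from XER_SO (0x10000000) is SO.
 * E.g. addic. with a zero result and XER[SO] clear yields CR0 = 0b0010.
 */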
1156
1157static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
1158{
1159	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1160		if (val)
1161			op->xerval |= XER_CA32;
1162		else
1163			op->xerval &= ~XER_CA32;
1164	}
1165}
1166
1167static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
1168				     struct instruction_op *op, int rd,
1169				     unsigned long val1, unsigned long val2,
1170				     unsigned long carry_in)
1171{
1172	unsigned long val = val1 + val2;
1173
1174	if (carry_in)
1175		++val;
1176	op->type = COMPUTE | SETREG | SETXER;
1177	op->reg = rd;
1178	op->val = val;
1179	val = truncate_if_32bit(regs->msr, val);
1180	val1 = truncate_if_32bit(regs->msr, val1);
1181	op->xerval = regs->xer;
1182	if (val < val1 || (carry_in && val == val1))
1183		op->xerval |= XER_CA;
1184	else
1185		op->xerval &= ~XER_CA;
1186
1187	set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
1188			(carry_in && (unsigned int)val == (unsigned int)val1));
1189}
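/*
 * The unsigned comparison above detects carry-out: after truncation,
 * val < val1 can only happen if the addition wrapped, e.g.
 * 0xffffffffffffffff + 2 gives val = 1.  The val == val1 case covers a
 * carry-in with val2 == ~0UL; set_ca32() repeats the test on 32 bits.
 */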
1190
1191static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
1192					  struct instruction_op *op,
1193					  long v1, long v2, int crfld)
1194{
1195	unsigned int crval, shift;
1196
1197	op->type = COMPUTE | SETCC;
1198	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1199	if (v1 < v2)
1200		crval |= 8;
1201	else if (v1 > v2)
1202		crval |= 4;
1203	else
1204		crval |= 2;
1205	shift = (7 - crfld) * 4;
1206	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1207}
1208
1209static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
1210					    struct instruction_op *op,
1211					    unsigned long v1,
1212					    unsigned long v2, int crfld)
1213{
1214	unsigned int crval, shift;
1215
1216	op->type = COMPUTE | SETCC;
1217	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1218	if (v1 < v2)
1219		crval |= 8;
1220	else if (v1 > v2)
1221		crval |= 4;
1222	else
1223		crval |= 2;
1224	shift = (7 - crfld) * 4;
1225	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1226}
1227
1228static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
1229				    struct instruction_op *op,
1230				    unsigned long v1, unsigned long v2)
1231{
1232	unsigned long long out_val, mask;
1233	int i;
1234
1235	out_val = 0;
1236	for (i = 0; i < 8; i++) {
1237		mask = 0xffUL << (i * 8);
1238		if ((v1 & mask) == (v2 & mask))
1239			out_val |= mask;
1240	}
1241	op->val = out_val;
1242}
1243
1244/*
1245 * The size parameter is used to adjust the equivalent popcnt instruction.
1246 * popcntb = 8, popcntw = 32, popcntd = 64
1247 */
1248static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
1249				      struct instruction_op *op,
1250				      unsigned long v1, int size)
1251{
1252	unsigned long long out = v1;
1253
1254	out -= (out >> 1) & 0x5555555555555555ULL;
1255	out = (0x3333333333333333ULL & out) +
1256	      (0x3333333333333333ULL & (out >> 2));
1257	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1258
1259	if (size == 8) {	/* popcntb */
1260		op->val = out;
1261		return;
1262	}
1263	out += out >> 8;
1264	out += out >> 16;
1265	if (size == 32) {	/* popcntw */
1266		op->val = out & 0x0000003f0000003fULL;
1267		return;
1268	}
1269
1270	out = (out + (out >> 32)) & 0x7f;
1271	op->val = out;	/* popcntd */
1272}
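/*
 * do_popcnt() is the classic SWAR population count: pairwise 2-bit
 * sums, then nibble sums, then byte/halfword/word folding, with early
 * exits at byte and word granularity for popcntb and popcntw.
 */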
1273
1274#ifdef CONFIG_PPC64
1275static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
1276				      struct instruction_op *op,
1277				      unsigned long v1, unsigned long v2)
1278{
1279	unsigned char perm, idx;
1280	unsigned int i;
1281
1282	perm = 0;
1283	for (i = 0; i < 8; i++) {
1284		idx = (v1 >> (i * 8)) & 0xff;
1285		if (idx < 64)
1286			if (v2 & PPC_BIT(idx))
1287				perm |= 1 << i;
1288	}
1289	op->val = perm;
1290}
1291#endif /* CONFIG_PPC64 */
1292/*
1293 * The size parameter adjusts the equivalent prty instruction.
1294 * prtyw = 32, prtyd = 64
1295 */
1296static nokprobe_inline void do_prty(const struct pt_regs *regs,
1297				    struct instruction_op *op,
1298				    unsigned long v, int size)
1299{
1300	unsigned long long res = v ^ (v >> 8);
1301
1302	res ^= res >> 16;
1303	if (size == 32) {		/* prtyw */
1304		op->val = res & 0x0000000100000001ULL;
1305		return;
1306	}
1307
1308	res ^= res >> 32;
 1309	op->val = res & 1;	/* prtyd */
1310}
1311
1312static nokprobe_inline int trap_compare(long v1, long v2)
1313{
1314	int ret = 0;
1315
1316	if (v1 < v2)
1317		ret |= 0x10;
1318	else if (v1 > v2)
1319		ret |= 0x08;
1320	else
1321		ret |= 0x04;
1322	if ((unsigned long)v1 < (unsigned long)v2)
1323		ret |= 0x02;
1324	else if ((unsigned long)v1 > (unsigned long)v2)
1325		ret |= 0x01;
1326	return ret;
1327}
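/*
 * trap_compare() returns the TO-field conditions that hold: 0x10/0x08
 * signed less/greater, 0x04 equal, 0x02/0x01 unsigned less/greater.
 * One of LT/GT/EQ always holds, so TO = 0x1f always traps; the tw
 * handling below also checks that value explicitly.
 */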
1328
1329/*
1330 * Elements of 32-bit rotate and mask instructions.
1331 */
1332#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
1333			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
1334#ifdef __powerpc64__
1335#define MASK64_L(mb)	(~0UL >> (mb))
1336#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
1337#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
1338#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
1339#else
1340#define DATA32(x)	(x)
1341#endif
1342#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
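/*
 * MASK32/MASK64 build the ISA's wrap-around rotate masks: e.g.
 * MASK32(0, 31) = 0xffffffff, while mb > me wraps, so MASK32(30, 1)
 * yields 0xc0000003.
 */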
1343
1344/*
1345 * Decode an instruction, and return information about it in *op
1346 * without changing *regs.
1347 * Integer arithmetic and logical instructions, branches, and barrier
1348 * instructions can be emulated just using the information in *op.
1349 *
1350 * Return value is 1 if the instruction can be emulated just by
1351 * updating *regs with the information in *op, -1 if we need the
1352 * GPRs but *regs doesn't contain the full register set, or 0
1353 * otherwise.
1354 */
1355int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1356		  ppc_inst_t instr)
1357{
1358#ifdef CONFIG_PPC64
1359	unsigned int suffixopcode, prefixtype, prefix_r;
1360#endif
1361	unsigned int opcode, ra, rb, rc, rd, spr, u;
1362	unsigned long int imm;
1363	unsigned long int val, val2;
1364	unsigned int mb, me, sh;
1365	unsigned int word, suffix;
1366	long ival;
1367
1368	word = ppc_inst_val(instr);
1369	suffix = ppc_inst_suffix(instr);
1370
1371	op->type = COMPUTE;
1372
1373	opcode = ppc_inst_primary_opcode(instr);
1374	switch (opcode) {
1375	case 16:	/* bc */
1376		op->type = BRANCH;
1377		imm = (signed short)(word & 0xfffc);
1378		if ((word & 2) == 0)
1379			imm += regs->nip;
1380		op->val = truncate_if_32bit(regs->msr, imm);
1381		if (word & 1)
1382			op->type |= SETLK;
1383		if (branch_taken(word, regs, op))
1384			op->type |= BRTAKEN;
1385		return 1;
1386	case 17:	/* sc */
1387		if ((word & 0xfe2) == 2)
1388			op->type = SYSCALL;
1389		else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1390				(word & 0xfe3) == 1) {	/* scv */
1391			op->type = SYSCALL_VECTORED_0;
1392			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1393				goto unknown_opcode;
1394		} else
1395			op->type = UNKNOWN;
1396		return 0;
1397	case 18:	/* b */
1398		op->type = BRANCH | BRTAKEN;
1399		imm = word & 0x03fffffc;
1400		if (imm & 0x02000000)
1401			imm -= 0x04000000;
1402		if ((word & 2) == 0)
1403			imm += regs->nip;
1404		op->val = truncate_if_32bit(regs->msr, imm);
1405		if (word & 1)
1406			op->type |= SETLK;
1407		return 1;
1408	case 19:
1409		switch ((word >> 1) & 0x3ff) {
1410		case 0:		/* mcrf */
1411			op->type = COMPUTE + SETCC;
1412			rd = 7 - ((word >> 23) & 0x7);
1413			ra = 7 - ((word >> 18) & 0x7);
1414			rd *= 4;
1415			ra *= 4;
1416			val = (regs->ccr >> ra) & 0xf;
1417			op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1418			return 1;
1419
1420		case 16:	/* bclr */
1421		case 528:	/* bcctr */
1422			op->type = BRANCH;
1423			imm = (word & 0x400)? regs->ctr: regs->link;
1424			op->val = truncate_if_32bit(regs->msr, imm);
1425			if (word & 1)
1426				op->type |= SETLK;
1427			if (branch_taken(word, regs, op))
1428				op->type |= BRTAKEN;
1429			return 1;
1430
1431		case 18:	/* rfid, scary */
1432			if (regs->msr & MSR_PR)
1433				goto priv;
1434			op->type = RFI;
1435			return 0;
1436
1437		case 150:	/* isync */
1438			op->type = BARRIER | BARRIER_ISYNC;
1439			return 1;
1440
1441		case 33:	/* crnor */
1442		case 129:	/* crandc */
1443		case 193:	/* crxor */
1444		case 225:	/* crnand */
1445		case 257:	/* crand */
1446		case 289:	/* creqv */
1447		case 417:	/* crorc */
1448		case 449:	/* cror */
1449			op->type = COMPUTE + SETCC;
1450			ra = (word >> 16) & 0x1f;
1451			rb = (word >> 11) & 0x1f;
1452			rd = (word >> 21) & 0x1f;
1453			ra = (regs->ccr >> (31 - ra)) & 1;
1454			rb = (regs->ccr >> (31 - rb)) & 1;
1455			val = (word >> (6 + ra * 2 + rb)) & 1;
1456			op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1457				(val << (31 - rd));
1458			return 1;
1459		}
1460		break;
1461	case 31:
1462		switch ((word >> 1) & 0x3ff) {
1463		case 598:	/* sync */
1464			op->type = BARRIER + BARRIER_SYNC;
1465#ifdef __powerpc64__
1466			switch ((word >> 21) & 3) {
1467			case 1:		/* lwsync */
1468				op->type = BARRIER + BARRIER_LWSYNC;
1469				break;
1470			case 2:		/* ptesync */
1471				op->type = BARRIER + BARRIER_PTESYNC;
1472				break;
1473			}
1474#endif
1475			return 1;
1476
1477		case 854:	/* eieio */
1478			op->type = BARRIER + BARRIER_EIEIO;
1479			return 1;
1480		}
1481		break;
1482	}
1483
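	/*
	 * Field layout reminder: counting from the least significant bit
	 * as these extractions do, rd/rs is bits 21-25 of the word,
	 * ra bits 16-20, rb bits 11-15 and rc bits 6-10.
	 */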
1484	rd = (word >> 21) & 0x1f;
1485	ra = (word >> 16) & 0x1f;
1486	rb = (word >> 11) & 0x1f;
1487	rc = (word >> 6) & 0x1f;
1488
1489	switch (opcode) {
1490#ifdef __powerpc64__
1491	case 1:
1492		if (!cpu_has_feature(CPU_FTR_ARCH_31))
1493			goto unknown_opcode;
1494
1495		prefix_r = GET_PREFIX_R(word);
1496		ra = GET_PREFIX_RA(suffix);
1497		rd = (suffix >> 21) & 0x1f;
1498		op->reg = rd;
1499		op->val = regs->gpr[rd];
1500		suffixopcode = get_op(suffix);
1501		prefixtype = (word >> 24) & 0x3;
1502		switch (prefixtype) {
1503		case 2:
1504			if (prefix_r && ra)
1505				return 0;
1506			switch (suffixopcode) {
1507			case 14:	/* paddi */
1508				op->type = COMPUTE | PREFIXED;
1509				op->val = mlsd_8lsd_ea(word, suffix, regs);
1510				goto compute_done;
1511			}
1512		}
1513		break;
1514	case 2:		/* tdi */
1515		if (rd & trap_compare(regs->gpr[ra], (short) word))
1516			goto trap;
1517		return 1;
1518#endif
1519	case 3:		/* twi */
1520		if (rd & trap_compare((int)regs->gpr[ra], (short) word))
1521			goto trap;
1522		return 1;
1523
1524#ifdef __powerpc64__
1525	case 4:
1526		/*
1527		 * There are very many instructions with this primary opcode
1528		 * introduced in the ISA as early as v2.03. However, the ones
1529		 * we currently emulate were all introduced with ISA 3.0
1530		 */
1531		if (!cpu_has_feature(CPU_FTR_ARCH_300))
1532			goto unknown_opcode;
1533
1534		switch (word & 0x3f) {
1535		case 48:	/* maddhd */
1536			asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
1537				     "=r" (op->val) : "r" (regs->gpr[ra]),
1538				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1539			goto compute_done;
1540
1541		case 49:	/* maddhdu */
1542			asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
1543				     "=r" (op->val) : "r" (regs->gpr[ra]),
1544				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1545			goto compute_done;
1546
1547		case 51:	/* maddld */
1548			asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
1549				     "=r" (op->val) : "r" (regs->gpr[ra]),
1550				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1551			goto compute_done;
1552		}
1553
1554		/*
1555		 * There are other instructions from ISA 3.0 with the same
1556		 * primary opcode which do not have emulation support yet.
1557		 */
1558		goto unknown_opcode;
1559#endif
1560
1561	case 7:		/* mulli */
1562		op->val = regs->gpr[ra] * (short) word;
1563		goto compute_done;
1564
1565	case 8:		/* subfic */
1566		imm = (short) word;
1567		add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1568		return 1;
1569
1570	case 10:	/* cmpli */
1571		imm = (unsigned short) word;
1572		val = regs->gpr[ra];
1573#ifdef __powerpc64__
1574		if ((rd & 1) == 0)
1575			val = (unsigned int) val;
1576#endif
1577		do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1578		return 1;
1579
1580	case 11:	/* cmpi */
1581		imm = (short) word;
1582		val = regs->gpr[ra];
1583#ifdef __powerpc64__
1584		if ((rd & 1) == 0)
1585			val = (int) val;
1586#endif
1587		do_cmp_signed(regs, op, val, imm, rd >> 2);
1588		return 1;
1589
1590	case 12:	/* addic */
1591		imm = (short) word;
1592		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1593		return 1;
1594
1595	case 13:	/* addic. */
1596		imm = (short) word;
1597		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1598		set_cr0(regs, op);
1599		return 1;
1600
1601	case 14:	/* addi */
1602		imm = (short) word;
1603		if (ra)
1604			imm += regs->gpr[ra];
1605		op->val = imm;
1606		goto compute_done;
1607
1608	case 15:	/* addis */
1609		imm = ((short) word) << 16;
1610		if (ra)
1611			imm += regs->gpr[ra];
1612		op->val = imm;
1613		goto compute_done;
1614
1615	case 19:
1616		if (((word >> 1) & 0x1f) == 2) {
1617			/* addpcis */
1618			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1619				goto unknown_opcode;
1620			imm = (short) (word & 0xffc1);	/* d0 + d2 fields */
1621			imm |= (word >> 15) & 0x3e;	/* d1 field */
1622			op->val = regs->nip + (imm << 16) + 4;
1623			goto compute_done;
1624		}
1625		op->type = UNKNOWN;
1626		return 0;
1627
1628	case 20:	/* rlwimi */
1629		mb = (word >> 6) & 0x1f;
1630		me = (word >> 1) & 0x1f;
1631		val = DATA32(regs->gpr[rd]);
1632		imm = MASK32(mb, me);
1633		op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1634		goto logical_done;
1635
1636	case 21:	/* rlwinm */
1637		mb = (word >> 6) & 0x1f;
1638		me = (word >> 1) & 0x1f;
1639		val = DATA32(regs->gpr[rd]);
1640		op->val = ROTATE(val, rb) & MASK32(mb, me);
1641		goto logical_done;
1642
1643	case 23:	/* rlwnm */
1644		mb = (word >> 6) & 0x1f;
1645		me = (word >> 1) & 0x1f;
1646		rb = regs->gpr[rb] & 0x1f;
1647		val = DATA32(regs->gpr[rd]);
1648		op->val = ROTATE(val, rb) & MASK32(mb, me);
1649		goto logical_done;
1650
1651	case 24:	/* ori */
1652		op->val = regs->gpr[rd] | (unsigned short) word;
1653		goto logical_done_nocc;
1654
1655	case 25:	/* oris */
1656		imm = (unsigned short) word;
1657		op->val = regs->gpr[rd] | (imm << 16);
1658		goto logical_done_nocc;
1659
1660	case 26:	/* xori */
1661		op->val = regs->gpr[rd] ^ (unsigned short) word;
1662		goto logical_done_nocc;
1663
1664	case 27:	/* xoris */
1665		imm = (unsigned short) word;
1666		op->val = regs->gpr[rd] ^ (imm << 16);
1667		goto logical_done_nocc;
1668
1669	case 28:	/* andi. */
1670		op->val = regs->gpr[rd] & (unsigned short) word;
1671		set_cr0(regs, op);
1672		goto logical_done_nocc;
1673
1674	case 29:	/* andis. */
1675		imm = (unsigned short) word;
1676		op->val = regs->gpr[rd] & (imm << 16);
1677		set_cr0(regs, op);
1678		goto logical_done_nocc;
1679
1680#ifdef __powerpc64__
1681	case 30:	/* rld* */
1682		mb = ((word >> 6) & 0x1f) | (word & 0x20);
1683		val = regs->gpr[rd];
1684		if ((word & 0x10) == 0) {
1685			sh = rb | ((word & 2) << 4);
1686			val = ROTATE(val, sh);
1687			switch ((word >> 2) & 3) {
1688			case 0:		/* rldicl */
1689				val &= MASK64_L(mb);
1690				break;
1691			case 1:		/* rldicr */
1692				val &= MASK64_R(mb);
1693				break;
1694			case 2:		/* rldic */
1695				val &= MASK64(mb, 63 - sh);
1696				break;
1697			case 3:		/* rldimi */
1698				imm = MASK64(mb, 63 - sh);
1699				val = (regs->gpr[ra] & ~imm) |
1700					(val & imm);
1701			}
1702			op->val = val;
1703			goto logical_done;
1704		} else {
1705			sh = regs->gpr[rb] & 0x3f;
1706			val = ROTATE(val, sh);
1707			switch ((word >> 1) & 7) {
1708			case 0:		/* rldcl */
1709				op->val = val & MASK64_L(mb);
1710				goto logical_done;
1711			case 1:		/* rldcr */
1712				op->val = val & MASK64_R(mb);
1713				goto logical_done;
1714			}
1715		}
1716#endif
1717		op->type = UNKNOWN;	/* illegal instruction */
1718		return 0;
1719
1720	case 31:
1721		/* isel occupies 32 minor opcodes */
1722		if (((word >> 1) & 0x1f) == 15) {
1723			mb = (word >> 6) & 0x1f; /* bc field */
1724			val = (regs->ccr >> (31 - mb)) & 1;
1725			val2 = (ra) ? regs->gpr[ra] : 0;
1726
1727			op->val = (val) ? val2 : regs->gpr[rb];
1728			goto compute_done;
1729		}
1730
1731		switch ((word >> 1) & 0x3ff) {
1732		case 4:		/* tw */
1733			if (rd == 0x1f ||
1734			    (rd & trap_compare((int)regs->gpr[ra],
1735					       (int)regs->gpr[rb])))
1736				goto trap;
1737			return 1;
1738#ifdef __powerpc64__
1739		case 68:	/* td */
1740			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1741				goto trap;
1742			return 1;
1743#endif
1744		case 83:	/* mfmsr */
1745			if (regs->msr & MSR_PR)
1746				goto priv;
1747			op->type = MFMSR;
1748			op->reg = rd;
1749			return 0;
1750		case 146:	/* mtmsr */
1751			if (regs->msr & MSR_PR)
1752				goto priv;
1753			op->type = MTMSR;
1754			op->reg = rd;
1755			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1756			return 0;
1757#ifdef CONFIG_PPC64
1758		case 178:	/* mtmsrd */
1759			if (regs->msr & MSR_PR)
1760				goto priv;
1761			op->type = MTMSR;
1762			op->reg = rd;
1763			/* only MSR_EE and MSR_RI get changed if bit 15 set */
1764			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1765			imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1766			op->val = imm;
1767			return 0;
1768#endif
1769
1770		case 19:	/* mfcr */
1771			imm = 0xffffffffUL;
1772			if ((word >> 20) & 1) {
1773				imm = 0xf0000000UL;
1774				for (sh = 0; sh < 8; ++sh) {
1775					if (word & (0x80000 >> sh))
1776						break;
1777					imm >>= 4;
1778				}
1779			}
1780			op->val = regs->ccr & imm;
1781			goto compute_done;
1782
1783		case 128:	/* setb */
1784			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1785				goto unknown_opcode;
1786			/*
1787			 * 'ra' encodes the CR field number (bfa) in the top 3 bits.
1788			 * Since each CR field is 4 bits,
1789			 * we can simply mask off the bottom two bits (bfa * 4)
1790			 * to yield the first bit in the CR field.
1791			 */
1792			ra = ra & ~0x3;
1793			/* 'val' stores bits of the CR field (bfa) */
1794			val = regs->ccr >> (CR0_SHIFT - ra);
1795			/* checks if the LT bit of CR field (bfa) is set */
1796			if (val & 8)
1797				op->val = -1;
1798			/* checks if the GT bit of CR field (bfa) is set */
1799			else if (val & 4)
1800				op->val = 1;
1801			else
1802				op->val = 0;
1803			goto compute_done;
1804
1805		case 144:	/* mtcrf */
1806			op->type = COMPUTE + SETCC;
1807			imm = 0xf0000000UL;
1808			val = regs->gpr[rd];
1809			op->ccval = regs->ccr;
1810			for (sh = 0; sh < 8; ++sh) {
1811				if (word & (0x80000 >> sh))
1812					op->ccval = (op->ccval & ~imm) |
1813						(val & imm);
1814				imm >>= 4;
1815			}
1816			return 1;
1817
1818		case 339:	/* mfspr */
1819			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1820			op->type = MFSPR;
1821			op->reg = rd;
1822			op->spr = spr;
1823			if (spr == SPRN_XER || spr == SPRN_LR ||
1824			    spr == SPRN_CTR)
1825				return 1;
1826			return 0;
1827
1828		case 467:	/* mtspr */
1829			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1830			op->type = MTSPR;
1831			op->val = regs->gpr[rd];
1832			op->spr = spr;
1833			if (spr == SPRN_XER || spr == SPRN_LR ||
1834			    spr == SPRN_CTR)
1835				return 1;
1836			return 0;
1837
1838/*
1839 * Compare instructions
1840 */
1841		case 0:	/* cmp */
1842			val = regs->gpr[ra];
1843			val2 = regs->gpr[rb];
1844#ifdef __powerpc64__
1845			if ((rd & 1) == 0) {
1846				/* word (32-bit) compare */
1847				val = (int) val;
1848				val2 = (int) val2;
1849			}
1850#endif
1851			do_cmp_signed(regs, op, val, val2, rd >> 2);
1852			return 1;
1853
1854		case 32:	/* cmpl */
1855			val = regs->gpr[ra];
1856			val2 = regs->gpr[rb];
1857#ifdef __powerpc64__
1858			if ((rd & 1) == 0) {
1859				/* word (32-bit) compare */
1860				val = (unsigned int) val;
1861				val2 = (unsigned int) val2;
1862			}
1863#endif
1864			do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1865			return 1;
1866
1867		case 508: /* cmpb */
1868			do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1869			goto logical_done_nocc;
1870
1871/*
1872 * Arithmetic instructions
1873 */
1874		case 8:	/* subfc */
1875			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1876				       regs->gpr[rb], 1);
1877			goto arith_done;
1878#ifdef __powerpc64__
1879		case 9:	/* mulhdu */
1880			asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1881			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1882			goto arith_done;
1883#endif
1884		case 10:	/* addc */
1885			add_with_carry(regs, op, rd, regs->gpr[ra],
1886				       regs->gpr[rb], 0);
1887			goto arith_done;
1888
1889		case 11:	/* mulhwu */
1890			asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1891			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1892			goto arith_done;
1893
1894		case 40:	/* subf */
1895			op->val = regs->gpr[rb] - regs->gpr[ra];
1896			goto arith_done;
1897#ifdef __powerpc64__
1898		case 73:	/* mulhd */
1899			asm("mulhd %0,%1,%2" : "=r" (op->val) :
1900			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1901			goto arith_done;
1902#endif
1903		case 75:	/* mulhw */
1904			asm("mulhw %0,%1,%2" : "=r" (op->val) :
1905			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1906			goto arith_done;
1907
1908		case 104:	/* neg */
1909			op->val = -regs->gpr[ra];
1910			goto arith_done;
1911
1912		case 136:	/* subfe */
1913			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1914				       regs->gpr[rb], regs->xer & XER_CA);
1915			goto arith_done;
1916
1917		case 138:	/* adde */
1918			add_with_carry(regs, op, rd, regs->gpr[ra],
1919				       regs->gpr[rb], regs->xer & XER_CA);
1920			goto arith_done;
1921
1922		case 200:	/* subfze */
1923			add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1924				       regs->xer & XER_CA);
1925			goto arith_done;
1926
1927		case 202:	/* addze */
1928			add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1929				       regs->xer & XER_CA);
1930			goto arith_done;
1931
1932		case 232:	/* subfme */
1933			add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1934				       regs->xer & XER_CA);
1935			goto arith_done;
1936#ifdef __powerpc64__
1937		case 233:	/* mulld */
1938			op->val = regs->gpr[ra] * regs->gpr[rb];
1939			goto arith_done;
1940#endif
1941		case 234:	/* addme */
1942			add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1943				       regs->xer & XER_CA);
1944			goto arith_done;
1945
1946		case 235:	/* mullw */
1947			op->val = (long)(int) regs->gpr[ra] *
1948				(int) regs->gpr[rb];
1949
1950			goto arith_done;
1951#ifdef __powerpc64__
1952		case 265:	/* modud */
1953			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1954				goto unknown_opcode;
1955			op->val = regs->gpr[ra] % regs->gpr[rb];
1956			goto compute_done;
1957#endif
1958		case 266:	/* add */
1959			op->val = regs->gpr[ra] + regs->gpr[rb];
1960			goto arith_done;
1961
1962		case 267:	/* moduw */
1963			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1964				goto unknown_opcode;
1965			op->val = (unsigned int) regs->gpr[ra] %
1966				(unsigned int) regs->gpr[rb];
1967			goto compute_done;
1968#ifdef __powerpc64__
1969		case 457:	/* divdu */
1970			op->val = regs->gpr[ra] / regs->gpr[rb];
1971			goto arith_done;
1972#endif
1973		case 459:	/* divwu */
1974			op->val = (unsigned int) regs->gpr[ra] /
1975				(unsigned int) regs->gpr[rb];
1976			goto arith_done;
1977#ifdef __powerpc64__
1978		case 489:	/* divd */
1979			op->val = (long int) regs->gpr[ra] /
1980				(long int) regs->gpr[rb];
1981			goto arith_done;
1982#endif
1983		case 491:	/* divw */
1984			op->val = (int) regs->gpr[ra] /
1985				(int) regs->gpr[rb];
1986			goto arith_done;
1987#ifdef __powerpc64__
1988		case 425:	/* divde[.] */
1989			asm volatile(PPC_DIVDE(%0, %1, %2) :
1990				"=r" (op->val) : "r" (regs->gpr[ra]),
1991				"r" (regs->gpr[rb]));
1992			goto arith_done;
1993		case 393:	/* divdeu[.] */
1994			asm volatile(PPC_DIVDEU(%0, %1, %2) :
1995				"=r" (op->val) : "r" (regs->gpr[ra]),
1996				"r" (regs->gpr[rb]));
1997			goto arith_done;
1998#endif
1999		case 755:	/* darn */
2000			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2001				goto unknown_opcode;
2002			switch (ra & 0x3) {
2003			case 0:
2004				/* 32-bit conditioned */
2005				asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
2006				goto compute_done;
2007
2008			case 1:
2009				/* 64-bit conditioned */
2010				asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
2011				goto compute_done;
2012
2013			case 2:
2014				/* 64-bit raw */
2015				asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
2016				goto compute_done;
2017			}
2018
2019			goto unknown_opcode;
2020#ifdef __powerpc64__
2021		case 777:	/* modsd */
2022			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2023				goto unknown_opcode;
2024			op->val = (long int) regs->gpr[ra] %
2025				(long int) regs->gpr[rb];
2026			goto compute_done;
2027#endif
2028		case 779:	/* modsw */
2029			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2030				goto unknown_opcode;
2031			op->val = (int) regs->gpr[ra] %
2032				(int) regs->gpr[rb];
2033			goto compute_done;
2034
2035
2036/*
2037 * Logical instructions
2038 */
2039		case 26:	/* cntlzw */
2040			val = (unsigned int) regs->gpr[rd];
2041			op->val = ( val ? __builtin_clz(val) : 32 );
2042			goto logical_done;
2043#ifdef __powerpc64__
2044		case 58:	/* cntlzd */
2045			val = regs->gpr[rd];
2046			op->val = ( val ? __builtin_clzl(val) : 64 );
2047			goto logical_done;
2048#endif
2049		case 28:	/* and */
2050			op->val = regs->gpr[rd] & regs->gpr[rb];
2051			goto logical_done;
2052
2053		case 60:	/* andc */
2054			op->val = regs->gpr[rd] & ~regs->gpr[rb];
2055			goto logical_done;
2056
2057		case 122:	/* popcntb */
2058			do_popcnt(regs, op, regs->gpr[rd], 8);
2059			goto logical_done_nocc;
2060
2061		case 124:	/* nor */
2062			op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
2063			goto logical_done;
2064
2065		case 154:	/* prtyw */
2066			do_prty(regs, op, regs->gpr[rd], 32);
2067			goto logical_done_nocc;
2068
2069		case 186:	/* prtyd */
2070			do_prty(regs, op, regs->gpr[rd], 64);
2071			goto logical_done_nocc;
2072#ifdef CONFIG_PPC64
2073		case 252:	/* bpermd */
2074			do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
2075			goto logical_done_nocc;
2076#endif
 2077		case 284:	/* eqv */
2078			op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
2079			goto logical_done;
2080
2081		case 316:	/* xor */
2082			op->val = regs->gpr[rd] ^ regs->gpr[rb];
2083			goto logical_done;
2084
2085		case 378:	/* popcntw */
2086			do_popcnt(regs, op, regs->gpr[rd], 32);
2087			goto logical_done_nocc;
2088
2089		case 412:	/* orc */
2090			op->val = regs->gpr[rd] | ~regs->gpr[rb];
2091			goto logical_done;
2092
2093		case 444:	/* or */
2094			op->val = regs->gpr[rd] | regs->gpr[rb];
2095			goto logical_done;
2096
2097		case 476:	/* nand */
2098			op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
2099			goto logical_done;
2100#ifdef CONFIG_PPC64
2101		case 506:	/* popcntd */
2102			do_popcnt(regs, op, regs->gpr[rd], 64);
2103			goto logical_done_nocc;
2104#endif
2105		case 538:	/* cnttzw */
2106			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2107				goto unknown_opcode;
2108			val = (unsigned int) regs->gpr[rd];
2109			op->val = (val ? __builtin_ctz(val) : 32);
2110			goto logical_done;
2111#ifdef __powerpc64__
2112		case 570:	/* cnttzd */
2113			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2114				goto unknown_opcode;
2115			val = regs->gpr[rd];
2116			op->val = (val ? __builtin_ctzl(val) : 64);
2117			goto logical_done;
2118#endif
2119		case 922:	/* extsh */
2120			op->val = (signed short) regs->gpr[rd];
2121			goto logical_done;
2122
2123		case 954:	/* extsb */
2124			op->val = (signed char) regs->gpr[rd];
2125			goto logical_done;
2126#ifdef __powerpc64__
2127		case 986:	/* extsw */
2128			op->val = (signed int) regs->gpr[rd];
2129			goto logical_done;
2130#endif
2131
2132/*
2133 * Shift instructions
2134 */
2135		case 24:	/* slw */
2136			sh = regs->gpr[rb] & 0x3f;
2137			if (sh < 32)
2138				op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
2139			else
2140				op->val = 0;
2141			goto logical_done;
2142
2143		case 536:	/* srw */
2144			sh = regs->gpr[rb] & 0x3f;
2145			if (sh < 32)
2146				op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
2147			else
2148				op->val = 0;
2149			goto logical_done;
2150
2151		case 792:	/* sraw */
2152			op->type = COMPUTE + SETREG + SETXER;
2153			sh = regs->gpr[rb] & 0x3f;
2154			ival = (signed int) regs->gpr[rd];
2155			op->val = ival >> (sh < 32 ? sh : 31);
2156			op->xerval = regs->xer;
2157			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
2158				op->xerval |= XER_CA;
2159			else
2160				op->xerval &= ~XER_CA;
2161			set_ca32(op, op->xerval & XER_CA);
2162			goto logical_done;
2163
2164		case 824:	/* srawi */
2165			op->type = COMPUTE + SETREG + SETXER;
2166			sh = rb;
2167			ival = (signed int) regs->gpr[rd];
2168			op->val = ival >> sh;
2169			op->xerval = regs->xer;
2170			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2171				op->xerval |= XER_CA;
2172			else
2173				op->xerval &= ~XER_CA;
2174			set_ca32(op, op->xerval & XER_CA);
2175			goto logical_done;
2176
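		/*
		 * Worked example of the carry (CA) rule used by sraw/srawi
		 * above and srad/sradi below: for srawi with rs = -5
		 * (0x...fb) and sh = 1, the result is -5 >> 1 = -3 and the
		 * shifted-out bit is 1, so CA is set.  CA records that the
		 * arithmetic shift of a negative value was inexact, which
		 * is what lets a following addze turn the shift into a
		 * divide by 2^sh that truncates towards zero.
		 */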
2177#ifdef __powerpc64__
2178		case 27:	/* sld */
2179			sh = regs->gpr[rb] & 0x7f;
2180			if (sh < 64)
2181				op->val = regs->gpr[rd] << sh;
2182			else
2183				op->val = 0;
2184			goto logical_done;
2185
2186		case 539:	/* srd */
2187			sh = regs->gpr[rb] & 0x7f;
2188			if (sh < 64)
2189				op->val = regs->gpr[rd] >> sh;
2190			else
2191				op->val = 0;
2192			goto logical_done;
2193
2194		case 794:	/* srad */
2195			op->type = COMPUTE + SETREG + SETXER;
2196			sh = regs->gpr[rb] & 0x7f;
2197			ival = (signed long int) regs->gpr[rd];
2198			op->val = ival >> (sh < 64 ? sh : 63);
2199			op->xerval = regs->xer;
2200			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
2201				op->xerval |= XER_CA;
2202			else
2203				op->xerval &= ~XER_CA;
2204			set_ca32(op, op->xerval & XER_CA);
2205			goto logical_done;
2206
2207		case 826:	/* sradi with sh_5 = 0 */
2208		case 827:	/* sradi with sh_5 = 1 */
2209			op->type = COMPUTE + SETREG + SETXER;
2210			sh = rb | ((word & 2) << 4);
2211			ival = (signed long int) regs->gpr[rd];
2212			op->val = ival >> sh;
2213			op->xerval = regs->xer;
2214			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2215				op->xerval |= XER_CA;
2216			else
2217				op->xerval &= ~XER_CA;
2218			set_ca32(op, op->xerval & XER_CA);
2219			goto logical_done;
2220
2221		case 890:	/* extswsli with sh_5 = 0 */
2222		case 891:	/* extswsli with sh_5 = 1 */
2223			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2224				goto unknown_opcode;
2225			op->type = COMPUTE + SETREG;
2226			sh = rb | ((word & 2) << 4);
2227			val = (signed int) regs->gpr[rd];
2228			if (sh)
2229				op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
2230			else
2231				op->val = val;
2232			goto logical_done;
2233
2234#endif /* __powerpc64__ */
2235
2236/*
2237 * Cache instructions
2238 */
2239		case 54:	/* dcbst */
2240			op->type = MKOP(CACHEOP, DCBST, 0);
2241			op->ea = xform_ea(word, regs);
2242			return 0;
2243
2244		case 86:	/* dcbf */
2245			op->type = MKOP(CACHEOP, DCBF, 0);
2246			op->ea = xform_ea(word, regs);
2247			return 0;
2248
2249		case 246:	/* dcbtst */
2250			op->type = MKOP(CACHEOP, DCBTST, 0);
2251			op->ea = xform_ea(word, regs);
2252			op->reg = rd;
2253			return 0;
2254
2255		case 278:	/* dcbt */
2256			op->type = MKOP(CACHEOP, DCBT, 0);
2257			op->ea = xform_ea(word, regs);
2258			op->reg = rd;
2259			return 0;
2260
2261		case 982:	/* icbi */
2262			op->type = MKOP(CACHEOP, ICBI, 0);
2263			op->ea = xform_ea(word, regs);
2264			return 0;
2265
2266		case 1014:	/* dcbz */
2267			op->type = MKOP(CACHEOP, DCBZ, 0);
2268			op->ea = xform_ea(word, regs);
2269			return 0;
2270		}
2271		break;
2272	}
2273
2274/*
2275 * Loads and stores.
2276 */
2277	op->type = UNKNOWN;
2278	op->update_reg = ra;
2279	op->reg = rd;
2280	op->val = regs->gpr[rd];
2281	u = (word >> 20) & UPDATE;
2282	op->vsx_flags = 0;
2283
2284	switch (opcode) {
2285	case 31:
2286		u = word & UPDATE;
2287		op->ea = xform_ea(word, regs);
2288		switch ((word >> 1) & 0x3ff) {
2289		case 20:	/* lwarx */
2290			op->type = MKOP(LARX, 0, 4);
2291			break;
2292
2293		case 150:	/* stwcx. */
2294			op->type = MKOP(STCX, 0, 4);
2295			break;
2296
2297#ifdef CONFIG_PPC_HAS_LBARX_LHARX
2298		case 52:	/* lbarx */
2299			op->type = MKOP(LARX, 0, 1);
2300			break;
2301
2302		case 694:	/* stbcx. */
2303			op->type = MKOP(STCX, 0, 1);
2304			break;
2305
2306		case 116:	/* lharx */
2307			op->type = MKOP(LARX, 0, 2);
2308			break;
2309
2310		case 726:	/* sthcx. */
2311			op->type = MKOP(STCX, 0, 2);
2312			break;
2313#endif
2314#ifdef __powerpc64__
2315		case 84:	/* ldarx */
2316			op->type = MKOP(LARX, 0, 8);
2317			break;
2318
2319		case 214:	/* stdcx. */
2320			op->type = MKOP(STCX, 0, 8);
2321			break;
2322
2323		case 276:	/* lqarx */
2324			if (!((rd & 1) || rd == ra || rd == rb))
2325				op->type = MKOP(LARX, 0, 16);
2326			break;
2327
2328		case 182:	/* stqcx. */
2329			if (!(rd & 1))
2330				op->type = MKOP(STCX, 0, 16);
2331			break;
2332#endif
2333
2334		case 23:	/* lwzx */
2335		case 55:	/* lwzux */
2336			op->type = MKOP(LOAD, u, 4);
2337			break;
2338
2339		case 87:	/* lbzx */
2340		case 119:	/* lbzux */
2341			op->type = MKOP(LOAD, u, 1);
2342			break;
2343
2344#ifdef CONFIG_ALTIVEC
2345		/*
2346		 * Note: for the load/store vector element instructions,
2347		 * bits of the EA say which field of the VMX register to use.
2348		 */
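		/*
		 * For example, a 4-byte lvewx whose EA has (EA & 0xf) == 4
		 * transfers bytes 4..7 of the VMX register; do_vec_load()
		 * and do_vec_store() implement this with their
		 * u.b[ea & 0xf] indexing.
		 */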
2349		case 7:		/* lvebx */
2350			op->type = MKOP(LOAD_VMX, 0, 1);
2351			op->element_size = 1;
2352			break;
2353
2354		case 39:	/* lvehx */
2355			op->type = MKOP(LOAD_VMX, 0, 2);
2356			op->element_size = 2;
2357			break;
2358
2359		case 71:	/* lvewx */
2360			op->type = MKOP(LOAD_VMX, 0, 4);
2361			op->element_size = 4;
2362			break;
2363
2364		case 103:	/* lvx */
2365		case 359:	/* lvxl */
2366			op->type = MKOP(LOAD_VMX, 0, 16);
2367			op->element_size = 16;
2368			break;
2369
2370		case 135:	/* stvebx */
2371			op->type = MKOP(STORE_VMX, 0, 1);
2372			op->element_size = 1;
2373			break;
2374
2375		case 167:	/* stvehx */
2376			op->type = MKOP(STORE_VMX, 0, 2);
2377			op->element_size = 2;
2378			break;
2379
2380		case 199:	/* stvewx */
2381			op->type = MKOP(STORE_VMX, 0, 4);
2382			op->element_size = 4;
2383			break;
2384
2385		case 231:	/* stvx */
2386		case 487:	/* stvxl */
2387			op->type = MKOP(STORE_VMX, 0, 16);
2388			break;
2389#endif /* CONFIG_ALTIVEC */
2390
2391#ifdef __powerpc64__
2392		case 21:	/* ldx */
2393		case 53:	/* ldux */
2394			op->type = MKOP(LOAD, u, 8);
2395			break;
2396
2397		case 149:	/* stdx */
2398		case 181:	/* stdux */
2399			op->type = MKOP(STORE, u, 8);
2400			break;
2401#endif
2402
2403		case 151:	/* stwx */
2404		case 183:	/* stwux */
2405			op->type = MKOP(STORE, u, 4);
2406			break;
2407
2408		case 215:	/* stbx */
2409		case 247:	/* stbux */
2410			op->type = MKOP(STORE, u, 1);
2411			break;
2412
2413		case 279:	/* lhzx */
2414		case 311:	/* lhzux */
2415			op->type = MKOP(LOAD, u, 2);
2416			break;
2417
2418#ifdef __powerpc64__
2419		case 341:	/* lwax */
2420		case 373:	/* lwaux */
2421			op->type = MKOP(LOAD, SIGNEXT | u, 4);
2422			break;
2423#endif
2424
2425		case 343:	/* lhax */
2426		case 375:	/* lhaux */
2427			op->type = MKOP(LOAD, SIGNEXT | u, 2);
2428			break;
2429
2430		case 407:	/* sthx */
2431		case 439:	/* sthux */
2432			op->type = MKOP(STORE, u, 2);
2433			break;
2434
2435#ifdef __powerpc64__
2436		case 532:	/* ldbrx */
2437			op->type = MKOP(LOAD, BYTEREV, 8);
2438			break;
2439
2440#endif
2441		case 533:	/* lswx */
2442			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
2443			break;
2444
2445		case 534:	/* lwbrx */
2446			op->type = MKOP(LOAD, BYTEREV, 4);
2447			break;
2448
2449		case 597:	/* lswi */
2450			if (rb == 0)
2451				rb = 32;	/* # bytes to load */
2452			op->type = MKOP(LOAD_MULTI, 0, rb);
2453			op->ea = ra ? regs->gpr[ra] : 0;
2454			break;
2455
2456#ifdef CONFIG_PPC_FPU
2457		case 535:	/* lfsx */
2458		case 567:	/* lfsux */
2459			op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2460			break;
2461
2462		case 599:	/* lfdx */
2463		case 631:	/* lfdux */
2464			op->type = MKOP(LOAD_FP, u, 8);
2465			break;
2466
2467		case 663:	/* stfsx */
2468		case 695:	/* stfsux */
2469			op->type = MKOP(STORE_FP, u | FPCONV, 4);
2470			break;
2471
2472		case 727:	/* stfdx */
2473		case 759:	/* stfdux */
2474			op->type = MKOP(STORE_FP, u, 8);
2475			break;
2476
2477#ifdef __powerpc64__
2478		case 791:	/* lfdpx */
2479			op->type = MKOP(LOAD_FP, 0, 16);
2480			break;
2481
2482		case 855:	/* lfiwax */
2483			op->type = MKOP(LOAD_FP, SIGNEXT, 4);
2484			break;
2485
2486		case 887:	/* lfiwzx */
2487			op->type = MKOP(LOAD_FP, 0, 4);
2488			break;
2489
2490		case 919:	/* stfdpx */
2491			op->type = MKOP(STORE_FP, 0, 16);
2492			break;
2493
2494		case 983:	/* stfiwx */
2495			op->type = MKOP(STORE_FP, 0, 4);
2496			break;
2497#endif /* __powerpc64 */
2498#endif /* CONFIG_PPC_FPU */
2499
2500#ifdef __powerpc64__
2501		case 660:	/* stdbrx */
2502			op->type = MKOP(STORE, BYTEREV, 8);
2503			op->val = byterev_8(regs->gpr[rd]);
2504			break;
2505
2506#endif
2507		case 661:	/* stswx */
2508			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2509			break;
2510
2511		case 662:	/* stwbrx */
2512			op->type = MKOP(STORE, BYTEREV, 4);
2513			op->val = byterev_4(regs->gpr[rd]);
2514			break;
2515
2516		case 725:	/* stswi */
2517			if (rb == 0)
2518				rb = 32;	/* # bytes to store */
2519			op->type = MKOP(STORE_MULTI, 0, rb);
2520			op->ea = ra ? regs->gpr[ra] : 0;
2521			break;
2522
2523		case 790:	/* lhbrx */
2524			op->type = MKOP(LOAD, BYTEREV, 2);
2525			break;
2526
2527		case 918:	/* sthbrx */
2528			op->type = MKOP(STORE, BYTEREV, 2);
2529			op->val = byterev_2(regs->gpr[rd]);
2530			break;
2531
2532#ifdef CONFIG_VSX
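		/*
		 * In the XX1-form VSX cases below the 6-bit register number
		 * is split across the instruction: the low five bits are in
		 * the usual RT/RS field (rd) and the high bit is the low
		 * bit of the word, hence rd | ((word & 1) << 5).
		 */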
2533		case 12:	/* lxsiwzx */
2534			op->reg = rd | ((word & 1) << 5);
2535			op->type = MKOP(LOAD_VSX, 0, 4);
2536			op->element_size = 8;
2537			break;
2538
2539		case 76:	/* lxsiwax */
2540			op->reg = rd | ((word & 1) << 5);
2541			op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2542			op->element_size = 8;
2543			break;
2544
2545		case 140:	/* stxsiwx */
2546			op->reg = rd | ((word & 1) << 5);
2547			op->type = MKOP(STORE_VSX, 0, 4);
2548			op->element_size = 8;
2549			break;
2550
2551		case 268:	/* lxvx */
2552			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2553				goto unknown_opcode;
2554			op->reg = rd | ((word & 1) << 5);
2555			op->type = MKOP(LOAD_VSX, 0, 16);
2556			op->element_size = 16;
2557			op->vsx_flags = VSX_CHECK_VEC;
2558			break;
2559
2560		case 269:	/* lxvl */
2561		case 301: {	/* lxvll */
2562			int nb;
2563			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2564				goto unknown_opcode;
2565			op->reg = rd | ((word & 1) << 5);
2566			op->ea = ra ? regs->gpr[ra] : 0;
2567			nb = regs->gpr[rb] & 0xff;
2568			if (nb > 16)
2569				nb = 16;
2570			op->type = MKOP(LOAD_VSX, 0, nb);
2571			op->element_size = 16;
2572			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2573				VSX_CHECK_VEC;
2574			break;
2575		}
2576		case 332:	/* lxvdsx */
2577			op->reg = rd | ((word & 1) << 5);
2578			op->type = MKOP(LOAD_VSX, 0, 8);
2579			op->element_size = 8;
2580			op->vsx_flags = VSX_SPLAT;
2581			break;
2582
2583		case 333:       /* lxvpx */
2584			if (!cpu_has_feature(CPU_FTR_ARCH_31))
2585				goto unknown_opcode;
2586			op->reg = VSX_REGISTER_XTP(rd);
2587			op->type = MKOP(LOAD_VSX, 0, 32);
2588			op->element_size = 32;
2589			break;
2590
2591		case 364:	/* lxvwsx */
2592			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2593				goto unknown_opcode;
2594			op->reg = rd | ((word & 1) << 5);
2595			op->type = MKOP(LOAD_VSX, 0, 4);
2596			op->element_size = 4;
2597			op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2598			break;
2599
2600		case 396:	/* stxvx */
2601			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2602				goto unknown_opcode;
2603			op->reg = rd | ((word & 1) << 5);
2604			op->type = MKOP(STORE_VSX, 0, 16);
2605			op->element_size = 16;
2606			op->vsx_flags = VSX_CHECK_VEC;
2607			break;
2608
2609		case 397:	/* stxvl */
2610		case 429: {	/* stxvll */
2611			int nb;
2612			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2613				goto unknown_opcode;
2614			op->reg = rd | ((word & 1) << 5);
2615			op->ea = ra ? regs->gpr[ra] : 0;
2616			nb = regs->gpr[rb] & 0xff;
2617			if (nb > 16)
2618				nb = 16;
2619			op->type = MKOP(STORE_VSX, 0, nb);
2620			op->element_size = 16;
2621			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2622				VSX_CHECK_VEC;
2623			break;
2624		}
2625		case 461:       /* stxvpx */
2626			if (!cpu_has_feature(CPU_FTR_ARCH_31))
2627				goto unknown_opcode;
2628			op->reg = VSX_REGISTER_XTP(rd);
2629			op->type = MKOP(STORE_VSX, 0, 32);
2630			op->element_size = 32;
2631			break;
2632		case 524:	/* lxsspx */
2633			op->reg = rd | ((word & 1) << 5);
2634			op->type = MKOP(LOAD_VSX, 0, 4);
2635			op->element_size = 8;
2636			op->vsx_flags = VSX_FPCONV;
2637			break;
2638
2639		case 588:	/* lxsdx */
2640			op->reg = rd | ((word & 1) << 5);
2641			op->type = MKOP(LOAD_VSX, 0, 8);
2642			op->element_size = 8;
2643			break;
2644
2645		case 652:	/* stxsspx */
2646			op->reg = rd | ((word & 1) << 5);
2647			op->type = MKOP(STORE_VSX, 0, 4);
2648			op->element_size = 8;
2649			op->vsx_flags = VSX_FPCONV;
2650			break;
2651
2652		case 716:	/* stxsdx */
2653			op->reg = rd | ((word & 1) << 5);
2654			op->type = MKOP(STORE_VSX, 0, 8);
2655			op->element_size = 8;
2656			break;
2657
2658		case 780:	/* lxvw4x */
2659			op->reg = rd | ((word & 1) << 5);
2660			op->type = MKOP(LOAD_VSX, 0, 16);
2661			op->element_size = 4;
2662			break;
2663
2664		case 781:	/* lxsibzx */
2665			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2666				goto unknown_opcode;
2667			op->reg = rd | ((word & 1) << 5);
2668			op->type = MKOP(LOAD_VSX, 0, 1);
2669			op->element_size = 8;
2670			op->vsx_flags = VSX_CHECK_VEC;
2671			break;
2672
2673		case 812:	/* lxvh8x */
2674			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2675				goto unknown_opcode;
2676			op->reg = rd | ((word & 1) << 5);
2677			op->type = MKOP(LOAD_VSX, 0, 16);
2678			op->element_size = 2;
2679			op->vsx_flags = VSX_CHECK_VEC;
2680			break;
2681
2682		case 813:	/* lxsihzx */
2683			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2684				goto unknown_opcode;
2685			op->reg = rd | ((word & 1) << 5);
2686			op->type = MKOP(LOAD_VSX, 0, 2);
2687			op->element_size = 8;
2688			op->vsx_flags = VSX_CHECK_VEC;
2689			break;
2690
2691		case 844:	/* lxvd2x */
2692			op->reg = rd | ((word & 1) << 5);
2693			op->type = MKOP(LOAD_VSX, 0, 16);
2694			op->element_size = 8;
2695			break;
2696
2697		case 876:	/* lxvb16x */
2698			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2699				goto unknown_opcode;
2700			op->reg = rd | ((word & 1) << 5);
2701			op->type = MKOP(LOAD_VSX, 0, 16);
2702			op->element_size = 1;
2703			op->vsx_flags = VSX_CHECK_VEC;
2704			break;
2705
2706		case 908:	/* stxvw4x */
2707			op->reg = rd | ((word & 1) << 5);
2708			op->type = MKOP(STORE_VSX, 0, 16);
2709			op->element_size = 4;
2710			break;
2711
2712		case 909:	/* stxsibx */
2713			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2714				goto unknown_opcode;
2715			op->reg = rd | ((word & 1) << 5);
2716			op->type = MKOP(STORE_VSX, 0, 1);
2717			op->element_size = 8;
2718			op->vsx_flags = VSX_CHECK_VEC;
2719			break;
2720
2721		case 940:	/* stxvh8x */
2722			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2723				goto unknown_opcode;
2724			op->reg = rd | ((word & 1) << 5);
2725			op->type = MKOP(STORE_VSX, 0, 16);
2726			op->element_size = 2;
2727			op->vsx_flags = VSX_CHECK_VEC;
2728			break;
2729
2730		case 941:	/* stxsihx */
2731			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2732				goto unknown_opcode;
2733			op->reg = rd | ((word & 1) << 5);
2734			op->type = MKOP(STORE_VSX, 0, 2);
2735			op->element_size = 8;
2736			op->vsx_flags = VSX_CHECK_VEC;
2737			break;
2738
2739		case 972:	/* stxvd2x */
2740			op->reg = rd | ((word & 1) << 5);
2741			op->type = MKOP(STORE_VSX, 0, 16);
2742			op->element_size = 8;
2743			break;
2744
2745		case 1004:	/* stxvb16x */
2746			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2747				goto unknown_opcode;
2748			op->reg = rd | ((word & 1) << 5);
2749			op->type = MKOP(STORE_VSX, 0, 16);
2750			op->element_size = 1;
2751			op->vsx_flags = VSX_CHECK_VEC;
2752			break;
2753
2754#endif /* CONFIG_VSX */
2755		}
2756		break;
2757
2758	case 32:	/* lwz */
2759	case 33:	/* lwzu */
2760		op->type = MKOP(LOAD, u, 4);
2761		op->ea = dform_ea(word, regs);
2762		break;
2763
2764	case 34:	/* lbz */
2765	case 35:	/* lbzu */
2766		op->type = MKOP(LOAD, u, 1);
2767		op->ea = dform_ea(word, regs);
2768		break;
2769
2770	case 36:	/* stw */
2771	case 37:	/* stwu */
2772		op->type = MKOP(STORE, u, 4);
2773		op->ea = dform_ea(word, regs);
2774		break;
2775
2776	case 38:	/* stb */
2777	case 39:	/* stbu */
2778		op->type = MKOP(STORE, u, 1);
2779		op->ea = dform_ea(word, regs);
2780		break;
2781
2782	case 40:	/* lhz */
2783	case 41:	/* lhzu */
2784		op->type = MKOP(LOAD, u, 2);
2785		op->ea = dform_ea(word, regs);
2786		break;
2787
2788	case 42:	/* lha */
2789	case 43:	/* lhau */
2790		op->type = MKOP(LOAD, SIGNEXT | u, 2);
2791		op->ea = dform_ea(word, regs);
2792		break;
2793
2794	case 44:	/* sth */
2795	case 45:	/* sthu */
2796		op->type = MKOP(STORE, u, 2);
2797		op->ea = dform_ea(word, regs);
2798		break;
2799
2800	case 46:	/* lmw */
2801		if (ra >= rd)
2802			break;		/* invalid form, ra in range to load */
2803		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2804		op->ea = dform_ea(word, regs);
2805		break;
2806
2807	case 47:	/* stmw */
2808		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2809		op->ea = dform_ea(word, regs);
2810		break;
2811
2812#ifdef CONFIG_PPC_FPU
2813	case 48:	/* lfs */
2814	case 49:	/* lfsu */
2815		op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2816		op->ea = dform_ea(word, regs);
2817		break;
2818
2819	case 50:	/* lfd */
2820	case 51:	/* lfdu */
2821		op->type = MKOP(LOAD_FP, u, 8);
2822		op->ea = dform_ea(word, regs);
2823		break;
2824
2825	case 52:	/* stfs */
2826	case 53:	/* stfsu */
2827		op->type = MKOP(STORE_FP, u | FPCONV, 4);
2828		op->ea = dform_ea(word, regs);
2829		break;
2830
2831	case 54:	/* stfd */
2832	case 55:	/* stfdu */
2833		op->type = MKOP(STORE_FP, u, 8);
2834		op->ea = dform_ea(word, regs);
2835		break;
2836#endif
2837
2838#ifdef __powerpc64__
2839	case 56:	/* lq */
2840		if (!((rd & 1) || (rd == ra)))
2841			op->type = MKOP(LOAD, 0, 16);
2842		op->ea = dqform_ea(word, regs);
2843		break;
2844#endif
2845
2846#ifdef CONFIG_VSX
2847	case 57:	/* lfdp, lxsd, lxssp */
2848		op->ea = dsform_ea(word, regs);
2849		switch (word & 3) {
2850		case 0:		/* lfdp */
2851			if (rd & 1)
2852				break;		/* reg must be even */
2853			op->type = MKOP(LOAD_FP, 0, 16);
2854			break;
2855		case 2:		/* lxsd */
2856			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2857				goto unknown_opcode;
2858			op->reg = rd + 32;
2859			op->type = MKOP(LOAD_VSX, 0, 8);
2860			op->element_size = 8;
2861			op->vsx_flags = VSX_CHECK_VEC;
2862			break;
2863		case 3:		/* lxssp */
2864			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2865				goto unknown_opcode;
2866			op->reg = rd + 32;
2867			op->type = MKOP(LOAD_VSX, 0, 4);
2868			op->element_size = 8;
2869			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2870			break;
2871		}
2872		break;
2873#endif /* CONFIG_VSX */
2874
2875#ifdef __powerpc64__
2876	case 58:	/* ld[u], lwa */
2877		op->ea = dsform_ea(word, regs);
2878		switch (word & 3) {
2879		case 0:		/* ld */
2880			op->type = MKOP(LOAD, 0, 8);
2881			break;
2882		case 1:		/* ldu */
2883			op->type = MKOP(LOAD, UPDATE, 8);
2884			break;
2885		case 2:		/* lwa */
2886			op->type = MKOP(LOAD, SIGNEXT, 4);
2887			break;
2888		}
2889		break;
2890#endif
2891
2892#ifdef CONFIG_VSX
2893	case 6:
2894		if (!cpu_has_feature(CPU_FTR_ARCH_31))
2895			goto unknown_opcode;
2896		op->ea = dqform_ea(word, regs);
2897		op->reg = VSX_REGISTER_XTP(rd);
2898		op->element_size = 32;
2899		switch (word & 0xf) {
2900		case 0:         /* lxvp */
2901			op->type = MKOP(LOAD_VSX, 0, 32);
2902			break;
2903		case 1:         /* stxvp */
2904			op->type = MKOP(STORE_VSX, 0, 32);
2905			break;
2906		}
2907		break;
2908
2909	case 61:	/* stfdp, lxv, stxsd, stxssp, stxv */
2910		switch (word & 7) {
2911		case 0:		/* stfdp with LSB of DS field = 0 */
2912		case 4:		/* stfdp with LSB of DS field = 1 */
2913			op->ea = dsform_ea(word, regs);
2914			op->type = MKOP(STORE_FP, 0, 16);
2915			break;
2916
2917		case 1:		/* lxv */
2918			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2919				goto unknown_opcode;
2920			op->ea = dqform_ea(word, regs);
2921			if (word & 8)
2922				op->reg = rd + 32;
2923			op->type = MKOP(LOAD_VSX, 0, 16);
2924			op->element_size = 16;
2925			op->vsx_flags = VSX_CHECK_VEC;
2926			break;
2927
2928		case 2:		/* stxsd with LSB of DS field = 0 */
2929		case 6:		/* stxsd with LSB of DS field = 1 */
2930			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2931				goto unknown_opcode;
2932			op->ea = dsform_ea(word, regs);
2933			op->reg = rd + 32;
2934			op->type = MKOP(STORE_VSX, 0, 8);
2935			op->element_size = 8;
2936			op->vsx_flags = VSX_CHECK_VEC;
2937			break;
2938
2939		case 3:		/* stxssp with LSB of DS field = 0 */
2940		case 7:		/* stxssp with LSB of DS field = 1 */
2941			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2942				goto unknown_opcode;
2943			op->ea = dsform_ea(word, regs);
2944			op->reg = rd + 32;
2945			op->type = MKOP(STORE_VSX, 0, 4);
2946			op->element_size = 8;
2947			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2948			break;
2949
2950		case 5:		/* stxv */
2951			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2952				goto unknown_opcode;
2953			op->ea = dqform_ea(word, regs);
2954			if (word & 8)
2955				op->reg = rd + 32;
2956			op->type = MKOP(STORE_VSX, 0, 16);
2957			op->element_size = 16;
2958			op->vsx_flags = VSX_CHECK_VEC;
2959			break;
2960		}
2961		break;
2962#endif /* CONFIG_VSX */
2963
2964#ifdef __powerpc64__
2965	case 62:	/* std[u] */
2966		op->ea = dsform_ea(word, regs);
2967		switch (word & 3) {
2968		case 0:		/* std */
2969			op->type = MKOP(STORE, 0, 8);
2970			break;
2971		case 1:		/* stdu */
2972			op->type = MKOP(STORE, UPDATE, 8);
2973			break;
2974		case 2:		/* stq */
2975			if (!(rd & 1))
2976				op->type = MKOP(STORE, 0, 16);
2977			break;
2978		}
2979		break;
2980	case 1: /* Prefixed instructions */
2981		if (!cpu_has_feature(CPU_FTR_ARCH_31))
2982			goto unknown_opcode;
2983
2984		prefix_r = GET_PREFIX_R(word);
2985		ra = GET_PREFIX_RA(suffix);
2986		op->update_reg = ra;
2987		rd = (suffix >> 21) & 0x1f;
2988		op->reg = rd;
2989		op->val = regs->gpr[rd];
2990
2991		suffixopcode = get_op(suffix);
2992		prefixtype = (word >> 24) & 0x3;
2993		switch (prefixtype) {
2994		case 0: /* Type 00  Eight-Byte Load/Store */
2995			if (prefix_r && ra)
2996				break;
2997			op->ea = mlsd_8lsd_ea(word, suffix, regs);
2998			switch (suffixopcode) {
2999			case 41:	/* plwa */
3000				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
3001				break;
3002#ifdef CONFIG_VSX
3003			case 42:        /* plxsd */
3004				op->reg = rd + 32;
3005				op->type = MKOP(LOAD_VSX, PREFIXED, 8);
3006				op->element_size = 8;
3007				op->vsx_flags = VSX_CHECK_VEC;
3008				break;
3009			case 43:	/* plxssp */
3010				op->reg = rd + 32;
3011				op->type = MKOP(LOAD_VSX, PREFIXED, 4);
3012				op->element_size = 8;
3013				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
3014				break;
3015			case 46:	/* pstxsd */
3016				op->reg = rd + 32;
3017				op->type = MKOP(STORE_VSX, PREFIXED, 8);
3018				op->element_size = 8;
3019				op->vsx_flags = VSX_CHECK_VEC;
3020				break;
3021			case 47:	/* pstxssp */
3022				op->reg = rd + 32;
3023				op->type = MKOP(STORE_VSX, PREFIXED, 4);
3024				op->element_size = 8;
3025				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
3026				break;
3027			case 51:	/* plxv1 */
3028				op->reg += 32;
3029				fallthrough;
3030			case 50:	/* plxv0 */
3031				op->type = MKOP(LOAD_VSX, PREFIXED, 16);
3032				op->element_size = 16;
3033				op->vsx_flags = VSX_CHECK_VEC;
3034				break;
3035			case 55:	/* pstxv1 */
3036				op->reg = rd + 32;
3037				fallthrough;
3038			case 54:	/* pstxv0 */
3039				op->type = MKOP(STORE_VSX, PREFIXED, 16);
3040				op->element_size = 16;
3041				op->vsx_flags = VSX_CHECK_VEC;
3042				break;
3043#endif /* CONFIG_VSX */
3044			case 56:        /* plq */
3045				op->type = MKOP(LOAD, PREFIXED, 16);
3046				break;
3047			case 57:	/* pld */
3048				op->type = MKOP(LOAD, PREFIXED, 8);
3049				break;
3050#ifdef CONFIG_VSX
3051			case 58:        /* plxvp */
3052				op->reg = VSX_REGISTER_XTP(rd);
3053				op->type = MKOP(LOAD_VSX, PREFIXED, 32);
3054				op->element_size = 32;
3055				break;
3056#endif /* CONFIG_VSX */
3057			case 60:        /* pstq */
3058				op->type = MKOP(STORE, PREFIXED, 16);
3059				break;
3060			case 61:	/* pstd */
3061				op->type = MKOP(STORE, PREFIXED, 8);
3062				break;
3063#ifdef CONFIG_VSX
3064			case 62:        /* pstxvp */
3065				op->reg = VSX_REGISTER_XTP(rd);
3066				op->type = MKOP(STORE_VSX, PREFIXED, 32);
3067				op->element_size = 32;
3068				break;
3069#endif /* CONFIG_VSX */
3070			}
3071			break;
3072		case 1: /* Type 01 Eight-Byte Register-to-Register */
3073			break;
3074		case 2: /* Type 10 Modified Load/Store */
3075			if (prefix_r && ra)
3076				break;
3077			op->ea = mlsd_8lsd_ea(word, suffix, regs);
3078			switch (suffixopcode) {
3079			case 32:	/* plwz */
3080				op->type = MKOP(LOAD, PREFIXED, 4);
3081				break;
3082			case 34:	/* plbz */
3083				op->type = MKOP(LOAD, PREFIXED, 1);
3084				break;
3085			case 36:	/* pstw */
3086				op->type = MKOP(STORE, PREFIXED, 4);
3087				break;
3088			case 38:	/* pstb */
3089				op->type = MKOP(STORE, PREFIXED, 1);
3090				break;
3091			case 40:	/* plhz */
3092				op->type = MKOP(LOAD, PREFIXED, 2);
3093				break;
3094			case 42:	/* plha */
3095				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
3096				break;
3097			case 44:	/* psth */
3098				op->type = MKOP(STORE, PREFIXED, 2);
3099				break;
3100			case 48:        /* plfs */
3101				op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
3102				break;
3103			case 50:        /* plfd */
3104				op->type = MKOP(LOAD_FP, PREFIXED, 8);
3105				break;
3106			case 52:        /* pstfs */
3107				op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
3108				break;
3109			case 54:        /* pstfd */
3110				op->type = MKOP(STORE_FP, PREFIXED, 8);
3111				break;
3112			}
3113			break;
3114		case 3: /* Type 11 Modified Register-to-Register */
3115			break;
3116		}
3117#endif /* __powerpc64__ */
3118
3119	}
3120
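	/*
	 * Reject invalid update forms: the architecture leaves the result
	 * undefined when an update-form access uses r0 as the base
	 * register, or when a load with update targets its own base
	 * register.
	 */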
3121	if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
3122		switch (GETTYPE(op->type)) {
3123		case LOAD:
3124			if (ra == rd)
3125				goto unknown_opcode;
3126			fallthrough;
3127		case STORE:
3128		case LOAD_FP:
3129		case STORE_FP:
3130			if (ra == 0)
3131				goto unknown_opcode;
3132		}
3133	}
3134
3135#ifdef CONFIG_VSX
3136	if ((GETTYPE(op->type) == LOAD_VSX ||
3137	     GETTYPE(op->type) == STORE_VSX) &&
3138	    !cpu_has_feature(CPU_FTR_VSX)) {
3139		return -1;
3140	}
3141#endif /* CONFIG_VSX */
3142
3143	return 0;
3144
3145 unknown_opcode:
3146	op->type = UNKNOWN;
3147	return 0;
3148
3149 logical_done:
3150	if (word & 1)
3151		set_cr0(regs, op);
3152 logical_done_nocc:
3153	op->reg = ra;
3154	op->type |= SETREG;
3155	return 1;
3156
3157 arith_done:
3158	if (word & 1)
3159		set_cr0(regs, op);
3160 compute_done:
3161	op->reg = rd;
3162	op->type |= SETREG;
3163	return 1;
3164
3165 priv:
3166	op->type = INTERRUPT | 0x700;
3167	op->val = SRR1_PROGPRIV;
3168	return 0;
3169
3170 trap:
3171	op->type = INTERRUPT | 0x700;
3172	op->val = SRR1_PROGTRAP;
3173	return 0;
3174}
3175EXPORT_SYMBOL_GPL(analyse_instr);
3176NOKPROBE_SYMBOL(analyse_instr);
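
/*
 * Illustrative sketch (not part of the original source): callers dispatch
 * on analyse_instr()'s return value, much as emulate_step() below does:
 *
 *	struct instruction_op op;
 *	int r = analyse_instr(&op, regs, instr);
 *
 *	if (r < 0)		// must not be single-stepped (e.g. rfid)
 *		...
 *	else if (r > 0)		// fully described by op
 *		emulate_update_regs(regs, &op);
 *	else			// r == 0: storage access still to be done
 *		...
 */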
3177
3178/*
3179 * For PPC32 we always use stwu with r1 to change the stack pointer.
3180 * Emulating this store directly could corrupt the exception frame, so an
3181 * exception frame trampoline is pushed below the kprobed function stack.
3182 * Here we only update gpr[1] and do not emulate the store itself; the
3183 * real store is done safely in the exception return code, which checks
3184 * this flag.
3185 */
3186static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
3187{
3188	/*
3189	 * Check whether the flag is already set, since that means
3190	 * we would lose the previous value.
3191	 */
3192	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
3193	set_thread_flag(TIF_EMULATE_STACK_STORE);
3194	return 0;
3195}
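
/*
 * Illustrative example (not from the original source): the sequence this
 * path handles is a PPC32 function prologue such as
 *
 *	stwu	r1,-16(r1)
 *
 * where r1 is both the store source and the updated base.  Performing the
 * store here could overwrite the live exception frame, so only the flag
 * above is set; gpr[1] is updated by the caller and the store itself is
 * replayed on exception return.
 */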
3196
3197static nokprobe_inline void do_signext(unsigned long *valp, int size)
3198{
3199	switch (size) {
3200	case 2:
3201		*valp = (signed short) *valp;
3202		break;
3203	case 4:
3204		*valp = (signed int) *valp;
3205		break;
3206	}
3207}
3208
3209static nokprobe_inline void do_byterev(unsigned long *valp, int size)
3210{
3211	switch (size) {
3212	case 2:
3213		*valp = byterev_2(*valp);
3214		break;
3215	case 4:
3216		*valp = byterev_4(*valp);
3217		break;
3218#ifdef __powerpc64__
3219	case 8:
3220		*valp = byterev_8(*valp);
3221		break;
3222#endif
3223	}
3224}
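
/* For example, do_byterev() with size 4 turns 0x12345678 into 0x78563412. */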
3225
3226/*
3227 * Emulate an instruction that can be executed just by updating
3228 * fields in *regs.
3229 */
3230void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
3231{
3232	unsigned long next_pc;
3233
3234	next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
3235	switch (GETTYPE(op->type)) {
3236	case COMPUTE:
3237		if (op->type & SETREG)
3238			regs->gpr[op->reg] = op->val;
3239		if (op->type & SETCC)
3240			regs->ccr = op->ccval;
3241		if (op->type & SETXER)
3242			regs->xer = op->xerval;
3243		break;
3244
3245	case BRANCH:
3246		if (op->type & SETLK)
3247			regs->link = next_pc;
3248		if (op->type & BRTAKEN)
3249			next_pc = op->val;
3250		if (op->type & DECCTR)
3251			--regs->ctr;
3252		break;
3253
3254	case BARRIER:
3255		switch (op->type & BARRIER_MASK) {
3256		case BARRIER_SYNC:
3257			mb();
3258			break;
3259		case BARRIER_ISYNC:
3260			isync();
3261			break;
3262		case BARRIER_EIEIO:
3263			eieio();
3264			break;
3265#ifdef CONFIG_PPC64
3266		case BARRIER_LWSYNC:
3267			asm volatile("lwsync" : : : "memory");
3268			break;
3269		case BARRIER_PTESYNC:
3270			asm volatile("ptesync" : : : "memory");
3271			break;
3272#endif
3273		}
3274		break;
3275
3276	case MFSPR:
3277		switch (op->spr) {
3278		case SPRN_XER:
3279			regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
3280			break;
3281		case SPRN_LR:
3282			regs->gpr[op->reg] = regs->link;
3283			break;
3284		case SPRN_CTR:
3285			regs->gpr[op->reg] = regs->ctr;
3286			break;
3287		default:
3288			WARN_ON_ONCE(1);
3289		}
3290		break;
3291
3292	case MTSPR:
3293		switch (op->spr) {
3294		case SPRN_XER:
3295			regs->xer = op->val & 0xffffffffUL;
3296			break;
3297		case SPRN_LR:
3298			regs->link = op->val;
3299			break;
3300		case SPRN_CTR:
3301			regs->ctr = op->val;
3302			break;
3303		default:
3304			WARN_ON_ONCE(1);
3305		}
3306		break;
3307
3308	default:
3309		WARN_ON_ONCE(1);
3310	}
3311	regs_set_return_ip(regs, next_pc);
3312}
3313NOKPROBE_SYMBOL(emulate_update_regs);
3314
3315/*
3316 * Emulate a previously-analysed load or store instruction.
3317 * Return values are:
3318 * 0 = instruction emulated successfully
3319 * -EFAULT = address out of range or access faulted (regs->dar
3320 *	     contains the faulting address)
3321 * -EACCES = misaligned access, instruction requires alignment
3322 * -EINVAL = unknown operation in *op
3323 */
3324int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
3325{
3326	int err, size, type;
3327	int i, rd, nb;
3328	unsigned int cr;
3329	unsigned long val;
3330	unsigned long ea;
3331	bool cross_endian;
3332
3333	err = 0;
3334	size = GETSIZE(op->type);
3335	type = GETTYPE(op->type);
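	/*
	 * cross_endian is true when the context being emulated runs with
	 * the opposite byte order from the kernel (e.g. a little-endian
	 * process on a big-endian kernel); data moved by the emulated
	 * access must then be byte-reversed to match what the instruction
	 * would have done.
	 */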
3336	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
3337	ea = truncate_if_32bit(regs->msr, op->ea);
3338
3339	switch (type) {
3340	case LARX:
3341		if (ea & (size - 1))
3342			return -EACCES;		/* can't handle misaligned */
3343		if (!address_ok(regs, ea, size))
3344			return -EFAULT;
3345		err = 0;
3346		val = 0;
3347		switch (size) {
3348#ifdef CONFIG_PPC_HAS_LBARX_LHARX
3349		case 1:
3350			__get_user_asmx(val, ea, err, "lbarx");
3351			break;
3352		case 2:
3353			__get_user_asmx(val, ea, err, "lharx");
3354			break;
3355#endif
3356		case 4:
3357			__get_user_asmx(val, ea, err, "lwarx");
3358			break;
3359#ifdef __powerpc64__
3360		case 8:
3361			__get_user_asmx(val, ea, err, "ldarx");
3362			break;
3363		case 16:
3364			err = do_lqarx(ea, &regs->gpr[op->reg]);
3365			break;
3366#endif
3367		default:
3368			return -EINVAL;
3369		}
3370		if (err) {
3371			regs->dar = ea;
3372			break;
3373		}
3374		if (size < 16)
3375			regs->gpr[op->reg] = val;
3376		break;
3377
3378	case STCX:
3379		if (ea & (size - 1))
3380			return -EACCES;		/* can't handle misaligned */
3381		if (!address_ok(regs, ea, size))
3382			return -EFAULT;
3383		err = 0;
3384		switch (size) {
3385#ifdef __powerpc64__
3386		case 1:
3387			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
3388			break;
3389		case 2:
3390			__put_user_asmx(op->val, ea, err, "sthcx.", cr);
3391			break;
3392#endif
3393		case 4:
3394			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
3395			break;
3396#ifdef __powerpc64__
3397		case 8:
3398			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
3399			break;
3400		case 16:
3401			err = do_stqcx(ea, regs->gpr[op->reg],
3402				       regs->gpr[op->reg + 1], &cr);
3403			break;
3404#endif
3405		default:
3406			return -EINVAL;
3407		}
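		/*
		 * Rebuild CR0 as st[bhwdq]cx. would set it: LT/GT/EQ come
		 * from the real conditional store executed above (EQ set on
		 * success) and the SO bit is copied from the emulated XER.
		 */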
3408		if (!err)
3409			regs->ccr = (regs->ccr & 0x0fffffff) |
3410				(cr & 0xe0000000) |
3411				((regs->xer >> 3) & 0x10000000);
3412		else
3413			regs->dar = ea;
3414		break;
3415
3416	case LOAD:
3417#ifdef __powerpc64__
3418		if (size == 16) {
3419			err = emulate_lq(regs, ea, op->reg, cross_endian);
3420			break;
3421		}
3422#endif
3423		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
3424		if (!err) {
3425			if (op->type & SIGNEXT)
3426				do_signext(&regs->gpr[op->reg], size);
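			/*
			 * Byte-reversing loads (lwbrx etc.) and a cross-endian
			 * context each flip the byte order, and the two cancel
			 * out; swap only when exactly one of them applies.
			 */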
3427			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
3428				do_byterev(&regs->gpr[op->reg], size);
3429		}
3430		break;
3431
3432#ifdef CONFIG_PPC_FPU
3433	case LOAD_FP:
3434		/*
3435		 * If the instruction is in userspace, we can emulate it even
3436		 * if the FP state is not live, because we have the state
3437		 * stored in the thread_struct.  If the instruction is in
3438		 * the kernel, we must not touch the state in the thread_struct.
3439		 */
3440		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3441			return 0;
3442		err = do_fp_load(op, ea, regs, cross_endian);
3443		break;
3444#endif
3445#ifdef CONFIG_ALTIVEC
3446	case LOAD_VMX:
3447		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3448			return 0;
3449		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
3450		break;
3451#endif
3452#ifdef CONFIG_VSX
3453	case LOAD_VSX: {
3454		unsigned long msrbit = MSR_VSX;
3455
3456		/*
3457		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3458		 * when the target of the instruction is a vector register.
3459		 */
3460		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3461			msrbit = MSR_VEC;
3462		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3463			return 0;
3464		err = do_vsx_load(op, ea, regs, cross_endian);
3465		break;
3466	}
3467#endif
3468	case LOAD_MULTI:
3469		if (!address_ok(regs, ea, size))
3470			return -EFAULT;
3471		rd = op->reg;
3472		for (i = 0; i < size; i += 4) {
3473			unsigned int v32 = 0;
3474
3475			nb = size - i;
3476			if (nb > 4)
3477				nb = 4;
3478			err = copy_mem_in((u8 *) &v32, ea, nb, regs);
3479			if (err)
3480				break;
3481			if (unlikely(cross_endian))
3482				v32 = byterev_4(v32);
3483			regs->gpr[rd] = v32;
3484			ea += 4;
3485			/* reg number wraps from 31 to 0 for lsw[ix] */
3486			rd = (rd + 1) & 0x1f;
3487		}
3488		break;
3489
3490	case STORE:
3491#ifdef __powerpc64__
3492		if (size == 16) {
3493			err = emulate_stq(regs, ea, op->reg, cross_endian);
3494			break;
3495		}
3496#endif
3497		if ((op->type & UPDATE) && size == sizeof(long) &&
3498		    op->reg == 1 && op->update_reg == 1 &&
3499		    !(regs->msr & MSR_PR) &&
3500		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
3501			err = handle_stack_update(ea, regs);
3502			break;
3503		}
3504		if (unlikely(cross_endian))
3505			do_byterev(&op->val, size);
3506		err = write_mem(op->val, ea, size, regs);
3507		break;
3508
3509#ifdef CONFIG_PPC_FPU
3510	case STORE_FP:
3511		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3512			return 0;
3513		err = do_fp_store(op, ea, regs, cross_endian);
3514		break;
3515#endif
3516#ifdef CONFIG_ALTIVEC
3517	case STORE_VMX:
3518		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3519			return 0;
3520		err = do_vec_store(op->reg, ea, size, regs, cross_endian);
3521		break;
3522#endif
3523#ifdef CONFIG_VSX
3524	case STORE_VSX: {
3525		unsigned long msrbit = MSR_VSX;
3526
3527		/*
3528		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3529		 * when the target of the instruction is a vector register.
3530		 */
3531		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3532			msrbit = MSR_VEC;
3533		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3534			return 0;
3535		err = do_vsx_store(op, ea, regs, cross_endian);
3536		break;
3537	}
3538#endif
3539	case STORE_MULTI:
3540		if (!address_ok(regs, ea, size))
3541			return -EFAULT;
3542		rd = op->reg;
3543		for (i = 0; i < size; i += 4) {
3544			unsigned int v32 = regs->gpr[rd];
3545
3546			nb = size - i;
3547			if (nb > 4)
3548				nb = 4;
3549			if (unlikely(cross_endian))
3550				v32 = byterev_4(v32);
3551			err = copy_mem_out((u8 *) &v32, ea, nb, regs);
3552			if (err)
3553				break;
3554			ea += 4;
3555			/* reg number wraps from 31 to 0 for stsw[ix] */
3556			rd = (rd + 1) & 0x1f;
3557		}
3558		break;
3559
3560	default:
3561		return -EINVAL;
3562	}
3563
3564	if (err)
3565		return err;
3566
3567	if (op->type & UPDATE)
3568		regs->gpr[op->update_reg] = op->ea;
3569
3570	return 0;
3571}
3572NOKPROBE_SYMBOL(emulate_loadstore);
3573
3574/*
3575 * Emulate instructions that cause a transfer of control,
3576 * loads and stores, and a few other instructions.
3577 * Returns 1 if the step was emulated, 0 if not,
3578 * or -1 if the instruction is one that should not be stepped,
3579 * such as an rfid, or a mtmsrd that would clear MSR_RI.
3580 */
3581int emulate_step(struct pt_regs *regs, ppc_inst_t instr)
3582{
3583	struct instruction_op op;
3584	int r, err, type;
3585	unsigned long val;
3586	unsigned long ea;
3587
3588	r = analyse_instr(&op, regs, instr);
3589	if (r < 0)
3590		return r;
3591	if (r > 0) {
3592		emulate_update_regs(regs, &op);
3593		return 1;
3594	}
3595
3596	err = 0;
3597	type = GETTYPE(op.type);
3598
3599	if (OP_IS_LOAD_STORE(type)) {
3600		err = emulate_loadstore(regs, &op);
3601		if (err)
3602			return 0;
3603		goto instr_done;
3604	}
3605
3606	switch (type) {
3607	case CACHEOP:
3608		ea = truncate_if_32bit(regs->msr, op.ea);
3609		if (!address_ok(regs, ea, 8))
3610			return 0;
3611		switch (op.type & CACHEOP_MASK) {
3612		case DCBST:
3613			__cacheop_user_asmx(ea, err, "dcbst");
3614			break;
3615		case DCBF:
3616			__cacheop_user_asmx(ea, err, "dcbf");
3617			break;
3618		case DCBTST:
3619			if (op.reg == 0)
3620				prefetchw((void *) ea);
3621			break;
3622		case DCBT:
3623			if (op.reg == 0)
3624				prefetch((void *) ea);
3625			break;
3626		case ICBI:
3627			__cacheop_user_asmx(ea, err, "icbi");
3628			break;
3629		case DCBZ:
3630			err = emulate_dcbz(ea, regs);
3631			break;
3632		}
3633		if (err) {
3634			regs->dar = ea;
3635			return 0;
3636		}
3637		goto instr_done;
3638
3639	case MFMSR:
3640		regs->gpr[op.reg] = regs->msr & MSR_MASK;
3641		goto instr_done;
3642
3643	case MTMSR:
3644		val = regs->gpr[op.reg];
3645		if ((val & MSR_RI) == 0)
3646			/* can't step mtmsr[d] that would clear MSR_RI */
3647			return -1;
3648		/* here op.val is the mask of bits to change */
3649		regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
3650		goto instr_done;
3651
3651
3652	case SYSCALL:	/* sc */
3653		/*
3654		 * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't
3655		 * single step a system call instruction:
3656		 *
3657		 *   Successful completion for an instruction means that the
3658		 *   instruction caused no other interrupt. Thus a Trace
3659		 *   interrupt never occurs for a System Call or System Call
3660		 *   Vectored instruction, or for a Trap instruction that
3661		 *   traps.
3662		 */
3663		return -1;
3664	case SYSCALL_VECTORED_0:	/* scv 0 */
3665		return -1;
3666	case RFI:
3667		return -1;
3668	}
3669	return 0;
3670
3671 instr_done:
3672	regs_set_return_ip(regs,
3673		truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
3674	return 1;
3675}
3676NOKPROBE_SYMBOL(emulate_step);
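
/*
 * Illustrative usage sketch (not part of this file): a caller such as the
 * kprobes single-step path uses emulate_step() roughly as
 *
 *	if (emulate_step(regs, insn) > 0)
 *		return;		// emulated; regs->nip already advanced
 *	// otherwise fall back to hardware single-stepping
 */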
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Single-step support.
   4 *
   5 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
   6 */
   7#include <linux/kernel.h>
   8#include <linux/kprobes.h>
   9#include <linux/ptrace.h>
  10#include <linux/prefetch.h>
  11#include <asm/sstep.h>
  12#include <asm/processor.h>
  13#include <linux/uaccess.h>
  14#include <asm/cpu_has_feature.h>
  15#include <asm/cputable.h>
  16#include <asm/disassemble.h>
  17
  18extern char system_call_common[];
  19extern char system_call_vectored_emulate[];
  20
  21#ifdef CONFIG_PPC64
  22/* Bits in SRR1 that are copied from MSR */
  23#define MSR_MASK	0xffffffff87c0ffffUL
  24#else
  25#define MSR_MASK	0x87c0ffff
  26#endif
  27
  28/* Bits in XER */
  29#define XER_SO		0x80000000U
  30#define XER_OV		0x40000000U
  31#define XER_CA		0x20000000U
  32#define XER_OV32	0x00080000U
  33#define XER_CA32	0x00040000U
  34
 
 
 
 
  35#ifdef CONFIG_PPC_FPU
  36/*
  37 * Functions in ldstfp.S
  38 */
  39extern void get_fpr(int rn, double *p);
  40extern void put_fpr(int rn, const double *p);
  41extern void get_vr(int rn, __vector128 *p);
  42extern void put_vr(int rn, __vector128 *p);
  43extern void load_vsrn(int vsr, const void *p);
  44extern void store_vsrn(int vsr, void *p);
  45extern void conv_sp_to_dp(const float *sp, double *dp);
  46extern void conv_dp_to_sp(const double *dp, float *sp);
  47#endif
  48
  49#ifdef __powerpc64__
  50/*
  51 * Functions in quad.S
  52 */
  53extern int do_lq(unsigned long ea, unsigned long *regs);
  54extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
  55extern int do_lqarx(unsigned long ea, unsigned long *regs);
  56extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
  57		    unsigned int *crp);
  58#endif
  59
  60#ifdef __LITTLE_ENDIAN__
  61#define IS_LE	1
  62#define IS_BE	0
  63#else
  64#define IS_LE	0
  65#define IS_BE	1
  66#endif
  67
  68/*
  69 * Emulate the truncation of 64 bit values in 32-bit mode.
  70 */
  71static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
  72							unsigned long val)
  73{
  74#ifdef __powerpc64__
  75	if ((msr & MSR_64BIT) == 0)
  76		val &= 0xffffffffUL;
  77#endif
  78	return val;
  79}
  80
  81/*
  82 * Determine whether a conditional branch instruction would branch.
  83 */
  84static nokprobe_inline int branch_taken(unsigned int instr,
  85					const struct pt_regs *regs,
  86					struct instruction_op *op)
  87{
  88	unsigned int bo = (instr >> 21) & 0x1f;
  89	unsigned int bi;
  90
  91	if ((bo & 4) == 0) {
  92		/* decrement counter */
  93		op->type |= DECCTR;
  94		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
  95			return 0;
  96	}
  97	if ((bo & 0x10) == 0) {
  98		/* check bit from CR */
  99		bi = (instr >> 16) & 0x1f;
 100		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
 101			return 0;
 102	}
 103	return 1;
 104}
 105
 106static nokprobe_inline long address_ok(struct pt_regs *regs,
 107				       unsigned long ea, int nb)
 108{
 109	if (!user_mode(regs))
 110		return 1;
 111	if (__access_ok(ea, nb, USER_DS))
 112		return 1;
 113	if (__access_ok(ea, 1, USER_DS))
 114		/* Access overlaps the end of the user region */
 115		regs->dar = USER_DS.seg;
 116	else
 117		regs->dar = ea;
 118	return 0;
 119}
 120
 121/*
 122 * Calculate effective address for a D-form instruction
 123 */
 124static nokprobe_inline unsigned long dform_ea(unsigned int instr,
 125					      const struct pt_regs *regs)
 126{
 127	int ra;
 128	unsigned long ea;
 129
 130	ra = (instr >> 16) & 0x1f;
 131	ea = (signed short) instr;		/* sign-extend */
 132	if (ra)
 133		ea += regs->gpr[ra];
 134
 135	return ea;
 136}
 137
 138#ifdef __powerpc64__
 139/*
 140 * Calculate effective address for a DS-form instruction
 141 */
 142static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
 143					       const struct pt_regs *regs)
 144{
 145	int ra;
 146	unsigned long ea;
 147
 148	ra = (instr >> 16) & 0x1f;
 149	ea = (signed short) (instr & ~3);	/* sign-extend */
 150	if (ra)
 151		ea += regs->gpr[ra];
 152
 153	return ea;
 154}
 155
 156/*
 157 * Calculate effective address for a DQ-form instruction
 158 */
 159static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
 160					       const struct pt_regs *regs)
 161{
 162	int ra;
 163	unsigned long ea;
 164
 165	ra = (instr >> 16) & 0x1f;
 166	ea = (signed short) (instr & ~0xf);	/* sign-extend */
 167	if (ra)
 168		ea += regs->gpr[ra];
 169
 170	return ea;
 171}
 172#endif /* __powerpc64 */
 173
 174/*
 175 * Calculate effective address for an X-form instruction
 176 */
 177static nokprobe_inline unsigned long xform_ea(unsigned int instr,
 178					      const struct pt_regs *regs)
 179{
 180	int ra, rb;
 181	unsigned long ea;
 182
 183	ra = (instr >> 16) & 0x1f;
 184	rb = (instr >> 11) & 0x1f;
 185	ea = regs->gpr[rb];
 186	if (ra)
 187		ea += regs->gpr[ra];
 188
 189	return ea;
 190}
 191
 192/*
 193 * Calculate effective address for a MLS:D-form / 8LS:D-form
 194 * prefixed instruction
 195 */
 196static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
 197						  unsigned int suffix,
 198						  const struct pt_regs *regs)
 199{
 200	int ra, prefix_r;
 201	unsigned int  dd;
 202	unsigned long ea, d0, d1, d;
 203
 204	prefix_r = GET_PREFIX_R(instr);
 205	ra = GET_PREFIX_RA(suffix);
 206
 207	d0 = instr & 0x3ffff;
 208	d1 = suffix & 0xffff;
 209	d = (d0 << 16) | d1;
 210
 211	/*
 212	 * sign extend a 34 bit number
 213	 */
 214	dd = (unsigned int)(d >> 2);
 215	ea = (signed int)dd;
 216	ea = (ea << 2) | (d & 0x3);
 217
 218	if (!prefix_r && ra)
 219		ea += regs->gpr[ra];
 220	else if (!prefix_r && !ra)
 221		; /* Leave ea as is */
 222	else if (prefix_r && !ra)
 223		ea += regs->nip;
 224	else if (prefix_r && ra)
 225		; /* Invalid form. Should already be checked for by caller! */
 
 
 
 226
 227	return ea;
 228}
 229
 230/*
 231 * Return the largest power of 2, not greater than sizeof(unsigned long),
 232 * such that x is a multiple of it.
 233 */
 234static nokprobe_inline unsigned long max_align(unsigned long x)
 235{
 236	x |= sizeof(unsigned long);
 237	return x & -x;		/* isolates rightmost bit */
 238}
 239
 240static nokprobe_inline unsigned long byterev_2(unsigned long x)
 241{
 242	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
 243}
 244
 245static nokprobe_inline unsigned long byterev_4(unsigned long x)
 246{
 247	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
 248		((x & 0xff00) << 8) | ((x & 0xff) << 24);
 249}
 250
 251#ifdef __powerpc64__
 252static nokprobe_inline unsigned long byterev_8(unsigned long x)
 253{
 254	return (byterev_4(x) << 32) | byterev_4(x >> 32);
 255}
 256#endif
 257
 258static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
 259{
 260	switch (nb) {
 261	case 2:
 262		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
 263		break;
 264	case 4:
 265		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
 266		break;
 267#ifdef __powerpc64__
 268	case 8:
 269		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
 270		break;
 271	case 16: {
 272		unsigned long *up = (unsigned long *)ptr;
 273		unsigned long tmp;
 274		tmp = byterev_8(up[0]);
 275		up[0] = byterev_8(up[1]);
 276		up[1] = tmp;
 277		break;
 278	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 279#endif
 280	default:
 281		WARN_ON_ONCE(1);
 282	}
 283}
 284
 285static nokprobe_inline int read_mem_aligned(unsigned long *dest,
 286					    unsigned long ea, int nb,
 287					    struct pt_regs *regs)
 288{
 289	int err = 0;
 290	unsigned long x = 0;
 291
 292	switch (nb) {
 293	case 1:
 294		err = __get_user(x, (unsigned char __user *) ea);
 295		break;
 296	case 2:
 297		err = __get_user(x, (unsigned short __user *) ea);
 298		break;
 299	case 4:
 300		err = __get_user(x, (unsigned int __user *) ea);
 301		break;
 302#ifdef __powerpc64__
 303	case 8:
 304		err = __get_user(x, (unsigned long __user *) ea);
 305		break;
 306#endif
 307	}
 308	if (!err)
 309		*dest = x;
 310	else
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 311		regs->dar = ea;
 
 
 312	return err;
 313}
 314
 315/*
 316 * Copy from userspace to a buffer, using the largest possible
 317 * aligned accesses, up to sizeof(long).
 318 */
 319static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
 320				       struct pt_regs *regs)
 321{
 322	int err = 0;
 323	int c;
 324
 325	for (; nb > 0; nb -= c) {
 326		c = max_align(ea);
 327		if (c > nb)
 328			c = max_align(nb);
 329		switch (c) {
 330		case 1:
 331			err = __get_user(*dest, (unsigned char __user *) ea);
 332			break;
 333		case 2:
 334			err = __get_user(*(u16 *)dest,
 335					 (unsigned short __user *) ea);
 336			break;
 337		case 4:
 338			err = __get_user(*(u32 *)dest,
 339					 (unsigned int __user *) ea);
 340			break;
 341#ifdef __powerpc64__
 342		case 8:
 343			err = __get_user(*(unsigned long *)dest,
 344					 (unsigned long __user *) ea);
 345			break;
 346#endif
 347		}
 348		if (err) {
 349			regs->dar = ea;
 350			return err;
 351		}
 352		dest += c;
 353		ea += c;
 354	}
 355	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 356}
 357
 358static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
 359					      unsigned long ea, int nb,
 360					      struct pt_regs *regs)
 361{
 362	union {
 363		unsigned long ul;
 364		u8 b[sizeof(unsigned long)];
 365	} u;
 366	int i;
 367	int err;
 368
 369	u.ul = 0;
 370	i = IS_BE ? sizeof(unsigned long) - nb : 0;
 371	err = copy_mem_in(&u.b[i], ea, nb, regs);
 372	if (!err)
 373		*dest = u.ul;
 374	return err;
 375}
 376
 377/*
 378 * Read memory at address ea for nb bytes, return 0 for success
 379 * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
 380 * If nb < sizeof(long), the result is right-justified on BE systems.
 381 */
 382static int read_mem(unsigned long *dest, unsigned long ea, int nb,
 383			      struct pt_regs *regs)
 384{
 385	if (!address_ok(regs, ea, nb))
 386		return -EFAULT;
 387	if ((ea & (nb - 1)) == 0)
 388		return read_mem_aligned(dest, ea, nb, regs);
 389	return read_mem_unaligned(dest, ea, nb, regs);
 390}
 391NOKPROBE_SYMBOL(read_mem);
 392
 393static nokprobe_inline int write_mem_aligned(unsigned long val,
 394					     unsigned long ea, int nb,
 395					     struct pt_regs *regs)
 396{
 397	int err = 0;
 398
 399	switch (nb) {
 400	case 1:
 401		err = __put_user(val, (unsigned char __user *) ea);
 402		break;
 403	case 2:
 404		err = __put_user(val, (unsigned short __user *) ea);
 405		break;
 406	case 4:
 407		err = __put_user(val, (unsigned int __user *) ea);
 408		break;
 409#ifdef __powerpc64__
 410	case 8:
 411		err = __put_user(val, (unsigned long __user *) ea);
 412		break;
 413#endif
 414	}
 415	if (err)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 416		regs->dar = ea;
 
 
 417	return err;
 418}
 419
 420/*
 421 * Copy from a buffer to userspace, using the largest possible
 422 * aligned accesses, up to sizeof(long).
 423 */
 424static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
 425					struct pt_regs *regs)
 426{
 427	int err = 0;
 428	int c;
 429
 430	for (; nb > 0; nb -= c) {
 431		c = max_align(ea);
 432		if (c > nb)
 433			c = max_align(nb);
 434		switch (c) {
 435		case 1:
 436			err = __put_user(*dest, (unsigned char __user *) ea);
 437			break;
 438		case 2:
 439			err = __put_user(*(u16 *)dest,
 440					 (unsigned short __user *) ea);
 441			break;
 442		case 4:
 443			err = __put_user(*(u32 *)dest,
 444					 (unsigned int __user *) ea);
 445			break;
 446#ifdef __powerpc64__
 447		case 8:
 448			err = __put_user(*(unsigned long *)dest,
 449					 (unsigned long __user *) ea);
 450			break;
 451#endif
 452		}
 453		if (err) {
 454			regs->dar = ea;
 455			return err;
 456		}
 457		dest += c;
 458		ea += c;
 459	}
 460	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 461}
 462
 463static nokprobe_inline int write_mem_unaligned(unsigned long val,
 464					       unsigned long ea, int nb,
 465					       struct pt_regs *regs)
 466{
 467	union {
 468		unsigned long ul;
 469		u8 b[sizeof(unsigned long)];
 470	} u;
 471	int i;
 472
 473	u.ul = val;
 474	i = IS_BE ? sizeof(unsigned long) - nb : 0;
 475	return copy_mem_out(&u.b[i], ea, nb, regs);
 476}
 477
 478/*
 479 * Write memory at address ea for nb bytes, return 0 for success
 480 * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
 481 */
 482static int write_mem(unsigned long val, unsigned long ea, int nb,
 483			       struct pt_regs *regs)
 484{
 485	if (!address_ok(regs, ea, nb))
 486		return -EFAULT;
 487	if ((ea & (nb - 1)) == 0)
 488		return write_mem_aligned(val, ea, nb, regs);
 489	return write_mem_unaligned(val, ea, nb, regs);
 490}
 491NOKPROBE_SYMBOL(write_mem);
 492
 493#ifdef CONFIG_PPC_FPU
 494/*
 495 * These access either the real FP register or the image in the
 496 * thread_struct, depending on regs->msr & MSR_FP.
 497 */
 498static int do_fp_load(struct instruction_op *op, unsigned long ea,
 499		      struct pt_regs *regs, bool cross_endian)
 500{
 501	int err, rn, nb;
 502	union {
 503		int i;
 504		unsigned int u;
 505		float f;
 506		double d[2];
 507		unsigned long l[2];
 508		u8 b[2 * sizeof(double)];
 509	} u;
 510
 511	nb = GETSIZE(op->type);
 
 
 512	if (!address_ok(regs, ea, nb))
 513		return -EFAULT;
 514	rn = op->reg;
 515	err = copy_mem_in(u.b, ea, nb, regs);
 516	if (err)
 517		return err;
 518	if (unlikely(cross_endian)) {
 519		do_byte_reverse(u.b, min(nb, 8));
 520		if (nb == 16)
 521			do_byte_reverse(&u.b[8], 8);
 522	}
 523	preempt_disable();
 524	if (nb == 4) {
 525		if (op->type & FPCONV)
 526			conv_sp_to_dp(&u.f, &u.d[0]);
 527		else if (op->type & SIGNEXT)
 528			u.l[0] = u.i;
 529		else
 530			u.l[0] = u.u;
 531	}
 532	if (regs->msr & MSR_FP)
 533		put_fpr(rn, &u.d[0]);
 534	else
 535		current->thread.TS_FPR(rn) = u.l[0];
 536	if (nb == 16) {
 537		/* lfdp */
 538		rn |= 1;
 539		if (regs->msr & MSR_FP)
 540			put_fpr(rn, &u.d[1]);
 541		else
 542			current->thread.TS_FPR(rn) = u.l[1];
 543	}
 544	preempt_enable();
 545	return 0;
 546}
 547NOKPROBE_SYMBOL(do_fp_load);
 548
 549static int do_fp_store(struct instruction_op *op, unsigned long ea,
 550		       struct pt_regs *regs, bool cross_endian)
 551{
 552	int rn, nb;
 553	union {
 554		unsigned int u;
 555		float f;
 556		double d[2];
 557		unsigned long l[2];
 558		u8 b[2 * sizeof(double)];
 559	} u;
 560
 561	nb = GETSIZE(op->type);
 
 
 562	if (!address_ok(regs, ea, nb))
 563		return -EFAULT;
 564	rn = op->reg;
 565	preempt_disable();
 566	if (regs->msr & MSR_FP)
 567		get_fpr(rn, &u.d[0]);
 568	else
 569		u.l[0] = current->thread.TS_FPR(rn);
 570	if (nb == 4) {
 571		if (op->type & FPCONV)
 572			conv_dp_to_sp(&u.d[0], &u.f);
 573		else
 574			u.u = u.l[0];
 575	}
 576	if (nb == 16) {
 577		rn |= 1;
 578		if (regs->msr & MSR_FP)
 579			get_fpr(rn, &u.d[1]);
 580		else
 581			u.l[1] = current->thread.TS_FPR(rn);
 582	}
 583	preempt_enable();
 584	if (unlikely(cross_endian)) {
 585		do_byte_reverse(u.b, min(nb, 8));
 586		if (nb == 16)
 587			do_byte_reverse(&u.b[8], 8);
 588	}
 589	return copy_mem_out(u.b, ea, nb, regs);
 590}
 591NOKPROBE_SYMBOL(do_fp_store);
 592#endif
 593
 594#ifdef CONFIG_ALTIVEC
 595/* For Altivec/VMX, no need to worry about alignment */
 596static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
 597				       int size, struct pt_regs *regs,
 598				       bool cross_endian)
 599{
 600	int err;
 601	union {
 602		__vector128 v;
 603		u8 b[sizeof(__vector128)];
 604	} u = {};
 605
 606	if (!address_ok(regs, ea & ~0xfUL, 16))
 607		return -EFAULT;
 608	/* align to multiple of size */
 609	ea &= ~(size - 1);
 610	err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
 611	if (err)
 612		return err;
 613	if (unlikely(cross_endian))
 614		do_byte_reverse(&u.b[ea & 0xf], size);
 615	preempt_disable();
 616	if (regs->msr & MSR_VEC)
 617		put_vr(rn, &u.v);
 618	else
 619		current->thread.vr_state.vr[rn] = u.v;
 620	preempt_enable();
 621	return 0;
 622}
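
/*
 * Example: lvewx (size 4) with ea = 0x1008 rounds ea down to a 4-byte
 * boundary and copies the word into u.b[8..11] - the same byte offset
 * within the quadword that the hardware instruction would update - while
 * the remaining bytes of u stay zero from the initializer.
 */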
 623
 624static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
 625					int size, struct pt_regs *regs,
 626					bool cross_endian)
 627{
 628	union {
 629		__vector128 v;
 630		u8 b[sizeof(__vector128)];
 631	} u;
 632
 633	if (!address_ok(regs, ea & ~0xfUL, 16))
 634		return -EFAULT;
 635	/* align to multiple of size */
 636	ea &= ~(size - 1);
 637
 638	preempt_disable();
 639	if (regs->msr & MSR_VEC)
 640		get_vr(rn, &u.v);
 641	else
 642		u.v = current->thread.vr_state.vr[rn];
 643	preempt_enable();
 644	if (unlikely(cross_endian))
 645		do_byte_reverse(&u.b[ea & 0xf], size);
 646	return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
 647}
 648#endif /* CONFIG_ALTIVEC */
 649
 650#ifdef __powerpc64__
 651static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
 652				      int reg, bool cross_endian)
 653{
 654	int err;
 655
 656	if (!address_ok(regs, ea, 16))
 657		return -EFAULT;
 658	/* if aligned, should be atomic */
 659	if ((ea & 0xf) == 0) {
 660		err = do_lq(ea, &regs->gpr[reg]);
 661	} else {
 662		err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
 663		if (!err)
 664			err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
 665	}
 666	if (!err && unlikely(cross_endian))
 667		do_byte_reverse(&regs->gpr[reg], 16);
 668	return err;
 669}
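
/*
 * Register mapping above: on a little-endian kernel (IS_LE = 1) the
 * unaligned path reads the doubleword at ea into gpr[reg + 1] and the one
 * at ea + 8 into gpr[reg]; on big-endian the assignments swap, so gpr[reg]
 * receives the more-significant doubleword either way, matching lq.
 */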
 670
 671static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
 672				       int reg, bool cross_endian)
 673{
 674	int err;
 675	unsigned long vals[2];
 676
 677	if (!address_ok(regs, ea, 16))
 678		return -EFAULT;
 679	vals[0] = regs->gpr[reg];
 680	vals[1] = regs->gpr[reg + 1];
 681	if (unlikely(cross_endian))
 682		do_byte_reverse(vals, 16);
 683
 684	/* if aligned, should be atomic */
 685	if ((ea & 0xf) == 0)
 686		return do_stq(ea, vals[0], vals[1]);
 687
 688	err = write_mem(vals[IS_LE], ea, 8, regs);
 689	if (!err)
 690		err = write_mem(vals[IS_BE], ea + 8, 8, regs);
 691	return err;
 692}
 693#endif /* __powerpc64__ */
 694
 695#ifdef CONFIG_VSX
 696void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
 697		      const void *mem, bool rev)
 698{
 699	int size, read_size;
 700	int i, j;
 701	const unsigned int *wp;
 702	const unsigned short *hp;
 703	const unsigned char *bp;
 704
 705	size = GETSIZE(op->type);
 706	reg->d[0] = reg->d[1] = 0;
 707
 708	switch (op->element_size) {
 709	case 16:
 710		/* whole vector; lxv[x] or lxvl[l] */
 711		if (size == 0)
 712			break;
 713		memcpy(reg, mem, size);
 714		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
 715			rev = !rev;
 716		if (rev)
 717			do_byte_reverse(reg, 16);
 718		break;
 719	case 8:
 720		/* scalar loads, lxvd2x, lxvdsx */
 721		read_size = (size >= 8) ? 8 : size;
 722		i = IS_LE ? 8 : 8 - read_size;
 723		memcpy(&reg->b[i], mem, read_size);
 724		if (rev)
 725			do_byte_reverse(&reg->b[i], 8);
 726		if (size < 8) {
 727			if (op->type & SIGNEXT) {
 728				/* size == 4 is the only case here */
 729				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
 730			} else if (op->vsx_flags & VSX_FPCONV) {
 731				preempt_disable();
 732				conv_sp_to_dp(&reg->fp[1 + IS_LE],
 733					      &reg->dp[IS_LE]);
 734				preempt_enable();
 735			}
 736		} else {
 737			if (size == 16) {
 738				unsigned long v = *(unsigned long *)(mem + 8);
 739				reg->d[IS_BE] = !rev ? v : byterev_8(v);
 740			} else if (op->vsx_flags & VSX_SPLAT)
 741				reg->d[IS_BE] = reg->d[IS_LE];
 742		}
 743		break;
 744	case 4:
 745		/* lxvw4x, lxvwsx */
 746		wp = mem;
 747		for (j = 0; j < size / 4; ++j) {
 748			i = IS_LE ? 3 - j : j;
 749			reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
 750		}
 751		if (op->vsx_flags & VSX_SPLAT) {
 752			u32 val = reg->w[IS_LE ? 3 : 0];
 753			for (; j < 4; ++j) {
 754				i = IS_LE ? 3 - j : j;
 755				reg->w[i] = val;
 756			}
 757		}
 758		break;
 759	case 2:
 760		/* lxvh8x */
 761		hp = mem;
 762		for (j = 0; j < size / 2; ++j) {
 763			i = IS_LE ? 7 - j : j;
 764			reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
 765		}
 766		break;
 767	case 1:
 768		/* lxvb16x */
 769		bp = mem;
 770		for (j = 0; j < size; ++j) {
 771			i = IS_LE ? 15 - j : j;
 772			reg->b[i] = *bp++;
 773		}
 774		break;
 775	}
 776}
 777EXPORT_SYMBOL_GPL(emulate_vsx_load);
 778NOKPROBE_SYMBOL(emulate_vsx_load);
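
/*
 * Example of the element_size dispatch above: lxvw4x (element_size 4)
 * copies four 32-bit words, placing memory word j into reg->w[3 - j] on LE
 * and reg->w[j] on BE, so the register image matches what the hardware
 * instruction would have produced.
 */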
 779
 780void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
 781		       void *mem, bool rev)
 782{
 783	int size, write_size;
 784	int i, j;
 785	union vsx_reg buf;
 786	unsigned int *wp;
 787	unsigned short *hp;
 788	unsigned char *bp;
 789
 790	size = GETSIZE(op->type);
 791
 792	switch (op->element_size) {
 793	case 16:
 794		/* stxv, stxvx, stxvl, stxvll */
 795		if (size == 0)
 796			break;
 797		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
 798			rev = !rev;
 799		if (rev) {
 800			/* reverse 16 bytes */
 801			buf.d[0] = byterev_8(reg->d[1]);
 802			buf.d[1] = byterev_8(reg->d[0]);
 803			reg = &buf;
 804		}
 805		memcpy(mem, reg, size);
 806		break;
 807	case 8:
 808		/* scalar stores, stxvd2x */
 809		write_size = (size >= 8) ? 8 : size;
 810		i = IS_LE ? 8 : 8 - write_size;
 811		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
 812			buf.d[0] = buf.d[1] = 0;
 813			preempt_disable();
 814			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
 815			preempt_enable();
 816			reg = &buf;
 817		}
 818		memcpy(mem, &reg->b[i], write_size);
 819		if (size == 16)
 820			memcpy(mem + 8, &reg->d[IS_BE], 8);
 821		if (unlikely(rev)) {
 822			do_byte_reverse(mem, write_size);
 823			if (size == 16)
 824				do_byte_reverse(mem + 8, 8);
 825		}
 826		break;
 827	case 4:
 828		/* stxvw4x */
 829		wp = mem;
 830		for (j = 0; j < size / 4; ++j) {
 831			i = IS_LE ? 3 - j : j;
 832			*wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
 833		}
 834		break;
 835	case 2:
 836		/* stxvh8x */
 837		hp = mem;
 838		for (j = 0; j < size / 2; ++j) {
 839			i = IS_LE ? 7 - j : j;
 840			*hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
 841		}
 842		break;
 843	case 1:
 844		/* stxvb16x */
 845		bp = mem;
 846		for (j = 0; j < size; ++j) {
 847			i = IS_LE ? 15 - j : j;
 848			*bp++ = reg->b[i];
 849		}
 850		break;
 851	}
 852}
 853EXPORT_SYMBOL_GPL(emulate_vsx_store);
 854NOKPROBE_SYMBOL(emulate_vsx_store);
 855
 856static nokprobe_inline int do_vsx_load(struct instruction_op *op,
 857				       unsigned long ea, struct pt_regs *regs,
 858				       bool cross_endian)
 859{
 860	int reg = op->reg;
 861	u8 mem[16];
 862	union vsx_reg buf;
 863	int size = GETSIZE(op->type);
 864
 865	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
 866		return -EFAULT;
 867
 868	emulate_vsx_load(op, &buf, mem, cross_endian);
 869	preempt_disable();
 870	if (reg < 32) {
 871		/* FP regs + extensions */
 872		if (regs->msr & MSR_FP) {
 873			load_vsrn(reg, &buf);
 874		} else {
 875			current->thread.fp_state.fpr[reg][0] = buf.d[0];
 876			current->thread.fp_state.fpr[reg][1] = buf.d[1];
 877		}
 878	} else {
 879		if (regs->msr & MSR_VEC)
 880			load_vsrn(reg, &buf);
 881		else
 882			current->thread.vr_state.vr[reg - 32] = buf.v;
 883	}
 884	preempt_enable();
 885	return 0;
 886}
 887
 888static nokprobe_inline int do_vsx_store(struct instruction_op *op,
 889					unsigned long ea, struct pt_regs *regs,
 890					bool cross_endian)
 891{
 892	int reg = op->reg;
 893	u8 mem[16];
 894	union vsx_reg buf;
 895	int size = GETSIZE(op->type);
 896
 897	if (!address_ok(regs, ea, size))
 898		return -EFAULT;
 899
 900	preempt_disable();
 901	if (reg < 32) {
 902		/* FP regs + extensions */
 903		if (regs->msr & MSR_FP) {
 904			store_vsrn(reg, &buf);
 905		} else {
 906			buf.d[0] = current->thread.fp_state.fpr[reg][0];
 907			buf.d[1] = current->thread.fp_state.fpr[reg][1];
 908		}
 909	} else {
 910		if (regs->msr & MSR_VEC)
 911			store_vsrn(reg, &buf);
 912		else
 913			buf.v = current->thread.vr_state.vr[reg - 32];
 914	}
 915	preempt_enable();
 916	emulate_vsx_store(op, &buf, mem, cross_endian);
 917	return  copy_mem_out(mem, ea, size, regs);
 918}
 919#endif /* CONFIG_VSX */
 920
 921int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
 922{
 923	int err;
 924	unsigned long i, size;
 925
 926#ifdef __powerpc64__
 927	size = ppc64_caches.l1d.block_size;
 928	if (!(regs->msr & MSR_64BIT))
 929		ea &= 0xffffffffUL;
 930#else
 931	size = L1_CACHE_BYTES;
 932#endif
 933	ea &= ~(size - 1);
 934	if (!address_ok(regs, ea, size))
 935		return -EFAULT;
 936	for (i = 0; i < size; i += sizeof(long)) {
 937		err = __put_user(0, (unsigned long __user *) (ea + i));
 938		if (err) {
 939			regs->dar = ea;
 940			return err;
 941		}
 942	}
 943	return 0;
 944}
 945NOKPROBE_SYMBOL(emulate_dcbz);
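
/*
 * Example: with a 128-byte L1 block size, dcbz with ea = 0x10a0 clears
 * 0x1080..0x10ff; the loop stores zero a long at a time and reports the
 * block base address in regs->dar if a store faults.
 */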
 946
 947#define __put_user_asmx(x, addr, err, op, cr)		\
 948	__asm__ __volatile__(				\
 949		"1:	" op " %2,0,%3\n"		\
 950		"	mfcr	%1\n"			\
 951		"2:\n"					\
 952		".section .fixup,\"ax\"\n"		\
 953		"3:	li	%0,%4\n"		\
 954		"	b	2b\n"			\
 955		".previous\n"				\
 956		EX_TABLE(1b, 3b)			\
 957		: "=r" (err), "=r" (cr)			\
 958		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
 959
 960#define __get_user_asmx(x, addr, err, op)		\
 961	__asm__ __volatile__(				\
 962		"1:	"op" %1,0,%2\n"			\
 963		"2:\n"					\
 964		".section .fixup,\"ax\"\n"		\
 965		"3:	li	%0,%3\n"		\
 966		"	b	2b\n"			\
 967		".previous\n"				\
 968		EX_TABLE(1b, 3b)			\
 969		: "=r" (err), "=r" (x)			\
 970		: "r" (addr), "i" (-EFAULT), "0" (err))
 971
 972#define __cacheop_user_asmx(addr, err, op)		\
 973	__asm__ __volatile__(				\
 974		"1:	"op" 0,%1\n"			\
 975		"2:\n"					\
 976		".section .fixup,\"ax\"\n"		\
 977		"3:	li	%0,%3\n"		\
 978		"	b	2b\n"			\
 979		".previous\n"				\
 980		EX_TABLE(1b, 3b)			\
 981		: "=r" (err)				\
 982		: "r" (addr), "i" (-EFAULT), "0" (err))
 983
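/*
 * The three asm helpers above share one fixup pattern: label 1 is the
 * faulting instruction, label 3 in .fixup loads -EFAULT into err and
 * branches back to label 2, and EX_TABLE(1b, 3b) registers the pair so
 * that a fault at 1 makes the exception handler resume at 3.
 */
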
 984static nokprobe_inline void set_cr0(const struct pt_regs *regs,
 985				    struct instruction_op *op)
 986{
 987	long val = op->val;
 988
 989	op->type |= SETCC;
 990	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
 991#ifdef __powerpc64__
 992	if (!(regs->msr & MSR_64BIT))
 993		val = (int) val;
 994#endif
 995	if (val < 0)
 996		op->ccval |= 0x80000000;
 997	else if (val > 0)
 998		op->ccval |= 0x40000000;
 999	else
1000		op->ccval |= 0x20000000;
1001}
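
/*
 * Example: val = -5 yields CR0 = LT (0b1000) plus the current XER[SO]
 * bit; in 32-bit mode val is truncated first, so 0x100000000 would set
 * EQ rather than GT.
 */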
1002
1003static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
1004{
1005	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1006		if (val)
1007			op->xerval |= XER_CA32;
1008		else
1009			op->xerval &= ~XER_CA32;
1010	}
1011}
1012
1013static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
1014				     struct instruction_op *op, int rd,
1015				     unsigned long val1, unsigned long val2,
1016				     unsigned long carry_in)
1017{
1018	unsigned long val = val1 + val2;
1019
1020	if (carry_in)
1021		++val;
1022	op->type = COMPUTE + SETREG + SETXER;
1023	op->reg = rd;
1024	op->val = val;
1025#ifdef __powerpc64__
1026	if (!(regs->msr & MSR_64BIT)) {
1027		val = (unsigned int) val;
1028		val1 = (unsigned int) val1;
1029	}
1030#endif
1031	op->xerval = regs->xer;
1032	if (val < val1 || (carry_in && val == val1))
1033		op->xerval |= XER_CA;
1034	else
1035		op->xerval &= ~XER_CA;
1036
1037	set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
1038			(carry_in && (unsigned int)val == (unsigned int)val1));
1039}
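
/*
 * Carry example: val1 = ~0UL, val2 = 1, carry_in = 0 gives val = 0, and
 * the unsigned wrap is caught by val < val1, setting XER[CA]; the second
 * comparison repeats the test on the low 32 bits to drive XER[CA32].
 */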
1040
1041static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
1042					  struct instruction_op *op,
1043					  long v1, long v2, int crfld)
1044{
1045	unsigned int crval, shift;
1046
1047	op->type = COMPUTE + SETCC;
1048	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1049	if (v1 < v2)
1050		crval |= 8;
1051	else if (v1 > v2)
1052		crval |= 4;
1053	else
1054		crval |= 2;
1055	shift = (7 - crfld) * 4;
1056	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1057}
1058
1059static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
1060					    struct instruction_op *op,
1061					    unsigned long v1,
1062					    unsigned long v2, int crfld)
1063{
1064	unsigned int crval, shift;
1065
1066	op->type = COMPUTE + SETCC;
1067	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1068	if (v1 < v2)
1069		crval |= 8;
1070	else if (v1 > v2)
1071		crval |= 4;
1072	else
1073		crval |= 2;
1074	shift = (7 - crfld) * 4;
1075	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1076}
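
/*
 * The crval bits built above follow the CR field layout: 8 = LT, 4 = GT,
 * 2 = EQ, 1 = SO (copied from XER).  E.g. a compare into CR field 3 with
 * v1 < v2 writes 0b1000 at shift (7 - 3) * 4 = 16, i.e. CR bits 16-19
 * counting from the least-significant end.
 */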
1077
1078static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
1079				    struct instruction_op *op,
1080				    unsigned long v1, unsigned long v2)
1081{
1082	unsigned long long out_val, mask;
1083	int i;
1084
1085	out_val = 0;
1086	for (i = 0; i < 8; i++) {
1087		mask = 0xffUL << (i * 8);
1088		if ((v1 & mask) == (v2 & mask))
1089			out_val |= mask;
1090	}
1091	op->val = out_val;
1092}
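
/*
 * Example: v1 = 0x1122334455667788 and v2 = 0x1100330055007700 make the
 * loop above return 0xff00ff00ff00ff00 - each result byte is all-ones
 * exactly where the corresponding bytes of v1 and v2 are equal.
 */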
1093
1094/*
1095 * The size parameter is used to adjust the equivalent popcnt instruction.
1096 * popcntb = 8, popcntw = 32, popcntd = 64
1097 */
1098static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
1099				      struct instruction_op *op,
1100				      unsigned long v1, int size)
1101{
1102	unsigned long long out = v1;
1103
1104	out -= (out >> 1) & 0x5555555555555555ULL;
1105	out = (0x3333333333333333ULL & out) +
1106	      (0x3333333333333333ULL & (out >> 2));
1107	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1108
1109	if (size == 8) {	/* popcntb */
1110		op->val = out;
1111		return;
1112	}
1113	out += out >> 8;
1114	out += out >> 16;
1115	if (size == 32) {	/* popcntw */
1116		op->val = out & 0x0000003f0000003fULL;
1117		return;
1118	}
1119
1120	out = (out + (out >> 32)) & 0x7f;
1121	op->val = out;	/* popcntd */
1122}
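
/*
 * The masking steps above are the usual SWAR reduction: for the byte
 * 0b01101101 the pairwise step leaves counts 1,1,2,1, the nibble step
 * sums them to 2 and 3, and the byte fold gives 5; the wider shifts are
 * only needed for the popcntw/popcntd variants.
 */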
1123
1124#ifdef CONFIG_PPC64
1125static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
1126				      struct instruction_op *op,
1127				      unsigned long v1, unsigned long v2)
1128{
1129	unsigned char perm, idx;
1130	unsigned int i;
1131
1132	perm = 0;
1133	for (i = 0; i < 8; i++) {
1134		idx = (v1 >> (i * 8)) & 0xff;
1135		if (idx < 64)
1136			if (v2 & PPC_BIT(idx))
1137				perm |= 1 << i;
1138	}
1139	op->val = perm;
1140}
1141#endif /* CONFIG_PPC64 */
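
/*
 * bpermd example: each byte i of v1 is a bit index k; if k < 64, bit i of
 * the result is bit k of v2 in IBM (MSB = 0) numbering via PPC_BIT(),
 * and indices of 64 or more contribute a zero bit.
 */
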
1142/*
1143 * The size parameter adjusts the equivalent prty instruction.
1144 * prtyw = 32, prtyd = 64
1145 */
1146static nokprobe_inline void do_prty(const struct pt_regs *regs,
1147				    struct instruction_op *op,
1148				    unsigned long v, int size)
1149{
1150	unsigned long long res = v ^ (v >> 8);
1151
1152	res ^= res >> 16;
1153	if (size == 32) {		/* prtyw */
1154		op->val = res & 0x0000000100000001ULL;
1155		return;
1156	}
1157
1158	res ^= res >> 32;
1159	op->val = res & 1;	/* prtyd */
1160}
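
/*
 * The folds above XOR together the least-significant bit of every byte:
 * e.g. v = 0x0101010100000000 has four bytes with LSB set in its high
 * word, so prtyw reports even parity (0) for that word.
 */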
1161
1162static nokprobe_inline int trap_compare(long v1, long v2)
1163{
1164	int ret = 0;
1165
1166	if (v1 < v2)
1167		ret |= 0x10;
1168	else if (v1 > v2)
1169		ret |= 0x08;
1170	else
1171		ret |= 0x04;
1172	if ((unsigned long)v1 < (unsigned long)v2)
1173		ret |= 0x02;
1174	else if ((unsigned long)v1 > (unsigned long)v2)
1175		ret |= 0x01;
1176	return ret;
1177}
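
/*
 * The return value mirrors the TO field of tw/td: 0x10 = signed less,
 * 0x08 = signed greater, 0x04 = equal, 0x02 = unsigned less, 0x01 =
 * unsigned greater.  A trap is taken when TO AND this value is non-zero,
 * which is why callers test (rd & trap_compare(...)).
 */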
1178
1179/*
1180 * Elements of 32-bit rotate and mask instructions.
1181 */
1182#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
1183			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
1184#ifdef __powerpc64__
1185#define MASK64_L(mb)	(~0UL >> (mb))
1186#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
1187#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
1188#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
1189#else
1190#define DATA32(x)	(x)
1191#endif
1192#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
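
/*
 * MASK32/MASK64 build a run of ones from bit mb through bit me in IBM
 * (MSB = 0) numbering, e.g. MASK32(24, 31) = 0x000000ff and
 * MASK32(0, 7) = 0xff000000.  ROTATE() is a left rotate; the n == 0 case
 * is special-cased to avoid an undefined shift by the full word size.
 */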
1193
1194/*
1195 * Decode an instruction, and return information about it in *op
1196 * without changing *regs.
1197 * Integer arithmetic and logical instructions, branches, and barrier
1198 * instructions can be emulated just using the information in *op.
1199 *
1200 * Return value is 1 if the instruction can be emulated just by
1201 * updating *regs with the information in *op, -1 if we need the
1202 * GPRs but *regs doesn't contain the full register set, or 0
1203 * otherwise.
1204 */
1205int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1206		  struct ppc_inst instr)
1207{
1208#ifdef CONFIG_PPC64
1209	unsigned int suffixopcode, prefixtype, prefix_r;
1210#endif
1211	unsigned int opcode, ra, rb, rc, rd, spr, u;
1212	unsigned long int imm;
1213	unsigned long int val, val2;
1214	unsigned int mb, me, sh;
1215	unsigned int word, suffix;
1216	long ival;
1217
1218	word = ppc_inst_val(instr);
1219	suffix = ppc_inst_suffix(instr);
1220
1221	op->type = COMPUTE;
1222
1223	opcode = ppc_inst_primary_opcode(instr);
1224	switch (opcode) {
1225	case 16:	/* bc */
1226		op->type = BRANCH;
1227		imm = (signed short)(word & 0xfffc);
1228		if ((word & 2) == 0)
1229			imm += regs->nip;
1230		op->val = truncate_if_32bit(regs->msr, imm);
1231		if (word & 1)
1232			op->type |= SETLK;
1233		if (branch_taken(word, regs, op))
1234			op->type |= BRTAKEN;
1235		return 1;
1236#ifdef CONFIG_PPC64
1237	case 17:	/* sc */
1238		if ((word & 0xfe2) == 2)
1239			op->type = SYSCALL;
1240		else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1241				(word & 0xfe3) == 1)
1242			op->type = SYSCALL_VECTORED_0;
1243		else
1244			op->type = UNKNOWN;
1245		return 0;
1246#endif
1247	case 18:	/* b */
1248		op->type = BRANCH | BRTAKEN;
1249		imm = word & 0x03fffffc;
1250		if (imm & 0x02000000)
1251			imm -= 0x04000000;
1252		if ((word & 2) == 0)
1253			imm += regs->nip;
1254		op->val = truncate_if_32bit(regs->msr, imm);
1255		if (word & 1)
1256			op->type |= SETLK;
1257		return 1;
1258	case 19:
1259		switch ((word >> 1) & 0x3ff) {
1260		case 0:		/* mcrf */
1261			op->type = COMPUTE + SETCC;
1262			rd = 7 - ((word >> 23) & 0x7);
1263			ra = 7 - ((word >> 18) & 0x7);
1264			rd *= 4;
1265			ra *= 4;
1266			val = (regs->ccr >> ra) & 0xf;
1267			op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1268			return 1;
1269
1270		case 16:	/* bclr */
1271		case 528:	/* bcctr */
1272			op->type = BRANCH;
1273			imm = (word & 0x400)? regs->ctr: regs->link;
1274			op->val = truncate_if_32bit(regs->msr, imm);
1275			if (word & 1)
1276				op->type |= SETLK;
1277			if (branch_taken(word, regs, op))
1278				op->type |= BRTAKEN;
1279			return 1;
1280
1281		case 18:	/* rfid, scary */
1282			if (regs->msr & MSR_PR)
1283				goto priv;
1284			op->type = RFI;
1285			return 0;
1286
1287		case 150:	/* isync */
1288			op->type = BARRIER | BARRIER_ISYNC;
1289			return 1;
1290
1291		case 33:	/* crnor */
1292		case 129:	/* crandc */
1293		case 193:	/* crxor */
1294		case 225:	/* crnand */
1295		case 257:	/* crand */
1296		case 289:	/* creqv */
1297		case 417:	/* crorc */
1298		case 449:	/* cror */
1299			op->type = COMPUTE + SETCC;
1300			ra = (word >> 16) & 0x1f;
1301			rb = (word >> 11) & 0x1f;
1302			rd = (word >> 21) & 0x1f;
1303			ra = (regs->ccr >> (31 - ra)) & 1;
1304			rb = (regs->ccr >> (31 - rb)) & 1;
1305			val = (word >> (6 + ra * 2 + rb)) & 1;
1306			op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1307				(val << (31 - rd));
1308			return 1;
1309		}
1310		break;
1311	case 31:
1312		switch ((word >> 1) & 0x3ff) {
1313		case 598:	/* sync */
1314			op->type = BARRIER + BARRIER_SYNC;
1315#ifdef __powerpc64__
1316			switch ((word >> 21) & 3) {
1317			case 1:		/* lwsync */
1318				op->type = BARRIER + BARRIER_LWSYNC;
1319				break;
1320			case 2:		/* ptesync */
1321				op->type = BARRIER + BARRIER_PTESYNC;
1322				break;
1323			}
1324#endif
1325			return 1;
1326
1327		case 854:	/* eieio */
1328			op->type = BARRIER + BARRIER_EIEIO;
1329			return 1;
1330		}
1331		break;
1332	}
1333
1334	/* Following cases refer to regs->gpr[], so we need all regs */
1335	if (!FULL_REGS(regs))
1336		return -1;
1337
1338	rd = (word >> 21) & 0x1f;
1339	ra = (word >> 16) & 0x1f;
1340	rb = (word >> 11) & 0x1f;
1341	rc = (word >> 6) & 0x1f;
1342
1343	switch (opcode) {
1344#ifdef __powerpc64__
1345	case 1:
1346		prefix_r = GET_PREFIX_R(word);
1347		ra = GET_PREFIX_RA(suffix);
1348		rd = (suffix >> 21) & 0x1f;
1349		op->reg = rd;
1350		op->val = regs->gpr[rd];
1351		suffixopcode = get_op(suffix);
1352		prefixtype = (word >> 24) & 0x3;
1353		switch (prefixtype) {
1354		case 2:
1355			if (prefix_r && ra)
1356				return 0;
1357			switch (suffixopcode) {
1358			case 14:	/* paddi */
1359				op->type = COMPUTE | PREFIXED;
1360				op->val = mlsd_8lsd_ea(word, suffix, regs);
1361				goto compute_done;
1362			}
1363		}
1364		break;
1365	case 2:		/* tdi */
1366		if (rd & trap_compare(regs->gpr[ra], (short) word))
1367			goto trap;
1368		return 1;
1369#endif
1370	case 3:		/* twi */
1371		if (rd & trap_compare((int)regs->gpr[ra], (short) word))
1372			goto trap;
1373		return 1;
1374
1375#ifdef __powerpc64__
1376	case 4:
1377		if (!cpu_has_feature(CPU_FTR_ARCH_300))
1378			return -1;
1379
1380		switch (word & 0x3f) {
1381		case 48:	/* maddhd */
1382			asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
1383				     "=r" (op->val) : "r" (regs->gpr[ra]),
1384				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1385			goto compute_done;
1386
1387		case 49:	/* maddhdu */
1388			asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
1389				     "=r" (op->val) : "r" (regs->gpr[ra]),
1390				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1391			goto compute_done;
1392
1393		case 51:	/* maddld */
1394			asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
1395				     "=r" (op->val) : "r" (regs->gpr[ra]),
1396				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1397			goto compute_done;
1398		}
1399
1400		/*
1401		 * There are other instructions from ISA 3.0 with the same
1402		 * primary opcode which do not have emulation support yet.
1403		 */
1404		return -1;
1405#endif
1406
1407	case 7:		/* mulli */
1408		op->val = regs->gpr[ra] * (short) word;
1409		goto compute_done;
1410
1411	case 8:		/* subfic */
1412		imm = (short) word;
1413		add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1414		return 1;
1415
1416	case 10:	/* cmpli */
1417		imm = (unsigned short) word;
1418		val = regs->gpr[ra];
1419#ifdef __powerpc64__
1420		if ((rd & 1) == 0)
1421			val = (unsigned int) val;
1422#endif
1423		do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1424		return 1;
1425
1426	case 11:	/* cmpi */
1427		imm = (short) word;
1428		val = regs->gpr[ra];
1429#ifdef __powerpc64__
1430		if ((rd & 1) == 0)
1431			val = (int) val;
1432#endif
1433		do_cmp_signed(regs, op, val, imm, rd >> 2);
1434		return 1;
1435
1436	case 12:	/* addic */
1437		imm = (short) word;
1438		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1439		return 1;
1440
1441	case 13:	/* addic. */
1442		imm = (short) word;
1443		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1444		set_cr0(regs, op);
1445		return 1;
1446
1447	case 14:	/* addi */
1448		imm = (short) word;
1449		if (ra)
1450			imm += regs->gpr[ra];
1451		op->val = imm;
1452		goto compute_done;
1453
1454	case 15:	/* addis */
1455		imm = ((short) word) << 16;
1456		if (ra)
1457			imm += regs->gpr[ra];
1458		op->val = imm;
1459		goto compute_done;
1460
1461	case 19:
1462		if (((word >> 1) & 0x1f) == 2) {
1463			/* addpcis */
1464			imm = (short) (word & 0xffc1);	/* d0 + d2 fields */
1465			imm |= (word >> 15) & 0x3e;	/* d1 field */
1466			op->val = regs->nip + (imm << 16) + 4;
1467			goto compute_done;
1468		}
1469		op->type = UNKNOWN;
1470		return 0;
1471
1472	case 20:	/* rlwimi */
1473		mb = (word >> 6) & 0x1f;
1474		me = (word >> 1) & 0x1f;
1475		val = DATA32(regs->gpr[rd]);
1476		imm = MASK32(mb, me);
1477		op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1478		goto logical_done;
1479
1480	case 21:	/* rlwinm */
1481		mb = (word >> 6) & 0x1f;
1482		me = (word >> 1) & 0x1f;
1483		val = DATA32(regs->gpr[rd]);
1484		op->val = ROTATE(val, rb) & MASK32(mb, me);
1485		goto logical_done;
1486
1487	case 23:	/* rlwnm */
1488		mb = (word >> 6) & 0x1f;
1489		me = (word >> 1) & 0x1f;
1490		rb = regs->gpr[rb] & 0x1f;
1491		val = DATA32(regs->gpr[rd]);
1492		op->val = ROTATE(val, rb) & MASK32(mb, me);
1493		goto logical_done;
1494
1495	case 24:	/* ori */
1496		op->val = regs->gpr[rd] | (unsigned short) word;
1497		goto logical_done_nocc;
1498
1499	case 25:	/* oris */
1500		imm = (unsigned short) word;
1501		op->val = regs->gpr[rd] | (imm << 16);
1502		goto logical_done_nocc;
1503
1504	case 26:	/* xori */
1505		op->val = regs->gpr[rd] ^ (unsigned short) word;
1506		goto logical_done_nocc;
1507
1508	case 27:	/* xoris */
1509		imm = (unsigned short) word;
1510		op->val = regs->gpr[rd] ^ (imm << 16);
1511		goto logical_done_nocc;
1512
1513	case 28:	/* andi. */
1514		op->val = regs->gpr[rd] & (unsigned short) word;
1515		set_cr0(regs, op);
1516		goto logical_done_nocc;
1517
1518	case 29:	/* andis. */
1519		imm = (unsigned short) word;
1520		op->val = regs->gpr[rd] & (imm << 16);
1521		set_cr0(regs, op);
1522		goto logical_done_nocc;
1523
1524#ifdef __powerpc64__
1525	case 30:	/* rld* */
1526		mb = ((word >> 6) & 0x1f) | (word & 0x20);
1527		val = regs->gpr[rd];
1528		if ((word & 0x10) == 0) {
1529			sh = rb | ((word & 2) << 4);
1530			val = ROTATE(val, sh);
1531			switch ((word >> 2) & 3) {
1532			case 0:		/* rldicl */
1533				val &= MASK64_L(mb);
1534				break;
1535			case 1:		/* rldicr */
1536				val &= MASK64_R(mb);
1537				break;
1538			case 2:		/* rldic */
1539				val &= MASK64(mb, 63 - sh);
1540				break;
1541			case 3:		/* rldimi */
1542				imm = MASK64(mb, 63 - sh);
1543				val = (regs->gpr[ra] & ~imm) |
1544					(val & imm);
1545			}
1546			op->val = val;
1547			goto logical_done;
1548		} else {
1549			sh = regs->gpr[rb] & 0x3f;
1550			val = ROTATE(val, sh);
1551			switch ((word >> 1) & 7) {
1552			case 0:		/* rldcl */
1553				op->val = val & MASK64_L(mb);
1554				goto logical_done;
1555			case 1:		/* rldcr */
1556				op->val = val & MASK64_R(mb);
1557				goto logical_done;
1558			}
1559		}
1560#endif
1561		op->type = UNKNOWN;	/* illegal instruction */
1562		return 0;
1563
1564	case 31:
1565		/* isel occupies 32 minor opcodes */
1566		if (((word >> 1) & 0x1f) == 15) {
1567			mb = (word >> 6) & 0x1f; /* bc field */
1568			val = (regs->ccr >> (31 - mb)) & 1;
1569			val2 = (ra) ? regs->gpr[ra] : 0;
1570
1571			op->val = (val) ? val2 : regs->gpr[rb];
1572			goto compute_done;
1573		}
1574
1575		switch ((word >> 1) & 0x3ff) {
1576		case 4:		/* tw */
1577			if (rd == 0x1f ||
1578			    (rd & trap_compare((int)regs->gpr[ra],
1579					       (int)regs->gpr[rb])))
1580				goto trap;
1581			return 1;
1582#ifdef __powerpc64__
1583		case 68:	/* td */
1584			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1585				goto trap;
1586			return 1;
1587#endif
1588		case 83:	/* mfmsr */
1589			if (regs->msr & MSR_PR)
1590				goto priv;
1591			op->type = MFMSR;
1592			op->reg = rd;
1593			return 0;
1594		case 146:	/* mtmsr */
1595			if (regs->msr & MSR_PR)
1596				goto priv;
1597			op->type = MTMSR;
1598			op->reg = rd;
1599			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1600			return 0;
1601#ifdef CONFIG_PPC64
1602		case 178:	/* mtmsrd */
1603			if (regs->msr & MSR_PR)
1604				goto priv;
1605			op->type = MTMSR;
1606			op->reg = rd;
1607			/* only MSR_EE and MSR_RI get changed if bit 15 set */
1608			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1609			imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1610			op->val = imm;
1611			return 0;
1612#endif
1613
1614		case 19:	/* mfcr */
1615			imm = 0xffffffffUL;
1616			if ((word >> 20) & 1) {
1617				imm = 0xf0000000UL;
1618				for (sh = 0; sh < 8; ++sh) {
1619					if (word & (0x80000 >> sh))
1620						break;
1621					imm >>= 4;
1622				}
1623			}
1624			op->val = regs->ccr & imm;
1625			goto compute_done;
1626
1627		case 144:	/* mtcrf */
1628			op->type = COMPUTE + SETCC;
1629			imm = 0xf0000000UL;
1630			val = regs->gpr[rd];
1631			op->ccval = regs->ccr;
1632			for (sh = 0; sh < 8; ++sh) {
1633				if (word & (0x80000 >> sh))
1634					op->ccval = (op->ccval & ~imm) |
1635						(val & imm);
1636				imm >>= 4;
1637			}
1638			return 1;
1639
1640		case 339:	/* mfspr */
1641			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1642			op->type = MFSPR;
1643			op->reg = rd;
1644			op->spr = spr;
1645			if (spr == SPRN_XER || spr == SPRN_LR ||
1646			    spr == SPRN_CTR)
1647				return 1;
1648			return 0;
1649
1650		case 467:	/* mtspr */
1651			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1652			op->type = MTSPR;
1653			op->val = regs->gpr[rd];
1654			op->spr = spr;
1655			if (spr == SPRN_XER || spr == SPRN_LR ||
1656			    spr == SPRN_CTR)
1657				return 1;
1658			return 0;
1659
1660/*
1661 * Compare instructions
1662 */
1663		case 0:	/* cmp */
1664			val = regs->gpr[ra];
1665			val2 = regs->gpr[rb];
1666#ifdef __powerpc64__
1667			if ((rd & 1) == 0) {
1668				/* word (32-bit) compare */
1669				val = (int) val;
1670				val2 = (int) val2;
1671			}
1672#endif
1673			do_cmp_signed(regs, op, val, val2, rd >> 2);
1674			return 1;
1675
1676		case 32:	/* cmpl */
1677			val = regs->gpr[ra];
1678			val2 = regs->gpr[rb];
1679#ifdef __powerpc64__
1680			if ((rd & 1) == 0) {
1681				/* word (32-bit) compare */
1682				val = (unsigned int) val;
1683				val2 = (unsigned int) val2;
1684			}
1685#endif
1686			do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1687			return 1;
1688
1689		case 508: /* cmpb */
1690			do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1691			goto logical_done_nocc;
1692
1693/*
1694 * Arithmetic instructions
1695 */
1696		case 8:	/* subfc */
1697			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1698				       regs->gpr[rb], 1);
1699			goto arith_done;
1700#ifdef __powerpc64__
1701		case 9:	/* mulhdu */
1702			asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1703			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1704			goto arith_done;
1705#endif
1706		case 10:	/* addc */
1707			add_with_carry(regs, op, rd, regs->gpr[ra],
1708				       regs->gpr[rb], 0);
1709			goto arith_done;
1710
1711		case 11:	/* mulhwu */
1712			asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1713			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1714			goto arith_done;
1715
1716		case 40:	/* subf */
1717			op->val = regs->gpr[rb] - regs->gpr[ra];
1718			goto arith_done;
1719#ifdef __powerpc64__
1720		case 73:	/* mulhd */
1721			asm("mulhd %0,%1,%2" : "=r" (op->val) :
1722			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1723			goto arith_done;
1724#endif
1725		case 75:	/* mulhw */
1726			asm("mulhw %0,%1,%2" : "=r" (op->val) :
1727			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1728			goto arith_done;
1729
1730		case 104:	/* neg */
1731			op->val = -regs->gpr[ra];
1732			goto arith_done;
1733
1734		case 136:	/* subfe */
1735			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1736				       regs->gpr[rb], regs->xer & XER_CA);
1737			goto arith_done;
1738
1739		case 138:	/* adde */
1740			add_with_carry(regs, op, rd, regs->gpr[ra],
1741				       regs->gpr[rb], regs->xer & XER_CA);
1742			goto arith_done;
1743
1744		case 200:	/* subfze */
1745			add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1746				       regs->xer & XER_CA);
1747			goto arith_done;
1748
1749		case 202:	/* addze */
1750			add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1751				       regs->xer & XER_CA);
1752			goto arith_done;
1753
1754		case 232:	/* subfme */
1755			add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1756				       regs->xer & XER_CA);
1757			goto arith_done;
1758#ifdef __powerpc64__
1759		case 233:	/* mulld */
1760			op->val = regs->gpr[ra] * regs->gpr[rb];
1761			goto arith_done;
1762#endif
1763		case 234:	/* addme */
1764			add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1765				       regs->xer & XER_CA);
1766			goto arith_done;
1767
1768		case 235:	/* mullw */
1769			op->val = (long)(int) regs->gpr[ra] *
1770				(int) regs->gpr[rb];
1771
1772			goto arith_done;
1773#ifdef __powerpc64__
1774		case 265:	/* modud */
1775			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1776				return -1;
1777			op->val = regs->gpr[ra] % regs->gpr[rb];
1778			goto compute_done;
1779#endif
1780		case 266:	/* add */
1781			op->val = regs->gpr[ra] + regs->gpr[rb];
1782			goto arith_done;
1783
1784		case 267:	/* moduw */
1785			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1786				return -1;
1787			op->val = (unsigned int) regs->gpr[ra] %
1788				(unsigned int) regs->gpr[rb];
1789			goto compute_done;
1790#ifdef __powerpc64__
1791		case 457:	/* divdu */
1792			op->val = regs->gpr[ra] / regs->gpr[rb];
1793			goto arith_done;
1794#endif
1795		case 459:	/* divwu */
1796			op->val = (unsigned int) regs->gpr[ra] /
1797				(unsigned int) regs->gpr[rb];
1798			goto arith_done;
1799#ifdef __powerpc64__
1800		case 489:	/* divd */
1801			op->val = (long int) regs->gpr[ra] /
1802				(long int) regs->gpr[rb];
1803			goto arith_done;
1804#endif
1805		case 491:	/* divw */
1806			op->val = (int) regs->gpr[ra] /
1807				(int) regs->gpr[rb];
1808			goto arith_done;
1809#ifdef __powerpc64__
1810		case 425:	/* divde[.] */
1811			asm volatile(PPC_DIVDE(%0, %1, %2) :
1812				"=r" (op->val) : "r" (regs->gpr[ra]),
1813				"r" (regs->gpr[rb]));
1814			goto arith_done;
1815		case 393:	/* divdeu[.] */
1816			asm volatile(PPC_DIVDEU(%0, %1, %2) :
1817				"=r" (op->val) : "r" (regs->gpr[ra]),
1818				"r" (regs->gpr[rb]));
1819			goto arith_done;
1820#endif
1821		case 755:	/* darn */
1822			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1823				return -1;
1824			switch (ra & 0x3) {
1825			case 0:
1826				/* 32-bit conditioned */
1827				asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
1828				goto compute_done;
1829
1830			case 1:
1831				/* 64-bit conditioned */
1832				asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
1833				goto compute_done;
1834
1835			case 2:
1836				/* 64-bit raw */
1837				asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
1838				goto compute_done;
1839			}
1840
1841			return -1;
1842#ifdef __powerpc64__
1843		case 777:	/* modsd */
1844			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1845				return -1;
1846			op->val = (long int) regs->gpr[ra] %
1847				(long int) regs->gpr[rb];
1848			goto compute_done;
1849#endif
1850		case 779:	/* modsw */
1851			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1852				return -1;
1853			op->val = (int) regs->gpr[ra] %
1854				(int) regs->gpr[rb];
1855			goto compute_done;
1856
1857
1858/*
1859 * Logical instructions
1860 */
1861		case 26:	/* cntlzw */
1862			val = (unsigned int) regs->gpr[rd];
1863			op->val = ( val ? __builtin_clz(val) : 32 );
1864			goto logical_done;
1865#ifdef __powerpc64__
1866		case 58:	/* cntlzd */
1867			val = regs->gpr[rd];
1868			op->val = ( val ? __builtin_clzl(val) : 64 );
1869			goto logical_done;
1870#endif
1871		case 28:	/* and */
1872			op->val = regs->gpr[rd] & regs->gpr[rb];
1873			goto logical_done;
1874
1875		case 60:	/* andc */
1876			op->val = regs->gpr[rd] & ~regs->gpr[rb];
1877			goto logical_done;
1878
1879		case 122:	/* popcntb */
1880			do_popcnt(regs, op, regs->gpr[rd], 8);
1881			goto logical_done_nocc;
1882
1883		case 124:	/* nor */
1884			op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
1885			goto logical_done;
1886
1887		case 154:	/* prtyw */
1888			do_prty(regs, op, regs->gpr[rd], 32);
1889			goto logical_done_nocc;
1890
1891		case 186:	/* prtyd */
1892			do_prty(regs, op, regs->gpr[rd], 64);
1893			goto logical_done_nocc;
1894#ifdef CONFIG_PPC64
1895		case 252:	/* bpermd */
1896			do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
1897			goto logical_done_nocc;
1898#endif
1899		case 284:	/* eqv */
1900			op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
1901			goto logical_done;
1902
1903		case 316:	/* xor */
1904			op->val = regs->gpr[rd] ^ regs->gpr[rb];
1905			goto logical_done;
1906
1907		case 378:	/* popcntw */
1908			do_popcnt(regs, op, regs->gpr[rd], 32);
1909			goto logical_done_nocc;
1910
1911		case 412:	/* orc */
1912			op->val = regs->gpr[rd] | ~regs->gpr[rb];
1913			goto logical_done;
1914
1915		case 444:	/* or */
1916			op->val = regs->gpr[rd] | regs->gpr[rb];
1917			goto logical_done;
1918
1919		case 476:	/* nand */
1920			op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
1921			goto logical_done;
1922#ifdef CONFIG_PPC64
1923		case 506:	/* popcntd */
1924			do_popcnt(regs, op, regs->gpr[rd], 64);
1925			goto logical_done_nocc;
1926#endif
1927		case 538:	/* cnttzw */
1928			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1929				return -1;
1930			val = (unsigned int) regs->gpr[rd];
1931			op->val = (val ? __builtin_ctz(val) : 32);
1932			goto logical_done;
1933#ifdef __powerpc64__
1934		case 570:	/* cnttzd */
1935			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1936				return -1;
1937			val = regs->gpr[rd];
1938			op->val = (val ? __builtin_ctzl(val) : 64);
1939			goto logical_done;
1940#endif
1941		case 922:	/* extsh */
1942			op->val = (signed short) regs->gpr[rd];
1943			goto logical_done;
1944
1945		case 954:	/* extsb */
1946			op->val = (signed char) regs->gpr[rd];
1947			goto logical_done;
1948#ifdef __powerpc64__
1949		case 986:	/* extsw */
1950			op->val = (signed int) regs->gpr[rd];
1951			goto logical_done;
1952#endif
1953
1954/*
1955 * Shift instructions
1956 */
1957		case 24:	/* slw */
1958			sh = regs->gpr[rb] & 0x3f;
1959			if (sh < 32)
1960				op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
1961			else
1962				op->val = 0;
1963			goto logical_done;
1964
1965		case 536:	/* srw */
1966			sh = regs->gpr[rb] & 0x3f;
1967			if (sh < 32)
1968				op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
1969			else
1970				op->val = 0;
1971			goto logical_done;
1972
1973		case 792:	/* sraw */
1974			op->type = COMPUTE + SETREG + SETXER;
1975			sh = regs->gpr[rb] & 0x3f;
1976			ival = (signed int) regs->gpr[rd];
1977			op->val = ival >> (sh < 32 ? sh : 31);
1978			op->xerval = regs->xer;
1979			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
1980				op->xerval |= XER_CA;
1981			else
1982				op->xerval &= ~XER_CA;
1983			set_ca32(op, op->xerval & XER_CA);
1984			goto logical_done;
1985
1986		case 824:	/* srawi */
1987			op->type = COMPUTE + SETREG + SETXER;
1988			sh = rb;
1989			ival = (signed int) regs->gpr[rd];
1990			op->val = ival >> sh;
1991			op->xerval = regs->xer;
1992			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1993				op->xerval |= XER_CA;
1994			else
1995				op->xerval &= ~XER_CA;
1996			set_ca32(op, op->xerval & XER_CA);
1997			goto logical_done;
1998
1999#ifdef __powerpc64__
2000		case 27:	/* sld */
2001			sh = regs->gpr[rb] & 0x7f;
2002			if (sh < 64)
2003				op->val = regs->gpr[rd] << sh;
2004			else
2005				op->val = 0;
2006			goto logical_done;
2007
2008		case 539:	/* srd */
2009			sh = regs->gpr[rb] & 0x7f;
2010			if (sh < 64)
2011				op->val = regs->gpr[rd] >> sh;
2012			else
2013				op->val = 0;
2014			goto logical_done;
2015
2016		case 794:	/* srad */
2017			op->type = COMPUTE + SETREG + SETXER;
2018			sh = regs->gpr[rb] & 0x7f;
2019			ival = (signed long int) regs->gpr[rd];
2020			op->val = ival >> (sh < 64 ? sh : 63);
2021			op->xerval = regs->xer;
2022			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
2023				op->xerval |= XER_CA;
2024			else
2025				op->xerval &= ~XER_CA;
2026			set_ca32(op, op->xerval & XER_CA);
2027			goto logical_done;
2028
2029		case 826:	/* sradi with sh_5 = 0 */
2030		case 827:	/* sradi with sh_5 = 1 */
2031			op->type = COMPUTE + SETREG + SETXER;
2032			sh = rb | ((word & 2) << 4);
2033			ival = (signed long int) regs->gpr[rd];
2034			op->val = ival >> sh;
2035			op->xerval = regs->xer;
2036			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2037				op->xerval |= XER_CA;
2038			else
2039				op->xerval &= ~XER_CA;
2040			set_ca32(op, op->xerval & XER_CA);
2041			goto logical_done;
2042
2043		case 890:	/* extswsli with sh_5 = 0 */
2044		case 891:	/* extswsli with sh_5 = 1 */
2045			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2046				return -1;
2047			op->type = COMPUTE + SETREG;
2048			sh = rb | ((word & 2) << 4);
2049			val = (signed int) regs->gpr[rd];
2050			if (sh)
2051				op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
2052			else
2053				op->val = val;
2054			goto logical_done;
2055
2056#endif /* __powerpc64__ */
2057
2058/*
2059 * Cache instructions
2060 */
2061		case 54:	/* dcbst */
2062			op->type = MKOP(CACHEOP, DCBST, 0);
2063			op->ea = xform_ea(word, regs);
2064			return 0;
2065
2066		case 86:	/* dcbf */
2067			op->type = MKOP(CACHEOP, DCBF, 0);
2068			op->ea = xform_ea(word, regs);
2069			return 0;
2070
2071		case 246:	/* dcbtst */
2072			op->type = MKOP(CACHEOP, DCBTST, 0);
2073			op->ea = xform_ea(word, regs);
2074			op->reg = rd;
2075			return 0;
2076
2077		case 278:	/* dcbt */
2078			op->type = MKOP(CACHEOP, DCBT, 0);
2079			op->ea = xform_ea(word, regs);
2080			op->reg = rd;
2081			return 0;
2082
2083		case 982:	/* icbi */
2084			op->type = MKOP(CACHEOP, ICBI, 0);
2085			op->ea = xform_ea(word, regs);
2086			return 0;
2087
2088		case 1014:	/* dcbz */
2089			op->type = MKOP(CACHEOP, DCBZ, 0);
2090			op->ea = xform_ea(word, regs);
2091			return 0;
2092		}
2093		break;
2094	}
2095
2096/*
2097 * Loads and stores.
2098 */
2099	op->type = UNKNOWN;
2100	op->update_reg = ra;
2101	op->reg = rd;
2102	op->val = regs->gpr[rd];
2103	u = (word >> 20) & UPDATE;
2104	op->vsx_flags = 0;
2105
2106	switch (opcode) {
2107	case 31:
2108		u = word & UPDATE;
2109		op->ea = xform_ea(word, regs);
2110		switch ((word >> 1) & 0x3ff) {
2111		case 20:	/* lwarx */
2112			op->type = MKOP(LARX, 0, 4);
2113			break;
2114
2115		case 150:	/* stwcx. */
2116			op->type = MKOP(STCX, 0, 4);
2117			break;
2118
2119#ifdef __powerpc64__
2120		case 84:	/* ldarx */
2121			op->type = MKOP(LARX, 0, 8);
2122			break;
2123
2124		case 214:	/* stdcx. */
2125			op->type = MKOP(STCX, 0, 8);
2126			break;
2127
2128		case 52:	/* lbarx */
2129			op->type = MKOP(LARX, 0, 1);
2130			break;
2131
2132		case 694:	/* stbcx. */
2133			op->type = MKOP(STCX, 0, 1);
2134			break;
2135
2136		case 116:	/* lharx */
2137			op->type = MKOP(LARX, 0, 2);
2138			break;
2139
2140		case 726:	/* sthcx. */
2141			op->type = MKOP(STCX, 0, 2);
2142			break;
2143
2144		case 276:	/* lqarx */
2145			if (!((rd & 1) || rd == ra || rd == rb))
2146				op->type = MKOP(LARX, 0, 16);
2147			break;
2148
2149		case 182:	/* stqcx. */
2150			if (!(rd & 1))
2151				op->type = MKOP(STCX, 0, 16);
2152			break;
2153#endif
2154
2155		case 23:	/* lwzx */
2156		case 55:	/* lwzux */
2157			op->type = MKOP(LOAD, u, 4);
2158			break;
2159
2160		case 87:	/* lbzx */
2161		case 119:	/* lbzux */
2162			op->type = MKOP(LOAD, u, 1);
2163			break;
2164
2165#ifdef CONFIG_ALTIVEC
2166		/*
2167		 * Note: for the load/store vector element instructions,
2168		 * bits of the EA say which field of the VMX register to use.
2169		 */
2170		case 7:		/* lvebx */
2171			op->type = MKOP(LOAD_VMX, 0, 1);
2172			op->element_size = 1;
2173			break;
2174
2175		case 39:	/* lvehx */
2176			op->type = MKOP(LOAD_VMX, 0, 2);
2177			op->element_size = 2;
2178			break;
2179
2180		case 71:	/* lvewx */
2181			op->type = MKOP(LOAD_VMX, 0, 4);
2182			op->element_size = 4;
2183			break;
2184
2185		case 103:	/* lvx */
2186		case 359:	/* lvxl */
2187			op->type = MKOP(LOAD_VMX, 0, 16);
2188			op->element_size = 16;
2189			break;
2190
2191		case 135:	/* stvebx */
2192			op->type = MKOP(STORE_VMX, 0, 1);
2193			op->element_size = 1;
2194			break;
2195
2196		case 167:	/* stvehx */
2197			op->type = MKOP(STORE_VMX, 0, 2);
2198			op->element_size = 2;
2199			break;
2200
2201		case 199:	/* stvewx */
2202			op->type = MKOP(STORE_VMX, 0, 4);
2203			op->element_size = 4;
2204			break;
2205
2206		case 231:	/* stvx */
2207		case 487:	/* stvxl */
2208			op->type = MKOP(STORE_VMX, 0, 16);
2209			break;
2210#endif /* CONFIG_ALTIVEC */
2211
2212#ifdef __powerpc64__
2213		case 21:	/* ldx */
2214		case 53:	/* ldux */
2215			op->type = MKOP(LOAD, u, 8);
2216			break;
2217
2218		case 149:	/* stdx */
2219		case 181:	/* stdux */
2220			op->type = MKOP(STORE, u, 8);
2221			break;
2222#endif
2223
2224		case 151:	/* stwx */
2225		case 183:	/* stwux */
2226			op->type = MKOP(STORE, u, 4);
2227			break;
2228
2229		case 215:	/* stbx */
2230		case 247:	/* stbux */
2231			op->type = MKOP(STORE, u, 1);
2232			break;
2233
2234		case 279:	/* lhzx */
2235		case 311:	/* lhzux */
2236			op->type = MKOP(LOAD, u, 2);
2237			break;
2238
2239#ifdef __powerpc64__
2240		case 341:	/* lwax */
2241		case 373:	/* lwaux */
2242			op->type = MKOP(LOAD, SIGNEXT | u, 4);
2243			break;
2244#endif
2245
2246		case 343:	/* lhax */
2247		case 375:	/* lhaux */
2248			op->type = MKOP(LOAD, SIGNEXT | u, 2);
2249			break;
2250
2251		case 407:	/* sthx */
2252		case 439:	/* sthux */
2253			op->type = MKOP(STORE, u, 2);
2254			break;
2255
2256#ifdef __powerpc64__
2257		case 532:	/* ldbrx */
2258			op->type = MKOP(LOAD, BYTEREV, 8);
2259			break;
2260
2261#endif
2262		case 533:	/* lswx */
2263			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
2264			break;
2265
2266		case 534:	/* lwbrx */
2267			op->type = MKOP(LOAD, BYTEREV, 4);
2268			break;
2269
2270		case 597:	/* lswi */
2271			if (rb == 0)
2272				rb = 32;	/* # bytes to load */
2273			op->type = MKOP(LOAD_MULTI, 0, rb);
2274			op->ea = ra ? regs->gpr[ra] : 0;
2275			break;
2276
2277#ifdef CONFIG_PPC_FPU
2278		case 535:	/* lfsx */
2279		case 567:	/* lfsux */
2280			op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2281			break;
2282
2283		case 599:	/* lfdx */
2284		case 631:	/* lfdux */
2285			op->type = MKOP(LOAD_FP, u, 8);
2286			break;
2287
2288		case 663:	/* stfsx */
2289		case 695:	/* stfsux */
2290			op->type = MKOP(STORE_FP, u | FPCONV, 4);
2291			break;
2292
2293		case 727:	/* stfdx */
2294		case 759:	/* stfdux */
2295			op->type = MKOP(STORE_FP, u, 8);
2296			break;
2297
2298#ifdef __powerpc64__
2299		case 791:	/* lfdpx */
2300			op->type = MKOP(LOAD_FP, 0, 16);
2301			break;
2302
2303		case 855:	/* lfiwax */
2304			op->type = MKOP(LOAD_FP, SIGNEXT, 4);
2305			break;
2306
2307		case 887:	/* lfiwzx */
2308			op->type = MKOP(LOAD_FP, 0, 4);
2309			break;
2310
2311		case 919:	/* stfdpx */
2312			op->type = MKOP(STORE_FP, 0, 16);
2313			break;
2314
2315		case 983:	/* stfiwx */
2316			op->type = MKOP(STORE_FP, 0, 4);
2317			break;
2318#endif /* __powerpc64__ */
2319#endif /* CONFIG_PPC_FPU */
2320
2321#ifdef __powerpc64__
2322		case 660:	/* stdbrx */
2323			op->type = MKOP(STORE, BYTEREV, 8);
2324			op->val = byterev_8(regs->gpr[rd]);
2325			break;
2326
2327#endif
2328		case 661:	/* stswx */
2329			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2330			break;
2331
2332		case 662:	/* stwbrx */
2333			op->type = MKOP(STORE, BYTEREV, 4);
2334			op->val = byterev_4(regs->gpr[rd]);
2335			break;
2336
2337		case 725:	/* stswi */
2338			if (rb == 0)
2339				rb = 32;	/* # bytes to store */
2340			op->type = MKOP(STORE_MULTI, 0, rb);
2341			op->ea = ra ? regs->gpr[ra] : 0;
2342			break;
2343
2344		case 790:	/* lhbrx */
2345			op->type = MKOP(LOAD, BYTEREV, 2);
2346			break;
2347
2348		case 918:	/* sthbrx */
2349			op->type = MKOP(STORE, BYTEREV, 2);
2350			op->val = byterev_2(regs->gpr[rd]);
2351			break;
2352
2353#ifdef CONFIG_VSX
2354		case 12:	/* lxsiwzx */
2355			op->reg = rd | ((word & 1) << 5);
2356			op->type = MKOP(LOAD_VSX, 0, 4);
2357			op->element_size = 8;
2358			break;
2359
2360		case 76:	/* lxsiwax */
2361			op->reg = rd | ((word & 1) << 5);
2362			op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2363			op->element_size = 8;
2364			break;
2365
2366		case 140:	/* stxsiwx */
2367			op->reg = rd | ((word & 1) << 5);
2368			op->type = MKOP(STORE_VSX, 0, 4);
2369			op->element_size = 8;
2370			break;
2371
2372		case 268:	/* lxvx */
2373			op->reg = rd | ((word & 1) << 5);
2374			op->type = MKOP(LOAD_VSX, 0, 16);
2375			op->element_size = 16;
2376			op->vsx_flags = VSX_CHECK_VEC;
2377			break;
2378
2379		case 269:	/* lxvl */
2380		case 301: {	/* lxvll */
2381			int nb;
2382			op->reg = rd | ((word & 1) << 5);
2383			op->ea = ra ? regs->gpr[ra] : 0;
2384			nb = regs->gpr[rb] & 0xff;
2385			if (nb > 16)
2386				nb = 16;
2387			op->type = MKOP(LOAD_VSX, 0, nb);
2388			op->element_size = 16;
2389			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2390				VSX_CHECK_VEC;
2391			break;
2392		}
2393		case 332:	/* lxvdsx */
2394			op->reg = rd | ((word & 1) << 5);
2395			op->type = MKOP(LOAD_VSX, 0, 8);
2396			op->element_size = 8;
2397			op->vsx_flags = VSX_SPLAT;
2398			break;
2399
2400		case 364:	/* lxvwsx */
2401			op->reg = rd | ((word & 1) << 5);
2402			op->type = MKOP(LOAD_VSX, 0, 4);
2403			op->element_size = 4;
2404			op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2405			break;
2406
2407		case 396:	/* stxvx */
2408			op->reg = rd | ((word & 1) << 5);
2409			op->type = MKOP(STORE_VSX, 0, 16);
2410			op->element_size = 16;
2411			op->vsx_flags = VSX_CHECK_VEC;
2412			break;
2413
2414		case 397:	/* stxvl */
2415		case 429: {	/* stxvll */
2416			int nb;
2417			op->reg = rd | ((word & 1) << 5);
2418			op->ea = ra ? regs->gpr[ra] : 0;
2419			nb = regs->gpr[rb] & 0xff;
2420			if (nb > 16)
2421				nb = 16;
2422			op->type = MKOP(STORE_VSX, 0, nb);
2423			op->element_size = 16;
2424			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2425				VSX_CHECK_VEC;
2426			break;
2427		}
2428		case 524:	/* lxsspx */
2429			op->reg = rd | ((word & 1) << 5);
2430			op->type = MKOP(LOAD_VSX, 0, 4);
2431			op->element_size = 8;
2432			op->vsx_flags = VSX_FPCONV;
2433			break;
2434
2435		case 588:	/* lxsdx */
2436			op->reg = rd | ((word & 1) << 5);
2437			op->type = MKOP(LOAD_VSX, 0, 8);
2438			op->element_size = 8;
2439			break;
2440
2441		case 652:	/* stxsspx */
2442			op->reg = rd | ((word & 1) << 5);
2443			op->type = MKOP(STORE_VSX, 0, 4);
2444			op->element_size = 8;
2445			op->vsx_flags = VSX_FPCONV;
2446			break;
2447
2448		case 716:	/* stxsdx */
2449			op->reg = rd | ((word & 1) << 5);
2450			op->type = MKOP(STORE_VSX, 0, 8);
2451			op->element_size = 8;
2452			break;
2453
2454		case 780:	/* lxvw4x */
2455			op->reg = rd | ((word & 1) << 5);
2456			op->type = MKOP(LOAD_VSX, 0, 16);
2457			op->element_size = 4;
2458			break;
2459
2460		case 781:	/* lxsibzx */
2461			op->reg = rd | ((word & 1) << 5);
2462			op->type = MKOP(LOAD_VSX, 0, 1);
2463			op->element_size = 8;
2464			op->vsx_flags = VSX_CHECK_VEC;
2465			break;
2466
2467		case 812:	/* lxvh8x */
2468			op->reg = rd | ((word & 1) << 5);
2469			op->type = MKOP(LOAD_VSX, 0, 16);
2470			op->element_size = 2;
2471			op->vsx_flags = VSX_CHECK_VEC;
2472			break;
2473
2474		case 813:	/* lxsihzx */
2475			op->reg = rd | ((word & 1) << 5);
2476			op->type = MKOP(LOAD_VSX, 0, 2);
2477			op->element_size = 8;
2478			op->vsx_flags = VSX_CHECK_VEC;
2479			break;
2480
2481		case 844:	/* lxvd2x */
2482			op->reg = rd | ((word & 1) << 5);
2483			op->type = MKOP(LOAD_VSX, 0, 16);
2484			op->element_size = 8;
2485			break;
2486
2487		case 876:	/* lxvb16x */
2488			op->reg = rd | ((word & 1) << 5);
2489			op->type = MKOP(LOAD_VSX, 0, 16);
2490			op->element_size = 1;
2491			op->vsx_flags = VSX_CHECK_VEC;
2492			break;
2493
2494		case 908:	/* stxvw4x */
2495			op->reg = rd | ((word & 1) << 5);
2496			op->type = MKOP(STORE_VSX, 0, 16);
2497			op->element_size = 4;
2498			break;
2499
2500		case 909:	/* stxsibx */
2501			op->reg = rd | ((word & 1) << 5);
2502			op->type = MKOP(STORE_VSX, 0, 1);
2503			op->element_size = 8;
2504			op->vsx_flags = VSX_CHECK_VEC;
2505			break;
2506
2507		case 940:	/* stxvh8x */
2508			op->reg = rd | ((word & 1) << 5);
2509			op->type = MKOP(STORE_VSX, 0, 16);
2510			op->element_size = 2;
2511			op->vsx_flags = VSX_CHECK_VEC;
2512			break;
2513
2514		case 941:	/* stxsihx */
2515			op->reg = rd | ((word & 1) << 5);
2516			op->type = MKOP(STORE_VSX, 0, 2);
2517			op->element_size = 8;
2518			op->vsx_flags = VSX_CHECK_VEC;
2519			break;
2520
2521		case 972:	/* stxvd2x */
2522			op->reg = rd | ((word & 1) << 5);
2523			op->type = MKOP(STORE_VSX, 0, 16);
2524			op->element_size = 8;
2525			break;
2526
2527		case 1004:	/* stxvb16x */
2528			op->reg = rd | ((word & 1) << 5);
2529			op->type = MKOP(STORE_VSX, 0, 16);
2530			op->element_size = 1;
2531			op->vsx_flags = VSX_CHECK_VEC;
2532			break;
2533
2534#endif /* CONFIG_VSX */
2535		}
2536		break;
2537
2538	case 32:	/* lwz */
2539	case 33:	/* lwzu */
2540		op->type = MKOP(LOAD, u, 4);
2541		op->ea = dform_ea(word, regs);
2542		break;
2543
2544	case 34:	/* lbz */
2545	case 35:	/* lbzu */
2546		op->type = MKOP(LOAD, u, 1);
2547		op->ea = dform_ea(word, regs);
2548		break;
2549
2550	case 36:	/* stw */
2551	case 37:	/* stwu */
2552		op->type = MKOP(STORE, u, 4);
2553		op->ea = dform_ea(word, regs);
2554		break;
2555
2556	case 38:	/* stb */
2557	case 39:	/* stbu */
2558		op->type = MKOP(STORE, u, 1);
2559		op->ea = dform_ea(word, regs);
2560		break;
2561
2562	case 40:	/* lhz */
2563	case 41:	/* lhzu */
2564		op->type = MKOP(LOAD, u, 2);
2565		op->ea = dform_ea(word, regs);
2566		break;
2567
2568	case 42:	/* lha */
2569	case 43:	/* lhau */
2570		op->type = MKOP(LOAD, SIGNEXT | u, 2);
2571		op->ea = dform_ea(word, regs);
2572		break;
2573
2574	case 44:	/* sth */
2575	case 45:	/* sthu */
2576		op->type = MKOP(STORE, u, 2);
2577		op->ea = dform_ea(word, regs);
2578		break;
2579
2580	case 46:	/* lmw */
2581		if (ra >= rd)
2582			break;		/* invalid form, ra in range to load */
2583		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2584		op->ea = dform_ea(word, regs);
2585		break;
2586
2587	case 47:	/* stmw */
2588		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2589		op->ea = dform_ea(word, regs);
2590		break;
2591
2592#ifdef CONFIG_PPC_FPU
2593	case 48:	/* lfs */
2594	case 49:	/* lfsu */
2595		op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2596		op->ea = dform_ea(word, regs);
2597		break;
2598
2599	case 50:	/* lfd */
2600	case 51:	/* lfdu */
2601		op->type = MKOP(LOAD_FP, u, 8);
2602		op->ea = dform_ea(word, regs);
2603		break;
2604
2605	case 52:	/* stfs */
2606	case 53:	/* stfsu */
2607		op->type = MKOP(STORE_FP, u | FPCONV, 4);
2608		op->ea = dform_ea(word, regs);
2609		break;
2610
2611	case 54:	/* stfd */
2612	case 55:	/* stfdu */
2613		op->type = MKOP(STORE_FP, u, 8);
2614		op->ea = dform_ea(word, regs);
2615		break;
2616#endif
2617
2618#ifdef __powerpc64__
2619	case 56:	/* lq */
2620		if (!((rd & 1) || (rd == ra)))
2621			op->type = MKOP(LOAD, 0, 16);
2622		op->ea = dqform_ea(word, regs);
2623		break;
2624#endif
2625
2626#ifdef CONFIG_VSX
2627	case 57:	/* lfdp, lxsd, lxssp */
2628		op->ea = dsform_ea(word, regs);
2629		switch (word & 3) {
2630		case 0:		/* lfdp */
2631			if (rd & 1)
2632				break;		/* reg must be even */
2633			op->type = MKOP(LOAD_FP, 0, 16);
2634			break;
2635		case 2:		/* lxsd */
2636			op->reg = rd + 32;
2637			op->type = MKOP(LOAD_VSX, 0, 8);
2638			op->element_size = 8;
2639			op->vsx_flags = VSX_CHECK_VEC;
2640			break;
2641		case 3:		/* lxssp */
2642			op->reg = rd + 32;
2643			op->type = MKOP(LOAD_VSX, 0, 4);
2644			op->element_size = 8;
2645			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2646			break;
2647		}
2648		break;
2649#endif /* CONFIG_VSX */
2650
2651#ifdef __powerpc64__
2652	case 58:	/* ld[u], lwa */
2653		op->ea = dsform_ea(word, regs);
2654		switch (word & 3) {
2655		case 0:		/* ld */
2656			op->type = MKOP(LOAD, 0, 8);
2657			break;
2658		case 1:		/* ldu */
2659			op->type = MKOP(LOAD, UPDATE, 8);
2660			break;
2661		case 2:		/* lwa */
2662			op->type = MKOP(LOAD, SIGNEXT, 4);
2663			break;
2664		}
2665		break;
2666#endif
2667
2668#ifdef CONFIG_VSX
2669	case 61:	/* stfdp, lxv, stxsd, stxssp, stxv */
2670		switch (word & 7) {
2671		case 0:		/* stfdp with LSB of DS field = 0 */
2672		case 4:		/* stfdp with LSB of DS field = 1 */
2673			op->ea = dsform_ea(word, regs);
2674			op->type = MKOP(STORE_FP, 0, 16);
2675			break;
2676
2677		case 1:		/* lxv */
2678			op->ea = dqform_ea(word, regs);
2679			if (word & 8)
2680				op->reg = rd + 32;
2681			op->type = MKOP(LOAD_VSX, 0, 16);
2682			op->element_size = 16;
2683			op->vsx_flags = VSX_CHECK_VEC;
2684			break;
2685
2686		case 2:		/* stxsd with LSB of DS field = 0 */
2687		case 6:		/* stxsd with LSB of DS field = 1 */
2688			op->ea = dsform_ea(word, regs);
2689			op->reg = rd + 32;
2690			op->type = MKOP(STORE_VSX, 0, 8);
2691			op->element_size = 8;
2692			op->vsx_flags = VSX_CHECK_VEC;
2693			break;
2694
2695		case 3:		/* stxssp with LSB of DS field = 0 */
2696		case 7:		/* stxssp with LSB of DS field = 1 */
2697			op->ea = dsform_ea(word, regs);
2698			op->reg = rd + 32;
2699			op->type = MKOP(STORE_VSX, 0, 4);
2700			op->element_size = 8;
2701			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2702			break;
2703
2704		case 5:		/* stxv */
2705			op->ea = dqform_ea(word, regs);
2706			if (word & 8)
2707				op->reg = rd + 32;
2708			op->type = MKOP(STORE_VSX, 0, 16);
2709			op->element_size = 16;
2710			op->vsx_flags = VSX_CHECK_VEC;
2711			break;
2712		}
2713		break;
2714#endif /* CONFIG_VSX */
2715
2716#ifdef __powerpc64__
2717	case 62:	/* std[u] */
2718		op->ea = dsform_ea(word, regs);
2719		switch (word & 3) {
2720		case 0:		/* std */
2721			op->type = MKOP(STORE, 0, 8);
2722			break;
2723		case 1:		/* stdu */
2724			op->type = MKOP(STORE, UPDATE, 8);
2725			break;
2726		case 2:		/* stq */
2727			if (!(rd & 1))
2728				op->type = MKOP(STORE, 0, 16);
2729			break;
2730		}
2731		break;
2732	case 1: /* Prefixed instructions */
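		/*
		 * Prefixed instructions are 64 bits long: "word" holds the
		 * prefix and "suffix" the second word.  prefixtype selects
		 * among the four prefix types; for the load/store types,
		 * R = 1 together with a non-zero RA is an invalid form, so
		 * those cases break without setting up an operation.
		 */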
2733		prefix_r = GET_PREFIX_R(word);
2734		ra = GET_PREFIX_RA(suffix);
2735		op->update_reg = ra;
2736		rd = (suffix >> 21) & 0x1f;
2737		op->reg = rd;
2738		op->val = regs->gpr[rd];
2739
2740		suffixopcode = get_op(suffix);
2741		prefixtype = (word >> 24) & 0x3;
2742		switch (prefixtype) {
2743		case 0: /* Type 00  Eight-Byte Load/Store */
2744			if (prefix_r && ra)
2745				break;
2746			op->ea = mlsd_8lsd_ea(word, suffix, regs);
2747			switch (suffixopcode) {
2748			case 41:	/* plwa */
2749				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
2750				break;
2751			case 42:        /* plxsd */
2752				op->reg = rd + 32;
2753				op->type = MKOP(LOAD_VSX, PREFIXED, 8);
2754				op->element_size = 8;
2755				op->vsx_flags = VSX_CHECK_VEC;
2756				break;
2757			case 43:	/* plxssp */
2758				op->reg = rd + 32;
2759				op->type = MKOP(LOAD_VSX, PREFIXED, 4);
2760				op->element_size = 8;
2761				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2762				break;
2763			case 46:	/* pstxsd */
2764				op->reg = rd + 32;
2765				op->type = MKOP(STORE_VSX, PREFIXED, 8);
2766				op->element_size = 8;
2767				op->vsx_flags = VSX_CHECK_VEC;
2768				break;
2769			case 47:	/* pstxssp */
2770				op->reg = rd + 32;
2771				op->type = MKOP(STORE_VSX, PREFIXED, 4);
2772				op->element_size = 8;
2773				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2774				break;
2775			case 51:	/* plxv1 */
2776				op->reg += 32;
2777				fallthrough;
2778			case 50:	/* plxv0 */
2779				op->type = MKOP(LOAD_VSX, PREFIXED, 16);
2780				op->element_size = 16;
2781				op->vsx_flags = VSX_CHECK_VEC;
2782				break;
2783			case 55:	/* pstxv1 */
2784				op->reg = rd + 32;
2785				fallthrough;
2786			case 54:	/* pstxv0 */
2787				op->type = MKOP(STORE_VSX, PREFIXED, 16);
2788				op->element_size = 16;
2789				op->vsx_flags = VSX_CHECK_VEC;
2790				break;
2791			case 56:        /* plq */
2792				op->type = MKOP(LOAD, PREFIXED, 16);
2793				break;
2794			case 57:	/* pld */
2795				op->type = MKOP(LOAD, PREFIXED, 8);
2796				break;
2797			case 60:        /* pstq */
2798				op->type = MKOP(STORE, PREFIXED, 16);
2799				break;
2800			case 61:	/* pstd */
2801				op->type = MKOP(STORE, PREFIXED, 8);
2802				break;
2803			}
2804			break;
2805		case 1: /* Type 01 Eight-Byte Register-to-Register */
2806			break;
2807		case 2: /* Type 10 Modified Load/Store */
2808			if (prefix_r && ra)
2809				break;
2810			op->ea = mlsd_8lsd_ea(word, suffix, regs);
2811			switch (suffixopcode) {
2812			case 32:	/* plwz */
2813				op->type = MKOP(LOAD, PREFIXED, 4);
2814				break;
2815			case 34:	/* plbz */
2816				op->type = MKOP(LOAD, PREFIXED, 1);
2817				break;
2818			case 36:	/* pstw */
2819				op->type = MKOP(STORE, PREFIXED, 4);
2820				break;
2821			case 38:	/* pstb */
2822				op->type = MKOP(STORE, PREFIXED, 1);
2823				break;
2824			case 40:	/* plhz */
2825				op->type = MKOP(LOAD, PREFIXED, 2);
2826				break;
2827			case 42:	/* plha */
2828				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
2829				break;
2830			case 44:	/* psth */
2831				op->type = MKOP(STORE, PREFIXED, 2);
2832				break;
2833			case 48:        /* plfs */
2834				op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
2835				break;
2836			case 50:        /* plfd */
2837				op->type = MKOP(LOAD_FP, PREFIXED, 8);
2838				break;
2839			case 52:        /* pstfs */
2840				op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
2841				break;
2842			case 54:        /* pstfd */
2843				op->type = MKOP(STORE_FP, PREFIXED, 8);
2844				break;
2845			}
2846			break;
2847		case 3: /* Type 11 Modified Register-to-Register */
2848			break;
2849		}
2850#endif /* __powerpc64__ */
2851
2852	}
2853
2854#ifdef CONFIG_VSX
2855	if ((GETTYPE(op->type) == LOAD_VSX ||
2856	     GETTYPE(op->type) == STORE_VSX) &&
2857	    !cpu_has_feature(CPU_FTR_VSX)) {
2858		return -1;
2859	}
2860#endif /* CONFIG_VSX */
2861
2862	return 0;
2863
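/*
 * Common exits: a return value of 1 tells the caller that the operation
 * can be completed purely by emulate_update_regs(), while 0 means
 * further emulation (such as an actual memory access) is still needed.
 */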
2864 logical_done:
2865	if (word & 1)
2866		set_cr0(regs, op);
2867 logical_done_nocc:
2868	op->reg = ra;
2869	op->type |= SETREG;
2870	return 1;
2871
2872 arith_done:
2873	if (word & 1)
2874		set_cr0(regs, op);
2875 compute_done:
2876	op->reg = rd;
2877	op->type |= SETREG;
2878	return 1;
2879
2880 priv:
2881	op->type = INTERRUPT | 0x700;
2882	op->val = SRR1_PROGPRIV;
2883	return 0;
2884
2885 trap:
2886	op->type = INTERRUPT | 0x700;
2887	op->val = SRR1_PROGTRAP;
2888	return 0;
2889}
2890EXPORT_SYMBOL_GPL(analyse_instr);
2891NOKPROBE_SYMBOL(analyse_instr);
2892
2893/*
2894 * On PPC32 the stack pointer is always changed with "stwu" on r1, so
2895 * this emulated store could corrupt the exception frame.  Instead of
2896 * performing the store here, we only update gpr[1] and set a thread
2897 * flag; the exception return code checks that flag and then carries
2898 * out the real store safely, via a trampoline frame pushed below the
2899 * kprobed function's stack.
2900 */
2901static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
2902{
2903#ifdef CONFIG_PPC32
2904	/*
2905	 * Check whether this store would overflow the kernel stack
2906	 */
2907	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
2908		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
2909		return -EINVAL;
2910	}
2911#endif /* CONFIG_PPC32 */
2912	/*
2913	 * Warn if the flag is already set, since in that case we
2914	 * would lose the previously recorded value.
2915	 */
2916	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
2917	set_thread_flag(TIF_EMULATE_STACK_STORE);
2918	return 0;
2919}
2920
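/* Sign-extend a 2- or 4-byte value that was just loaded into *valp. */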
2921static nokprobe_inline void do_signext(unsigned long *valp, int size)
2922{
2923	switch (size) {
2924	case 2:
2925		*valp = (signed short) *valp;
2926		break;
2927	case 4:
2928		*valp = (signed int) *valp;
2929		break;
2930	}
2931}
2932
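/* Byte-reverse a 2-, 4- or (on 64-bit) 8-byte value in *valp. */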
2933static nokprobe_inline void do_byterev(unsigned long *valp, int size)
2934{
2935	switch (size) {
2936	case 2:
2937		*valp = byterev_2(*valp);
2938		break;
2939	case 4:
2940		*valp = byterev_4(*valp);
2941		break;
2942#ifdef __powerpc64__
2943	case 8:
2944		*valp = byterev_8(*valp);
2945		break;
2946#endif
2947	}
2948}
2949
2950/*
2951 * Emulate an instruction that can be executed just by updating
2952 * fields in *regs, i.e. one for which analyse_instr() returned 1.
2953 */
2954void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
2955{
2956	unsigned long next_pc;
2957
2958	next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
2959	switch (GETTYPE(op->type)) {
2960	case COMPUTE:
2961		if (op->type & SETREG)
2962			regs->gpr[op->reg] = op->val;
2963		if (op->type & SETCC)
2964			regs->ccr = op->ccval;
2965		if (op->type & SETXER)
2966			regs->xer = op->xerval;
2967		break;
2968
2969	case BRANCH:
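		/*
		 * LR is set to the address of the following instruction
		 * before next_pc is (possibly) redirected to the target.
		 */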
2970		if (op->type & SETLK)
2971			regs->link = next_pc;
2972		if (op->type & BRTAKEN)
2973			next_pc = op->val;
2974		if (op->type & DECCTR)
2975			--regs->ctr;
2976		break;
2977
2978	case BARRIER:
2979		switch (op->type & BARRIER_MASK) {
2980		case BARRIER_SYNC:
2981			mb();
2982			break;
2983		case BARRIER_ISYNC:
2984			isync();
2985			break;
2986		case BARRIER_EIEIO:
2987			eieio();
2988			break;
2989		case BARRIER_LWSYNC:
2990			asm volatile("lwsync" : : : "memory");
2991			break;
2992		case BARRIER_PTESYNC:
2993			asm volatile("ptesync" : : : "memory");
2994			break;
2995		}
2996		break;
2997
2998	case MFSPR:
2999		switch (op->spr) {
3000		case SPRN_XER:
3001			regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
3002			break;
3003		case SPRN_LR:
3004			regs->gpr[op->reg] = regs->link;
3005			break;
3006		case SPRN_CTR:
3007			regs->gpr[op->reg] = regs->ctr;
3008			break;
3009		default:
3010			WARN_ON_ONCE(1);
3011		}
3012		break;
3013
3014	case MTSPR:
3015		switch (op->spr) {
3016		case SPRN_XER:
3017			regs->xer = op->val & 0xffffffffUL;
3018			break;
3019		case SPRN_LR:
3020			regs->link = op->val;
3021			break;
3022		case SPRN_CTR:
3023			regs->ctr = op->val;
3024			break;
3025		default:
3026			WARN_ON_ONCE(1);
3027		}
3028		break;
3029
3030	default:
3031		WARN_ON_ONCE(1);
3032	}
3033	regs->nip = next_pc;
3034}
3035NOKPROBE_SYMBOL(emulate_update_regs);
3036
3037/*
3038 * Emulate a previously-analysed load or store instruction.
3039 * Return values are:
3040 * 0 = instruction emulated successfully
3041 * -EFAULT = address out of range or access faulted (regs->dar
3042 *	     contains the faulting address)
3043 * -EACCES = misaligned access, instruction requires alignment
3044 * -EINVAL = unknown operation in *op
3045 */
3046int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
3047{
3048	int err, size, type;
3049	int i, rd, nb;
3050	unsigned int cr;
3051	unsigned long val;
3052	unsigned long ea;
3053	bool cross_endian;
3054
3055	err = 0;
3056	size = GETSIZE(op->type);
3057	type = GETTYPE(op->type);
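	/*
	 * A byte swap is needed if the endianness of the context being
	 * emulated differs from the endianness the kernel runs with.
	 */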
3058	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
3059	ea = truncate_if_32bit(regs->msr, op->ea);
3060
3061	switch (type) {
3062	case LARX:
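		/*
		 * Load-and-reserve: execute the real larx instruction so
		 * that the reservation is established for a following stcx.
		 */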
3063		if (ea & (size - 1))
3064			return -EACCES;		/* can't handle misaligned */
3065		if (!address_ok(regs, ea, size))
3066			return -EFAULT;
3067		err = 0;
3068		val = 0;
3069		switch (size) {
3070#ifdef __powerpc64__
3071		case 1:
3072			__get_user_asmx(val, ea, err, "lbarx");
3073			break;
3074		case 2:
3075			__get_user_asmx(val, ea, err, "lharx");
3076			break;
3077#endif
3078		case 4:
3079			__get_user_asmx(val, ea, err, "lwarx");
3080			break;
3081#ifdef __powerpc64__
3082		case 8:
3083			__get_user_asmx(val, ea, err, "ldarx");
3084			break;
3085		case 16:
3086			err = do_lqarx(ea, &regs->gpr[op->reg]);
3087			break;
3088#endif
3089		default:
3090			return -EINVAL;
3091		}
3092		if (err) {
3093			regs->dar = ea;
3094			break;
3095		}
3096		if (size < 16)
3097			regs->gpr[op->reg] = val;
3098		break;
3099
3100	case STCX:
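		/*
		 * Store-conditional: execute the real stcx. and fold its
		 * CR0 result, together with XER[SO], into the emulated CR.
		 */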
3101		if (ea & (size - 1))
3102			return -EACCES;		/* can't handle misaligned */
3103		if (!address_ok(regs, ea, size))
3104			return -EFAULT;
3105		err = 0;
3106		switch (size) {
3107#ifdef __powerpc64__
3108		case 1:
3109			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
3110			break;
3111		case 2:
3112			__put_user_asmx(op->val, ea, err, "sthcx.", cr);
3113			break;
3114#endif
3115		case 4:
3116			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
3117			break;
3118#ifdef __powerpc64__
3119		case 8:
3120			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
3121			break;
3122		case 16:
3123			err = do_stqcx(ea, regs->gpr[op->reg],
3124				       regs->gpr[op->reg + 1], &cr);
3125			break;
3126#endif
3127		default:
3128			return -EINVAL;
3129		}
3130		if (!err)
3131			regs->ccr = (regs->ccr & 0x0fffffff) |
3132				(cr & 0xe0000000) |
3133				((regs->xer >> 3) & 0x10000000);
3134		else
3135			regs->dar = ea;
3136		break;
3137
3138	case LOAD:
3139#ifdef __powerpc64__
3140		if (size == 16) {
3141			err = emulate_lq(regs, ea, op->reg, cross_endian);
3142			break;
3143		}
3144#endif
3145		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
3146		if (!err) {
3147			if (op->type & SIGNEXT)
3148				do_signext(&regs->gpr[op->reg], size);
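			/*
			 * Swap when exactly one of BYTEREV and cross_endian
			 * applies; a byte-reversed load in a cross-endian
			 * context cancels out.
			 */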
3149			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
3150				do_byterev(&regs->gpr[op->reg], size);
3151		}
3152		break;
3153
3154#ifdef CONFIG_PPC_FPU
3155	case LOAD_FP:
3156		/*
3157		 * If the instruction is in userspace, we can emulate it even
3158		 * if the FP/VMX/VSX state is not live, because we have the state
3159		 * stored in the thread_struct.  If the instruction is in
3160		 * the kernel, we must not touch the state in the thread_struct.
3161		 */
3162		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3163			return 0;
3164		err = do_fp_load(op, ea, regs, cross_endian);
3165		break;
3166#endif
3167#ifdef CONFIG_ALTIVEC
3168	case LOAD_VMX:
3169		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3170			return 0;
3171		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
3172		break;
3173#endif
3174#ifdef CONFIG_VSX
3175	case LOAD_VSX: {
3176		unsigned long msrbit = MSR_VSX;
3177
3178		/*
3179		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3180		 * when the target of the instruction is a vector register.
3181		 */
3182		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3183			msrbit = MSR_VEC;
3184		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3185			return 0;
3186		err = do_vsx_load(op, ea, regs, cross_endian);
3187		break;
3188	}
3189#endif
3190	case LOAD_MULTI:
3191		if (!address_ok(regs, ea, size))
3192			return -EFAULT;
3193		rd = op->reg;
3194		for (i = 0; i < size; i += 4) {
3195			unsigned int v32 = 0;
3196
3197			nb = size - i;
3198			if (nb > 4)
3199				nb = 4;
3200			err = copy_mem_in((u8 *) &v32, ea, nb, regs);
3201			if (err)
3202				break;
3203			if (unlikely(cross_endian))
3204				v32 = byterev_4(v32);
3205			regs->gpr[rd] = v32;
3206			ea += 4;
3207			/* reg number wraps from 31 to 0 for lsw[ix] */
3208			rd = (rd + 1) & 0x1f;
3209		}
3210		break;
3211
3212	case STORE:
3213#ifdef __powerpc64__
3214		if (size == 16) {
3215			err = emulate_stq(regs, ea, op->reg, cross_endian);
3216			break;
3217		}
3218#endif
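		/*
		 * A kernel stwu/stdu updating r1, moving the stack pointer
		 * down by no more than one frame, is a stack-pointer update;
		 * defer the actual store (see handle_stack_update() above).
		 */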
3219		if ((op->type & UPDATE) && size == sizeof(long) &&
3220		    op->reg == 1 && op->update_reg == 1 &&
3221		    !(regs->msr & MSR_PR) &&
3222		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
3223			err = handle_stack_update(ea, regs);
3224			break;
3225		}
3226		if (unlikely(cross_endian))
3227			do_byterev(&op->val, size);
3228		err = write_mem(op->val, ea, size, regs);
3229		break;
3230
3231#ifdef CONFIG_PPC_FPU
3232	case STORE_FP:
3233		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3234			return 0;
3235		err = do_fp_store(op, ea, regs, cross_endian);
3236		break;
3237#endif
3238#ifdef CONFIG_ALTIVEC
3239	case STORE_VMX:
3240		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3241			return 0;
3242		err = do_vec_store(op->reg, ea, size, regs, cross_endian);
3243		break;
3244#endif
3245#ifdef CONFIG_VSX
3246	case STORE_VSX: {
3247		unsigned long msrbit = MSR_VSX;
3248
3249		/*
3250		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3251		 * when the target of the instruction is a vector register.
3252		 */
3253		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3254			msrbit = MSR_VEC;
3255		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3256			return 0;
3257		err = do_vsx_store(op, ea, regs, cross_endian);
3258		break;
3259	}
3260#endif
3261	case STORE_MULTI:
3262		if (!address_ok(regs, ea, size))
3263			return -EFAULT;
3264		rd = op->reg;
3265		for (i = 0; i < size; i += 4) {
3266			unsigned int v32 = regs->gpr[rd];
3267
3268			nb = size - i;
3269			if (nb > 4)
3270				nb = 4;
3271			if (unlikely(cross_endian))
3272				v32 = byterev_4(v32);
3273			err = copy_mem_out((u8 *) &v32, ea, nb, regs);
3274			if (err)
3275				break;
3276			ea += 4;
3277			/* reg number wraps from 31 to 0 for stsw[ix] */
3278			rd = (rd + 1) & 0x1f;
3279		}
3280		break;
3281
3282	default:
3283		return -EINVAL;
3284	}
3285
3286	if (err)
3287		return err;
3288
3289	if (op->type & UPDATE)
3290		regs->gpr[op->update_reg] = op->ea;
3291
3292	return 0;
3293}
3294NOKPROBE_SYMBOL(emulate_loadstore);
3295
3296/*
3297 * Emulate instructions that cause a transfer of control,
3298 * loads and stores, and a few other instructions.
3299 * Returns 1 if the step was emulated, 0 if not,
3300 * or -1 if the instruction is one that should not be stepped,
3301 * such as an rfid, or a mtmsrd that would clear MSR_RI.
3302 */
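/*
 * Minimal usage sketch (assuming the caller has fetched the probed
 * instruction, e.g. with ppc_inst_read()):
 *
 *	if (emulate_step(regs, instr) > 0)
 *		return;		(regs->nip has already been advanced)
 *
 * Callers such as kprobes fall back to hardware single-stepping when
 * 0 is returned.
 */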
3303int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
3304{
3305	struct instruction_op op;
3306	int r, err, type;
3307	unsigned long val;
3308	unsigned long ea;
3309
3310	r = analyse_instr(&op, regs, instr);
3311	if (r < 0)
3312		return r;
3313	if (r > 0) {
3314		emulate_update_regs(regs, &op);
3315		return 1;
3316	}
3317
3318	err = 0;
3319	type = GETTYPE(op.type);
3320
3321	if (OP_IS_LOAD_STORE(type)) {
3322		err = emulate_loadstore(regs, &op);
3323		if (err)
3324			return 0;
3325		goto instr_done;
3326	}
3327
3328	switch (type) {
3329	case CACHEOP:
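		/*
		 * Cache operations are carried out for real on the target
		 * EA; dcbt/dcbtst are only hints and are honoured with a
		 * prefetch when the hint field is zero (op.reg == 0).
		 */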
3330		ea = truncate_if_32bit(regs->msr, op.ea);
3331		if (!address_ok(regs, ea, 8))
3332			return 0;
3333		switch (op.type & CACHEOP_MASK) {
3334		case DCBST:
3335			__cacheop_user_asmx(ea, err, "dcbst");
3336			break;
3337		case DCBF:
3338			__cacheop_user_asmx(ea, err, "dcbf");
3339			break;
3340		case DCBTST:
3341			if (op.reg == 0)
3342				prefetchw((void *) ea);
3343			break;
3344		case DCBT:
3345			if (op.reg == 0)
3346				prefetch((void *) ea);
3347			break;
3348		case ICBI:
3349			__cacheop_user_asmx(ea, err, "icbi");
3350			break;
3351		case DCBZ:
3352			err = emulate_dcbz(ea, regs);
3353			break;
3354		}
3355		if (err) {
3356			regs->dar = ea;
3357			return 0;
3358		}
3359		goto instr_done;
3360
3361	case MFMSR:
3362		regs->gpr[op.reg] = regs->msr & MSR_MASK;
3363		goto instr_done;
3364
3365	case MTMSR:
3366		val = regs->gpr[op.reg];
3367		if ((val & MSR_RI) == 0)
3368			/* can't step mtmsr[d] that would clear MSR_RI */
3369			return -1;
3370		/* here op.val is the mask of bits to change */
3371		regs->msr = (regs->msr & ~op.val) | (val & op.val);
3372		goto instr_done;
3373
3374#ifdef CONFIG_PPC64
3375	case SYSCALL:	/* sc */
3376		/*
3377		 * N.B. this uses knowledge about how the syscall
3378		 * entry code works.  If that is changed, this will
3379		 * need to be changed also.
3380		 */
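		/* 0x1ebe in r0 is the magic "switch endian" syscall */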
3381		if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
3382				cpu_has_feature(CPU_FTR_REAL_LE) &&
3383				regs->gpr[0] == 0x1ebe) {
3384			regs->msr ^= MSR_LE;
3385			goto instr_done;
3386		}
3387		regs->gpr[9] = regs->gpr[13];
3388		regs->gpr[10] = MSR_KERNEL;
3389		regs->gpr[11] = regs->nip + 4;
3390		regs->gpr[12] = regs->msr & MSR_MASK;
3391		regs->gpr[13] = (unsigned long) get_paca();
3392		regs->nip = (unsigned long) &system_call_common;
3393		regs->msr = MSR_KERNEL;
3394		return 1;
3395
3396#ifdef CONFIG_PPC_BOOK3S_64
3397	case SYSCALL_VECTORED_0:	/* scv 0 */
3398		regs->gpr[9] = regs->gpr[13];
3399		regs->gpr[10] = MSR_KERNEL;
3400		regs->gpr[11] = regs->nip + 4;
3401		regs->gpr[12] = regs->msr & MSR_MASK;
3402		regs->gpr[13] = (unsigned long) get_paca();
3403		regs->nip = (unsigned long) &system_call_vectored_emulate;
3404		regs->msr = MSR_KERNEL;
3405		return 1;
3406#endif
3407
3408	case RFI:
3409		return -1;
3410#endif
3411	}
3412	return 0;
3413
3414 instr_done:
3415	regs->nip = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type));
3416	return 1;
3417}
3418NOKPROBE_SYMBOL(emulate_step);