Linux v6.8: arch/powerpc/lib/sstep.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Single-step support.
   4 *
   5 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
   6 */
   7#include <linux/kernel.h>
   8#include <linux/kprobes.h>
   9#include <linux/ptrace.h>
  10#include <linux/prefetch.h>
  11#include <asm/sstep.h>
  12#include <asm/processor.h>
  13#include <linux/uaccess.h>
  14#include <asm/cpu_has_feature.h>
  15#include <asm/cputable.h>
  16#include <asm/disassemble.h>
  17
  18#ifdef CONFIG_PPC64
  19/* Bits in SRR1 that are copied from MSR */
  20#define MSR_MASK	0xffffffff87c0ffffUL
  21#else
  22#define MSR_MASK	0x87c0ffff
  23#endif
  24
  25/* Bits in XER */
  26#define XER_SO		0x80000000U
  27#define XER_OV		0x40000000U
  28#define XER_CA		0x20000000U
  29#define XER_OV32	0x00080000U
  30#define XER_CA32	0x00040000U
  31
  32#ifdef CONFIG_VSX
  33#define VSX_REGISTER_XTP(rd)   ((((rd) & 1) << 5) | ((rd) & 0xfe))
  34#endif
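/*
 * A reading of the macro above: for paired VSX ops (lxvp/stxvp) the
 * low bit of the encoded field becomes register-number bit 5 (i.e.
 * it selects VSRs 32-63) and the remaining bits give the even first
 * register of the pair.
 */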
  35
  36#ifdef CONFIG_PPC_FPU
  37/*
  38 * Functions in ldstfp.S
  39 */
  40extern void get_fpr(int rn, double *p);
  41extern void put_fpr(int rn, const double *p);
  42extern void get_vr(int rn, __vector128 *p);
  43extern void put_vr(int rn, __vector128 *p);
  44extern void load_vsrn(int vsr, const void *p);
  45extern void store_vsrn(int vsr, void *p);
  46extern void conv_sp_to_dp(const float *sp, double *dp);
  47extern void conv_dp_to_sp(const double *dp, float *sp);
  48#endif
  49
  50#ifdef __powerpc64__
  51/*
  52 * Functions in quad.S
  53 */
  54extern int do_lq(unsigned long ea, unsigned long *regs);
  55extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
  56extern int do_lqarx(unsigned long ea, unsigned long *regs);
  57extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
  58		    unsigned int *crp);
  59#endif
  60
  61#ifdef __LITTLE_ENDIAN__
  62#define IS_LE	1
  63#define IS_BE	0
  64#else
  65#define IS_LE	0
  66#define IS_BE	1
  67#endif
  68
  69/*
  70 * Emulate the truncation of 64 bit values in 32-bit mode.
  71 */
  72static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
  73							unsigned long val)
  74{
  75	if ((msr & MSR_64BIT) == 0)
  76		val &= 0xffffffffUL;
  77	return val;
  78}
  79
  80/*
  81 * Determine whether a conditional branch instruction would branch.
  82 */
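/*
 * BO field recap (a worked example): for bdnz, BO = 0b10000, so
 * (bo & 4) == 0 and CTR is decremented; the (regs->ctr == 1) test
 * checks, before *regs is actually updated, whether the decremented
 * CTR would be zero, i.e. whether the branch falls through.
 */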
  83static nokprobe_inline int branch_taken(unsigned int instr,
  84					const struct pt_regs *regs,
  85					struct instruction_op *op)
  86{
  87	unsigned int bo = (instr >> 21) & 0x1f;
  88	unsigned int bi;
  89
  90	if ((bo & 4) == 0) {
  91		/* decrement counter */
  92		op->type |= DECCTR;
  93		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
  94			return 0;
  95	}
  96	if ((bo & 0x10) == 0) {
  97		/* check bit from CR */
  98		bi = (instr >> 16) & 0x1f;
  99		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
 100			return 0;
 101	}
 102	return 1;
 103}
 104
 105static nokprobe_inline long address_ok(struct pt_regs *regs,
 106				       unsigned long ea, int nb)
 107{
 108	if (!user_mode(regs))
 109		return 1;
 110	if (access_ok((void __user *)ea, nb))
 111		return 1;
 112	if (access_ok((void __user *)ea, 1))
 113		/* Access overlaps the end of the user region */
 114		regs->dar = TASK_SIZE_MAX - 1;
 115	else
 116		regs->dar = ea;
 117	return 0;
 118}
 119
 120/*
 121 * Calculate effective address for a D-form instruction
 122 */
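/*
 * e.g. for "lwz r3,8(r4)" the EA is GPR[r4] + 8. RA == 0 means a
 * literal zero base, not GPR[0], hence the "if (ra)" tests in the
 * *_ea() helpers.
 */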
 123static nokprobe_inline unsigned long dform_ea(unsigned int instr,
 124					      const struct pt_regs *regs)
 125{
 126	int ra;
 127	unsigned long ea;
 128
 129	ra = (instr >> 16) & 0x1f;
 130	ea = (signed short) instr;		/* sign-extend */
 131	if (ra)
 132		ea += regs->gpr[ra];
 133
 134	return ea;
 135}
 136
 137#ifdef __powerpc64__
 138/*
 139 * Calculate effective address for a DS-form instruction
 140 */
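/*
 * The low 2 bits of a DS-form word are extended-opcode bits, not
 * displacement, hence the "& ~3" below; the DQ form likewise
 * reserves the low 4 bits ("& ~0xf").
 */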
 141static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
 142					       const struct pt_regs *regs)
 143{
 144	int ra;
 145	unsigned long ea;
 146
 147	ra = (instr >> 16) & 0x1f;
 148	ea = (signed short) (instr & ~3);	/* sign-extend */
 149	if (ra)
 150		ea += regs->gpr[ra];
 151
 152	return ea;
 153}
 154
 155/*
 156 * Calculate effective address for a DQ-form instruction
 157 */
 158static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
 159					       const struct pt_regs *regs)
 160{
 161	int ra;
 162	unsigned long ea;
 163
 164	ra = (instr >> 16) & 0x1f;
 165	ea = (signed short) (instr & ~0xf);	/* sign-extend */
 166	if (ra)
 167		ea += regs->gpr[ra];
 168
 169	return ea;
 170}
 171#endif /* __powerpc64 */
 172
 173/*
 174 * Calculate effective address for an X-form instruction
 175 */
 176static nokprobe_inline unsigned long xform_ea(unsigned int instr,
 177					      const struct pt_regs *regs)
 178{
 179	int ra, rb;
 180	unsigned long ea;
 181
 182	ra = (instr >> 16) & 0x1f;
 183	rb = (instr >> 11) & 0x1f;
 184	ea = regs->gpr[rb];
 185	if (ra)
 186		ea += regs->gpr[ra];
 187
 188	return ea;
 189}
 190
 191/*
 192 * Calculate effective address for a MLS:D-form / 8LS:D-form
 193 * prefixed instruction
 194 */
 195static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
 196						  unsigned int suffix,
 197						  const struct pt_regs *regs)
 198{
 199	int ra, prefix_r;
 200	unsigned int  dd;
 201	unsigned long ea, d0, d1, d;
 202
 203	prefix_r = GET_PREFIX_R(instr);
 204	ra = GET_PREFIX_RA(suffix);
 205
 206	d0 = instr & 0x3ffff;
 207	d1 = suffix & 0xffff;
 208	d = (d0 << 16) | d1;
 209
 210	/*
 211	 * sign extend a 34 bit number
 212	 */
 213	dd = (unsigned int)(d >> 2);
 214	ea = (signed int)dd;
 215	ea = (ea << 2) | (d & 0x3);
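	/*
	 * i.e. d >> 2 is 32 bits with d's sign bit on top; the cast
	 * sign-extends it, and the shift/OR restores the low 2 bits.
	 */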
 216
 217	if (!prefix_r && ra)
 218		ea += regs->gpr[ra];
 219	else if (!prefix_r && !ra)
 220		; /* Leave ea as is */
 221	else if (prefix_r)
 222		ea += regs->nip;
 223
 224	/*
 225	 * (prefix_r && ra) is an invalid form. Should already be
 226	 * checked for by caller!
 227	 */
 228
 229	return ea;
 230}
 231
 232/*
 233 * Return the largest power of 2, not greater than sizeof(unsigned long),
 234 * such that x is a multiple of it.
 235 */
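/*
 * e.g. max_align(6) = 2: 6 | 8 = 0b1110, and x & -x isolates the
 * lowest set bit, so a 2-byte access is the widest naturally
 * aligned access possible at that offset.
 */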
 236static nokprobe_inline unsigned long max_align(unsigned long x)
 237{
 238	x |= sizeof(unsigned long);
 239	return x & -x;		/* isolates rightmost bit */
 240}
 241
 242static nokprobe_inline unsigned long byterev_2(unsigned long x)
 243{
 244	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
 245}
 246
 247static nokprobe_inline unsigned long byterev_4(unsigned long x)
 248{
 249	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
 250		((x & 0xff00) << 8) | ((x & 0xff) << 24);
 251}
 252
 253#ifdef __powerpc64__
 254static nokprobe_inline unsigned long byterev_8(unsigned long x)
 255{
 256	return (byterev_4(x) << 32) | byterev_4(x >> 32);
 257}
 258#endif
 259
 260static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
 261{
 262	switch (nb) {
 263	case 2:
 264		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
 265		break;
 266	case 4:
 267		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
 268		break;
 269#ifdef __powerpc64__
 270	case 8:
 271		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
 272		break;
 273	case 16: {
 274		unsigned long *up = (unsigned long *)ptr;
 275		unsigned long tmp;
 276		tmp = byterev_8(up[0]);
 277		up[0] = byterev_8(up[1]);
 278		up[1] = tmp;
 279		break;
 280	}
 281	case 32: {
 282		unsigned long *up = (unsigned long *)ptr;
 283		unsigned long tmp;
 284
 285		tmp = byterev_8(up[0]);
 286		up[0] = byterev_8(up[3]);
 287		up[3] = tmp;
 288		tmp = byterev_8(up[2]);
 289		up[2] = byterev_8(up[1]);
 290		up[1] = tmp;
 291		break;
 292	}
 293
 294#endif
 295	default:
 296		WARN_ON_ONCE(1);
 297	}
 298}
 299
 300static __always_inline int
 301__read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
 302{
 303	unsigned long x = 0;
 304
 305	switch (nb) {
 306	case 1:
 307		unsafe_get_user(x, (unsigned char __user *)ea, Efault);
 308		break;
 309	case 2:
 310		unsafe_get_user(x, (unsigned short __user *)ea, Efault);
 311		break;
 312	case 4:
 313		unsafe_get_user(x, (unsigned int __user *)ea, Efault);
 314		break;
 315#ifdef __powerpc64__
 316	case 8:
 317		unsafe_get_user(x, (unsigned long __user *)ea, Efault);
 318		break;
 319#endif
 320	}
 321	*dest = x;
 322	return 0;
 323
 324Efault:
 325	regs->dar = ea;
 326	return -EFAULT;
 327}
 328
 329static nokprobe_inline int
 330read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
 331{
 332	int err;
 333
 334	if (is_kernel_addr(ea))
 335		return __read_mem_aligned(dest, ea, nb, regs);
 336
 337	if (user_read_access_begin((void __user *)ea, nb)) {
 338		err = __read_mem_aligned(dest, ea, nb, regs);
 339		user_read_access_end();
 340	} else {
 341		err = -EFAULT;
 342		regs->dar = ea;
 343	}
 344
 345	return err;
 346}
 347
 348/*
 349 * Copy from userspace to a buffer, using the largest possible
 350 * aligned accesses, up to sizeof(long).
 351 */
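/*
 * e.g. a 7-byte copy at EA 0x1003 is done as accesses of size 1
 * (0x1003), 4 (0x1004) and 2 (0x1008): max_align(ea) bounds each
 * step and max_align(nb) caps the final partial word.
 */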
 352static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
 353{
 354	int c;
 355
 356	for (; nb > 0; nb -= c) {
 357		c = max_align(ea);
 358		if (c > nb)
 359			c = max_align(nb);
 360		switch (c) {
 361		case 1:
 362			unsafe_get_user(*dest, (u8 __user *)ea, Efault);
 363			break;
 364		case 2:
 365			unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault);
 366			break;
 367		case 4:
 368			unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault);
 369			break;
 370#ifdef __powerpc64__
 371		case 8:
 372			unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault);
 373			break;
 374#endif
 375		}
 376		dest += c;
 377		ea += c;
 378	}
 379	return 0;
 380
 381Efault:
 382	regs->dar = ea;
 383	return -EFAULT;
 384}
 385
 386static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
 387{
 388	int err;
 389
 390	if (is_kernel_addr(ea))
 391		return __copy_mem_in(dest, ea, nb, regs);
 392
 393	if (user_read_access_begin((void __user *)ea, nb)) {
 394		err = __copy_mem_in(dest, ea, nb, regs);
 395		user_read_access_end();
 396	} else {
 397		err = -EFAULT;
 398		regs->dar = ea;
 399	}
 400
 401	return err;
 402}
 403
 404static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
 405					      unsigned long ea, int nb,
 406					      struct pt_regs *regs)
 407{
 408	union {
 409		unsigned long ul;
 410		u8 b[sizeof(unsigned long)];
 411	} u;
 412	int i;
 413	int err;
 414
 415	u.ul = 0;
 416	i = IS_BE ? sizeof(unsigned long) - nb : 0;
 417	err = copy_mem_in(&u.b[i], ea, nb, regs);
 418	if (!err)
 419		*dest = u.ul;
 420	return err;
 421}
 422
 423/*
 424 * Read memory at address ea for nb bytes, return 0 for success
 425 * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
 426 * If nb < sizeof(long), the result is right-justified on BE systems.
 427 */
 428static int read_mem(unsigned long *dest, unsigned long ea, int nb,
 429			      struct pt_regs *regs)
 430{
 431	if (!address_ok(regs, ea, nb))
 432		return -EFAULT;
 433	if ((ea & (nb - 1)) == 0)
 434		return read_mem_aligned(dest, ea, nb, regs);
 435	return read_mem_unaligned(dest, ea, nb, regs);
 436}
 437NOKPROBE_SYMBOL(read_mem);
 438
 439static __always_inline int
 440__write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
 441{
 442	switch (nb) {
 443	case 1:
 444		unsafe_put_user(val, (unsigned char __user *)ea, Efault);
 445		break;
 446	case 2:
 447		unsafe_put_user(val, (unsigned short __user *)ea, Efault);
 448		break;
 449	case 4:
 450		unsafe_put_user(val, (unsigned int __user *)ea, Efault);
 451		break;
 452#ifdef __powerpc64__
 453	case 8:
 454		unsafe_put_user(val, (unsigned long __user *)ea, Efault);
 455		break;
 456#endif
 457	}
 458	return 0;
 459
 460Efault:
 461	regs->dar = ea;
 462	return -EFAULT;
 463}
 464
 465static nokprobe_inline int
 466write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
 467{
 468	int err;
 469
 470	if (is_kernel_addr(ea))
 471		return __write_mem_aligned(val, ea, nb, regs);
 472
 473	if (user_write_access_begin((void __user *)ea, nb)) {
 474		err = __write_mem_aligned(val, ea, nb, regs);
 475		user_write_access_end();
 476	} else {
 477		err = -EFAULT;
 478		regs->dar = ea;
 479	}
 480
 481	return err;
 482}
 483
 484/*
 485 * Copy from a buffer to userspace, using the largest possible
 486 * aligned accesses, up to sizeof(long).
 487 */
 488static __always_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
 489{
 490	int c;
 491
 492	for (; nb > 0; nb -= c) {
 493		c = max_align(ea);
 494		if (c > nb)
 495			c = max_align(nb);
 496		switch (c) {
 497		case 1:
 498			unsafe_put_user(*dest, (u8 __user *)ea, Efault);
 499			break;
 500		case 2:
 501			unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault);
 502			break;
 503		case 4:
 504			unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault);
 505			break;
 506#ifdef __powerpc64__
 507		case 8:
 508			unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault);
 509			break;
 510#endif
 511		}
 512		dest += c;
 513		ea += c;
 514	}
 515	return 0;
 516
 517Efault:
 518	regs->dar = ea;
 519	return -EFAULT;
 520}
 521
 522static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
 523{
 524	int err;
 525
 526	if (is_kernel_addr(ea))
 527		return __copy_mem_out(dest, ea, nb, regs);
 528
 529	if (user_write_access_begin((void __user *)ea, nb)) {
 530		err = __copy_mem_out(dest, ea, nb, regs);
 531		user_write_access_end();
 532	} else {
 533		err = -EFAULT;
 534		regs->dar = ea;
 535	}
 536
 537	return err;
 538}
 539
 540static nokprobe_inline int write_mem_unaligned(unsigned long val,
 541					       unsigned long ea, int nb,
 542					       struct pt_regs *regs)
 543{
 544	union {
 545		unsigned long ul;
 546		u8 b[sizeof(unsigned long)];
 547	} u;
 548	int i;
 549
 550	u.ul = val;
 551	i = IS_BE ? sizeof(unsigned long) - nb : 0;
 552	return copy_mem_out(&u.b[i], ea, nb, regs);
 553}
 554
 555/*
 556 * Write memory at address ea for nb bytes, return 0 for success
 557 * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
 558 */
 559static int write_mem(unsigned long val, unsigned long ea, int nb,
 560			       struct pt_regs *regs)
 561{
 562	if (!address_ok(regs, ea, nb))
 563		return -EFAULT;
 564	if ((ea & (nb - 1)) == 0)
 565		return write_mem_aligned(val, ea, nb, regs);
 566	return write_mem_unaligned(val, ea, nb, regs);
 567}
 568NOKPROBE_SYMBOL(write_mem);
 569
 570#ifdef CONFIG_PPC_FPU
 571/*
 572 * These access either the real FP register or the image in the
 573 * thread_struct, depending on regs->msr & MSR_FP.
 574 */
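/*
 * preempt is disabled around the register accesses so the MSR_FP
 * test and the get/put_fpr() or thread_struct access stay
 * consistent: a context switch in between could move the live FP
 * state between the registers and the thread_struct image.
 */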
 575static int do_fp_load(struct instruction_op *op, unsigned long ea,
 576		      struct pt_regs *regs, bool cross_endian)
 577{
 578	int err, rn, nb;
 579	union {
 580		int i;
 581		unsigned int u;
 582		float f;
 583		double d[2];
 584		unsigned long l[2];
 585		u8 b[2 * sizeof(double)];
 586	} u;
 587
 588	nb = GETSIZE(op->type);
 589	if (nb > sizeof(u))
 590		return -EINVAL;
 591	if (!address_ok(regs, ea, nb))
 592		return -EFAULT;
 593	rn = op->reg;
 594	err = copy_mem_in(u.b, ea, nb, regs);
 595	if (err)
 596		return err;
 597	if (unlikely(cross_endian)) {
 598		do_byte_reverse(u.b, min(nb, 8));
 599		if (nb == 16)
 600			do_byte_reverse(&u.b[8], 8);
 601	}
 602	preempt_disable();
 603	if (nb == 4) {
 604		if (op->type & FPCONV)
 605			conv_sp_to_dp(&u.f, &u.d[0]);
 606		else if (op->type & SIGNEXT)
 607			u.l[0] = u.i;
 608		else
 609			u.l[0] = u.u;
 610	}
 611	if (regs->msr & MSR_FP)
 612		put_fpr(rn, &u.d[0]);
 613	else
 614		current->thread.TS_FPR(rn) = u.l[0];
 615	if (nb == 16) {
 616		/* lfdp */
 617		rn |= 1;
 618		if (regs->msr & MSR_FP)
 619			put_fpr(rn, &u.d[1]);
 620		else
 621			current->thread.TS_FPR(rn) = u.l[1];
 622	}
 623	preempt_enable();
 624	return 0;
 625}
 626NOKPROBE_SYMBOL(do_fp_load);
 627
 628static int do_fp_store(struct instruction_op *op, unsigned long ea,
 629		       struct pt_regs *regs, bool cross_endian)
 630{
 631	int rn, nb;
 632	union {
 633		unsigned int u;
 634		float f;
 635		double d[2];
 636		unsigned long l[2];
 637		u8 b[2 * sizeof(double)];
 638	} u;
 639
 640	nb = GETSIZE(op->type);
 641	if (nb > sizeof(u))
 642		return -EINVAL;
 643	if (!address_ok(regs, ea, nb))
 644		return -EFAULT;
 645	rn = op->reg;
 646	preempt_disable();
 647	if (regs->msr & MSR_FP)
 648		get_fpr(rn, &u.d[0]);
 649	else
 650		u.l[0] = current->thread.TS_FPR(rn);
 651	if (nb == 4) {
 652		if (op->type & FPCONV)
 653			conv_dp_to_sp(&u.d[0], &u.f);
 654		else
 655			u.u = u.l[0];
 656	}
 657	if (nb == 16) {
 658		rn |= 1;
 659		if (regs->msr & MSR_FP)
 660			get_fpr(rn, &u.d[1]);
 661		else
 662			u.l[1] = current->thread.TS_FPR(rn);
 663	}
 664	preempt_enable();
 665	if (unlikely(cross_endian)) {
 666		do_byte_reverse(u.b, min(nb, 8));
 667		if (nb == 16)
 668			do_byte_reverse(&u.b[8], 8);
 669	}
 670	return copy_mem_out(u.b, ea, nb, regs);
 671}
 672NOKPROBE_SYMBOL(do_fp_store);
 673#endif
 674
 675#ifdef CONFIG_ALTIVEC
 676/* For Altivec/VMX, no need to worry about alignment */
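/*
 * lvx/stvx ignore the low bits of the EA, so the hardware access is
 * always element-aligned; the "ea &= ~(size - 1)" below mirrors
 * that, and address_ok() is checked on the containing 16 bytes.
 */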
 677static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
 678				       int size, struct pt_regs *regs,
 679				       bool cross_endian)
 680{
 681	int err;
 682	union {
 683		__vector128 v;
 684		u8 b[sizeof(__vector128)];
 685	} u = {};
 686
 687	if (size > sizeof(u))
 688		return -EINVAL;
 689
 690	if (!address_ok(regs, ea & ~0xfUL, 16))
 691		return -EFAULT;
 692	/* align to multiple of size */
 693	ea &= ~(size - 1);
 694	err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
 695	if (err)
 696		return err;
 697	if (unlikely(cross_endian))
 698		do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u)));
 699	preempt_disable();
 700	if (regs->msr & MSR_VEC)
 701		put_vr(rn, &u.v);
 702	else
 703		current->thread.vr_state.vr[rn] = u.v;
 704	preempt_enable();
 705	return 0;
 706}
 707
 708static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
 709					int size, struct pt_regs *regs,
 710					bool cross_endian)
 711{
 712	union {
 713		__vector128 v;
 714		u8 b[sizeof(__vector128)];
 715	} u;
 716
 717	if (size > sizeof(u))
 718		return -EINVAL;
 719
 720	if (!address_ok(regs, ea & ~0xfUL, 16))
 721		return -EFAULT;
 722	/* align to multiple of size */
 723	ea &= ~(size - 1);
 724
 725	preempt_disable();
 726	if (regs->msr & MSR_VEC)
 727		get_vr(rn, &u.v);
 728	else
 729		u.v = current->thread.vr_state.vr[rn];
 730	preempt_enable();
 731	if (unlikely(cross_endian))
 732		do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u)));
 733	return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
 734}
 735#endif /* CONFIG_ALTIVEC */
 736
 737#ifdef __powerpc64__
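/*
 * An aligned lq/stq is carried out with the real instruction (via
 * quad.S) so the access should stay single-copy atomic; misaligned
 * forms fall back to two 8-byte accesses, ordered so the GPR pair
 * ends up correct for either endianness.
 */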
 738static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
 739				      int reg, bool cross_endian)
 740{
 741	int err;
 742
 743	if (!address_ok(regs, ea, 16))
 744		return -EFAULT;
 745	/* if aligned, should be atomic */
 746	if ((ea & 0xf) == 0) {
 747		err = do_lq(ea, &regs->gpr[reg]);
 748	} else {
 749		err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
 750		if (!err)
 751			err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
 752	}
 753	if (!err && unlikely(cross_endian))
 754		do_byte_reverse(&regs->gpr[reg], 16);
 755	return err;
 756}
 757
 758static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
 759				       int reg, bool cross_endian)
 760{
 761	int err;
 762	unsigned long vals[2];
 763
 764	if (!address_ok(regs, ea, 16))
 765		return -EFAULT;
 766	vals[0] = regs->gpr[reg];
 767	vals[1] = regs->gpr[reg + 1];
 768	if (unlikely(cross_endian))
 769		do_byte_reverse(vals, 16);
 770
 771	/* if aligned, should be atomic */
 772	if ((ea & 0xf) == 0)
 773		return do_stq(ea, vals[0], vals[1]);
 774
 775	err = write_mem(vals[IS_LE], ea, 8, regs);
 776	if (!err)
 777		err = write_mem(vals[IS_BE], ea + 8, 8, regs);
 778	return err;
 779}
 780#endif /* __powerpc64 */
 781
 782#ifdef CONFIG_VSX
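/*
 * op->element_size drives the layout fix-ups below: 32 and 16 move
 * whole vectors, 8 covers scalar/doubleword forms, and 4/2/1
 * reorder word, halfword or byte elements between memory order and
 * the register-element numbering.
 */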
 783void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
 784		      const void *mem, bool rev)
 785{
 786	int size, read_size;
 787	int i, j;
 788	const unsigned int *wp;
 789	const unsigned short *hp;
 790	const unsigned char *bp;
 791
 792	size = GETSIZE(op->type);
 793	reg->d[0] = reg->d[1] = 0;
 794
 795	switch (op->element_size) {
 796	case 32:
 797		/* [p]lxvp[x] */
 798	case 16:
 799		/* whole vector; lxv[x] or lxvl[l] */
 800		if (size == 0)
 801			break;
 802		memcpy(reg, mem, size);
 803		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
 804			rev = !rev;
 805		if (rev)
 806			do_byte_reverse(reg, size);
 807		break;
 808	case 8:
 809		/* scalar loads, lxvd2x, lxvdsx */
 810		read_size = (size >= 8) ? 8 : size;
 811		i = IS_LE ? 8 : 8 - read_size;
 812		memcpy(&reg->b[i], mem, read_size);
 813		if (rev)
 814			do_byte_reverse(&reg->b[i], 8);
 815		if (size < 8) {
 816			if (op->type & SIGNEXT) {
 817				/* size == 4 is the only case here */
 818				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
 819			} else if (op->vsx_flags & VSX_FPCONV) {
 820				preempt_disable();
 821				conv_sp_to_dp(&reg->fp[1 + IS_LE],
 822					      &reg->dp[IS_LE]);
 823				preempt_enable();
 824			}
 825		} else {
 826			if (size == 16) {
 827				unsigned long v = *(unsigned long *)(mem + 8);
 828				reg->d[IS_BE] = !rev ? v : byterev_8(v);
 829			} else if (op->vsx_flags & VSX_SPLAT)
 830				reg->d[IS_BE] = reg->d[IS_LE];
 831		}
 832		break;
 833	case 4:
 834		/* lxvw4x, lxvwsx */
 835		wp = mem;
 836		for (j = 0; j < size / 4; ++j) {
 837			i = IS_LE ? 3 - j : j;
 838			reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
 839		}
 840		if (op->vsx_flags & VSX_SPLAT) {
 841			u32 val = reg->w[IS_LE ? 3 : 0];
 842			for (; j < 4; ++j) {
 843				i = IS_LE ? 3 - j : j;
 844				reg->w[i] = val;
 845			}
 846		}
 847		break;
 848	case 2:
 849		/* lxvh8x */
 850		hp = mem;
 851		for (j = 0; j < size / 2; ++j) {
 852			i = IS_LE ? 7 - j : j;
 853			reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
 854		}
 855		break;
 856	case 1:
 857		/* lxvb16x */
 858		bp = mem;
 859		for (j = 0; j < size; ++j) {
 860			i = IS_LE ? 15 - j : j;
 861			reg->b[i] = *bp++;
 862		}
 863		break;
 864	}
 865}
 866EXPORT_SYMBOL_GPL(emulate_vsx_load);
 867NOKPROBE_SYMBOL(emulate_vsx_load);
 868
 869void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
 870		       void *mem, bool rev)
 871{
 872	int size, write_size;
 873	int i, j;
 874	union vsx_reg buf;
 875	unsigned int *wp;
 876	unsigned short *hp;
 877	unsigned char *bp;
 878
 879	size = GETSIZE(op->type);
 880
 881	switch (op->element_size) {
 882	case 32:
 883		/* [p]stxvp[x] */
 884		if (size == 0)
 885			break;
 886		if (rev) {
 887			/* reverse 32 bytes */
 888			union vsx_reg buf32[2];
 889			buf32[0].d[0] = byterev_8(reg[1].d[1]);
 890			buf32[0].d[1] = byterev_8(reg[1].d[0]);
 891			buf32[1].d[0] = byterev_8(reg[0].d[1]);
 892			buf32[1].d[1] = byterev_8(reg[0].d[0]);
 893			memcpy(mem, buf32, size);
 894		} else {
 895			memcpy(mem, reg, size);
 896		}
 897		break;
 898	case 16:
 899		/* stxv, stxvx, stxvl, stxvll */
 900		if (size == 0)
 901			break;
 902		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
 903			rev = !rev;
 904		if (rev) {
 905			/* reverse 16 bytes */
 906			buf.d[0] = byterev_8(reg->d[1]);
 907			buf.d[1] = byterev_8(reg->d[0]);
 908			reg = &buf;
 909		}
 910		memcpy(mem, reg, size);
 911		break;
 912	case 8:
 913		/* scalar stores, stxvd2x */
 914		write_size = (size >= 8) ? 8 : size;
 915		i = IS_LE ? 8 : 8 - write_size;
 916		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
 917			buf.d[0] = buf.d[1] = 0;
 918			preempt_disable();
 919			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
 920			preempt_enable();
 921			reg = &buf;
 922		}
 923		memcpy(mem, &reg->b[i], write_size);
 924		if (size == 16)
 925			memcpy(mem + 8, &reg->d[IS_BE], 8);
 926		if (unlikely(rev)) {
 927			do_byte_reverse(mem, write_size);
 928			if (size == 16)
 929				do_byte_reverse(mem + 8, 8);
 930		}
 931		break;
 932	case 4:
 933		/* stxvw4x */
 934		wp = mem;
 935		for (j = 0; j < size / 4; ++j) {
 936			i = IS_LE ? 3 - j : j;
 937			*wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
 938		}
 939		break;
 940	case 2:
 941		/* stxvh8x */
 942		hp = mem;
 943		for (j = 0; j < size / 2; ++j) {
 944			i = IS_LE ? 7 - j : j;
 945			*hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
 946		}
 947		break;
 948	case 1:
 949		/* stxvb16x */
 950		bp = mem;
 951		for (j = 0; j < size; ++j) {
 952			i = IS_LE ? 15 - j : j;
 953			*bp++ = reg->b[i];
 954		}
 955		break;
 956	}
 957}
 958EXPORT_SYMBOL_GPL(emulate_vsx_store);
 959NOKPROBE_SYMBOL(emulate_vsx_store);
 960
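/*
 * VSX registers 0-31 overlay the FP registers and 32-63 overlay the
 * VMX registers, so the helpers below split on reg < 32 and fall
 * back to fp_state/vr_state when the matching MSR facility bit is
 * off.
 */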
 961static nokprobe_inline int do_vsx_load(struct instruction_op *op,
 962				       unsigned long ea, struct pt_regs *regs,
 963				       bool cross_endian)
 964{
 965	int reg = op->reg;
 966	int i, j, nr_vsx_regs;
 967	u8 mem[32];
 968	union vsx_reg buf[2];
 969	int size = GETSIZE(op->type);
 970
 971	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
 972		return -EFAULT;
 973
 974	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
 975	emulate_vsx_load(op, buf, mem, cross_endian);
 976	preempt_disable();
 977	if (reg < 32) {
 978		/* FP regs + extensions */
 979		if (regs->msr & MSR_FP) {
 980			for (i = 0; i < nr_vsx_regs; i++) {
 981				j = IS_LE ? nr_vsx_regs - i - 1 : i;
 982				load_vsrn(reg + i, &buf[j].v);
 983			}
 984		} else {
 985			for (i = 0; i < nr_vsx_regs; i++) {
 986				j = IS_LE ? nr_vsx_regs - i - 1 : i;
 987				current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
 988				current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
 989			}
 990		}
 991	} else {
 992		if (regs->msr & MSR_VEC) {
 993			for (i = 0; i < nr_vsx_regs; i++) {
 994				j = IS_LE ? nr_vsx_regs - i - 1 : i;
 995				load_vsrn(reg + i, &buf[j].v);
 996			}
 997		} else {
 998			for (i = 0; i < nr_vsx_regs; i++) {
 999				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1000				current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
1001			}
1002		}
1003	}
1004	preempt_enable();
1005	return 0;
1006}
1007
1008static nokprobe_inline int do_vsx_store(struct instruction_op *op,
1009					unsigned long ea, struct pt_regs *regs,
1010					bool cross_endian)
1011{
1012	int reg = op->reg;
1013	int i, j, nr_vsx_regs;
1014	u8 mem[32];
1015	union vsx_reg buf[2];
1016	int size = GETSIZE(op->type);
1017
1018	if (!address_ok(regs, ea, size))
1019		return -EFAULT;
1020
1021	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
1022	preempt_disable();
1023	if (reg < 32) {
1024		/* FP regs + extensions */
1025		if (regs->msr & MSR_FP) {
1026			for (i = 0; i < nr_vsx_regs; i++) {
1027				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1028				store_vsrn(reg + i, &buf[j].v);
1029			}
1030		} else {
1031			for (i = 0; i < nr_vsx_regs; i++) {
1032				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1033				buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
1034				buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
1035			}
1036		}
1037	} else {
1038		if (regs->msr & MSR_VEC) {
1039			for (i = 0; i < nr_vsx_regs; i++) {
1040				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1041				store_vsrn(reg + i, &buf[j].v);
1042			}
1043		} else {
1044			for (i = 0; i < nr_vsx_regs; i++) {
1045				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1046				buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
1047			}
1048		}
1049	}
1050	preempt_enable();
1051	emulate_vsx_store(op, buf, mem, cross_endian);
1052	return  copy_mem_out(mem, ea, size, regs);
1053}
1054#endif /* CONFIG_VSX */
1055
1056static __always_inline int __emulate_dcbz(unsigned long ea)
1057{
1058	unsigned long i;
1059	unsigned long size = l1_dcache_bytes();
1060
1061	for (i = 0; i < size; i += sizeof(long))
1062		unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault);
1063
1064	return 0;
1065
1066Efault:
1067	return -EFAULT;
1068}
1069
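/*
 * dcbz clears a whole L1 cache block, so the EA is rounded down to
 * a block boundary and the full block is zeroed a long at a time.
 */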
1070int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
1071{
1072	int err;
1073	unsigned long size = l1_dcache_bytes();
1074
1075	ea = truncate_if_32bit(regs->msr, ea);
1076	ea &= ~(size - 1);
1077	if (!address_ok(regs, ea, size))
1078		return -EFAULT;
1079
1080	if (is_kernel_addr(ea)) {
1081		err = __emulate_dcbz(ea);
1082	} else if (user_write_access_begin((void __user *)ea, size)) {
1083		err = __emulate_dcbz(ea);
1084		user_write_access_end();
1085	} else {
1086		err = -EFAULT;
1087	}
1088
1089	if (err)
1090		regs->dar = ea;
1091
1092
1093	return err;
1094}
1095NOKPROBE_SYMBOL(emulate_dcbz);
1096
1097#define __put_user_asmx(x, addr, err, op, cr)		\
1098	__asm__ __volatile__(				\
1099		".machine push\n"			\
1100		".machine power8\n"			\
1101		"1:	" op " %2,0,%3\n"		\
1102		".machine pop\n"			\
1103		"	mfcr	%1\n"			\
1104		"2:\n"					\
1105		".section .fixup,\"ax\"\n"		\
1106		"3:	li	%0,%4\n"		\
1107		"	b	2b\n"			\
1108		".previous\n"				\
1109		EX_TABLE(1b, 3b)			\
1110		: "=r" (err), "=r" (cr)			\
1111		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
1112
1113#define __get_user_asmx(x, addr, err, op)		\
1114	__asm__ __volatile__(				\
1115		".machine push\n"			\
1116		".machine power8\n"			\
1117		"1:	"op" %1,0,%2\n"			\
1118		".machine pop\n"			\
1119		"2:\n"					\
1120		".section .fixup,\"ax\"\n"		\
1121		"3:	li	%0,%3\n"		\
1122		"	b	2b\n"			\
1123		".previous\n"				\
1124		EX_TABLE(1b, 3b)			\
1125		: "=r" (err), "=r" (x)			\
1126		: "r" (addr), "i" (-EFAULT), "0" (err))
1127
1128#define __cacheop_user_asmx(addr, err, op)		\
1129	__asm__ __volatile__(				\
1130		"1:	"op" 0,%1\n"			\
1131		"2:\n"					\
1132		".section .fixup,\"ax\"\n"		\
1133		"3:	li	%0,%3\n"		\
1134		"	b	2b\n"			\
1135		".previous\n"				\
1136		EX_TABLE(1b, 3b)			\
1137		: "=r" (err)				\
1138		: "r" (addr), "i" (-EFAULT), "0" (err))
1139
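/*
 * Fixup pattern in the asm macros above: label 1 is the faulting
 * access, label 2 the continuation, label 3 the out-of-line stub
 * that loads -EFAULT into err; EX_TABLE(1b, 3b) registers the
 * 1 -> 3 mapping in the exception tables.
 */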
1140static nokprobe_inline void set_cr0(const struct pt_regs *regs,
1141				    struct instruction_op *op)
1142{
1143	long val = op->val;
1144
1145	op->type |= SETCC;
1146	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
1147	if (!(regs->msr & MSR_64BIT))
1148		val = (int) val;
1149	if (val < 0)
1150		op->ccval |= 0x80000000;
1151	else if (val > 0)
1152		op->ccval |= 0x40000000;
1153	else
1154		op->ccval |= 0x20000000;
1155}
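/*
 * Note: the ((regs->xer >> 3) & 0x10000000) term above copies
 * XER[SO] (mask 0x80000000) into the SO bit of CR field 0 (mask
 * 0x10000000), as a recording (Rc=1) instruction would.
 */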
1156
1157static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
1158{
1159	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1160		if (val)
1161			op->xerval |= XER_CA32;
1162		else
1163			op->xerval &= ~XER_CA32;
1164	}
1165}
1166
1167static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
1168				     struct instruction_op *op, int rd,
1169				     unsigned long val1, unsigned long val2,
1170				     unsigned long carry_in)
1171{
1172	unsigned long val = val1 + val2;
1173
1174	if (carry_in)
1175		++val;
1176	op->type = COMPUTE | SETREG | SETXER;
1177	op->reg = rd;
1178	op->val = val;
1179	val = truncate_if_32bit(regs->msr, val);
1180	val1 = truncate_if_32bit(regs->msr, val1);
1181	op->xerval = regs->xer;
1182	if (val < val1 || (carry_in && val == val1))
1183		op->xerval |= XER_CA;
1184	else
1185		op->xerval &= ~XER_CA;
1186
1187	set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
1188			(carry_in && (unsigned int)val == (unsigned int)val1));
1189}
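/*
 * Carry detection above: unsigned overflow happened iff the
 * truncated sum wrapped below val1, or equals val1 when a carry-in
 * was added (val2 was all ones). set_ca32() repeats the test on
 * the low 32 bits for the ISA 3.0 CA32 bit.
 */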
1190
1191static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
1192					  struct instruction_op *op,
1193					  long v1, long v2, int crfld)
1194{
1195	unsigned int crval, shift;
1196
1197	op->type = COMPUTE | SETCC;
1198	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1199	if (v1 < v2)
1200		crval |= 8;
1201	else if (v1 > v2)
1202		crval |= 4;
1203	else
1204		crval |= 2;
1205	shift = (7 - crfld) * 4;
1206	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1207}
1208
1209static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
1210					    struct instruction_op *op,
1211					    unsigned long v1,
1212					    unsigned long v2, int crfld)
1213{
1214	unsigned int crval, shift;
1215
1216	op->type = COMPUTE | SETCC;
1217	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1218	if (v1 < v2)
1219		crval |= 8;
1220	else if (v1 > v2)
1221		crval |= 4;
1222	else
1223		crval |= 2;
1224	shift = (7 - crfld) * 4;
1225	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1226}
1227
1228static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
1229				    struct instruction_op *op,
1230				    unsigned long v1, unsigned long v2)
1231{
1232	unsigned long long out_val, mask;
1233	int i;
1234
1235	out_val = 0;
1236	for (i = 0; i < 8; i++) {
1237		mask = 0xffUL << (i * 8);
1238		if ((v1 & mask) == (v2 & mask))
1239			out_val |= mask;
1240	}
1241	op->val = out_val;
1242}
1243
1244/*
1245 * The size parameter is used to adjust the equivalent popcnt instruction.
1246 * popcntb = 8, popcntw = 32, popcntd = 64
1247 */
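/*
 * Classic SWAR reduction: each step folds adjacent bit counts into
 * wider fields. e.g. v1 = 0xff becomes 0x08 after the 4-bit step
 * (popcntb's per-byte answer); the later shifts accumulate bytes
 * into words (popcntw) and the whole doubleword (popcntd).
 */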
1248static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
1249				      struct instruction_op *op,
1250				      unsigned long v1, int size)
1251{
1252	unsigned long long out = v1;
1253
1254	out -= (out >> 1) & 0x5555555555555555ULL;
1255	out = (0x3333333333333333ULL & out) +
1256	      (0x3333333333333333ULL & (out >> 2));
1257	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1258
1259	if (size == 8) {	/* popcntb */
1260		op->val = out;
1261		return;
1262	}
1263	out += out >> 8;
1264	out += out >> 16;
1265	if (size == 32) {	/* popcntw */
1266		op->val = out & 0x0000003f0000003fULL;
1267		return;
1268	}
1269
1270	out = (out + (out >> 32)) & 0x7f;
1271	op->val = out;	/* popcntd */
1272}
1273
1274#ifdef CONFIG_PPC64
1275static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
1276				      struct instruction_op *op,
1277				      unsigned long v1, unsigned long v2)
1278{
1279	unsigned char perm, idx;
1280	unsigned int i;
1281
1282	perm = 0;
1283	for (i = 0; i < 8; i++) {
1284		idx = (v1 >> (i * 8)) & 0xff;
1285		if (idx < 64)
1286			if (v2 & PPC_BIT(idx))
1287				perm |= 1 << i;
1288	}
1289	op->val = perm;
1290}
1291#endif /* CONFIG_PPC64 */
1292/*
1293 * The size parameter adjusts the equivalent prty instruction.
1294 * prtyw = 32, prtyd = 64
1295 */
1296static nokprobe_inline void do_prty(const struct pt_regs *regs,
1297				    struct instruction_op *op,
1298				    unsigned long v, int size)
1299{
1300	unsigned long long res = v ^ (v >> 8);
1301
1302	res ^= res >> 16;
1303	if (size == 32) {		/* prtyw */
1304		op->val = res & 0x0000000100000001ULL;
1305		return;
1306	}
1307
1308	res ^= res >> 32;
1309	op->val = res & 1;	/* prtyd */
1310}
1311
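/*
 * trap_compare() returns the five TO condition bits for v1 vs v2:
 * 0x10 LT, 0x08 GT, 0x04 EQ, 0x02 unsigned LT, 0x01 unsigned GT.
 * tw/td and their immediate forms trap when any bit selected by
 * the instruction's TO field is set.
 */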
1312static nokprobe_inline int trap_compare(long v1, long v2)
1313{
1314	int ret = 0;
1315
1316	if (v1 < v2)
1317		ret |= 0x10;
1318	else if (v1 > v2)
1319		ret |= 0x08;
1320	else
1321		ret |= 0x04;
1322	if ((unsigned long)v1 < (unsigned long)v2)
1323		ret |= 0x02;
1324	else if ((unsigned long)v1 > (unsigned long)v2)
1325		ret |= 0x01;
1326	return ret;
1327}
1328
1329/*
1330 * Elements of 32-bit rotate and mask instructions.
1331 */
1332#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
1333			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
1334#ifdef __powerpc64__
1335#define MASK64_L(mb)	(~0UL >> (mb))
1336#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
1337#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
1338#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
1339#else
1340#define DATA32(x)	(x)
1341#endif
1342#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
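/*
 * MASK32(mb, me) is the mask of big-endian bits mb..me inclusive,
 * e.g. MASK32(8, 15) = 0x00ff0000; with mb > me it wraps, e.g.
 * MASK32(16, 7) = 0xff00ffff, the (me) >= (mb) term providing the
 * carry that makes both shapes fall out of the same two terms.
 */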
1343
1344/*
1345 * Decode an instruction, and return information about it in *op
1346 * without changing *regs.
1347 * Integer arithmetic and logical instructions, branches, and barrier
1348 * instructions can be emulated just using the information in *op.
1349 *
1350 * Return value is 1 if the instruction can be emulated just by
1351 * updating *regs with the information in *op, -1 if we need the
1352 * GPRs but *regs doesn't contain the full register set, or 0
1353 * otherwise.
1354 */
1355int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1356		  ppc_inst_t instr)
1357{
1358#ifdef CONFIG_PPC64
1359	unsigned int suffixopcode, prefixtype, prefix_r;
1360#endif
1361	unsigned int opcode, ra, rb, rc, rd, spr, u;
1362	unsigned long int imm;
1363	unsigned long int val, val2;
1364	unsigned int mb, me, sh;
1365	unsigned int word, suffix;
1366	long ival;
1367
1368	word = ppc_inst_val(instr);
1369	suffix = ppc_inst_suffix(instr);
1370
1371	op->type = COMPUTE;
1372
1373	opcode = ppc_inst_primary_opcode(instr);
1374	switch (opcode) {
1375	case 16:	/* bc */
1376		op->type = BRANCH;
1377		imm = (signed short)(word & 0xfffc);
1378		if ((word & 2) == 0)
1379			imm += regs->nip;
1380		op->val = truncate_if_32bit(regs->msr, imm);
1381		if (word & 1)
1382			op->type |= SETLK;
1383		if (branch_taken(word, regs, op))
1384			op->type |= BRTAKEN;
1385		return 1;
1386	case 17:	/* sc */
1387		if ((word & 0xfe2) == 2)
1388			op->type = SYSCALL;
1389		else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1390				(word & 0xfe3) == 1) {	/* scv */
1391			op->type = SYSCALL_VECTORED_0;
1392			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1393				goto unknown_opcode;
1394		} else
1395			op->type = UNKNOWN;
1396		return 0;
1397	case 18:	/* b */
1398		op->type = BRANCH | BRTAKEN;
1399		imm = word & 0x03fffffc;
1400		if (imm & 0x02000000)
1401			imm -= 0x04000000;
1402		if ((word & 2) == 0)
1403			imm += regs->nip;
1404		op->val = truncate_if_32bit(regs->msr, imm);
1405		if (word & 1)
1406			op->type |= SETLK;
1407		return 1;
1408	case 19:
1409		switch ((word >> 1) & 0x3ff) {
1410		case 0:		/* mcrf */
1411			op->type = COMPUTE + SETCC;
1412			rd = 7 - ((word >> 23) & 0x7);
1413			ra = 7 - ((word >> 18) & 0x7);
1414			rd *= 4;
1415			ra *= 4;
1416			val = (regs->ccr >> ra) & 0xf;
1417			op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1418			return 1;
1419
1420		case 16:	/* bclr */
1421		case 528:	/* bcctr */
1422			op->type = BRANCH;
1423			imm = (word & 0x400)? regs->ctr: regs->link;
1424			op->val = truncate_if_32bit(regs->msr, imm);
1425			if (word & 1)
1426				op->type |= SETLK;
1427			if (branch_taken(word, regs, op))
1428				op->type |= BRTAKEN;
1429			return 1;
1430
1431		case 18:	/* rfid, scary */
1432			if (regs->msr & MSR_PR)
1433				goto priv;
1434			op->type = RFI;
1435			return 0;
1436
1437		case 150:	/* isync */
1438			op->type = BARRIER | BARRIER_ISYNC;
1439			return 1;
1440
1441		case 33:	/* crnor */
1442		case 129:	/* crandc */
1443		case 193:	/* crxor */
1444		case 225:	/* crnand */
1445		case 257:	/* crand */
1446		case 289:	/* creqv */
1447		case 417:	/* crorc */
1448		case 449:	/* cror */
1449			op->type = COMPUTE + SETCC;
1450			ra = (word >> 16) & 0x1f;
1451			rb = (word >> 11) & 0x1f;
1452			rd = (word >> 21) & 0x1f;
1453			ra = (regs->ccr >> (31 - ra)) & 1;
1454			rb = (regs->ccr >> (31 - rb)) & 1;
1455			val = (word >> (6 + ra * 2 + rb)) & 1;
1456			op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1457				(val << (31 - rd));
1458			return 1;
1459		}
1460		break;
1461	case 31:
1462		switch ((word >> 1) & 0x3ff) {
1463		case 598:	/* sync */
1464			op->type = BARRIER + BARRIER_SYNC;
1465#ifdef __powerpc64__
1466			switch ((word >> 21) & 3) {
1467			case 1:		/* lwsync */
1468				op->type = BARRIER + BARRIER_LWSYNC;
1469				break;
1470			case 2:		/* ptesync */
1471				op->type = BARRIER + BARRIER_PTESYNC;
1472				break;
1473			}
1474#endif
1475			return 1;
1476
1477		case 854:	/* eieio */
1478			op->type = BARRIER + BARRIER_EIEIO;
1479			return 1;
1480		}
1481		break;
1482	}
1483
1484	rd = (word >> 21) & 0x1f;
1485	ra = (word >> 16) & 0x1f;
1486	rb = (word >> 11) & 0x1f;
1487	rc = (word >> 6) & 0x1f;
1488
1489	switch (opcode) {
1490#ifdef __powerpc64__
1491	case 1:
1492		if (!cpu_has_feature(CPU_FTR_ARCH_31))
1493			goto unknown_opcode;
1494
1495		prefix_r = GET_PREFIX_R(word);
1496		ra = GET_PREFIX_RA(suffix);
1497		rd = (suffix >> 21) & 0x1f;
1498		op->reg = rd;
1499		op->val = regs->gpr[rd];
1500		suffixopcode = get_op(suffix);
1501		prefixtype = (word >> 24) & 0x3;
1502		switch (prefixtype) {
1503		case 2:
1504			if (prefix_r && ra)
1505				return 0;
1506			switch (suffixopcode) {
1507			case 14:	/* paddi */
1508				op->type = COMPUTE | PREFIXED;
1509				op->val = mlsd_8lsd_ea(word, suffix, regs);
1510				goto compute_done;
1511			}
1512		}
1513		break;
1514	case 2:		/* tdi */
1515		if (rd & trap_compare(regs->gpr[ra], (short) word))
1516			goto trap;
1517		return 1;
1518#endif
1519	case 3:		/* twi */
1520		if (rd & trap_compare((int)regs->gpr[ra], (short) word))
1521			goto trap;
1522		return 1;
1523
1524#ifdef __powerpc64__
1525	case 4:
1526		/*
1527		 * There are very many instructions with this primary opcode
1528		 * introduced in the ISA as early as v2.03. However, the ones
1529		 * we currently emulate were all introduced with ISA 3.0
1530		 */
1531		if (!cpu_has_feature(CPU_FTR_ARCH_300))
1532			goto unknown_opcode;
1533
1534		switch (word & 0x3f) {
1535		case 48:	/* maddhd */
1536			asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
1537				     "=r" (op->val) : "r" (regs->gpr[ra]),
1538				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1539			goto compute_done;
1540
1541		case 49:	/* maddhdu */
1542			asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
1543				     "=r" (op->val) : "r" (regs->gpr[ra]),
1544				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1545			goto compute_done;
1546
1547		case 51:	/* maddld */
1548			asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
1549				     "=r" (op->val) : "r" (regs->gpr[ra]),
1550				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1551			goto compute_done;
1552		}
1553
1554		/*
1555		 * There are other instructions from ISA 3.0 with the same
1556		 * primary opcode which do not have emulation support yet.
1557		 */
1558		goto unknown_opcode;
1559#endif
1560
1561	case 7:		/* mulli */
1562		op->val = regs->gpr[ra] * (short) word;
1563		goto compute_done;
1564
1565	case 8:		/* subfic */
1566		imm = (short) word;
1567		add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1568		return 1;
1569
1570	case 10:	/* cmpli */
1571		imm = (unsigned short) word;
1572		val = regs->gpr[ra];
1573#ifdef __powerpc64__
1574		if ((rd & 1) == 0)
1575			val = (unsigned int) val;
1576#endif
1577		do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1578		return 1;
1579
1580	case 11:	/* cmpi */
1581		imm = (short) word;
1582		val = regs->gpr[ra];
1583#ifdef __powerpc64__
1584		if ((rd & 1) == 0)
1585			val = (int) val;
1586#endif
1587		do_cmp_signed(regs, op, val, imm, rd >> 2);
1588		return 1;
1589
1590	case 12:	/* addic */
1591		imm = (short) word;
1592		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1593		return 1;
1594
1595	case 13:	/* addic. */
1596		imm = (short) word;
1597		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1598		set_cr0(regs, op);
1599		return 1;
1600
1601	case 14:	/* addi */
1602		imm = (short) word;
1603		if (ra)
1604			imm += regs->gpr[ra];
1605		op->val = imm;
1606		goto compute_done;
1607
1608	case 15:	/* addis */
1609		imm = ((short) word) << 16;
1610		if (ra)
1611			imm += regs->gpr[ra];
1612		op->val = imm;
1613		goto compute_done;
1614
1615	case 19:
1616		if (((word >> 1) & 0x1f) == 2) {
1617			/* addpcis */
1618			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1619				goto unknown_opcode;
1620			imm = (short) (word & 0xffc1);	/* d0 + d2 fields */
1621			imm |= (word >> 15) & 0x3e;	/* d1 field */
1622			op->val = regs->nip + (imm << 16) + 4;
1623			goto compute_done;
1624		}
1625		op->type = UNKNOWN;
1626		return 0;
1627
1628	case 20:	/* rlwimi */
1629		mb = (word >> 6) & 0x1f;
1630		me = (word >> 1) & 0x1f;
1631		val = DATA32(regs->gpr[rd]);
1632		imm = MASK32(mb, me);
1633		op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1634		goto logical_done;
1635
1636	case 21:	/* rlwinm */
1637		mb = (word >> 6) & 0x1f;
1638		me = (word >> 1) & 0x1f;
1639		val = DATA32(regs->gpr[rd]);
1640		op->val = ROTATE(val, rb) & MASK32(mb, me);
1641		goto logical_done;
1642
1643	case 23:	/* rlwnm */
1644		mb = (word >> 6) & 0x1f;
1645		me = (word >> 1) & 0x1f;
1646		rb = regs->gpr[rb] & 0x1f;
1647		val = DATA32(regs->gpr[rd]);
1648		op->val = ROTATE(val, rb) & MASK32(mb, me);
1649		goto logical_done;
1650
1651	case 24:	/* ori */
1652		op->val = regs->gpr[rd] | (unsigned short) word;
1653		goto logical_done_nocc;
1654
1655	case 25:	/* oris */
1656		imm = (unsigned short) word;
1657		op->val = regs->gpr[rd] | (imm << 16);
1658		goto logical_done_nocc;
1659
1660	case 26:	/* xori */
1661		op->val = regs->gpr[rd] ^ (unsigned short) word;
1662		goto logical_done_nocc;
1663
1664	case 27:	/* xoris */
1665		imm = (unsigned short) word;
1666		op->val = regs->gpr[rd] ^ (imm << 16);
1667		goto logical_done_nocc;
1668
1669	case 28:	/* andi. */
1670		op->val = regs->gpr[rd] & (unsigned short) word;
1671		set_cr0(regs, op);
1672		goto logical_done_nocc;
1673
1674	case 29:	/* andis. */
1675		imm = (unsigned short) word;
1676		op->val = regs->gpr[rd] & (imm << 16);
1677		set_cr0(regs, op);
1678		goto logical_done_nocc;
1679
1680#ifdef __powerpc64__
1681	case 30:	/* rld* */
1682		mb = ((word >> 6) & 0x1f) | (word & 0x20);
1683		val = regs->gpr[rd];
1684		if ((word & 0x10) == 0) {
1685			sh = rb | ((word & 2) << 4);
1686			val = ROTATE(val, sh);
1687			switch ((word >> 2) & 3) {
1688			case 0:		/* rldicl */
1689				val &= MASK64_L(mb);
1690				break;
1691			case 1:		/* rldicr */
1692				val &= MASK64_R(mb);
1693				break;
1694			case 2:		/* rldic */
1695				val &= MASK64(mb, 63 - sh);
1696				break;
1697			case 3:		/* rldimi */
1698				imm = MASK64(mb, 63 - sh);
1699				val = (regs->gpr[ra] & ~imm) |
1700					(val & imm);
1701			}
1702			op->val = val;
1703			goto logical_done;
1704		} else {
1705			sh = regs->gpr[rb] & 0x3f;
1706			val = ROTATE(val, sh);
1707			switch ((word >> 1) & 7) {
1708			case 0:		/* rldcl */
1709				op->val = val & MASK64_L(mb);
1710				goto logical_done;
1711			case 1:		/* rldcr */
1712				op->val = val & MASK64_R(mb);
1713				goto logical_done;
1714			}
1715		}
1716#endif
1717		op->type = UNKNOWN;	/* illegal instruction */
1718		return 0;
1719
1720	case 31:
1721		/* isel occupies 32 minor opcodes */
1722		if (((word >> 1) & 0x1f) == 15) {
1723			mb = (word >> 6) & 0x1f; /* bc field */
1724			val = (regs->ccr >> (31 - mb)) & 1;
1725			val2 = (ra) ? regs->gpr[ra] : 0;
1726
1727			op->val = (val) ? val2 : regs->gpr[rb];
1728			goto compute_done;
1729		}
1730
1731		switch ((word >> 1) & 0x3ff) {
1732		case 4:		/* tw */
1733			if (rd == 0x1f ||
1734			    (rd & trap_compare((int)regs->gpr[ra],
1735					       (int)regs->gpr[rb])))
1736				goto trap;
1737			return 1;
1738#ifdef __powerpc64__
1739		case 68:	/* td */
1740			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1741				goto trap;
1742			return 1;
1743#endif
1744		case 83:	/* mfmsr */
1745			if (regs->msr & MSR_PR)
1746				goto priv;
1747			op->type = MFMSR;
1748			op->reg = rd;
1749			return 0;
1750		case 146:	/* mtmsr */
1751			if (regs->msr & MSR_PR)
1752				goto priv;
1753			op->type = MTMSR;
1754			op->reg = rd;
1755			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1756			return 0;
1757#ifdef CONFIG_PPC64
1758		case 178:	/* mtmsrd */
1759			if (regs->msr & MSR_PR)
1760				goto priv;
1761			op->type = MTMSR;
1762			op->reg = rd;
1763			/* only MSR_EE and MSR_RI get changed if bit 15 set */
1764			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1765			imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1766			op->val = imm;
1767			return 0;
1768#endif
1769
1770		case 19:	/* mfcr */
1771			imm = 0xffffffffUL;
1772			if ((word >> 20) & 1) {
1773				imm = 0xf0000000UL;
1774				for (sh = 0; sh < 8; ++sh) {
1775					if (word & (0x80000 >> sh))
1776						break;
1777					imm >>= 4;
1778				}
1779			}
1780			op->val = regs->ccr & imm;
1781			goto compute_done;
1782
1783		case 128:	/* setb */
1784			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1785				goto unknown_opcode;
1786			/*
1787			 * 'ra' encodes the CR field number (bfa) in the top 3 bits.
1788			 * Since each CR field is 4 bits,
1789			 * we can simply mask off the bottom two bits (bfa * 4)
1790			 * to yield the first bit in the CR field.
1791			 */
1792			ra = ra & ~0x3;
1793			/* 'val' stores bits of the CR field (bfa) */
1794			val = regs->ccr >> (CR0_SHIFT - ra);
1795			/* checks if the LT bit of CR field (bfa) is set */
1796			if (val & 8)
1797				op->val = -1;
1798			/* checks if the GT bit of CR field (bfa) is set */
1799			else if (val & 4)
1800				op->val = 1;
1801			else
1802				op->val = 0;
1803			goto compute_done;
1804
1805		case 144:	/* mtcrf */
1806			op->type = COMPUTE + SETCC;
1807			imm = 0xf0000000UL;
1808			val = regs->gpr[rd];
1809			op->ccval = regs->ccr;
1810			for (sh = 0; sh < 8; ++sh) {
1811				if (word & (0x80000 >> sh))
1812					op->ccval = (op->ccval & ~imm) |
1813						(val & imm);
1814				imm >>= 4;
1815			}
1816			return 1;
1817
1818		case 339:	/* mfspr */
1819			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1820			op->type = MFSPR;
1821			op->reg = rd;
1822			op->spr = spr;
1823			if (spr == SPRN_XER || spr == SPRN_LR ||
1824			    spr == SPRN_CTR)
1825				return 1;
1826			return 0;
1827
1828		case 467:	/* mtspr */
1829			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1830			op->type = MTSPR;
1831			op->val = regs->gpr[rd];
1832			op->spr = spr;
1833			if (spr == SPRN_XER || spr == SPRN_LR ||
1834			    spr == SPRN_CTR)
1835				return 1;
1836			return 0;
1837
1838/*
1839 * Compare instructions
1840 */
1841		case 0:	/* cmp */
1842			val = regs->gpr[ra];
1843			val2 = regs->gpr[rb];
1844#ifdef __powerpc64__
1845			if ((rd & 1) == 0) {
1846				/* word (32-bit) compare */
1847				val = (int) val;
1848				val2 = (int) val2;
1849			}
1850#endif
1851			do_cmp_signed(regs, op, val, val2, rd >> 2);
1852			return 1;
1853
1854		case 32:	/* cmpl */
1855			val = regs->gpr[ra];
1856			val2 = regs->gpr[rb];
1857#ifdef __powerpc64__
1858			if ((rd & 1) == 0) {
1859				/* word (32-bit) compare */
1860				val = (unsigned int) val;
1861				val2 = (unsigned int) val2;
1862			}
1863#endif
1864			do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1865			return 1;
1866
1867		case 508: /* cmpb */
1868			do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1869			goto logical_done_nocc;
1870
1871/*
1872 * Arithmetic instructions
1873 */
1874		case 8:	/* subfc */
1875			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1876				       regs->gpr[rb], 1);
1877			goto arith_done;
1878#ifdef __powerpc64__
1879		case 9:	/* mulhdu */
1880			asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1881			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1882			goto arith_done;
1883#endif
1884		case 10:	/* addc */
1885			add_with_carry(regs, op, rd, regs->gpr[ra],
1886				       regs->gpr[rb], 0);
1887			goto arith_done;
1888
1889		case 11:	/* mulhwu */
1890			asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1891			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1892			goto arith_done;
1893
1894		case 40:	/* subf */
1895			op->val = regs->gpr[rb] - regs->gpr[ra];
1896			goto arith_done;
1897#ifdef __powerpc64__
1898		case 73:	/* mulhd */
1899			asm("mulhd %0,%1,%2" : "=r" (op->val) :
1900			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1901			goto arith_done;
1902#endif
1903		case 75:	/* mulhw */
1904			asm("mulhw %0,%1,%2" : "=r" (op->val) :
1905			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1906			goto arith_done;
1907
1908		case 104:	/* neg */
1909			op->val = -regs->gpr[ra];
1910			goto arith_done;
1911
1912		case 136:	/* subfe */
1913			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1914				       regs->gpr[rb], regs->xer & XER_CA);
1915			goto arith_done;
1916
1917		case 138:	/* adde */
1918			add_with_carry(regs, op, rd, regs->gpr[ra],
1919				       regs->gpr[rb], regs->xer & XER_CA);
1920			goto arith_done;
1921
1922		case 200:	/* subfze */
1923			add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1924				       regs->xer & XER_CA);
1925			goto arith_done;
1926
1927		case 202:	/* addze */
1928			add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1929				       regs->xer & XER_CA);
1930			goto arith_done;
1931
1932		case 232:	/* subfme */
1933			add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1934				       regs->xer & XER_CA);
1935			goto arith_done;
1936#ifdef __powerpc64__
1937		case 233:	/* mulld */
1938			op->val = regs->gpr[ra] * regs->gpr[rb];
1939			goto arith_done;
1940#endif
1941		case 234:	/* addme */
1942			add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1943				       regs->xer & XER_CA);
1944			goto arith_done;
1945
1946		case 235:	/* mullw */
1947			op->val = (long)(int) regs->gpr[ra] *
1948				(int) regs->gpr[rb];
1949
1950			goto arith_done;
1951#ifdef __powerpc64__
1952		case 265:	/* modud */
1953			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1954				goto unknown_opcode;
1955			op->val = regs->gpr[ra] % regs->gpr[rb];
1956			goto compute_done;
1957#endif
1958		case 266:	/* add */
1959			op->val = regs->gpr[ra] + regs->gpr[rb];
1960			goto arith_done;
1961
1962		case 267:	/* moduw */
1963			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1964				goto unknown_opcode;
1965			op->val = (unsigned int) regs->gpr[ra] %
1966				(unsigned int) regs->gpr[rb];
1967			goto compute_done;
1968#ifdef __powerpc64__
1969		case 457:	/* divdu */
1970			op->val = regs->gpr[ra] / regs->gpr[rb];
1971			goto arith_done;
1972#endif
1973		case 459:	/* divwu */
1974			op->val = (unsigned int) regs->gpr[ra] /
1975				(unsigned int) regs->gpr[rb];
1976			goto arith_done;
1977#ifdef __powerpc64__
1978		case 489:	/* divd */
1979			op->val = (long int) regs->gpr[ra] /
1980				(long int) regs->gpr[rb];
1981			goto arith_done;
1982#endif
1983		case 491:	/* divw */
1984			op->val = (int) regs->gpr[ra] /
1985				(int) regs->gpr[rb];
1986			goto arith_done;
1987#ifdef __powerpc64__
1988		case 425:	/* divde[.] */
1989			asm volatile(PPC_DIVDE(%0, %1, %2) :
1990				"=r" (op->val) : "r" (regs->gpr[ra]),
1991				"r" (regs->gpr[rb]));
1992			goto arith_done;
1993		case 393:	/* divdeu[.] */
1994			asm volatile(PPC_DIVDEU(%0, %1, %2) :
1995				"=r" (op->val) : "r" (regs->gpr[ra]),
1996				"r" (regs->gpr[rb]));
1997			goto arith_done;
1998#endif
1999		case 755:	/* darn */
2000			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2001				goto unknown_opcode;
2002			switch (ra & 0x3) {
2003			case 0:
2004				/* 32-bit conditioned */
2005				asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
2006				goto compute_done;
2007
2008			case 1:
2009				/* 64-bit conditioned */
2010				asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
2011				goto compute_done;
2012
2013			case 2:
2014				/* 64-bit raw */
2015				asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
2016				goto compute_done;
2017			}
2018
2019			goto unknown_opcode;
2020#ifdef __powerpc64__
2021		case 777:	/* modsd */
2022			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2023				goto unknown_opcode;
2024			op->val = (long int) regs->gpr[ra] %
2025				(long int) regs->gpr[rb];
2026			goto compute_done;
2027#endif
2028		case 779:	/* modsw */
2029			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2030				goto unknown_opcode;
2031			op->val = (int) regs->gpr[ra] %
2032				(int) regs->gpr[rb];
2033			goto compute_done;
2034
2035
2036/*
2037 * Logical instructions
2038 */
2039		case 26:	/* cntlzw */
2040			val = (unsigned int) regs->gpr[rd];
2041			op->val = ( val ? __builtin_clz(val) : 32 );
2042			goto logical_done;
2043#ifdef __powerpc64__
2044		case 58:	/* cntlzd */
2045			val = regs->gpr[rd];
2046			op->val = ( val ? __builtin_clzl(val) : 64 );
2047			goto logical_done;
2048#endif
2049		case 28:	/* and */
2050			op->val = regs->gpr[rd] & regs->gpr[rb];
2051			goto logical_done;
2052
2053		case 60:	/* andc */
2054			op->val = regs->gpr[rd] & ~regs->gpr[rb];
2055			goto logical_done;
2056
2057		case 122:	/* popcntb */
2058			do_popcnt(regs, op, regs->gpr[rd], 8);
2059			goto logical_done_nocc;
2060
2061		case 124:	/* nor */
2062			op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
2063			goto logical_done;
2064
2065		case 154:	/* prtyw */
2066			do_prty(regs, op, regs->gpr[rd], 32);
2067			goto logical_done_nocc;
2068
2069		case 186:	/* prtyd */
2070			do_prty(regs, op, regs->gpr[rd], 64);
2071			goto logical_done_nocc;
2072#ifdef CONFIG_PPC64
2073		case 252:	/* bpermd */
2074			do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
2075			goto logical_done_nocc;
2076#endif
2077		case 284:	/* eqv */
2078			op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
2079			goto logical_done;
2080
2081		case 316:	/* xor */
2082			op->val = regs->gpr[rd] ^ regs->gpr[rb];
2083			goto logical_done;
2084
2085		case 378:	/* popcntw */
2086			do_popcnt(regs, op, regs->gpr[rd], 32);
2087			goto logical_done_nocc;
2088
2089		case 412:	/* orc */
2090			op->val = regs->gpr[rd] | ~regs->gpr[rb];
2091			goto logical_done;
2092
2093		case 444:	/* or */
2094			op->val = regs->gpr[rd] | regs->gpr[rb];
2095			goto logical_done;
2096
2097		case 476:	/* nand */
2098			op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
2099			goto logical_done;
2100#ifdef CONFIG_PPC64
2101		case 506:	/* popcntd */
2102			do_popcnt(regs, op, regs->gpr[rd], 64);
2103			goto logical_done_nocc;
2104#endif
2105		case 538:	/* cnttzw */
2106			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2107				goto unknown_opcode;
2108			val = (unsigned int) regs->gpr[rd];
2109			op->val = (val ? __builtin_ctz(val) : 32);
2110			goto logical_done;
2111#ifdef __powerpc64__
2112		case 570:	/* cnttzd */
2113			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2114				goto unknown_opcode;
2115			val = regs->gpr[rd];
2116			op->val = (val ? __builtin_ctzl(val) : 64);
2117			goto logical_done;
2118#endif
2119		case 922:	/* extsh */
2120			op->val = (signed short) regs->gpr[rd];
2121			goto logical_done;
2122
2123		case 954:	/* extsb */
2124			op->val = (signed char) regs->gpr[rd];
2125			goto logical_done;
2126#ifdef __powerpc64__
2127		case 986:	/* extsw */
2128			op->val = (signed int) regs->gpr[rd];
2129			goto logical_done;
2130#endif
2131
2132/*
2133 * Shift instructions
2134 */
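/*
 * For the algebraic right shifts below, XER[CA] is set only when the
 * source is negative and one bits are shifted out, i.e. when the result
 * is not an exact divide by 2^sh.  Example: sraw of -5 by 1 gives -3
 * with CA set, so a following addze produces -2, the truncated quotient.
 */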
2135		case 24:	/* slw */
2136			sh = regs->gpr[rb] & 0x3f;
2137			if (sh < 32)
2138				op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
2139			else
2140				op->val = 0;
2141			goto logical_done;
2142
2143		case 536:	/* srw */
2144			sh = regs->gpr[rb] & 0x3f;
2145			if (sh < 32)
2146				op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
2147			else
2148				op->val = 0;
2149			goto logical_done;
2150
2151		case 792:	/* sraw */
2152			op->type = COMPUTE + SETREG + SETXER;
2153			sh = regs->gpr[rb] & 0x3f;
2154			ival = (signed int) regs->gpr[rd];
2155			op->val = ival >> (sh < 32 ? sh : 31);
2156			op->xerval = regs->xer;
2157			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
2158				op->xerval |= XER_CA;
2159			else
2160				op->xerval &= ~XER_CA;
2161			set_ca32(op, op->xerval & XER_CA);
2162			goto logical_done;
2163
2164		case 824:	/* srawi */
2165			op->type = COMPUTE + SETREG + SETXER;
2166			sh = rb;
2167			ival = (signed int) regs->gpr[rd];
2168			op->val = ival >> sh;
2169			op->xerval = regs->xer;
2170			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2171				op->xerval |= XER_CA;
2172			else
2173				op->xerval &= ~XER_CA;
2174			set_ca32(op, op->xerval & XER_CA);
2175			goto logical_done;
2176
2177#ifdef __powerpc64__
2178		case 27:	/* sld */
2179			sh = regs->gpr[rb] & 0x7f;
2180			if (sh < 64)
2181				op->val = regs->gpr[rd] << sh;
2182			else
2183				op->val = 0;
2184			goto logical_done;
2185
2186		case 539:	/* srd */
2187			sh = regs->gpr[rb] & 0x7f;
2188			if (sh < 64)
2189				op->val = regs->gpr[rd] >> sh;
2190			else
2191				op->val = 0;
2192			goto logical_done;
2193
2194		case 794:	/* srad */
2195			op->type = COMPUTE + SETREG + SETXER;
2196			sh = regs->gpr[rb] & 0x7f;
2197			ival = (signed long int) regs->gpr[rd];
2198			op->val = ival >> (sh < 64 ? sh : 63);
2199			op->xerval = regs->xer;
2200			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
2201				op->xerval |= XER_CA;
2202			else
2203				op->xerval &= ~XER_CA;
2204			set_ca32(op, op->xerval & XER_CA);
2205			goto logical_done;
2206
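		/*
		 * For sradi (and extswsli below) the 6-bit shift amount is
		 * split: sh[0:4] occupy the RB field and sh[5] is bit 1 of
		 * the instruction, hence the paired case values and the
		 * (word & 2) << 4 reconstruction.
		 */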
2207		case 826:	/* sradi with sh_5 = 0 */
2208		case 827:	/* sradi with sh_5 = 1 */
2209			op->type = COMPUTE + SETREG + SETXER;
2210			sh = rb | ((word & 2) << 4);
2211			ival = (signed long int) regs->gpr[rd];
2212			op->val = ival >> sh;
2213			op->xerval = regs->xer;
2214			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2215				op->xerval |= XER_CA;
2216			else
2217				op->xerval &= ~XER_CA;
2218			set_ca32(op, op->xerval & XER_CA);
2219			goto logical_done;
2220
2221		case 890:	/* extswsli with sh_5 = 0 */
2222		case 891:	/* extswsli with sh_5 = 1 */
2223			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2224				goto unknown_opcode;
2225			op->type = COMPUTE + SETREG;
2226			sh = rb | ((word & 2) << 4);
2227			val = (signed int) regs->gpr[rd];
2228			if (sh)
2229				op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
2230			else
2231				op->val = val;
2232			goto logical_done;
2233
2234#endif /* __powerpc64__ */
2235
2236/*
2237 * Cache instructions
2238 */
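/*
 * The cache cases below only classify the operation and compute the
 * effective address; analyse_instr() returns 0 for them and the actual
 * cache operation is performed by the CACHEOP case in emulate_step()
 * further down.
 */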
2239		case 54:	/* dcbst */
2240			op->type = MKOP(CACHEOP, DCBST, 0);
2241			op->ea = xform_ea(word, regs);
2242			return 0;
2243
2244		case 86:	/* dcbf */
2245			op->type = MKOP(CACHEOP, DCBF, 0);
2246			op->ea = xform_ea(word, regs);
2247			return 0;
2248
2249		case 246:	/* dcbtst */
2250			op->type = MKOP(CACHEOP, DCBTST, 0);
2251			op->ea = xform_ea(word, regs);
2252			op->reg = rd;
2253			return 0;
2254
2255		case 278:	/* dcbt */
2256			op->type = MKOP(CACHEOP, DCBT, 0);
2257			op->ea = xform_ea(word, regs);
2258			op->reg = rd;
2259			return 0;
2260
2261		case 982:	/* icbi */
2262			op->type = MKOP(CACHEOP, ICBI, 0);
2263			op->ea = xform_ea(word, regs);
2264			return 0;
2265
2266		case 1014:	/* dcbz */
2267			op->type = MKOP(CACHEOP, DCBZ, 0);
2268			op->ea = xform_ea(word, regs);
2269			return 0;
2270		}
2271		break;
2272	}
2273
2274/*
2275 * Loads and stores.
2276 */
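/*
 * The update ("u") variants are detected uniformly below: UPDATE (0x40)
 * is both the D-form primary opcode's low bit shifted down by 20 (e.g.
 * lwz = 32 vs lwzu = 33) and bit 6 of the extended opcode for the
 * opcode-31 X-forms (e.g. lwzx = 23 vs lwzux = 55).
 */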
2277	op->type = UNKNOWN;
2278	op->update_reg = ra;
2279	op->reg = rd;
2280	op->val = regs->gpr[rd];
2281	u = (word >> 20) & UPDATE;
2282	op->vsx_flags = 0;
2283
2284	switch (opcode) {
2285	case 31:
2286		u = word & UPDATE;
2287		op->ea = xform_ea(word, regs);
2288		switch ((word >> 1) & 0x3ff) {
2289		case 20:	/* lwarx */
2290			op->type = MKOP(LARX, 0, 4);
2291			break;
2292
2293		case 150:	/* stwcx. */
2294			op->type = MKOP(STCX, 0, 4);
2295			break;
2296
2297#ifdef CONFIG_PPC_HAS_LBARX_LHARX
2298		case 52:	/* lbarx */
2299			op->type = MKOP(LARX, 0, 1);
2300			break;
2301
2302		case 694:	/* stbcx. */
2303			op->type = MKOP(STCX, 0, 1);
2304			break;
2305
2306		case 116:	/* lharx */
2307			op->type = MKOP(LARX, 0, 2);
2308			break;
2309
2310		case 726:	/* sthcx. */
2311			op->type = MKOP(STCX, 0, 2);
2312			break;
2313#endif
2314#ifdef __powerpc64__
2315		case 84:	/* ldarx */
2316			op->type = MKOP(LARX, 0, 8);
2317			break;
2318
2319		case 214:	/* stdcx. */
2320			op->type = MKOP(STCX, 0, 8);
2321			break;
2322
2323		case 276:	/* lqarx */
2324			if (!((rd & 1) || rd == ra || rd == rb))
2325				op->type = MKOP(LARX, 0, 16);
2326			break;
2327
2328		case 182:	/* stqcx. */
2329			if (!(rd & 1))
2330				op->type = MKOP(STCX, 0, 16);
2331			break;
2332#endif
2333
2334		case 23:	/* lwzx */
2335		case 55:	/* lwzux */
2336			op->type = MKOP(LOAD, u, 4);
2337			break;
2338
2339		case 87:	/* lbzx */
2340		case 119:	/* lbzux */
2341			op->type = MKOP(LOAD, u, 1);
2342			break;
2343
2344#ifdef CONFIG_ALTIVEC
2345		/*
2346		 * Note: for the load/store vector element instructions,
2347		 * bits of the EA say which field of the VMX register to use.
2348		 */
2349		case 7:		/* lvebx */
2350			op->type = MKOP(LOAD_VMX, 0, 1);
2351			op->element_size = 1;
2352			break;
2353
2354		case 39:	/* lvehx */
2355			op->type = MKOP(LOAD_VMX, 0, 2);
2356			op->element_size = 2;
2357			break;
2358
2359		case 71:	/* lvewx */
2360			op->type = MKOP(LOAD_VMX, 0, 4);
2361			op->element_size = 4;
2362			break;
2363
2364		case 103:	/* lvx */
2365		case 359:	/* lvxl */
2366			op->type = MKOP(LOAD_VMX, 0, 16);
2367			op->element_size = 16;
2368			break;
2369
2370		case 135:	/* stvebx */
2371			op->type = MKOP(STORE_VMX, 0, 1);
2372			op->element_size = 1;
2373			break;
2374
2375		case 167:	/* stvehx */
2376			op->type = MKOP(STORE_VMX, 0, 2);
2377			op->element_size = 2;
2378			break;
2379
2380		case 199:	/* stvewx */
2381			op->type = MKOP(STORE_VMX, 0, 4);
2382			op->element_size = 4;
2383			break;
2384
2385		case 231:	/* stvx */
2386		case 487:	/* stvxl */
2387			op->type = MKOP(STORE_VMX, 0, 16);
2388			break;
2389#endif /* CONFIG_ALTIVEC */
2390
2391#ifdef __powerpc64__
2392		case 21:	/* ldx */
2393		case 53:	/* ldux */
2394			op->type = MKOP(LOAD, u, 8);
2395			break;
2396
2397		case 149:	/* stdx */
2398		case 181:	/* stdux */
2399			op->type = MKOP(STORE, u, 8);
2400			break;
2401#endif
2402
2403		case 151:	/* stwx */
2404		case 183:	/* stwux */
2405			op->type = MKOP(STORE, u, 4);
2406			break;
2407
2408		case 215:	/* stbx */
2409		case 247:	/* stbux */
2410			op->type = MKOP(STORE, u, 1);
2411			break;
2412
2413		case 279:	/* lhzx */
2414		case 311:	/* lhzux */
2415			op->type = MKOP(LOAD, u, 2);
2416			break;
2417
2418#ifdef __powerpc64__
2419		case 341:	/* lwax */
2420		case 373:	/* lwaux */
2421			op->type = MKOP(LOAD, SIGNEXT | u, 4);
2422			break;
2423#endif
2424
2425		case 343:	/* lhax */
2426		case 375:	/* lhaux */
2427			op->type = MKOP(LOAD, SIGNEXT | u, 2);
2428			break;
2429
2430		case 407:	/* sthx */
2431		case 439:	/* sthux */
2432			op->type = MKOP(STORE, u, 2);
2433			break;
2434
2435#ifdef __powerpc64__
2436		case 532:	/* ldbrx */
2437			op->type = MKOP(LOAD, BYTEREV, 8);
2438			break;
2439
2440#endif
2441		case 533:	/* lswx */
2442			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
2443			break;
2444
2445		case 534:	/* lwbrx */
2446			op->type = MKOP(LOAD, BYTEREV, 4);
2447			break;
2448
2449		case 597:	/* lswi */
2450			if (rb == 0)
2451				rb = 32;	/* # bytes to load */
2452			op->type = MKOP(LOAD_MULTI, 0, rb);
2453			op->ea = ra ? regs->gpr[ra] : 0;
2454			break;
2455
2456#ifdef CONFIG_PPC_FPU
2457		case 535:	/* lfsx */
2458		case 567:	/* lfsux */
2459			op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2460			break;
2461
2462		case 599:	/* lfdx */
2463		case 631:	/* lfdux */
2464			op->type = MKOP(LOAD_FP, u, 8);
2465			break;
2466
2467		case 663:	/* stfsx */
2468		case 695:	/* stfsux */
2469			op->type = MKOP(STORE_FP, u | FPCONV, 4);
2470			break;
2471
2472		case 727:	/* stfdx */
2473		case 759:	/* stfdux */
2474			op->type = MKOP(STORE_FP, u, 8);
2475			break;
2476
2477#ifdef __powerpc64__
2478		case 791:	/* lfdpx */
2479			op->type = MKOP(LOAD_FP, 0, 16);
2480			break;
2481
2482		case 855:	/* lfiwax */
2483			op->type = MKOP(LOAD_FP, SIGNEXT, 4);
2484			break;
2485
2486		case 887:	/* lfiwzx */
2487			op->type = MKOP(LOAD_FP, 0, 4);
2488			break;
2489
2490		case 919:	/* stfdpx */
2491			op->type = MKOP(STORE_FP, 0, 16);
2492			break;
2493
2494		case 983:	/* stfiwx */
2495			op->type = MKOP(STORE_FP, 0, 4);
2496			break;
2497#endif /* __powerpc64 */
2498#endif /* CONFIG_PPC_FPU */
2499
2500#ifdef __powerpc64__
2501		case 660:	/* stdbrx */
2502			op->type = MKOP(STORE, BYTEREV, 8);
2503			op->val = byterev_8(regs->gpr[rd]);
2504			break;
2505
2506#endif
2507		case 661:	/* stswx */
2508			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2509			break;
2510
2511		case 662:	/* stwbrx */
2512			op->type = MKOP(STORE, BYTEREV, 4);
2513			op->val = byterev_4(regs->gpr[rd]);
2514			break;
2515
2516		case 725:	/* stswi */
2517			if (rb == 0)
2518				rb = 32;	/* # bytes to store */
2519			op->type = MKOP(STORE_MULTI, 0, rb);
2520			op->ea = ra ? regs->gpr[ra] : 0;
2521			break;
2522
2523		case 790:	/* lhbrx */
2524			op->type = MKOP(LOAD, BYTEREV, 2);
2525			break;
2526
2527		case 918:	/* sthbrx */
2528			op->type = MKOP(STORE, BYTEREV, 2);
2529			op->val = byterev_2(regs->gpr[rd]);
2530			break;
2531
2532#ifdef CONFIG_VSX
2533		case 12:	/* lxsiwzx */
2534			op->reg = rd | ((word & 1) << 5);
2535			op->type = MKOP(LOAD_VSX, 0, 4);
2536			op->element_size = 8;
2537			break;
2538
2539		case 76:	/* lxsiwax */
2540			op->reg = rd | ((word & 1) << 5);
2541			op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2542			op->element_size = 8;
2543			break;
2544
2545		case 140:	/* stxsiwx */
2546			op->reg = rd | ((word & 1) << 5);
2547			op->type = MKOP(STORE_VSX, 0, 4);
2548			op->element_size = 8;
2549			break;
2550
2551		case 268:	/* lxvx */
2552			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2553				goto unknown_opcode;
2554			op->reg = rd | ((word & 1) << 5);
2555			op->type = MKOP(LOAD_VSX, 0, 16);
2556			op->element_size = 16;
2557			op->vsx_flags = VSX_CHECK_VEC;
2558			break;
2559
2560		case 269:	/* lxvl */
2561		case 301: {	/* lxvll */
2562			int nb;
2563			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2564				goto unknown_opcode;
2565			op->reg = rd | ((word & 1) << 5);
2566			op->ea = ra ? regs->gpr[ra] : 0;
2567			nb = regs->gpr[rb] & 0xff;
2568			if (nb > 16)
2569				nb = 16;
2570			op->type = MKOP(LOAD_VSX, 0, nb);
2571			op->element_size = 16;
2572			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2573				VSX_CHECK_VEC;
2574			break;
2575		}
2576		case 332:	/* lxvdsx */
2577			op->reg = rd | ((word & 1) << 5);
2578			op->type = MKOP(LOAD_VSX, 0, 8);
2579			op->element_size = 8;
2580			op->vsx_flags = VSX_SPLAT;
2581			break;
2582
2583		case 333:       /* lxvpx */
2584			if (!cpu_has_feature(CPU_FTR_ARCH_31))
2585				goto unknown_opcode;
2586			op->reg = VSX_REGISTER_XTP(rd);
2587			op->type = MKOP(LOAD_VSX, 0, 32);
2588			op->element_size = 32;
2589			break;
2590
2591		case 364:	/* lxvwsx */
2592			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2593				goto unknown_opcode;
2594			op->reg = rd | ((word & 1) << 5);
2595			op->type = MKOP(LOAD_VSX, 0, 4);
2596			op->element_size = 4;
2597			op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2598			break;
2599
2600		case 396:	/* stxvx */
2601			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2602				goto unknown_opcode;
2603			op->reg = rd | ((word & 1) << 5);
2604			op->type = MKOP(STORE_VSX, 0, 16);
2605			op->element_size = 16;
2606			op->vsx_flags = VSX_CHECK_VEC;
2607			break;
2608
2609		case 397:	/* stxvl */
2610		case 429: {	/* stxvll */
2611			int nb;
2612			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2613				goto unknown_opcode;
2614			op->reg = rd | ((word & 1) << 5);
2615			op->ea = ra ? regs->gpr[ra] : 0;
2616			nb = regs->gpr[rb] & 0xff;
2617			if (nb > 16)
2618				nb = 16;
2619			op->type = MKOP(STORE_VSX, 0, nb);
2620			op->element_size = 16;
2621			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2622				VSX_CHECK_VEC;
2623			break;
2624		}
2625		case 461:       /* stxvpx */
2626			if (!cpu_has_feature(CPU_FTR_ARCH_31))
2627				goto unknown_opcode;
2628			op->reg = VSX_REGISTER_XTP(rd);
2629			op->type = MKOP(STORE_VSX, 0, 32);
2630			op->element_size = 32;
2631			break;
2632		case 524:	/* lxsspx */
2633			op->reg = rd | ((word & 1) << 5);
2634			op->type = MKOP(LOAD_VSX, 0, 4);
2635			op->element_size = 8;
2636			op->vsx_flags = VSX_FPCONV;
2637			break;
2638
2639		case 588:	/* lxsdx */
2640			op->reg = rd | ((word & 1) << 5);
2641			op->type = MKOP(LOAD_VSX, 0, 8);
2642			op->element_size = 8;
2643			break;
2644
2645		case 652:	/* stxsspx */
2646			op->reg = rd | ((word & 1) << 5);
2647			op->type = MKOP(STORE_VSX, 0, 4);
2648			op->element_size = 8;
2649			op->vsx_flags = VSX_FPCONV;
2650			break;
2651
2652		case 716:	/* stxsdx */
2653			op->reg = rd | ((word & 1) << 5);
2654			op->type = MKOP(STORE_VSX, 0, 8);
2655			op->element_size = 8;
2656			break;
2657
2658		case 780:	/* lxvw4x */
2659			op->reg = rd | ((word & 1) << 5);
2660			op->type = MKOP(LOAD_VSX, 0, 16);
2661			op->element_size = 4;
2662			break;
2663
2664		case 781:	/* lxsibzx */
2665			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2666				goto unknown_opcode;
2667			op->reg = rd | ((word & 1) << 5);
2668			op->type = MKOP(LOAD_VSX, 0, 1);
2669			op->element_size = 8;
2670			op->vsx_flags = VSX_CHECK_VEC;
2671			break;
2672
2673		case 812:	/* lxvh8x */
2674			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2675				goto unknown_opcode;
2676			op->reg = rd | ((word & 1) << 5);
2677			op->type = MKOP(LOAD_VSX, 0, 16);
2678			op->element_size = 2;
2679			op->vsx_flags = VSX_CHECK_VEC;
2680			break;
2681
2682		case 813:	/* lxsihzx */
2683			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2684				goto unknown_opcode;
2685			op->reg = rd | ((word & 1) << 5);
2686			op->type = MKOP(LOAD_VSX, 0, 2);
2687			op->element_size = 8;
2688			op->vsx_flags = VSX_CHECK_VEC;
2689			break;
2690
2691		case 844:	/* lxvd2x */
2692			op->reg = rd | ((word & 1) << 5);
2693			op->type = MKOP(LOAD_VSX, 0, 16);
2694			op->element_size = 8;
2695			break;
2696
2697		case 876:	/* lxvb16x */
2698			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2699				goto unknown_opcode;
2700			op->reg = rd | ((word & 1) << 5);
2701			op->type = MKOP(LOAD_VSX, 0, 16);
2702			op->element_size = 1;
2703			op->vsx_flags = VSX_CHECK_VEC;
2704			break;
2705
2706		case 908:	/* stxvw4x */
2707			op->reg = rd | ((word & 1) << 5);
2708			op->type = MKOP(STORE_VSX, 0, 16);
2709			op->element_size = 4;
2710			break;
2711
2712		case 909:	/* stxsibx */
2713			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2714				goto unknown_opcode;
2715			op->reg = rd | ((word & 1) << 5);
2716			op->type = MKOP(STORE_VSX, 0, 1);
2717			op->element_size = 8;
2718			op->vsx_flags = VSX_CHECK_VEC;
2719			break;
2720
2721		case 940:	/* stxvh8x */
2722			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2723				goto unknown_opcode;
2724			op->reg = rd | ((word & 1) << 5);
2725			op->type = MKOP(STORE_VSX, 0, 16);
2726			op->element_size = 2;
2727			op->vsx_flags = VSX_CHECK_VEC;
2728			break;
2729
2730		case 941:	/* stxsihx */
2731			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2732				goto unknown_opcode;
2733			op->reg = rd | ((word & 1) << 5);
2734			op->type = MKOP(STORE_VSX, 0, 2);
2735			op->element_size = 8;
2736			op->vsx_flags = VSX_CHECK_VEC;
2737			break;
2738
2739		case 972:	/* stxvd2x */
2740			op->reg = rd | ((word & 1) << 5);
2741			op->type = MKOP(STORE_VSX, 0, 16);
2742			op->element_size = 8;
2743			break;
2744
2745		case 1004:	/* stxvb16x */
2746			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2747				goto unknown_opcode;
2748			op->reg = rd | ((word & 1) << 5);
2749			op->type = MKOP(STORE_VSX, 0, 16);
2750			op->element_size = 1;
2751			op->vsx_flags = VSX_CHECK_VEC;
2752			break;
2753
2754#endif /* CONFIG_VSX */
2755		}
2756		break;
2757
2758	case 32:	/* lwz */
2759	case 33:	/* lwzu */
2760		op->type = MKOP(LOAD, u, 4);
2761		op->ea = dform_ea(word, regs);
2762		break;
2763
2764	case 34:	/* lbz */
2765	case 35:	/* lbzu */
2766		op->type = MKOP(LOAD, u, 1);
2767		op->ea = dform_ea(word, regs);
2768		break;
2769
2770	case 36:	/* stw */
2771	case 37:	/* stwu */
2772		op->type = MKOP(STORE, u, 4);
2773		op->ea = dform_ea(word, regs);
2774		break;
2775
2776	case 38:	/* stb */
2777	case 39:	/* stbu */
2778		op->type = MKOP(STORE, u, 1);
2779		op->ea = dform_ea(word, regs);
2780		break;
2781
2782	case 40:	/* lhz */
2783	case 41:	/* lhzu */
2784		op->type = MKOP(LOAD, u, 2);
2785		op->ea = dform_ea(word, regs);
2786		break;
2787
2788	case 42:	/* lha */
2789	case 43:	/* lhau */
2790		op->type = MKOP(LOAD, SIGNEXT | u, 2);
2791		op->ea = dform_ea(word, regs);
2792		break;
2793
2794	case 44:	/* sth */
2795	case 45:	/* sthu */
2796		op->type = MKOP(STORE, u, 2);
2797		op->ea = dform_ea(word, regs);
2798		break;
2799
2800	case 46:	/* lmw */
2801		if (ra >= rd)
2802			break;		/* invalid form, ra in range to load */
2803		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2804		op->ea = dform_ea(word, regs);
2805		break;
2806
2807	case 47:	/* stmw */
2808		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2809		op->ea = dform_ea(word, regs);
2810		break;
2811
2812#ifdef CONFIG_PPC_FPU
2813	case 48:	/* lfs */
2814	case 49:	/* lfsu */
2815		op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2816		op->ea = dform_ea(word, regs);
2817		break;
2818
2819	case 50:	/* lfd */
2820	case 51:	/* lfdu */
2821		op->type = MKOP(LOAD_FP, u, 8);
2822		op->ea = dform_ea(word, regs);
2823		break;
2824
2825	case 52:	/* stfs */
2826	case 53:	/* stfsu */
2827		op->type = MKOP(STORE_FP, u | FPCONV, 4);
2828		op->ea = dform_ea(word, regs);
2829		break;
2830
2831	case 54:	/* stfd */
2832	case 55:	/* stfdu */
2833		op->type = MKOP(STORE_FP, u, 8);
2834		op->ea = dform_ea(word, regs);
2835		break;
2836#endif
2837
2838#ifdef __powerpc64__
2839	case 56:	/* lq */
2840		if (!((rd & 1) || (rd == ra)))
2841			op->type = MKOP(LOAD, 0, 16);
2842		op->ea = dqform_ea(word, regs);
2843		break;
2844#endif
2845
2846#ifdef CONFIG_VSX
2847	case 57:	/* lfdp, lxsd, lxssp */
2848		op->ea = dsform_ea(word, regs);
2849		switch (word & 3) {
2850		case 0:		/* lfdp */
2851			if (rd & 1)
2852				break;		/* reg must be even */
2853			op->type = MKOP(LOAD_FP, 0, 16);
2854			break;
2855		case 2:		/* lxsd */
2856			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2857				goto unknown_opcode;
2858			op->reg = rd + 32;
2859			op->type = MKOP(LOAD_VSX, 0, 8);
2860			op->element_size = 8;
2861			op->vsx_flags = VSX_CHECK_VEC;
2862			break;
2863		case 3:		/* lxssp */
2864			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2865				goto unknown_opcode;
2866			op->reg = rd + 32;
2867			op->type = MKOP(LOAD_VSX, 0, 4);
2868			op->element_size = 8;
2869			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2870			break;
2871		}
2872		break;
2873#endif /* CONFIG_VSX */
2874
2875#ifdef __powerpc64__
2876	case 58:	/* ld[u], lwa */
2877		op->ea = dsform_ea(word, regs);
2878		switch (word & 3) {
2879		case 0:		/* ld */
2880			op->type = MKOP(LOAD, 0, 8);
2881			break;
2882		case 1:		/* ldu */
2883			op->type = MKOP(LOAD, UPDATE, 8);
2884			break;
2885		case 2:		/* lwa */
2886			op->type = MKOP(LOAD, SIGNEXT, 4);
2887			break;
2888		}
2889		break;
2890#endif
2891
2892#ifdef CONFIG_VSX
2893	case 6:
2894		if (!cpu_has_feature(CPU_FTR_ARCH_31))
2895			goto unknown_opcode;
2896		op->ea = dqform_ea(word, regs);
2897		op->reg = VSX_REGISTER_XTP(rd);
2898		op->element_size = 32;
2899		switch (word & 0xf) {
2900		case 0:         /* lxvp */
2901			op->type = MKOP(LOAD_VSX, 0, 32);
2902			break;
2903		case 1:         /* stxvp */
2904			op->type = MKOP(STORE_VSX, 0, 32);
2905			break;
2906		}
2907		break;
2908
2909	case 61:	/* stfdp, lxv, stxsd, stxssp, stxv */
2910		switch (word & 7) {
2911		case 0:		/* stfdp with LSB of DS field = 0 */
2912		case 4:		/* stfdp with LSB of DS field = 1 */
2913			op->ea = dsform_ea(word, regs);
2914			op->type = MKOP(STORE_FP, 0, 16);
2915			break;
2916
2917		case 1:		/* lxv */
2918			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2919				goto unknown_opcode;
2920			op->ea = dqform_ea(word, regs);
2921			if (word & 8)
2922				op->reg = rd + 32;
2923			op->type = MKOP(LOAD_VSX, 0, 16);
2924			op->element_size = 16;
2925			op->vsx_flags = VSX_CHECK_VEC;
2926			break;
2927
2928		case 2:		/* stxsd with LSB of DS field = 0 */
2929		case 6:		/* stxsd with LSB of DS field = 1 */
2930			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2931				goto unknown_opcode;
2932			op->ea = dsform_ea(word, regs);
2933			op->reg = rd + 32;
2934			op->type = MKOP(STORE_VSX, 0, 8);
2935			op->element_size = 8;
2936			op->vsx_flags = VSX_CHECK_VEC;
2937			break;
2938
2939		case 3:		/* stxssp with LSB of DS field = 0 */
2940		case 7:		/* stxssp with LSB of DS field = 1 */
2941			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2942				goto unknown_opcode;
2943			op->ea = dsform_ea(word, regs);
2944			op->reg = rd + 32;
2945			op->type = MKOP(STORE_VSX, 0, 4);
2946			op->element_size = 8;
2947			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2948			break;
2949
2950		case 5:		/* stxv */
2951			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2952				goto unknown_opcode;
2953			op->ea = dqform_ea(word, regs);
2954			if (word & 8)
2955				op->reg = rd + 32;
2956			op->type = MKOP(STORE_VSX, 0, 16);
2957			op->element_size = 16;
2958			op->vsx_flags = VSX_CHECK_VEC;
2959			break;
2960		}
2961		break;
2962#endif /* CONFIG_VSX */
2963
2964#ifdef __powerpc64__
2965	case 62:	/* std[u] */
2966		op->ea = dsform_ea(word, regs);
2967		switch (word & 3) {
2968		case 0:		/* std */
2969			op->type = MKOP(STORE, 0, 8);
2970			break;
2971		case 1:		/* stdu */
2972			op->type = MKOP(STORE, UPDATE, 8);
2973			break;
2974		case 2:		/* stq */
2975			if (!(rd & 1))
2976				op->type = MKOP(STORE, 0, 16);
2977			break;
2978		}
2979		break;
2980	case 1: /* Prefixed instructions */
2981		if (!cpu_has_feature(CPU_FTR_ARCH_31))
2982			goto unknown_opcode;
2983
2984		prefix_r = GET_PREFIX_R(word);
2985		ra = GET_PREFIX_RA(suffix);
2986		op->update_reg = ra;
2987		rd = (suffix >> 21) & 0x1f;
2988		op->reg = rd;
2989		op->val = regs->gpr[rd];
2990
2991		suffixopcode = get_op(suffix);
2992		prefixtype = (word >> 24) & 0x3;
2993		switch (prefixtype) {
2994		case 0: /* Type 00  Eight-Byte Load/Store */
2995			if (prefix_r && ra)
2996				break;
2997			op->ea = mlsd_8lsd_ea(word, suffix, regs);
2998			switch (suffixopcode) {
2999			case 41:	/* plwa */
3000				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
3001				break;
3002#ifdef CONFIG_VSX
3003			case 42:        /* plxsd */
3004				op->reg = rd + 32;
3005				op->type = MKOP(LOAD_VSX, PREFIXED, 8);
3006				op->element_size = 8;
3007				op->vsx_flags = VSX_CHECK_VEC;
3008				break;
3009			case 43:	/* plxssp */
3010				op->reg = rd + 32;
3011				op->type = MKOP(LOAD_VSX, PREFIXED, 4);
3012				op->element_size = 8;
3013				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
3014				break;
3015			case 46:	/* pstxsd */
3016				op->reg = rd + 32;
3017				op->type = MKOP(STORE_VSX, PREFIXED, 8);
3018				op->element_size = 8;
3019				op->vsx_flags = VSX_CHECK_VEC;
3020				break;
3021			case 47:	/* pstxssp */
3022				op->reg = rd + 32;
3023				op->type = MKOP(STORE_VSX, PREFIXED, 4);
3024				op->element_size = 8;
3025				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
3026				break;
3027			case 51:	/* plxv1 */
3028				op->reg += 32;
3029				fallthrough;
3030			case 50:	/* plxv0 */
3031				op->type = MKOP(LOAD_VSX, PREFIXED, 16);
3032				op->element_size = 16;
3033				op->vsx_flags = VSX_CHECK_VEC;
3034				break;
3035			case 55:	/* pstxv1 */
3036				op->reg = rd + 32;
3037				fallthrough;
3038			case 54:	/* pstxv0 */
3039				op->type = MKOP(STORE_VSX, PREFIXED, 16);
3040				op->element_size = 16;
3041				op->vsx_flags = VSX_CHECK_VEC;
3042				break;
3043#endif /* CONFIG_VSX */
3044			case 56:        /* plq */
3045				op->type = MKOP(LOAD, PREFIXED, 16);
3046				break;
3047			case 57:	/* pld */
3048				op->type = MKOP(LOAD, PREFIXED, 8);
3049				break;
3050#ifdef CONFIG_VSX
3051			case 58:        /* plxvp */
3052				op->reg = VSX_REGISTER_XTP(rd);
3053				op->type = MKOP(LOAD_VSX, PREFIXED, 32);
3054				op->element_size = 32;
3055				break;
3056#endif /* CONFIG_VSX */
3057			case 60:        /* pstq */
3058				op->type = MKOP(STORE, PREFIXED, 16);
3059				break;
3060			case 61:	/* pstd */
3061				op->type = MKOP(STORE, PREFIXED, 8);
3062				break;
3063#ifdef CONFIG_VSX
3064			case 62:        /* pstxvp */
3065				op->reg = VSX_REGISTER_XTP(rd);
3066				op->type = MKOP(STORE_VSX, PREFIXED, 32);
3067				op->element_size = 32;
3068				break;
3069#endif /* CONFIG_VSX */
3070			}
3071			break;
3072		case 1: /* Type 01 Eight-Byte Register-to-Register */
3073			break;
3074		case 2: /* Type 10 Modified Load/Store */
3075			if (prefix_r && ra)
3076				break;
3077			op->ea = mlsd_8lsd_ea(word, suffix, regs);
3078			switch (suffixopcode) {
3079			case 32:	/* plwz */
3080				op->type = MKOP(LOAD, PREFIXED, 4);
3081				break;
3082			case 34:	/* plbz */
3083				op->type = MKOP(LOAD, PREFIXED, 1);
3084				break;
3085			case 36:	/* pstw */
3086				op->type = MKOP(STORE, PREFIXED, 4);
3087				break;
3088			case 38:	/* pstb */
3089				op->type = MKOP(STORE, PREFIXED, 1);
3090				break;
3091			case 40:	/* plhz */
3092				op->type = MKOP(LOAD, PREFIXED, 2);
3093				break;
3094			case 42:	/* plha */
3095				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
3096				break;
3097			case 44:	/* psth */
3098				op->type = MKOP(STORE, PREFIXED, 2);
3099				break;
3100			case 48:        /* plfs */
3101				op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
3102				break;
3103			case 50:        /* plfd */
3104				op->type = MKOP(LOAD_FP, PREFIXED, 8);
3105				break;
3106			case 52:        /* pstfs */
3107				op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
3108				break;
3109			case 54:        /* pstfd */
3110				op->type = MKOP(STORE_FP, PREFIXED, 8);
3111				break;
3112			}
3113			break;
3114		case 3: /* Type 11 Modified Register-to-Register */
3115			break;
3116		}
3117#endif /* __powerpc64__ */
3118
3119	}
3120
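	/*
	 * Reject invalid update forms: the ISA makes load-with-update
	 * undefined for RA == RT or RA == 0, and the other update forms
	 * undefined for RA == 0.
	 */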
3121	if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
3122		switch (GETTYPE(op->type)) {
3123		case LOAD:
3124			if (ra == rd)
3125				goto unknown_opcode;
3126			fallthrough;
3127		case STORE:
3128		case LOAD_FP:
3129		case STORE_FP:
3130			if (ra == 0)
3131				goto unknown_opcode;
3132		}
3133	}
3134
3135#ifdef CONFIG_VSX
3136	if ((GETTYPE(op->type) == LOAD_VSX ||
3137	     GETTYPE(op->type) == STORE_VSX) &&
3138	    !cpu_has_feature(CPU_FTR_VSX)) {
3139		return -1;
3140	}
3141#endif /* CONFIG_VSX */
3142
3143	return 0;
3144
3145 unknown_opcode:
3146	op->type = UNKNOWN;
3147	return 0;
3148
3149 logical_done:
3150	if (word & 1)
3151		set_cr0(regs, op);
3152 logical_done_nocc:
3153	op->reg = ra;
3154	op->type |= SETREG;
3155	return 1;
3156
3157 arith_done:
3158	if (word & 1)
3159		set_cr0(regs, op);
3160 compute_done:
3161	op->reg = rd;
3162	op->type |= SETREG;
3163	return 1;
3164
3165 priv:
3166	op->type = INTERRUPT | 0x700;
3167	op->val = SRR1_PROGPRIV;
3168	return 0;
3169
3170 trap:
3171	op->type = INTERRUPT | 0x700;
3172	op->val = SRR1_PROGTRAP;
3173	return 0;
3174}
3175EXPORT_SYMBOL_GPL(analyse_instr);
3176NOKPROBE_SYMBOL(analyse_instr);
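/*
 * A reminder of the contract, as consumed by emulate_step() below:
 * analyse_instr() returning 1 means the operation is complete apart
 * from updating *regs (pass the op to emulate_update_regs()); 0 means
 * more work such as a load/store or cache op remains; negative means
 * the instruction cannot be emulated.
 */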
3177
3178/*
3179 * On PPC32 the kernel always changes the stack pointer with stwu on r1,
3180 * so this emulated store could corrupt the exception frame that was
3181 * pushed below the kprobed function's stack.  To avoid that, only
3182 * gpr[1] is updated here and the store itself is not emulated; the
3183 * real store is done safely in the exception return code, which
3184 * checks this flag.
3185 */
3186static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
3187{
3188	/*
3189	 * Warn if the flag is already set: that would mean a previously
3190	 * recorded stack update is about to be lost.
3191	 */
3192	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
3193	set_thread_flag(TIF_EMULATE_STACK_STORE);
3194	return 0;
3195}
3196
3197static nokprobe_inline void do_signext(unsigned long *valp, int size)
3198{
3199	switch (size) {
3200	case 2:
3201		*valp = (signed short) *valp;
3202		break;
3203	case 4:
3204		*valp = (signed int) *valp;
3205		break;
3206	}
3207}
3208
3209static nokprobe_inline void do_byterev(unsigned long *valp, int size)
3210{
3211	switch (size) {
3212	case 2:
3213		*valp = byterev_2(*valp);
3214		break;
3215	case 4:
3216		*valp = byterev_4(*valp);
3217		break;
3218#ifdef __powerpc64__
3219	case 8:
3220		*valp = byterev_8(*valp);
3221		break;
3222#endif
3223	}
3224}
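/*
 * Example: do_byterev of 0x11223344 with size 4 yields 0x44332211;
 * this is how the BYTEREV flag implements the l/st*brx forms and the
 * cross-endian fixups in emulate_loadstore() below.
 */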
3225
3226/*
3227 * Emulate an instruction that can be executed just by updating
3228 * fields in *regs.
3229 */
3230void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
3231{
3232	unsigned long next_pc;
3233
3234	next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
3235	switch (GETTYPE(op->type)) {
3236	case COMPUTE:
3237		if (op->type & SETREG)
3238			regs->gpr[op->reg] = op->val;
3239		if (op->type & SETCC)
3240			regs->ccr = op->ccval;
3241		if (op->type & SETXER)
3242			regs->xer = op->xerval;
3243		break;
3244
3245	case BRANCH:
3246		if (op->type & SETLK)
3247			regs->link = next_pc;
3248		if (op->type & BRTAKEN)
3249			next_pc = op->val;
3250		if (op->type & DECCTR)
3251			--regs->ctr;
3252		break;
3253
3254	case BARRIER:
3255		switch (op->type & BARRIER_MASK) {
3256		case BARRIER_SYNC:
3257			mb();
3258			break;
3259		case BARRIER_ISYNC:
3260			isync();
3261			break;
3262		case BARRIER_EIEIO:
3263			eieio();
3264			break;
3265#ifdef CONFIG_PPC64
3266		case BARRIER_LWSYNC:
3267			asm volatile("lwsync" : : : "memory");
3268			break;
3269		case BARRIER_PTESYNC:
3270			asm volatile("ptesync" : : : "memory");
3271			break;
3272#endif
3273		}
3274		break;
3275
3276	case MFSPR:
3277		switch (op->spr) {
3278		case SPRN_XER:
3279			regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
3280			break;
3281		case SPRN_LR:
3282			regs->gpr[op->reg] = regs->link;
3283			break;
3284		case SPRN_CTR:
3285			regs->gpr[op->reg] = regs->ctr;
3286			break;
3287		default:
3288			WARN_ON_ONCE(1);
3289		}
3290		break;
3291
3292	case MTSPR:
3293		switch (op->spr) {
3294		case SPRN_XER:
3295			regs->xer = op->val & 0xffffffffUL;
3296			break;
3297		case SPRN_LR:
3298			regs->link = op->val;
3299			break;
3300		case SPRN_CTR:
3301			regs->ctr = op->val;
3302			break;
3303		default:
3304			WARN_ON_ONCE(1);
3305		}
3306		break;
3307
3308	default:
3309		WARN_ON_ONCE(1);
3310	}
3311	regs_set_return_ip(regs, next_pc);
3312}
3313NOKPROBE_SYMBOL(emulate_update_regs);
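/*
 * Example: for "mflr r3", analyse_instr() yields an op with
 * GETTYPE(op->type) == MFSPR, op->spr == SPRN_LR and op->reg == 3,
 * so the MFSPR case above copies regs->link into regs->gpr[3] before
 * next_pc is committed.
 */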
3314
3315/*
3316 * Emulate a previously-analysed load or store instruction.
3317 * Return values are:
3318 * 0 = instruction emulated successfully
3319 * -EFAULT = address out of range or access faulted (regs->dar
3320 *	     contains the faulting address)
3321 * -EACCES = misaligned access, instruction requires alignment
3322 * -EINVAL = unknown operation in *op
3323 */
3324int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
3325{
3326	int err, size, type;
3327	int i, rd, nb;
3328	unsigned int cr;
3329	unsigned long val;
3330	unsigned long ea;
3331	bool cross_endian;
3332
3333	err = 0;
3334	size = GETSIZE(op->type);
3335	type = GETTYPE(op->type);
3336	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
3337	ea = truncate_if_32bit(regs->msr, op->ea);
3338
3339	switch (type) {
3340	case LARX:
3341		if (ea & (size - 1))
3342			return -EACCES;		/* can't handle misaligned */
3343		if (!address_ok(regs, ea, size))
3344			return -EFAULT;
3345		err = 0;
3346		val = 0;
3347		switch (size) {
3348#ifdef CONFIG_PPC_HAS_LBARX_LHARX
3349		case 1:
3350			__get_user_asmx(val, ea, err, "lbarx");
3351			break;
3352		case 2:
3353			__get_user_asmx(val, ea, err, "lharx");
3354			break;
3355#endif
3356		case 4:
3357			__get_user_asmx(val, ea, err, "lwarx");
3358			break;
3359#ifdef __powerpc64__
3360		case 8:
3361			__get_user_asmx(val, ea, err, "ldarx");
3362			break;
3363		case 16:
3364			err = do_lqarx(ea, &regs->gpr[op->reg]);
3365			break;
3366#endif
3367		default:
3368			return -EINVAL;
3369		}
3370		if (err) {
3371			regs->dar = ea;
3372			break;
3373		}
3374		if (size < 16)
3375			regs->gpr[op->reg] = val;
3376		break;
3377
3378	case STCX:
3379		if (ea & (size - 1))
3380			return -EACCES;		/* can't handle misaligned */
3381		if (!address_ok(regs, ea, size))
3382			return -EFAULT;
3383		err = 0;
3384		switch (size) {
3385#ifdef __powerpc64__
3386		case 1:
3387			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
3388			break;
3389		case 2:
3390			__put_user_asmx(op->val, ea, err, "sthcx.", cr);
3391			break;
3392#endif
3393		case 4:
3394			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
3395			break;
3396#ifdef __powerpc64__
3397		case 8:
3398			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
3399			break;
3400		case 16:
3401			err = do_stqcx(ea, regs->gpr[op->reg],
3402				       regs->gpr[op->reg + 1], &cr);
3403			break;
3404#endif
3405		default:
3406			return -EINVAL;
3407		}
3408		if (!err)
3409			regs->ccr = (regs->ccr & 0x0fffffff) |
3410				(cr & 0xe0000000) |
3411				((regs->xer >> 3) & 0x10000000);
3412		else
3413			regs->dar = ea;
3414		break;
3415
3416	case LOAD:
3417#ifdef __powerpc64__
3418		if (size == 16) {
3419			err = emulate_lq(regs, ea, op->reg, cross_endian);
3420			break;
3421		}
3422#endif
3423		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
3424		if (!err) {
3425			if (op->type & SIGNEXT)
3426				do_signext(&regs->gpr[op->reg], size);
3427			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
3428				do_byterev(&regs->gpr[op->reg], size);
3429		}
3430		break;
3431
3432#ifdef CONFIG_PPC_FPU
3433	case LOAD_FP:
3434		/*
3435		 * If the instruction is in userspace, we can emulate it even
3436		 * if the VMX state is not live, because we have the state
3437		 * stored in the thread_struct.  If the instruction is in
3438		 * the kernel, we must not touch the state in the thread_struct.
3439		 */
3440		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3441			return 0;
3442		err = do_fp_load(op, ea, regs, cross_endian);
3443		break;
3444#endif
3445#ifdef CONFIG_ALTIVEC
3446	case LOAD_VMX:
3447		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3448			return 0;
3449		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
3450		break;
3451#endif
3452#ifdef CONFIG_VSX
3453	case LOAD_VSX: {
3454		unsigned long msrbit = MSR_VSX;
3455
3456		/*
3457		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3458		 * when the target of the instruction is a vector register.
3459		 */
3460		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3461			msrbit = MSR_VEC;
3462		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3463			return 0;
3464		err = do_vsx_load(op, ea, regs, cross_endian);
3465		break;
3466	}
3467#endif
3468	case LOAD_MULTI:
3469		if (!address_ok(regs, ea, size))
3470			return -EFAULT;
3471		rd = op->reg;
3472		for (i = 0; i < size; i += 4) {
3473			unsigned int v32 = 0;
3474
3475			nb = size - i;
3476			if (nb > 4)
3477				nb = 4;
3478			err = copy_mem_in((u8 *) &v32, ea, nb, regs);
3479			if (err)
3480				break;
3481			if (unlikely(cross_endian))
3482				v32 = byterev_4(v32);
3483			regs->gpr[rd] = v32;
3484			ea += 4;
3485			/* reg number wraps from 31 to 0 for lsw[ix] */
3486			rd = (rd + 1) & 0x1f;
3487		}
3488		break;
3489
3490	case STORE:
3491#ifdef __powerpc64__
3492		if (size == 16) {
3493			err = emulate_stq(regs, ea, op->reg, cross_endian);
3494			break;
3495		}
3496#endif
3497		if ((op->type & UPDATE) && size == sizeof(long) &&
3498		    op->reg == 1 && op->update_reg == 1 &&
3499		    !(regs->msr & MSR_PR) &&
3500		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
3501			err = handle_stack_update(ea, regs);
3502			break;
3503		}
3504		if (unlikely(cross_endian))
3505			do_byterev(&op->val, size);
3506		err = write_mem(op->val, ea, size, regs);
3507		break;
3508
3509#ifdef CONFIG_PPC_FPU
3510	case STORE_FP:
3511		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3512			return 0;
3513		err = do_fp_store(op, ea, regs, cross_endian);
3514		break;
3515#endif
3516#ifdef CONFIG_ALTIVEC
3517	case STORE_VMX:
3518		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3519			return 0;
3520		err = do_vec_store(op->reg, ea, size, regs, cross_endian);
3521		break;
3522#endif
3523#ifdef CONFIG_VSX
3524	case STORE_VSX: {
3525		unsigned long msrbit = MSR_VSX;
3526
3527		/*
3528		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3529		 * when the target of the instruction is a vector register.
3530		 */
3531		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3532			msrbit = MSR_VEC;
3533		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3534			return 0;
3535		err = do_vsx_store(op, ea, regs, cross_endian);
3536		break;
3537	}
3538#endif
3539	case STORE_MULTI:
3540		if (!address_ok(regs, ea, size))
3541			return -EFAULT;
3542		rd = op->reg;
3543		for (i = 0; i < size; i += 4) {
3544			unsigned int v32 = regs->gpr[rd];
3545
3546			nb = size - i;
3547			if (nb > 4)
3548				nb = 4;
3549			if (unlikely(cross_endian))
3550				v32 = byterev_4(v32);
3551			err = copy_mem_out((u8 *) &v32, ea, nb, regs);
3552			if (err)
3553				break;
3554			ea += 4;
3555			/* reg number wraps from 31 to 0 for stsw[ix] */
3556			rd = (rd + 1) & 0x1f;
3557		}
3558		break;
3559
3560	default:
3561		return -EINVAL;
3562	}
3563
3564	if (err)
3565		return err;
3566
3567	if (op->type & UPDATE)
3568		regs->gpr[op->update_reg] = op->ea;
3569
3570	return 0;
3571}
3572NOKPROBE_SYMBOL(emulate_loadstore);
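/*
 * Usage sketch (illustrative only; the function name is hypothetical):
 * pairing analyse_instr() with emulate_loadstore(), roughly as the
 * alignment-fault path does.  Advancing regs->nip on success is the
 * caller's responsibility.
 */
static int __maybe_unused sketch_emulate_unaligned(struct pt_regs *regs,
						   ppc_inst_t instr)
{
	struct instruction_op op;
	int r = analyse_instr(&op, regs, instr);

	if (r < 0)
		return -EINVAL;
	if (r > 0) {			/* done bar the register updates */
		emulate_update_regs(regs, &op);
		return 0;
	}
	if (!OP_IS_LOAD_STORE(GETTYPE(op.type)))
		return -EINVAL;
	return emulate_loadstore(regs, &op);	/* 0, -EFAULT, -EACCES or -EINVAL */
}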
3573
3574/*
3575 * Emulate instructions that cause a transfer of control,
3576 * loads and stores, and a few other instructions.
3577 * Returns 1 if the step was emulated, 0 if not,
3578 * or -1 if the instruction is one that should not be stepped,
3579 * such as an rfid, or a mtmsrd that would clear MSR_RI.
3580 */
3581int emulate_step(struct pt_regs *regs, ppc_inst_t instr)
3582{
3583	struct instruction_op op;
3584	int r, err, type;
3585	unsigned long val;
3586	unsigned long ea;
3587
3588	r = analyse_instr(&op, regs, instr);
3589	if (r < 0)
3590		return r;
3591	if (r > 0) {
3592		emulate_update_regs(regs, &op);
3593		return 1;
3594	}
3595
3596	err = 0;
3597	type = GETTYPE(op.type);
3598
3599	if (OP_IS_LOAD_STORE(type)) {
3600		err = emulate_loadstore(regs, &op);
3601		if (err)
3602			return 0;
3603		goto instr_done;
3604	}
3605
3606	switch (type) {
3607	case CACHEOP:
3608		ea = truncate_if_32bit(regs->msr, op.ea);
3609		if (!address_ok(regs, ea, 8))
3610			return 0;
3611		switch (op.type & CACHEOP_MASK) {
3612		case DCBST:
3613			__cacheop_user_asmx(ea, err, "dcbst");
3614			break;
3615		case DCBF:
3616			__cacheop_user_asmx(ea, err, "dcbf");
3617			break;
3618		case DCBTST:
3619			if (op.reg == 0)
3620				prefetchw((void *) ea);
3621			break;
3622		case DCBT:
3623			if (op.reg == 0)
3624				prefetch((void *) ea);
3625			break;
3626		case ICBI:
3627			__cacheop_user_asmx(ea, err, "icbi");
3628			break;
3629		case DCBZ:
3630			err = emulate_dcbz(ea, regs);
3631			break;
3632		}
3633		if (err) {
3634			regs->dar = ea;
3635			return 0;
3636		}
3637		goto instr_done;
3638
3639	case MFMSR:
3640		regs->gpr[op.reg] = regs->msr & MSR_MASK;
3641		goto instr_done;
3642
3643	case MTMSR:
3644		val = regs->gpr[op.reg];
3645		if ((val & MSR_RI) == 0)
3646			/* can't step mtmsr[d] that would clear MSR_RI */
3647			return -1;
3648		/* here op.val is the mask of bits to change */
3649		regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
3650		goto instr_done;
3651
3652	case SYSCALL:	/* sc */
3653		/*
3654		 * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't
3655		 * single step a system call instruction:
3656		 *
3657		 *   Successful completion for an instruction means that the
3658		 *   instruction caused no other interrupt. Thus a Trace
3659		 *   interrupt never occurs for a System Call or System Call
3660		 *   Vectored instruction, or for a Trap instruction that
3661		 *   traps.
3662		 */
3663		return -1;
3664	case SYSCALL_VECTORED_0:	/* scv 0 */
3665		return -1;
3666	case RFI:
3667		return -1;
3668	}
3669	return 0;
3670
3671 instr_done:
3672	regs_set_return_ip(regs,
3673		truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
3674	return 1;
3675}
3676NOKPROBE_SYMBOL(emulate_step);
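/*
 * Usage sketch (illustrative only; the helper name is hypothetical):
 * how a probe handler might consume emulate_step()'s return convention
 * to avoid a hardware single-step where possible.
 */
static int __maybe_unused sketch_try_to_emulate(struct pt_regs *regs,
						ppc_inst_t insn)
{
	int ret = emulate_step(regs, insn);

	if (ret > 0)	/* emulated; regs->nip has been advanced */
		return 1;
	if (ret < 0)	/* e.g. rfid, or mtmsr[d] clearing MSR_RI: do not step */
		return -1;
	return 0;	/* not emulated: fall back to single-stepping */
}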
v4.6
 
   1/*
   2 * Single-step support.
   3 *
   4 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * as published by the Free Software Foundation; either version
   9 * 2 of the License, or (at your option) any later version.
  10 */
  11#include <linux/kernel.h>
  12#include <linux/kprobes.h>
  13#include <linux/ptrace.h>
  14#include <linux/prefetch.h>
  15#include <asm/sstep.h>
  16#include <asm/processor.h>
  17#include <asm/uaccess.h>
 
  18#include <asm/cputable.h>
  19
  20extern char system_call_common[];
  21
  22#ifdef CONFIG_PPC64
  23/* Bits in SRR1 that are copied from MSR */
  24#define MSR_MASK	0xffffffff87c0ffffUL
  25#else
  26#define MSR_MASK	0x87c0ffff
  27#endif
  28
  29/* Bits in XER */
  30#define XER_SO		0x80000000U
  31#define XER_OV		0x40000000U
  32#define XER_CA		0x20000000U
 
 
 
 
 
 
  33
  34#ifdef CONFIG_PPC_FPU
  35/*
  36 * Functions in ldstfp.S
  37 */
  38extern int do_lfs(int rn, unsigned long ea);
  39extern int do_lfd(int rn, unsigned long ea);
  40extern int do_stfs(int rn, unsigned long ea);
  41extern int do_stfd(int rn, unsigned long ea);
  42extern int do_lvx(int rn, unsigned long ea);
  43extern int do_stvx(int rn, unsigned long ea);
  44extern int do_lxvd2x(int rn, unsigned long ea);
  45extern int do_stxvd2x(int rn, unsigned long ea);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  46#endif
  47
  48/*
  49 * Emulate the truncation of 64 bit values in 32-bit mode.
  50 */
  51static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
 
  52{
  53#ifdef __powerpc64__
  54	if ((msr & MSR_64BIT) == 0)
  55		val &= 0xffffffffUL;
  56#endif
  57	return val;
  58}
  59
  60/*
  61 * Determine whether a conditional branch instruction would branch.
  62 */
  63static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
 
 
  64{
  65	unsigned int bo = (instr >> 21) & 0x1f;
  66	unsigned int bi;
  67
  68	if ((bo & 4) == 0) {
  69		/* decrement counter */
  70		--regs->ctr;
  71		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
  72			return 0;
  73	}
  74	if ((bo & 0x10) == 0) {
  75		/* check bit from CR */
  76		bi = (instr >> 16) & 0x1f;
  77		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
  78			return 0;
  79	}
  80	return 1;
  81}
  82
  83
  84static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
  85{
  86	if (!user_mode(regs))
  87		return 1;
  88	return __access_ok(ea, nb, USER_DS);
 
 
 
 
 
 
 
  89}
  90
  91/*
  92 * Calculate effective address for a D-form instruction
  93 */
  94static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
 
  95{
  96	int ra;
  97	unsigned long ea;
  98
  99	ra = (instr >> 16) & 0x1f;
 100	ea = (signed short) instr;		/* sign-extend */
 101	if (ra)
 102		ea += regs->gpr[ra];
 103
 104	return truncate_if_32bit(regs->msr, ea);
 105}
 106
 107#ifdef __powerpc64__
 108/*
 109 * Calculate effective address for a DS-form instruction
 110 */
 111static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
 
 112{
 113	int ra;
 114	unsigned long ea;
 115
 116	ra = (instr >> 16) & 0x1f;
 117	ea = (signed short) (instr & ~3);	/* sign-extend */
 118	if (ra)
 119		ea += regs->gpr[ra];
 120
 121	return truncate_if_32bit(regs->msr, ea);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 122}
 123#endif /* __powerpc64 */
 124
 125/*
 126 * Calculate effective address for an X-form instruction
 127 */
 128static unsigned long __kprobes xform_ea(unsigned int instr,
 129					struct pt_regs *regs)
 130{
 131	int ra, rb;
 132	unsigned long ea;
 133
 134	ra = (instr >> 16) & 0x1f;
 135	rb = (instr >> 11) & 0x1f;
 136	ea = regs->gpr[rb];
 137	if (ra)
 138		ea += regs->gpr[ra];
 139
 140	return truncate_if_32bit(regs->msr, ea);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 141}
 142
 143/*
 144 * Return the largest power of 2, not greater than sizeof(unsigned long),
 145 * such that x is a multiple of it.
 146 */
 147static inline unsigned long max_align(unsigned long x)
 148{
 149	x |= sizeof(unsigned long);
 150	return x & -x;		/* isolates rightmost bit */
 151}
 152
 153
 154static inline unsigned long byterev_2(unsigned long x)
 155{
 156	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
 157}
 158
 159static inline unsigned long byterev_4(unsigned long x)
 160{
 161	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
 162		((x & 0xff00) << 8) | ((x & 0xff) << 24);
 163}
 164
 165#ifdef __powerpc64__
 166static inline unsigned long byterev_8(unsigned long x)
 167{
 168	return (byterev_4(x) << 32) | byterev_4(x >> 32);
 169}
 170#endif
 171
 172static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
 173				      int nb)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 174{
 175	int err = 0;
 176	unsigned long x = 0;
 177
 178	switch (nb) {
 179	case 1:
 180		err = __get_user(x, (unsigned char __user *) ea);
 181		break;
 182	case 2:
 183		err = __get_user(x, (unsigned short __user *) ea);
 184		break;
 185	case 4:
 186		err = __get_user(x, (unsigned int __user *) ea);
 187		break;
 188#ifdef __powerpc64__
 189	case 8:
 190		err = __get_user(x, (unsigned long __user *) ea);
 191		break;
 192#endif
 193	}
 194	if (!err)
 195		*dest = x;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 196	return err;
 197}
 198
 199static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
 200					int nb, struct pt_regs *regs)
 
 
 
 201{
 202	int err;
 203	unsigned long x, b, c;
 204#ifdef __LITTLE_ENDIAN__
 205	int len = nb; /* save a copy of the length for byte reversal */
 206#endif
 207
 208	/* unaligned, do this in pieces */
 209	x = 0;
 210	for (; nb > 0; nb -= c) {
 211#ifdef __LITTLE_ENDIAN__
 212		c = 1;
 213#endif
 214#ifdef __BIG_ENDIAN__
 215		c = max_align(ea);
 216#endif
 217		if (c > nb)
 218			c = max_align(nb);
 219		err = read_mem_aligned(&b, ea, c);
 220		if (err)
 221			return err;
 222		x = (x << (8 * c)) + b;
 223		ea += c;
 224	}
 225#ifdef __LITTLE_ENDIAN__
 226	switch (len) {
 227	case 2:
 228		*dest = byterev_2(x);
 229		break;
 230	case 4:
 231		*dest = byterev_4(x);
 232		break;
 233#ifdef __powerpc64__
 234	case 8:
 235		*dest = byterev_8(x);
 236		break;
 237#endif
 
 
 
 238	}
 239#endif
 240#ifdef __BIG_ENDIAN__
 241	*dest = x;
 242#endif
 243	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 244}
 245
 246/*
 247 * Read memory at address ea for nb bytes, return 0 for success
 248 * or -EFAULT if an error occurred.
 
 249 */
 250static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
 251			      struct pt_regs *regs)
 252{
 253	if (!address_ok(regs, ea, nb))
 254		return -EFAULT;
 255	if ((ea & (nb - 1)) == 0)
 256		return read_mem_aligned(dest, ea, nb);
 257	return read_mem_unaligned(dest, ea, nb, regs);
 258}
 
 259
 260static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
 261				       int nb)
 262{
 263	int err = 0;
 264
 265	switch (nb) {
 266	case 1:
 267		err = __put_user(val, (unsigned char __user *) ea);
 268		break;
 269	case 2:
 270		err = __put_user(val, (unsigned short __user *) ea);
 271		break;
 272	case 4:
 273		err = __put_user(val, (unsigned int __user *) ea);
 274		break;
 275#ifdef __powerpc64__
 276	case 8:
 277		err = __put_user(val, (unsigned long __user *) ea);
 278		break;
 279#endif
 280	}
 281	return err;
 
 
 
 
 282}
 283
 284static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
 285					 int nb, struct pt_regs *regs)
 286{
 287	int err;
 288	unsigned long c;
 289
 290#ifdef __LITTLE_ENDIAN__
 291	switch (nb) {
 292	case 2:
 293		val = byterev_2(val);
 294		break;
 295	case 4:
 296		val = byterev_4(val);
 297		break;
 298#ifdef __powerpc64__
 299	case 8:
 300		val = byterev_8(val);
 301		break;
 302#endif
 303	}
 304#endif
 305	/* unaligned or little-endian, do this in pieces */
 
 
 
 
 
 
 
 
 
 
 306	for (; nb > 0; nb -= c) {
 307#ifdef __LITTLE_ENDIAN__
 308		c = 1;
 309#endif
 310#ifdef __BIG_ENDIAN__
 311		c = max_align(ea);
 312#endif
 313		if (c > nb)
 314			c = max_align(nb);
 315		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
 316		if (err)
 317			return err;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 318		ea += c;
 319	}
 320	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 321}
 322
 323/*
 324 * Write memory at address ea for nb bytes, return 0 for success
 325 * or -EFAULT if an error occurred.
 326 */
 327static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
 328			       struct pt_regs *regs)
 329{
 330	if (!address_ok(regs, ea, nb))
 331		return -EFAULT;
 332	if ((ea & (nb - 1)) == 0)
 333		return write_mem_aligned(val, ea, nb);
 334	return write_mem_unaligned(val, ea, nb, regs);
 335}
 
 336
 337#ifdef CONFIG_PPC_FPU
 338/*
 339 * Check the address and alignment, and call func to do the actual
 340 * load or store.
 341 */
 342static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
 343				unsigned long ea, int nb,
 344				struct pt_regs *regs)
 345{
 346	int err;
 347	union {
 348		double dbl;
 349		unsigned long ul[2];
 350		struct {
 351#ifdef __BIG_ENDIAN__
 352			unsigned _pad_;
 353			unsigned word;
 354#endif
 355#ifdef __LITTLE_ENDIAN__
 356			unsigned word;
 357			unsigned _pad_;
 358#endif
 359		} single;
 360	} data;
 361	unsigned long ptr;
 362
 
 
 
 363	if (!address_ok(regs, ea, nb))
 364		return -EFAULT;
 365	if ((ea & 3) == 0)
 366		return (*func)(rn, ea);
 367	ptr = (unsigned long) &data.ul;
 368	if (sizeof(unsigned long) == 8 || nb == 4) {
 369		err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
 370		if (nb == 4)
 371			ptr = (unsigned long)&(data.single.word);
 372	} else {
 373		/* reading a double on 32-bit */
 374		err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
 375		if (!err)
 376			err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
 377	}
 378	if (err)
 379		return err;
 380	return (*func)(rn, ptr);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 381}
 
 382
 383static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
 384				 unsigned long ea, int nb,
 385				 struct pt_regs *regs)
 386{
 387	int err;
 388	union {
 389		double dbl;
 390		unsigned long ul[2];
 391		struct {
 392#ifdef __BIG_ENDIAN__
 393			unsigned _pad_;
 394			unsigned word;
 395#endif
 396#ifdef __LITTLE_ENDIAN__
 397			unsigned word;
 398			unsigned _pad_;
 399#endif
 400		} single;
 401	} data;
 402	unsigned long ptr;
 403
 
 
 
 404	if (!address_ok(regs, ea, nb))
 405		return -EFAULT;
 406	if ((ea & 3) == 0)
 407		return (*func)(rn, ea);
 408	ptr = (unsigned long) &data.ul[0];
 409	if (sizeof(unsigned long) == 8 || nb == 4) {
 410		if (nb == 4)
 411			ptr = (unsigned long)&(data.single.word);
 412		err = (*func)(rn, ptr);
 413		if (err)
 414			return err;
 415		err = write_mem_unaligned(data.ul[0], ea, nb, regs);
 416	} else {
 417		/* writing a double on 32-bit */
 418		err = (*func)(rn, ptr);
 419		if (err)
 420			return err;
 421		err = write_mem_unaligned(data.ul[0], ea, 4, regs);
 422		if (!err)
 423			err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
 
 
 
 
 
 
 424	}
 425	return err;
 426}
 
 427#endif
 428
 429#ifdef CONFIG_ALTIVEC
 430/* For Altivec/VMX, no need to worry about alignment */
 431static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
 432				 unsigned long ea, struct pt_regs *regs)
 
 433{
 
 
 
 
 
 
 
 
 
 434	if (!address_ok(regs, ea & ~0xfUL, 16))
 435		return -EFAULT;
 436	return (*func)(rn, ea);
 
 
 
 
 
 
 
 
 
 
 
 
 
 437}
 438
 439static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
 440				  unsigned long ea, struct pt_regs *regs)
 
 441{
 
 
 
 
 
 
 
 
 442	if (!address_ok(regs, ea & ~0xfUL, 16))
 443		return -EFAULT;
 444	return (*func)(rn, ea);
 
 
 
 
 
 
 
 
 
 
 
 445}
 446#endif /* CONFIG_ALTIVEC */
 447
 448#ifdef CONFIG_VSX
 449static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
 450				 unsigned long ea, struct pt_regs *regs)
 451{
 452	int err;
 453	unsigned long val[2];
 454
 455	if (!address_ok(regs, ea, 16))
 456		return -EFAULT;
 457	if ((ea & 3) == 0)
 458		return (*func)(rn, ea);
 459	err = read_mem_unaligned(&val[0], ea, 8, regs);
 460	if (!err)
 461		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
 462	if (!err)
 463		err = (*func)(rn, (unsigned long) &val[0]);
 
 
 
 464	return err;
 465}
 466
 467static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
 468				 unsigned long ea, struct pt_regs *regs)
 469{
 470	int err;
 471	unsigned long val[2];
 472
 473	if (!address_ok(regs, ea, 16))
 474		return -EFAULT;
 475	if ((ea & 3) == 0)
 476		return (*func)(rn, ea);
 477	err = (*func)(rn, (unsigned long) &val[0]);
 478	if (err)
 479		return err;
 480	err = write_mem_unaligned(val[0], ea, 8, regs);
 
 
 
 
 481	if (!err)
 482		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
 483	return err;
 484}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 485#endif /* CONFIG_VSX */
 486
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))

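/*
 * Set CR0 from the signed value in gpr[rd]: LT (0x80000000), GT
 * (0x40000000) or EQ (0x20000000), with SO copied down from XER[SO].
 */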
static void __kprobes set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}

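/*
 * Unsigned addition wraps iff the result is less than either addend;
 * with a carry-in, a result equal to the addend also means a carry out.
 * XER[CA] is set accordingly, using 32-bit views in 32-bit mode.
 */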
static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}

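/*
 * Each 4-bit CR field holds LT(8), GT(4), EQ(2) and SO(1); field 0 is
 * the most significant nibble, hence the (7 - crfld) * 4 shift below.
 */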
static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
				    int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

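/*
 * Build the 5-bit TO mask for tw/td/twi/tdi: LT(0x10), GT(0x08) and
 * EQ(0x04) from the signed compare, LTU(0x02) and GTU(0x01) from the
 * unsigned one.  The trap is taken if the instruction's TO field
 * selects any condition that holds.
 */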
static int __kprobes trap_compare(long v1, long v2)
{
	int ret = 0;

	if (v1 < v2)
		ret |= 0x10;
	else if (v1 > v2)
		ret |= 0x08;
	else
		ret |= 0x04;
	if ((unsigned long)v1 < (unsigned long)v2)
		ret |= 0x02;
	else if ((unsigned long)v1 > (unsigned long)v2)
		ret |= 0x01;
	return ret;
}

/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))

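/*
 * MASK32/MASK64 build the ISA MASK(mb, me): ones from bit mb through
 * bit me in big-endian bit numbering, wrapping around when me < mb.
 * ROTATE() is a left rotate, with n == 0 special-cased to avoid an
 * undefined shift by the full word size.
 */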
/*
 * Decode an instruction, and execute it if that can be done just by
 * modifying *regs (i.e. integer arithmetic and logical instructions,
 * branches, and barrier instructions).
 * Returns 1 if the instruction has been executed, or 0 if not.
 * Sets *op to indicate what the instruction does.
 */
int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
			    unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned int mb, me, sh;
	long ival;

	op->type = COMPUTE;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		op->type = BRANCH;
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		regs->nip = truncate_if_32bit(regs->msr, regs->nip);
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = truncate_if_32bit(regs->msr, imm);
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		if ((instr & 0xfe2) == 2)
			op->type = SYSCALL;
		else
			op->type = UNKNOWN;
		return 0;
#endif
	case 18:	/* b */
		op->type = BRANCH;
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1)
			regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
		imm = truncate_if_32bit(regs->msr, imm);
		regs->nip = imm;
		return 1;
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 0:		/* mcrf */
			rd = 7 - ((instr >> 23) & 0x7);
			ra = 7 - ((instr >> 18) & 0x7);
			rd *= 4;
			ra *= 4;
			val = (regs->ccr >> ra) & 0xf;
			regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
			goto instr_done;

		case 16:	/* bclr */
		case 528:	/* bcctr */
			op->type = BRANCH;
			imm = (instr & 0x400)? regs->ctr: regs->link;
			regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
			imm = truncate_if_32bit(regs->msr, imm);
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;

		case 18:	/* rfid, scary */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = RFI;
			return 0;

		case 150:	/* isync */
			op->type = BARRIER;
			isync();
			goto instr_done;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
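			/*
			 * Instruction bits 6-9 hold the truth table of the
			 * CR logical op: using the two source CR bits as a
			 * 2-bit index, bit (6 + ra*2 + rb) is the result.
			 */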
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			goto instr_done;
		}
		break;
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
			op->type = BARRIER;
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				asm volatile("lwsync" : : : "memory");
				goto instr_done;
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");
				goto instr_done;
			}
#endif
			mb();
			goto instr_done;

		case 854:	/* eieio */
			op->type = BARRIER;
			eieio();
			goto instr_done;
		}
		break;
	}

	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return 0;

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

	switch (opcode) {
#ifdef __powerpc64__
	case 2:		/* tdi */
		if (rd & trap_compare(regs->gpr[ra], (short) instr))
			goto trap;
		goto instr_done;
#endif
	case 3:		/* twi */
		if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
			goto trap;
		goto instr_done;

	case 7:		/* mulli */
		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
		goto instr_done;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
		goto instr_done;

	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, val, imm, rd >> 2);
		goto instr_done;

	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, val, imm, rd >> 2);
		goto instr_done;

	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		goto instr_done;

	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, rd);
		goto instr_done;

	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 24:	/* ori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;
		goto instr_done;

	case 25:	/* oris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
		goto instr_done;

	case 26:	/* xori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;
		goto instr_done;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
		goto instr_done;

	case 28:	/* andi. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;
		set_cr0(regs, ra);
		goto instr_done;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, ra);
		goto instr_done;

#ifdef __powerpc64__
	case 30:	/* rld* */
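		/*
		 * MD/MDS-form: the 6-bit mb/me field is split, with its high
		 * bit at instruction bit 5; the immediate forms likewise keep
		 * sh[5] at instruction bit 1.
		 */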
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldicr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			case 2:		/* rldic */
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				goto logical_done;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
					(val & imm);
				goto logical_done;
			}
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			}
		}
#endif

	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 4:		/* tw */
			if (rd == 0x1f ||
			    (rd & trap_compare((int)regs->gpr[ra],
					       (int)regs->gpr[rb])))
				goto trap;
			goto instr_done;
#ifdef __powerpc64__
		case 68:	/* td */
			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
				goto trap;
			goto instr_done;
#endif
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MFMSR;
			op->reg = rd;
			return 0;
		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
			return 0;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
			op->val = imm;
			return 0;
#endif

		case 19:	/* mfcr */
			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;
			goto instr_done;

		case 144:	/* mtcrf */
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			goto instr_done;

		case 339:	/* mfspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			switch (spr) {
			case SPRN_XER:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
				goto instr_done;
			case SPRN_LR:	/* mflr */
				regs->gpr[rd] = regs->link;
				goto instr_done;
			case SPRN_CTR:	/* mfctr */
				regs->gpr[rd] = regs->ctr;
				goto instr_done;
			default:
				op->type = MFSPR;
				op->reg = rd;
				op->spr = spr;
				return 0;
			}
			break;

		case 467:	/* mtspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			switch (spr) {
			case SPRN_XER:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
				goto instr_done;
			case SPRN_LR:	/* mtlr */
				regs->link = regs->gpr[rd];
				goto instr_done;
			case SPRN_CTR:	/* mtctr */
				regs->ctr = regs->gpr[rd];
				goto instr_done;
			default:
				op->type = MTSPR;
				op->val = regs->gpr[rd];
				op->spr = spr;
				return 0;
			}
			break;

/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, val, val2, rd >> 2);
			goto instr_done;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, val, val2, rd >> 2);
			goto instr_done;

/*
 * Arithmetic instructions
 */
		case 8:	/* subfc */
			add_with_carry(regs, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			regs->gpr[rd] = -regs->gpr[ra];
			goto arith_done;

		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			goto arith_done;

		case 266:	/* add */
			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;


/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#endif
		case 28:	/* and */
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 124:	/* nor */
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 284:	/* eqv */
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 412:	/* orc */
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
			goto logical_done;
#endif

/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 824:	/* srawi */
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] << sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */

/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			op->type = MKOP(CACHEOP, DCBST, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 86:	/* dcbf */
			op->type = MKOP(CACHEOP, DCBF, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 246:	/* dcbtst */
			op->type = MKOP(CACHEOP, DCBTST, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 278:	/* dcbt */
			op->type = MKOP(CACHEOP, DCBT, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 982:	/* icbi */
			op->type = MKOP(CACHEOP, ICBI, 0);
			op->ea = xform_ea(instr, regs);
			return 0;
		}
		break;
	}

	/*
	 * Loads and stores.
	 */
	op->type = UNKNOWN;
	op->update_reg = ra;
	op->reg = rd;
	op->val = regs->gpr[rd];
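	/*
	 * The update forms differ from the base forms by 0x40 in the
	 * extended opcode for X-form (instr & UPDATE below) and by the
	 * low opcode bit for D-form; the >> 20 shift lines the latter up
	 * with the same UPDATE flag bit.
	 */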
	u = (instr >> 20) & UPDATE;

	switch (opcode) {
	case 31:
		u = instr & UPDATE;
		op->ea = xform_ea(instr, regs);
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			op->type = MKOP(LARX, 0, 4);
			break;

		case 150:	/* stwcx. */
			op->type = MKOP(STCX, 0, 4);
			break;

#ifdef __powerpc64__
		case 84:	/* ldarx */
			op->type = MKOP(LARX, 0, 8);
			break;

		case 214:	/* stdcx. */
			op->type = MKOP(STCX, 0, 8);
			break;

		case 21:	/* ldx */
		case 53:	/* ldux */
			op->type = MKOP(LOAD, u, 8);
			break;
#endif

		case 23:	/* lwzx */
		case 55:	/* lwzux */
			op->type = MKOP(LOAD, u, 4);
			break;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			op->type = MKOP(LOAD, u, 1);
			break;

#ifdef CONFIG_ALTIVEC
		case 103:	/* lvx */
		case 359:	/* lvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(LOAD_VMX, 0, 16);
			break;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(STORE_VMX, 0, 16);
			break;
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			op->type = MKOP(STORE, u, 8);
			break;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			op->type = MKOP(STORE, u, 4);
			break;

		case 215:	/* stbx */
		case 247:	/* stbux */
			op->type = MKOP(STORE, u, 1);
			break;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			op->type = MKOP(LOAD, u, 2);
			break;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 4);
			break;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 2);
			break;

		case 407:	/* sthx */
		case 439:	/* sthux */
			op->type = MKOP(STORE, u, 2);
			break;

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			op->type = MKOP(LOAD, BYTEREV, 8);
			break;

#endif
		case 533:	/* lswx */
			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
			break;

		case 534:	/* lwbrx */
			op->type = MKOP(LOAD, BYTEREV, 4);
			break;

		case 597:	/* lswi */
			if (rb == 0)
				rb = 32;	/* # bytes to load */
			op->type = MKOP(LOAD_MULTI, 0, rb);
			op->ea = 0;
			if (ra)
				op->ea = truncate_if_32bit(regs->msr,
							   regs->gpr[ra]);
			break;

#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 4);
			break;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 8);
			break;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 4);
			break;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 8);
			break;
#endif

#ifdef __powerpc64__
		case 660:	/* stdbrx */
			op->type = MKOP(STORE, BYTEREV, 8);
			op->val = byterev_8(regs->gpr[rd]);
			break;

#endif
		case 661:	/* stswx */
			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
			break;

		case 662:	/* stwbrx */
			op->type = MKOP(STORE, BYTEREV, 4);
			op->val = byterev_4(regs->gpr[rd]);
			break;

		case 725:	/* stswi */
			if (rb == 0)
				rb = 32;	/* # bytes to store */
			op->type = MKOP(STORE_MULTI, 0, rb);
			op->ea = 0;
			if (ra)
				op->ea = truncate_if_32bit(regs->msr,
							   regs->gpr[ra]);
			break;

		case 790:	/* lhbrx */
			op->type = MKOP(LOAD, BYTEREV, 2);
			break;

		case 918:	/* sthbrx */
			op->type = MKOP(STORE, BYTEREV, 2);
			op->val = byterev_2(regs->gpr[rd]);
			break;

#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, u, 16);
			break;

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, u, 16);
			break;

#endif /* CONFIG_VSX */
		}
		break;

	case 32:	/* lwz */
	case 33:	/* lwzu */
		op->type = MKOP(LOAD, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		op->type = MKOP(LOAD, u, 1);
		op->ea = dform_ea(instr, regs);
		break;

	case 36:	/* stw */
	case 37:	/* stwu */
		op->type = MKOP(STORE, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 38:	/* stb */
	case 39:	/* stbu */
		op->type = MKOP(STORE, u, 1);
		op->ea = dform_ea(instr, regs);
		break;

	case 40:	/* lhz */
	case 41:	/* lhzu */
		op->type = MKOP(LOAD, u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 42:	/* lha */
	case 43:	/* lhau */
		op->type = MKOP(LOAD, SIGNEXT | u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 44:	/* sth */
	case 45:	/* sthu */
		op->type = MKOP(STORE, u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 46:	/* lmw */
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);
		break;

	case 47:	/* stmw */
		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);
		break;

#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 8);
		op->ea = dform_ea(instr, regs);
		break;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 8);
		op->ea = dform_ea(instr, regs);
		break;
#endif

#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
		case 0:		/* ld */
			op->type = MKOP(LOAD, 0, 8);
			break;
		case 1:		/* ldu */
			op->type = MKOP(LOAD, UPDATE, 8);
			break;
		case 2:		/* lwa */
			op->type = MKOP(LOAD, SIGNEXT, 4);
			break;
		}
		break;

	case 62:	/* std[u] */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
		case 0:		/* std */
			op->type = MKOP(STORE, 0, 8);
			break;
		case 1:		/* stdu */
			op->type = MKOP(STORE, UPDATE, 8);
			break;
		}
		break;
#endif /* __powerpc64__ */

	}
	return 0;

 logical_done:
	if (instr & 1)
		set_cr0(regs, ra);
	goto instr_done;

 arith_done:
	if (instr & 1)
		set_cr0(regs, rd);

 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;

 priv:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGPRIV;
	return 0;

 trap:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGTRAP;
	return 0;

#ifdef CONFIG_PPC_FPU
 fpunavail:
	op->type = INTERRUPT | 0x800;
	return 0;
#endif

#ifdef CONFIG_ALTIVEC
 vecunavail:
	op->type = INTERRUPT | 0xf20;
	return 0;
#endif

#ifdef CONFIG_VSX
 vsxunavail:
	op->type = INTERRUPT | 0xf40;
	return 0;
#endif
}
EXPORT_SYMBOL_GPL(analyse_instr);

/*
 * For PPC32 we always use stwu with r1 to change the stack pointer.
 * So this emulated store may corrupt the exception frame; we therefore
 * provide an exception frame trampoline, pushed below the kprobed
 * function's stack.  Here we only update gpr[1] and do not emulate the
 * actual store; the real store is done safely in the exception return
 * code, which checks this flag.
 */
static __kprobes int handle_stack_update(unsigned long ea, struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	/*
	 * Check whether this store would overflow the kernel stack.
	 */
	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
		return -EINVAL;
	}
#endif /* CONFIG_PPC32 */
	/*
	 * Check whether the flag is already set, since that would mean
	 * losing the previously saved value.
	 */
	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
	set_thread_flag(TIF_EMULATE_STACK_STORE);
	return 0;
}

static __kprobes void do_signext(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = (signed short) *valp;
		break;
	case 4:
		*valp = (signed int) *valp;
		break;
	}
}

static __kprobes void do_byterev(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = byterev_2(*valp);
		break;
	case 4:
		*valp = byterev_4(*valp);
		break;
#ifdef __powerpc64__
	case 8:
		*valp = byterev_8(*valp);
		break;
#endif
	}
}

/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
{
	struct instruction_op op;
	int r, err, size;
	unsigned long val;
	unsigned int cr;
	int i, rd, nb;

	r = analyse_instr(&op, regs, instr);
	if (r != 0)
		return r;

	err = 0;
	size = GETSIZE(op.type);
	switch (op.type & INSTR_TYPE_MASK) {
	case CACHEOP:
		if (!address_ok(regs, op.ea, 8))
			return 0;
		switch (op.type & CACHEOP_MASK) {
		case DCBST:
			__cacheop_user_asmx(op.ea, err, "dcbst");
			break;
		case DCBF:
			__cacheop_user_asmx(op.ea, err, "dcbf");
			break;
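		/*
		 * For dcbtst/dcbt, op.reg holds the TH hint field; only the
		 * plain TH=0 form is emulated, as a software prefetch.
		 */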
		case DCBTST:
			if (op.reg == 0)
				prefetchw((void *) op.ea);
			break;
		case DCBT:
			if (op.reg == 0)
				prefetch((void *) op.ea);
			break;
		case ICBI:
			__cacheop_user_asmx(op.ea, err, "icbi");
			break;
		}
		if (err)
			return 0;
		goto instr_done;

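	/*
	 * larx/stcx. must be executed natively: a reservation cannot be
	 * obtained with ordinary loads and stores, hence the inline asm.
	 */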
	case LARX:
		if (regs->msr & MSR_LE)
			return 0;
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		err = -EFAULT;
		if (!address_ok(regs, op.ea, size))
			goto ldst_done;
		err = 0;
		switch (size) {
		case 4:
			__get_user_asmx(val, op.ea, err, "lwarx");
			break;
		case 8:
			__get_user_asmx(val, op.ea, err, "ldarx");
			break;
		default:
			return 0;
		}
		if (!err)
			regs->gpr[op.reg] = val;
		goto ldst_done;

	case STCX:
		if (regs->msr & MSR_LE)
			return 0;
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		err = -EFAULT;
		if (!address_ok(regs, op.ea, size))
			goto ldst_done;
		err = 0;
		switch (size) {
		case 4:
			__put_user_asmx(op.val, op.ea, err, "stwcx.", cr);
			break;
		case 8:
			__put_user_asmx(op.val, op.ea, err, "stdcx.", cr);
			break;
		default:
			return 0;
		}
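		/*
		 * Set CR0 the way the real stcx. did: copy its LT/GT/EQ
		 * (EQ set iff the store succeeded) and take SO from XER.
		 */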
		if (!err)
			regs->ccr = (regs->ccr & 0x0fffffff) |
				(cr & 0xe0000000) |
				((regs->xer >> 3) & 0x10000000);
		goto ldst_done;

	case LOAD:
		if (regs->msr & MSR_LE)
			return 0;
		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
		if (!err) {
			if (op.type & SIGNEXT)
				do_signext(&regs->gpr[op.reg], size);
			if (op.type & BYTEREV)
				do_byterev(&regs->gpr[op.reg], size);
		}
		goto ldst_done;

#ifdef CONFIG_PPC_FPU
	case LOAD_FP:
		if (regs->msr & MSR_LE)
			return 0;
		if (size == 4)
			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
		else
			err = do_fp_load(op.reg, do_lfd, op.ea, size, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_ALTIVEC
	case LOAD_VMX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case LOAD_VSX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
		goto ldst_done;
#endif
	case LOAD_MULTI:
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		for (i = 0; i < size; i += 4) {
			nb = size - i;
			if (nb > 4)
				nb = 4;
			err = read_mem(&regs->gpr[rd], op.ea, nb, regs);
			if (err)
				return 0;
			if (nb < 4)	/* left-justify last bytes */
				regs->gpr[rd] <<= 32 - 8 * nb;
			op.ea += 4;
			++rd;
		}
		goto instr_done;

	case STORE:
		if (regs->msr & MSR_LE)
			return 0;
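		/*
		 * A stwu/stdu that updates r1 in kernel mode just below the
		 * current stack pointer is pushing a new stack frame; defer
		 * the actual store (see handle_stack_update() above).
		 */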
		if ((op.type & UPDATE) && size == sizeof(long) &&
		    op.reg == 1 && op.update_reg == 1 &&
		    !(regs->msr & MSR_PR) &&
		    op.ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
			err = handle_stack_update(op.ea, regs);
			goto ldst_done;
		}
		err = write_mem(op.val, op.ea, size, regs);
		goto ldst_done;

#ifdef CONFIG_PPC_FPU
	case STORE_FP:
		if (regs->msr & MSR_LE)
			return 0;
		if (size == 4)
			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
		else
			err = do_fp_store(op.reg, do_stfd, op.ea, size, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_ALTIVEC
	case STORE_VMX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case STORE_VSX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
		goto ldst_done;
#endif
	case STORE_MULTI:
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		for (i = 0; i < size; i += 4) {
			val = regs->gpr[rd];
			nb = size - i;
			if (nb > 4)
				nb = 4;
			else
				val >>= 32 - 8 * nb;
			err = write_mem(val, op.ea, nb, regs);
			if (err)
				return 0;
			op.ea += 4;
			++rd;
		}
		goto instr_done;

	case MFMSR:
		regs->gpr[op.reg] = regs->msr & MSR_MASK;
		goto instr_done;

	case MTMSR:
		val = regs->gpr[op.reg];
		if ((val & MSR_RI) == 0)
			/* can't step mtmsr[d] that would clear MSR_RI */
			return -1;
		/* here op.val is the mask of bits to change */
		regs->msr = (regs->msr & ~op.val) | (val & op.val);
		goto instr_done;

#ifdef CONFIG_PPC64
	case SYSCALL:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;

	case RFI:
		return -1;
#endif
	}
	return 0;

 ldst_done:
	if (err)
		return 0;
	if (op.type & UPDATE)
		regs->gpr[op.update_reg] = op.ea;

 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;
}