Linux v3.1
   1/******************************************************************************
   2 * emulate.c
   3 *
   4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
   5 *
   6 * Copyright (c) 2005 Keir Fraser
   7 *
   8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
   9 * privileged instructions:
  10 *
  11 * Copyright (C) 2006 Qumranet
  12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  13 *
  14 *   Avi Kivity <avi@qumranet.com>
  15 *   Yaniv Kamay <yaniv@qumranet.com>
  16 *
  17 * This work is licensed under the terms of the GNU GPL, version 2.  See
  18 * the COPYING file in the top-level directory.
  19 *
  20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
  21 */
  22
  23#include <linux/kvm_host.h>
  24#include "kvm_cache_regs.h"
  25#include <linux/module.h>
  26#include <asm/kvm_emulate.h>
  27
  28#include "x86.h"
  29#include "tss.h"
  30
  31/*
  32 * Opcode effective-address decode tables.
  33 * Note that we only emulate instructions that have at least one memory
  34 * operand (excluding implicit stack references). We assume that stack
  35 * references and instruction fetches will never occur in special memory
  36 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
  37 * not be handled.
  38 */
  39
  40/* Operand sizes: 8-bit operands or specified/overridden size. */
  41#define ByteOp      (1<<0)	/* 8-bit operands. */
  42/* Destination operand type. */
  43#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
  44#define DstReg      (2<<1)	/* Register operand. */
  45#define DstMem      (3<<1)	/* Memory operand. */
  46#define DstAcc      (4<<1)	/* Destination Accumulator */
  47#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
  48#define DstMem64    (6<<1)	/* 64bit memory operand */
  49#define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
  50#define DstDX       (8<<1)	/* Destination is in DX register */
  51#define DstMask     (0xf<<1)
  52/* Source operand type. */
  53#define SrcNone     (0<<5)	/* No source operand. */
  54#define SrcReg      (1<<5)	/* Register operand. */
  55#define SrcMem      (2<<5)	/* Memory operand. */
  56#define SrcMem16    (3<<5)	/* Memory operand (16-bit). */
  57#define SrcMem32    (4<<5)	/* Memory operand (32-bit). */
  58#define SrcImm      (5<<5)	/* Immediate operand. */
  59#define SrcImmByte  (6<<5)	/* 8-bit sign-extended immediate operand. */
  60#define SrcOne      (7<<5)	/* Implied '1' */
  61#define SrcImmUByte (8<<5)      /* 8-bit unsigned immediate operand. */
  62#define SrcImmU     (9<<5)      /* Immediate operand, unsigned */
  63#define SrcSI       (0xa<<5)	/* Source is in the DS:RSI */
  64#define SrcImmFAddr (0xb<<5)	/* Source is immediate far address */
  65#define SrcMemFAddr (0xc<<5)	/* Source is far address in memory */
  66#define SrcAcc      (0xd<<5)	/* Source Accumulator */
  67#define SrcImmU16   (0xe<<5)    /* Immediate operand, unsigned, 16 bits */
  68#define SrcDX       (0xf<<5)	/* Source is in DX register */
  69#define SrcMask     (0xf<<5)
  70/* Generic ModRM decode. */
  71#define ModRM       (1<<9)
  72/* Destination is only written; never read. */
  73#define Mov         (1<<10)
  74#define BitOp       (1<<11)
  75#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
  76#define String      (1<<13)     /* String instruction (rep capable) */
  77#define Stack       (1<<14)     /* Stack instruction (push/pop) */
  78#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
  79#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
  80#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
  81#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
  82#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
  83#define Sse         (1<<18)     /* SSE Vector instruction */
  84/* Misc flags */
  85#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
  86#define VendorSpecific (1<<22) /* Vendor specific instruction */
  87#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
  88#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
  89#define Undefined   (1<<25) /* No Such Instruction */
  90#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
  91#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
  92#define No64	    (1<<28)
  93/* Source 2 operand type */
  94#define Src2None    (0<<29)
  95#define Src2CL      (1<<29)
  96#define Src2ImmByte (2<<29)
  97#define Src2One     (3<<29)
  98#define Src2Imm     (4<<29)
  99#define Src2Mask    (7<<29)
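
Aside: the Dst*, Src* and Src2* groups above are independent bit fields packed into the single u32 flags word of struct opcode (defined below), which is why each group carries its own mask. A minimal standalone sketch, with the masks re-declared locally for illustration, of how a decoder pulls the fields back apart:

#include <stdio.h>

/* Illustrative copies of the masks above. */
#define DSTMASK  (0xfu << 1)
#define SRCMASK  (0xfu << 5)
#define SRC2MASK (7u << 29)

int main(void)
{
	/* DstMem | SrcImm | Src2ImmByte packed into one flags word. */
	unsigned int flags = (3u << 1) | (5u << 5) | (2u << 29);

	printf("dst=%u src=%u src2=%u\n",
	       (flags & DSTMASK) >> 1,
	       (flags & SRCMASK) >> 5,
	       (flags & SRC2MASK) >> 29);	/* dst=3 src=5 src2=2 */
	return 0;
}
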
 100
 101#define X2(x...) x, x
 102#define X3(x...) X2(x), x
 103#define X4(x...) X2(x), X2(x)
 104#define X5(x...) X4(x), x
 105#define X6(x...) X4(x), X2(x)
 106#define X7(x...) X4(x), X3(x)
 107#define X8(x...) X4(x), X4(x)
 108#define X16(x...) X8(x), X8(x)
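
Aside: these GNU C variadic macros simply repeat an initializer, so the opcode tables further down can spell out sixteen identical entries in one token. A standalone sketch of the expansion:

#include <stdio.h>

#define X2(x...) x, x
#define X4(x...) X2(x), X2(x)
#define X8(x...) X4(x), X4(x)

int main(void)
{
	int filler[] = { X8(-1) };	/* expands to eight -1 initializers */

	printf("%zu\n", sizeof(filler) / sizeof(filler[0]));	/* prints 8 */
	return 0;
}
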
 109
 110struct opcode {
 111	u32 flags;
 112	u8 intercept;
 113	union {
 114		int (*execute)(struct x86_emulate_ctxt *ctxt);
 115		struct opcode *group;
 116		struct group_dual *gdual;
 117		struct gprefix *gprefix;
 118	} u;
 119	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
 120};
 121
 122struct group_dual {
 123	struct opcode mod012[8];
 124	struct opcode mod3[8];
 125};
 126
 127struct gprefix {
 128	struct opcode pfx_no;
 129	struct opcode pfx_66;
 130	struct opcode pfx_f2;
 131	struct opcode pfx_f3;
 132};
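
Aside: decode picks one struct opcode and execution then goes through u.execute, or recurses through group/gdual/gprefix for the multi-level encodings. A hypothetical, heavily trimmed user-space dispatch in the same style (all names here are invented, not the emulator's):

#include <stdio.h>

struct ctxt { int dummy; };

static int op_nop(struct ctxt *c) { (void)c; puts("nop"); return 0; }

struct opc {
	unsigned int flags;		/* would hold ByteOp | DstReg | ... */
	int (*execute)(struct ctxt *c);
};

static const struct opc table[256] = {
	[0x90] = { 0, op_nop },		/* 0x90 is nop */
};

int main(void)
{
	struct ctxt c = { 0 };
	unsigned char opcode = 0x90;

	return table[opcode].execute ? table[opcode].execute(&c) : -1;
}
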
 133
 134/* EFLAGS bit definitions. */
 135#define EFLG_ID (1<<21)
 136#define EFLG_VIP (1<<20)
 137#define EFLG_VIF (1<<19)
 138#define EFLG_AC (1<<18)
 139#define EFLG_VM (1<<17)
 140#define EFLG_RF (1<<16)
 141#define EFLG_IOPL (3<<12)
 142#define EFLG_NT (1<<14)
 143#define EFLG_OF (1<<11)
 144#define EFLG_DF (1<<10)
 145#define EFLG_IF (1<<9)
 146#define EFLG_TF (1<<8)
 147#define EFLG_SF (1<<7)
 148#define EFLG_ZF (1<<6)
 149#define EFLG_AF (1<<4)
 150#define EFLG_PF (1<<2)
 151#define EFLG_CF (1<<0)
 152
 153#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
 154#define EFLG_RESERVED_ONE_MASK 2
 155
 156/*
 157 * Instruction emulation:
 158 * Most instructions are emulated directly via a fragment of inline assembly
 159 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 160 * any modified flags.
 161 */
 162
 163#if defined(CONFIG_X86_64)
 164#define _LO32 "k"		/* force 32-bit operand */
 165#define _STK  "%%rsp"		/* stack pointer */
 166#elif defined(__i386__)
 167#define _LO32 ""		/* force 32-bit operand */
 168#define _STK  "%%esp"		/* stack pointer */
 169#endif
 170
 171/*
 172 * These EFLAGS bits are restored from saved value during emulation, and
 173 * any changes are written back to the saved value after emulation.
 174 */
 175#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
 176
 177/* Before executing instruction: restore necessary bits in EFLAGS. */
 178#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
 179	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
 180	"movl %"_sav",%"_LO32 _tmp"; "                                  \
 181	"push %"_tmp"; "                                                \
 182	"push %"_tmp"; "                                                \
 183	"movl %"_msk",%"_LO32 _tmp"; "                                  \
 184	"andl %"_LO32 _tmp",("_STK"); "                                 \
 185	"pushf; "                                                       \
 186	"notl %"_LO32 _tmp"; "                                          \
 187	"andl %"_LO32 _tmp",("_STK"); "                                 \
 188	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
 189	"pop  %"_tmp"; "                                                \
 190	"orl  %"_LO32 _tmp",("_STK"); "                                 \
 191	"popf; "                                                        \
 192	"pop  %"_sav"; "
 193
 194/* After executing instruction: write-back necessary bits in EFLAGS. */
 195#define _POST_EFLAGS(_sav, _msk, _tmp) \
 196	/* _sav |= EFLAGS & _msk; */		\
 197	"pushf; "				\
 198	"pop  %"_tmp"; "			\
 199	"andl %"_msk",%"_LO32 _tmp"; "		\
 200	"orl  %"_LO32 _tmp",%"_sav"; "
 201
 202#ifdef CONFIG_X86_64
 203#define ON64(x) x
 204#else
 205#define ON64(x)
 206#endif
 207
 208#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
 209	do {								\
 210		__asm__ __volatile__ (					\
 211			_PRE_EFLAGS("0", "4", "2")			\
 212			_op _suffix " %"_x"3,%1; "			\
 213			_POST_EFLAGS("0", "4", "2")			\
 214			: "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
 215			  "=&r" (_tmp)					\
 216			: _y ((_src).val), "i" (EFLAGS_MASK));		\
 217	} while (0)
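
Aside: the point of the _PRE_EFLAGS/_POST_EFLAGS bracket is to let one real host instruction compute the guest's flags. A standalone x86 sketch of the same idea, minus the masking plumbing: execute an "add", then recover the resulting EFLAGS with pushf/pop:

#include <stdio.h>

int main(void)
{
	unsigned long dst = 0xffffffffUL;
	unsigned long flags;

	asm volatile("addl $1, %k0\n\t"	/* the emulated operation */
		     "pushf\n\t"	/* capture host EFLAGS... */
		     "pop %1"		/* ...into a register */
		     : "+r" (dst), "=r" (flags)
		     : : "cc");
	/* 0xffffffff + 1 overflows 32 bits: CF=1, ZF=1 */
	printf("dst=%#lx CF=%lu ZF=%lu\n", dst, flags & 1, (flags >> 6) & 1);
	return 0;
}
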
 218
 219
 220/* Raw emulation: instruction has two explicit operands. */
 221#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
 222	do {								\
 223		unsigned long _tmp;					\
 224									\
 225		switch ((_dst).bytes) {					\
 226		case 2:							\
 227			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
 228			break;						\
 229		case 4:							\
 230			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
 231			break;						\
 232		case 8:							\
 233			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
 234			break;						\
 235		}							\
 236	} while (0)
 237
 238#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
 239	do {								     \
 240		unsigned long _tmp;					     \
 241		switch ((_dst).bytes) {				             \
 242		case 1:							     \
 243			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
 244			break;						     \
 245		default:						     \
 246			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
 247					     _wx, _wy, _lx, _ly, _qx, _qy);  \
 248			break;						     \
 249		}							     \
 250	} while (0)
 251
 252/* Source operand is byte-sized and may be restricted to just %cl. */
 253#define emulate_2op_SrcB(_op, _src, _dst, _eflags)                      \
 254	__emulate_2op(_op, _src, _dst, _eflags,				\
 255		      "b", "c", "b", "c", "b", "c", "b", "c")
 256
 257/* Source operand is byte, word, long or quad sized. */
 258#define emulate_2op_SrcV(_op, _src, _dst, _eflags)                      \
 259	__emulate_2op(_op, _src, _dst, _eflags,				\
 260		      "b", "q", "w", "r", _LO32, "r", "", "r")
 261
 262/* Source operand is word, long or quad sized. */
 263#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)               \
 264	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
 265			     "w", "r", _LO32, "r", "", "r")
 266
 267/* Instruction has three operands and one operand is stored in ECX register */
 268#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type)	\
 269	do {								\
 270		unsigned long _tmp;					\
 271		_type _clv  = (_cl).val;				\
 272		_type _srcv = (_src).val;				\
 273		_type _dstv = (_dst).val;				\
 274									\
 275		__asm__ __volatile__ (					\
 276			_PRE_EFLAGS("0", "5", "2")			\
 277			_op _suffix " %4,%1 \n"				\
 278			_POST_EFLAGS("0", "5", "2")			\
 279			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
 280			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
 281			);						\
 282									\
 283		(_cl).val  = (unsigned long) _clv;			\
 284		(_src).val = (unsigned long) _srcv;			\
 285		(_dst).val = (unsigned long) _dstv;			\
 286	} while (0)
 287
 288#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
 289	do {								\
 290		switch ((_dst).bytes) {					\
 291		case 2:							\
 292			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
 293					 "w", unsigned short);         	\
 294			break;						\
 295		case 4:							\
 296			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
 297					 "l", unsigned int);           	\
 298			break;						\
 299		case 8:							\
 300			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
 301					      "q", unsigned long));	\
 302			break;						\
 303		}							\
 304	} while (0)
 305
 306#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
 307	do {								\
 308		unsigned long _tmp;					\
 309									\
 310		__asm__ __volatile__ (					\
 311			_PRE_EFLAGS("0", "3", "2")			\
 312			_op _suffix " %1; "				\
 313			_POST_EFLAGS("0", "3", "2")			\
 314			: "=m" (_eflags), "+m" ((_dst).val),		\
 315			  "=&r" (_tmp)					\
 316			: "i" (EFLAGS_MASK));				\
 317	} while (0)
 318
 319/* Instruction has only one explicit operand (no source operand). */
 320#define emulate_1op(_op, _dst, _eflags)                                    \
 321	do {								\
 322		switch ((_dst).bytes) {				        \
 323		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
 324		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
 325		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
 326		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
 327		}							\
 328	} while (0)
 329
 330#define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix)		\
 331	do {								\
 332		unsigned long _tmp;					\
 333									\
 334		__asm__ __volatile__ (					\
 335			_PRE_EFLAGS("0", "4", "1")			\
 336			_op _suffix " %5; "				\
 337			_POST_EFLAGS("0", "4", "1")			\
 338			: "=m" (_eflags), "=&r" (_tmp),			\
 339			  "+a" (_rax), "+d" (_rdx)			\
 340			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
 341			  "a" (_rax), "d" (_rdx));			\
 342	} while (0)
 343
 344#define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
 345	do {								\
 346		unsigned long _tmp;					\
 347									\
 348		__asm__ __volatile__ (					\
 349			_PRE_EFLAGS("0", "5", "1")			\
 350			"1: \n\t"					\
 351			_op _suffix " %6; "				\
 352			"2: \n\t"					\
 353			_POST_EFLAGS("0", "5", "1")			\
 354			".pushsection .fixup,\"ax\" \n\t"		\
 355			"3: movb $1, %4 \n\t"				\
 356			"jmp 2b \n\t"					\
 357			".popsection \n\t"				\
 358			_ASM_EXTABLE(1b, 3b)				\
 359			: "=m" (_eflags), "=&r" (_tmp),			\
 360			  "+a" (_rax), "+d" (_rdx), "+qm"(_ex)		\
 361			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
 362			  "a" (_rax), "d" (_rdx));			\
 363	} while (0)
 364
 365/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
 366#define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags)		\
 367	do {								\
 368		switch((_src).bytes) {					\
 369		case 1:							\
 370			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx,	\
 371					      _eflags, "b");		\
 372			break;						\
 373		case 2:							\
 374			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx,	\
 375					      _eflags, "w");		\
 376			break;						\
 377		case 4:							\
 378			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx,	\
 379					      _eflags, "l");		\
 380			break;						\
 381		case 8:							\
 382			ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
 383						   _eflags, "q"));	\
 384			break;						\
 385		}							\
 386	} while (0)
 387
 388#define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex)	\
 389	do {								\
 390		switch((_src).bytes) {					\
 391		case 1:							\
 392			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx,	\
 393						 _eflags, "b", _ex);	\
 394			break;						\
 395		case 2:							\
 396			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
 397						 _eflags, "w", _ex);	\
 398			break;						\
 399		case 4:							\
 400			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
 401						 _eflags, "l", _ex);	\
 402			break;						\
 403		case 8: ON64(						\
 404			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
 405						 _eflags, "q", _ex));	\
 406			break;						\
 407		}							\
 408	} while (0)
 409
 410static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
 411				    enum x86_intercept intercept,
 412				    enum x86_intercept_stage stage)
 413{
 414	struct x86_instruction_info info = {
 415		.intercept  = intercept,
 416		.rep_prefix = ctxt->rep_prefix,
 417		.modrm_mod  = ctxt->modrm_mod,
 418		.modrm_reg  = ctxt->modrm_reg,
 419		.modrm_rm   = ctxt->modrm_rm,
 420		.src_val    = ctxt->src.val64,
 421		.src_bytes  = ctxt->src.bytes,
 422		.dst_bytes  = ctxt->dst.bytes,
 423		.ad_bytes   = ctxt->ad_bytes,
 424		.next_rip   = ctxt->eip,
 425	};
 426
 427	return ctxt->ops->intercept(ctxt, &info, stage);
 428}
 429
 430static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
 431{
 432	return (1UL << (ctxt->ad_bytes << 3)) - 1;
 433}
 434
 435/* Access/update address held in a register, based on addressing mode. */
 436static inline unsigned long
 437address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
 438{
 439	if (ctxt->ad_bytes == sizeof(unsigned long))
 440		return reg;
 441	else
 442		return reg & ad_mask(ctxt);
 443}
 444
 445static inline unsigned long
 446register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
 447{
 448	return address_mask(ctxt, reg);
 449}
 450
 451static inline void
 452register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
 453{
 454	if (ctxt->ad_bytes == sizeof(unsigned long))
 455		*reg += inc;
 456	else
 457		*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
 458}
 459
 460static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
 461{
 462	register_address_increment(ctxt, &ctxt->_eip, rel);
 463}
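
Aside: with 16-bit addressing (ad_bytes == 2) only the low word of a register may change; register_address_increment() above keeps the high bits intact. A standalone sketch of that wrap:

#include <stdio.h>

int main(void)
{
	unsigned long reg = 0x1234ffffUL;	/* e.g. RSI with live high bits */
	unsigned long mask = (1UL << 16) - 1;	/* ad_mask() for ad_bytes == 2 */

	reg = (reg & ~mask) | ((reg + 1) & mask);
	printf("%#lx\n", reg);			/* 0x12340000: SI wrapped to 0 */
	return 0;
}
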
 464
 465static u32 desc_limit_scaled(struct desc_struct *desc)
 466{
 467	u32 limit = get_desc_limit(desc);
 468
 469	return desc->g ? (limit << 12) | 0xfff : limit;
 470}
 471
 472static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
 473{
 474	ctxt->has_seg_override = true;
 475	ctxt->seg_override = seg;
 476}
 477
 478static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
 479{
 480	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
 481		return 0;
 482
 483	return ctxt->ops->get_cached_segment_base(ctxt, seg);
 484}
 485
 486static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
 487{
 488	if (!ctxt->has_seg_override)
 489		return 0;
 490
 491	return ctxt->seg_override;
 492}
 493
 494static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
 495			     u32 error, bool valid)
 496{
 497	ctxt->exception.vector = vec;
 498	ctxt->exception.error_code = error;
 499	ctxt->exception.error_code_valid = valid;
 500	return X86EMUL_PROPAGATE_FAULT;
 501}
 502
 503static int emulate_db(struct x86_emulate_ctxt *ctxt)
 504{
 505	return emulate_exception(ctxt, DB_VECTOR, 0, false);
 506}
 507
 508static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
 509{
 510	return emulate_exception(ctxt, GP_VECTOR, err, true);
 511}
 512
 513static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
 514{
 515	return emulate_exception(ctxt, SS_VECTOR, err, true);
 516}
 517
 518static int emulate_ud(struct x86_emulate_ctxt *ctxt)
 519{
 520	return emulate_exception(ctxt, UD_VECTOR, 0, false);
 521}
 522
 523static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
 524{
 525	return emulate_exception(ctxt, TS_VECTOR, err, true);
 526}
 527
 528static int emulate_de(struct x86_emulate_ctxt *ctxt)
 529{
 530	return emulate_exception(ctxt, DE_VECTOR, 0, false);
 531}
 532
 533static int emulate_nm(struct x86_emulate_ctxt *ctxt)
 534{
 535	return emulate_exception(ctxt, NM_VECTOR, 0, false);
 536}
 537
 538static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
 539{
 540	u16 selector;
 541	struct desc_struct desc;
 542
 543	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
 544	return selector;
 545}
 546
 547static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
 548				 unsigned seg)
 549{
 550	u16 dummy;
 551	u32 base3;
 552	struct desc_struct desc;
 553
 554	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
 555	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
 556}
 557
 558static int __linearize(struct x86_emulate_ctxt *ctxt,
 559		     struct segmented_address addr,
 560		     unsigned size, bool write, bool fetch,
 561		     ulong *linear)
 562{
 563	struct desc_struct desc;
 564	bool usable;
 565	ulong la;
 566	u32 lim;
 567	u16 sel;
 568	unsigned cpl, rpl;
 569
 570	la = seg_base(ctxt, addr.seg) + addr.ea;
 571	switch (ctxt->mode) {
 572	case X86EMUL_MODE_REAL:
 573		break;
 574	case X86EMUL_MODE_PROT64:
 575		if (((signed long)la << 16) >> 16 != la)
 576			return emulate_gp(ctxt, 0);
 577		break;
 578	default:
 579		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
 580						addr.seg);
 581		if (!usable)
 582			goto bad;
 583		/* code segment or read-only data segment */
 584		if (((desc.type & 8) || !(desc.type & 2)) && write)
 585			goto bad;
 586		/* unreadable code segment */
 587		if (!fetch && (desc.type & 8) && !(desc.type & 2))
 588			goto bad;
 589		lim = desc_limit_scaled(&desc);
 590		if ((desc.type & 8) || !(desc.type & 4)) {
 591			/* expand-up segment */
 592			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
 593				goto bad;
 594		} else {
 595			/* expand-down segment */
 596			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
 597				goto bad;
 598			lim = desc.d ? 0xffffffff : 0xffff;
 599			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
 600				goto bad;
 601		}
 602		cpl = ctxt->ops->cpl(ctxt);
 603		rpl = sel & 3;
 604		cpl = max(cpl, rpl);
 605		if (!(desc.type & 8)) {
 606			/* data segment */
 607			if (cpl > desc.dpl)
 608				goto bad;
 609		} else if ((desc.type & 8) && !(desc.type & 4)) {
 610			/* nonconforming code segment */
 611			if (cpl != desc.dpl)
 612				goto bad;
 613		} else if ((desc.type & 8) && (desc.type & 4)) {
 614			/* conforming code segment */
 615			if (cpl < desc.dpl)
 616				goto bad;
 617		}
 618		break;
 619	}
 620	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
 621		la &= (u32)-1;
 622	*linear = la;
 623	return X86EMUL_CONTINUE;
 624bad:
 625	if (addr.seg == VCPU_SREG_SS)
 626		return emulate_ss(ctxt, addr.seg);
 627	else
 628		return emulate_gp(ctxt, addr.seg);
 629}
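
Aside: the two limit tests above differ in sense. For an expand-up segment the whole access must lie at or below the limit; for expand-down, valid offsets lie strictly above the limit, bounded by 0xffff or 0xffffffff depending on desc.d. A user-space sketch with hypothetical values:

#include <stdbool.h>
#include <stdio.h>

static bool fits_expand_up(unsigned int ea, unsigned int size, unsigned int lim)
{
	return ea <= lim && (unsigned int)(ea + size - 1) <= lim;
}

static bool fits_expand_down(unsigned int ea, unsigned int size,
			     unsigned int lim, bool big)
{
	unsigned int upper = big ? 0xffffffff : 0xffff;	/* desc.d */

	return ea > lim && (unsigned int)(ea + size - 1) > lim &&
	       ea <= upper && (unsigned int)(ea + size - 1) <= upper;
}

int main(void)
{
	/* 16 bytes ending exactly at the limit; 4 bytes just above a hole. */
	printf("%d %d\n", fits_expand_up(0xfff0, 16, 0xffff),
	       fits_expand_down(0x2000, 4, 0x1fff, false));	/* 1 1 */
	return 0;
}
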
 630
 631static int linearize(struct x86_emulate_ctxt *ctxt,
 632		     struct segmented_address addr,
 633		     unsigned size, bool write,
 634		     ulong *linear)
 635{
 636	return __linearize(ctxt, addr, size, write, false, linear);
 637}
 638
 639
 640static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 641			      struct segmented_address addr,
 642			      void *data,
 643			      unsigned size)
 644{
 645	int rc;
 646	ulong linear;
 647
 648	rc = linearize(ctxt, addr, size, false, &linear);
 649	if (rc != X86EMUL_CONTINUE)
 650		return rc;
 651	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
 652}
 653
 654static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt,
 655			      unsigned long eip, u8 *dest)
 656{
 657	struct fetch_cache *fc = &ctxt->fetch;
 658	int rc;
 659	int size, cur_size;
 660
 661	if (eip == fc->end) {
 662		unsigned long linear;
 663		struct segmented_address addr = { .seg=VCPU_SREG_CS, .ea=eip};
 664		cur_size = fc->end - fc->start;
 665		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
 666		rc = __linearize(ctxt, addr, size, false, true, &linear);
 667		if (rc != X86EMUL_CONTINUE)
 668			return rc;
 669		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
 670				      size, &ctxt->exception);
 671		if (rc != X86EMUL_CONTINUE)
 672			return rc;
 673		fc->end += size;
 674	}
 675	*dest = fc->data[eip - fc->start];
 676	return X86EMUL_CONTINUE;
 677}
 678
 679static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
 680			 unsigned long eip, void *dest, unsigned size)
 681{
 682	int rc;
 683
 684	/* x86 instructions are limited to 15 bytes. */
 685	if (eip + size - ctxt->eip > 15)
 686		return X86EMUL_UNHANDLEABLE;
 687	while (size--) {
 688		rc = do_insn_fetch_byte(ctxt, eip++, dest++);
 689		if (rc != X86EMUL_CONTINUE)
 690			return rc;
 691	}
 692	return X86EMUL_CONTINUE;
 693}
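
Aside: the 15-byte cap is architectural; x86 faults on longer instructions, so the fetcher can bound its window by comparing the running eip against the start of the instruction. A trivial standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned long start = 0x1000;	/* ctxt->eip: first byte of the insn */
	unsigned long eip = 0x100c;	/* current fetch position */
	unsigned int size = 4;		/* bytes wanted next */

	if (eip + size - start > 15)
		puts("unhandleable");	/* taken here: 16 > 15 */
	else
		puts("ok");
	return 0;
}
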
 694
 695/* Fetch next part of the instruction being emulated. */
 696#define insn_fetch(_type, _size, _eip)					\
 697({	unsigned long _x;						\
 698	rc = do_insn_fetch(ctxt, (_eip), &_x, (_size));			\
 699	if (rc != X86EMUL_CONTINUE)					\
 700		goto done;						\
 701	(_eip) += (_size);						\
 702	(_type)_x;							\
 703})
 704
 705#define insn_fetch_arr(_arr, _size, _eip)				\
 706({	rc = do_insn_fetch(ctxt, (_eip), _arr, (_size));		\
 707	if (rc != X86EMUL_CONTINUE)					\
 708		goto done;						\
 709	(_eip) += (_size);						\
 710})
 711
 712/*
 713 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 714 * pointer into the block that addresses the relevant register.
 715 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 716 */
 717static void *decode_register(u8 modrm_reg, unsigned long *regs,
 718			     int highbyte_regs)
 719{
 720	void *p;
 721
 722	p = &regs[modrm_reg];
 723	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
 724		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
 725	return p;
 726}
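
Aside: without a REX prefix, byte-register encodings 4-7 name AH/CH/DH/BH, i.e. byte 1 of the first four GPRs on a little-endian host. A standalone demo of the pointer arithmetic decode_register() uses:

#include <stdio.h>

int main(void)
{
	unsigned long regs[8] = { 0x11223344 };	/* regs[0] stands in for RAX */
	int modrm_reg = 4;			/* encoding 4 selects AH */
	unsigned char *p = (unsigned char *)&regs[modrm_reg & 3] + 1;

	printf("AH=%#x\n", *p);			/* 0x33 on little-endian x86 */
	return 0;
}
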
 727
 728static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 729			   struct segmented_address addr,
 730			   u16 *size, unsigned long *address, int op_bytes)
 731{
 732	int rc;
 733
 734	if (op_bytes == 2)
 735		op_bytes = 3;
 736	*address = 0;
 737	rc = segmented_read_std(ctxt, addr, size, 2);
 738	if (rc != X86EMUL_CONTINUE)
 739		return rc;
 740	addr.ea += 2;
 741	rc = segmented_read_std(ctxt, addr, address, op_bytes);
 742	return rc;
 743}
 744
 745static int test_cc(unsigned int condition, unsigned int flags)
 746{
 747	int rc = 0;
 748
 749	switch ((condition & 15) >> 1) {
 750	case 0: /* o */
 751		rc |= (flags & EFLG_OF);
 752		break;
 753	case 1: /* b/c/nae */
 754		rc |= (flags & EFLG_CF);
 755		break;
 756	case 2: /* z/e */
 757		rc |= (flags & EFLG_ZF);
 758		break;
 759	case 3: /* be/na */
 760		rc |= (flags & (EFLG_CF|EFLG_ZF));
 761		break;
 762	case 4: /* s */
 763		rc |= (flags & EFLG_SF);
 764		break;
 765	case 5: /* p/pe */
 766		rc |= (flags & EFLG_PF);
 767		break;
 768	case 7: /* le/ng */
 769		rc |= (flags & EFLG_ZF);
 770		/* fall through */
 771	case 6: /* l/nge */
 772		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
 773		break;
 774	}
 775
 776	/* Odd condition identifiers (lsb == 1) have inverted sense. */
 777	return (!!rc ^ (condition & 1));
 778}
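
Aside: Jcc encodings come in even/odd pairs whose low bit inverts the sense, so test_cc() only computes the even member and XORs with bit 0. A standalone sketch covering just the z/e pair (flag bit re-declared locally):

#include <stdio.h>

#define FLG_ZF (1u << 6)

static int cc_z(unsigned int condition, unsigned int flags)
{
	int rc = (((condition & 15) >> 1) == 2) ? (flags & FLG_ZF) : 0;

	return !!rc ^ (condition & 1);	/* odd encodings invert */
}

int main(void)
{
	/* 4 = jz, 5 = jnz; with ZF set they must disagree. */
	printf("jz=%d jnz=%d\n", cc_z(4, FLG_ZF), cc_z(5, FLG_ZF));	/* 1 0 */
	return 0;
}
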
 779
 780static void fetch_register_operand(struct operand *op)
 781{
 782	switch (op->bytes) {
 783	case 1:
 784		op->val = *(u8 *)op->addr.reg;
 785		break;
 786	case 2:
 787		op->val = *(u16 *)op->addr.reg;
 788		break;
 789	case 4:
 790		op->val = *(u32 *)op->addr.reg;
 791		break;
 792	case 8:
 793		op->val = *(u64 *)op->addr.reg;
 794		break;
 795	}
 796}
 797
 798static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
 799{
 800	ctxt->ops->get_fpu(ctxt);
 801	switch (reg) {
 802	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
 803	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
 804	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
 805	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
 806	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
 807	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
 808	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
 809	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
 810#ifdef CONFIG_X86_64
 811	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
 812	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
 813	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
 814	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
 815	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
 816	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
 817	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
 818	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
 819#endif
 820	default: BUG();
 821	}
 822	ctxt->ops->put_fpu(ctxt);
 823}
 824
 825static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
 826			  int reg)
 827{
 828	ctxt->ops->get_fpu(ctxt);
 829	switch (reg) {
 830	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
 831	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
 832	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
 833	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
 834	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
 835	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
 836	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
 837	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
 838#ifdef CONFIG_X86_64
 839	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
 840	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
 841	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
 842	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
 843	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
 844	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
 845	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
 846	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
 847#endif
 848	default: BUG();
 849	}
 850	ctxt->ops->put_fpu(ctxt);
 851}
 852
 853static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
 854				    struct operand *op,
 855				    int inhibit_bytereg)
 856{
 857	unsigned reg = ctxt->modrm_reg;
 858	int highbyte_regs = ctxt->rex_prefix == 0;
 859
 860	if (!(ctxt->d & ModRM))
 861		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
 862
 863	if (ctxt->d & Sse) {
 864		op->type = OP_XMM;
 865		op->bytes = 16;
 866		op->addr.xmm = reg;
 867		read_sse_reg(ctxt, &op->vec_val, reg);
 868		return;
 869	}
 870
 871	op->type = OP_REG;
 872	if ((ctxt->d & ByteOp) && !inhibit_bytereg) {
 873		op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
 874		op->bytes = 1;
 875	} else {
 876		op->addr.reg = decode_register(reg, ctxt->regs, 0);
 877		op->bytes = ctxt->op_bytes;
 878	}
 879	fetch_register_operand(op);
 880	op->orig_val = op->val;
 881}
 882
 883static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 884			struct operand *op)
 885{
 886	u8 sib;
 887	int index_reg = 0, base_reg = 0, scale;
 888	int rc = X86EMUL_CONTINUE;
 889	ulong modrm_ea = 0;
 890
 891	if (ctxt->rex_prefix) {
 892		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
 893		index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
 894		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
 895	}
 896
 897	ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
 898	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
 899	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
 900	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
 901	ctxt->modrm_seg = VCPU_SREG_DS;
 902
 903	if (ctxt->modrm_mod == 3) {
 904		op->type = OP_REG;
 905		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
 906		op->addr.reg = decode_register(ctxt->modrm_rm,
 907					       ctxt->regs, ctxt->d & ByteOp);
 908		if (ctxt->d & Sse) {
 909			op->type = OP_XMM;
 910			op->bytes = 16;
 911			op->addr.xmm = ctxt->modrm_rm;
 912			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
 913			return rc;
 914		}
 915		fetch_register_operand(op);
 916		return rc;
 917	}
 918
 919	op->type = OP_MEM;
 920
 921	if (ctxt->ad_bytes == 2) {
 922		unsigned bx = ctxt->regs[VCPU_REGS_RBX];
 923		unsigned bp = ctxt->regs[VCPU_REGS_RBP];
 924		unsigned si = ctxt->regs[VCPU_REGS_RSI];
 925		unsigned di = ctxt->regs[VCPU_REGS_RDI];
 926
 927		/* 16-bit ModR/M decode. */
 928		switch (ctxt->modrm_mod) {
 929		case 0:
 930			if (ctxt->modrm_rm == 6)
 931				modrm_ea += insn_fetch(u16, 2, ctxt->_eip);
 932			break;
 933		case 1:
 934			modrm_ea += insn_fetch(s8, 1, ctxt->_eip);
 935			break;
 936		case 2:
 937			modrm_ea += insn_fetch(u16, 2, ctxt->_eip);
 938			break;
 939		}
 940		switch (ctxt->modrm_rm) {
 941		case 0:
 942			modrm_ea += bx + si;
 943			break;
 944		case 1:
 945			modrm_ea += bx + di;
 946			break;
 947		case 2:
 948			modrm_ea += bp + si;
 949			break;
 950		case 3:
 951			modrm_ea += bp + di;
 952			break;
 953		case 4:
 954			modrm_ea += si;
 955			break;
 956		case 5:
 957			modrm_ea += di;
 958			break;
 959		case 6:
 960			if (ctxt->modrm_mod != 0)
 961				modrm_ea += bp;
 962			break;
 963		case 7:
 964			modrm_ea += bx;
 965			break;
 966		}
 967		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
 968		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
 969			ctxt->modrm_seg = VCPU_SREG_SS;
 970		modrm_ea = (u16)modrm_ea;
 971	} else {
 972		/* 32/64-bit ModR/M decode. */
 973		if ((ctxt->modrm_rm & 7) == 4) {
 974			sib = insn_fetch(u8, 1, ctxt->_eip);
 975			index_reg |= (sib >> 3) & 7;
 976			base_reg |= sib & 7;
 977			scale = sib >> 6;
 978
 979			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
 980				modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
 981			else
 982				modrm_ea += ctxt->regs[base_reg];
 983			if (index_reg != 4)
 984				modrm_ea += ctxt->regs[index_reg] << scale;
 985		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
 986			if (ctxt->mode == X86EMUL_MODE_PROT64)
 987				ctxt->rip_relative = 1;
 988		} else
 989			modrm_ea += ctxt->regs[ctxt->modrm_rm];
 990		switch (ctxt->modrm_mod) {
 991		case 0:
 992			if (ctxt->modrm_rm == 5)
 993				modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
 994			break;
 995		case 1:
 996			modrm_ea += insn_fetch(s8, 1, ctxt->_eip);
 997			break;
 998		case 2:
 999			modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
1000			break;
1001		}
1002	}
1003	op->addr.mem.ea = modrm_ea;
1004done:
1005	return rc;
1006}
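
Aside: the 16-bit half of the decoder is plain base-plus-index arithmetic with 16-bit wraparound and a default SS segment for the BP-based forms. A standalone sketch of mod=01, rm=010, i.e. [bp+si+disp8]:

#include <stdio.h>

int main(void)
{
	unsigned short bp = 0x2000, si = 0x0030;
	signed char disp8 = -0x10;			/* the fetched s8 */
	unsigned short ea = (unsigned short)(bp + si + disp8);

	printf("ea=%#x, SS by default\n", ea);		/* ea=0x2020 */
	return 0;
}
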
1007
1008static int decode_abs(struct x86_emulate_ctxt *ctxt,
1009		      struct operand *op)
1010{
1011	int rc = X86EMUL_CONTINUE;
1012
1013	op->type = OP_MEM;
1014	switch (ctxt->ad_bytes) {
1015	case 2:
1016		op->addr.mem.ea = insn_fetch(u16, 2, ctxt->_eip);
1017		break;
1018	case 4:
1019		op->addr.mem.ea = insn_fetch(u32, 4, ctxt->_eip);
1020		break;
1021	case 8:
1022		op->addr.mem.ea = insn_fetch(u64, 8, ctxt->_eip);
1023		break;
1024	}
1025done:
1026	return rc;
1027}
1028
1029static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1030{
1031	long sv = 0, mask;
1032
1033	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1034		mask = ~(ctxt->dst.bytes * 8 - 1);
1035
1036		if (ctxt->src.bytes == 2)
1037			sv = (s16)ctxt->src.val & (s16)mask;
1038		else if (ctxt->src.bytes == 4)
1039			sv = (s32)ctxt->src.val & (s32)mask;
1040
1041		ctxt->dst.addr.mem.ea += (sv >> 3);
1042	}
1043
1044	/* only subword offset */
1045	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1046}
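
Aside: for bt/bts-style instructions with a register bit offset, the offset splits into a byte displacement folded into the effective address plus a residual bit index inside the operand. Standalone arithmetic matching the code above:

#include <stdio.h>

int main(void)
{
	long bitoff = 71;		/* bt [mem], reg with reg == 71 */
	int opbits = 16;		/* 2-byte destination operand */
	long mask = ~(long)(opbits - 1);
	long sv = bitoff & mask;

	/* ea moves 8 bytes forward; bit 7 is tested within the word. */
	printf("ea += %ld, bit %ld\n", sv >> 3, bitoff & (opbits - 1));
	return 0;
}
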
1047
1048static int read_emulated(struct x86_emulate_ctxt *ctxt,
1049			 unsigned long addr, void *dest, unsigned size)
1050{
1051	int rc;
1052	struct read_cache *mc = &ctxt->mem_read;
1053
1054	while (size) {
1055		int n = min(size, 8u);
1056		size -= n;
1057		if (mc->pos < mc->end)
1058			goto read_cached;
1059
1060		rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
1061					      &ctxt->exception);
1062		if (rc != X86EMUL_CONTINUE)
1063			return rc;
1064		mc->end += n;
1065
1066	read_cached:
1067		memcpy(dest, mc->data + mc->pos, n);
1068		mc->pos += n;
1069		dest += n;
1070		addr += n;
1071	}
1072	return X86EMUL_CONTINUE;
1073}
1074
1075static int segmented_read(struct x86_emulate_ctxt *ctxt,
1076			  struct segmented_address addr,
1077			  void *data,
1078			  unsigned size)
1079{
1080	int rc;
1081	ulong linear;
1082
1083	rc = linearize(ctxt, addr, size, false, &linear);
1084	if (rc != X86EMUL_CONTINUE)
1085		return rc;
1086	return read_emulated(ctxt, linear, data, size);
1087}
1088
1089static int segmented_write(struct x86_emulate_ctxt *ctxt,
1090			   struct segmented_address addr,
1091			   const void *data,
1092			   unsigned size)
1093{
1094	int rc;
1095	ulong linear;
1096
1097	rc = linearize(ctxt, addr, size, true, &linear);
1098	if (rc != X86EMUL_CONTINUE)
1099		return rc;
1100	return ctxt->ops->write_emulated(ctxt, linear, data, size,
1101					 &ctxt->exception);
1102}
1103
1104static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1105			     struct segmented_address addr,
1106			     const void *orig_data, const void *data,
1107			     unsigned size)
1108{
1109	int rc;
1110	ulong linear;
1111
1112	rc = linearize(ctxt, addr, size, true, &linear);
1113	if (rc != X86EMUL_CONTINUE)
1114		return rc;
1115	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1116					   size, &ctxt->exception);
1117}
1118
1119static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1120			   unsigned int size, unsigned short port,
1121			   void *dest)
1122{
1123	struct read_cache *rc = &ctxt->io_read;
1124
1125	if (rc->pos == rc->end) { /* refill pio read ahead */
1126		unsigned int in_page, n;
1127		unsigned int count = ctxt->rep_prefix ?
1128			address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
1129		in_page = (ctxt->eflags & EFLG_DF) ?
1130			offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
1131			PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
1132		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1133			count);
1134		if (n == 0)
1135			n = 1;
1136		rc->pos = rc->end = 0;
1137		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1138			return 0;
1139		rc->end = n * size;
1140	}
1141
1142	memcpy(dest, rc->data + rc->pos, size);
1143	rc->pos += size;
1144	return 1;
1145}
1146
1147static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1148				     u16 selector, struct desc_ptr *dt)
1149{
1150	struct x86_emulate_ops *ops = ctxt->ops;
1151
1152	if (selector & 1 << 2) {
1153		struct desc_struct desc;
1154		u16 sel;
1155
1156		memset (dt, 0, sizeof *dt);
1157		if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
1158			return;
1159
1160		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1161		dt->address = get_desc_base(&desc);
1162	} else
1163		ops->get_gdt(ctxt, dt);
1164}
1165
1166/* allowed just for 8-byte segments */
1167static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1168				   u16 selector, struct desc_struct *desc)
1169{
1170	struct desc_ptr dt;
1171	u16 index = selector >> 3;
1172	ulong addr;
1173
1174	get_descriptor_table_ptr(ctxt, selector, &dt);
1175
1176	if (dt.size < index * 8 + 7)
1177		return emulate_gp(ctxt, selector & 0xfffc);
1178
1179	addr = dt.address + index * 8;
1180	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1181				   &ctxt->exception);
1182}
1183
1184/* allowed just for 8-byte segments */
1185static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1186				    u16 selector, struct desc_struct *desc)
1187{
1188	struct desc_ptr dt;
1189	u16 index = selector >> 3;
1190	ulong addr;
1191
1192	get_descriptor_table_ptr(ctxt, selector, &dt);
1193
1194	if (dt.size < index * 8 + 7)
1195		return emulate_gp(ctxt, selector & 0xfffc);
1196
1197	addr = dt.address + index * 8;
1198	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1199				    &ctxt->exception);
1200}
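
Aside: both helpers rely on the standard selector layout: bits 3 and up index the descriptor table, bit 2 (TI) picks LDT over GDT, and bits 0-1 are the RPL; each descriptor is 8 bytes, hence "index * 8". A standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned short sel = 0x002b;	/* a typical user data selector */

	printf("index=%u table=%s rpl=%u offset=%u\n",
	       sel >> 3,
	       (sel & (1 << 2)) ? "LDT" : "GDT",
	       sel & 3,
	       (sel >> 3) * 8);		/* index=5 table=GDT rpl=3 offset=40 */
	return 0;
}
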
1201
1202/* Does not support long mode */
1203static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1204				   u16 selector, int seg)
1205{
1206	struct desc_struct seg_desc;
1207	u8 dpl, rpl, cpl;
1208	unsigned err_vec = GP_VECTOR;
1209	u32 err_code = 0;
1210	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1211	int ret;
1212
1213	memset(&seg_desc, 0, sizeof seg_desc);
1214
1215	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
1216	    || ctxt->mode == X86EMUL_MODE_REAL) {
1217		/* set real mode segment descriptor */
1218		set_desc_base(&seg_desc, selector << 4);
1219		set_desc_limit(&seg_desc, 0xffff);
1220		seg_desc.type = 3;
1221		seg_desc.p = 1;
1222		seg_desc.s = 1;
1223		goto load;
1224	}
1225
1226	/* NULL selector is not valid for TR, CS and SS */
1227	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
1228	    && null_selector)
1229		goto exception;
1230
1231	/* TR should be in GDT only */
1232	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1233		goto exception;
1234
1235	if (null_selector) /* for NULL selector skip all following checks */
1236		goto load;
1237
1238	ret = read_segment_descriptor(ctxt, selector, &seg_desc);
1239	if (ret != X86EMUL_CONTINUE)
1240		return ret;
1241
1242	err_code = selector & 0xfffc;
1243	err_vec = GP_VECTOR;
1244
1245	/* can't load system descriptor into segment selector */
1246	if (seg <= VCPU_SREG_GS && !seg_desc.s)
1247		goto exception;
1248
1249	if (!seg_desc.p) {
1250		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1251		goto exception;
1252	}
1253
1254	rpl = selector & 3;
1255	dpl = seg_desc.dpl;
1256	cpl = ctxt->ops->cpl(ctxt);
1257
1258	switch (seg) {
1259	case VCPU_SREG_SS:
1260		/*
1261		 * segment is not a writable data segment or segment
1262		 * selector's RPL != CPL or DPL != CPL
1263		 */
1264		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1265			goto exception;
1266		break;
1267	case VCPU_SREG_CS:
1268		if (!(seg_desc.type & 8))
1269			goto exception;
1270
1271		if (seg_desc.type & 4) {
1272			/* conforming */
1273			if (dpl > cpl)
1274				goto exception;
1275		} else {
1276			/* nonconforming */
1277			if (rpl > cpl || dpl != cpl)
1278				goto exception;
1279		}
1280		/* CS(RPL) <- CPL */
1281		selector = (selector & 0xfffc) | cpl;
1282		break;
1283	case VCPU_SREG_TR:
1284		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1285			goto exception;
1286		break;
1287	case VCPU_SREG_LDTR:
1288		if (seg_desc.s || seg_desc.type != 2)
1289			goto exception;
1290		break;
1291	default: /*  DS, ES, FS, or GS */
1292		/*
1293		 * segment is not a data or readable code segment or
1294		 * ((segment is a data or nonconforming code segment)
1295		 * and (both RPL and CPL > DPL))
1296		 */
1297		if ((seg_desc.type & 0xa) == 0x8 ||
1298		    (((seg_desc.type & 0xc) != 0xc) &&
1299		     (rpl > dpl && cpl > dpl)))
1300			goto exception;
1301		break;
1302	}
1303
1303
1304	if (seg_desc.s) {
1305		/* mark segment as accessed */
1306		seg_desc.type |= 1;
1307		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1308		if (ret != X86EMUL_CONTINUE)
1309			return ret;
1310	}
1311load:
1312	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
1313	return X86EMUL_CONTINUE;
1314exception:
1315	emulate_exception(ctxt, err_vec, err_code, true);
1316	return X86EMUL_PROPAGATE_FAULT;
1317}
1318
1319static void write_register_operand(struct operand *op)
1320{
1321	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1322	switch (op->bytes) {
1323	case 1:
1324		*(u8 *)op->addr.reg = (u8)op->val;
1325		break;
1326	case 2:
1327		*(u16 *)op->addr.reg = (u16)op->val;
1328		break;
1329	case 4:
1330		*op->addr.reg = (u32)op->val;
1331		break;	/* 64b: zero-extend */
1332	case 8:
1333		*op->addr.reg = op->val;
1334		break;
1335	}
1336}
1337
1338static int writeback(struct x86_emulate_ctxt *ctxt)
1339{
1340	int rc;
1341
1342	switch (ctxt->dst.type) {
1343	case OP_REG:
1344		write_register_operand(&ctxt->dst);
1345		break;
1346	case OP_MEM:
1347		if (ctxt->lock_prefix)
1348			rc = segmented_cmpxchg(ctxt,
1349					       ctxt->dst.addr.mem,
1350					       &ctxt->dst.orig_val,
1351					       &ctxt->dst.val,
1352					       ctxt->dst.bytes);
1353		else
1354			rc = segmented_write(ctxt,
1355					     ctxt->dst.addr.mem,
1356					     &ctxt->dst.val,
1357					     ctxt->dst.bytes);
1358		if (rc != X86EMUL_CONTINUE)
1359			return rc;
1360		break;
1361	case OP_XMM:
1362		write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
1363		break;
1364	case OP_NONE:
1365		/* no writeback */
1366		break;
1367	default:
1368		break;
1369	}
1370	return X86EMUL_CONTINUE;
1371}
1372
1373static int em_push(struct x86_emulate_ctxt *ctxt)
1374{
1375	struct segmented_address addr;
1376
1377	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
1378	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1379	addr.seg = VCPU_SREG_SS;
1380
1381	/* Disable writeback. */
1382	ctxt->dst.type = OP_NONE;
1383	return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
1384}
1385
1386static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1387		       void *dest, int len)
1388{
1389	int rc;
1390	struct segmented_address addr;
1391
1392	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1393	addr.seg = VCPU_SREG_SS;
1394	rc = segmented_read(ctxt, addr, dest, len);
1395	if (rc != X86EMUL_CONTINUE)
1396		return rc;
1397
1398	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
1399	return rc;
1400}
1401
1402static int em_pop(struct x86_emulate_ctxt *ctxt)
1403{
1404	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1405}
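
Aside: em_push() decrements RSP before the store and emulate_pop() reads before incrementing; the segmented write/read is what makes the stack observable to the guest. A tiny user-space model of that ordering, with a flat buffer standing in for SS:

#include <stdio.h>
#include <string.h>

static unsigned char stack[64];
static unsigned long rsp = sizeof(stack);

static void push(unsigned long val, int op_bytes)
{
	rsp -= op_bytes;			/* adjust first... */
	memcpy(&stack[rsp], &val, op_bytes);	/* ...then store */
}

static unsigned long pop(int op_bytes)
{
	unsigned long val = 0;

	memcpy(&val, &stack[rsp], op_bytes);	/* read first... */
	rsp += op_bytes;			/* ...then adjust */
	return val;
}

int main(void)
{
	push(0xdeadbeef, 4);
	printf("%#lx\n", pop(4));		/* 0xdeadbeef */
	return 0;
}
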
1406
1407static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1408			void *dest, int len)
1409{
1410	int rc;
1411	unsigned long val, change_mask;
1412	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1413	int cpl = ctxt->ops->cpl(ctxt);
1414
1415	rc = emulate_pop(ctxt, &val, len);
1416	if (rc != X86EMUL_CONTINUE)
1417		return rc;
1418
1419	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1420		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1421
1422	switch(ctxt->mode) {
1423	case X86EMUL_MODE_PROT64:
1424	case X86EMUL_MODE_PROT32:
1425	case X86EMUL_MODE_PROT16:
1426		if (cpl == 0)
1427			change_mask |= EFLG_IOPL;
1428		if (cpl <= iopl)
1429			change_mask |= EFLG_IF;
1430		break;
1431	case X86EMUL_MODE_VM86:
1432		if (iopl < 3)
1433			return emulate_gp(ctxt, 0);
1434		change_mask |= EFLG_IF;
1435		break;
1436	default: /* real mode */
1437		change_mask |= (EFLG_IOPL | EFLG_IF);
1438		break;
1439	}
1440
1441	*(unsigned long *)dest =
1442		(ctxt->eflags & ~change_mask) | (val & change_mask);
1443
1444	return rc;
1445}
1446
1447static int em_popf(struct x86_emulate_ctxt *ctxt)
1448{
1449	ctxt->dst.type = OP_REG;
1450	ctxt->dst.addr.reg = &ctxt->eflags;
1451	ctxt->dst.bytes = ctxt->op_bytes;
1452	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1453}
1454
1455static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
1456{
1457	ctxt->src.val = get_segment_selector(ctxt, seg);
1458
1459	return em_push(ctxt);
1460}
1461
1462static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, int seg)
1463{
1464	unsigned long selector;
1465	int rc;
1466
1467	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1468	if (rc != X86EMUL_CONTINUE)
1469		return rc;
1470
1471	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1472	return rc;
1473}
1474
1475static int em_pusha(struct x86_emulate_ctxt *ctxt)
1476{
1477	unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
1478	int rc = X86EMUL_CONTINUE;
1479	int reg = VCPU_REGS_RAX;
1480
1481	while (reg <= VCPU_REGS_RDI) {
1482		(reg == VCPU_REGS_RSP) ?
1483		(ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);
1484
1485		rc = em_push(ctxt);
1486		if (rc != X86EMUL_CONTINUE)
1487			return rc;
1488
1489		++reg;
1490	}
1491
1492	return rc;
1493}
1494
1495static int em_pushf(struct x86_emulate_ctxt *ctxt)
1496{
1497	ctxt->src.val =  (unsigned long)ctxt->eflags;
1498	return em_push(ctxt);
1499}
1500
1501static int em_popa(struct x86_emulate_ctxt *ctxt)
1502{
1503	int rc = X86EMUL_CONTINUE;
1504	int reg = VCPU_REGS_RDI;
1505
1506	while (reg >= VCPU_REGS_RAX) {
1507		if (reg == VCPU_REGS_RSP) {
1508			register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
1509							ctxt->op_bytes);
1510			--reg;
1511		}
1512
1513		rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
1514		if (rc != X86EMUL_CONTINUE)
1515			break;
1516		--reg;
1517	}
1518	return rc;
1519}
1520
1521int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1522{
1523	struct x86_emulate_ops *ops = ctxt->ops;
1524	int rc;
1525	struct desc_ptr dt;
1526	gva_t cs_addr;
1527	gva_t eip_addr;
1528	u16 cs, eip;
1529
1530	/* TODO: Add limit checks */
1531	ctxt->src.val = ctxt->eflags;
1532	rc = em_push(ctxt);
1533	if (rc != X86EMUL_CONTINUE)
1534		return rc;
1535
1536	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1537
1538	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1539	rc = em_push(ctxt);
1540	if (rc != X86EMUL_CONTINUE)
1541		return rc;
1542
1543	ctxt->src.val = ctxt->_eip;
1544	rc = em_push(ctxt);
1545	if (rc != X86EMUL_CONTINUE)
1546		return rc;
1547
1548	ops->get_idt(ctxt, &dt);
1549
1550	eip_addr = dt.address + (irq << 2);
1551	cs_addr = dt.address + (irq << 2) + 2;
1552
1553	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1554	if (rc != X86EMUL_CONTINUE)
1555		return rc;
1556
1557	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1558	if (rc != X86EMUL_CONTINUE)
1559		return rc;
1560
1561	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1562	if (rc != X86EMUL_CONTINUE)
1563		return rc;
1564
1565	ctxt->_eip = eip;
1566
1567	return rc;
1568}
1569
1570static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1571{
1572	switch(ctxt->mode) {
1573	case X86EMUL_MODE_REAL:
1574		return emulate_int_real(ctxt, irq);
1575	case X86EMUL_MODE_VM86:
1576	case X86EMUL_MODE_PROT16:
1577	case X86EMUL_MODE_PROT32:
1578	case X86EMUL_MODE_PROT64:
1579	default:
1580		/* Protected mode interrupts unimplemented yet */
1581		return X86EMUL_UNHANDLEABLE;
1582	}
1583}
1584
1585static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1586{
1587	int rc = X86EMUL_CONTINUE;
1588	unsigned long temp_eip = 0;
1589	unsigned long temp_eflags = 0;
1590	unsigned long cs = 0;
1591	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1592			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1593			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1594	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1595
1596	/* TODO: Add stack limit check */
1597
1598	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1599
1600	if (rc != X86EMUL_CONTINUE)
1601		return rc;
1602
1603	if (temp_eip & ~0xffff)
1604		return emulate_gp(ctxt, 0);
1605
1606	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1607
1608	if (rc != X86EMUL_CONTINUE)
1609		return rc;
1610
1611	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1612
1613	if (rc != X86EMUL_CONTINUE)
1614		return rc;
1615
1616	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1617
1618	if (rc != X86EMUL_CONTINUE)
1619		return rc;
1620
1621	ctxt->_eip = temp_eip;
1622
1623
1624	if (ctxt->op_bytes == 4)
1625		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1626	else if (ctxt->op_bytes == 2) {
1627		ctxt->eflags &= ~0xffff;
1628		ctxt->eflags |= temp_eflags;
1629	}
1630
1631	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1632	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1633
1634	return rc;
1635}
1636
1637static int em_iret(struct x86_emulate_ctxt *ctxt)
1638{
1639	switch(ctxt->mode) {
1640	case X86EMUL_MODE_REAL:
1641		return emulate_iret_real(ctxt);
1642	case X86EMUL_MODE_VM86:
1643	case X86EMUL_MODE_PROT16:
1644	case X86EMUL_MODE_PROT32:
1645	case X86EMUL_MODE_PROT64:
1646	default:
1647		/* iret from protected mode unimplemented yet */
1648		return X86EMUL_UNHANDLEABLE;
1649	}
1650}
1651
1652static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1653{
1654	int rc;
1655	unsigned short sel;
1656
1657	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1658
1659	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
1660	if (rc != X86EMUL_CONTINUE)
1661		return rc;
1662
1663	ctxt->_eip = 0;
1664	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
1665	return X86EMUL_CONTINUE;
1666}
1667
1668static int em_grp1a(struct x86_emulate_ctxt *ctxt)
1669{
1670	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
1671}
1672
1673static int em_grp2(struct x86_emulate_ctxt *ctxt)
1674{
1675	switch (ctxt->modrm_reg) {
1676	case 0:	/* rol */
1677		emulate_2op_SrcB("rol", ctxt->src, ctxt->dst, ctxt->eflags);
1678		break;
1679	case 1:	/* ror */
1680		emulate_2op_SrcB("ror", ctxt->src, ctxt->dst, ctxt->eflags);
1681		break;
1682	case 2:	/* rcl */
1683		emulate_2op_SrcB("rcl", ctxt->src, ctxt->dst, ctxt->eflags);
1684		break;
1685	case 3:	/* rcr */
1686		emulate_2op_SrcB("rcr", ctxt->src, ctxt->dst, ctxt->eflags);
1687		break;
1688	case 4:	/* sal/shl */
1689	case 6:	/* sal/shl */
1690		emulate_2op_SrcB("sal", ctxt->src, ctxt->dst, ctxt->eflags);
1691		break;
1692	case 5:	/* shr */
1693		emulate_2op_SrcB("shr", ctxt->src, ctxt->dst, ctxt->eflags);
1694		break;
1695	case 7:	/* sar */
1696		emulate_2op_SrcB("sar", ctxt->src, ctxt->dst, ctxt->eflags);
1697		break;
1698	}
1699	return X86EMUL_CONTINUE;
1700}
1701
1702static int em_grp3(struct x86_emulate_ctxt *ctxt)
1703{
1704	unsigned long *rax = &ctxt->regs[VCPU_REGS_RAX];
1705	unsigned long *rdx = &ctxt->regs[VCPU_REGS_RDX];
1706	u8 de = 0;
1707
1708	switch (ctxt->modrm_reg) {
1709	case 0 ... 1:	/* test */
1710		emulate_2op_SrcV("test", ctxt->src, ctxt->dst, ctxt->eflags);
1711		break;
1712	case 2:	/* not */
1713		ctxt->dst.val = ~ctxt->dst.val;
1714		break;
1715	case 3:	/* neg */
1716		emulate_1op("neg", ctxt->dst, ctxt->eflags);
1717		break;
1718	case 4: /* mul */
1719		emulate_1op_rax_rdx("mul", ctxt->src, *rax, *rdx, ctxt->eflags);
1720		break;
1721	case 5: /* imul */
1722		emulate_1op_rax_rdx("imul", ctxt->src, *rax, *rdx, ctxt->eflags);
1723		break;
1724	case 6: /* div */
1725		emulate_1op_rax_rdx_ex("div", ctxt->src, *rax, *rdx,
1726				       ctxt->eflags, de);
1727		break;
1728	case 7: /* idiv */
1729		emulate_1op_rax_rdx_ex("idiv", ctxt->src, *rax, *rdx,
1730				       ctxt->eflags, de);
1731		break;
1732	default:
1733		return X86EMUL_UNHANDLEABLE;
1734	}
1735	if (de)
1736		return emulate_de(ctxt);
1737	return X86EMUL_CONTINUE;
1738}
1739
1740static int em_grp45(struct x86_emulate_ctxt *ctxt)
1741{
1742	int rc = X86EMUL_CONTINUE;
1743
1744	switch (ctxt->modrm_reg) {
1745	case 0:	/* inc */
1746		emulate_1op("inc", ctxt->dst, ctxt->eflags);
1747		break;
1748	case 1:	/* dec */
1749		emulate_1op("dec", ctxt->dst, ctxt->eflags);
1750		break;
1751	case 2: /* call near abs */ {
1752		long int old_eip;
1753		old_eip = ctxt->_eip;
1754		ctxt->_eip = ctxt->src.val;
1755		ctxt->src.val = old_eip;
1756		rc = em_push(ctxt);
1757		break;
1758	}
1759	case 4: /* jmp abs */
1760		ctxt->_eip = ctxt->src.val;
1761		break;
1762	case 5: /* jmp far */
1763		rc = em_jmp_far(ctxt);
1764		break;
1765	case 6:	/* push */
1766		rc = em_push(ctxt);
1767		break;
1768	}
1769	return rc;
1770}
1771
1772static int em_grp9(struct x86_emulate_ctxt *ctxt)
1773{
1774	u64 old = ctxt->dst.orig_val64;
1775
1776	if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
1777	    ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
1778		ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1779		ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1780		ctxt->eflags &= ~EFLG_ZF;
1781	} else {
1782		ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
1783			(u32) ctxt->regs[VCPU_REGS_RBX];
1784
1785		ctxt->eflags |= EFLG_ZF;
1786	}
1787	return X86EMUL_CONTINUE;
1788}
1789
1790static int em_ret(struct x86_emulate_ctxt *ctxt)
1791{
1792	ctxt->dst.type = OP_REG;
1793	ctxt->dst.addr.reg = &ctxt->_eip;
1794	ctxt->dst.bytes = ctxt->op_bytes;
1795	return em_pop(ctxt);
1796}
1797
1798static int em_ret_far(struct x86_emulate_ctxt *ctxt)
1799{
1800	int rc;
1801	unsigned long cs;
1802
1803	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
1804	if (rc != X86EMUL_CONTINUE)
1805		return rc;
1806	if (ctxt->op_bytes == 4)
1807		ctxt->_eip = (u32)ctxt->_eip;
1808	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1809	if (rc != X86EMUL_CONTINUE)
1810		return rc;
1811	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1812	return rc;
1813}
1814
1815static int emulate_load_segment(struct x86_emulate_ctxt *ctxt, int seg)
1816{
1817	unsigned short sel;
1818	int rc;
1819
1820	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1821
1822	rc = load_segment_descriptor(ctxt, sel, seg);
1823	if (rc != X86EMUL_CONTINUE)
1824		return rc;
1825
1826	ctxt->dst.val = ctxt->src.val;
1827	return rc;
1828}
1829
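/*
 * Build the flat 4GB code and stack segment descriptors used by the
 * syscall/sysenter/sysexit family; callers adjust cs.l and the DPLs
 * for the target mode afterwards.
 */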
1830static void
1831setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1832			struct desc_struct *cs, struct desc_struct *ss)
1833{
1834	u16 selector;
1835
1836	memset(cs, 0, sizeof(struct desc_struct));
1837	ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
1838	memset(ss, 0, sizeof(struct desc_struct));
1839
1840	cs->l = 0;		/* will be adjusted later */
1841	set_desc_base(cs, 0);	/* flat segment */
1842	cs->g = 1;		/* 4kb granularity */
1843	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
1844	cs->type = 0x0b;	/* Read, Execute, Accessed */
1845	cs->s = 1;
1846	cs->dpl = 0;		/* will be adjusted later */
1847	cs->p = 1;
1848	cs->d = 1;
1849
1850	set_desc_base(ss, 0);	/* flat segment */
1851	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
1852	ss->g = 1;		/* 4kb granularity */
1853	ss->s = 1;
1854	ss->type = 0x03;	/* Read/Write, Accessed */
1855	ss->d = 1;		/* 32bit stack segment */
1856	ss->dpl = 0;
1857	ss->p = 1;
1858}
1859
1860static int em_syscall(struct x86_emulate_ctxt *ctxt)
1861{
1862	struct x86_emulate_ops *ops = ctxt->ops;
1863	struct desc_struct cs, ss;
1864	u64 msr_data;
1865	u16 cs_sel, ss_sel;
1866	u64 efer = 0;
1867
1868	/* syscall is not available in real mode or VM86 mode */
1869	if (ctxt->mode == X86EMUL_MODE_REAL ||
1870	    ctxt->mode == X86EMUL_MODE_VM86)
1871		return emulate_ud(ctxt);
1872
1873	ops->get_msr(ctxt, MSR_EFER, &efer);
1874	setup_syscalls_segments(ctxt, &cs, &ss);
1875
1876	ops->get_msr(ctxt, MSR_STAR, &msr_data);
1877	msr_data >>= 32;
1878	cs_sel = (u16)(msr_data & 0xfffc);
1879	ss_sel = (u16)(msr_data + 8);
1880
1881	if (efer & EFER_LMA) {
1882		cs.d = 0;
1883		cs.l = 1;
1884	}
1885	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
1886	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
1887
1888	ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
1889	if (efer & EFER_LMA) {
1890#ifdef CONFIG_X86_64
1891		ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
1892
1893		ops->get_msr(ctxt,
1894			     ctxt->mode == X86EMUL_MODE_PROT64 ?
1895			     MSR_LSTAR : MSR_CSTAR, &msr_data);
1896		ctxt->_eip = msr_data;
1897
1898		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
1899		ctxt->eflags &= ~(msr_data | EFLG_RF);
1900#endif
1901	} else {
1902		/* legacy mode */
1903		ops->get_msr(ctxt, MSR_STAR, &msr_data);
1904		ctxt->_eip = (u32)msr_data;
1905
1906		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1907	}
1908
1909	return X86EMUL_CONTINUE;
1910}
1911
1912static int em_sysenter(struct x86_emulate_ctxt *ctxt)
1913{
1914	struct x86_emulate_ops *ops = ctxt->ops;
1915	struct desc_struct cs, ss;
1916	u64 msr_data;
1917	u16 cs_sel, ss_sel;
1918	u64 efer = 0;
1919
1920	ops->get_msr(ctxt, MSR_EFER, &efer);
1921	/* inject #GP if in real mode */
1922	if (ctxt->mode == X86EMUL_MODE_REAL)
1923		return emulate_gp(ctxt, 0);
1924
1925	/* XXX sysenter/sysexit have not been tested in 64bit mode.
1926	 * Therefore, we inject an #UD.
1927	 */
1928	if (ctxt->mode == X86EMUL_MODE_PROT64)
1929		return emulate_ud(ctxt);
1930
1931	setup_syscalls_segments(ctxt, &cs, &ss);
1932
1933	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
1934	switch (ctxt->mode) {
1935	case X86EMUL_MODE_PROT32:
1936		if ((msr_data & 0xfffc) == 0x0)
1937			return emulate_gp(ctxt, 0);
1938		break;
1939	case X86EMUL_MODE_PROT64:
1940		if (msr_data == 0x0)
1941			return emulate_gp(ctxt, 0);
1942		break;
1943	}
1944
1945	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1946	cs_sel = (u16)msr_data;
1947	cs_sel &= ~SELECTOR_RPL_MASK;
1948	ss_sel = cs_sel + 8;
1949	ss_sel &= ~SELECTOR_RPL_MASK;
1950	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
1951		cs.d = 0;
1952		cs.l = 1;
1953	}
1954
1955	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
1956	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
1957
1958	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
1959	ctxt->_eip = msr_data;
1960
1961	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
1962	ctxt->regs[VCPU_REGS_RSP] = msr_data;
1963
1964	return X86EMUL_CONTINUE;
1965}
1966
1967static int em_sysexit(struct x86_emulate_ctxt *ctxt)
1968{
1969	struct x86_emulate_ops *ops = ctxt->ops;
1970	struct desc_struct cs, ss;
1971	u64 msr_data;
1972	int usermode;
1973	u16 cs_sel = 0, ss_sel = 0;
1974
1975	/* inject #GP if in real mode or Virtual 8086 mode */
1976	if (ctxt->mode == X86EMUL_MODE_REAL ||
1977	    ctxt->mode == X86EMUL_MODE_VM86)
1978		return emulate_gp(ctxt, 0);
1979
1980	setup_syscalls_segments(ctxt, &cs, &ss);
1981
1982	if ((ctxt->rex_prefix & 0x8) != 0x0)
1983		usermode = X86EMUL_MODE_PROT64;
1984	else
1985		usermode = X86EMUL_MODE_PROT32;
1986
1987	cs.dpl = 3;
1988	ss.dpl = 3;
1989	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
1990	switch (usermode) {
1991	case X86EMUL_MODE_PROT32:
1992		cs_sel = (u16)(msr_data + 16);
1993		if ((msr_data & 0xfffc) == 0x0)
1994			return emulate_gp(ctxt, 0);
1995		ss_sel = (u16)(msr_data + 24);
1996		break;
1997	case X86EMUL_MODE_PROT64:
1998		cs_sel = (u16)(msr_data + 32);
1999		if (msr_data == 0x0)
2000			return emulate_gp(ctxt, 0);
2001		ss_sel = cs_sel + 8;
2002		cs.d = 0;
2003		cs.l = 1;
2004		break;
2005	}
2006	cs_sel |= SELECTOR_RPL_MASK;
2007	ss_sel |= SELECTOR_RPL_MASK;
2008
2009	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2010	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2011
2012	ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
2013	ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
2014
2015	return X86EMUL_CONTINUE;
2016}
2017
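/*
 * IOPL gates port I/O: real mode never checks, VM86 mode always defers
 * to the TSS I/O permission bitmap, and protected mode faults only when
 * CPL exceeds EFLAGS.IOPL.
 */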
2018static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2019{
2020	int iopl;
2021	if (ctxt->mode == X86EMUL_MODE_REAL)
2022		return false;
2023	if (ctxt->mode == X86EMUL_MODE_VM86)
2024		return true;
2025	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2026	return ctxt->ops->cpl(ctxt) > iopl;
2027}
2028
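/*
 * Consult the I/O permission bitmap in the 32-bit TSS: the bitmap offset
 * lives at byte 102 of the TSS, and each port is one bit (set = deny).
 * A multi-byte access is allowed only if all 'len' bits are clear.
 */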
2029static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2030					    u16 port, u16 len)
2031{
2032	struct x86_emulate_ops *ops = ctxt->ops;
2033	struct desc_struct tr_seg;
2034	u32 base3;
2035	int r;
2036	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2037	unsigned mask = (1 << len) - 1;
2038	unsigned long base;
2039
2040	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2041	if (!tr_seg.p)
2042		return false;
2043	if (desc_limit_scaled(&tr_seg) < 103)
2044		return false;
2045	base = get_desc_base(&tr_seg);
2046#ifdef CONFIG_X86_64
2047	base |= ((u64)base3) << 32;
2048#endif
2049	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2050	if (r != X86EMUL_CONTINUE)
2051		return false;
2052	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2053		return false;
2054	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2055	if (r != X86EMUL_CONTINUE)
2056		return false;
2057	if ((perm >> bit_idx) & mask)
2058		return false;
2059	return true;
2060}
2061
2062static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2063				 u16 port, u16 len)
2064{
2065	if (ctxt->perm_ok)
2066		return true;
2067
2068	if (emulator_bad_iopl(ctxt))
2069		if (!emulator_io_port_access_allowed(ctxt, port, len))
2070			return false;
2071
2072	ctxt->perm_ok = true;
2073
2074	return true;
2075}
2076
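/*
 * Hardware task-switch emulation: the outgoing context is written to the
 * current TSS and the incoming one is read from the new TSS, in either
 * the 16-bit or the 32-bit TSS layout.
 */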
2077static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2078				struct tss_segment_16 *tss)
2079{
2080	tss->ip = ctxt->_eip;
2081	tss->flag = ctxt->eflags;
2082	tss->ax = ctxt->regs[VCPU_REGS_RAX];
2083	tss->cx = ctxt->regs[VCPU_REGS_RCX];
2084	tss->dx = ctxt->regs[VCPU_REGS_RDX];
2085	tss->bx = ctxt->regs[VCPU_REGS_RBX];
2086	tss->sp = ctxt->regs[VCPU_REGS_RSP];
2087	tss->bp = ctxt->regs[VCPU_REGS_RBP];
2088	tss->si = ctxt->regs[VCPU_REGS_RSI];
2089	tss->di = ctxt->regs[VCPU_REGS_RDI];
2090
2091	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2092	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2093	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2094	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2095	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2096}
2097
2098static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2099				 struct tss_segment_16 *tss)
2100{
2101	int ret;
2102
2103	ctxt->_eip = tss->ip;
2104	ctxt->eflags = tss->flag | 2;
2105	ctxt->regs[VCPU_REGS_RAX] = tss->ax;
2106	ctxt->regs[VCPU_REGS_RCX] = tss->cx;
2107	ctxt->regs[VCPU_REGS_RDX] = tss->dx;
2108	ctxt->regs[VCPU_REGS_RBX] = tss->bx;
2109	ctxt->regs[VCPU_REGS_RSP] = tss->sp;
2110	ctxt->regs[VCPU_REGS_RBP] = tss->bp;
2111	ctxt->regs[VCPU_REGS_RSI] = tss->si;
2112	ctxt->regs[VCPU_REGS_RDI] = tss->di;
2113
2114	/*
2115	 * SDM says that segment selectors are loaded before segment
2116	 * descriptors
2117	 */
2118	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2119	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2120	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2121	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2122	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2123
2124	/*
2125	 * Now load segment descriptors. If a fault happens at this stage,
2126	 * it is handled in the context of the new task
2127	 */
2128	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
2129	if (ret != X86EMUL_CONTINUE)
2130		return ret;
2131	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2132	if (ret != X86EMUL_CONTINUE)
2133		return ret;
2134	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2135	if (ret != X86EMUL_CONTINUE)
2136		return ret;
2137	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2138	if (ret != X86EMUL_CONTINUE)
2139		return ret;
2140	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2141	if (ret != X86EMUL_CONTINUE)
2142		return ret;
2143
2144	return X86EMUL_CONTINUE;
2145}
2146
2147static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2148			  u16 tss_selector, u16 old_tss_sel,
2149			  ulong old_tss_base, struct desc_struct *new_desc)
2150{
2151	struct x86_emulate_ops *ops = ctxt->ops;
2152	struct tss_segment_16 tss_seg;
2153	int ret;
2154	u32 new_tss_base = get_desc_base(new_desc);
2155
2156	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2157			    &ctxt->exception);
2158	if (ret != X86EMUL_CONTINUE)
2159		/* FIXME: need to provide precise fault address */
2160		return ret;
2161
2162	save_state_to_tss16(ctxt, &tss_seg);
2163
2164	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2165			     &ctxt->exception);
2166	if (ret != X86EMUL_CONTINUE)
2167		/* FIXME: need to provide precise fault address */
2168		return ret;
2169
2170	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2171			    &ctxt->exception);
2172	if (ret != X86EMUL_CONTINUE)
2173		/* FIXME: need to provide precise fault address */
2174		return ret;
2175
2176	if (old_tss_sel != 0xffff) {
2177		tss_seg.prev_task_link = old_tss_sel;
2178
2179		ret = ops->write_std(ctxt, new_tss_base,
2180				     &tss_seg.prev_task_link,
2181				     sizeof tss_seg.prev_task_link,
2182				     &ctxt->exception);
2183		if (ret != X86EMUL_CONTINUE)
2184			/* FIXME: need to provide precise fault address */
2185			return ret;
2186	}
2187
2188	return load_state_from_tss16(ctxt, &tss_seg);
2189}
2190
2191static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2192				struct tss_segment_32 *tss)
2193{
2194	tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
2195	tss->eip = ctxt->_eip;
2196	tss->eflags = ctxt->eflags;
2197	tss->eax = ctxt->regs[VCPU_REGS_RAX];
2198	tss->ecx = ctxt->regs[VCPU_REGS_RCX];
2199	tss->edx = ctxt->regs[VCPU_REGS_RDX];
2200	tss->ebx = ctxt->regs[VCPU_REGS_RBX];
2201	tss->esp = ctxt->regs[VCPU_REGS_RSP];
2202	tss->ebp = ctxt->regs[VCPU_REGS_RBP];
2203	tss->esi = ctxt->regs[VCPU_REGS_RSI];
2204	tss->edi = ctxt->regs[VCPU_REGS_RDI];
2205
2206	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2207	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2208	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2209	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2210	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2211	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2212	tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2213}
2214
2215static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2216				 struct tss_segment_32 *tss)
2217{
2218	int ret;
2219
2220	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2221		return emulate_gp(ctxt, 0);
2222	ctxt->_eip = tss->eip;
2223	ctxt->eflags = tss->eflags | 2;
2224	ctxt->regs[VCPU_REGS_RAX] = tss->eax;
2225	ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
2226	ctxt->regs[VCPU_REGS_RDX] = tss->edx;
2227	ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
2228	ctxt->regs[VCPU_REGS_RSP] = tss->esp;
2229	ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
2230	ctxt->regs[VCPU_REGS_RSI] = tss->esi;
2231	ctxt->regs[VCPU_REGS_RDI] = tss->edi;
2232
2233	/*
2234	 * SDM says that segment selectors are loaded before segment
2235	 * descriptors
2236	 */
2237	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2238	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2239	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2240	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2241	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2242	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2243	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2244
2245	/*
2246	 * Now load segment descriptors. If a fault happens at this stage,
2247	 * it is handled in the context of the new task
2248	 */
2249	ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2250	if (ret != X86EMUL_CONTINUE)
2251		return ret;
2252	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2253	if (ret != X86EMUL_CONTINUE)
2254		return ret;
2255	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2256	if (ret != X86EMUL_CONTINUE)
2257		return ret;
2258	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2259	if (ret != X86EMUL_CONTINUE)
2260		return ret;
2261	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2262	if (ret != X86EMUL_CONTINUE)
2263		return ret;
2264	ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
2265	if (ret != X86EMUL_CONTINUE)
2266		return ret;
2267	ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
2268	if (ret != X86EMUL_CONTINUE)
2269		return ret;
2270
2271	return X86EMUL_CONTINUE;
2272}
2273
2274static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2275			  u16 tss_selector, u16 old_tss_sel,
2276			  ulong old_tss_base, struct desc_struct *new_desc)
2277{
2278	struct x86_emulate_ops *ops = ctxt->ops;
2279	struct tss_segment_32 tss_seg;
2280	int ret;
2281	u32 new_tss_base = get_desc_base(new_desc);
2282
2283	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2284			    &ctxt->exception);
2285	if (ret != X86EMUL_CONTINUE)
2286		/* FIXME: need to provide precise fault address */
2287		return ret;
2288
2289	save_state_to_tss32(ctxt, &tss_seg);
2290
2291	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2292			     &ctxt->exception);
2293	if (ret != X86EMUL_CONTINUE)
2294		/* FIXME: need to provide precise fault address */
2295		return ret;
2296
2297	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2298			    &ctxt->exception);
2299	if (ret != X86EMUL_CONTINUE)
2300		/* FIXME: need to provide precise fault address */
2301		return ret;
2302
2303	if (old_tss_sel != 0xffff) {
2304		tss_seg.prev_task_link = old_tss_sel;
2305
2306		ret = ops->write_std(ctxt, new_tss_base,
2307				     &tss_seg.prev_task_link,
2308				     sizeof tss_seg.prev_task_link,
2309				     &ctxt->exception);
2310		if (ret != X86EMUL_CONTINUE)
2311			/* FIXME: need to provide precise fault address */
2312			return ret;
2313	}
2314
2315	return load_state_from_tss32(ctxt, &tss_seg);
2316}
2317
2318static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2319				   u16 tss_selector, int reason,
2320				   bool has_error_code, u32 error_code)
2321{
2322	struct x86_emulate_ops *ops = ctxt->ops;
2323	struct desc_struct curr_tss_desc, next_tss_desc;
2324	int ret;
2325	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2326	ulong old_tss_base =
2327		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2328	u32 desc_limit;
2329
2330	/* FIXME: old_tss_base == ~0 ? */
2331
2332	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2333	if (ret != X86EMUL_CONTINUE)
2334		return ret;
2335	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2336	if (ret != X86EMUL_CONTINUE)
2337		return ret;
2338
2339	/* FIXME: check that next_tss_desc is tss */
2340
2341	if (reason != TASK_SWITCH_IRET) {
2342		if ((tss_selector & 3) > next_tss_desc.dpl ||
2343		    ops->cpl(ctxt) > next_tss_desc.dpl)
2344			return emulate_gp(ctxt, 0);
2345	}
2346
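	/*
	 * The new TSS must be present and large enough: at least 0x67
	 * bytes for a 32-bit TSS (type bit 3 set), 0x2b for a 16-bit one;
	 * otherwise raise #TS with the faulting selector.
	 */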
2347	desc_limit = desc_limit_scaled(&next_tss_desc);
2348	if (!next_tss_desc.p ||
2349	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2350	     desc_limit < 0x2b)) {
2351		emulate_ts(ctxt, tss_selector & 0xfffc);
2352		return X86EMUL_PROPAGATE_FAULT;
2353	}
2354
2355	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2356		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2357		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2358	}
2359
2360	if (reason == TASK_SWITCH_IRET)
2361		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2362
2363	/* Set back link to prev task only if NT bit is set in eflags;
2364	   note that old_tss_sel is not used after this point */
2365	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2366		old_tss_sel = 0xffff;
2367
2368	if (next_tss_desc.type & 8)
2369		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2370				     old_tss_base, &next_tss_desc);
2371	else
2372		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2373				     old_tss_base, &next_tss_desc);
2374	if (ret != X86EMUL_CONTINUE)
2375		return ret;
2376
2377	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2378		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2379
2380	if (reason != TASK_SWITCH_IRET) {
2381		next_tss_desc.type |= (1 << 1); /* set busy flag */
2382		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2383	}
2384
2385	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
2386	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2387
2388	if (has_error_code) {
2389		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2390		ctxt->lock_prefix = 0;
2391		ctxt->src.val = (unsigned long) error_code;
2392		ret = em_push(ctxt);
2393	}
2394
2395	return ret;
2396}
2397
2398int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2399			 u16 tss_selector, int reason,
2400			 bool has_error_code, u32 error_code)
2401{
2402	int rc;
2403
2404	ctxt->_eip = ctxt->eip;
2405	ctxt->dst.type = OP_NONE;
2406
2407	rc = emulator_do_task_switch(ctxt, tss_selector, reason,
2408				     has_error_code, error_code);
2409
2410	if (rc == X86EMUL_CONTINUE)
2411		ctxt->eip = ctxt->_eip;
2412
2413	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2414}
2415
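/*
 * Advance a string operand: step SI/DI by the operand size, moving
 * backwards when EFLAGS.DF is set, and refresh the operand's effective
 * address.
 */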
2416static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
2417			    int reg, struct operand *op)
2418{
2419	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2420
2421	register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
2422	op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
2423	op->addr.mem.seg = seg;
2424}
2425
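/*
 * DAS (decimal adjust AL after subtraction): fix up AL after a packed-BCD
 * subtract by conditionally subtracting 6 from the low nibble and 0x60
 * from the high one, mirroring the SDM pseudocode for the CF/AF updates.
 */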
2426static int em_das(struct x86_emulate_ctxt *ctxt)
2427{
2428	u8 al, old_al;
2429	bool af, cf, old_cf;
2430
2431	cf = ctxt->eflags & X86_EFLAGS_CF;
2432	al = ctxt->dst.val;
2433
2434	old_al = al;
2435	old_cf = cf;
2436	cf = false;
2437	af = ctxt->eflags & X86_EFLAGS_AF;
2438	if ((al & 0x0f) > 9 || af) {
2439		al -= 6;
2440		cf = old_cf | (al >= 250);
2441		af = true;
2442	} else {
2443		af = false;
2444	}
2445	if (old_al > 0x99 || old_cf) {
2446		al -= 0x60;
2447		cf = true;
2448	}
2449
2450	ctxt->dst.val = al;
2451	/* Set PF, ZF, SF */
2452	ctxt->src.type = OP_IMM;
2453	ctxt->src.val = 0;
2454	ctxt->src.bytes = 1;
2455	emulate_2op_SrcV("or", ctxt->src, ctxt->dst, ctxt->eflags);
2456	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2457	if (cf)
2458		ctxt->eflags |= X86_EFLAGS_CF;
2459	if (af)
2460		ctxt->eflags |= X86_EFLAGS_AF;
2461	return X86EMUL_CONTINUE;
2462}
2463
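/*
 * Far call: load the new CS from the far pointer's selector, then push
 * the old CS and old EIP so that a far return can restore them.
 */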
2464static int em_call_far(struct x86_emulate_ctxt *ctxt)
2465{
2466	u16 sel, old_cs;
2467	ulong old_eip;
2468	int rc;
2469
2470	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2471	old_eip = ctxt->_eip;
2472
2473	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2474	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
2475		return X86EMUL_CONTINUE;
2476
2477	ctxt->_eip = 0;
2478	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
2479
2480	ctxt->src.val = old_cs;
2481	rc = em_push(ctxt);
2482	if (rc != X86EMUL_CONTINUE)
2483		return rc;
2484
2485	ctxt->src.val = old_eip;
2486	return em_push(ctxt);
2487}
2488
2489static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2490{
2491	int rc;
2492
2493	ctxt->dst.type = OP_REG;
2494	ctxt->dst.addr.reg = &ctxt->_eip;
2495	ctxt->dst.bytes = ctxt->op_bytes;
2496	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
2497	if (rc != X86EMUL_CONTINUE)
2498		return rc;
2499	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
2500	return X86EMUL_CONTINUE;
2501}
2502
2503static int em_add(struct x86_emulate_ctxt *ctxt)
2504{
2505	emulate_2op_SrcV("add", ctxt->src, ctxt->dst, ctxt->eflags);
2506	return X86EMUL_CONTINUE;
2507}
2508
2509static int em_or(struct x86_emulate_ctxt *ctxt)
2510{
2511	emulate_2op_SrcV("or", ctxt->src, ctxt->dst, ctxt->eflags);
2512	return X86EMUL_CONTINUE;
2513}
2514
2515static int em_adc(struct x86_emulate_ctxt *ctxt)
2516{
2517	emulate_2op_SrcV("adc", ctxt->src, ctxt->dst, ctxt->eflags);
2518	return X86EMUL_CONTINUE;
2519}
2520
2521static int em_sbb(struct x86_emulate_ctxt *ctxt)
2522{
2523	emulate_2op_SrcV("sbb", ctxt->src, ctxt->dst, ctxt->eflags);
2524	return X86EMUL_CONTINUE;
2525}
2526
2527static int em_and(struct x86_emulate_ctxt *ctxt)
2528{
2529	emulate_2op_SrcV("and", ctxt->src, ctxt->dst, ctxt->eflags);
2530	return X86EMUL_CONTINUE;
2531}
2532
2533static int em_sub(struct x86_emulate_ctxt *ctxt)
2534{
2535	emulate_2op_SrcV("sub", ctxt->src, ctxt->dst, ctxt->eflags);
2536	return X86EMUL_CONTINUE;
2537}
2538
2539static int em_xor(struct x86_emulate_ctxt *ctxt)
2540{
2541	emulate_2op_SrcV("xor", ctxt->src, ctxt->dst, ctxt->eflags);
2542	return X86EMUL_CONTINUE;
2543}
2544
2545static int em_cmp(struct x86_emulate_ctxt *ctxt)
2546{
2547	emulate_2op_SrcV("cmp", ctxt->src, ctxt->dst, ctxt->eflags);
2548	/* Disable writeback. */
2549	ctxt->dst.type = OP_NONE;
2550	return X86EMUL_CONTINUE;
2551}
2552
2553static int em_test(struct x86_emulate_ctxt *ctxt)
2554{
2555	emulate_2op_SrcV("test", ctxt->src, ctxt->dst, ctxt->eflags);
2556	return X86EMUL_CONTINUE;
2557}
2558
2559static int em_xchg(struct x86_emulate_ctxt *ctxt)
2560{
2561	/* Write back the register source. */
2562	ctxt->src.val = ctxt->dst.val;
2563	write_register_operand(&ctxt->src);
2564
2565	/* Write back the memory destination with implicit LOCK prefix. */
2566	ctxt->dst.val = ctxt->src.orig_val;
2567	ctxt->lock_prefix = 1;
2568	return X86EMUL_CONTINUE;
2569}
2570
2571static int em_imul(struct x86_emulate_ctxt *ctxt)
2572{
2573	emulate_2op_SrcV_nobyte("imul", ctxt->src, ctxt->dst, ctxt->eflags);
2574	return X86EMUL_CONTINUE;
2575}
2576
2577static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2578{
2579	ctxt->dst.val = ctxt->src2.val;
2580	return em_imul(ctxt);
2581}
2582
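/*
 * CWD/CDQ: replicate the accumulator's sign bit into DX/EDX; the
 * expression below evaluates to all-ones when the sign bit is set
 * and to zero otherwise.
 */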
2583static int em_cwd(struct x86_emulate_ctxt *ctxt)
2584{
2585	ctxt->dst.type = OP_REG;
2586	ctxt->dst.bytes = ctxt->src.bytes;
2587	ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
2588	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2589
2590	return X86EMUL_CONTINUE;
2591}
2592
2593static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2594{
2595	u64 tsc = 0;
2596
2597	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2598	ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
2599	ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
2600	return X86EMUL_CONTINUE;
2601}
2602
2603static int em_mov(struct x86_emulate_ctxt *ctxt)
2604{
2605	ctxt->dst.val = ctxt->src.val;
2606	return X86EMUL_CONTINUE;
2607}
2608
2609static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
2610{
2611	if (ctxt->modrm_reg > VCPU_SREG_GS)
2612		return emulate_ud(ctxt);
2613
2614	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
2615	return X86EMUL_CONTINUE;
2616}
2617
2618static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
2619{
2620	u16 sel = ctxt->src.val;
2621
2622	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
2623		return emulate_ud(ctxt);
2624
2625	if (ctxt->modrm_reg == VCPU_SREG_SS)
2626		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2627
2628	/* Disable writeback. */
2629	ctxt->dst.type = OP_NONE;
2630	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
2631}
2632
2633static int em_movdqu(struct x86_emulate_ctxt *ctxt)
2634{
2635	memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
2636	return X86EMUL_CONTINUE;
2637}
2638
2639static int em_invlpg(struct x86_emulate_ctxt *ctxt)
2640{
2641	int rc;
2642	ulong linear;
2643
2644	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
2645	if (rc == X86EMUL_CONTINUE)
2646		ctxt->ops->invlpg(ctxt, linear);
2647	/* Disable writeback. */
2648	ctxt->dst.type = OP_NONE;
2649	return X86EMUL_CONTINUE;
2650}
2651
2652static int em_clts(struct x86_emulate_ctxt *ctxt)
2653{
2654	ulong cr0;
2655
2656	cr0 = ctxt->ops->get_cr(ctxt, 0);
2657	cr0 &= ~X86_CR0_TS;
2658	ctxt->ops->set_cr(ctxt, 0, cr0);
2659	return X86EMUL_CONTINUE;
2660}
2661
2662static int em_vmcall(struct x86_emulate_ctxt *ctxt)
2663{
2664	int rc;
2665
2666	if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
2667		return X86EMUL_UNHANDLEABLE;
2668
2669	rc = ctxt->ops->fix_hypercall(ctxt);
2670	if (rc != X86EMUL_CONTINUE)
2671		return rc;
2672
2673	/* Let the processor re-execute the fixed hypercall */
2674	ctxt->_eip = ctxt->eip;
2675	/* Disable writeback. */
2676	ctxt->dst.type = OP_NONE;
2677	return X86EMUL_CONTINUE;
2678}
2679
2680static int em_lgdt(struct x86_emulate_ctxt *ctxt)
2681{
2682	struct desc_ptr desc_ptr;
2683	int rc;
2684
2685	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2686			     &desc_ptr.size, &desc_ptr.address,
2687			     ctxt->op_bytes);
2688	if (rc != X86EMUL_CONTINUE)
2689		return rc;
2690	ctxt->ops->set_gdt(ctxt, &desc_ptr);
2691	/* Disable writeback. */
2692	ctxt->dst.type = OP_NONE;
2693	return X86EMUL_CONTINUE;
2694}
2695
2696static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
2697{
2698	int rc;
2699
2700	rc = ctxt->ops->fix_hypercall(ctxt);
2701
2702	/* Disable writeback. */
2703	ctxt->dst.type = OP_NONE;
2704	return rc;
2705}
2706
2707static int em_lidt(struct x86_emulate_ctxt *ctxt)
2708{
2709	struct desc_ptr desc_ptr;
2710	int rc;
2711
2712	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2713			     &desc_ptr.size, &desc_ptr.address,
2714			     ctxt->op_bytes);
2715	if (rc != X86EMUL_CONTINUE)
2716		return rc;
2717	ctxt->ops->set_idt(ctxt, &desc_ptr);
2718	/* Disable writeback. */
2719	ctxt->dst.type = OP_NONE;
2720	return X86EMUL_CONTINUE;
2721}
2722
2723static int em_smsw(struct x86_emulate_ctxt *ctxt)
2724{
2725	ctxt->dst.bytes = 2;
2726	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
2727	return X86EMUL_CONTINUE;
2728}
2729
2730static int em_lmsw(struct x86_emulate_ctxt *ctxt)
2731{
2732	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
2733			  | (ctxt->src.val & 0x0f));
2734	ctxt->dst.type = OP_NONE;
2735	return X86EMUL_CONTINUE;
2736}
2737
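/*
 * LOOP/LOOPE/LOOPNE (0xe0-0xe2): decrement (E)CX and branch while it is
 * non-zero; for 0xe0/0xe1 the ZF condition is folded in via
 * test_cc(ctxt->b ^ 0x5).
 */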
2738static int em_loop(struct x86_emulate_ctxt *ctxt)
2739{
2740	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
2741	if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
2742	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
2743		jmp_rel(ctxt, ctxt->src.val);
2744
2745	return X86EMUL_CONTINUE;
2746}
2747
2748static int em_jcxz(struct x86_emulate_ctxt *ctxt)
2749{
2750	if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
2751		jmp_rel(ctxt, ctxt->src.val);
2752
2753	return X86EMUL_CONTINUE;
2754}
2755
2756static int em_cli(struct x86_emulate_ctxt *ctxt)
2757{
2758	if (emulator_bad_iopl(ctxt))
2759		return emulate_gp(ctxt, 0);
2760
2761	ctxt->eflags &= ~X86_EFLAGS_IF;
2762	return X86EMUL_CONTINUE;
2763}
2764
2765static int em_sti(struct x86_emulate_ctxt *ctxt)
2766{
2767	if (emulator_bad_iopl(ctxt))
2768		return emulate_gp(ctxt, 0);
2769
2770	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
2771	ctxt->eflags |= X86_EFLAGS_IF;
2772	return X86EMUL_CONTINUE;
2773}
2774
2775static bool valid_cr(int nr)
2776{
2777	switch (nr) {
2778	case 0:
2779	case 2 ... 4:
2780	case 8:
2781		return true;
2782	default:
2783		return false;
2784	}
2785}
2786
2787static int check_cr_read(struct x86_emulate_ctxt *ctxt)
2788{
2789	if (!valid_cr(ctxt->modrm_reg))
2790		return emulate_ud(ctxt);
2791
2792	return X86EMUL_CONTINUE;
2793}
2794
2795static int check_cr_write(struct x86_emulate_ctxt *ctxt)
2796{
2797	u64 new_val = ctxt->src.val64;
2798	int cr = ctxt->modrm_reg;
2799	u64 efer = 0;
2800
2801	static u64 cr_reserved_bits[] = {
2802		0xffffffff00000000ULL,
2803		0, 0, 0, /* CR3 checked later */
2804		CR4_RESERVED_BITS,
2805		0, 0, 0,
2806		CR8_RESERVED_BITS,
2807	};
2808
2809	if (!valid_cr(cr))
2810		return emulate_ud(ctxt);
2811
2812	if (new_val & cr_reserved_bits[cr])
2813		return emulate_gp(ctxt, 0);
2814
2815	switch (cr) {
2816	case 0: {
2817		u64 cr4;
2818		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
2819		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
2820			return emulate_gp(ctxt, 0);
2821
2822		cr4 = ctxt->ops->get_cr(ctxt, 4);
2823		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2824
2825		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
2826		    !(cr4 & X86_CR4_PAE))
2827			return emulate_gp(ctxt, 0);
2828
2829		break;
2830		}
2831	case 3: {
2832		u64 rsvd = 0;
2833
2834		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2835		if (efer & EFER_LMA)
2836			rsvd = CR3_L_MODE_RESERVED_BITS;
2837		else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
2838			rsvd = CR3_PAE_RESERVED_BITS;
2839		else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
2840			rsvd = CR3_NONPAE_RESERVED_BITS;
2841
2842		if (new_val & rsvd)
2843			return emulate_gp(ctxt, 0);
2844
2845		break;
2846		}
2847	case 4: {
2848		u64 cr4;
2849
2850		cr4 = ctxt->ops->get_cr(ctxt, 4);
2851		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2852
2853		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
2854			return emulate_gp(ctxt, 0);
2855
2856		break;
2857		}
2858	}
2859
2860	return X86EMUL_CONTINUE;
2861}
2862
2863static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
2864{
2865	unsigned long dr7;
2866
2867	ctxt->ops->get_dr(ctxt, 7, &dr7);
2868
2869	/* Check if DR7.GD (general detect enable, bit 13) is set */
2870	return dr7 & (1 << 13);
2871}
2872
2873static int check_dr_read(struct x86_emulate_ctxt *ctxt)
2874{
2875	int dr = ctxt->modrm_reg;
2876	u64 cr4;
2877
2878	if (dr > 7)
2879		return emulate_ud(ctxt);
2880
2881	cr4 = ctxt->ops->get_cr(ctxt, 4);
2882	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
2883		return emulate_ud(ctxt);
2884
2885	if (check_dr7_gd(ctxt))
2886		return emulate_db(ctxt);
2887
2888	return X86EMUL_CONTINUE;
2889}
2890
2891static int check_dr_write(struct x86_emulate_ctxt *ctxt)
2892{
2893	u64 new_val = ctxt->src.val64;
2894	int dr = ctxt->modrm_reg;
2895
2896	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
2897		return emulate_gp(ctxt, 0);
2898
2899	return check_dr_read(ctxt);
2900}
2901
2902static int check_svme(struct x86_emulate_ctxt *ctxt)
2903{
2904	u64 efer;
2905
2906	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2907
2908	if (!(efer & EFER_SVME))
2909		return emulate_ud(ctxt);
2910
2911	return X86EMUL_CONTINUE;
2912}
2913
2914static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
2915{
2916	u64 rax = ctxt->regs[VCPU_REGS_RAX];
2917
2918	/* Valid physical address? */
2919	if (rax & 0xffff000000000000ULL)
2920		return emulate_gp(ctxt, 0);
2921
2922	return check_svme(ctxt);
2923}
2924
2925static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
2926{
2927	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
2928
2929	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
2930		return emulate_ud(ctxt);
2931
2932	return X86EMUL_CONTINUE;
2933}
2934
2935static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
2936{
2937	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
2938	u64 rcx = ctxt->regs[VCPU_REGS_RCX];
2939
2940	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
2941	    (rcx > 3))
2942		return emulate_gp(ctxt, 0);
2943
2944	return X86EMUL_CONTINUE;
2945}
2946
2947static int check_perm_in(struct x86_emulate_ctxt *ctxt)
2948{
2949	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
2950	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
2951		return emulate_gp(ctxt, 0);
2952
2953	return X86EMUL_CONTINUE;
2954}
2955
2956static int check_perm_out(struct x86_emulate_ctxt *ctxt)
2957{
2958	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
2959	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
2960		return emulate_gp(ctxt, 0);
2961
2962	return X86EMUL_CONTINUE;
2963}
2964
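/*
 * Opcode-table constructors: D() records decode flags only, I() adds an
 * ->execute handler, the DI/II variants attach an intercept, the *P
 * variants attach a ->check_perm hook, and G/GD/GP indirect through
 * group, group-dual and mandatory-prefix tables.
 */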
2965#define D(_y) { .flags = (_y) }
2966#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
2967#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
2968		      .check_perm = (_p) }
2969#define N    D(0)
2970#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
2971#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
2972#define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
2973#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
2974#define II(_f, _e, _i) \
2975	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
2976#define IIP(_f, _e, _i, _p) \
2977	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
2978	  .check_perm = (_p) }
2979#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
2980
2981#define D2bv(_f)      D((_f) | ByteOp), D(_f)
2982#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
2983#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
2984
2985#define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
2986		I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
2987		I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
2988
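/*
 * For example (illustrative expansion), I6ALU(Lock, em_add) emits the six
 * classic ALU encodings for ADD: r/m,r and r,r/m in byte and word/long
 * forms plus the AL/eAX,imm forms, all dispatching to em_add().
 */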
2989static struct opcode group7_rm1[] = {
2990	DI(SrcNone | ModRM | Priv, monitor),
2991	DI(SrcNone | ModRM | Priv, mwait),
2992	N, N, N, N, N, N,
2993};
2994
2995static struct opcode group7_rm3[] = {
2996	DIP(SrcNone | ModRM | Prot | Priv, vmrun,   check_svme_pa),
2997	II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
2998	DIP(SrcNone | ModRM | Prot | Priv, vmload,  check_svme_pa),
2999	DIP(SrcNone | ModRM | Prot | Priv, vmsave,  check_svme_pa),
3000	DIP(SrcNone | ModRM | Prot | Priv, stgi,    check_svme),
3001	DIP(SrcNone | ModRM | Prot | Priv, clgi,    check_svme),
3002	DIP(SrcNone | ModRM | Prot | Priv, skinit,  check_svme),
3003	DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
3004};
3005
3006static struct opcode group7_rm7[] = {
3007	N,
3008	DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
3009	N, N, N, N, N, N,
3010};
3011
3012static struct opcode group1[] = {
3013	I(Lock, em_add),
3014	I(Lock, em_or),
3015	I(Lock, em_adc),
3016	I(Lock, em_sbb),
3017	I(Lock, em_and),
3018	I(Lock, em_sub),
3019	I(Lock, em_xor),
3020	I(0, em_cmp),
3021};
3022
3023static struct opcode group1A[] = {
3024	D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
3025};
3026
3027static struct opcode group3[] = {
3028	D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
3029	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
3030	X4(D(SrcMem | ModRM)),
3031};
3032
3033static struct opcode group4[] = {
3034	D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
3035	N, N, N, N, N, N,
3036};
3037
3038static struct opcode group5[] = {
3039	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
3040	D(SrcMem | ModRM | Stack),
3041	I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
3042	D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
3043	D(SrcMem | ModRM | Stack), N,
3044};
3045
3046static struct opcode group6[] = {
3047	DI(ModRM | Prot,        sldt),
3048	DI(ModRM | Prot,        str),
3049	DI(ModRM | Prot | Priv, lldt),
3050	DI(ModRM | Prot | Priv, ltr),
3051	N, N, N, N,
3052};
3053
3054static struct group_dual group7 = { {
3055	DI(ModRM | Mov | DstMem | Priv, sgdt),
3056	DI(ModRM | Mov | DstMem | Priv, sidt),
3057	II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
3058	II(ModRM | SrcMem | Priv, em_lidt, lidt),
3059	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3060	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
3061	II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3062}, {
3063	I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
3064	EXT(0, group7_rm1),
3065	N, EXT(0, group7_rm3),
3066	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3067	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
3068} };
3069
3070static struct opcode group8[] = {
3071	N, N, N, N,
3072	D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
3073	D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
3074};
3075
3076static struct group_dual group9 = { {
3077	N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
3078}, {
3079	N, N, N, N, N, N, N, N,
3080} };
3081
3082static struct opcode group11[] = {
3083	I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
3084};
3085
3086static struct gprefix pfx_0f_6f_0f_7f = {
3087	N, N, N, I(Sse, em_movdqu),
3088};
3089
3090static struct opcode opcode_table[256] = {
3091	/* 0x00 - 0x07 */
3092	I6ALU(Lock, em_add),
3093	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
3094	/* 0x08 - 0x0F */
3095	I6ALU(Lock, em_or),
3096	D(ImplicitOps | Stack | No64), N,
3097	/* 0x10 - 0x17 */
3098	I6ALU(Lock, em_adc),
3099	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
3100	/* 0x18 - 0x1F */
3101	I6ALU(Lock, em_sbb),
3102	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
3103	/* 0x20 - 0x27 */
3104	I6ALU(Lock, em_and), N, N,
3105	/* 0x28 - 0x2F */
3106	I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3107	/* 0x30 - 0x37 */
3108	I6ALU(Lock, em_xor), N, N,
3109	/* 0x38 - 0x3F */
3110	I6ALU(0, em_cmp), N, N,
3111	/* 0x40 - 0x4F */
3112	X16(D(DstReg)),
3113	/* 0x50 - 0x57 */
3114	X8(I(SrcReg | Stack, em_push)),
3115	/* 0x58 - 0x5F */
3116	X8(I(DstReg | Stack, em_pop)),
3117	/* 0x60 - 0x67 */
3118	I(ImplicitOps | Stack | No64, em_pusha),
3119	I(ImplicitOps | Stack | No64, em_popa),
3120	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3121	N, N, N, N,
3122	/* 0x68 - 0x6F */
3123	I(SrcImm | Mov | Stack, em_push),
3124	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3125	I(SrcImmByte | Mov | Stack, em_push),
3126	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3127	D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
3128	D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
3129	/* 0x70 - 0x7F */
3130	X16(D(SrcImmByte)),
3131	/* 0x80 - 0x87 */
3132	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
3133	G(DstMem | SrcImm | ModRM | Group, group1),
3134	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
3135	G(DstMem | SrcImmByte | ModRM | Group, group1),
3136	I2bv(DstMem | SrcReg | ModRM, em_test),
3137	I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
3138	/* 0x88 - 0x8F */
3139	I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
3140	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3141	I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
3142	D(ModRM | SrcMem | NoAccess | DstReg),
3143	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3144	G(0, group1A),
3145	/* 0x90 - 0x97 */
3146	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3147	/* 0x98 - 0x9F */
3148	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3149	I(SrcImmFAddr | No64, em_call_far), N,
3150	II(ImplicitOps | Stack, em_pushf, pushf),
3151	II(ImplicitOps | Stack, em_popf, popf), N, N,
3152	/* 0xA0 - 0xA7 */
3153	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3154	I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
3155	I2bv(SrcSI | DstDI | Mov | String, em_mov),
3156	I2bv(SrcSI | DstDI | String, em_cmp),
3157	/* 0xA8 - 0xAF */
3158	I2bv(DstAcc | SrcImm, em_test),
3159	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3160	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3161	I2bv(SrcAcc | DstDI | String, em_cmp),
3162	/* 0xB0 - 0xB7 */
3163	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3164	/* 0xB8 - 0xBF */
3165	X8(I(DstReg | SrcImm | Mov, em_mov)),
3166	/* 0xC0 - 0xC7 */
3167	D2bv(DstMem | SrcImmByte | ModRM),
3168	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3169	I(ImplicitOps | Stack, em_ret),
3170	D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
3171	G(ByteOp, group11), G(0, group11),
3172	/* 0xC8 - 0xCF */
3173	N, N, N, I(ImplicitOps | Stack, em_ret_far),
3174	D(ImplicitOps), DI(SrcImmByte, intn),
3175	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3176	/* 0xD0 - 0xD7 */
3177	D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
3178	N, N, N, N,
3179	/* 0xD8 - 0xDF */
3180	N, N, N, N, N, N, N, N,
3181	/* 0xE0 - 0xE7 */
3182	X3(I(SrcImmByte, em_loop)),
3183	I(SrcImmByte, em_jcxz),
3184	D2bvIP(SrcImmUByte | DstAcc, in,  check_perm_in),
3185	D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
3186	/* 0xE8 - 0xEF */
3187	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
3188	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
3189	D2bvIP(SrcDX | DstAcc, in,  check_perm_in),
3190	D2bvIP(SrcAcc | DstDX, out, check_perm_out),
3191	/* 0xF0 - 0xF7 */
3192	N, DI(ImplicitOps, icebp), N, N,
3193	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3194	G(ByteOp, group3), G(0, group3),
3195	/* 0xF8 - 0xFF */
3196	D(ImplicitOps), D(ImplicitOps),
3197	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
3198	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3199};
3200
3201static struct opcode twobyte_table[256] = {
3202	/* 0x00 - 0x0F */
3203	G(0, group6), GD(0, &group7), N, N,
3204	N, I(ImplicitOps | VendorSpecific, em_syscall),
3205	II(ImplicitOps | Priv, em_clts, clts), N,
3206	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3207	N, D(ImplicitOps | ModRM), N, N,
3208	/* 0x10 - 0x1F */
3209	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
3210	/* 0x20 - 0x2F */
3211	DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
3212	DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
3213	DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
3214	DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
3215	N, N, N, N,
3216	N, N, N, N, N, N, N, N,
3217	/* 0x30 - 0x3F */
3218	DI(ImplicitOps | Priv, wrmsr),
3219	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3220	DI(ImplicitOps | Priv, rdmsr),
3221	DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
3222	I(ImplicitOps | VendorSpecific, em_sysenter),
3223	I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
3224	N, N,
3225	N, N, N, N, N, N, N, N,
3226	/* 0x40 - 0x4F */
3227	X16(D(DstReg | SrcMem | ModRM | Mov)),
3228	/* 0x50 - 0x5F */
3229	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3230	/* 0x60 - 0x6F */
3231	N, N, N, N,
3232	N, N, N, N,
3233	N, N, N, N,
3234	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3235	/* 0x70 - 0x7F */
3236	N, N, N, N,
3237	N, N, N, N,
3238	N, N, N, N,
3239	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3240	/* 0x80 - 0x8F */
3241	X16(D(SrcImm)),
3242	/* 0x90 - 0x9F */
3243	X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
3244	/* 0xA0 - 0xA7 */
3245	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
3246	DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
3247	D(DstMem | SrcReg | Src2ImmByte | ModRM),
3248	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
3249	/* 0xA8 - 0xAF */
3250	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
3251	DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3252	D(DstMem | SrcReg | Src2ImmByte | ModRM),
3253	D(DstMem | SrcReg | Src2CL | ModRM),
3254	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
3255	/* 0xB0 - 0xB7 */
3256	D2bv(DstMem | SrcReg | ModRM | Lock),
3257	D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3258	D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
3259	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3260	/* 0xB8 - 0xBF */
3261	N, N,
3262	G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3263	D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
3264	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3265	/* 0xC0 - 0xCF */
3266	D2bv(DstMem | SrcReg | ModRM | Lock),
3267	N, D(DstMem | SrcReg | ModRM | Mov),
3268	N, N, N, GD(0, &group9),
3269	N, N, N, N, N, N, N, N,
3270	/* 0xD0 - 0xDF */
3271	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3272	/* 0xE0 - 0xEF */
3273	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3274	/* 0xF0 - 0xFF */
3275	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3276};
3277
3278#undef D
3279#undef N
3280#undef G
3281#undef GD
3282#undef I
3283#undef GP
3284#undef EXT
3285
3286#undef D2bv
3287#undef D2bvIP
3288#undef I2bv
3289#undef I6ALU
3290
3291static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
3292{
3293	unsigned size;
3294
3295	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3296	if (size == 8)
3297		size = 4;
3298	return size;
3299}
3300
3301static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
3302		      unsigned size, bool sign_extension)
3303{
3304	int rc = X86EMUL_CONTINUE;
3305
3306	op->type = OP_IMM;
3307	op->bytes = size;
3308	op->addr.mem.ea = ctxt->_eip;
3309	/* NB. Immediates are sign-extended as necessary. */
3310	switch (op->bytes) {
3311	case 1:
3312		op->val = insn_fetch(s8, 1, ctxt->_eip);
3313		break;
3314	case 2:
3315		op->val = insn_fetch(s16, 2, ctxt->_eip);
3316		break;
3317	case 4:
3318		op->val = insn_fetch(s32, 4, ctxt->_eip);
3319		break;
3320	}
3321	if (!sign_extension) {
3322		switch (op->bytes) {
3323		case 1:
3324			op->val &= 0xff;
3325			break;
3326		case 2:
3327			op->val &= 0xffff;
3328			break;
3329		case 4:
3330			op->val &= 0xffffffff;
3331			break;
3332		}
3333	}
3334done:
3335	return rc;
3336}
3337
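/*
 * Instruction decode: consume legacy and REX prefixes, look up the one-
 * or two-byte opcode, resolve group/prefix indirection, then decode
 * ModRM/SIB and the source, second-source and destination operands.
 */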
3338int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
3339{
3340	int rc = X86EMUL_CONTINUE;
3341	int mode = ctxt->mode;
3342	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
3343	bool op_prefix = false;
3344	struct opcode opcode;
3345	struct operand memop = { .type = OP_NONE }, *memopp = NULL;
3346
3347	ctxt->_eip = ctxt->eip;
3348	ctxt->fetch.start = ctxt->_eip;
3349	ctxt->fetch.end = ctxt->fetch.start + insn_len;
3350	if (insn_len > 0)
3351		memcpy(ctxt->fetch.data, insn, insn_len);
3352
3353	switch (mode) {
3354	case X86EMUL_MODE_REAL:
3355	case X86EMUL_MODE_VM86:
3356	case X86EMUL_MODE_PROT16:
3357		def_op_bytes = def_ad_bytes = 2;
3358		break;
3359	case X86EMUL_MODE_PROT32:
3360		def_op_bytes = def_ad_bytes = 4;
3361		break;
3362#ifdef CONFIG_X86_64
3363	case X86EMUL_MODE_PROT64:
3364		def_op_bytes = 4;
3365		def_ad_bytes = 8;
3366		break;
3367#endif
3368	default:
3369		return -1;
3370	}
3371
3372	ctxt->op_bytes = def_op_bytes;
3373	ctxt->ad_bytes = def_ad_bytes;
3374
3375	/* Legacy prefixes. */
3376	for (;;) {
3377		switch (ctxt->b = insn_fetch(u8, 1, ctxt->_eip)) {
3378		case 0x66:	/* operand-size override */
3379			op_prefix = true;
3380			/* switch between 2/4 bytes */
3381			ctxt->op_bytes = def_op_bytes ^ 6;
3382			break;
3383		case 0x67:	/* address-size override */
3384			if (mode == X86EMUL_MODE_PROT64)
3385				/* switch between 4/8 bytes */
3386				ctxt->ad_bytes = def_ad_bytes ^ 12;
3387			else
3388				/* switch between 2/4 bytes */
3389				ctxt->ad_bytes = def_ad_bytes ^ 6;
3390			break;
3391		case 0x26:	/* ES override */
3392		case 0x2e:	/* CS override */
3393		case 0x36:	/* SS override */
3394		case 0x3e:	/* DS override */
3395			set_seg_override(ctxt, (ctxt->b >> 3) & 3);
3396			break;
3397		case 0x64:	/* FS override */
3398		case 0x65:	/* GS override */
3399			set_seg_override(ctxt, ctxt->b & 7);
3400			break;
3401		case 0x40 ... 0x4f: /* REX */
3402			if (mode != X86EMUL_MODE_PROT64)
3403				goto done_prefixes;
3404			ctxt->rex_prefix = ctxt->b;
3405			continue;
3406		case 0xf0:	/* LOCK */
3407			ctxt->lock_prefix = 1;
3408			break;
3409		case 0xf2:	/* REPNE/REPNZ */
3410		case 0xf3:	/* REP/REPE/REPZ */
3411			ctxt->rep_prefix = ctxt->b;
3412			break;
3413		default:
3414			goto done_prefixes;
3415		}
3416
3417		/* Any legacy prefix after a REX prefix nullifies its effect. */
3418
3419		ctxt->rex_prefix = 0;
3420	}
3421
3422done_prefixes:
3423
3424	/* REX prefix. */
3425	if (ctxt->rex_prefix & 8)
3426		ctxt->op_bytes = 8;	/* REX.W */
3427
3428	/* Opcode byte(s). */
3429	opcode = opcode_table[ctxt->b];
3430	/* Two-byte opcode? */
3431	if (ctxt->b == 0x0f) {
3432		ctxt->twobyte = 1;
3433		ctxt->b = insn_fetch(u8, 1, ctxt->_eip);
3434		opcode = twobyte_table[ctxt->b];
3435	}
3436	ctxt->d = opcode.flags;
3437
3438	while (ctxt->d & GroupMask) {
3439		switch (ctxt->d & GroupMask) {
3440		case Group:
3441			ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
3442			--ctxt->_eip;
3443			goffset = (ctxt->modrm >> 3) & 7;
3444			opcode = opcode.u.group[goffset];
3445			break;
3446		case GroupDual:
3447			ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
3448			--ctxt->_eip;
3449			goffset = (ctxt->modrm >> 3) & 7;
3450			if ((ctxt->modrm >> 6) == 3)
3451				opcode = opcode.u.gdual->mod3[goffset];
3452			else
3453				opcode = opcode.u.gdual->mod012[goffset];
3454			break;
3455		case RMExt:
3456			goffset = ctxt->modrm & 7;
3457			opcode = opcode.u.group[goffset];
3458			break;
3459		case Prefix:
3460			if (ctxt->rep_prefix && op_prefix)
3461				return X86EMUL_UNHANDLEABLE;
3462			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
3463			switch (simd_prefix) {
3464			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
3465			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
3466			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
3467			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
3468			}
3469			break;
3470		default:
3471			return X86EMUL_UNHANDLEABLE;
3472		}
3473
3474		ctxt->d &= ~GroupMask;
3475		ctxt->d |= opcode.flags;
3476	}
3477
3478	ctxt->execute = opcode.u.execute;
3479	ctxt->check_perm = opcode.check_perm;
3480	ctxt->intercept = opcode.intercept;
3481
3482	/* Unrecognised? */
3483	if (ctxt->d == 0 || (ctxt->d & Undefined))
3484		return -1;
3485
3486	if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
3487		return -1;
3488
3489	if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
3490		ctxt->op_bytes = 8;
3491
3492	if (ctxt->d & Op3264) {
3493		if (mode == X86EMUL_MODE_PROT64)
3494			ctxt->op_bytes = 8;
3495		else
3496			ctxt->op_bytes = 4;
3497	}
3498
3499	if (ctxt->d & Sse)
3500		ctxt->op_bytes = 16;
3501
3502	/* ModRM and SIB bytes. */
3503	if (ctxt->d & ModRM) {
3504		rc = decode_modrm(ctxt, &memop);
3505		if (!ctxt->has_seg_override)
3506			set_seg_override(ctxt, ctxt->modrm_seg);
3507	} else if (ctxt->d & MemAbs)
3508		rc = decode_abs(ctxt, &memop);
3509	if (rc != X86EMUL_CONTINUE)
3510		goto done;
3511
3512	if (!ctxt->has_seg_override)
3513		set_seg_override(ctxt, VCPU_SREG_DS);
3514
3515	memop.addr.mem.seg = seg_override(ctxt);
3516
3517	if (memop.type == OP_MEM && ctxt->ad_bytes != 8)
3518		memop.addr.mem.ea = (u32)memop.addr.mem.ea;
3519
3520	/*
3521	 * Decode and fetch the source operand: register, memory
3522	 * or immediate.
3523	 */
3524	switch (ctxt->d & SrcMask) {
3525	case SrcNone:
3526		break;
3527	case SrcReg:
3528		decode_register_operand(ctxt, &ctxt->src, 0);
3529		break;
3530	case SrcMem16:
3531		memop.bytes = 2;
3532		goto srcmem_common;
3533	case SrcMem32:
3534		memop.bytes = 4;
3535		goto srcmem_common;
3536	case SrcMem:
3537		memop.bytes = (ctxt->d & ByteOp) ? 1 :
3538							   ctxt->op_bytes;
3539	srcmem_common:
3540		ctxt->src = memop;
3541		memopp = &ctxt->src;
3542		break;
3543	case SrcImmU16:
3544		rc = decode_imm(ctxt, &ctxt->src, 2, false);
3545		break;
3546	case SrcImm:
3547		rc = decode_imm(ctxt, &ctxt->src, imm_size(ctxt), true);
3548		break;
3549	case SrcImmU:
3550		rc = decode_imm(ctxt, &ctxt->src, imm_size(ctxt), false);
3551		break;
3552	case SrcImmByte:
3553		rc = decode_imm(ctxt, &ctxt->src, 1, true);
3554		break;
3555	case SrcImmUByte:
3556		rc = decode_imm(ctxt, &ctxt->src, 1, false);
3557		break;
3558	case SrcAcc:
3559		ctxt->src.type = OP_REG;
3560		ctxt->src.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3561		ctxt->src.addr.reg = &ctxt->regs[VCPU_REGS_RAX];
3562		fetch_register_operand(&ctxt->src);
3563		break;
3564	case SrcOne:
3565		ctxt->src.bytes = 1;
3566		ctxt->src.val = 1;
3567		break;
3568	case SrcSI:
3569		ctxt->src.type = OP_MEM;
3570		ctxt->src.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3571		ctxt->src.addr.mem.ea =
3572			register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
3573		ctxt->src.addr.mem.seg = seg_override(ctxt);
3574		ctxt->src.val = 0;
3575		break;
3576	case SrcImmFAddr:
3577		ctxt->src.type = OP_IMM;
3578		ctxt->src.addr.mem.ea = ctxt->_eip;
3579		ctxt->src.bytes = ctxt->op_bytes + 2;
3580		insn_fetch_arr(ctxt->src.valptr, ctxt->src.bytes, ctxt->_eip);
3581		break;
3582	case SrcMemFAddr:
3583		memop.bytes = ctxt->op_bytes + 2;
3584		goto srcmem_common;
3585		break;
3586	case SrcDX:
3587		ctxt->src.type = OP_REG;
3588		ctxt->src.bytes = 2;
3589		ctxt->src.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
3590		fetch_register_operand(&ctxt->src);
3591		break;
3592	}
3593
3594	if (rc != X86EMUL_CONTINUE)
3595		goto done;
3596
3597	/*
3598	 * Decode and fetch the second source operand: register, memory
3599	 * or immediate.
3600	 */
3601	switch (ctxt->d & Src2Mask) {
3602	case Src2None:
3603		break;
3604	case Src2CL:
3605		ctxt->src2.bytes = 1;
3606		ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
3607		break;
3608	case Src2ImmByte:
3609		rc = decode_imm(ctxt, &ctxt->src2, 1, true);
3610		break;
3611	case Src2One:
3612		ctxt->src2.bytes = 1;
3613		ctxt->src2.val = 1;
3614		break;
3615	case Src2Imm:
3616		rc = decode_imm(ctxt, &ctxt->src2, imm_size(ctxt), true);
3617		break;
3618	}
3619
3620	if (rc != X86EMUL_CONTINUE)
3621		goto done;
3622
3623	/* Decode and fetch the destination operand: register or memory. */
3624	switch (ctxt->d & DstMask) {
3625	case DstReg:
3626		decode_register_operand(ctxt, &ctxt->dst,
3627			 ctxt->twobyte && (ctxt->b == 0xb6 || ctxt->b == 0xb7));
3628		break;
3629	case DstImmUByte:
3630		ctxt->dst.type = OP_IMM;
3631		ctxt->dst.addr.mem.ea = ctxt->_eip;
3632		ctxt->dst.bytes = 1;
3633		ctxt->dst.val = insn_fetch(u8, 1, ctxt->_eip);
3634		break;
3635	case DstMem:
3636	case DstMem64:
3637		ctxt->dst = memop;
3638		memopp = &ctxt->dst;
3639		if ((ctxt->d & DstMask) == DstMem64)
3640			ctxt->dst.bytes = 8;
3641		else
3642			ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3643		if (ctxt->d & BitOp)
3644			fetch_bit_operand(ctxt);
3645		ctxt->dst.orig_val = ctxt->dst.val;
3646		break;
3647	case DstAcc:
3648		ctxt->dst.type = OP_REG;
3649		ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3650		ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RAX];
3651		fetch_register_operand(&ctxt->dst);
3652		ctxt->dst.orig_val = ctxt->dst.val;
3653		break;
3654	case DstDI:
3655		ctxt->dst.type = OP_MEM;
3656		ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3657		ctxt->dst.addr.mem.ea =
3658			register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
3659		ctxt->dst.addr.mem.seg = VCPU_SREG_ES;
3660		ctxt->dst.val = 0;
3661		break;
3662	case DstDX:
3663		ctxt->dst.type = OP_REG;
3664		ctxt->dst.bytes = 2;
3665		ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
3666		fetch_register_operand(&ctxt->dst);
3667		break;
3668	case ImplicitOps:
3669		/* Special instructions do their own operand decoding. */
3670	default:
3671		ctxt->dst.type = OP_NONE; /* Disable writeback. */
3672		break;
3673	}
3674
3675done:
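	/*
	 * A RIP-relative displacement is relative to the end of the
	 * instruction, and _eip only points there now that the whole
	 * instruction has been fetched; hence this late fixup.
	 */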
3676	if (memopp && memopp->type == OP_MEM && ctxt->rip_relative)
3677		memopp->addr.mem.ea += ctxt->_eip;
3678
3679	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3680}
3681
3682static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
3683{
3684	/*
3685	 * The second termination condition applies only to REPE/REPZ and
3686	 * REPNE/REPNZ.  If one of those prefixes is present, test the
3687	 * corresponding condition:
3688	 * 	- REPE/REPZ:  done when ZF = 0
3689	 * 	- REPNE/REPNZ: done when ZF = 1
3690	 */
3691	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
3692	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
3693	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
3694		 ((ctxt->eflags & EFLG_ZF) == 0))
3695		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
3696		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
3697		return true;
3698
3699	return false;
3700}
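
/*
 * Illustrative sketch only (not part of the emulator): the ZF-based
 * termination test above, restated as a stand-alone predicate.
 */
static inline bool rep_flag_terminates(u8 rep_prefix, unsigned long eflags)
{
	bool zf = (eflags & EFLG_ZF) != 0;

	if (rep_prefix == REPE_PREFIX)
		return !zf;	/* REPE/REPZ: stop when ZF = 0 */
	if (rep_prefix == REPNE_PREFIX)
		return zf;	/* REPNE/REPNZ: stop when ZF = 1 */
	return false;
}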
3701
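/*
 * Summary of the flow below: mode/privilege checks and pre-exception
 * intercepts first, then memory operand reads, then either the
 * ->execute callback or the one-/two-byte opcode switches, and finally
 * writeback plus the REP/string-instruction bookkeeping.
 */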
3702int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3703{
3704	struct x86_emulate_ops *ops = ctxt->ops;
3705	u64 msr_data;
3706	int rc = X86EMUL_CONTINUE;
3707	int saved_dst_type = ctxt->dst.type;
3708
3709	ctxt->mem_read.pos = 0;
3710
3711	if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
3712		rc = emulate_ud(ctxt);
3713		goto done;
3714	}
3715
3716	/* LOCK prefix is allowed only with some instructions */
3717	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
3718		rc = emulate_ud(ctxt);
3719		goto done;
3720	}
3721
3722	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
3723		rc = emulate_ud(ctxt);
3724		goto done;
3725	}
3726
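	/*
	 * SSE instructions take #UD when CR0.EM is set or CR4.OSFXSR is
	 * clear, and #NM when CR0.TS is set (lazy FPU context switching),
	 * checked in that order below.
	 */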
3727	if ((ctxt->d & Sse)
3728	    && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
3729		|| !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
3730		rc = emulate_ud(ctxt);
3731		goto done;
3732	}
3733
3734	if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
3735		rc = emulate_nm(ctxt);
3736		goto done;
3737	}
3738
3739	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3740		rc = emulator_check_intercept(ctxt, ctxt->intercept,
3741					      X86_ICPT_PRE_EXCEPT);
3742		if (rc != X86EMUL_CONTINUE)
3743			goto done;
3744	}
3745
3746	/* Privileged instructions can be executed only at CPL 0 */
3747	if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
3748		rc = emulate_gp(ctxt, 0);
3749		goto done;
3750	}
3751
3752	/* Instruction can only be executed in protected mode */
3753	if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
3754		rc = emulate_ud(ctxt);
3755		goto done;
3756	}
3757
3758	/* Do instruction specific permission checks */
3759	if (ctxt->check_perm) {
3760		rc = ctxt->check_perm(ctxt);
3761		if (rc != X86EMUL_CONTINUE)
3762			goto done;
3763	}
3764
3765	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3766		rc = emulator_check_intercept(ctxt, ctxt->intercept,
3767					      X86_ICPT_POST_EXCEPT);
3768		if (rc != X86EMUL_CONTINUE)
3769			goto done;
3770	}
3771
3772	if (ctxt->rep_prefix && (ctxt->d & String)) {
3773		/* All REP prefixes have the same first termination condition */
3774		if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
3775			ctxt->eip = ctxt->_eip;
3776			goto done;
3777		}
3778	}
3779
3780	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
3781		rc = segmented_read(ctxt, ctxt->src.addr.mem,
3782				    ctxt->src.valptr, ctxt->src.bytes);
3783		if (rc != X86EMUL_CONTINUE)
3784			goto done;
3785		ctxt->src.orig_val64 = ctxt->src.val64;
3786	}
3787
3788	if (ctxt->src2.type == OP_MEM) {
3789		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
3790				    &ctxt->src2.val, ctxt->src2.bytes);
3791		if (rc != X86EMUL_CONTINUE)
3792			goto done;
3793	}
3794
3795	if ((ctxt->d & DstMask) == ImplicitOps)
3796		goto special_insn;
3797
3798
3799	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
3800		/* optimisation - avoid slow emulated read if Mov */
3801		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
3802				   &ctxt->dst.val, ctxt->dst.bytes);
3803		if (rc != X86EMUL_CONTINUE)
3804			goto done;
3805	}
3806	ctxt->dst.orig_val = ctxt->dst.val;
3807
3808special_insn:
3809
3810	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3811		rc = emulator_check_intercept(ctxt, ctxt->intercept,
3812					      X86_ICPT_POST_MEMACCESS);
3813		if (rc != X86EMUL_CONTINUE)
3814			goto done;
3815	}
3816
3817	if (ctxt->execute) {
3818		rc = ctxt->execute(ctxt);
3819		if (rc != X86EMUL_CONTINUE)
3820			goto done;
3821		goto writeback;
3822	}
3823
3824	if (ctxt->twobyte)
3825		goto twobyte_insn;
3826
3827	switch (ctxt->b) {
3828	case 0x06:		/* push es */
3829		rc = emulate_push_sreg(ctxt, VCPU_SREG_ES);
3830		break;
3831	case 0x07:		/* pop es */
3832		rc = emulate_pop_sreg(ctxt, VCPU_SREG_ES);
3833		break;
3834	case 0x0e:		/* push cs */
3835		rc = emulate_push_sreg(ctxt, VCPU_SREG_CS);
3836		break;
3837	case 0x16:		/* push ss */
3838		rc = emulate_push_sreg(ctxt, VCPU_SREG_SS);
3839		break;
3840	case 0x17:		/* pop ss */
3841		rc = emulate_pop_sreg(ctxt, VCPU_SREG_SS);
3842		break;
3843	case 0x1e:		/* push ds */
3844		rc = emulate_push_sreg(ctxt, VCPU_SREG_DS);
3845		break;
3846	case 0x1f:		/* pop ds */
3847		rc = emulate_pop_sreg(ctxt, VCPU_SREG_DS);
3848		break;
3849	case 0x40 ... 0x47: /* inc r16/r32 */
3850		emulate_1op("inc", ctxt->dst, ctxt->eflags);
3851		break;
3852	case 0x48 ... 0x4f: /* dec r16/r32 */
3853		emulate_1op("dec", ctxt->dst, ctxt->eflags);
3854		break;
3855	case 0x63:		/* movsxd */
3856		if (ctxt->mode != X86EMUL_MODE_PROT64)
3857			goto cannot_emulate;
3858		ctxt->dst.val = (s32) ctxt->src.val;
3859		break;
3860	case 0x6c:		/* insb */
3861	case 0x6d:		/* insw/insd */
3862		ctxt->src.val = ctxt->regs[VCPU_REGS_RDX];
3863		goto do_io_in;
3864	case 0x6e:		/* outsb */
3865	case 0x6f:		/* outsw/outsd */
3866		ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX];
3867		goto do_io_out;
3868		break;
3869	case 0x70 ... 0x7f: /* jcc (short) */
3870		if (test_cc(ctxt->b, ctxt->eflags))
3871			jmp_rel(ctxt, ctxt->src.val);
3872		break;
3873	case 0x8d: /* lea r16/r32, m */
3874		ctxt->dst.val = ctxt->src.addr.mem.ea;
3875		break;
3876	case 0x8f:		/* pop (sole member of Grp1a) */
3877		rc = em_grp1a(ctxt);
3878		break;
3879	case 0x90 ... 0x97: /* nop / xchg reg, rax */
3880		if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
3881			break;
3882		rc = em_xchg(ctxt);
3883		break;
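	/*
	 * 0x98 sign-extends the accumulator in place: CBW (AL -> AX),
	 * CWDE (AX -> EAX) or CDQE (EAX -> RAX), selected by operand size.
	 */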
3884	case 0x98: /* cbw/cwde/cdqe */
3885		switch (ctxt->op_bytes) {
3886		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
3887		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
3888		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
3889		}
3890		break;
3891	case 0xc0 ... 0xc1:
3892		rc = em_grp2(ctxt);
3893		break;
3894	case 0xc4:		/* les */
3895		rc = emulate_load_segment(ctxt, VCPU_SREG_ES);
3896		break;
3897	case 0xc5:		/* lds */
3898		rc = emulate_load_segment(ctxt, VCPU_SREG_DS);
3899		break;
3900	case 0xcc:		/* int3 */
3901		rc = emulate_int(ctxt, 3);
3902		break;
3903	case 0xcd:		/* int n */
3904		rc = emulate_int(ctxt, ctxt->src.val);
3905		break;
3906	case 0xce:		/* into */
3907		if (ctxt->eflags & EFLG_OF)
3908			rc = emulate_int(ctxt, 4);
3909		break;
3910	case 0xd0 ... 0xd1:	/* Grp2 */
3911		rc = em_grp2(ctxt);
3912		break;
3913	case 0xd2 ... 0xd3:	/* Grp2 */
3914		ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
3915		rc = em_grp2(ctxt);
3916		break;
3917	case 0xe4: 	/* inb */
3918	case 0xe5: 	/* in */
3919		goto do_io_in;
3920	case 0xe6: /* outb */
3921	case 0xe7: /* out */
3922		goto do_io_out;
3923	case 0xe8: /* call (near) */ {
3924		long int rel = ctxt->src.val;
3925		ctxt->src.val = (unsigned long) ctxt->_eip;
3926		jmp_rel(ctxt, rel);
3927		rc = em_push(ctxt);
3928		break;
3929	}
3930	case 0xe9: /* jmp rel */
3931	case 0xeb: /* jmp rel short */
3932		jmp_rel(ctxt, ctxt->src.val);
3933		ctxt->dst.type = OP_NONE; /* Disable writeback. */
3934		break;
3935	case 0xec: /* in al,dx */
3936	case 0xed: /* in (e/r)ax,dx */
3937	do_io_in:
3938		if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3939				     &ctxt->dst.val))
3940			goto done; /* IO is needed */
3941		break;
3942	case 0xee: /* out dx,al */
3943	case 0xef: /* out dx,(e/r)ax */
3944	do_io_out:
3945		ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3946				      &ctxt->src.val, 1);
3947		ctxt->dst.type = OP_NONE;	/* Disable writeback. */
3948		break;
3949	case 0xf4:              /* hlt */
3950		ctxt->ops->halt(ctxt);
3951		break;
3952	case 0xf5:	/* cmc */
3953		/* complement carry flag from eflags reg */
3954		ctxt->eflags ^= EFLG_CF;
3955		break;
3956	case 0xf6 ... 0xf7:	/* Grp3 */
3957		rc = em_grp3(ctxt);
3958		break;
3959	case 0xf8: /* clc */
3960		ctxt->eflags &= ~EFLG_CF;
3961		break;
3962	case 0xf9: /* stc */
3963		ctxt->eflags |= EFLG_CF;
3964		break;
3965	case 0xfc: /* cld */
3966		ctxt->eflags &= ~EFLG_DF;
3967		break;
3968	case 0xfd: /* std */
3969		ctxt->eflags |= EFLG_DF;
3970		break;
3971	case 0xfe: /* Grp4 */
3972		rc = em_grp45(ctxt);
3973		break;
3974	case 0xff: /* Grp5 */
3975		rc = em_grp45(ctxt);
3976		break;
3977	default:
3978		goto cannot_emulate;
3979	}
3980
3981	if (rc != X86EMUL_CONTINUE)
3982		goto done;
3983
3984writeback:
3985	rc = writeback(ctxt);
3986	if (rc != X86EMUL_CONTINUE)
3987		goto done;
3988
3989	/*
3990	 * Restore dst type in case the decoding is reused
3991	 * (happens for string instructions).
3992	 */
3993	ctxt->dst.type = saved_dst_type;
3994
3995	if ((ctxt->d & SrcMask) == SrcSI)
3996		string_addr_inc(ctxt, seg_override(ctxt),
3997				VCPU_REGS_RSI, &ctxt->src);
3998
3999	if ((ctxt->d & DstMask) == DstDI)
4000		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
4001				&ctxt->dst);
4002
4003	if (ctxt->rep_prefix && (ctxt->d & String)) {
4004		struct read_cache *r = &ctxt->io_read;
4005		register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
4006
4007		if (!string_insn_completed(ctxt)) {
4008			/*
4009			 * Re-enter the guest when the pio read-ahead buffer is
4010			 * empty or, if it is not used, after every 1024 iterations.
4011			 */
4012			if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
4013			    (r->end == 0 || r->end != r->pos)) {
4014				/*
4015				 * Reset read cache. Usually happens before
4016				 * decode, but since instruction is restarted
4017				 * we have to do it here.
4018				 */
4019				ctxt->mem_read.end = 0;
4020				return EMULATION_RESTART;
4021			}
4022			goto done; /* skip rip writeback */
4023		}
4024	}
4025
4026	ctxt->eip = ctxt->_eip;
4027
4028done:
4029	if (rc == X86EMUL_PROPAGATE_FAULT)
4030		ctxt->have_exception = true;
4031	if (rc == X86EMUL_INTERCEPTED)
4032		return EMULATION_INTERCEPTED;
4033
4034	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
4035
4036twobyte_insn:
4037	switch (ctxt->b) {
4038	case 0x09:		/* wbinvd */
4039		(ctxt->ops->wbinvd)(ctxt);
4040		break;
4041	case 0x08:		/* invd */
4042	case 0x0d:		/* GrpP (prefetch) */
4043	case 0x18:		/* Grp16 (prefetch/nop) */
4044		break;
4045	case 0x20: /* mov cr, reg */
4046		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
4047		break;
4048	case 0x21: /* mov from dr to reg */
4049		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
4050		break;
4051	case 0x22: /* mov reg, cr */
4052		if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) {
4053			emulate_gp(ctxt, 0);
4054			rc = X86EMUL_PROPAGATE_FAULT;
4055			goto done;
4056		}
4057		ctxt->dst.type = OP_NONE;
4058		break;
4059	case 0x23: /* mov from reg to dr */
4060		if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val &
4061				((ctxt->mode == X86EMUL_MODE_PROT64) ?
4062				 ~0ULL : ~0U)) < 0) {
4063			/* #UD condition is already handled by the code above */
4064			emulate_gp(ctxt, 0);
4065			rc = X86EMUL_PROPAGATE_FAULT;
4066			goto done;
4067		}
4068
4069		ctxt->dst.type = OP_NONE;	/* no writeback */
4070		break;
4071	case 0x30:
4072		/* wrmsr */
4073		msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
4074			| ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
4075		if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) {
4076			emulate_gp(ctxt, 0);
4077			rc = X86EMUL_PROPAGATE_FAULT;
4078			goto done;
4079		}
4080		rc = X86EMUL_CONTINUE;
4081		break;
4082	case 0x32:
4083		/* rdmsr */
4084		if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) {
4085			emulate_gp(ctxt, 0);
4086			rc = X86EMUL_PROPAGATE_FAULT;
4087			goto done;
4088		} else {
4089			ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
4090			ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
4091		}
4092		rc = X86EMUL_CONTINUE;
4093		break;
4094	case 0x40 ... 0x4f:	/* cmov */
4095		ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
4096		if (!test_cc(ctxt->b, ctxt->eflags))
4097			ctxt->dst.type = OP_NONE; /* no writeback */
4098		break;
4099	case 0x80 ... 0x8f: /* jnz rel, etc*/
4100		if (test_cc(ctxt->b, ctxt->eflags))
4101			jmp_rel(ctxt, ctxt->src.val);
4102		break;
4103	case 0x90 ... 0x9f:     /* setcc r/m8 */
4104		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
4105		break;
4106	case 0xa0:	  /* push fs */
4107		rc = emulate_push_sreg(ctxt, VCPU_SREG_FS);
4108		break;
4109	case 0xa1:	 /* pop fs */
4110		rc = emulate_pop_sreg(ctxt, VCPU_SREG_FS);
4111		break;
4112	case 0xa3:
4113	      bt:		/* bt */
4114		ctxt->dst.type = OP_NONE;
4115		/* only subword offset */
4116		ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
4117		emulate_2op_SrcV_nobyte("bt", ctxt->src, ctxt->dst, ctxt->eflags);
4118		break;
4119	case 0xa4: /* shld imm8, r, r/m */
4120	case 0xa5: /* shld cl, r, r/m */
4121		emulate_2op_cl("shld", ctxt->src2, ctxt->src, ctxt->dst, ctxt->eflags);
4122		break;
4123	case 0xa8:	/* push gs */
4124		rc = emulate_push_sreg(ctxt, VCPU_SREG_GS);
4125		break;
4126	case 0xa9:	/* pop gs */
4127		rc = emulate_pop_sreg(ctxt, VCPU_SREG_GS);
4128		break;
4129	case 0xab:
4130	      bts:		/* bts */
4131		emulate_2op_SrcV_nobyte("bts", ctxt->src, ctxt->dst, ctxt->eflags);
4132		break;
4133	case 0xac: /* shrd imm8, r, r/m */
4134	case 0xad: /* shrd cl, r, r/m */
4135		emulate_2op_cl("shrd", ctxt->src2, ctxt->src, ctxt->dst, ctxt->eflags);
4136		break;
4137	case 0xae:              /* clflush */
4138		break;
4139	case 0xb0 ... 0xb1:	/* cmpxchg */
4140		/*
4141		 * Save real source value, then compare EAX against
4142		 * destination.
4143		 */
4144		ctxt->src.orig_val = ctxt->src.val;
4145		ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
4146		emulate_2op_SrcV("cmp", ctxt->src, ctxt->dst, ctxt->eflags);
4147		if (ctxt->eflags & EFLG_ZF) {
4148			/* Success: write back to memory. */
4149			ctxt->dst.val = ctxt->src.orig_val;
4150		} else {
4151			/* Failure: write the value we saw to EAX. */
4152			ctxt->dst.type = OP_REG;
4153			ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
4154		}
4155		break;
4156	case 0xb2:		/* lss */
4157		rc = emulate_load_segment(ctxt, VCPU_SREG_SS);
4158		break;
4159	case 0xb3:
4160	      btr:		/* btr */
4161		emulate_2op_SrcV_nobyte("btr", ctxt->src, ctxt->dst, ctxt->eflags);
4162		break;
4163	case 0xb4:		/* lfs */
4164		rc = emulate_load_segment(ctxt, VCPU_SREG_FS);
4165		break;
4166	case 0xb5:		/* lgs */
4167		rc = emulate_load_segment(ctxt, VCPU_SREG_GS);
4168		break;
4169	case 0xb6 ... 0xb7:	/* movzx */
4170		ctxt->dst.bytes = ctxt->op_bytes;
4171		ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
4172						       : (u16) ctxt->src.val;
4173		break;
4174	case 0xba:		/* Grp8 */
4175		switch (ctxt->modrm_reg & 3) {
4176		case 0:
4177			goto bt;
4178		case 1:
4179			goto bts;
4180		case 2:
4181			goto btr;
4182		case 3:
4183			goto btc;
4184		}
4185		break;
4186	case 0xbb:
4187	      btc:		/* btc */
4188		emulate_2op_SrcV_nobyte("btc", ctxt->src, ctxt->dst, ctxt->eflags);
4189		break;
4190	case 0xbc: {		/* bsf */
4191		u8 zf;
4192		__asm__ ("bsf %2, %0; setz %1"
4193			 : "=r"(ctxt->dst.val), "=q"(zf)
4194			 : "r"(ctxt->src.val));
4195		ctxt->eflags &= ~X86_EFLAGS_ZF;
4196		if (zf) {
4197			ctxt->eflags |= X86_EFLAGS_ZF;
4198			ctxt->dst.type = OP_NONE;	/* Disable writeback. */
4199		}
4200		break;
4201	}
4202	case 0xbd: {		/* bsr */
4203		u8 zf;
4204		__asm__ ("bsr %2, %0; setz %1"
4205			 : "=r"(ctxt->dst.val), "=q"(zf)
4206			 : "r"(ctxt->src.val));
4207		ctxt->eflags &= ~X86_EFLAGS_ZF;
4208		if (zf) {
4209			ctxt->eflags |= X86_EFLAGS_ZF;
4210			ctxt->dst.type = OP_NONE;	/* Disable writeback. */
4211		}
4212		break;
4213	}
4214	case 0xbe ... 0xbf:	/* movsx */
4215		ctxt->dst.bytes = ctxt->op_bytes;
4216		ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
4217							(s16) ctxt->src.val;
4218		break;
4219	case 0xc0 ... 0xc1:	/* xadd */
4220		emulate_2op_SrcV("add", ctxt->src, ctxt->dst, ctxt->eflags);
4221		/* Write back the register source. */
4222		ctxt->src.val = ctxt->dst.orig_val;
4223		write_register_operand(&ctxt->src);
4224		break;
4225	case 0xc3:		/* movnti */
4226		ctxt->dst.bytes = ctxt->op_bytes;
4227		ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
4228							(u64) ctxt->src.val;
4229		break;
4230	case 0xc7:		/* Grp9 (cmpxchg8b) */
4231		rc = em_grp9(ctxt);
4232		break;
4233	default:
4234		goto cannot_emulate;
4235	}
4236
4237	if (rc != X86EMUL_CONTINUE)
4238		goto done;
4239
4240	goto writeback;
4241
4242cannot_emulate:
4243	return EMULATION_FAILED;
4244}
/* ===== The same file, as of kernel v6.13.7, follows. ===== */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/******************************************************************************
   3 * emulate.c
   4 *
   5 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
   6 *
   7 * Copyright (c) 2005 Keir Fraser
   8 *
   9 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
  10 * privileged instructions:
  11 *
  12 * Copyright (C) 2006 Qumranet
  13 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  14 *
  15 *   Avi Kivity <avi@qumranet.com>
  16 *   Yaniv Kamay <yaniv@qumranet.com>
  17 *
  18 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
  19 */
  20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  21
  22#include <linux/kvm_host.h>
  23#include "kvm_cache_regs.h"
  24#include "kvm_emulate.h"
  25#include <linux/stringify.h>
  26#include <asm/debugreg.h>
  27#include <asm/nospec-branch.h>
  28#include <asm/ibt.h>
  29
  30#include "x86.h"
  31#include "tss.h"
  32#include "mmu.h"
  33#include "pmu.h"
  34
  35/*
  36 * Operand types
  37 */
  38#define OpNone             0ull
  39#define OpImplicit         1ull  /* No generic decode */
  40#define OpReg              2ull  /* Register */
  41#define OpMem              3ull  /* Memory */
  42#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
  43#define OpDI               5ull  /* ES:DI/EDI/RDI */
  44#define OpMem64            6ull  /* Memory, 64-bit */
  45#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
  46#define OpDX               8ull  /* DX register */
  47#define OpCL               9ull  /* CL register (for shifts) */
  48#define OpImmByte         10ull  /* 8-bit sign extended immediate */
  49#define OpOne             11ull  /* Implied 1 */
  50#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
  51#define OpMem16           13ull  /* Memory operand (16-bit). */
  52#define OpMem32           14ull  /* Memory operand (32-bit). */
  53#define OpImmU            15ull  /* Immediate operand, zero extended */
  54#define OpSI              16ull  /* SI/ESI/RSI */
  55#define OpImmFAddr        17ull  /* Immediate far address */
  56#define OpMemFAddr        18ull  /* Far address in memory */
  57#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
  58#define OpES              20ull  /* ES */
  59#define OpCS              21ull  /* CS */
  60#define OpSS              22ull  /* SS */
  61#define OpDS              23ull  /* DS */
  62#define OpFS              24ull  /* FS */
  63#define OpGS              25ull  /* GS */
  64#define OpMem8            26ull  /* 8-bit zero extended memory operand */
  65#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
  66#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
  67#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
  68#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */
  69
  70#define OpBits             5  /* Width of operand field */
  71#define OpMask             ((1ull << OpBits) - 1)
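
/*
 * The operand kinds above are packed into the 64-bit opcode flags:
 * destination in bits 1-5 (DstShift), source in bits 6-10 (SrcShift),
 * second source in bits 31-35 (Src2Shift); the remaining bits hold the
 * individual flags defined below.
 */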
  72
  73/*
  74 * Opcode effective-address decode tables.
  75 * Note that we only emulate instructions that have at least one memory
  76 * operand (excluding implicit stack references). We assume that stack
  77 * references and instruction fetches will never occur in special memory
  78 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
  79 * not be handled.
  80 */
  81
  82/* Operand sizes: 8-bit operands or specified/overridden size. */
  83#define ByteOp      (1<<0)	/* 8-bit operands. */
  84/* Destination operand type. */
  85#define DstShift    1
  86#define ImplicitOps (OpImplicit << DstShift)
  87#define DstReg      (OpReg << DstShift)
  88#define DstMem      (OpMem << DstShift)
  89#define DstAcc      (OpAcc << DstShift)
  90#define DstDI       (OpDI << DstShift)
  91#define DstMem64    (OpMem64 << DstShift)
  92#define DstMem16    (OpMem16 << DstShift)
  93#define DstImmUByte (OpImmUByte << DstShift)
  94#define DstDX       (OpDX << DstShift)
  95#define DstAccLo    (OpAccLo << DstShift)
  96#define DstMask     (OpMask << DstShift)
  97/* Source operand type. */
  98#define SrcShift    6
  99#define SrcNone     (OpNone << SrcShift)
 100#define SrcReg      (OpReg << SrcShift)
 101#define SrcMem      (OpMem << SrcShift)
 102#define SrcMem16    (OpMem16 << SrcShift)
 103#define SrcMem32    (OpMem32 << SrcShift)
 104#define SrcImm      (OpImm << SrcShift)
 105#define SrcImmByte  (OpImmByte << SrcShift)
 106#define SrcOne      (OpOne << SrcShift)
 107#define SrcImmUByte (OpImmUByte << SrcShift)
 108#define SrcImmU     (OpImmU << SrcShift)
 109#define SrcSI       (OpSI << SrcShift)
 110#define SrcXLat     (OpXLat << SrcShift)
 111#define SrcImmFAddr (OpImmFAddr << SrcShift)
 112#define SrcMemFAddr (OpMemFAddr << SrcShift)
 113#define SrcAcc      (OpAcc << SrcShift)
 114#define SrcImmU16   (OpImmU16 << SrcShift)
 115#define SrcImm64    (OpImm64 << SrcShift)
 116#define SrcDX       (OpDX << SrcShift)
 117#define SrcMem8     (OpMem8 << SrcShift)
 118#define SrcAccHi    (OpAccHi << SrcShift)
 119#define SrcMask     (OpMask << SrcShift)
 120#define BitOp       (1<<11)
 121#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
 122#define String      (1<<13)     /* String instruction (rep capable) */
 123#define Stack       (1<<14)     /* Stack instruction (push/pop) */
 124#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
 125#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
 126#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
 127#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
 128#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
 129#define Escape      (5<<15)     /* Escape to coprocessor instruction */
 130#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
 131#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
 132#define Sse         (1<<18)     /* SSE Vector instruction */
 133/* Generic ModRM decode. */
 134#define ModRM       (1<<19)
 135/* Destination is only written; never read. */
 136#define Mov         (1<<20)
 137/* Misc flags */
 138#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
 139#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
 140#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
 141#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
 142#define Undefined   (1<<25) /* No Such Instruction */
 143#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
 144#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
 145#define No64	    (1<<28)
 146#define PageTable   (1 << 29)   /* instruction used to write page table */
 147#define NotImpl     (1 << 30)   /* instruction is not implemented */
 148/* Source 2 operand type */
 149#define Src2Shift   (31)
 150#define Src2None    (OpNone << Src2Shift)
 151#define Src2Mem     (OpMem << Src2Shift)
 152#define Src2CL      (OpCL << Src2Shift)
 153#define Src2ImmByte (OpImmByte << Src2Shift)
 154#define Src2One     (OpOne << Src2Shift)
 155#define Src2Imm     (OpImm << Src2Shift)
 156#define Src2ES      (OpES << Src2Shift)
 157#define Src2CS      (OpCS << Src2Shift)
 158#define Src2SS      (OpSS << Src2Shift)
 159#define Src2DS      (OpDS << Src2Shift)
 160#define Src2FS      (OpFS << Src2Shift)
 161#define Src2GS      (OpGS << Src2Shift)
 162#define Src2Mask    (OpMask << Src2Shift)
 163#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
 164#define AlignMask   ((u64)7 << 41)
 165#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
 166#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
 167#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
 168#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
 169#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
 170#define NoWrite     ((u64)1 << 45)  /* No writeback */
 171#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
 172#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
 173#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
 174#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
 175#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
 176#define NearBranch  ((u64)1 << 52)  /* Near branches */
 177#define No16	    ((u64)1 << 53)  /* No 16 bit operand */
 178#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
 179#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operand */
 180#define IsBranch    ((u64)1 << 56)  /* Instruction is considered a branch. */
 181
 182#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
 183
 184#define X2(x...) x, x
 185#define X3(x...) X2(x), x
 186#define X4(x...) X2(x), X2(x)
 187#define X5(x...) X4(x), x
 188#define X6(x...) X4(x), X2(x)
 189#define X7(x...) X4(x), X3(x)
 190#define X8(x...) X4(x), X4(x)
 191#define X16(x...) X8(x), X8(x)
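/* Repetition helpers for the opcode tables: X16(x) expands to sixteen copies of x. */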
 192
 193struct opcode {
 194	u64 flags;
 195	u8 intercept;
 196	u8 pad[7];
 197	union {
 198		int (*execute)(struct x86_emulate_ctxt *ctxt);
 199		const struct opcode *group;
 200		const struct group_dual *gdual;
 201		const struct gprefix *gprefix;
 202		const struct escape *esc;
 203		const struct instr_dual *idual;
 204		const struct mode_dual *mdual;
 205		void (*fastop)(struct fastop *fake);
 206	} u;
 207	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
 208};
 209
 210struct group_dual {
 211	struct opcode mod012[8];
 212	struct opcode mod3[8];
 213};
 214
 215struct gprefix {
 216	struct opcode pfx_no;
 217	struct opcode pfx_66;
 218	struct opcode pfx_f2;
 219	struct opcode pfx_f3;
 220};
 221
 222struct escape {
 223	struct opcode op[8];
 224	struct opcode high[64];
 225};
 226
 227struct instr_dual {
 228	struct opcode mod012;
 229	struct opcode mod3;
 230};
 231
 232struct mode_dual {
 233	struct opcode mode32;
 234	struct opcode mode64;
 235};
 236
 237#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
 238
 239enum x86_transfer_type {
 240	X86_TRANSFER_NONE,
 241	X86_TRANSFER_CALL_JMP,
 242	X86_TRANSFER_RET,
 243	X86_TRANSFER_TASK_SWITCH,
 244};
 245
 246static void writeback_registers(struct x86_emulate_ctxt *ctxt)
 247{
 248	unsigned long dirty = ctxt->regs_dirty;
 249	unsigned reg;
 250
 251	for_each_set_bit(reg, &dirty, NR_EMULATOR_GPRS)
 252		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
 253}
 254
 255static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
 256{
 257	ctxt->regs_dirty = 0;
 258	ctxt->regs_valid = 0;
 259}
 260
 261/*
 262 * These EFLAGS bits are restored from saved value during emulation, and
 263 * any changes are written back to the saved value after emulation.
 264 */
 265#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
 266		     X86_EFLAGS_PF|X86_EFLAGS_CF)
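
/*
 * A minimal sketch (hypothetical helper, not used by the emulator) of
 * how a computed flags value is folded back under EFLAGS_MASK:
 */
static inline unsigned long fold_eflags(unsigned long guest_eflags,
					unsigned long result_flags)
{
	return (guest_eflags & ~EFLAGS_MASK) | (result_flags & EFLAGS_MASK);
}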
 267
 268#ifdef CONFIG_X86_64
 269#define ON64(x) x
 270#else
 271#define ON64(x)
 272#endif
 273
 274/*
 275 * fastop functions have a special calling convention:
 276 *
 277 * dst:    rax        (in/out)
 278 * src:    rdx        (in/out)
 279 * src2:   rcx        (in)
 280 * flags:  rflags     (in/out)
 281 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 282 *
 283 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 284 * different operand sizes can be reached by calculation, rather than a jump
 285 * table (which would be bigger than the code).
 286 *
 287 * The 16 byte alignment, considering 5 bytes for the RET thunk, 3 for ENDBR
 288 * and 1 for the straight line speculation INT3, leaves 7 bytes for the
 289 * body of the function.  Currently none is larger than 4.
 290 */
 291static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
 292
 293#define FASTOP_SIZE	16
 294
 295#define __FOP_FUNC(name) \
 296	".align " __stringify(FASTOP_SIZE) " \n\t" \
 297	".type " name ", @function \n\t" \
 298	name ":\n\t" \
 299	ASM_ENDBR \
 300	IBT_NOSEAL(name)
 301
 302#define FOP_FUNC(name) \
 303	__FOP_FUNC(#name)
 304
 305#define __FOP_RET(name) \
 306	"11: " ASM_RET \
 307	".size " name ", .-" name "\n\t"
 308
 309#define FOP_RET(name) \
 310	__FOP_RET(#name)
 311
 312#define __FOP_START(op, align) \
 313	extern void em_##op(struct fastop *fake); \
 314	asm(".pushsection .text, \"ax\" \n\t" \
 315	    ".global em_" #op " \n\t" \
 316	    ".align " __stringify(align) " \n\t" \
 317	    "em_" #op ":\n\t"
 318
 319#define FOP_START(op) __FOP_START(op, FASTOP_SIZE)
 320
 321#define FOP_END \
 322	    ".popsection")
 323
 324#define __FOPNOP(name) \
 325	__FOP_FUNC(name) \
 326	__FOP_RET(name)
 327
 328#define FOPNOP() \
 329	__FOPNOP(__stringify(__UNIQUE_ID(nop)))
 330
 331#define FOP1E(op,  dst) \
 332	__FOP_FUNC(#op "_" #dst) \
 333	"10: " #op " %" #dst " \n\t" \
 334	__FOP_RET(#op "_" #dst)
 335
 336#define FOP1EEX(op,  dst) \
 337	FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)
 338
 339#define FASTOP1(op) \
 340	FOP_START(op) \
 341	FOP1E(op##b, al) \
 342	FOP1E(op##w, ax) \
 343	FOP1E(op##l, eax) \
 344	ON64(FOP1E(op##q, rax))	\
 345	FOP_END
 346
 347/* 1-operand, using src2 (for MUL/DIV r/m) */
 348#define FASTOP1SRC2(op, name) \
 349	FOP_START(name) \
 350	FOP1E(op, cl) \
 351	FOP1E(op, cx) \
 352	FOP1E(op, ecx) \
 353	ON64(FOP1E(op, rcx)) \
 354	FOP_END
 355
 356/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
 357#define FASTOP1SRC2EX(op, name) \
 358	FOP_START(name) \
 359	FOP1EEX(op, cl) \
 360	FOP1EEX(op, cx) \
 361	FOP1EEX(op, ecx) \
 362	ON64(FOP1EEX(op, rcx)) \
 363	FOP_END
 364
 365#define FOP2E(op,  dst, src)	   \
 366	__FOP_FUNC(#op "_" #dst "_" #src) \
 367	#op " %" #src ", %" #dst " \n\t" \
 368	__FOP_RET(#op "_" #dst "_" #src)
 369
 370#define FASTOP2(op) \
 371	FOP_START(op) \
 372	FOP2E(op##b, al, dl) \
 373	FOP2E(op##w, ax, dx) \
 374	FOP2E(op##l, eax, edx) \
 375	ON64(FOP2E(op##q, rax, rdx)) \
 376	FOP_END
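
/*
 * Schematically (assuming a 64-bit host), FASTOP2(add) emits four
 * FASTOP_SIZE-aligned stubs:
 *
 *	em_add + 0*FASTOP_SIZE:	addb %dl,  %al  ; ret
 *	em_add + 1*FASTOP_SIZE:	addw %dx,  %ax  ; ret
 *	em_add + 2*FASTOP_SIZE:	addl %edx, %eax ; ret
 *	em_add + 3*FASTOP_SIZE:	addq %rdx, %rax ; ret
 *
 * so fastop() selects the stub by operand size instead of via a table.
 */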
 377
 378/* 2 operand, word only */
 379#define FASTOP2W(op) \
 380	FOP_START(op) \
 381	FOPNOP() \
 382	FOP2E(op##w, ax, dx) \
 383	FOP2E(op##l, eax, edx) \
 384	ON64(FOP2E(op##q, rax, rdx)) \
 385	FOP_END
 386
 387/* 2 operand, src is CL */
 388#define FASTOP2CL(op) \
 389	FOP_START(op) \
 390	FOP2E(op##b, al, cl) \
 391	FOP2E(op##w, ax, cl) \
 392	FOP2E(op##l, eax, cl) \
 393	ON64(FOP2E(op##q, rax, cl)) \
 394	FOP_END
 395
 396/* 2 operand, src and dest are reversed */
 397#define FASTOP2R(op, name) \
 398	FOP_START(name) \
 399	FOP2E(op##b, dl, al) \
 400	FOP2E(op##w, dx, ax) \
 401	FOP2E(op##l, edx, eax) \
 402	ON64(FOP2E(op##q, rdx, rax)) \
 403	FOP_END
 404
 405#define FOP3E(op,  dst, src, src2) \
 406	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
 407	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
 408	__FOP_RET(#op "_" #dst "_" #src "_" #src2)
 409
 410/* 3-operand, word-only, src2=cl */
 411#define FASTOP3WCL(op) \
 412	FOP_START(op) \
 413	FOPNOP() \
 414	FOP3E(op##w, ax, dx, cl) \
 415	FOP3E(op##l, eax, edx, cl) \
 416	ON64(FOP3E(op##q, rax, rdx, cl)) \
 417	FOP_END
 418
 419/* Special case for SETcc - 1 instruction per cc */
 420#define FOP_SETCC(op) \
 421	FOP_FUNC(op) \
 422	#op " %al \n\t" \
 423	FOP_RET(op)
 424
 425FOP_START(setcc)
 426FOP_SETCC(seto)
 427FOP_SETCC(setno)
 428FOP_SETCC(setc)
 429FOP_SETCC(setnc)
 430FOP_SETCC(setz)
 431FOP_SETCC(setnz)
 432FOP_SETCC(setbe)
 433FOP_SETCC(setnbe)
 434FOP_SETCC(sets)
 435FOP_SETCC(setns)
 436FOP_SETCC(setp)
 437FOP_SETCC(setnp)
 438FOP_SETCC(setl)
 439FOP_SETCC(setnl)
 440FOP_SETCC(setle)
 441FOP_SETCC(setnle)
 442FOP_END;
 443
 444FOP_START(salc)
 445FOP_FUNC(salc)
 446"pushf; sbb %al, %al; popf \n\t"
 447FOP_RET(salc)
 448FOP_END;
 449
 450/*
 451 * XXX: inoutclob user must know where the argument is being expanded.
 452 *      Using asm goto would allow us to remove _fault.
 453 */
 454#define asm_safe(insn, inoutclob...) \
 455({ \
 456	int _fault = 0; \
 457 \
 458	asm volatile("1:" insn "\n" \
 459	             "2:\n" \
 460		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
 461	             : [_fault] "+r"(_fault) inoutclob ); \
 462 \
 463	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
 464})
 465
 466static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
 467				    enum x86_intercept intercept,
 468				    enum x86_intercept_stage stage)
 469{
 470	struct x86_instruction_info info = {
 471		.intercept  = intercept,
 472		.rep_prefix = ctxt->rep_prefix,
 473		.modrm_mod  = ctxt->modrm_mod,
 474		.modrm_reg  = ctxt->modrm_reg,
 475		.modrm_rm   = ctxt->modrm_rm,
 476		.src_val    = ctxt->src.val64,
 477		.dst_val    = ctxt->dst.val64,
 478		.src_bytes  = ctxt->src.bytes,
 479		.dst_bytes  = ctxt->dst.bytes,
 480		.ad_bytes   = ctxt->ad_bytes,
 481		.next_rip   = ctxt->eip,
 482	};
 483
 484	return ctxt->ops->intercept(ctxt, &info, stage);
 485}
 486
 487static void assign_masked(ulong *dest, ulong src, ulong mask)
 488{
 489	*dest = (*dest & ~mask) | (src & mask);
 490}
 491
 492static void assign_register(unsigned long *reg, u64 val, int bytes)
 493{
 494	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
 495	switch (bytes) {
 496	case 1:
 497		*(u8 *)reg = (u8)val;
 498		break;
 499	case 2:
 500		*(u16 *)reg = (u16)val;
 501		break;
 502	case 4:
 503		*reg = (u32)val;
 504		break;	/* 64b: zero-extend */
 505	case 8:
 506		*reg = val;
 507		break;
 508	}
 509}
 510
 511static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
 512{
 513	return (1UL << (ctxt->ad_bytes << 3)) - 1;
 514}
 515
 516static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
 517{
 518	u16 sel;
 519	struct desc_struct ss;
 520
 521	if (ctxt->mode == X86EMUL_MODE_PROT64)
 522		return ~0UL;
 523	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
 524	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
 525}
 526
 527static int stack_size(struct x86_emulate_ctxt *ctxt)
 528{
 529	return (__fls(stack_mask(ctxt)) + 1) >> 3;
 530}
 531
 532/* Access/update address held in a register, based on addressing mode. */
 533static inline unsigned long
 534address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
 535{
 536	if (ctxt->ad_bytes == sizeof(unsigned long))
 537		return reg;
 538	else
 539		return reg & ad_mask(ctxt);
 540}
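/* e.g. with ctxt->ad_bytes == 2, 0x12345678 is masked down to 0x5678. */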
 541
 542static inline unsigned long
 543register_address(struct x86_emulate_ctxt *ctxt, int reg)
 544{
 545	return address_mask(ctxt, reg_read(ctxt, reg));
 546}
 547
 548static void masked_increment(ulong *reg, ulong mask, int inc)
 549{
 550	assign_masked(reg, *reg + inc, mask);
 551}
 552
 553static inline void
 554register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
 555{
 556	ulong *preg = reg_rmw(ctxt, reg);
 557
 558	assign_register(preg, *preg + inc, ctxt->ad_bytes);
 559}
 560
 561static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
 562{
 563	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
 564}
 565
 566static u32 desc_limit_scaled(struct desc_struct *desc)
 567{
 568	u32 limit = get_desc_limit(desc);
 569
 570	return desc->g ? (limit << 12) | 0xfff : limit;
 571}
 572
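/* In 64-bit mode only FS and GS can contribute a non-zero segment base. */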
 573static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
 574{
 575	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
 576		return 0;
 577
 578	return ctxt->ops->get_cached_segment_base(ctxt, seg);
 579}
 580
 581static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
 582			     u32 error, bool valid)
 583{
 584	if (KVM_EMULATOR_BUG_ON(vec > 0x1f, ctxt))
 585		return X86EMUL_UNHANDLEABLE;
 586
 587	ctxt->exception.vector = vec;
 588	ctxt->exception.error_code = error;
 589	ctxt->exception.error_code_valid = valid;
 590	return X86EMUL_PROPAGATE_FAULT;
 591}
 592
 593static int emulate_db(struct x86_emulate_ctxt *ctxt)
 594{
 595	return emulate_exception(ctxt, DB_VECTOR, 0, false);
 596}
 597
 598static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
 599{
 600	return emulate_exception(ctxt, GP_VECTOR, err, true);
 601}
 602
 603static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
 604{
 605	return emulate_exception(ctxt, SS_VECTOR, err, true);
 606}
 607
 608static int emulate_ud(struct x86_emulate_ctxt *ctxt)
 609{
 610	return emulate_exception(ctxt, UD_VECTOR, 0, false);
 611}
 612
 613static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
 614{
 615	return emulate_exception(ctxt, TS_VECTOR, err, true);
 616}
 617
 618static int emulate_de(struct x86_emulate_ctxt *ctxt)
 619{
 620	return emulate_exception(ctxt, DE_VECTOR, 0, false);
 621}
 622
 623static int emulate_nm(struct x86_emulate_ctxt *ctxt)
 624{
 625	return emulate_exception(ctxt, NM_VECTOR, 0, false);
 626}
 627
 628static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
 629{
 630	u16 selector;
 631	struct desc_struct desc;
 632
 633	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
 634	return selector;
 635}
 636
 637static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
 638				 unsigned seg)
 639{
 640	u16 dummy;
 641	u32 base3;
 642	struct desc_struct desc;
 643
 644	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
 645	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
 646}
 647
 648static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
 649{
 650	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
 651}
 652
 653static inline bool emul_is_noncanonical_address(u64 la,
 654						struct x86_emulate_ctxt *ctxt,
 655						unsigned int flags)
 656{
 657	return !ctxt->ops->is_canonical_addr(ctxt, la, flags);
 658}
 659
 660/*
 661 * x86 defines three classes of vector instructions: explicitly
 662 * aligned, explicitly unaligned, and the rest, which change behaviour
 663 * depending on whether they're AVX encoded or not.
 664 *
 665 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 666 * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
 667 * 512 bytes of data must be aligned to a 16 byte boundary.
 668 */
 669static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
 670{
 671	u64 alignment = ctxt->d & AlignMask;
 672
 673	if (likely(size < 16))
 674		return 1;
 675
 676	switch (alignment) {
 677	case Unaligned:
 678	case Avx:
 679		return 1;
 680	case Aligned16:
 681		return 16;
 682	case Aligned:
 683	default:
 684		return size;
 685	}
 686}
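/*
 * e.g. a 16-byte MOVDQA operand (Aligned) must be 16-byte aligned,
 * MOVDQU (Unaligned) is never alignment-checked, and FXSAVE's 512-byte
 * image (Aligned16) needs only 16-byte alignment.
 */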
 687
 688static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 689				       struct segmented_address addr,
 690				       unsigned *max_size, unsigned size,
 691				       enum x86emul_mode mode, ulong *linear,
 692				       unsigned int flags)
 693{
 694	struct desc_struct desc;
 695	bool usable;
 696	ulong la;
 697	u32 lim;
 698	u16 sel;
 699	u8  va_bits;
 700
 701	la = seg_base(ctxt, addr.seg) + addr.ea;
 702	*max_size = 0;
 703	switch (mode) {
 704	case X86EMUL_MODE_PROT64:
 705		*linear = la = ctxt->ops->get_untagged_addr(ctxt, la, flags);
 706		va_bits = ctxt_virt_addr_bits(ctxt);
 707		if (!__is_canonical_address(la, va_bits))
 708			goto bad;
 709
 710		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
 711		if (size > *max_size)
 712			goto bad;
 713		break;
 714	default:
 715		*linear = la = (u32)la;
 716		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
 717						addr.seg);
 718		if (!usable)
 719			goto bad;
 720		/* code segment in protected mode or read-only data segment */
 721		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) &&
 722		    (flags & X86EMUL_F_WRITE))
 723			goto bad;
 724		/* unreadable code segment */
 725		if (!(flags & X86EMUL_F_FETCH) && (desc.type & 8) && !(desc.type & 2))
 726			goto bad;
 727		lim = desc_limit_scaled(&desc);
 728		if (!(desc.type & 8) && (desc.type & 4)) {
 729			/* expand-down segment */
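			/*
			 * Valid offsets lie strictly above the limit, up to
			 * 0xffff (desc.d = 0) or 0xffffffff (desc.d = 1).
			 */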
 730			if (addr.ea <= lim)
 731				goto bad;
 732			lim = desc.d ? 0xffffffff : 0xffff;
 733		}
 734		if (addr.ea > lim)
 735			goto bad;
 736		if (lim == 0xffffffff)
 737			*max_size = ~0u;
 738		else {
 739			*max_size = (u64)lim + 1 - addr.ea;
 740			if (size > *max_size)
 741				goto bad;
 742		}
 743		break;
 744	}
 745	if (la & (insn_alignment(ctxt, size) - 1))
 746		return emulate_gp(ctxt, 0);
 747	return X86EMUL_CONTINUE;
 748bad:
 749	if (addr.seg == VCPU_SREG_SS)
 750		return emulate_ss(ctxt, 0);
 751	else
 752		return emulate_gp(ctxt, 0);
 753}
 754
 755static int linearize(struct x86_emulate_ctxt *ctxt,
 756		     struct segmented_address addr,
 757		     unsigned size, bool write,
 758		     ulong *linear)
 759{
 760	unsigned max_size;
 761	return __linearize(ctxt, addr, &max_size, size, ctxt->mode, linear,
 762			   write ? X86EMUL_F_WRITE : 0);
 763}
 764
 765static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
 766{
 767	ulong linear;
 768	int rc;
 769	unsigned max_size;
 770	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 771					   .ea = dst };
 772
 773	if (ctxt->op_bytes != sizeof(unsigned long))
 774		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
 775	rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear,
 776			 X86EMUL_F_FETCH);
 777	if (rc == X86EMUL_CONTINUE)
 778		ctxt->_eip = addr.ea;
 779	return rc;
 780}
 781
 782static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
 783{
 784	u64 efer;
 785	struct desc_struct cs;
 786	u16 selector;
 787	u32 base3;
 788
 789	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
 790
 791	if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
  792		/* Real mode.  CPU must not have long mode active. */
 793		if (efer & EFER_LMA)
 794			return X86EMUL_UNHANDLEABLE;
 795		ctxt->mode = X86EMUL_MODE_REAL;
 796		return X86EMUL_CONTINUE;
 797	}
 798
 799	if (ctxt->eflags & X86_EFLAGS_VM) {
  800		/* Protected/VM86 mode.  CPU must not have long mode active. */
 801		if (efer & EFER_LMA)
 802			return X86EMUL_UNHANDLEABLE;
 803		ctxt->mode = X86EMUL_MODE_VM86;
 804		return X86EMUL_CONTINUE;
 805	}
 806
 807	if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
 808		return X86EMUL_UNHANDLEABLE;
 809
 810	if (efer & EFER_LMA) {
 811		if (cs.l) {
 812			/* Proper long mode */
 813			ctxt->mode = X86EMUL_MODE_PROT64;
 814		} else if (cs.d) {
  815			/* 32-bit compatibility mode */
 816			ctxt->mode = X86EMUL_MODE_PROT32;
 817		} else {
 818			ctxt->mode = X86EMUL_MODE_PROT16;
 819		}
 820	} else {
 821		/* Legacy 32 bit / 16 bit mode */
 822		ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
 823	}
 824
 825	return X86EMUL_CONTINUE;
 826}
 827
 828static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
 829{
 830	return assign_eip(ctxt, dst);
 831}
 832
 833static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
 834{
 835	int rc = emulator_recalc_and_set_mode(ctxt);
 836
 837	if (rc != X86EMUL_CONTINUE)
 838		return rc;
 839
 840	return assign_eip(ctxt, dst);
 841}
 842
 843static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
 844{
 845	return assign_eip_near(ctxt, ctxt->_eip + rel);
 846}
 847
 848static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
 849			      void *data, unsigned size)
 850{
 851	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
 852}
 853
 854static int linear_write_system(struct x86_emulate_ctxt *ctxt,
 855			       ulong linear, void *data,
 856			       unsigned int size)
 857{
 858	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
 859}
 860
 861static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 862			      struct segmented_address addr,
 863			      void *data,
 864			      unsigned size)
 865{
 866	int rc;
 867	ulong linear;
 868
 869	rc = linearize(ctxt, addr, size, false, &linear);
 870	if (rc != X86EMUL_CONTINUE)
 871		return rc;
 872	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
 873}
 874
 875static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
 876			       struct segmented_address addr,
 877			       void *data,
 878			       unsigned int size)
 879{
 880	int rc;
 881	ulong linear;
 882
 883	rc = linearize(ctxt, addr, size, true, &linear);
 884	if (rc != X86EMUL_CONTINUE)
 885		return rc;
 886	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
 887}
 888
 889/*
 890 * Prefetch the remaining bytes of the instruction without crossing page
 891 * boundary if they are not in fetch_cache yet.
 892 */
 893static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 894{
 895	int rc;
 896	unsigned size, max_size;
 897	unsigned long linear;
 898	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
 899	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 900					   .ea = ctxt->eip + cur_size };
 901
 902	/*
 903	 * We do not know exactly how many bytes will be needed, and
 904	 * __linearize is expensive, so fetch as much as possible.  We
 905	 * just have to avoid going beyond the 15 byte limit, the end
 906	 * of the segment, or the end of the page.
 907	 *
 908	 * __linearize is called with size 0 so that it does not do any
 909	 * boundary check itself.  Instead, we use max_size to check
 910	 * against op_size.
 911	 */
 912	rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear,
 913			 X86EMUL_F_FETCH);
 914	if (unlikely(rc != X86EMUL_CONTINUE))
 915		return rc;
 916
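	/* 15UL ^ cur_size == 15 - cur_size here, since cur_size <= 15. */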
 917	size = min_t(unsigned, 15UL ^ cur_size, max_size);
 918	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
 919
 920	/*
  921	 * One instruction can straddle at most two pages, and the first
  922	 * page was loaded at the beginning of x86_decode_insn.  So if we
  923	 * still don't have enough bytes, we must have hit the 15-byte
  924	 * instruction-length limit.
 925	 */
 926	if (unlikely(size < op_size))
 927		return emulate_gp(ctxt, 0);
 928
 929	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
 930			      size, &ctxt->exception);
 931	if (unlikely(rc != X86EMUL_CONTINUE))
 932		return rc;
 933	ctxt->fetch.end += size;
 934	return X86EMUL_CONTINUE;
 935}
 936
 937static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
 938					       unsigned size)
 939{
 940	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
 941
 942	if (unlikely(done_size < size))
 943		return __do_insn_fetch_bytes(ctxt, size - done_size);
 944	else
 945		return X86EMUL_CONTINUE;
 946}
 947
 948/* Fetch next part of the instruction being emulated. */
 949#define insn_fetch(_type, _ctxt)					\
 950({	_type _x;							\
 951									\
 952	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
 953	if (rc != X86EMUL_CONTINUE)					\
 954		goto done;						\
 955	ctxt->_eip += sizeof(_type);					\
 956	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
 957	ctxt->fetch.ptr += sizeof(_type);				\
 958	_x;								\
 959})
 960
 961#define insn_fetch_arr(_arr, _size, _ctxt)				\
 962({									\
 963	rc = do_insn_fetch_bytes(_ctxt, _size);				\
 964	if (rc != X86EMUL_CONTINUE)					\
 965		goto done;						\
 966	ctxt->_eip += (_size);						\
 967	memcpy(_arr, ctxt->fetch.ptr, _size);				\
 968	ctxt->fetch.ptr += (_size);					\
 969})
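
/*
 * Typical decoder usage (both macros rely on an 'rc' variable and a
 * 'done' label being in scope in the calling function):
 *
 *	sib = insn_fetch(u8, ctxt);
 */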
 970
 971/*
 972 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 973 * pointer into the block that addresses the relevant register.
  974 * @byteop specifies whether the high-byte registers AH,CH,DH,BH may be decoded.
 975 */
 976static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
 977			     int byteop)
 978{
 979	void *p;
 980	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
 981
 982	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
 983		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
 984	else
 985		p = reg_rmw(ctxt, modrm_reg);
 986	return p;
 987}
 988
 989static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 990			   struct segmented_address addr,
 991			   u16 *size, unsigned long *address, int op_bytes)
 992{
 993	int rc;
 994
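	/* A 16-bit operand size still loads a 24-bit base, hence 3 bytes. */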
 995	if (op_bytes == 2)
 996		op_bytes = 3;
 997	*address = 0;
 998	rc = segmented_read_std(ctxt, addr, size, 2);
 999	if (rc != X86EMUL_CONTINUE)
1000		return rc;
1001	addr.ea += 2;
1002	rc = segmented_read_std(ctxt, addr, address, op_bytes);
1003	return rc;
1004}
1005
1006FASTOP2(add);
1007FASTOP2(or);
1008FASTOP2(adc);
1009FASTOP2(sbb);
1010FASTOP2(and);
1011FASTOP2(sub);
1012FASTOP2(xor);
1013FASTOP2(cmp);
1014FASTOP2(test);
1015
1016FASTOP1SRC2(mul, mul_ex);
1017FASTOP1SRC2(imul, imul_ex);
1018FASTOP1SRC2EX(div, div_ex);
1019FASTOP1SRC2EX(idiv, idiv_ex);
1020
1021FASTOP3WCL(shld);
1022FASTOP3WCL(shrd);
1023
1024FASTOP2W(imul);
1025
1026FASTOP1(not);
1027FASTOP1(neg);
1028FASTOP1(inc);
1029FASTOP1(dec);
1030
1031FASTOP2CL(rol);
1032FASTOP2CL(ror);
1033FASTOP2CL(rcl);
1034FASTOP2CL(rcr);
1035FASTOP2CL(shl);
1036FASTOP2CL(shr);
1037FASTOP2CL(sar);
1038
1039FASTOP2W(bsf);
1040FASTOP2W(bsr);
1041FASTOP2W(bt);
1042FASTOP2W(bts);
1043FASTOP2W(btr);
1044FASTOP2W(btc);
1045
1046FASTOP2(xadd);
1047
1048FASTOP2R(cmp, cmp_r);
1049
1050static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1051{
1052	/* If src is zero, do not writeback, but update flags */
1053	if (ctxt->src.val == 0)
1054		ctxt->dst.type = OP_NONE;
1055	return fastop(ctxt, em_bsf);
1056}
1057
1058static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1059{
1060	/* If src is zero, do not writeback, but update flags */
1061	if (ctxt->src.val == 0)
1062		ctxt->dst.type = OP_NONE;
1063	return fastop(ctxt, em_bsr);
1064}
1065
1066static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1067{
1068	u8 rc;
1069	void (*fop)(void) = (void *)em_setcc + FASTOP_SIZE * (condition & 0xf);
1070
1071	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1072	asm("push %[flags]; popf; " CALL_NOSPEC
1073	    : "=a"(rc), ASM_CALL_CONSTRAINT : [thunk_target]"r"(fop), [flags]"r"(flags));
1074	return rc;
1075}
1076
1077static void fetch_register_operand(struct operand *op)
1078{
1079	switch (op->bytes) {
1080	case 1:
1081		op->val = *(u8 *)op->addr.reg;
1082		break;
1083	case 2:
1084		op->val = *(u16 *)op->addr.reg;
1085		break;
1086	case 4:
1087		op->val = *(u32 *)op->addr.reg;
1088		break;
1089	case 8:
1090		op->val = *(u64 *)op->addr.reg;
1091		break;
1092	}
1093}
1094
1095static int em_fninit(struct x86_emulate_ctxt *ctxt)
1096{
1097	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1098		return emulate_nm(ctxt);
1099
1100	kvm_fpu_get();
1101	asm volatile("fninit");
1102	kvm_fpu_put();
1103	return X86EMUL_CONTINUE;
1104}
1105
1106static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1107{
1108	u16 fcw;
1109
1110	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1111		return emulate_nm(ctxt);
1112
1113	kvm_fpu_get();
1114	asm volatile("fnstcw %0": "+m"(fcw));
1115	kvm_fpu_put();
1116
1117	ctxt->dst.val = fcw;
1118
1119	return X86EMUL_CONTINUE;
1120}
1121
1122static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1123{
1124	u16 fsw;
1125
1126	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1127		return emulate_nm(ctxt);
1128
1129	kvm_fpu_get();
1130	asm volatile("fnstsw %0": "+m"(fsw));
1131	kvm_fpu_put();
1132
1133	ctxt->dst.val = fsw;
1134
1135	return X86EMUL_CONTINUE;
1136}
1137
1138static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1139				    struct operand *op)
1140{
1141	unsigned int reg;
1142
1143	if (ctxt->d & ModRM)
1144		reg = ctxt->modrm_reg;
1145	else
1146		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1147
1148	if (ctxt->d & Sse) {
1149		op->type = OP_XMM;
1150		op->bytes = 16;
1151		op->addr.xmm = reg;
1152		kvm_read_sse_reg(reg, &op->vec_val);
1153		return;
1154	}
1155	if (ctxt->d & Mmx) {
1156		reg &= 7;
1157		op->type = OP_MM;
1158		op->bytes = 8;
1159		op->addr.mm = reg;
1160		return;
1161	}
1162
1163	op->type = OP_REG;
1164	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1165	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1166
1167	fetch_register_operand(op);
1168	op->orig_val = op->val;
1169}
1170
1171static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1172{
1173	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1174		ctxt->modrm_seg = VCPU_SREG_SS;
1175}
1176
1177static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1178			struct operand *op)
1179{
1180	u8 sib;
1181	int index_reg, base_reg, scale;
1182	int rc = X86EMUL_CONTINUE;
1183	ulong modrm_ea = 0;
1184
1185	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1186	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1187	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1188
1189	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1190	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1191	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1192	ctxt->modrm_seg = VCPU_SREG_DS;
1193
1194	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1195		op->type = OP_REG;
1196		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1197		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1198				ctxt->d & ByteOp);
1199		if (ctxt->d & Sse) {
1200			op->type = OP_XMM;
1201			op->bytes = 16;
1202			op->addr.xmm = ctxt->modrm_rm;
1203			kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
1204			return rc;
1205		}
1206		if (ctxt->d & Mmx) {
1207			op->type = OP_MM;
1208			op->bytes = 8;
1209			op->addr.mm = ctxt->modrm_rm & 7;
1210			return rc;
1211		}
1212		fetch_register_operand(op);
1213		return rc;
1214	}
1215
1216	op->type = OP_MEM;
1217
1218	if (ctxt->ad_bytes == 2) {
1219		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1220		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1221		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1222		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1223
1224		/* 16-bit ModR/M decode. */
1225		switch (ctxt->modrm_mod) {
1226		case 0:
1227			if (ctxt->modrm_rm == 6)
1228				modrm_ea += insn_fetch(u16, ctxt);
1229			break;
1230		case 1:
1231			modrm_ea += insn_fetch(s8, ctxt);
1232			break;
1233		case 2:
1234			modrm_ea += insn_fetch(u16, ctxt);
1235			break;
1236		}
1237		switch (ctxt->modrm_rm) {
1238		case 0:
1239			modrm_ea += bx + si;
1240			break;
1241		case 1:
1242			modrm_ea += bx + di;
1243			break;
1244		case 2:
1245			modrm_ea += bp + si;
1246			break;
1247		case 3:
1248			modrm_ea += bp + di;
1249			break;
1250		case 4:
1251			modrm_ea += si;
1252			break;
1253		case 5:
1254			modrm_ea += di;
1255			break;
1256		case 6:
1257			if (ctxt->modrm_mod != 0)
1258				modrm_ea += bp;
1259			break;
1260		case 7:
1261			modrm_ea += bx;
1262			break;
1263		}
1264		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1265		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1266			ctxt->modrm_seg = VCPU_SREG_SS;
1267		modrm_ea = (u16)modrm_ea;
1268	} else {
1269		/* 32/64-bit ModR/M decode. */
1270		if ((ctxt->modrm_rm & 7) == 4) {
1271			sib = insn_fetch(u8, ctxt);
1272			index_reg |= (sib >> 3) & 7;
1273			base_reg |= sib & 7;
1274			scale = sib >> 6;
1275
1276			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1277				modrm_ea += insn_fetch(s32, ctxt);
1278			else {
1279				modrm_ea += reg_read(ctxt, base_reg);
1280				adjust_modrm_seg(ctxt, base_reg);
1281				/* Increment ESP on POP [ESP] */
1282				if ((ctxt->d & IncSP) &&
1283				    base_reg == VCPU_REGS_RSP)
1284					modrm_ea += ctxt->op_bytes;
1285			}
1286			if (index_reg != 4)
1287				modrm_ea += reg_read(ctxt, index_reg) << scale;
1288		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1289			modrm_ea += insn_fetch(s32, ctxt);
1290			if (ctxt->mode == X86EMUL_MODE_PROT64)
1291				ctxt->rip_relative = 1;
1292		} else {
1293			base_reg = ctxt->modrm_rm;
1294			modrm_ea += reg_read(ctxt, base_reg);
1295			adjust_modrm_seg(ctxt, base_reg);
1296		}
1297		switch (ctxt->modrm_mod) {
1298		case 1:
1299			modrm_ea += insn_fetch(s8, ctxt);
1300			break;
1301		case 2:
1302			modrm_ea += insn_fetch(s32, ctxt);
1303			break;
1304		}
1305	}
1306	op->addr.mem.ea = modrm_ea;
1307	if (ctxt->ad_bytes != 8)
1308		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1309
1310done:
1311	return rc;
1312}
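
/*
 * Illustration only: the 16-bit ModR/M effective-address table above,
 * pulled out into a hypothetical stand-alone helper.  For example,
 * mod=1 rm=2 with an 8-bit displacement of -4 yields
 * (BP + SI - 4) & 0xffff, with SS as the default segment.
 */
static u16 ea16(u16 bx, u16 bp, u16 si, u16 di, int rm, s16 disp)
{
	u16 base = 0;

	switch (rm & 7) {
	case 0: base = bx + si; break;
	case 1: base = bx + di; break;
	case 2: base = bp + si; break;	/* default segment is SS */
	case 3: base = bp + di; break;	/* default segment is SS */
	case 4: base = si;      break;
	case 5: base = di;      break;
	case 6: base = bp;      break;	/* for mod=0 this is disp16 instead */
	case 7: base = bx;      break;
	}
	return base + disp;	/* u16 arithmetic wraps at 64K, as real mode does */
}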
1313
1314static int decode_abs(struct x86_emulate_ctxt *ctxt,
1315		      struct operand *op)
1316{
1317	int rc = X86EMUL_CONTINUE;
1318
1319	op->type = OP_MEM;
1320	switch (ctxt->ad_bytes) {
1321	case 2:
1322		op->addr.mem.ea = insn_fetch(u16, ctxt);
1323		break;
1324	case 4:
1325		op->addr.mem.ea = insn_fetch(u32, ctxt);
1326		break;
1327	case 8:
1328		op->addr.mem.ea = insn_fetch(u64, ctxt);
1329		break;
1330	}
1331done:
1332	return rc;
1333}
1334
1335static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1336{
1337	long sv = 0, mask;
1338
1339	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1340		mask = ~((long)ctxt->dst.bytes * 8 - 1);
1341
1342		if (ctxt->src.bytes == 2)
1343			sv = (s16)ctxt->src.val & (s16)mask;
1344		else if (ctxt->src.bytes == 4)
1345			sv = (s32)ctxt->src.val & (s32)mask;
1346		else
1347			sv = (s64)ctxt->src.val & (s64)mask;
1348
1349		ctxt->dst.addr.mem.ea = address_mask(ctxt,
1350					   ctxt->dst.addr.mem.ea + (sv >> 3));
1351	}
1352
1353	/* only subword offset */
1354	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1355}
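
/*
 * Illustration only: how fetch_bit_operand() splits a BT/BTC/BTR/BTS
 * bit offset.  With a 4-byte operand and a source of 100, the EA moves
 * forward (100 & ~31) >> 3 = 12 bytes and the remaining in-word bit
 * index is 100 & 31 = 4.  A hypothetical sketch of the decomposition:
 */
static void split_bit_offset(long bit, unsigned int op_bytes,
			     long *byte_adjust, unsigned int *bit_index)
{
	long mask = (long)op_bytes * 8 - 1;

	*byte_adjust = (bit & ~mask) >> 3;	/* signed: may move backwards */
	*bit_index = bit & mask;		/* subword bit position */
}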
1356
1357static int read_emulated(struct x86_emulate_ctxt *ctxt,
1358			 unsigned long addr, void *dest, unsigned size)
1359{
1360	int rc;
1361	struct read_cache *mc = &ctxt->mem_read;
1362
1363	if (mc->pos < mc->end)
1364		goto read_cached;
1365
1366	if (KVM_EMULATOR_BUG_ON((mc->end + size) >= sizeof(mc->data), ctxt))
1367		return X86EMUL_UNHANDLEABLE;
1368
1369	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1370				      &ctxt->exception);
1371	if (rc != X86EMUL_CONTINUE)
1372		return rc;
1373
1374	mc->end += size;
1375
1376read_cached:
1377	memcpy(dest, mc->data + mc->pos, size);
1378	mc->pos += size;
1379	return X86EMUL_CONTINUE;
1380}
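
/*
 * Illustration only: the replay pattern behind read_emulated().  When
 * emulation restarts (e.g. after exiting to userspace for MMIO), reads
 * already performed must return the same bytes, so they are replayed
 * from the cache rather than re-issued.  A hypothetical minimal form:
 */
struct replay_cache {
	u8 data[1024];
	unsigned int pos;	/* next byte to hand back to the caller */
	unsigned int end;	/* bytes actually fetched so far */
};

static int cached_read(struct replay_cache *c, void *dst, unsigned int size,
		       int (*fetch)(void *dst, unsigned int size))
{
	if (c->pos == c->end) {		/* first pass: really fetch */
		int rc = fetch(c->data + c->end, size);

		if (rc)
			return rc;
		c->end += size;
	}
	memcpy(dst, c->data + c->pos, size);	/* replay (or first copy) */
	c->pos += size;
	return 0;
}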
1381
1382static int segmented_read(struct x86_emulate_ctxt *ctxt,
1383			  struct segmented_address addr,
1384			  void *data,
1385			  unsigned size)
1386{
1387	int rc;
1388	ulong linear;
1389
1390	rc = linearize(ctxt, addr, size, false, &linear);
1391	if (rc != X86EMUL_CONTINUE)
1392		return rc;
1393	return read_emulated(ctxt, linear, data, size);
1394}
1395
1396static int segmented_write(struct x86_emulate_ctxt *ctxt,
1397			   struct segmented_address addr,
1398			   const void *data,
1399			   unsigned size)
1400{
1401	int rc;
1402	ulong linear;
1403
1404	rc = linearize(ctxt, addr, size, true, &linear);
1405	if (rc != X86EMUL_CONTINUE)
1406		return rc;
1407	return ctxt->ops->write_emulated(ctxt, linear, data, size,
1408					 &ctxt->exception);
1409}
1410
1411static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1412			     struct segmented_address addr,
1413			     const void *orig_data, const void *data,
1414			     unsigned size)
1415{
1416	int rc;
1417	ulong linear;
1418
1419	rc = linearize(ctxt, addr, size, true, &linear);
1420	if (rc != X86EMUL_CONTINUE)
1421		return rc;
1422	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1423					   size, &ctxt->exception);
1424}
1425
1426static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1427			   unsigned int size, unsigned short port,
1428			   void *dest)
1429{
1430	struct read_cache *rc = &ctxt->io_read;
1431
1432	if (rc->pos == rc->end) { /* refill pio read ahead */
1433		unsigned int in_page, n;
1434		unsigned int count = ctxt->rep_prefix ?
1435			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1436		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1437			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1438			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1439		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1440		if (n == 0)
1441			n = 1;
1442		rc->pos = rc->end = 0;
1443		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1444			return 0;
1445		rc->end = n * size;
1446	}
1447
1448	if (ctxt->rep_prefix && (ctxt->d & String) &&
1449	    !(ctxt->eflags & X86_EFLAGS_DF)) {
1450		ctxt->dst.data = rc->data + rc->pos;
1451		ctxt->dst.type = OP_MEM_STR;
1452		ctxt->dst.count = (rc->end - rc->pos) / size;
1453		rc->pos = rc->end;
1454	} else {
1455		memcpy(dest, rc->data + rc->pos, size);
1456		rc->pos += size;
1457	}
1458	return 1;
1459}
1460
1461static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1462				     u16 index, struct desc_struct *desc)
1463{
1464	struct desc_ptr dt;
1465	ulong addr;
1466
1467	ctxt->ops->get_idt(ctxt, &dt);
1468
1469	if (dt.size < index * 8 + 7)
1470		return emulate_gp(ctxt, index << 3 | 0x2);
1471
1472	addr = dt.address + index * 8;
1473	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1474}
1475
1476static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1477				     u16 selector, struct desc_ptr *dt)
1478{
1479	const struct x86_emulate_ops *ops = ctxt->ops;
1480	u32 base3 = 0;
1481
1482	if (selector & 1 << 2) {
1483		struct desc_struct desc;
1484		u16 sel;
1485
1486		memset(dt, 0, sizeof(*dt));
1487		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1488				      VCPU_SREG_LDTR))
1489			return;
1490
1491		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1492		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1493	} else
1494		ops->get_gdt(ctxt, dt);
1495}
1496
1497static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1498			      u16 selector, ulong *desc_addr_p)
1499{
1500	struct desc_ptr dt;
1501	u16 index = selector >> 3;
1502	ulong addr;
1503
1504	get_descriptor_table_ptr(ctxt, selector, &dt);
1505
1506	if (dt.size < index * 8 + 7)
1507		return emulate_gp(ctxt, selector & 0xfffc);
1508
1509	addr = dt.address + index * 8;
1510
1511#ifdef CONFIG_X86_64
1512	if (addr >> 32 != 0) {
1513		u64 efer = 0;
1514
1515		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1516		if (!(efer & EFER_LMA))
1517			addr &= (u32)-1;
1518	}
1519#endif
1520
1521	*desc_addr_p = addr;
1522	return X86EMUL_CONTINUE;
1523}
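
/*
 * Illustration only: the segment-selector layout the lookups above
 * rely on.  Bits 15:3 index an 8-byte descriptor slot, bit 2 (TI)
 * selects GDT vs LDT, and bits 1:0 are the requested privilege level;
 * the "index * 8 + 7" limit check verifies that the whole 8-byte
 * descriptor fits in the table.  Hypothetical helpers:
 */
static inline u16 sel_index(u16 sel) { return sel >> 3; }	/* descriptor slot */
static inline bool sel_ti(u16 sel) { return sel & (1 << 2); }	/* 0 = GDT, 1 = LDT */
static inline u8 sel_rpl(u16 sel) { return sel & 3; }		/* requestor privilege */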
1524
1525/* allowed just for 8-byte segments */
1526static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1527				   u16 selector, struct desc_struct *desc,
1528				   ulong *desc_addr_p)
1529{
1530	int rc;
1531
1532	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1533	if (rc != X86EMUL_CONTINUE)
1534		return rc;
1535
1536	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1537}
1538
1539/* allowed just for 8-byte segments */
1540static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1541				    u16 selector, struct desc_struct *desc)
1542{
1543	int rc;
1544	ulong addr;
1545
1546	rc = get_descriptor_ptr(ctxt, selector, &addr);
1547	if (rc != X86EMUL_CONTINUE)
1548		return rc;
1549
1550	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1551}
1552
1553static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1554				     u16 selector, int seg, u8 cpl,
1555				     enum x86_transfer_type transfer,
1556				     struct desc_struct *desc)
1557{
1558	struct desc_struct seg_desc, old_desc;
1559	u8 dpl, rpl;
1560	unsigned err_vec = GP_VECTOR;
1561	u32 err_code = 0;
1562	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1563	ulong desc_addr;
1564	int ret;
1565	u16 dummy;
1566	u32 base3 = 0;
1567
1568	memset(&seg_desc, 0, sizeof(seg_desc));
1569
1570	if (ctxt->mode == X86EMUL_MODE_REAL) {
1571		/* set real mode segment descriptor (keep limit etc. for
1572		 * unreal mode) */
1573		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1574		set_desc_base(&seg_desc, selector << 4);
1575		goto load;
1576	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1577		/* VM86 needs a clean new segment descriptor */
1578		set_desc_base(&seg_desc, selector << 4);
1579		set_desc_limit(&seg_desc, 0xffff);
1580		seg_desc.type = 3;
1581		seg_desc.p = 1;
1582		seg_desc.s = 1;
1583		seg_desc.dpl = 3;
1584		goto load;
1585	}
1586
1587	rpl = selector & 3;
1588
1589	/* TR should be in GDT only */
1590	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1591		goto exception;
1592
1593	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
1594	if (null_selector) {
1595		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1596			goto exception;
1597
1598		if (seg == VCPU_SREG_SS) {
1599			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1600				goto exception;
1601
1602			/*
1603			 * ctxt->ops->set_segment expects the CPL to be in
1604			 * SS.DPL, so fake an expand-up 32-bit data segment.
1605			 */
1606			seg_desc.type = 3;
1607			seg_desc.p = 1;
1608			seg_desc.s = 1;
1609			seg_desc.dpl = cpl;
1610			seg_desc.d = 1;
1611			seg_desc.g = 1;
1612		}
1613
1614		/* Skip all following checks */
1615		goto load;
1616	}
1617
1618	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1619	if (ret != X86EMUL_CONTINUE)
1620		return ret;
1621
1622	err_code = selector & 0xfffc;
1623	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1624							   GP_VECTOR;
1625
1626	/* can't load system descriptor into segment selector */
1627	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1628		if (transfer == X86_TRANSFER_CALL_JMP)
1629			return X86EMUL_UNHANDLEABLE;
1630		goto exception;
1631	}
1632
1633	dpl = seg_desc.dpl;
1634
1635	switch (seg) {
1636	case VCPU_SREG_SS:
1637		/*
1638		 * segment is not a writable data segment or segment
1639		 * selector's RPL != CPL or DPL != CPL
1640		 */
1641		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1642			goto exception;
1643		break;
1644	case VCPU_SREG_CS:
1645		/*
1646		 * KVM uses "none" when loading CS as part of emulating Real
1647		 * Mode exceptions and IRET (handled above).  In all other
1648		 * cases, loading CS without a control transfer is a KVM bug.
1649		 */
1650		if (WARN_ON_ONCE(transfer == X86_TRANSFER_NONE))
1651			goto exception;
1652
1653		if (!(seg_desc.type & 8))
1654			goto exception;
1655
1656		if (transfer == X86_TRANSFER_RET) {
1657			/* RET can never return to an inner privilege level. */
1658			if (rpl < cpl)
1659				goto exception;
1660			/* Outer-privilege level return is not implemented */
1661			if (rpl > cpl)
1662				return X86EMUL_UNHANDLEABLE;
1663		}
1664		if (transfer == X86_TRANSFER_RET || transfer == X86_TRANSFER_TASK_SWITCH) {
1665			if (seg_desc.type & 4) {
1666				/* conforming */
1667				if (dpl > rpl)
1668					goto exception;
1669			} else {
1670				/* nonconforming */
1671				if (dpl != rpl)
1672					goto exception;
1673			}
1674		} else { /* X86_TRANSFER_CALL_JMP */
1675			if (seg_desc.type & 4) {
1676				/* conforming */
1677				if (dpl > cpl)
1678					goto exception;
1679			} else {
1680				/* nonconforming */
1681				if (rpl > cpl || dpl != cpl)
1682					goto exception;
1683			}
1684		}
1685		/* in long-mode d/b must be clear if l is set */
1686		if (seg_desc.d && seg_desc.l) {
1687			u64 efer = 0;
1688
1689			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1690			if (efer & EFER_LMA)
1691				goto exception;
1692		}
1693
1694		/* CS(RPL) <- CPL */
1695		selector = (selector & 0xfffc) | cpl;
1696		break;
1697	case VCPU_SREG_TR:
1698		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1699			goto exception;
1700		break;
1701	case VCPU_SREG_LDTR:
1702		if (seg_desc.s || seg_desc.type != 2)
1703			goto exception;
1704		break;
1705	default: /*  DS, ES, FS, or GS */
1706		/*
1707		 * segment is not a data or readable code segment or
1708		 * ((segment is a data or nonconforming code segment)
1709		 * and ((RPL > DPL) or (CPL > DPL)))
1710		 */
1711		if ((seg_desc.type & 0xa) == 0x8 ||
1712		    (((seg_desc.type & 0xc) != 0xc) &&
1713		     (rpl > dpl || cpl > dpl)))
1714			goto exception;
1715		break;
1716	}
1717
1718	if (!seg_desc.p) {
1719		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1720		goto exception;
1721	}
1722
1723	if (seg_desc.s) {
1724		/* mark segment as accessed */
1725		if (!(seg_desc.type & 1)) {
1726			seg_desc.type |= 1;
1727			ret = write_segment_descriptor(ctxt, selector,
1728						       &seg_desc);
1729			if (ret != X86EMUL_CONTINUE)
1730				return ret;
1731		}
1732	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1733		ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1734		if (ret != X86EMUL_CONTINUE)
1735			return ret;
1736		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1737						 ((u64)base3 << 32), ctxt,
1738						 X86EMUL_F_DT_LOAD))
1739			return emulate_gp(ctxt, err_code);
1740	}
1741
1742	if (seg == VCPU_SREG_TR) {
1743		old_desc = seg_desc;
1744		seg_desc.type |= 2; /* busy */
1745		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1746						  sizeof(seg_desc), &ctxt->exception);
1747		if (ret != X86EMUL_CONTINUE)
1748			return ret;
1749	}
1750load:
1751	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1752	if (desc)
1753		*desc = seg_desc;
1754	return X86EMUL_CONTINUE;
1755exception:
1756	return emulate_exception(ctxt, err_vec, err_code, true);
1757}
1758
1759static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1760				   u16 selector, int seg)
1761{
1762	u8 cpl = ctxt->ops->cpl(ctxt);
1763
1764	/*
1765	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1766	 * they can load it at CPL<3 (Intel's manual says only LSS can,
1767	 * but it's wrong).
1768	 *
1769	 * However, the Intel manual says that putting IST=1/DPL=3 in
1770	 * an interrupt gate will result in SS=3 (the AMD manual instead
1771	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1772	 * and only forbid it here.
1773	 */
1774	if (seg == VCPU_SREG_SS && selector == 3 &&
1775	    ctxt->mode == X86EMUL_MODE_PROT64)
1776		return emulate_exception(ctxt, GP_VECTOR, 0, true);
1777
1778	return __load_segment_descriptor(ctxt, selector, seg, cpl,
1779					 X86_TRANSFER_NONE, NULL);
1780}
1781
1782static void write_register_operand(struct operand *op)
1783{
1784	return assign_register(op->addr.reg, op->val, op->bytes);
1785}
1786
1787static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1788{
1789	switch (op->type) {
1790	case OP_REG:
1791		write_register_operand(op);
1792		break;
1793	case OP_MEM:
1794		if (ctxt->lock_prefix)
1795			return segmented_cmpxchg(ctxt,
1796						 op->addr.mem,
1797						 &op->orig_val,
1798						 &op->val,
1799						 op->bytes);
1800		else
1801			return segmented_write(ctxt,
1802					       op->addr.mem,
1803					       &op->val,
1804					       op->bytes);
1805	case OP_MEM_STR:
1806		return segmented_write(ctxt,
1807				       op->addr.mem,
1808				       op->data,
1809				       op->bytes * op->count);
1810	case OP_XMM:
1811		kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
1812		break;
1813	case OP_MM:
1814		kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
1815		break;
1816	case OP_NONE:
1817		/* no writeback */
1818		break;
1819	default:
1820		break;
1821	}
1822	return X86EMUL_CONTINUE;
1823}
1824
1825static int emulate_push(struct x86_emulate_ctxt *ctxt, const void *data, int len)
1826{
1827	struct segmented_address addr;
1828
1829	rsp_increment(ctxt, -len);
1830	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1831	addr.seg = VCPU_SREG_SS;
1832
1833	return segmented_write(ctxt, addr, data, len);
1834}
1835
1836static int em_push(struct x86_emulate_ctxt *ctxt)
1837{
1838	/* Disable writeback. */
1839	ctxt->dst.type = OP_NONE;
1840	return emulate_push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1841}
1842
1843static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1844		       void *dest, int len)
1845{
1846	int rc;
1847	struct segmented_address addr;
1848
1849	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1850	addr.seg = VCPU_SREG_SS;
1851	rc = segmented_read(ctxt, addr, dest, len);
1852	if (rc != X86EMUL_CONTINUE)
1853		return rc;
1854
1855	rsp_increment(ctxt, len);
1856	return rc;
1857}
1858
1859static int em_pop(struct x86_emulate_ctxt *ctxt)
1860{
1861	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1862}
1863
1864static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1865			void *dest, int len)
1866{
1867	int rc;
1868	unsigned long val = 0;
1869	unsigned long change_mask;
1870	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1871	int cpl = ctxt->ops->cpl(ctxt);
1872
1873	rc = emulate_pop(ctxt, &val, len);
1874	if (rc != X86EMUL_CONTINUE)
1875		return rc;
1876
1877	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1878		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1879		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1880		      X86_EFLAGS_AC | X86_EFLAGS_ID;
1881
1882	switch(ctxt->mode) {
1883	case X86EMUL_MODE_PROT64:
1884	case X86EMUL_MODE_PROT32:
1885	case X86EMUL_MODE_PROT16:
1886		if (cpl == 0)
1887			change_mask |= X86_EFLAGS_IOPL;
1888		if (cpl <= iopl)
1889			change_mask |= X86_EFLAGS_IF;
1890		break;
1891	case X86EMUL_MODE_VM86:
1892		if (iopl < 3)
1893			return emulate_gp(ctxt, 0);
1894		change_mask |= X86_EFLAGS_IF;
1895		break;
1896	default: /* real mode */
1897		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1898		break;
1899	}
1900
1901	*(unsigned long *)dest =
1902		(ctxt->eflags & ~change_mask) | (val & change_mask);
1903
1904	return rc;
1905}
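
/*
 * Illustration only: the merge step above in isolation.  At CPL 3 with
 * IOPL 0 in protected mode, POPF silently preserves IF and IOPL rather
 * than faulting; only the bits in change_mask are taken from the
 * popped value.  A hypothetical sketch:
 */
static unsigned long popf_merge(unsigned long old_flags, unsigned long popped,
				unsigned long change_mask)
{
	/* protected bits come from old_flags, the rest from the stack image */
	return (old_flags & ~change_mask) | (popped & change_mask);
}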
1906
1907static int em_popf(struct x86_emulate_ctxt *ctxt)
1908{
1909	ctxt->dst.type = OP_REG;
1910	ctxt->dst.addr.reg = &ctxt->eflags;
1911	ctxt->dst.bytes = ctxt->op_bytes;
1912	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1913}
1914
1915static int em_enter(struct x86_emulate_ctxt *ctxt)
1916{
1917	int rc;
1918	unsigned frame_size = ctxt->src.val;
1919	unsigned nesting_level = ctxt->src2.val & 31;
1920	ulong rbp;
1921
1922	if (nesting_level)
1923		return X86EMUL_UNHANDLEABLE;
1924
1925	rbp = reg_read(ctxt, VCPU_REGS_RBP);
1926	rc = emulate_push(ctxt, &rbp, stack_size(ctxt));
1927	if (rc != X86EMUL_CONTINUE)
1928		return rc;
1929	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1930		      stack_mask(ctxt));
1931	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1932		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1933		      stack_mask(ctxt));
1934	return X86EMUL_CONTINUE;
1935}
1936
1937static int em_leave(struct x86_emulate_ctxt *ctxt)
1938{
1939	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1940		      stack_mask(ctxt));
1941	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1942}
1943
1944static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1945{
1946	int seg = ctxt->src2.val;
1947
1948	ctxt->src.val = get_segment_selector(ctxt, seg);
1949	if (ctxt->op_bytes == 4) {
1950		rsp_increment(ctxt, -2);
1951		ctxt->op_bytes = 2;
1952	}
1953
1954	return em_push(ctxt);
1955}
1956
1957static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1958{
1959	int seg = ctxt->src2.val;
1960	unsigned long selector = 0;
1961	int rc;
1962
1963	rc = emulate_pop(ctxt, &selector, 2);
1964	if (rc != X86EMUL_CONTINUE)
1965		return rc;
1966
1967	if (seg == VCPU_SREG_SS)
1968		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1969	if (ctxt->op_bytes > 2)
1970		rsp_increment(ctxt, ctxt->op_bytes - 2);
1971
1972	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1973	return rc;
1974}
1975
1976static int em_pusha(struct x86_emulate_ctxt *ctxt)
1977{
1978	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1979	int rc = X86EMUL_CONTINUE;
1980	int reg = VCPU_REGS_RAX;
1981
1982	while (reg <= VCPU_REGS_RDI) {
1983		ctxt->src.val = (reg == VCPU_REGS_RSP) ?
1984				old_esp : reg_read(ctxt, reg);
1985
1986		rc = em_push(ctxt);
1987		if (rc != X86EMUL_CONTINUE)
1988			return rc;
1989
1990		++reg;
1991	}
1992
1993	return rc;
1994}
1995
1996static int em_pushf(struct x86_emulate_ctxt *ctxt)
1997{
1998	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
1999	return em_push(ctxt);
2000}
2001
2002static int em_popa(struct x86_emulate_ctxt *ctxt)
2003{
2004	int rc = X86EMUL_CONTINUE;
2005	int reg = VCPU_REGS_RDI;
2006	u32 val = 0;
2007
2008	while (reg >= VCPU_REGS_RAX) {
2009		if (reg == VCPU_REGS_RSP) {
2010			rsp_increment(ctxt, ctxt->op_bytes);
2011			--reg;
2012		}
2013
2014		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2015		if (rc != X86EMUL_CONTINUE)
2016			break;
2017		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2018		--reg;
2019	}
2020	return rc;
2021}
2022
2023static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2024{
2025	const struct x86_emulate_ops *ops = ctxt->ops;
2026	int rc;
2027	struct desc_ptr dt;
2028	gva_t cs_addr;
2029	gva_t eip_addr;
2030	u16 cs, eip;
2031
2032	/* TODO: Add limit checks */
2033	ctxt->src.val = ctxt->eflags;
2034	rc = em_push(ctxt);
2035	if (rc != X86EMUL_CONTINUE)
2036		return rc;
2037
2038	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2039
2040	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2041	rc = em_push(ctxt);
2042	if (rc != X86EMUL_CONTINUE)
2043		return rc;
2044
2045	ctxt->src.val = ctxt->_eip;
2046	rc = em_push(ctxt);
2047	if (rc != X86EMUL_CONTINUE)
2048		return rc;
2049
2050	ops->get_idt(ctxt, &dt);
2051
2052	eip_addr = dt.address + (irq << 2);
2053	cs_addr = dt.address + (irq << 2) + 2;
2054
2055	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2056	if (rc != X86EMUL_CONTINUE)
2057		return rc;
2058
2059	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2060	if (rc != X86EMUL_CONTINUE)
2061		return rc;
2062
2063	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2064	if (rc != X86EMUL_CONTINUE)
2065		return rc;
2066
2067	ctxt->_eip = eip;
2068
2069	return rc;
2070}
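
/*
 * Illustration only: the real-mode IVT layout assumed above.  Each
 * vector is four bytes at idt.address + irq * 4: a 16-bit IP followed
 * by a 16-bit CS.  E.g. with the IVT at 0, INT 0x10 reads IP from
 * linear address 0x40 and CS from 0x42.  Hypothetical helper:
 */
static void ivt_entry_addr(ulong ivt_base, int irq, gva_t *ip_addr, gva_t *cs_addr)
{
	*ip_addr = ivt_base + (irq << 2);	/* offset word */
	*cs_addr = *ip_addr + 2;		/* segment word */
}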
2071
2072int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2073{
2074	int rc;
2075
2076	invalidate_registers(ctxt);
2077	rc = __emulate_int_real(ctxt, irq);
2078	if (rc == X86EMUL_CONTINUE)
2079		writeback_registers(ctxt);
2080	return rc;
2081}
2082
2083static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2084{
2085	switch(ctxt->mode) {
2086	case X86EMUL_MODE_REAL:
2087		return __emulate_int_real(ctxt, irq);
2088	case X86EMUL_MODE_VM86:
2089	case X86EMUL_MODE_PROT16:
2090	case X86EMUL_MODE_PROT32:
2091	case X86EMUL_MODE_PROT64:
2092	default:
2093		/* Protected mode interrupts are not yet implemented */
2094		return X86EMUL_UNHANDLEABLE;
2095	}
2096}
2097
2098static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2099{
2100	int rc = X86EMUL_CONTINUE;
2101	unsigned long temp_eip = 0;
2102	unsigned long temp_eflags = 0;
2103	unsigned long cs = 0;
2104	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2105			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2106			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2107			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2108			     X86_EFLAGS_AC | X86_EFLAGS_ID |
2109			     X86_EFLAGS_FIXED;
2110	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2111				  X86_EFLAGS_VIP;
2112
2113	/* TODO: Add stack limit check */
2114
2115	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2116
2117	if (rc != X86EMUL_CONTINUE)
2118		return rc;
2119
2120	if (temp_eip & ~0xffff)
2121		return emulate_gp(ctxt, 0);
2122
2123	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2124
2125	if (rc != X86EMUL_CONTINUE)
2126		return rc;
2127
2128	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2129
2130	if (rc != X86EMUL_CONTINUE)
2131		return rc;
2132
2133	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2134
2135	if (rc != X86EMUL_CONTINUE)
2136		return rc;
2137
2138	ctxt->_eip = temp_eip;
2139
2140	if (ctxt->op_bytes == 4)
2141		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2142	else if (ctxt->op_bytes == 2) {
2143		ctxt->eflags &= ~0xffff;
2144		ctxt->eflags |= temp_eflags;
2145	}
2146
2147	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2148	ctxt->eflags |= X86_EFLAGS_FIXED;
2149	ctxt->ops->set_nmi_mask(ctxt, false);
2150
2151	return rc;
2152}
2153
2154static int em_iret(struct x86_emulate_ctxt *ctxt)
2155{
2156	switch(ctxt->mode) {
2157	case X86EMUL_MODE_REAL:
2158		return emulate_iret_real(ctxt);
2159	case X86EMUL_MODE_VM86:
2160	case X86EMUL_MODE_PROT16:
2161	case X86EMUL_MODE_PROT32:
2162	case X86EMUL_MODE_PROT64:
2163	default:
2164		/* IRET from protected mode is not yet implemented */
2165		return X86EMUL_UNHANDLEABLE;
2166	}
2167}
2168
2169static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2170{
2171	int rc;
2172	unsigned short sel;
2173	struct desc_struct new_desc;
2174	u8 cpl = ctxt->ops->cpl(ctxt);
2175
2176	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2177
2178	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2179				       X86_TRANSFER_CALL_JMP,
2180				       &new_desc);
2181	if (rc != X86EMUL_CONTINUE)
2182		return rc;
2183
2184	rc = assign_eip_far(ctxt, ctxt->src.val);
2185	/* Error handling is not implemented. */
2186	if (rc != X86EMUL_CONTINUE)
2187		return X86EMUL_UNHANDLEABLE;
2188
2189	return rc;
2190}
2191
2192static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2193{
2194	return assign_eip_near(ctxt, ctxt->src.val);
2195}
2196
2197static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2198{
2199	int rc;
2200	long int old_eip;
2201
2202	old_eip = ctxt->_eip;
2203	rc = assign_eip_near(ctxt, ctxt->src.val);
2204	if (rc != X86EMUL_CONTINUE)
2205		return rc;
2206	ctxt->src.val = old_eip;
2207	rc = em_push(ctxt);
2208	return rc;
2209}
2210
2211static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2212{
2213	u64 old = ctxt->dst.orig_val64;
2214
2215	if (ctxt->dst.bytes == 16)
2216		return X86EMUL_UNHANDLEABLE;
2217
2218	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2219	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2220		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2221		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2222		ctxt->eflags &= ~X86_EFLAGS_ZF;
2223	} else {
2224		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2225			(u32) reg_read(ctxt, VCPU_REGS_RBX);
2226
2227		ctxt->eflags |= X86_EFLAGS_ZF;
2228	}
2229	return X86EMUL_CONTINUE;
2230}
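
/*
 * Illustration only: CMPXCHG8B semantics as implemented above, in
 * stand-alone form (atomicity ignored; the real instruction is used
 * with LOCK).  The 64-bit memory operand is compared with EDX:EAX; on
 * a match ECX:EBX is stored and ZF is set, otherwise the old value is
 * loaded into EDX:EAX and ZF is cleared.  Hypothetical model:
 */
static bool cmpxchg8b_model(u64 *mem, u32 *eax, u32 *edx, u32 ebx, u32 ecx)
{
	u64 expected = ((u64)*edx << 32) | *eax;

	if (*mem == expected) {
		*mem = ((u64)ecx << 32) | ebx;	/* ZF = 1 */
		return true;
	}
	*eax = (u32)*mem;			/* ZF = 0 */
	*edx = (u32)(*mem >> 32);
	return false;
}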
2231
2232static int em_ret(struct x86_emulate_ctxt *ctxt)
2233{
2234	int rc;
2235	unsigned long eip = 0;
2236
2237	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2238	if (rc != X86EMUL_CONTINUE)
2239		return rc;
2240
2241	return assign_eip_near(ctxt, eip);
2242}
2243
2244static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2245{
2246	int rc;
2247	unsigned long eip = 0;
2248	unsigned long cs = 0;
2249	int cpl = ctxt->ops->cpl(ctxt);
2250	struct desc_struct new_desc;
2251
2252	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2253	if (rc != X86EMUL_CONTINUE)
2254		return rc;
2255	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2256	if (rc != X86EMUL_CONTINUE)
2257		return rc;
2258	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2259				       X86_TRANSFER_RET,
2260				       &new_desc);
2261	if (rc != X86EMUL_CONTINUE)
2262		return rc;
2263	rc = assign_eip_far(ctxt, eip);
2264	/* Error handling is not implemented. */
2265	if (rc != X86EMUL_CONTINUE)
2266		return X86EMUL_UNHANDLEABLE;
2267
2268	return rc;
2269}
2270
2271static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2272{
2273	int rc;
2274
2275	rc = em_ret_far(ctxt);
2276	if (rc != X86EMUL_CONTINUE)
2277		return rc;
2278	rsp_increment(ctxt, ctxt->src.val);
2279	return X86EMUL_CONTINUE;
2280}
2281
2282static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2283{
2284	/* Save real source value, then compare EAX against destination. */
2285	ctxt->dst.orig_val = ctxt->dst.val;
2286	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2287	ctxt->src.orig_val = ctxt->src.val;
2288	ctxt->src.val = ctxt->dst.orig_val;
2289	fastop(ctxt, em_cmp);
2290
2291	if (ctxt->eflags & X86_EFLAGS_ZF) {
2292		/* Success: write back to memory; no update of EAX */
2293		ctxt->src.type = OP_NONE;
2294		ctxt->dst.val = ctxt->src.orig_val;
2295	} else {
2296		/* Failure: write the value we saw to EAX. */
2297		ctxt->src.type = OP_REG;
2298		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2299		ctxt->src.val = ctxt->dst.orig_val;
2300		/* Create write-cycle to dest by writing the same value */
2301		ctxt->dst.val = ctxt->dst.orig_val;
2302	}
2303	return X86EMUL_CONTINUE;
2304}
2305
2306static int em_lseg(struct x86_emulate_ctxt *ctxt)
2307{
2308	int seg = ctxt->src2.val;
2309	unsigned short sel;
2310	int rc;
2311
2312	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2313
2314	rc = load_segment_descriptor(ctxt, sel, seg);
2315	if (rc != X86EMUL_CONTINUE)
2316		return rc;
2317
2318	ctxt->dst.val = ctxt->src.val;
2319	return rc;
2320}
2321
2322static int em_rsm(struct x86_emulate_ctxt *ctxt)
2323{
2324	if (!ctxt->ops->is_smm(ctxt))
2325		return emulate_ud(ctxt);
2326
2327	if (ctxt->ops->leave_smm(ctxt))
2328		ctxt->ops->triple_fault(ctxt);
2329
2330	return emulator_recalc_and_set_mode(ctxt);
2331}
2332
2333static void
2334setup_syscalls_segments(struct desc_struct *cs, struct desc_struct *ss)
2335{
2336	cs->l = 0;		/* will be adjusted later */
2337	set_desc_base(cs, 0);	/* flat segment */
2338	cs->g = 1;		/* 4kb granularity */
2339	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
2340	cs->type = 0x0b;	/* Read, Execute, Accessed */
2341	cs->s = 1;
2342	cs->dpl = 0;		/* will be adjusted later */
2343	cs->p = 1;
2344	cs->d = 1;
2345	cs->avl = 0;
2346
2347	set_desc_base(ss, 0);	/* flat segment */
2348	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
2349	ss->g = 1;		/* 4kb granularity */
2350	ss->s = 1;
2351	ss->type = 0x03;	/* Read/Write, Accessed */
2352	ss->d = 1;		/* 32bit stack segment */
2353	ss->dpl = 0;
2354	ss->p = 1;
2355	ss->l = 0;
2356	ss->avl = 0;
2357}
2358
2359static int em_syscall(struct x86_emulate_ctxt *ctxt)
2360{
2361	const struct x86_emulate_ops *ops = ctxt->ops;
2362	struct desc_struct cs, ss;
2363	u64 msr_data;
2364	u16 cs_sel, ss_sel;
2365	u64 efer = 0;
2366
2367	/* syscall is not available in real mode */
2368	if (ctxt->mode == X86EMUL_MODE_REAL ||
2369	    ctxt->mode == X86EMUL_MODE_VM86)
2370		return emulate_ud(ctxt);
2371
2372	/*
2373	 * Intel compatible CPUs only support SYSCALL in 64-bit mode, whereas
2374	 * AMD allows SYSCALL in any flavor of protected mode.  Note, it's
2375	 * infeasible to emulate Intel behavior when running on AMD hardware,
2376	 * as SYSCALL won't fault in the "wrong" mode, i.e. there is no #UD
2377	 * for KVM to trap-and-emulate, unlike emulating AMD on Intel.
2378	 */
2379	if (ctxt->mode != X86EMUL_MODE_PROT64 &&
2380	    ctxt->ops->guest_cpuid_is_intel_compatible(ctxt))
2381		return emulate_ud(ctxt);
2382
2383	ops->get_msr(ctxt, MSR_EFER, &efer);
2384	if (!(efer & EFER_SCE))
2385		return emulate_ud(ctxt);
2386
2387	setup_syscalls_segments(&cs, &ss);
2388	ops->get_msr(ctxt, MSR_STAR, &msr_data);
2389	msr_data >>= 32;
2390	cs_sel = (u16)(msr_data & 0xfffc);
2391	ss_sel = (u16)(msr_data + 8);
2392
2393	if (efer & EFER_LMA) {
2394		cs.d = 0;
2395		cs.l = 1;
2396	}
2397	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2398	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2399
2400	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2401	if (efer & EFER_LMA) {
2402#ifdef CONFIG_X86_64
2403		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2404
2405		ops->get_msr(ctxt,
2406			     ctxt->mode == X86EMUL_MODE_PROT64 ?
2407			     MSR_LSTAR : MSR_CSTAR, &msr_data);
2408		ctxt->_eip = msr_data;
2409
2410		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2411		ctxt->eflags &= ~msr_data;
2412		ctxt->eflags |= X86_EFLAGS_FIXED;
2413#endif
2414	} else {
2415		/* legacy mode */
2416		ops->get_msr(ctxt, MSR_STAR, &msr_data);
2417		ctxt->_eip = (u32)msr_data;
2418
2419		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2420	}
2421
2422	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2423	return X86EMUL_CONTINUE;
2424}
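
/*
 * Illustration only: the MSR_STAR layout consumed above.  Bits 47:32
 * hold the SYSCALL CS selector (SS is implicitly that value + 8, the
 * next GDT slot), bits 63:48 hold the SYSRET selector base, and bits
 * 31:0 hold the legacy-mode SYSCALL EIP.  Hypothetical decoder:
 */
static void star_selectors(u64 star, u16 *syscall_cs, u16 *syscall_ss)
{
	*syscall_cs = (u16)((star >> 32) & 0xfffc);	/* RPL forced to 0 */
	*syscall_ss = (u16)((star >> 32) + 8);		/* next GDT slot */
}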
2425
2426static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2427{
2428	const struct x86_emulate_ops *ops = ctxt->ops;
2429	struct desc_struct cs, ss;
2430	u64 msr_data;
2431	u16 cs_sel, ss_sel;
2432	u64 efer = 0;
2433
2434	ops->get_msr(ctxt, MSR_EFER, &efer);
2435	/* inject #GP if in real mode */
2436	if (ctxt->mode == X86EMUL_MODE_REAL)
2437		return emulate_gp(ctxt, 0);
2438
2439	/*
2440	 * Intel's architecture allows SYSENTER in compatibility mode, but AMD
2441	 * does not.  Note, AMD does allow SYSENTER in legacy protected mode.
2442	 */
2443	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA) &&
2444	    !ctxt->ops->guest_cpuid_is_intel_compatible(ctxt))
2445		return emulate_ud(ctxt);
2446
2447	/* sysenter/sysexit have not been tested in 64bit mode. */
2448	if (ctxt->mode == X86EMUL_MODE_PROT64)
2449		return X86EMUL_UNHANDLEABLE;
2450
2451	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2452	if ((msr_data & 0xfffc) == 0x0)
2453		return emulate_gp(ctxt, 0);
2454
2455	setup_syscalls_segments(&cs, &ss);
2456	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2457	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2458	ss_sel = cs_sel + 8;
2459	if (efer & EFER_LMA) {
2460		cs.d = 0;
2461		cs.l = 1;
2462	}
2463
2464	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2465	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2466
2467	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2468	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2469
2470	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2471	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2472							      (u32)msr_data;
2473	if (efer & EFER_LMA)
2474		ctxt->mode = X86EMUL_MODE_PROT64;
2475
2476	return X86EMUL_CONTINUE;
2477}
2478
2479static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2480{
2481	const struct x86_emulate_ops *ops = ctxt->ops;
2482	struct desc_struct cs, ss;
2483	u64 msr_data, rcx, rdx;
2484	int usermode;
2485	u16 cs_sel = 0, ss_sel = 0;
2486
2487	/* inject #GP if in real mode or Virtual 8086 mode */
2488	if (ctxt->mode == X86EMUL_MODE_REAL ||
2489	    ctxt->mode == X86EMUL_MODE_VM86)
2490		return emulate_gp(ctxt, 0);
2491
2492	setup_syscalls_segments(&cs, &ss);
2493
2494	if ((ctxt->rex_prefix & 0x8) != 0x0)
2495		usermode = X86EMUL_MODE_PROT64;
2496	else
2497		usermode = X86EMUL_MODE_PROT32;
2498
2499	rcx = reg_read(ctxt, VCPU_REGS_RCX);
2500	rdx = reg_read(ctxt, VCPU_REGS_RDX);
2501
2502	cs.dpl = 3;
2503	ss.dpl = 3;
2504	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2505	switch (usermode) {
2506	case X86EMUL_MODE_PROT32:
2507		cs_sel = (u16)(msr_data + 16);
2508		if ((msr_data & 0xfffc) == 0x0)
2509			return emulate_gp(ctxt, 0);
2510		ss_sel = (u16)(msr_data + 24);
2511		rcx = (u32)rcx;
2512		rdx = (u32)rdx;
2513		break;
2514	case X86EMUL_MODE_PROT64:
2515		cs_sel = (u16)(msr_data + 32);
2516		if (msr_data == 0x0)
2517			return emulate_gp(ctxt, 0);
2518		ss_sel = cs_sel + 8;
2519		cs.d = 0;
2520		cs.l = 1;
2521		if (emul_is_noncanonical_address(rcx, ctxt, 0) ||
2522		    emul_is_noncanonical_address(rdx, ctxt, 0))
2523			return emulate_gp(ctxt, 0);
2524		break;
2525	}
2526	cs_sel |= SEGMENT_RPL_MASK;
2527	ss_sel |= SEGMENT_RPL_MASK;
2528
2529	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2530	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2531
2532	ctxt->_eip = rdx;
2533	ctxt->mode = usermode;
2534	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2535
2536	return X86EMUL_CONTINUE;
2537}
2538
2539static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2540{
2541	int iopl;
2542	if (ctxt->mode == X86EMUL_MODE_REAL)
2543		return false;
2544	if (ctxt->mode == X86EMUL_MODE_VM86)
2545		return true;
2546	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2547	return ctxt->ops->cpl(ctxt) > iopl;
2548}
2549
2550#define VMWARE_PORT_VMPORT	(0x5658)
2551#define VMWARE_PORT_VMRPC	(0x5659)
2552
2553static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2554					    u16 port, u16 len)
2555{
2556	const struct x86_emulate_ops *ops = ctxt->ops;
2557	struct desc_struct tr_seg;
2558	u32 base3;
2559	int r;
2560	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2561	unsigned mask = (1 << len) - 1;
2562	unsigned long base;
2563
2564	/*
2565	 * VMware allows access to these ports even if denied
2566	 * by TSS I/O permission bitmap. Mimic behavior.
2567	 */
2568	if (enable_vmware_backdoor &&
2569	    ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2570		return true;
2571
2572	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2573	if (!tr_seg.p)
2574		return false;
2575	if (desc_limit_scaled(&tr_seg) < 103)
2576		return false;
2577	base = get_desc_base(&tr_seg);
2578#ifdef CONFIG_X86_64
2579	base |= ((u64)base3) << 32;
2580#endif
2581	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2582	if (r != X86EMUL_CONTINUE)
2583		return false;
2584	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2585		return false;
2586	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2587	if (r != X86EMUL_CONTINUE)
2588		return false;
2589	if ((perm >> bit_idx) & mask)
2590		return false;
2591	return true;
2592}
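
/*
 * Illustration only: the TSS I/O-bitmap test above, reduced to its
 * core.  Port N is governed by bit (N % 8) of bitmap byte (N / 8); an
 * access of len bytes is denied if *any* covered bit is set, which is
 * why a 16-bit word is fetched and tested against (1 << len) - 1 for
 * accesses that straddle a byte boundary.  Hypothetical sketch over an
 * in-memory copy of the bitmap:
 */
static bool io_bitmap_allows(const u8 *bitmap, u16 port, u16 len)
{
	u16 perm = bitmap[port / 8] | (bitmap[port / 8 + 1] << 8);

	return !((perm >> (port & 7)) & ((1 << len) - 1));
}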
2593
2594static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2595				  u16 port, u16 len)
2596{
2597	if (ctxt->perm_ok)
2598		return true;
2599
2600	if (emulator_bad_iopl(ctxt))
2601		if (!emulator_io_port_access_allowed(ctxt, port, len))
2602			return false;
2603
2604	ctxt->perm_ok = true;
2605
2606	return true;
2607}
2608
2609static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2610{
2611	/*
2612	 * Intel CPUs mask the counter and pointers in a rather strange
2613	 * manner when ECX is zero, due to REP-string optimizations.
2614	 */
2615#ifdef CONFIG_X86_64
2616	u32 eax, ebx, ecx, edx;
2617
2618	if (ctxt->ad_bytes != 4)
2619		return;
2620
2621	eax = ecx = 0;
2622	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2623	if (!is_guest_vendor_intel(ebx, ecx, edx))
2624		return;
2625
2626	*reg_write(ctxt, VCPU_REGS_RCX) = 0;
2627
2628	switch (ctxt->b) {
2629	case 0xa4:	/* movsb */
2630	case 0xa5:	/* movsd/w */
2631		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2632		fallthrough;
2633	case 0xaa:	/* stosb */
2634	case 0xab:	/* stosd/w */
2635		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2636	}
2637#endif
2638}
2639
2640static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2641				struct tss_segment_16 *tss)
2642{
2643	tss->ip = ctxt->_eip;
2644	tss->flag = ctxt->eflags;
2645	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2646	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2647	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2648	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2649	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2650	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2651	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2652	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2653
2654	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2655	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2656	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2657	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2658	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2659}
2660
2661static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2662				 struct tss_segment_16 *tss)
2663{
2664	int ret;
2665	u8 cpl;
2666
2667	ctxt->_eip = tss->ip;
2668	ctxt->eflags = tss->flag | 2;
2669	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2670	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2671	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2672	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2673	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2674	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2675	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2676	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2677
2678	/*
2679	 * SDM says that segment selectors are loaded before segment
2680	 * descriptors
2681	 */
2682	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2683	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2684	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2685	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2686	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2687
2688	cpl = tss->cs & 3;
2689
2690	/*
2691	 * Now load the segment descriptors.  If a fault happens at this
2692	 * stage, it is handled in the context of the new task.
2693	 */
2694	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2695					X86_TRANSFER_TASK_SWITCH, NULL);
2696	if (ret != X86EMUL_CONTINUE)
2697		return ret;
2698	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2699					X86_TRANSFER_TASK_SWITCH, NULL);
2700	if (ret != X86EMUL_CONTINUE)
2701		return ret;
2702	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2703					X86_TRANSFER_TASK_SWITCH, NULL);
2704	if (ret != X86EMUL_CONTINUE)
2705		return ret;
2706	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2707					X86_TRANSFER_TASK_SWITCH, NULL);
2708	if (ret != X86EMUL_CONTINUE)
2709		return ret;
2710	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2711					X86_TRANSFER_TASK_SWITCH, NULL);
2712	if (ret != X86EMUL_CONTINUE)
2713		return ret;
2714
2715	return X86EMUL_CONTINUE;
2716}
2717
2718static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
2719			  ulong old_tss_base, struct desc_struct *new_desc)
2720{
2721	struct tss_segment_16 tss_seg;
2722	int ret;
2723	u32 new_tss_base = get_desc_base(new_desc);
2724
2725	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2726	if (ret != X86EMUL_CONTINUE)
2727		return ret;
2728
2729	save_state_to_tss16(ctxt, &tss_seg);
2730
2731	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2732	if (ret != X86EMUL_CONTINUE)
2733		return ret;
2734
2735	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
2736	if (ret != X86EMUL_CONTINUE)
2737		return ret;
2738
2739	if (old_tss_sel != 0xffff) {
2740		tss_seg.prev_task_link = old_tss_sel;
2741
2742		ret = linear_write_system(ctxt, new_tss_base,
2743					  &tss_seg.prev_task_link,
2744					  sizeof(tss_seg.prev_task_link));
2745		if (ret != X86EMUL_CONTINUE)
2746			return ret;
2747	}
2748
2749	return load_state_from_tss16(ctxt, &tss_seg);
2750}
2751
2752static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2753				struct tss_segment_32 *tss)
2754{
2755	/* CR3 and the LDT selector are intentionally not saved */
2756	tss->eip = ctxt->_eip;
2757	tss->eflags = ctxt->eflags;
2758	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2759	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2760	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2761	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2762	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2763	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2764	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2765	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2766
2767	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2768	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2769	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2770	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2771	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2772	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2773}
2774
2775static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2776				 struct tss_segment_32 *tss)
2777{
2778	int ret;
2779	u8 cpl;
2780
2781	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2782		return emulate_gp(ctxt, 0);
2783	ctxt->_eip = tss->eip;
2784	ctxt->eflags = tss->eflags | 2;
2785
2786	/* General purpose registers */
2787	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2788	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2789	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2790	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2791	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2792	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2793	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2794	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2795
2796	/*
2797	 * SDM says that segment selectors are loaded before segment
2798	 * descriptors.  This is important because CPL checks will
2799	 * use CS.RPL.
2800	 */
2801	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2802	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2803	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2804	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2805	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2806	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2807	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2808
2809	/*
2810	 * If we're switching between Protected Mode and VM86, we need to make
2811	 * sure to update the mode before loading the segment descriptors so
2812	 * that the selectors are interpreted correctly.
2813	 */
2814	if (ctxt->eflags & X86_EFLAGS_VM) {
2815		ctxt->mode = X86EMUL_MODE_VM86;
2816		cpl = 3;
2817	} else {
2818		ctxt->mode = X86EMUL_MODE_PROT32;
2819		cpl = tss->cs & 3;
2820	}
2821
2822	/*
2823	 * Now load the segment descriptors.  If a fault happens at this
2824	 * stage, it is handled in the context of the new task.
2825	 */
2826	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2827					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
2828	if (ret != X86EMUL_CONTINUE)
2829		return ret;
2830	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2831					X86_TRANSFER_TASK_SWITCH, NULL);
2832	if (ret != X86EMUL_CONTINUE)
2833		return ret;
2834	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2835					X86_TRANSFER_TASK_SWITCH, NULL);
2836	if (ret != X86EMUL_CONTINUE)
2837		return ret;
2838	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2839					X86_TRANSFER_TASK_SWITCH, NULL);
2840	if (ret != X86EMUL_CONTINUE)
2841		return ret;
2842	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2843					X86_TRANSFER_TASK_SWITCH, NULL);
2844	if (ret != X86EMUL_CONTINUE)
2845		return ret;
2846	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2847					X86_TRANSFER_TASK_SWITCH, NULL);
2848	if (ret != X86EMUL_CONTINUE)
2849		return ret;
2850	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2851					X86_TRANSFER_TASK_SWITCH, NULL);
2852
2853	return ret;
2854}
2855
2856static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
2857			  ulong old_tss_base, struct desc_struct *new_desc)
2858{
2859	struct tss_segment_32 tss_seg;
2860	int ret;
2861	u32 new_tss_base = get_desc_base(new_desc);
2862	u32 eip_offset = offsetof(struct tss_segment_32, eip);
2863	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2864
2865	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2866	if (ret != X86EMUL_CONTINUE)
2867		return ret;
2868
2869	save_state_to_tss32(ctxt, &tss_seg);
2870
2871	/* Only GP registers and segment selectors are saved */
2872	ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2873				  ldt_sel_offset - eip_offset);
2874	if (ret != X86EMUL_CONTINUE)
2875		return ret;
2876
2877	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
2878	if (ret != X86EMUL_CONTINUE)
2879		return ret;
2880
2881	if (old_tss_sel != 0xffff) {
2882		tss_seg.prev_task_link = old_tss_sel;
2883
2884		ret = linear_write_system(ctxt, new_tss_base,
2885					  &tss_seg.prev_task_link,
2886					  sizeof(tss_seg.prev_task_link));
2887		if (ret != X86EMUL_CONTINUE)
2888			return ret;
2889	}
2890
2891	return load_state_from_tss32(ctxt, &tss_seg);
2892}
2893
2894static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2895				   u16 tss_selector, int idt_index, int reason,
2896				   bool has_error_code, u32 error_code)
2897{
2898	const struct x86_emulate_ops *ops = ctxt->ops;
2899	struct desc_struct curr_tss_desc, next_tss_desc;
2900	int ret;
2901	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2902	ulong old_tss_base =
2903		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2904	u32 desc_limit;
2905	ulong desc_addr, dr7;
2906
2907	/* FIXME: old_tss_base == ~0 ? */
2908
2909	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2910	if (ret != X86EMUL_CONTINUE)
2911		return ret;
2912	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2913	if (ret != X86EMUL_CONTINUE)
2914		return ret;
2915
2916	/* FIXME: check that next_tss_desc is tss */
2917
2918	/*
2919	 * Check privileges. The three cases are task switch caused by...
2920	 *
2921	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2922	 * 2. Exception/IRQ/iret: No check is performed
2923	 * 3. jmp/call to TSS/task-gate: No check is performed since the
2924	 *    hardware checks it before exiting.
2925	 */
2926	if (reason == TASK_SWITCH_GATE) {
2927		if (idt_index != -1) {
2928			/* Software interrupts */
2929			struct desc_struct task_gate_desc;
2930			int dpl;
2931
2932			ret = read_interrupt_descriptor(ctxt, idt_index,
2933							&task_gate_desc);
2934			if (ret != X86EMUL_CONTINUE)
2935				return ret;
2936
2937			dpl = task_gate_desc.dpl;
2938			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2939				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2940		}
2941	}
2942
2943	desc_limit = desc_limit_scaled(&next_tss_desc);
2944	if (!next_tss_desc.p ||
2945	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2946	     desc_limit < 0x2b)) {
2947		return emulate_ts(ctxt, tss_selector & 0xfffc);
2948	}
2949
2950	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2951		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2952		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2953	}
2954
2955	if (reason == TASK_SWITCH_IRET)
2956		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2957
2958	/* Set the back link to the previous task only if the NT bit is set
2959	   in EFLAGS; note that old_tss_sel is not used after this point. */
2960	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2961		old_tss_sel = 0xffff;
2962
2963	if (next_tss_desc.type & 8)
2964		ret = task_switch_32(ctxt, old_tss_sel, old_tss_base, &next_tss_desc);
2965	else
2966		ret = task_switch_16(ctxt, old_tss_sel,
2967				     old_tss_base, &next_tss_desc);
2968	if (ret != X86EMUL_CONTINUE)
2969		return ret;
2970
2971	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2972		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2973
2974	if (reason != TASK_SWITCH_IRET) {
2975		next_tss_desc.type |= (1 << 1); /* set busy flag */
2976		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2977	}
2978
2979	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
2980	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2981
2982	if (has_error_code) {
2983		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2984		ctxt->lock_prefix = 0;
2985		ctxt->src.val = (unsigned long) error_code;
2986		ret = em_push(ctxt);
2987	}
2988
2989	dr7 = ops->get_dr(ctxt, 7);
2990	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
2991
2992	return ret;
2993}
2994
2995int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2996			 u16 tss_selector, int idt_index, int reason,
2997			 bool has_error_code, u32 error_code)
2998{
2999	int rc;
3000
3001	invalidate_registers(ctxt);
3002	ctxt->_eip = ctxt->eip;
3003	ctxt->dst.type = OP_NONE;
3004
3005	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3006				     has_error_code, error_code);
3007
3008	if (rc == X86EMUL_CONTINUE) {
3009		ctxt->eip = ctxt->_eip;
3010		writeback_registers(ctxt);
3011	}
3012
3013	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3014}
3015
3016static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3017		struct operand *op)
3018{
3019	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3020
3021	register_address_increment(ctxt, reg, df * op->bytes);
3022	op->addr.mem.ea = register_address(ctxt, reg);
3023}
3024
3025static int em_das(struct x86_emulate_ctxt *ctxt)
3026{
3027	u8 al, old_al;
3028	bool af, cf, old_cf;
3029
3030	cf = ctxt->eflags & X86_EFLAGS_CF;
3031	al = ctxt->dst.val;
3032
3033	old_al = al;
3034	old_cf = cf;
3035	cf = false;
3036	af = ctxt->eflags & X86_EFLAGS_AF;
3037	if ((al & 0x0f) > 9 || af) {
3038		al -= 6;
3039		cf = old_cf | (al >= 250);
3040		af = true;
3041	} else {
3042		af = false;
3043	}
3044	if (old_al > 0x99 || old_cf) {
3045		al -= 0x60;
3046		cf = true;
3047	}
3048
3049	ctxt->dst.val = al;
3050	/* Set PF, ZF, SF */
3051	ctxt->src.type = OP_IMM;
3052	ctxt->src.val = 0;
3053	ctxt->src.bytes = 1;
3054	fastop(ctxt, em_or);
3055	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3056	if (cf)
3057		ctxt->eflags |= X86_EFLAGS_CF;
3058	if (af)
3059		ctxt->eflags |= X86_EFLAGS_AF;
3060	return X86EMUL_CONTINUE;
3061}
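
/*
 * Illustration only: a worked DAS example for the code above.  After
 * the packed-BCD subtraction 0x23 - 0x08, AL = 0x1b with AF set; the
 * low nibble 0xb > 9, so 6 is subtracted and AL becomes 0x15, the
 * correct BCD encoding of 23 - 8.  Hypothetical reference model:
 */
static u8 das_model(u8 al, bool *cf, bool *af)
{
	u8 old_al = al;
	bool old_cf = *cf;

	*cf = false;
	if ((al & 0x0f) > 9 || *af) {
		al -= 6;
		*cf = old_cf || al >= 250;	/* subtraction wrapped below 0 */
		*af = true;
	} else {
		*af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		*cf = true;
	}
	return al;
}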
3062
3063static int em_aam(struct x86_emulate_ctxt *ctxt)
3064{
3065	u8 al, ah;
3066
3067	if (ctxt->src.val == 0)
3068		return emulate_de(ctxt);
3069
3070	al = ctxt->dst.val & 0xff;
3071	ah = al / ctxt->src.val;
3072	al %= ctxt->src.val;
3073
3074	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3075
3076	/* Set PF, ZF, SF */
3077	ctxt->src.type = OP_IMM;
3078	ctxt->src.val = 0;
3079	ctxt->src.bytes = 1;
3080	fastop(ctxt, em_or);
3081
3082	return X86EMUL_CONTINUE;
3083}
3084
3085static int em_aad(struct x86_emulate_ctxt *ctxt)
3086{
3087	u8 al = ctxt->dst.val & 0xff;
3088	u8 ah = (ctxt->dst.val >> 8) & 0xff;
3089
3090	al = (al + (ah * ctxt->src.val)) & 0xff;
3091
3092	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3093
3094	/* Set PF, ZF, SF */
3095	ctxt->src.type = OP_IMM;
3096	ctxt->src.val = 0;
3097	ctxt->src.bytes = 1;
3098	fastop(ctxt, em_or);
3099
3100	return X86EMUL_CONTINUE;
3101}
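
/*
 * Illustration only: AAM and AAD (above) are radix conversions with an
 * immediate base, almost always 10.  After MUL leaves AL = 63, AAM
 * splits it into AH:AL = 6:3; before a DIV, AAD folds AH:AL = 6:3 back
 * into AL = 63 and clears AH.  Hypothetical models (AAM with a zero
 * base raises #DE, as em_aam checks):
 */
static void aam_model(u8 *ah, u8 *al, u8 base)
{
	*ah = *al / base;
	*al = *al % base;
}

static void aad_model(u8 *ah, u8 *al, u8 base)
{
	*al += *ah * base;	/* the u8 store truncates to 8 bits */
	*ah = 0;
}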
3102
3103static int em_call(struct x86_emulate_ctxt *ctxt)
3104{
3105	int rc;
3106	long rel = ctxt->src.val;
3107
3108	ctxt->src.val = (unsigned long)ctxt->_eip;
3109	rc = jmp_rel(ctxt, rel);
3110	if (rc != X86EMUL_CONTINUE)
3111		return rc;
3112	return em_push(ctxt);
3113}
3114
3115static int em_call_far(struct x86_emulate_ctxt *ctxt)
3116{
3117	u16 sel, old_cs;
3118	ulong old_eip;
3119	int rc;
3120	struct desc_struct old_desc, new_desc;
3121	const struct x86_emulate_ops *ops = ctxt->ops;
3122	int cpl = ctxt->ops->cpl(ctxt);
3123	enum x86emul_mode prev_mode = ctxt->mode;
3124
3125	old_eip = ctxt->_eip;
3126	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3127
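	/* A far pointer is the offset followed by a 16-bit selector. */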
3128	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3129	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3130				       X86_TRANSFER_CALL_JMP, &new_desc);
3131	if (rc != X86EMUL_CONTINUE)
3132		return rc;
3133
3134	rc = assign_eip_far(ctxt, ctxt->src.val);
3135	if (rc != X86EMUL_CONTINUE)
3136		goto fail;
3137
3138	ctxt->src.val = old_cs;
3139	rc = em_push(ctxt);
3140	if (rc != X86EMUL_CONTINUE)
3141		goto fail;
3142
3143	ctxt->src.val = old_eip;
3144	rc = em_push(ctxt);
	/*
	 * If we failed, we tainted the memory, but at the very least we
	 * should restore CS.
	 */
3147	if (rc != X86EMUL_CONTINUE) {
3148		pr_warn_once("faulting far call emulation tainted memory\n");
3149		goto fail;
3150	}
3151	return rc;
3152fail:
3153	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3154	ctxt->mode = prev_mode;
3155	return rc;
3156
3157}
3158
3159static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3160{
3161	int rc;
3162	unsigned long eip = 0;
3163
3164	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3165	if (rc != X86EMUL_CONTINUE)
3166		return rc;
3167	rc = assign_eip_near(ctxt, eip);
3168	if (rc != X86EMUL_CONTINUE)
3169		return rc;
3170	rsp_increment(ctxt, ctxt->src.val);
3171	return X86EMUL_CONTINUE;
3172}
3173
3174static int em_xchg(struct x86_emulate_ctxt *ctxt)
3175{
3176	/* Write back the register source. */
3177	ctxt->src.val = ctxt->dst.val;
3178	write_register_operand(&ctxt->src);
3179
3180	/* Write back the memory destination with implicit LOCK prefix. */
3181	ctxt->dst.val = ctxt->src.orig_val;
3182	ctxt->lock_prefix = 1;
3183	return X86EMUL_CONTINUE;
3184}
3185
3186static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3187{
3188	ctxt->dst.val = ctxt->src2.val;
3189	return fastop(ctxt, em_imul);
3190}
3191
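/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit throughout DX/EDX/RDX.
 * A sign bit of 1 gives ~(1 - 1) = all ones; a sign bit of 0 gives
 * ~(0 - 1) = 0.
 */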
3192static int em_cwd(struct x86_emulate_ctxt *ctxt)
3193{
3194	ctxt->dst.type = OP_REG;
3195	ctxt->dst.bytes = ctxt->src.bytes;
3196	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3197	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3198
3199	return X86EMUL_CONTINUE;
3200}
3201
3202static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3203{
3204	u64 tsc_aux = 0;
3205
3206	if (!ctxt->ops->guest_has_rdpid(ctxt))
3207		return emulate_ud(ctxt);
3208
3209	ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux);
3210	ctxt->dst.val = tsc_aux;
3211	return X86EMUL_CONTINUE;
3212}
3213
3214static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3215{
3216	u64 tsc = 0;
3217
3218	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3219	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3220	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3221	return X86EMUL_CONTINUE;
3222}
3223
3224static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3225{
3226	u64 pmc;
3227
3228	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3229		return emulate_gp(ctxt, 0);
3230	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3231	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3232	return X86EMUL_CONTINUE;
3233}
3234
3235static int em_mov(struct x86_emulate_ctxt *ctxt)
3236{
3237	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3238	return X86EMUL_CONTINUE;
3239}
3240
3241static int em_movbe(struct x86_emulate_ctxt *ctxt)
3242{
3243	u16 tmp;
3244
3245	if (!ctxt->ops->guest_has_movbe(ctxt))
3246		return emulate_ud(ctxt);
3247
3248	switch (ctxt->op_bytes) {
3249	case 2:
3250		/*
3251		 * From MOVBE definition: "...When the operand size is 16 bits,
3252		 * the upper word of the destination register remains unchanged
3253		 * ..."
3254		 *
		 * Casting either ->valptr or ->val to u16 would break strict
		 * aliasing rules, so the operation is done almost by hand.
3257		 */
3258		tmp = (u16)ctxt->src.val;
3259		ctxt->dst.val &= ~0xffffUL;
3260		ctxt->dst.val |= (unsigned long)swab16(tmp);
3261		break;
3262	case 4:
3263		ctxt->dst.val = swab32((u32)ctxt->src.val);
3264		break;
3265	case 8:
3266		ctxt->dst.val = swab64(ctxt->src.val);
3267		break;
3268	default:
3269		BUG();
3270	}
3271	return X86EMUL_CONTINUE;
3272}
3273
3274static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3275{
3276	int cr_num = ctxt->modrm_reg;
3277	int r;
3278
3279	if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
3280		return emulate_gp(ctxt, 0);
3281
3282	/* Disable writeback. */
3283	ctxt->dst.type = OP_NONE;
3284
3285	if (cr_num == 0) {
3286		/*
3287		 * CR0 write might have updated CR0.PE and/or CR0.PG
3288		 * which can affect the cpu's execution mode.
3289		 */
3290		r = emulator_recalc_and_set_mode(ctxt);
3291		if (r != X86EMUL_CONTINUE)
3292			return r;
3293	}
3294
3295	return X86EMUL_CONTINUE;
3296}
3297
3298static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3299{
3300	unsigned long val;
3301
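	/*
	 * DR writes take the full 64-bit value in long mode and bits 31:0
	 * otherwise.
	 */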
3302	if (ctxt->mode == X86EMUL_MODE_PROT64)
3303		val = ctxt->src.val & ~0ULL;
3304	else
3305		val = ctxt->src.val & ~0U;
3306
3307	/* #UD condition is already handled. */
3308	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3309		return emulate_gp(ctxt, 0);
3310
3311	/* Disable writeback. */
3312	ctxt->dst.type = OP_NONE;
3313	return X86EMUL_CONTINUE;
3314}
3315
3316static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3317{
3318	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3319	u64 msr_data;
3320	int r;
3321
3322	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3323		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3324	r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data);
3325
3326	if (r == X86EMUL_PROPAGATE_FAULT)
3327		return emulate_gp(ctxt, 0);
3328
3329	return r;
3330}
3331
3332static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3333{
3334	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3335	u64 msr_data;
3336	int r;
3337
3338	r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data);
3339
3340	if (r == X86EMUL_PROPAGATE_FAULT)
3341		return emulate_gp(ctxt, 0);
3342
3343	if (r == X86EMUL_CONTINUE) {
3344		*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3345		*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3346	}
3347	return r;
3348}
3349
3350static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3351{
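	/* SLDT and STR, but not MOV from a segment register, honor UMIP. */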
3352	if (segment > VCPU_SREG_GS &&
3353	    (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3354	    ctxt->ops->cpl(ctxt) > 0)
3355		return emulate_gp(ctxt, 0);
3356
3357	ctxt->dst.val = get_segment_selector(ctxt, segment);
3358	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3359		ctxt->dst.bytes = 2;
3360	return X86EMUL_CONTINUE;
3361}
3362
3363static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3364{
3365	if (ctxt->modrm_reg > VCPU_SREG_GS)
3366		return emulate_ud(ctxt);
3367
3368	return em_store_sreg(ctxt, ctxt->modrm_reg);
3369}
3370
3371static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3372{
3373	u16 sel = ctxt->src.val;
3374
3375	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3376		return emulate_ud(ctxt);
3377
3378	if (ctxt->modrm_reg == VCPU_SREG_SS)
3379		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3380
3381	/* Disable writeback. */
3382	ctxt->dst.type = OP_NONE;
3383	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3384}
3385
3386static int em_sldt(struct x86_emulate_ctxt *ctxt)
3387{
3388	return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3389}
3390
3391static int em_lldt(struct x86_emulate_ctxt *ctxt)
3392{
3393	u16 sel = ctxt->src.val;
3394
3395	/* Disable writeback. */
3396	ctxt->dst.type = OP_NONE;
3397	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3398}
3399
3400static int em_str(struct x86_emulate_ctxt *ctxt)
3401{
3402	return em_store_sreg(ctxt, VCPU_SREG_TR);
3403}
3404
3405static int em_ltr(struct x86_emulate_ctxt *ctxt)
3406{
3407	u16 sel = ctxt->src.val;
3408
3409	/* Disable writeback. */
3410	ctxt->dst.type = OP_NONE;
3411	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3412}
3413
3414static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3415{
3416	int rc;
3417	ulong linear;
3418	unsigned int max_size;
3419
3420	rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, ctxt->mode,
3421			 &linear, X86EMUL_F_INVLPG);
3422	if (rc == X86EMUL_CONTINUE)
3423		ctxt->ops->invlpg(ctxt, linear);
3424	/* Disable writeback. */
3425	ctxt->dst.type = OP_NONE;
3426	return X86EMUL_CONTINUE;
3427}
3428
3429static int em_clts(struct x86_emulate_ctxt *ctxt)
3430{
3431	ulong cr0;
3432
3433	cr0 = ctxt->ops->get_cr(ctxt, 0);
3434	cr0 &= ~X86_CR0_TS;
3435	ctxt->ops->set_cr(ctxt, 0, cr0);
3436	return X86EMUL_CONTINUE;
3437}
3438
3439static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3440{
3441	int rc = ctxt->ops->fix_hypercall(ctxt);
3442
3443	if (rc != X86EMUL_CONTINUE)
3444		return rc;
3445
3446	/* Let the processor re-execute the fixed hypercall */
3447	ctxt->_eip = ctxt->eip;
3448	/* Disable writeback. */
3449	ctxt->dst.type = OP_NONE;
3450	return X86EMUL_CONTINUE;
3451}
3452
3453static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3454				  void (*get)(struct x86_emulate_ctxt *ctxt,
3455					      struct desc_ptr *ptr))
3456{
3457	struct desc_ptr desc_ptr;
3458
3459	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3460	    ctxt->ops->cpl(ctxt) > 0)
3461		return emulate_gp(ctxt, 0);
3462
3463	if (ctxt->mode == X86EMUL_MODE_PROT64)
3464		ctxt->op_bytes = 8;
3465	get(ctxt, &desc_ptr);
3466	if (ctxt->op_bytes == 2) {
3467		ctxt->op_bytes = 4;
3468		desc_ptr.address &= 0x00ffffff;
3469	}
3470	/* Disable writeback. */
3471	ctxt->dst.type = OP_NONE;
3472	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3473				   &desc_ptr, 2 + ctxt->op_bytes);
3474}
3475
3476static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3477{
3478	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3479}
3480
3481static int em_sidt(struct x86_emulate_ctxt *ctxt)
3482{
3483	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3484}
3485
3486static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3487{
3488	struct desc_ptr desc_ptr;
3489	int rc;
3490
3491	if (ctxt->mode == X86EMUL_MODE_PROT64)
3492		ctxt->op_bytes = 8;
3493	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3494			     &desc_ptr.size, &desc_ptr.address,
3495			     ctxt->op_bytes);
3496	if (rc != X86EMUL_CONTINUE)
3497		return rc;
3498	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3499	    emul_is_noncanonical_address(desc_ptr.address, ctxt,
3500					 X86EMUL_F_DT_LOAD))
3501		return emulate_gp(ctxt, 0);
3502	if (lgdt)
3503		ctxt->ops->set_gdt(ctxt, &desc_ptr);
3504	else
3505		ctxt->ops->set_idt(ctxt, &desc_ptr);
3506	/* Disable writeback. */
3507	ctxt->dst.type = OP_NONE;
3508	return X86EMUL_CONTINUE;
3509}
3510
3511static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3512{
3513	return em_lgdt_lidt(ctxt, true);
3514}
3515
3516static int em_lidt(struct x86_emulate_ctxt *ctxt)
3517{
3518	return em_lgdt_lidt(ctxt, false);
3519}
3520
3521static int em_smsw(struct x86_emulate_ctxt *ctxt)
3522{
3523	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3524	    ctxt->ops->cpl(ctxt) > 0)
3525		return emulate_gp(ctxt, 0);
3526
3527	if (ctxt->dst.type == OP_MEM)
3528		ctxt->dst.bytes = 2;
3529	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3530	return X86EMUL_CONTINUE;
3531}
3532
3533static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3534{
3535	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3536			  | (ctxt->src.val & 0x0f));
3537	ctxt->dst.type = OP_NONE;
3538	return X86EMUL_CONTINUE;
3539}
3540
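/*
 * LOOP (0xe2) loops while RCX != 0; LOOPNE (0xe0) and LOOPE (0xe1) also
 * test ZF.  XORing the opcode with 0x5 maps 0xe0/0xe1 to condition codes
 * 5 (NZ) and 4 (Z) for test_cc().
 */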
3541static int em_loop(struct x86_emulate_ctxt *ctxt)
3542{
3543	int rc = X86EMUL_CONTINUE;
3544
3545	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3546	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3547	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3548		rc = jmp_rel(ctxt, ctxt->src.val);
3549
3550	return rc;
3551}
3552
3553static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3554{
3555	int rc = X86EMUL_CONTINUE;
3556
3557	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3558		rc = jmp_rel(ctxt, ctxt->src.val);
3559
3560	return rc;
3561}
3562
3563static int em_in(struct x86_emulate_ctxt *ctxt)
3564{
3565	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3566			     &ctxt->dst.val))
3567		return X86EMUL_IO_NEEDED;
3568
3569	return X86EMUL_CONTINUE;
3570}
3571
3572static int em_out(struct x86_emulate_ctxt *ctxt)
3573{
3574	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3575				    &ctxt->src.val, 1);
3576	/* Disable writeback. */
3577	ctxt->dst.type = OP_NONE;
3578	return X86EMUL_CONTINUE;
3579}
3580
3581static int em_cli(struct x86_emulate_ctxt *ctxt)
3582{
3583	if (emulator_bad_iopl(ctxt))
3584		return emulate_gp(ctxt, 0);
3585
3586	ctxt->eflags &= ~X86_EFLAGS_IF;
3587	return X86EMUL_CONTINUE;
3588}
3589
3590static int em_sti(struct x86_emulate_ctxt *ctxt)
3591{
3592	if (emulator_bad_iopl(ctxt))
3593		return emulate_gp(ctxt, 0);
3594
3595	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3596	ctxt->eflags |= X86_EFLAGS_IF;
3597	return X86EMUL_CONTINUE;
3598}
3599
3600static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3601{
3602	u32 eax, ebx, ecx, edx;
3603	u64 msr = 0;
3604
3605	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3606	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3607	    ctxt->ops->cpl(ctxt)) {
3608		return emulate_gp(ctxt, 0);
3609	}
3610
3611	eax = reg_read(ctxt, VCPU_REGS_RAX);
3612	ecx = reg_read(ctxt, VCPU_REGS_RCX);
3613	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3614	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
3615	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3616	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3617	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
3618	return X86EMUL_CONTINUE;
3619}
3620
3621static int em_sahf(struct x86_emulate_ctxt *ctxt)
3622{
3623	u32 flags;
3624
3625	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3626		X86_EFLAGS_SF;
3627	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3628
3629	ctxt->eflags &= ~0xffUL;
3630	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3631	return X86EMUL_CONTINUE;
3632}
3633
3634static int em_lahf(struct x86_emulate_ctxt *ctxt)
3635{
3636	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3637	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3638	return X86EMUL_CONTINUE;
3639}
3640
3641static int em_bswap(struct x86_emulate_ctxt *ctxt)
3642{
3643	switch (ctxt->op_bytes) {
3644#ifdef CONFIG_X86_64
3645	case 8:
3646		asm("bswap %0" : "+r"(ctxt->dst.val));
3647		break;
3648#endif
3649	default:
3650		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3651		break;
3652	}
3653	return X86EMUL_CONTINUE;
3654}
3655
3656static int em_clflush(struct x86_emulate_ctxt *ctxt)
3657{
3658	/* emulating clflush regardless of cpuid */
3659	return X86EMUL_CONTINUE;
3660}
3661
3662static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
3663{
3664	/* emulating clflushopt regardless of cpuid */
3665	return X86EMUL_CONTINUE;
3666}
3667
3668static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3669{
3670	ctxt->dst.val = (s32) ctxt->src.val;
3671	return X86EMUL_CONTINUE;
3672}
3673
3674static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3675{
3676	if (!ctxt->ops->guest_has_fxsr(ctxt))
3677		return emulate_ud(ctxt);
3678
3679	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3680		return emulate_nm(ctxt);
3681
3682	/*
3683	 * Don't emulate a case that should never be hit, instead of working
3684	 * around a lack of fxsave64/fxrstor64 on old compilers.
3685	 */
3686	if (ctxt->mode >= X86EMUL_MODE_PROT64)
3687		return X86EMUL_UNHANDLEABLE;
3688
3689	return X86EMUL_CONTINUE;
3690}
3691
3692/*
3693 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
3694 * and restore MXCSR.
3695 */
3696static size_t __fxstate_size(int nregs)
3697{
3698	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
3699}
3700
3701static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
3702{
3703	bool cr4_osfxsr;
3704	if (ctxt->mode == X86EMUL_MODE_PROT64)
3705		return __fxstate_size(16);
3706
3707	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
3708	return __fxstate_size(cr4_osfxsr ? 8 : 0);
3709}
3710
3711/*
3712 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
3713 *  1) 16 bit mode
3714 *  2) 32 bit mode
 *     - like (1), but FIP and FDP are only 16 bit.  At least Intel CPUs
 *       preserve the whole 32 bit values, though, so (1) and (2) are the
 *       same wrt. save and restore.
3718 *  3) 64-bit mode with REX.W prefix
3719 *     - like (2), but XMM 8-15 are being saved and restored
3720 *  4) 64-bit mode without REX.W prefix
3721 *     - like (3), but FIP and FDP are 64 bit
3722 *
3723 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
3724 * desired result.  (4) is not emulated.
3725 *
3726 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
3727 * and FPU DS) should match.
3728 */
3729static int em_fxsave(struct x86_emulate_ctxt *ctxt)
3730{
3731	struct fxregs_state fx_state;
3732	int rc;
3733
3734	rc = check_fxsr(ctxt);
3735	if (rc != X86EMUL_CONTINUE)
3736		return rc;
3737
3738	kvm_fpu_get();
3739
3740	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
3741
3742	kvm_fpu_put();
3743
3744	if (rc != X86EMUL_CONTINUE)
3745		return rc;
3746
	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
				   fxstate_size(ctxt));
3749}
3750
3751/*
3752 * FXRSTOR might restore XMM registers not provided by the guest. Fill
3753 * in the host registers (via FXSAVE) instead, so they won't be modified.
3754 * (preemption has to stay disabled until FXRSTOR).
3755 *
 * Use noinline to keep the callers' stack frames small.
3757 */
3758static noinline int fxregs_fixup(struct fxregs_state *fx_state,
3759				 const size_t used_size)
3760{
3761	struct fxregs_state fx_tmp;
3762	int rc;
3763
3764	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
3765	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
3766	       __fxstate_size(16) - used_size);
3767
3768	return rc;
3769}
3770
3771static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
3772{
3773	struct fxregs_state fx_state;
3774	int rc;
3775	size_t size;
3776
3777	rc = check_fxsr(ctxt);
3778	if (rc != X86EMUL_CONTINUE)
3779		return rc;
3780
3781	size = fxstate_size(ctxt);
3782	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
3783	if (rc != X86EMUL_CONTINUE)
3784		return rc;
3785
3786	kvm_fpu_get();
3787
3788	if (size < __fxstate_size(16)) {
3789		rc = fxregs_fixup(&fx_state, size);
3790		if (rc != X86EMUL_CONTINUE)
3791			goto out;
3792	}
3793
3794	if (fx_state.mxcsr >> 16) {
3795		rc = emulate_gp(ctxt, 0);
3796		goto out;
3797	}
3798
3799	if (rc == X86EMUL_CONTINUE)
3800		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
3801
3802out:
3803	kvm_fpu_put();
3804
3805	return rc;
3806}
3807
3808static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
3809{
3810	u32 eax, ecx, edx;
3811
3812	if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
3813		return emulate_ud(ctxt);
3814
3815	eax = reg_read(ctxt, VCPU_REGS_RAX);
3816	edx = reg_read(ctxt, VCPU_REGS_RDX);
3817	ecx = reg_read(ctxt, VCPU_REGS_RCX);
3818
3819	if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
3820		return emulate_gp(ctxt, 0);
3821
3822	return X86EMUL_CONTINUE;
3823}
3824
3825static bool valid_cr(int nr)
3826{
3827	switch (nr) {
3828	case 0:
3829	case 2 ... 4:
3830	case 8:
3831		return true;
3832	default:
3833		return false;
3834	}
3835}
3836
3837static int check_cr_access(struct x86_emulate_ctxt *ctxt)
3838{
3839	if (!valid_cr(ctxt->modrm_reg))
3840		return emulate_ud(ctxt);
3841
3842	return X86EMUL_CONTINUE;
3843}
3844
3845static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3846{
3847	int dr = ctxt->modrm_reg;
3848	u64 cr4;
3849
3850	if (dr > 7)
3851		return emulate_ud(ctxt);
3852
3853	cr4 = ctxt->ops->get_cr(ctxt, 4);
3854	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3855		return emulate_ud(ctxt);
3856
3857	if (ctxt->ops->get_dr(ctxt, 7) & DR7_GD) {
3858		ulong dr6;
3859
3860		dr6 = ctxt->ops->get_dr(ctxt, 6);
3861		dr6 &= ~DR_TRAP_BITS;
3862		dr6 |= DR6_BD | DR6_ACTIVE_LOW;
3863		ctxt->ops->set_dr(ctxt, 6, dr6);
3864		return emulate_db(ctxt);
3865	}
3866
3867	return X86EMUL_CONTINUE;
3868}
3869
3870static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3871{
3872	u64 new_val = ctxt->src.val64;
3873	int dr = ctxt->modrm_reg;
3874
3875	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3876		return emulate_gp(ctxt, 0);
3877
3878	return check_dr_read(ctxt);
3879}
3880
3881static int check_svme(struct x86_emulate_ctxt *ctxt)
3882{
3883	u64 efer = 0;
3884
3885	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3886
3887	if (!(efer & EFER_SVME))
3888		return emulate_ud(ctxt);
3889
3890	return X86EMUL_CONTINUE;
3891}
3892
3893static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3894{
3895	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3896
3897	/* Valid physical address? */
3898	if (rax & 0xffff000000000000ULL)
3899		return emulate_gp(ctxt, 0);
3900
3901	return check_svme(ctxt);
3902}
3903
3904static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3905{
3906	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3907
3908	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3909		return emulate_gp(ctxt, 0);
3910
3911	return X86EMUL_CONTINUE;
3912}
3913
3914static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3915{
3916	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3917	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3918
3919	/*
	 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
3921	 * in Ring3 when CR4.PCE=0.
3922	 */
3923	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
3924		return X86EMUL_CONTINUE;
3925
3926	/*
3927	 * If CR4.PCE is set, the SDM requires CPL=0 or CR0.PE=0.  The CR0.PE
3928	 * check however is unnecessary because CPL is always 0 outside
3929	 * protected mode.
3930	 */
3931	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3932	    ctxt->ops->check_rdpmc_early(ctxt, rcx))
3933		return emulate_gp(ctxt, 0);
3934
3935	return X86EMUL_CONTINUE;
3936}
3937
3938static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3939{
3940	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3941	if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3942		return emulate_gp(ctxt, 0);
3943
3944	return X86EMUL_CONTINUE;
3945}
3946
3947static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3948{
3949	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3950	if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3951		return emulate_gp(ctxt, 0);
3952
3953	return X86EMUL_CONTINUE;
3954}
3955
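/*
 * Opcode table construction macros: D() declares a decode-only entry, I()
 * attaches an ->execute callback, F() a fastop, and N marks an opcode as
 * not implemented.  The *I*/*P variants add intercept ids and permission
 * checks, and the 2bv forms emit a ByteOp entry followed by the word/long
 * one.
 */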
3956#define D(_y) { .flags = (_y) }
3957#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3958#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3959		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
3960#define N    D(NotImpl)
3961#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3962#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3963#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3964#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
3965#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
3966#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3967#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3968#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3969#define II(_f, _e, _i) \
3970	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3971#define IIP(_f, _e, _i, _p) \
3972	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3973	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
3974#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3975
3976#define D2bv(_f)      D((_f) | ByteOp), D(_f)
3977#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3978#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
3979#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
3980#define I2bvIP(_f, _e, _i, _p) \
3981	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3982
3983#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
3984		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
3985		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3986
3987static const struct opcode group7_rm0[] = {
3988	N,
3989	I(SrcNone | Priv | EmulateOnUD,	em_hypercall),
3990	N, N, N, N, N, N,
3991};
3992
3993static const struct opcode group7_rm1[] = {
3994	DI(SrcNone | Priv, monitor),
3995	DI(SrcNone | Priv, mwait),
3996	N, N, N, N, N, N,
3997};
3998
3999static const struct opcode group7_rm2[] = {
4000	N,
4001	II(ImplicitOps | Priv,			em_xsetbv,	xsetbv),
4002	N, N, N, N, N, N,
4003};
4004
4005static const struct opcode group7_rm3[] = {
4006	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
4007	II(SrcNone  | Prot | EmulateOnUD,	em_hypercall,	vmmcall),
4008	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
4009	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
4010	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
4011	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
4012	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
4013	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
4014};
4015
4016static const struct opcode group7_rm7[] = {
4017	N,
4018	DIP(SrcNone, rdtscp, check_rdtsc),
4019	N, N, N, N, N, N,
4020};
4021
4022static const struct opcode group1[] = {
4023	F(Lock, em_add),
4024	F(Lock | PageTable, em_or),
4025	F(Lock, em_adc),
4026	F(Lock, em_sbb),
4027	F(Lock | PageTable, em_and),
4028	F(Lock, em_sub),
4029	F(Lock, em_xor),
4030	F(NoWrite, em_cmp),
4031};
4032
4033static const struct opcode group1A[] = {
4034	I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4035};
4036
4037static const struct opcode group2[] = {
4038	F(DstMem | ModRM, em_rol),
4039	F(DstMem | ModRM, em_ror),
4040	F(DstMem | ModRM, em_rcl),
4041	F(DstMem | ModRM, em_rcr),
4042	F(DstMem | ModRM, em_shl),
4043	F(DstMem | ModRM, em_shr),
4044	F(DstMem | ModRM, em_shl),
4045	F(DstMem | ModRM, em_sar),
4046};
4047
4048static const struct opcode group3[] = {
4049	F(DstMem | SrcImm | NoWrite, em_test),
4050	F(DstMem | SrcImm | NoWrite, em_test),
4051	F(DstMem | SrcNone | Lock, em_not),
4052	F(DstMem | SrcNone | Lock, em_neg),
4053	F(DstXacc | Src2Mem, em_mul_ex),
4054	F(DstXacc | Src2Mem, em_imul_ex),
4055	F(DstXacc | Src2Mem, em_div_ex),
4056	F(DstXacc | Src2Mem, em_idiv_ex),
4057};
4058
4059static const struct opcode group4[] = {
4060	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4061	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4062	N, N, N, N, N, N,
4063};
4064
4065static const struct opcode group5[] = {
4066	F(DstMem | SrcNone | Lock,		em_inc),
4067	F(DstMem | SrcNone | Lock,		em_dec),
4068	I(SrcMem | NearBranch | IsBranch,       em_call_near_abs),
4069	I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far),
4070	I(SrcMem | NearBranch | IsBranch,       em_jmp_abs),
4071	I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far),
4072	I(SrcMem | Stack | TwoMemOp,		em_push), D(Undefined),
4073};
4074
4075static const struct opcode group6[] = {
4076	II(Prot | DstMem,	   em_sldt, sldt),
4077	II(Prot | DstMem,	   em_str, str),
4078	II(Prot | Priv | SrcMem16, em_lldt, lldt),
4079	II(Prot | Priv | SrcMem16, em_ltr, ltr),
4080	N, N, N, N,
4081};
4082
4083static const struct group_dual group7 = { {
4084	II(Mov | DstMem,			em_sgdt, sgdt),
4085	II(Mov | DstMem,			em_sidt, sidt),
4086	II(SrcMem | Priv,			em_lgdt, lgdt),
4087	II(SrcMem | Priv,			em_lidt, lidt),
4088	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4089	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4090	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
4091}, {
4092	EXT(0, group7_rm0),
4093	EXT(0, group7_rm1),
4094	EXT(0, group7_rm2),
4095	EXT(0, group7_rm3),
4096	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4097	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4098	EXT(0, group7_rm7),
4099} };
4100
4101static const struct opcode group8[] = {
4102	N, N, N, N,
4103	F(DstMem | SrcImmByte | NoWrite,		em_bt),
4104	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
4105	F(DstMem | SrcImmByte | Lock,			em_btr),
4106	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
4107};
4108
4109/*
4110 * The "memory" destination is actually always a register, since we come
4111 * from the register case of group9.
4112 */
4113static const struct gprefix pfx_0f_c7_7 = {
4114	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
4115};
4116
4118static const struct group_dual group9 = { {
4119	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4120}, {
4121	N, N, N, N, N, N, N,
4122	GP(0, &pfx_0f_c7_7),
4123} };
4124
4125static const struct opcode group11[] = {
4126	I(DstMem | SrcImm | Mov | PageTable, em_mov),
4127	X7(D(Undefined)),
4128};
4129
4130static const struct gprefix pfx_0f_ae_7 = {
4131	I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
4132};
4133
4134static const struct group_dual group15 = { {
4135	I(ModRM | Aligned16, em_fxsave),
4136	I(ModRM | Aligned16, em_fxrstor),
4137	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4138}, {
4139	N, N, N, N, N, N, N, N,
4140} };
4141
4142static const struct gprefix pfx_0f_6f_0f_7f = {
4143	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4144};
4145
4146static const struct instr_dual instr_dual_0f_2b = {
4147	I(0, em_mov), N
4148};
4149
4150static const struct gprefix pfx_0f_2b = {
4151	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4152};
4153
4154static const struct gprefix pfx_0f_10_0f_11 = {
4155	I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4156};
4157
4158static const struct gprefix pfx_0f_28_0f_29 = {
4159	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4160};
4161
4162static const struct gprefix pfx_0f_e7 = {
4163	N, I(Sse, em_mov), N, N,
4164};
4165
4166static const struct escape escape_d9 = { {
4167	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4168}, {
4169	/* 0xC0 - 0xC7 */
4170	N, N, N, N, N, N, N, N,
4171	/* 0xC8 - 0xCF */
4172	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
4174	N, N, N, N, N, N, N, N,
4175	/* 0xD8 - 0xDF */
4176	N, N, N, N, N, N, N, N,
4177	/* 0xE0 - 0xE7 */
4178	N, N, N, N, N, N, N, N,
4179	/* 0xE8 - 0xEF */
4180	N, N, N, N, N, N, N, N,
4181	/* 0xF0 - 0xF7 */
4182	N, N, N, N, N, N, N, N,
4183	/* 0xF8 - 0xFF */
4184	N, N, N, N, N, N, N, N,
4185} };
4186
4187static const struct escape escape_db = { {
4188	N, N, N, N, N, N, N, N,
4189}, {
4190	/* 0xC0 - 0xC7 */
4191	N, N, N, N, N, N, N, N,
4192	/* 0xC8 - 0xCF */
4193	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
4195	N, N, N, N, N, N, N, N,
4196	/* 0xD8 - 0xDF */
4197	N, N, N, N, N, N, N, N,
4198	/* 0xE0 - 0xE7 */
4199	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4200	/* 0xE8 - 0xEF */
4201	N, N, N, N, N, N, N, N,
4202	/* 0xF0 - 0xF7 */
4203	N, N, N, N, N, N, N, N,
4204	/* 0xF8 - 0xFF */
4205	N, N, N, N, N, N, N, N,
4206} };
4207
4208static const struct escape escape_dd = { {
4209	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4210}, {
4211	/* 0xC0 - 0xC7 */
4212	N, N, N, N, N, N, N, N,
4213	/* 0xC8 - 0xCF */
4214	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
4216	N, N, N, N, N, N, N, N,
4217	/* 0xD8 - 0xDF */
4218	N, N, N, N, N, N, N, N,
4219	/* 0xE0 - 0xE7 */
4220	N, N, N, N, N, N, N, N,
4221	/* 0xE8 - 0xEF */
4222	N, N, N, N, N, N, N, N,
4223	/* 0xF0 - 0xF7 */
4224	N, N, N, N, N, N, N, N,
4225	/* 0xF8 - 0xFF */
4226	N, N, N, N, N, N, N, N,
4227} };
4228
4229static const struct instr_dual instr_dual_0f_c3 = {
4230	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4231};
4232
4233static const struct mode_dual mode_dual_63 = {
4234	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4235};
4236
4237static const struct instr_dual instr_dual_8d = {
4238	D(DstReg | SrcMem | ModRM | NoAccess), N
4239};
4240
4241static const struct opcode opcode_table[256] = {
4242	/* 0x00 - 0x07 */
4243	F6ALU(Lock, em_add),
4244	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4245	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4246	/* 0x08 - 0x0F */
4247	F6ALU(Lock | PageTable, em_or),
4248	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4249	N,
4250	/* 0x10 - 0x17 */
4251	F6ALU(Lock, em_adc),
4252	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4253	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4254	/* 0x18 - 0x1F */
4255	F6ALU(Lock, em_sbb),
4256	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4257	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4258	/* 0x20 - 0x27 */
4259	F6ALU(Lock | PageTable, em_and), N, N,
4260	/* 0x28 - 0x2F */
4261	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4262	/* 0x30 - 0x37 */
4263	F6ALU(Lock, em_xor), N, N,
4264	/* 0x38 - 0x3F */
4265	F6ALU(NoWrite, em_cmp), N, N,
4266	/* 0x40 - 0x4F */
4267	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4268	/* 0x50 - 0x57 */
4269	X8(I(SrcReg | Stack, em_push)),
4270	/* 0x58 - 0x5F */
4271	X8(I(DstReg | Stack, em_pop)),
4272	/* 0x60 - 0x67 */
4273	I(ImplicitOps | Stack | No64, em_pusha),
4274	I(ImplicitOps | Stack | No64, em_popa),
4275	N, MD(ModRM, &mode_dual_63),
4276	N, N, N, N,
4277	/* 0x68 - 0x6F */
4278	I(SrcImm | Mov | Stack, em_push),
4279	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4280	I(SrcImmByte | Mov | Stack, em_push),
4281	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4282	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4283	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4284	/* 0x70 - 0x7F */
4285	X16(D(SrcImmByte | NearBranch | IsBranch)),
4286	/* 0x80 - 0x87 */
4287	G(ByteOp | DstMem | SrcImm, group1),
4288	G(DstMem | SrcImm, group1),
4289	G(ByteOp | DstMem | SrcImm | No64, group1),
4290	G(DstMem | SrcImmByte, group1),
4291	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4292	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4293	/* 0x88 - 0x8F */
4294	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4295	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4296	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4297	ID(0, &instr_dual_8d),
4298	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4299	G(0, group1A),
4300	/* 0x90 - 0x97 */
4301	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4302	/* 0x98 - 0x9F */
4303	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4304	I(SrcImmFAddr | No64 | IsBranch, em_call_far), N,
4305	II(ImplicitOps | Stack, em_pushf, pushf),
4306	II(ImplicitOps | Stack, em_popf, popf),
4307	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4308	/* 0xA0 - 0xA7 */
4309	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4310	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4311	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4312	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4313	/* 0xA8 - 0xAF */
4314	F2bv(DstAcc | SrcImm | NoWrite, em_test),
4315	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4316	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4317	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4318	/* 0xB0 - 0xB7 */
4319	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4320	/* 0xB8 - 0xBF */
4321	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4322	/* 0xC0 - 0xC7 */
4323	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4324	I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch, em_ret_near_imm),
4325	I(ImplicitOps | NearBranch | IsBranch, em_ret),
4326	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4327	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4328	G(ByteOp, group11), G(0, group11),
4329	/* 0xC8 - 0xCF */
4330	I(Stack | SrcImmU16 | Src2ImmByte | IsBranch, em_enter),
4331	I(Stack | IsBranch, em_leave),
4332	I(ImplicitOps | SrcImmU16 | IsBranch, em_ret_far_imm),
4333	I(ImplicitOps | IsBranch, em_ret_far),
4334	D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn),
4335	D(ImplicitOps | No64 | IsBranch),
4336	II(ImplicitOps | IsBranch, em_iret, iret),
4337	/* 0xD0 - 0xD7 */
4338	G(Src2One | ByteOp, group2), G(Src2One, group2),
4339	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4340	I(DstAcc | SrcImmUByte | No64, em_aam),
4341	I(DstAcc | SrcImmUByte | No64, em_aad),
4342	F(DstAcc | ByteOp | No64, em_salc),
4343	I(DstAcc | SrcXLat | ByteOp, em_mov),
4344	/* 0xD8 - 0xDF */
4345	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4346	/* 0xE0 - 0xE7 */
4347	X3(I(SrcImmByte | NearBranch | IsBranch, em_loop)),
4348	I(SrcImmByte | NearBranch | IsBranch, em_jcxz),
4349	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
4350	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4351	/* 0xE8 - 0xEF */
4352	I(SrcImm | NearBranch | IsBranch, em_call),
4353	D(SrcImm | ImplicitOps | NearBranch | IsBranch),
4354	I(SrcImmFAddr | No64 | IsBranch, em_jmp_far),
4355	D(SrcImmByte | ImplicitOps | NearBranch | IsBranch),
4356	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
4357	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4358	/* 0xF0 - 0xF7 */
4359	N, DI(ImplicitOps, icebp), N, N,
4360	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4361	G(ByteOp, group3), G(0, group3),
4362	/* 0xF8 - 0xFF */
4363	D(ImplicitOps), D(ImplicitOps),
4364	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4365	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4366};
4367
4368static const struct opcode twobyte_table[256] = {
4369	/* 0x00 - 0x0F */
4370	G(0, group6), GD(0, &group7), N, N,
4371	N, I(ImplicitOps | EmulateOnUD | IsBranch, em_syscall),
4372	II(ImplicitOps | Priv, em_clts, clts), N,
4373	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4374	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4375	/* 0x10 - 0x1F */
4376	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4377	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4378	N, N, N, N, N, N,
4379	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
4380	D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4381	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4382	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4383	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4384	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
4385	/* 0x20 - 0x2F */
4386	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
4387	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4388	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4389						check_cr_access),
4390	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4391						check_dr_write),
4392	N, N, N, N,
4393	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4394	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4395	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4396	N, N, N, N,
4397	/* 0x30 - 0x3F */
4398	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4399	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4400	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4401	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4402	I(ImplicitOps | EmulateOnUD | IsBranch, em_sysenter),
4403	I(ImplicitOps | Priv | EmulateOnUD | IsBranch, em_sysexit),
4404	N, N,
4405	N, N, N, N, N, N, N, N,
4406	/* 0x40 - 0x4F */
4407	X16(D(DstReg | SrcMem | ModRM)),
4408	/* 0x50 - 0x5F */
4409	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4410	/* 0x60 - 0x6F */
4411	N, N, N, N,
4412	N, N, N, N,
4413	N, N, N, N,
4414	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4415	/* 0x70 - 0x7F */
4416	N, N, N, N,
4417	N, N, N, N,
4418	N, N, N, N,
4419	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4420	/* 0x80 - 0x8F */
4421	X16(D(SrcImm | NearBranch | IsBranch)),
4422	/* 0x90 - 0x9F */
4423	X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4424	/* 0xA0 - 0xA7 */
4425	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4426	II(ImplicitOps, em_cpuid, cpuid),
4427	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4428	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4429	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4430	/* 0xA8 - 0xAF */
4431	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4432	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4433	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4434	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4435	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4436	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4437	/* 0xB0 - 0xB7 */
4438	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4439	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4440	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4441	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4442	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4443	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4444	/* 0xB8 - 0xBF */
4445	N, N,
4446	G(BitOp, group8),
4447	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4448	I(DstReg | SrcMem | ModRM, em_bsf_c),
4449	I(DstReg | SrcMem | ModRM, em_bsr_c),
4450	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4451	/* 0xC0 - 0xC7 */
4452	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4453	N, ID(0, &instr_dual_0f_c3),
4454	N, N, N, GD(0, &group9),
4455	/* 0xC8 - 0xCF */
4456	X8(I(DstReg, em_bswap)),
4457	/* 0xD0 - 0xDF */
4458	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4459	/* 0xE0 - 0xEF */
4460	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4461	N, N, N, N, N, N, N, N,
4462	/* 0xF0 - 0xFF */
4463	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4464};
4465
4466static const struct instr_dual instr_dual_0f_38_f0 = {
4467	I(DstReg | SrcMem | Mov, em_movbe), N
4468};
4469
4470static const struct instr_dual instr_dual_0f_38_f1 = {
4471	I(DstMem | SrcReg | Mov, em_movbe), N
4472};
4473
4474static const struct gprefix three_byte_0f_38_f0 = {
4475	ID(0, &instr_dual_0f_38_f0), ID(0, &instr_dual_0f_38_f0), N, N
4476};
4477
4478static const struct gprefix three_byte_0f_38_f1 = {
4479	ID(0, &instr_dual_0f_38_f1), ID(0, &instr_dual_0f_38_f1), N, N
4480};
4481
4482/*
 * The instructions below are indexed by the third opcode byte and selected
 * by the mandatory prefix.
4485 */
4486static const struct opcode opcode_map_0f_38[256] = {
4487	/* 0x00 - 0x7f */
4488	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4489	/* 0x80 - 0xef */
4490	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4491	/* 0xf0 - 0xf1 */
4492	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4493	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4494	/* 0xf2 - 0xff */
4495	N, N, X4(N), X8(N)
4496};
4497
4498#undef D
4499#undef N
4500#undef G
4501#undef GD
4502#undef I
4503#undef GP
4504#undef EXT
4505#undef MD
4506#undef ID
4507
4508#undef D2bv
4509#undef D2bvIP
4510#undef I2bv
4511#undef I2bvIP
#undef F6ALU
4513
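/*
 * Immediates are at most 4 bytes even with a 64-bit operand size; the one
 * 8-byte immediate, MOV r64, imm64 (0xb8-0xbf), is decoded via OpImm64.
 */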
4514static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4515{
4516	unsigned size;
4517
4518	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4519	if (size == 8)
4520		size = 4;
4521	return size;
4522}
4523
4524static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4525		      unsigned size, bool sign_extension)
4526{
4527	int rc = X86EMUL_CONTINUE;
4528
4529	op->type = OP_IMM;
4530	op->bytes = size;
4531	op->addr.mem.ea = ctxt->_eip;
4532	/* NB. Immediates are sign-extended as necessary. */
4533	switch (op->bytes) {
4534	case 1:
4535		op->val = insn_fetch(s8, ctxt);
4536		break;
4537	case 2:
4538		op->val = insn_fetch(s16, ctxt);
4539		break;
4540	case 4:
4541		op->val = insn_fetch(s32, ctxt);
4542		break;
4543	case 8:
4544		op->val = insn_fetch(s64, ctxt);
4545		break;
4546	}
4547	if (!sign_extension) {
4548		switch (op->bytes) {
4549		case 1:
4550			op->val &= 0xff;
4551			break;
4552		case 2:
4553			op->val &= 0xffff;
4554			break;
4555		case 4:
4556			op->val &= 0xffffffff;
4557			break;
4558		}
4559	}
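	/* insn_fetch() jumps here if fetching the immediate faults. */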
4560done:
4561	return rc;
4562}
4563
4564static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4565			  unsigned d)
4566{
4567	int rc = X86EMUL_CONTINUE;
4568
4569	switch (d) {
4570	case OpReg:
4571		decode_register_operand(ctxt, op);
4572		break;
4573	case OpImmUByte:
4574		rc = decode_imm(ctxt, op, 1, false);
4575		break;
4576	case OpMem:
4577		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4578	mem_common:
4579		*op = ctxt->memop;
4580		ctxt->memopp = op;
4581		if (ctxt->d & BitOp)
4582			fetch_bit_operand(ctxt);
4583		op->orig_val = op->val;
4584		break;
4585	case OpMem64:
4586		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4587		goto mem_common;
4588	case OpAcc:
4589		op->type = OP_REG;
4590		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4591		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4592		fetch_register_operand(op);
4593		op->orig_val = op->val;
4594		break;
4595	case OpAccLo:
4596		op->type = OP_REG;
4597		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4598		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4599		fetch_register_operand(op);
4600		op->orig_val = op->val;
4601		break;
4602	case OpAccHi:
4603		if (ctxt->d & ByteOp) {
4604			op->type = OP_NONE;
4605			break;
4606		}
4607		op->type = OP_REG;
4608		op->bytes = ctxt->op_bytes;
4609		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4610		fetch_register_operand(op);
4611		op->orig_val = op->val;
4612		break;
4613	case OpDI:
4614		op->type = OP_MEM;
4615		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4616		op->addr.mem.ea =
4617			register_address(ctxt, VCPU_REGS_RDI);
4618		op->addr.mem.seg = VCPU_SREG_ES;
4619		op->val = 0;
4620		op->count = 1;
4621		break;
4622	case OpDX:
4623		op->type = OP_REG;
4624		op->bytes = 2;
4625		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4626		fetch_register_operand(op);
4627		break;
4628	case OpCL:
4629		op->type = OP_IMM;
4630		op->bytes = 1;
4631		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4632		break;
4633	case OpImmByte:
4634		rc = decode_imm(ctxt, op, 1, true);
4635		break;
4636	case OpOne:
4637		op->type = OP_IMM;
4638		op->bytes = 1;
4639		op->val = 1;
4640		break;
4641	case OpImm:
4642		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4643		break;
4644	case OpImm64:
4645		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4646		break;
4647	case OpMem8:
4648		ctxt->memop.bytes = 1;
4649		if (ctxt->memop.type == OP_REG) {
4650			ctxt->memop.addr.reg = decode_register(ctxt,
4651					ctxt->modrm_rm, true);
4652			fetch_register_operand(&ctxt->memop);
4653		}
4654		goto mem_common;
4655	case OpMem16:
4656		ctxt->memop.bytes = 2;
4657		goto mem_common;
4658	case OpMem32:
4659		ctxt->memop.bytes = 4;
4660		goto mem_common;
4661	case OpImmU16:
4662		rc = decode_imm(ctxt, op, 2, false);
4663		break;
4664	case OpImmU:
4665		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4666		break;
4667	case OpSI:
4668		op->type = OP_MEM;
4669		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4670		op->addr.mem.ea =
4671			register_address(ctxt, VCPU_REGS_RSI);
4672		op->addr.mem.seg = ctxt->seg_override;
4673		op->val = 0;
4674		op->count = 1;
4675		break;
4676	case OpXLat:
4677		op->type = OP_MEM;
4678		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4679		op->addr.mem.ea =
4680			address_mask(ctxt,
4681				reg_read(ctxt, VCPU_REGS_RBX) +
4682				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4683		op->addr.mem.seg = ctxt->seg_override;
4684		op->val = 0;
4685		break;
4686	case OpImmFAddr:
4687		op->type = OP_IMM;
4688		op->addr.mem.ea = ctxt->_eip;
4689		op->bytes = ctxt->op_bytes + 2;
4690		insn_fetch_arr(op->valptr, op->bytes, ctxt);
4691		break;
4692	case OpMemFAddr:
4693		ctxt->memop.bytes = ctxt->op_bytes + 2;
4694		goto mem_common;
4695	case OpES:
4696		op->type = OP_IMM;
4697		op->val = VCPU_SREG_ES;
4698		break;
4699	case OpCS:
4700		op->type = OP_IMM;
4701		op->val = VCPU_SREG_CS;
4702		break;
4703	case OpSS:
4704		op->type = OP_IMM;
4705		op->val = VCPU_SREG_SS;
4706		break;
4707	case OpDS:
4708		op->type = OP_IMM;
4709		op->val = VCPU_SREG_DS;
4710		break;
4711	case OpFS:
4712		op->type = OP_IMM;
4713		op->val = VCPU_SREG_FS;
4714		break;
4715	case OpGS:
4716		op->type = OP_IMM;
4717		op->val = VCPU_SREG_GS;
4718		break;
4719	case OpImplicit:
4720		/* Special instructions do their own operand decoding. */
4721	default:
4722		op->type = OP_NONE; /* Disable writeback. */
4723		break;
4724	}
4725
4726done:
4727	return rc;
4728}
4729
4730int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
4731{
4732	int rc = X86EMUL_CONTINUE;
4733	int mode = ctxt->mode;
4734	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4735	bool op_prefix = false;
4736	bool has_seg_override = false;
4737	struct opcode opcode;
4738	u16 dummy;
4739	struct desc_struct desc;
4740
4741	ctxt->memop.type = OP_NONE;
4742	ctxt->memopp = NULL;
4743	ctxt->_eip = ctxt->eip;
4744	ctxt->fetch.ptr = ctxt->fetch.data;
4745	ctxt->fetch.end = ctxt->fetch.data + insn_len;
4746	ctxt->opcode_len = 1;
4747	ctxt->intercept = x86_intercept_none;
4748	if (insn_len > 0)
4749		memcpy(ctxt->fetch.data, insn, insn_len);
4750	else {
4751		rc = __do_insn_fetch_bytes(ctxt, 1);
4752		if (rc != X86EMUL_CONTINUE)
4753			goto done;
4754	}
4755
4756	switch (mode) {
4757	case X86EMUL_MODE_REAL:
4758	case X86EMUL_MODE_VM86:
4759		def_op_bytes = def_ad_bytes = 2;
4760		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
4761		if (desc.d)
4762			def_op_bytes = def_ad_bytes = 4;
4763		break;
4764	case X86EMUL_MODE_PROT16:
4765		def_op_bytes = def_ad_bytes = 2;
4766		break;
4767	case X86EMUL_MODE_PROT32:
4768		def_op_bytes = def_ad_bytes = 4;
4769		break;
4770#ifdef CONFIG_X86_64
4771	case X86EMUL_MODE_PROT64:
4772		def_op_bytes = 4;
4773		def_ad_bytes = 8;
4774		break;
4775#endif
4776	default:
4777		return EMULATION_FAILED;
4778	}
4779
4780	ctxt->op_bytes = def_op_bytes;
4781	ctxt->ad_bytes = def_ad_bytes;
4782
4783	/* Legacy prefixes. */
4784	for (;;) {
4785		switch (ctxt->b = insn_fetch(u8, ctxt)) {
4786		case 0x66:	/* operand-size override */
4787			op_prefix = true;
4788			/* switch between 2/4 bytes */
4789			ctxt->op_bytes = def_op_bytes ^ 6;
4790			break;
4791		case 0x67:	/* address-size override */
4792			if (mode == X86EMUL_MODE_PROT64)
4793				/* switch between 4/8 bytes */
4794				ctxt->ad_bytes = def_ad_bytes ^ 12;
4795			else
4796				/* switch between 2/4 bytes */
4797				ctxt->ad_bytes = def_ad_bytes ^ 6;
4798			break;
4799		case 0x26:	/* ES override */
4800			has_seg_override = true;
4801			ctxt->seg_override = VCPU_SREG_ES;
4802			break;
4803		case 0x2e:	/* CS override */
4804			has_seg_override = true;
4805			ctxt->seg_override = VCPU_SREG_CS;
4806			break;
4807		case 0x36:	/* SS override */
4808			has_seg_override = true;
4809			ctxt->seg_override = VCPU_SREG_SS;
4810			break;
4811		case 0x3e:	/* DS override */
4812			has_seg_override = true;
4813			ctxt->seg_override = VCPU_SREG_DS;
4814			break;
4815		case 0x64:	/* FS override */
4816			has_seg_override = true;
4817			ctxt->seg_override = VCPU_SREG_FS;
4818			break;
4819		case 0x65:	/* GS override */
4820			has_seg_override = true;
4821			ctxt->seg_override = VCPU_SREG_GS;
4822			break;
4823		case 0x40 ... 0x4f: /* REX */
4824			if (mode != X86EMUL_MODE_PROT64)
4825				goto done_prefixes;
4826			ctxt->rex_prefix = ctxt->b;
4827			continue;
4828		case 0xf0:	/* LOCK */
4829			ctxt->lock_prefix = 1;
4830			break;
4831		case 0xf2:	/* REPNE/REPNZ */
4832		case 0xf3:	/* REP/REPE/REPZ */
4833			ctxt->rep_prefix = ctxt->b;
4834			break;
4835		default:
4836			goto done_prefixes;
4837		}
4838
4839		/* Any legacy prefix after a REX prefix nullifies its effect. */
4840
4841		ctxt->rex_prefix = 0;
4842	}
4843
4844done_prefixes:
4845
4846	/* REX prefix. */
4847	if (ctxt->rex_prefix & 8)
4848		ctxt->op_bytes = 8;	/* REX.W */
4849
4850	/* Opcode byte(s). */
4851	opcode = opcode_table[ctxt->b];
4852	/* Two-byte opcode? */
4853	if (ctxt->b == 0x0f) {
4854		ctxt->opcode_len = 2;
4855		ctxt->b = insn_fetch(u8, ctxt);
4856		opcode = twobyte_table[ctxt->b];
4857
4858		/* 0F_38 opcode map */
4859		if (ctxt->b == 0x38) {
4860			ctxt->opcode_len = 3;
4861			ctxt->b = insn_fetch(u8, ctxt);
4862			opcode = opcode_map_0f_38[ctxt->b];
4863		}
4864	}
4865	ctxt->d = opcode.flags;
4866
4867	if (ctxt->d & ModRM)
4868		ctxt->modrm = insn_fetch(u8, ctxt);
4869
4870	/* vex-prefix instructions are not implemented */
4871	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4872	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4873		ctxt->d = NotImpl;
4874	}
4875
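	/*
	 * Resolve group, dual-table, prefix and escape indirections until a
	 * concrete opcode entry remains, merging in its flags.
	 */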
4876	while (ctxt->d & GroupMask) {
4877		switch (ctxt->d & GroupMask) {
4878		case Group:
4879			goffset = (ctxt->modrm >> 3) & 7;
4880			opcode = opcode.u.group[goffset];
4881			break;
4882		case GroupDual:
4883			goffset = (ctxt->modrm >> 3) & 7;
4884			if ((ctxt->modrm >> 6) == 3)
4885				opcode = opcode.u.gdual->mod3[goffset];
4886			else
4887				opcode = opcode.u.gdual->mod012[goffset];
4888			break;
4889		case RMExt:
4890			goffset = ctxt->modrm & 7;
4891			opcode = opcode.u.group[goffset];
4892			break;
4893		case Prefix:
4894			if (ctxt->rep_prefix && op_prefix)
4895				return EMULATION_FAILED;
4896			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4897			switch (simd_prefix) {
4898			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4899			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4900			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4901			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4902			}
4903			break;
4904		case Escape:
4905			if (ctxt->modrm > 0xbf) {
4906				size_t size = ARRAY_SIZE(opcode.u.esc->high);
4907				u32 index = array_index_nospec(
4908					ctxt->modrm - 0xc0, size);
4909
4910				opcode = opcode.u.esc->high[index];
4911			} else {
4912				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4913			}
4914			break;
4915		case InstrDual:
4916			if ((ctxt->modrm >> 6) == 3)
4917				opcode = opcode.u.idual->mod3;
4918			else
4919				opcode = opcode.u.idual->mod012;
4920			break;
4921		case ModeDual:
4922			if (ctxt->mode == X86EMUL_MODE_PROT64)
4923				opcode = opcode.u.mdual->mode64;
4924			else
4925				opcode = opcode.u.mdual->mode32;
4926			break;
4927		default:
4928			return EMULATION_FAILED;
4929		}
4930
4931		ctxt->d &= ~(u64)GroupMask;
4932		ctxt->d |= opcode.flags;
4933	}
4934
4935	ctxt->is_branch = opcode.flags & IsBranch;
4936
4937	/* Unrecognised? */
4938	if (ctxt->d == 0)
4939		return EMULATION_FAILED;
4940
4941	ctxt->execute = opcode.u.execute;
4942
4943	if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
4944	    likely(!(ctxt->d & EmulateOnUD)))
4945		return EMULATION_FAILED;
4946
4947	if (unlikely(ctxt->d &
4948	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4949	     No16))) {
4950		/*
4951		 * These are copied unconditionally here, and checked unconditionally
4952		 * in x86_emulate_insn.
4953		 */
4954		ctxt->check_perm = opcode.check_perm;
4955		ctxt->intercept = opcode.intercept;
4956
4957		if (ctxt->d & NotImpl)
4958			return EMULATION_FAILED;
4959
4960		if (mode == X86EMUL_MODE_PROT64) {
4961			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4962				ctxt->op_bytes = 8;
4963			else if (ctxt->d & NearBranch)
4964				ctxt->op_bytes = 8;
4965		}
4966
4967		if (ctxt->d & Op3264) {
4968			if (mode == X86EMUL_MODE_PROT64)
4969				ctxt->op_bytes = 8;
4970			else
4971				ctxt->op_bytes = 4;
4972		}
4973
4974		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4975			ctxt->op_bytes = 4;
4976
4977		if (ctxt->d & Sse)
4978			ctxt->op_bytes = 16;
4979		else if (ctxt->d & Mmx)
4980			ctxt->op_bytes = 8;
4981	}
4982
4983	/* ModRM and SIB bytes. */
4984	if (ctxt->d & ModRM) {
4985		rc = decode_modrm(ctxt, &ctxt->memop);
4986		if (!has_seg_override) {
4987			has_seg_override = true;
4988			ctxt->seg_override = ctxt->modrm_seg;
4989		}
4990	} else if (ctxt->d & MemAbs)
4991		rc = decode_abs(ctxt, &ctxt->memop);
4992	if (rc != X86EMUL_CONTINUE)
4993		goto done;
4994
4995	if (!has_seg_override)
4996		ctxt->seg_override = VCPU_SREG_DS;
4997
4998	ctxt->memop.addr.mem.seg = ctxt->seg_override;
4999
5000	/*
5001	 * Decode and fetch the source operand: register, memory
5002	 * or immediate.
5003	 */
5004	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5005	if (rc != X86EMUL_CONTINUE)
5006		goto done;
5007
5008	/*
5009	 * Decode and fetch the second source operand: register, memory
5010	 * or immediate.
5011	 */
5012	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5013	if (rc != X86EMUL_CONTINUE)
5014		goto done;
5015
5016	/* Decode and fetch the destination operand: register or memory. */
5017	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5018
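	/*
	 * RIP-relative operands were decoded with an effective address
	 * relative to zero; _eip now points past the fully fetched
	 * instruction, so adding it here yields the architecturally
	 * correct next-instruction-relative address.
	 */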
5019	if (ctxt->rip_relative && likely(ctxt->memopp))
5020		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5021				ctxt->memopp->addr.mem.ea + ctxt->_eip);
5022
5023done:
5024	if (rc == X86EMUL_PROPAGATE_FAULT)
5025		ctxt->have_exception = true;
5026	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5027}
5028
5029bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5030{
5031	return ctxt->d & PageTable;
5032}
5033
5034static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5035{
5036	/* The second termination condition only applies for REPE
5037	 * and REPNE. Test if the repeat string operation prefix is
5038	 * REPE/REPZ or REPNE/REPNZ and, if so, check the
5039	 * corresponding termination condition:
5040	 * 	- if REPE/REPZ and ZF = 0 then done
5041	 * 	- if REPNE/REPNZ and ZF = 1 then done
5042	 */
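	/* e.g. REPE CMPSB (f3 a6) stops as soon as a byte compare clears ZF. */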
5043	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5044	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5045	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
5046		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5047		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
5048		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5049		return true;
5050
5051	return false;
5052}
5053
5054static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5055{
5056	int rc;
5057
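	/*
	 * FWAIT forces delivery of any pending unmasked x87 exception;
	 * asm_safe() turns the resulting fault into an error return so
	 * that #MF can be injected into the guest instead.
	 */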
5058	kvm_fpu_get();
5059	rc = asm_safe("fwait");
5060	kvm_fpu_put();
5061
5062	if (unlikely(rc != X86EMUL_CONTINUE))
5063		return emulate_exception(ctxt, MF_VECTOR, 0, false);
5064
5065	return X86EMUL_CONTINUE;
5066}
5067
5068static void fetch_possible_mmx_operand(struct operand *op)
5069{
5070	if (op->type == OP_MM)
5071		kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
5072}
5073
5074static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
5075{
5076	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5077
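	/*
	 * Each fastop is a table of stubs, one per operand size, spaced
	 * FASTOP_SIZE bytes apart with the byte variant first; for
	 * non-byte ops, __ffs(dst.bytes) steps to the 2/4/8-byte stub.
	 * Operands travel in RAX (dst), RDX (src) and RCX (src2), and the
	 * emulated flags are loaded into EFLAGS around the indirect call.
	 * A faulting stub returns with fop == NULL (checked below).
	 */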
5078	if (!(ctxt->d & ByteOp))
5079		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5080
5081	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5082	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5083	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5084	    : "c"(ctxt->src2.val));
5085
5086	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5087	if (!fop) /* exception is returned in fop variable */
5088		return emulate_de(ctxt);
5089	return X86EMUL_CONTINUE;
5090}
5091
5092void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5093{
5094	/* Clear fields that are set conditionally but read without a guard. */
5095	ctxt->rip_relative = false;
5096	ctxt->rex_prefix = 0;
5097	ctxt->lock_prefix = 0;
5098	ctxt->rep_prefix = 0;
5099	ctxt->regs_valid = 0;
5100	ctxt->regs_dirty = 0;
5101
5102	ctxt->io_read.pos = 0;
5103	ctxt->io_read.end = 0;
5104	ctxt->mem_read.end = 0;
5105}
5106
5107int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5108{
5109	const struct x86_emulate_ops *ops = ctxt->ops;
5110	int rc = X86EMUL_CONTINUE;
5111	int saved_dst_type = ctxt->dst.type;
5112	bool is_guest_mode = ctxt->ops->is_guest_mode(ctxt);
5113
5114	ctxt->mem_read.pos = 0;
5115
5116	/* The LOCK prefix is valid only for certain instructions, and only with a memory destination. */
5117	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5118		rc = emulate_ud(ctxt);
5119		goto done;
5120	}
5121
5122	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5123		rc = emulate_ud(ctxt);
5124		goto done;
5125	}
5126
5127	if (unlikely(ctxt->d &
5128		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5129		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5130				(ctxt->d & Undefined)) {
5131			rc = emulate_ud(ctxt);
5132			goto done;
5133		}
5134
5135		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5136		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5137			rc = emulate_ud(ctxt);
5138			goto done;
5139		}
5140
5141		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5142			rc = emulate_nm(ctxt);
5143			goto done;
5144		}
5145
5146		if (ctxt->d & Mmx) {
5147			rc = flush_pending_x87_faults(ctxt);
5148			if (rc != X86EMUL_CONTINUE)
5149				goto done;
5150			/*
5151			 * Now that we know the FPU is exception-safe, we can fetch
5152			 * operands from it.
5153			 */
5154			fetch_possible_mmx_operand(&ctxt->src);
5155			fetch_possible_mmx_operand(&ctxt->src2);
5156			if (!(ctxt->d & Mov))
5157				fetch_possible_mmx_operand(&ctxt->dst);
5158		}
5159
5160		if (unlikely(is_guest_mode) && ctxt->intercept) {
5161			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5162						      X86_ICPT_PRE_EXCEPT);
5163			if (rc != X86EMUL_CONTINUE)
5164				goto done;
5165		}
5166
5167		/* The instruction can be executed only in protected mode. */
5168		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5169			rc = emulate_ud(ctxt);
5170			goto done;
5171		}
5172
5173		/* Privileged instructions can be executed only at CPL 0. */
5174		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5175			if (ctxt->d & PrivUD)
5176				rc = emulate_ud(ctxt);
5177			else
5178				rc = emulate_gp(ctxt, 0);
5179			goto done;
5180		}
5181
5182		/* Do instruction-specific permission checks. */
5183		if (ctxt->d & CheckPerm) {
5184			rc = ctxt->check_perm(ctxt);
5185			if (rc != X86EMUL_CONTINUE)
5186				goto done;
5187		}
5188
5189		if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) {
5190			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5191						      X86_ICPT_POST_EXCEPT);
5192			if (rc != X86EMUL_CONTINUE)
5193				goto done;
5194		}
5195
5196		if (ctxt->rep_prefix && (ctxt->d & String)) {
5197			/* All REP prefixes have the same first termination condition */
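			/* With the masked count register already zero, the instruction retires without performing a single iteration. */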
5198			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5199				string_registers_quirk(ctxt);
5200				ctxt->eip = ctxt->_eip;
5201				ctxt->eflags &= ~X86_EFLAGS_RF;
5202				goto done;
5203			}
5204		}
5205	}
5206
5207	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5208		rc = segmented_read(ctxt, ctxt->src.addr.mem,
5209				    ctxt->src.valptr, ctxt->src.bytes);
5210		if (rc != X86EMUL_CONTINUE)
5211			goto done;
5212		ctxt->src.orig_val64 = ctxt->src.val64;
5213	}
5214
5215	if (ctxt->src2.type == OP_MEM) {
5216		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5217				    &ctxt->src2.val, ctxt->src2.bytes);
5218		if (rc != X86EMUL_CONTINUE)
5219			goto done;
5220	}
5221
5222	if ((ctxt->d & DstMask) == ImplicitOps)
5223		goto special_insn;
5224
5225
5226	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5227		/* Optimisation: skip the slow emulated read when the destination is only written (Mov). */
5228		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5229				   &ctxt->dst.val, ctxt->dst.bytes);
5230		if (rc != X86EMUL_CONTINUE) {
5231			if (!(ctxt->d & NoWrite) &&
5232			    rc == X86EMUL_PROPAGATE_FAULT &&
5233			    ctxt->exception.vector == PF_VECTOR)
5234				ctxt->exception.error_code |= PFERR_WRITE_MASK;
5235			goto done;
5236		}
5237	}
5238	/* Copy full 64-bit value for CMPXCHG8B.  */
5239	ctxt->dst.orig_val64 = ctxt->dst.val64;
5240
5241special_insn:
5242
5243	if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) {
5244		rc = emulator_check_intercept(ctxt, ctxt->intercept,
5245					      X86_ICPT_POST_MEMACCESS);
5246		if (rc != X86EMUL_CONTINUE)
5247			goto done;
5248	}
5249
5250	if (ctxt->rep_prefix && (ctxt->d & String))
5251		ctxt->eflags |= X86_EFLAGS_RF;
5252	else
5253		ctxt->eflags &= ~X86_EFLAGS_RF;
5254
5255	if (ctxt->execute) {
5256		if (ctxt->d & Fastop)
5257			rc = fastop(ctxt, ctxt->fop);
5258		else
5259			rc = ctxt->execute(ctxt);
5260		if (rc != X86EMUL_CONTINUE)
5261			goto done;
5262		goto writeback;
5263	}
5264
5265	if (ctxt->opcode_len == 2)
5266		goto twobyte_insn;
5267	else if (ctxt->opcode_len == 3)
5268		goto threebyte_insn;
5269
5270	switch (ctxt->b) {
5271	case 0x70 ... 0x7f: /* jcc (short) */
5272		if (test_cc(ctxt->b, ctxt->eflags))
5273			rc = jmp_rel(ctxt, ctxt->src.val);
5274		break;
5275	case 0x8d: /* lea r16/r32, m */
5276		ctxt->dst.val = ctxt->src.addr.mem.ea;
5277		break;
5278	case 0x90 ... 0x97: /* nop / xchg reg, rax */
5279		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5280			ctxt->dst.type = OP_NONE;
5281		else
5282			rc = em_xchg(ctxt);
5283		break;
5284	case 0x98: /* cbw/cwde/cdqe */
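		/* Sign-extend the lower half of rAX in place, e.g. CBW extends AL into AX. */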
5285		switch (ctxt->op_bytes) {
5286		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5287		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5288		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5289		}
5290		break;
5291	case 0xcc:		/* int3 */
5292		rc = emulate_int(ctxt, 3);
5293		break;
5294	case 0xcd:		/* int n */
5295		rc = emulate_int(ctxt, ctxt->src.val);
5296		break;
5297	case 0xce:		/* into */
5298		if (ctxt->eflags & X86_EFLAGS_OF)
5299			rc = emulate_int(ctxt, 4);
5300		break;
5301	case 0xe9: /* jmp rel */
5302	case 0xeb: /* jmp rel short */
5303		rc = jmp_rel(ctxt, ctxt->src.val);
5304		ctxt->dst.type = OP_NONE; /* Disable writeback. */
5305		break;
5306	case 0xf4:              /* hlt */
5307		ctxt->ops->halt(ctxt);
5308		break;
5309	case 0xf5:	/* cmc */
5310		/* complement the carry flag in EFLAGS */
5311		ctxt->eflags ^= X86_EFLAGS_CF;
5312		break;
5313	case 0xf8: /* clc */
5314		ctxt->eflags &= ~X86_EFLAGS_CF;
5315		break;
5316	case 0xf9: /* stc */
5317		ctxt->eflags |= X86_EFLAGS_CF;
5318		break;
5319	case 0xfc: /* cld */
5320		ctxt->eflags &= ~X86_EFLAGS_DF;
5321		break;
5322	case 0xfd: /* std */
5323		ctxt->eflags |= X86_EFLAGS_DF;
5324		break;
5325	default:
5326		goto cannot_emulate;
5327	}
5328
5329	if (rc != X86EMUL_CONTINUE)
5330		goto done;
5331
5332writeback:
5333	if (ctxt->d & SrcWrite) {
5334		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5335		rc = writeback(ctxt, &ctxt->src);
5336		if (rc != X86EMUL_CONTINUE)
5337			goto done;
5338	}
5339	if (!(ctxt->d & NoWrite)) {
5340		rc = writeback(ctxt, &ctxt->dst);
5341		if (rc != X86EMUL_CONTINUE)
5342			goto done;
5343	}
5344
5345	/*
5346	 * Restore the dst type in case the decode is reused
5347	 * (happens for string instructions).
5348	 */
5349	ctxt->dst.type = saved_dst_type;
5350
5351	if ((ctxt->d & SrcMask) == SrcSI)
5352		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5353
5354	if ((ctxt->d & DstMask) == DstDI)
5355		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5356
5357	if (ctxt->rep_prefix && (ctxt->d & String)) {
5358		unsigned int count;
5359		struct read_cache *r = &ctxt->io_read;
5360		if ((ctxt->d & SrcMask) == SrcSI)
5361			count = ctxt->src.count;
5362		else
5363			count = ctxt->dst.count;
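		/*
		 * rCX counts down by the number of iterations actually
		 * performed; batched string I/O may complete more than
		 * one iteration per emulation pass.
		 */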
5364		register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5365
5366		if (!string_insn_completed(ctxt)) {
5367			/*
5368			 * Re-enter the guest when the PIO read-ahead buffer is
5369			 * empty or, if it is not used, after every 1024 iterations.
5370			 */
5371			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5372			    (r->end == 0 || r->end != r->pos)) {
5373				/*
5374				 * Reset the read cache. This usually happens
5375				 * before decode, but since the instruction is
5376				 * restarted we have to do it here.
5377				 */
5378				ctxt->mem_read.end = 0;
5379				writeback_registers(ctxt);
5380				return EMULATION_RESTART;
5381			}
5382			goto done; /* skip RIP writeback */
5383		}
5384		ctxt->eflags &= ~X86_EFLAGS_RF;
5385	}
5386
5387	ctxt->eip = ctxt->_eip;
5388	if (ctxt->mode != X86EMUL_MODE_PROT64)
5389		ctxt->eip = (u32)ctxt->_eip;
5390
5391done:
5392	if (rc == X86EMUL_PROPAGATE_FAULT) {
5393		if (KVM_EMULATOR_BUG_ON(ctxt->exception.vector > 0x1f, ctxt))
5394			return EMULATION_FAILED;
5395		ctxt->have_exception = true;
5396	}
5397	if (rc == X86EMUL_INTERCEPTED)
5398		return EMULATION_INTERCEPTED;
5399
5400	if (rc == X86EMUL_CONTINUE)
5401		writeback_registers(ctxt);
5402
5403	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5404
5405twobyte_insn:
5406	switch (ctxt->b) {
5407	case 0x09:		/* wbinvd */
5408		(ctxt->ops->wbinvd)(ctxt);
5409		break;
5410	case 0x08:		/* invd */
5411	case 0x0d:		/* GrpP (prefetch) */
5412	case 0x18:		/* Grp16 (prefetch/nop) */
5413	case 0x1f:		/* nop */
5414		break;
5415	case 0x20: /* mov cr, reg */
5416		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5417		break;
5418	case 0x21: /* mov from dr to reg */
5419		ctxt->dst.val = ops->get_dr(ctxt, ctxt->modrm_reg);
5420		break;
5421	case 0x40 ... 0x4f:	/* cmov */
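		/*
		 * Note: a 32-bit CMOV in 64-bit mode zero-extends the
		 * destination register even when the condition is false,
		 * so writeback is suppressed only for other operand sizes.
		 */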
5422		if (test_cc(ctxt->b, ctxt->eflags))
5423			ctxt->dst.val = ctxt->src.val;
5424		else if (ctxt->op_bytes != 4)
5425			ctxt->dst.type = OP_NONE; /* no writeback */
5426		break;
5427	case 0x80 ... 0x8f: /* jnz rel, etc*/
5428		if (test_cc(ctxt->b, ctxt->eflags))
5429			rc = jmp_rel(ctxt, ctxt->src.val);
5430		break;
5431	case 0x90 ... 0x9f:     /* setcc r/m8 */
5432		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5433		break;
5434	case 0xb6 ... 0xb7:	/* movzx */
5435		ctxt->dst.bytes = ctxt->op_bytes;
5436		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5437						       : (u16) ctxt->src.val;
5438		break;
5439	case 0xbe ... 0xbf:	/* movsx */
5440		ctxt->dst.bytes = ctxt->op_bytes;
5441		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5442							(s16) ctxt->src.val;
5443		break;
5444	default:
5445		goto cannot_emulate;
5446	}
5447
5448threebyte_insn:
5449
5450	if (rc != X86EMUL_CONTINUE)
5451		goto done;
5452
5453	goto writeback;
5454
5455cannot_emulate:
5456	return EMULATION_FAILED;
5457}
5458
5459void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5460{
5461	invalidate_registers(ctxt);
5462}
5463
5464void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5465{
5466	writeback_registers(ctxt);
5467}
5468
5469bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5470{
5471	if (ctxt->rep_prefix && (ctxt->d & String))
5472		return false;
5473
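	/*
	 * Instructions with two memory operands (e.g. MOVS) touch two
	 * distinct linear addresses, so a single cached GPA translation
	 * cannot safely be reused.
	 */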
5474	if (ctxt->d & TwoMemOp)
5475		return false;
5476
5477	return true;
5478}