   1/******************************************************************************
   2 * emulate.c
   3 *
   4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
   5 *
   6 * Copyright (c) 2005 Keir Fraser
   7 *
   8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
   9 * privileged instructions:
  10 *
  11 * Copyright (C) 2006 Qumranet
  12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  13 *
  14 *   Avi Kivity <avi@qumranet.com>
  15 *   Yaniv Kamay <yaniv@qumranet.com>
  16 *
  17 * This work is licensed under the terms of the GNU GPL, version 2.  See
  18 * the COPYING file in the top-level directory.
  19 *
  20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
  21 */
  22
  23#include <linux/kvm_host.h>
  24#include "kvm_cache_regs.h"
  25#include <linux/module.h>
  26#include <asm/kvm_emulate.h>
  27
  28#include "x86.h"
  29#include "tss.h"
  30
  31/*
  32 * Opcode effective-address decode tables.
  33 * Note that we only emulate instructions that have at least one memory
  34 * operand (excluding implicit stack references). We assume that stack
  35 * references and instruction fetches will never occur in special memory
  36 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
  37 * not be handled.
  38 */
  39
  40/* Operand sizes: 8-bit operands or specified/overridden size. */
  41#define ByteOp      (1<<0)	/* 8-bit operands. */
  42/* Destination operand type. */
  43#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
  44#define DstReg      (2<<1)	/* Register operand. */
  45#define DstMem      (3<<1)	/* Memory operand. */
  46#define DstAcc      (4<<1)	/* Destination Accumulator */
  47#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
  48#define DstMem64    (6<<1)	/* 64bit memory operand */
  49#define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
  50#define DstDX       (8<<1)	/* Destination is in DX register */
  51#define DstMask     (0xf<<1)
  52/* Source operand type. */
  53#define SrcNone     (0<<5)	/* No source operand. */
  54#define SrcReg      (1<<5)	/* Register operand. */
  55#define SrcMem      (2<<5)	/* Memory operand. */
  56#define SrcMem16    (3<<5)	/* Memory operand (16-bit). */
  57#define SrcMem32    (4<<5)	/* Memory operand (32-bit). */
  58#define SrcImm      (5<<5)	/* Immediate operand. */
  59#define SrcImmByte  (6<<5)	/* 8-bit sign-extended immediate operand. */
  60#define SrcOne      (7<<5)	/* Implied '1' */
  61#define SrcImmUByte (8<<5)      /* 8-bit unsigned immediate operand. */
  62#define SrcImmU     (9<<5)      /* Immediate operand, unsigned */
  63#define SrcSI       (0xa<<5)	/* Source is in the DS:RSI */
  64#define SrcImmFAddr (0xb<<5)	/* Source is immediate far address */
  65#define SrcMemFAddr (0xc<<5)	/* Source is far address in memory */
  66#define SrcAcc      (0xd<<5)	/* Source Accumulator */
  67#define SrcImmU16   (0xe<<5)    /* Immediate operand, unsigned, 16 bits */
  68#define SrcDX       (0xf<<5)	/* Source is in DX register */
  69#define SrcMask     (0xf<<5)
  70/* Generic ModRM decode. */
  71#define ModRM       (1<<9)
  72/* Destination is only written; never read. */
  73#define Mov         (1<<10)
  74#define BitOp       (1<<11)
  75#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
  76#define String      (1<<13)     /* String instruction (rep capable) */
  77#define Stack       (1<<14)     /* Stack instruction (push/pop) */
  78#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
  79#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
  80#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
  81#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
  82#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
  83#define Sse         (1<<18)     /* SSE Vector instruction */
  84/* Misc flags */
  85#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
  86#define VendorSpecific (1<<22) /* Vendor specific instruction */
  87#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
  88#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
  89#define Undefined   (1<<25) /* No Such Instruction */
  90#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
  91#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
  92#define No64	    (1<<28)
  93/* Source 2 operand type */
  94#define Src2None    (0<<29)
  95#define Src2CL      (1<<29)
  96#define Src2ImmByte (2<<29)
  97#define Src2One     (3<<29)
  98#define Src2Imm     (4<<29)
  99#define Src2Mask    (7<<29)
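/*
 * For illustration only (not an entry copied from the opcode tables
 * further down in the file): a descriptor for "mov r/m8, r8" (opcode
 * 0x88) would combine the flags above as
 *
 *	ByteOp | DstMem | SrcReg | ModRM | Mov
 *
 * i.e. an 8-bit, ModRM-decoded instruction whose destination may be
 * memory, whose source is a register, and whose destination is only
 * written, never read.
 */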
 100
 101#define X2(x...) x, x
 102#define X3(x...) X2(x), x
 103#define X4(x...) X2(x), X2(x)
 104#define X5(x...) X4(x), x
 105#define X6(x...) X4(x), X2(x)
 106#define X7(x...) X4(x), X3(x)
 107#define X8(x...) X4(x), X4(x)
 108#define X16(x...) X8(x), X8(x)
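/*
 * The X* macros simply repeat their argument so that runs of identical
 * opcode-table entries can be written once; e.g. X4(0x90) expands to
 * "0x90, 0x90, 0x90, 0x90", and X16(...) fills a sixteen-entry row.
 */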
 109
 110struct opcode {
 111	u32 flags;
 112	u8 intercept;
 113	union {
 114		int (*execute)(struct x86_emulate_ctxt *ctxt);
 115		struct opcode *group;
 116		struct group_dual *gdual;
 117		struct gprefix *gprefix;
 118	} u;
 119	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
 120};
 121
 122struct group_dual {
 123	struct opcode mod012[8];
 124	struct opcode mod3[8];
 125};
 126
 127struct gprefix {
 128	struct opcode pfx_no;
 129	struct opcode pfx_66;
 130	struct opcode pfx_f2;
 131	struct opcode pfx_f3;
 132};
 133
 134/* EFLAGS bit definitions. */
 135#define EFLG_ID (1<<21)
 136#define EFLG_VIP (1<<20)
 137#define EFLG_VIF (1<<19)
 138#define EFLG_AC (1<<18)
 139#define EFLG_VM (1<<17)
 140#define EFLG_RF (1<<16)
 141#define EFLG_IOPL (3<<12)
 142#define EFLG_NT (1<<14)
 143#define EFLG_OF (1<<11)
 144#define EFLG_DF (1<<10)
 145#define EFLG_IF (1<<9)
 146#define EFLG_TF (1<<8)
 147#define EFLG_SF (1<<7)
 148#define EFLG_ZF (1<<6)
 149#define EFLG_AF (1<<4)
 150#define EFLG_PF (1<<2)
 151#define EFLG_CF (1<<0)
 152
 153#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
 154#define EFLG_RESERVED_ONE_MASK 2
 155
 156/*
 157 * Instruction emulation:
 158 * Most instructions are emulated directly via a fragment of inline assembly
 159 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 160 * any modified flags.
 161 */
 162
 163#if defined(CONFIG_X86_64)
 164#define _LO32 "k"		/* force 32-bit operand */
 165#define _STK  "%%rsp"		/* stack pointer */
 166#elif defined(__i386__)
 167#define _LO32 ""		/* force 32-bit operand */
 168#define _STK  "%%esp"		/* stack pointer */
 169#endif
 170
 171/*
 172 * These EFLAGS bits are restored from saved value during emulation, and
 173 * any changes are written back to the saved value after emulation.
 174 */
 175#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
 176
 177/* Before executing instruction: restore necessary bits in EFLAGS. */
 178#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
 179	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
 180	"movl %"_sav",%"_LO32 _tmp"; "                                  \
 181	"push %"_tmp"; "                                                \
 182	"push %"_tmp"; "                                                \
 183	"movl %"_msk",%"_LO32 _tmp"; "                                  \
 184	"andl %"_LO32 _tmp",("_STK"); "                                 \
 185	"pushf; "                                                       \
 186	"notl %"_LO32 _tmp"; "                                          \
 187	"andl %"_LO32 _tmp",("_STK"); "                                 \
 188	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
 189	"pop  %"_tmp"; "                                                \
 190	"orl  %"_LO32 _tmp",("_STK"); "                                 \
 191	"popf; "                                                        \
 192	"pop  %"_sav"; "
 193
 194/* After executing instruction: write-back necessary bits in EFLAGS. */
 195#define _POST_EFLAGS(_sav, _msk, _tmp) \
 196	/* _sav |= EFLAGS & _msk; */		\
 197	"pushf; "				\
 198	"pop  %"_tmp"; "			\
 199	"andl %"_msk",%"_LO32 _tmp"; "		\
 200	"orl  %"_LO32 _tmp",%"_sav"; "
 201
 202#ifdef CONFIG_X86_64
 203#define ON64(x) x
 204#else
 205#define ON64(x)
 206#endif
 207
 208#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
 209	do {								\
 210		__asm__ __volatile__ (					\
 211			_PRE_EFLAGS("0", "4", "2")			\
 212			_op _suffix " %"_x"3,%1; "			\
 213			_POST_EFLAGS("0", "4", "2")			\
 214			: "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
 215			  "=&r" (_tmp)					\
 216			: _y ((_src).val), "i" (EFLAGS_MASK));		\
 217	} while (0)
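/*
 * Sketch of what the macro above emits for a 32-bit "add" (simplified,
 * constraint numbers omitted):
 *
 *	<_PRE_EFLAGS:  merge the saved guest flags into EFLAGS>
 *	addl	%src, %dst
 *	<_POST_EFLAGS: mask the resulting EFLAGS back into the save area>
 *
 * The host ALU thus computes both the result and the arithmetic flags;
 * nothing recomputes OF/SF/ZF/AF/PF/CF by hand.
 */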
 218
 219
 220/* Raw emulation: instruction has two explicit operands. */
 221#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
 222	do {								\
 223		unsigned long _tmp;					\
 224									\
 225		switch ((_dst).bytes) {					\
 226		case 2:							\
 227			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
 228			break;						\
 229		case 4:							\
 230			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
 231			break;						\
 232		case 8:							\
 233			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
 234			break;						\
 235		}							\
 236	} while (0)
 237
 238#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
 239	do {								     \
 240		unsigned long _tmp;					     \
 241		switch ((_dst).bytes) {				             \
 242		case 1:							     \
 243			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
 244			break;						     \
 245		default:						     \
 246			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
 247					     _wx, _wy, _lx, _ly, _qx, _qy);  \
 248			break;						     \
 249		}							     \
 250	} while (0)
 251
 252/* Source operand is byte-sized and may be restricted to just %cl. */
 253#define emulate_2op_SrcB(_op, _src, _dst, _eflags)                      \
 254	__emulate_2op(_op, _src, _dst, _eflags,				\
 255		      "b", "c", "b", "c", "b", "c", "b", "c")
 256
 257/* Source operand is byte, word, long or quad sized. */
 258#define emulate_2op_SrcV(_op, _src, _dst, _eflags)                      \
 259	__emulate_2op(_op, _src, _dst, _eflags,				\
 260		      "b", "q", "w", "r", _LO32, "r", "", "r")
 261
 262/* Source operand is word, long or quad sized. */
 263#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)               \
 264	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
 265			     "w", "r", _LO32, "r", "", "r")
 266
 267/* Instruction has three operands and one operand is stored in ECX register */
 268#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type)	\
 269	do {								\
 270		unsigned long _tmp;					\
 271		_type _clv  = (_cl).val;				\
 272		_type _srcv = (_src).val;				\
 273		_type _dstv = (_dst).val;				\
 274									\
 275		__asm__ __volatile__ (					\
 276			_PRE_EFLAGS("0", "5", "2")			\
 277			_op _suffix " %4,%1 \n"				\
 278			_POST_EFLAGS("0", "5", "2")			\
 279			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
 280			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
 281			);						\
 282									\
 283		(_cl).val  = (unsigned long) _clv;			\
 284		(_src).val = (unsigned long) _srcv;			\
 285		(_dst).val = (unsigned long) _dstv;			\
 286	} while (0)
 287
 288#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
 289	do {								\
 290		switch ((_dst).bytes) {					\
 291		case 2:							\
 292			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
 293					 "w", unsigned short);         	\
 294			break;						\
 295		case 4:							\
 296			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
 297					 "l", unsigned int);           	\
 298			break;						\
 299		case 8:							\
 300			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
 301					      "q", unsigned long));	\
 302			break;						\
 303		}							\
 304	} while (0)
 305
 306#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
 307	do {								\
 308		unsigned long _tmp;					\
 309									\
 310		__asm__ __volatile__ (					\
 311			_PRE_EFLAGS("0", "3", "2")			\
 312			_op _suffix " %1; "				\
 313			_POST_EFLAGS("0", "3", "2")			\
 314			: "=m" (_eflags), "+m" ((_dst).val),		\
 315			  "=&r" (_tmp)					\
 316			: "i" (EFLAGS_MASK));				\
 317	} while (0)
 318
 319/* Instruction has only one explicit operand (no source operand). */
 320#define emulate_1op(_op, _dst, _eflags)                                    \
 321	do {								\
 322		switch ((_dst).bytes) {				        \
 323		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
 324		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
 325		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
 326		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
 327		}							\
 328	} while (0)
 329
 330#define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix)		\
 331	do {								\
 332		unsigned long _tmp;					\
 333									\
 334		__asm__ __volatile__ (					\
 335			_PRE_EFLAGS("0", "4", "1")			\
 336			_op _suffix " %5; "				\
 337			_POST_EFLAGS("0", "4", "1")			\
 338			: "=m" (_eflags), "=&r" (_tmp),			\
 339			  "+a" (_rax), "+d" (_rdx)			\
 340			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
 341			  "a" (_rax), "d" (_rdx));			\
 342	} while (0)
 343
 344#define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
 345	do {								\
 346		unsigned long _tmp;					\
 347									\
 348		__asm__ __volatile__ (					\
 349			_PRE_EFLAGS("0", "5", "1")			\
 350			"1: \n\t"					\
 351			_op _suffix " %6; "				\
 352			"2: \n\t"					\
 353			_POST_EFLAGS("0", "5", "1")			\
 354			".pushsection .fixup,\"ax\" \n\t"		\
 355			"3: movb $1, %4 \n\t"				\
 356			"jmp 2b \n\t"					\
 357			".popsection \n\t"				\
 358			_ASM_EXTABLE(1b, 3b)				\
 359			: "=m" (_eflags), "=&r" (_tmp),			\
 360			  "+a" (_rax), "+d" (_rdx), "+qm"(_ex)		\
 361			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
 362			  "a" (_rax), "d" (_rdx));			\
 363	} while (0)
 364
 365/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
 366#define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags)		\
 367	do {								\
 368		switch((_src).bytes) {					\
 369		case 1:							\
 370			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx,	\
 371					      _eflags, "b");		\
 372			break;						\
 373		case 2:							\
 374			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx,	\
 375					      _eflags, "w");		\
 376			break;						\
 377		case 4:							\
 378			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx,	\
 379					      _eflags, "l");		\
 380			break;						\
 381		case 8:							\
 382			ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
 383						   _eflags, "q"));	\
 384			break;						\
 385		}							\
 386	} while (0)
 387
 388#define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex)	\
 389	do {								\
 390		switch((_src).bytes) {					\
 391		case 1:							\
 392			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx,	\
 393						 _eflags, "b", _ex);	\
 394			break;						\
 395		case 2:							\
 396			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
 397						 _eflags, "w", _ex);	\
 398			break;						\
 399		case 4:							\
 400			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
 401						 _eflags, "l", _ex);	\
 402			break;						\
 403		case 8: ON64(						\
 404			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
 405						 _eflags, "q", _ex));	\
 406			break;						\
 407		}							\
 408	} while (0)
 409
 410static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
 411				    enum x86_intercept intercept,
 412				    enum x86_intercept_stage stage)
 413{
 414	struct x86_instruction_info info = {
 415		.intercept  = intercept,
 416		.rep_prefix = ctxt->rep_prefix,
 417		.modrm_mod  = ctxt->modrm_mod,
 418		.modrm_reg  = ctxt->modrm_reg,
 419		.modrm_rm   = ctxt->modrm_rm,
 420		.src_val    = ctxt->src.val64,
 421		.src_bytes  = ctxt->src.bytes,
 422		.dst_bytes  = ctxt->dst.bytes,
 423		.ad_bytes   = ctxt->ad_bytes,
 424		.next_rip   = ctxt->eip,
 425	};
 426
 427	return ctxt->ops->intercept(ctxt, &info, stage);
 428}
 429
 430static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
 431{
 432	return (1UL << (ctxt->ad_bytes << 3)) - 1;
 433}
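/*
 * Example: ad_bytes == 2 gives (1UL << 16) - 1 = 0xffff and
 * ad_bytes == 4 gives 0xffffffff.  Callers below first test
 * ad_bytes == sizeof(unsigned long), for which the shift above
 * would overflow.
 */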
 434
 435/* Access/update address held in a register, based on addressing mode. */
 436static inline unsigned long
 437address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
 438{
 439	if (ctxt->ad_bytes == sizeof(unsigned long))
 440		return reg;
 441	else
 442		return reg & ad_mask(ctxt);
 443}
 444
 445static inline unsigned long
 446register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
 447{
 448	return address_mask(ctxt, reg);
 449}
 450
 451static inline void
 452register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
 453{
 454	if (ctxt->ad_bytes == sizeof(unsigned long))
 455		*reg += inc;
 456	else
 457		*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
 458}
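/*
 * Example: with ad_bytes == 2 and *reg == 0x1ffff, an increment of 1
 * leaves 0x10000: only the low 16 bits wrap (0xffff to 0x0000), bits
 * above the address size are preserved.
 */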
 459
 460static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
 461{
 462	register_address_increment(ctxt, &ctxt->_eip, rel);
 463}
 464
 465static u32 desc_limit_scaled(struct desc_struct *desc)
 466{
 467	u32 limit = get_desc_limit(desc);
 468
 469	return desc->g ? (limit << 12) | 0xfff : limit;
 470}
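/*
 * With the granularity bit set, the 20-bit limit counts 4K pages; e.g.
 * g == 1 with limit == 0xfffff scales to 0xffffffff (4 GiB - 1).
 */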
 471
 472static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
 473{
 474	ctxt->has_seg_override = true;
 475	ctxt->seg_override = seg;
 476}
 477
 478static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
 479{
 480	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
 481		return 0;
 482
 483	return ctxt->ops->get_cached_segment_base(ctxt, seg);
 484}
 485
 486static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
 487{
 488	if (!ctxt->has_seg_override)
 489		return 0;
 490
 491	return ctxt->seg_override;
 492}
 493
 494static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
 495			     u32 error, bool valid)
 496{
 497	ctxt->exception.vector = vec;
 498	ctxt->exception.error_code = error;
 499	ctxt->exception.error_code_valid = valid;
 500	return X86EMUL_PROPAGATE_FAULT;
 501}
 502
 503static int emulate_db(struct x86_emulate_ctxt *ctxt)
 504{
 505	return emulate_exception(ctxt, DB_VECTOR, 0, false);
 506}
 507
 508static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
 509{
 510	return emulate_exception(ctxt, GP_VECTOR, err, true);
 511}
 512
 513static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
 514{
 515	return emulate_exception(ctxt, SS_VECTOR, err, true);
 516}
 517
 518static int emulate_ud(struct x86_emulate_ctxt *ctxt)
 519{
 520	return emulate_exception(ctxt, UD_VECTOR, 0, false);
 521}
 522
 523static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
 524{
 525	return emulate_exception(ctxt, TS_VECTOR, err, true);
 526}
 527
 528static int emulate_de(struct x86_emulate_ctxt *ctxt)
 529{
 530	return emulate_exception(ctxt, DE_VECTOR, 0, false);
 531}
 532
 533static int emulate_nm(struct x86_emulate_ctxt *ctxt)
 534{
 535	return emulate_exception(ctxt, NM_VECTOR, 0, false);
 536}
 537
 538static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
 539{
 540	u16 selector;
 541	struct desc_struct desc;
 542
 543	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
 544	return selector;
 545}
 546
 547static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
 548				 unsigned seg)
 549{
 550	u16 dummy;
 551	u32 base3;
 552	struct desc_struct desc;
 553
 554	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
 555	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
 556}
 557
 558static int __linearize(struct x86_emulate_ctxt *ctxt,
 559		     struct segmented_address addr,
 560		     unsigned size, bool write, bool fetch,
 561		     ulong *linear)
 562{
 563	struct desc_struct desc;
 564	bool usable;
 565	ulong la;
 566	u32 lim;
 567	u16 sel;
 568	unsigned cpl, rpl;
 569
 570	la = seg_base(ctxt, addr.seg) + addr.ea;
 571	switch (ctxt->mode) {
 572	case X86EMUL_MODE_REAL:
 573		break;
 574	case X86EMUL_MODE_PROT64:
 575		if (((signed long)la << 16) >> 16 != la)
 576			return emulate_gp(ctxt, 0);
 577		break;
 578	default:
 579		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
 580						addr.seg);
 581		if (!usable)
 582			goto bad;
 583		/* code segment or read-only data segment */
 584		if (((desc.type & 8) || !(desc.type & 2)) && write)
 585			goto bad;
 586		/* unreadable code segment */
 587		if (!fetch && (desc.type & 8) && !(desc.type & 2))
 588			goto bad;
 589		lim = desc_limit_scaled(&desc);
 590		if ((desc.type & 8) || !(desc.type & 4)) {
 591			/* expand-up segment */
 592			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
 593				goto bad;
 594		} else {
  595			/* expand-down segment */
 596			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
 597				goto bad;
 598			lim = desc.d ? 0xffffffff : 0xffff;
 599			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
 600				goto bad;
 601		}
 602		cpl = ctxt->ops->cpl(ctxt);
 603		rpl = sel & 3;
 604		cpl = max(cpl, rpl);
 605		if (!(desc.type & 8)) {
 606			/* data segment */
 607			if (cpl > desc.dpl)
 608				goto bad;
 609		} else if ((desc.type & 8) && !(desc.type & 4)) {
 610			/* nonconforming code segment */
 611			if (cpl != desc.dpl)
 612				goto bad;
 613		} else if ((desc.type & 8) && (desc.type & 4)) {
 614			/* conforming code segment */
 615			if (cpl < desc.dpl)
 616				goto bad;
 617		}
 618		break;
 619	}
 620	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
 621		la &= (u32)-1;
 622	*linear = la;
 623	return X86EMUL_CONTINUE;
 624bad:
 625	if (addr.seg == VCPU_SREG_SS)
 626		return emulate_ss(ctxt, addr.seg);
 627	else
 628		return emulate_gp(ctxt, addr.seg);
 629}
 630
 631static int linearize(struct x86_emulate_ctxt *ctxt,
 632		     struct segmented_address addr,
 633		     unsigned size, bool write,
 634		     ulong *linear)
 635{
 636	return __linearize(ctxt, addr, size, write, false, linear);
 637}
 638
 639
 640static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 641			      struct segmented_address addr,
 642			      void *data,
 643			      unsigned size)
 644{
 645	int rc;
 646	ulong linear;
 647
 648	rc = linearize(ctxt, addr, size, false, &linear);
 649	if (rc != X86EMUL_CONTINUE)
 650		return rc;
 651	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
 652}
 653
 654static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt,
 655			      unsigned long eip, u8 *dest)
 656{
 657	struct fetch_cache *fc = &ctxt->fetch;
 658	int rc;
 659	int size, cur_size;
 660
 661	if (eip == fc->end) {
 662		unsigned long linear;
 663		struct segmented_address addr = { .seg=VCPU_SREG_CS, .ea=eip};
 664		cur_size = fc->end - fc->start;
 665		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
 666		rc = __linearize(ctxt, addr, size, false, true, &linear);
 667		if (rc != X86EMUL_CONTINUE)
 668			return rc;
 669		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
 670				      size, &ctxt->exception);
 671		if (rc != X86EMUL_CONTINUE)
 672			return rc;
 673		fc->end += size;
 674	}
 675	*dest = fc->data[eip - fc->start];
 676	return X86EMUL_CONTINUE;
 677}
 678
 679static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
 680			 unsigned long eip, void *dest, unsigned size)
 681{
 682	int rc;
 683
 684	/* x86 instructions are limited to 15 bytes. */
 685	if (eip + size - ctxt->eip > 15)
 686		return X86EMUL_UNHANDLEABLE;
 687	while (size--) {
 688		rc = do_insn_fetch_byte(ctxt, eip++, dest++);
 689		if (rc != X86EMUL_CONTINUE)
 690			return rc;
 691	}
 692	return X86EMUL_CONTINUE;
 693}
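/*
 * The 15-byte cap above is the architectural maximum length of an x86
 * instruction; e.g. fetching a 4-byte displacement that starts 12 bytes
 * past ctxt->eip (16 bytes total) is rejected rather than read.
 */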
 694
 695/* Fetch next part of the instruction being emulated. */
 696#define insn_fetch(_type, _size, _eip)					\
 697({	unsigned long _x;						\
 698	rc = do_insn_fetch(ctxt, (_eip), &_x, (_size));			\
 699	if (rc != X86EMUL_CONTINUE)					\
 700		goto done;						\
 701	(_eip) += (_size);						\
 702	(_type)_x;							\
 703})
 704
 705#define insn_fetch_arr(_arr, _size, _eip)				\
 706({	rc = do_insn_fetch(ctxt, (_eip), _arr, (_size));		\
 707	if (rc != X86EMUL_CONTINUE)					\
 708		goto done;						\
 709	(_eip) += (_size);						\
 710})
 711
 712/*
 713 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 714 * pointer into the block that addresses the relevant register.
 715 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 716 */
 717static void *decode_register(u8 modrm_reg, unsigned long *regs,
 718			     int highbyte_regs)
 719{
 720	void *p;
 721
 722	p = &regs[modrm_reg];
 723	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
 724		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
 725	return p;
 726}
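/*
 * Example: with highbyte_regs set (byte operation, no REX prefix),
 * modrm_reg == 4 selects AH, the second byte of regs[0], rather than
 * the low byte of RSP; modrm_reg == 7 likewise selects BH in regs[3].
 */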
 727
 728static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 729			   struct segmented_address addr,
 730			   u16 *size, unsigned long *address, int op_bytes)
 731{
 732	int rc;
 733
 734	if (op_bytes == 2)
 735		op_bytes = 3;
 736	*address = 0;
 737	rc = segmented_read_std(ctxt, addr, size, 2);
 738	if (rc != X86EMUL_CONTINUE)
 739		return rc;
 740	addr.ea += 2;
 741	rc = segmented_read_std(ctxt, addr, address, op_bytes);
 742	return rc;
 743}
 744
 745static int test_cc(unsigned int condition, unsigned int flags)
 746{
 747	int rc = 0;
 748
 749	switch ((condition & 15) >> 1) {
 750	case 0: /* o */
 751		rc |= (flags & EFLG_OF);
 752		break;
 753	case 1: /* b/c/nae */
 754		rc |= (flags & EFLG_CF);
 755		break;
 756	case 2: /* z/e */
 757		rc |= (flags & EFLG_ZF);
 758		break;
 759	case 3: /* be/na */
 760		rc |= (flags & (EFLG_CF|EFLG_ZF));
 761		break;
 762	case 4: /* s */
 763		rc |= (flags & EFLG_SF);
 764		break;
 765	case 5: /* p/pe */
 766		rc |= (flags & EFLG_PF);
 767		break;
 768	case 7: /* le/ng */
 769		rc |= (flags & EFLG_ZF);
 770		/* fall through */
 771	case 6: /* l/nge */
 772		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
 773		break;
 774	}
 775
 776	/* Odd condition identifiers (lsb == 1) have inverted sense. */
 777	return (!!rc ^ (condition & 1));
 778}
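/*
 * Example: "je" carries condition code 0x4, so case 2 above tests ZF;
 * "jne" (0x5) hits the same case and the odd lsb inverts the result.
 */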
 779
 780static void fetch_register_operand(struct operand *op)
 781{
 782	switch (op->bytes) {
 783	case 1:
 784		op->val = *(u8 *)op->addr.reg;
 785		break;
 786	case 2:
 787		op->val = *(u16 *)op->addr.reg;
 788		break;
 789	case 4:
 790		op->val = *(u32 *)op->addr.reg;
 791		break;
 792	case 8:
 793		op->val = *(u64 *)op->addr.reg;
 794		break;
 795	}
 796}
 797
 798static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
 799{
 800	ctxt->ops->get_fpu(ctxt);
 801	switch (reg) {
 802	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
 803	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
 804	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
 805	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
 806	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
 807	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
 808	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
 809	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
 810#ifdef CONFIG_X86_64
 811	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
 812	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
 813	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
 814	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
 815	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
 816	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
 817	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
 818	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
 819#endif
 820	default: BUG();
 821	}
 822	ctxt->ops->put_fpu(ctxt);
 823}
 824
 825static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
 826			  int reg)
 827{
 828	ctxt->ops->get_fpu(ctxt);
 829	switch (reg) {
 830	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
 831	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
 832	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
 833	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
 834	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
 835	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
 836	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
 837	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
 838#ifdef CONFIG_X86_64
 839	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
 840	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
 841	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
 842	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
 843	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
 844	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
 845	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
 846	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
 847#endif
 848	default: BUG();
 849	}
 850	ctxt->ops->put_fpu(ctxt);
 851}
 852
 853static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
 854				    struct operand *op,
 855				    int inhibit_bytereg)
 856{
 857	unsigned reg = ctxt->modrm_reg;
 858	int highbyte_regs = ctxt->rex_prefix == 0;
 859
 860	if (!(ctxt->d & ModRM))
 861		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
 862
 863	if (ctxt->d & Sse) {
 864		op->type = OP_XMM;
 865		op->bytes = 16;
 866		op->addr.xmm = reg;
 867		read_sse_reg(ctxt, &op->vec_val, reg);
 868		return;
 869	}
 870
 871	op->type = OP_REG;
 872	if ((ctxt->d & ByteOp) && !inhibit_bytereg) {
 873		op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
 874		op->bytes = 1;
 875	} else {
 876		op->addr.reg = decode_register(reg, ctxt->regs, 0);
 877		op->bytes = ctxt->op_bytes;
 878	}
 879	fetch_register_operand(op);
 880	op->orig_val = op->val;
 881}
 882
 883static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 884			struct operand *op)
 885{
 886	u8 sib;
 887	int index_reg = 0, base_reg = 0, scale;
 888	int rc = X86EMUL_CONTINUE;
 889	ulong modrm_ea = 0;
 890
 891	if (ctxt->rex_prefix) {
 892		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
 893		index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
  894		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
 895	}
 896
 897	ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
 898	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
 899	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
 900	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
 901	ctxt->modrm_seg = VCPU_SREG_DS;
 902
 903	if (ctxt->modrm_mod == 3) {
 904		op->type = OP_REG;
 905		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
 906		op->addr.reg = decode_register(ctxt->modrm_rm,
 907					       ctxt->regs, ctxt->d & ByteOp);
 908		if (ctxt->d & Sse) {
 909			op->type = OP_XMM;
 910			op->bytes = 16;
 911			op->addr.xmm = ctxt->modrm_rm;
 912			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
 913			return rc;
 914		}
 915		fetch_register_operand(op);
 916		return rc;
 917	}
 918
 919	op->type = OP_MEM;
 920
 921	if (ctxt->ad_bytes == 2) {
 922		unsigned bx = ctxt->regs[VCPU_REGS_RBX];
 923		unsigned bp = ctxt->regs[VCPU_REGS_RBP];
 924		unsigned si = ctxt->regs[VCPU_REGS_RSI];
 925		unsigned di = ctxt->regs[VCPU_REGS_RDI];
 926
 927		/* 16-bit ModR/M decode. */
 928		switch (ctxt->modrm_mod) {
 929		case 0:
 930			if (ctxt->modrm_rm == 6)
 931				modrm_ea += insn_fetch(u16, 2, ctxt->_eip);
 932			break;
 933		case 1:
 934			modrm_ea += insn_fetch(s8, 1, ctxt->_eip);
 935			break;
 936		case 2:
 937			modrm_ea += insn_fetch(u16, 2, ctxt->_eip);
 938			break;
 939		}
 940		switch (ctxt->modrm_rm) {
 941		case 0:
 942			modrm_ea += bx + si;
 943			break;
 944		case 1:
 945			modrm_ea += bx + di;
 946			break;
 947		case 2:
 948			modrm_ea += bp + si;
 949			break;
 950		case 3:
 951			modrm_ea += bp + di;
 952			break;
 953		case 4:
 954			modrm_ea += si;
 955			break;
 956		case 5:
 957			modrm_ea += di;
 958			break;
 959		case 6:
 960			if (ctxt->modrm_mod != 0)
 961				modrm_ea += bp;
 962			break;
 963		case 7:
 964			modrm_ea += bx;
 965			break;
 966		}
 967		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
 968		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
 969			ctxt->modrm_seg = VCPU_SREG_SS;
 970		modrm_ea = (u16)modrm_ea;
 971	} else {
 972		/* 32/64-bit ModR/M decode. */
 973		if ((ctxt->modrm_rm & 7) == 4) {
 974			sib = insn_fetch(u8, 1, ctxt->_eip);
 975			index_reg |= (sib >> 3) & 7;
 976			base_reg |= sib & 7;
 977			scale = sib >> 6;
 978
 979			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
 980				modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
 981			else
 982				modrm_ea += ctxt->regs[base_reg];
 983			if (index_reg != 4)
 984				modrm_ea += ctxt->regs[index_reg] << scale;
 985		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
 986			if (ctxt->mode == X86EMUL_MODE_PROT64)
 987				ctxt->rip_relative = 1;
 988		} else
 989			modrm_ea += ctxt->regs[ctxt->modrm_rm];
 990		switch (ctxt->modrm_mod) {
 991		case 0:
 992			if (ctxt->modrm_rm == 5)
 993				modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
 994			break;
 995		case 1:
 996			modrm_ea += insn_fetch(s8, 1, ctxt->_eip);
 997			break;
 998		case 2:
 999			modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
1000			break;
1001		}
1002	}
1003	op->addr.mem.ea = modrm_ea;
1004done:
1005	return rc;
1006}
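/*
 * Worked 16-bit example: modrm == 0x46 followed by displacement byte
 * 0x10 decodes as mod == 1, reg == 0, rm == 6, so modrm_ea == bp + 0x10
 * and, since BP-based forms default to the stack, modrm_seg == SS.
 */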
1007
1008static int decode_abs(struct x86_emulate_ctxt *ctxt,
1009		      struct operand *op)
1010{
1011	int rc = X86EMUL_CONTINUE;
1012
1013	op->type = OP_MEM;
1014	switch (ctxt->ad_bytes) {
1015	case 2:
1016		op->addr.mem.ea = insn_fetch(u16, 2, ctxt->_eip);
1017		break;
1018	case 4:
1019		op->addr.mem.ea = insn_fetch(u32, 4, ctxt->_eip);
1020		break;
1021	case 8:
1022		op->addr.mem.ea = insn_fetch(u64, 8, ctxt->_eip);
1023		break;
1024	}
1025done:
1026	return rc;
1027}
1028
1029static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1030{
1031	long sv = 0, mask;
1032
1033	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1034		mask = ~(ctxt->dst.bytes * 8 - 1);
1035
1036		if (ctxt->src.bytes == 2)
1037			sv = (s16)ctxt->src.val & (s16)mask;
1038		else if (ctxt->src.bytes == 4)
1039			sv = (s32)ctxt->src.val & (s32)mask;
1040
1041		ctxt->dst.addr.mem.ea += (sv >> 3);
1042	}
1043
1044	/* only subword offset */
1045	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1046}
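/*
 * Example: "bt" on a 16-bit memory operand with a register bit offset
 * of 100 computes sv == 96, so the effective address advances by 12
 * bytes and the remaining in-word bit offset is 100 & 15 == 4.
 */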
1047
1048static int read_emulated(struct x86_emulate_ctxt *ctxt,
1049			 unsigned long addr, void *dest, unsigned size)
1050{
1051	int rc;
1052	struct read_cache *mc = &ctxt->mem_read;
1053
1054	while (size) {
1055		int n = min(size, 8u);
1056		size -= n;
1057		if (mc->pos < mc->end)
1058			goto read_cached;
1059
1060		rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
1061					      &ctxt->exception);
1062		if (rc != X86EMUL_CONTINUE)
1063			return rc;
1064		mc->end += n;
1065
1066	read_cached:
1067		memcpy(dest, mc->data + mc->pos, n);
1068		mc->pos += n;
1069		dest += n;
1070		addr += n;
1071	}
1072	return X86EMUL_CONTINUE;
1073}
1074
1075static int segmented_read(struct x86_emulate_ctxt *ctxt,
1076			  struct segmented_address addr,
1077			  void *data,
1078			  unsigned size)
1079{
1080	int rc;
1081	ulong linear;
1082
1083	rc = linearize(ctxt, addr, size, false, &linear);
1084	if (rc != X86EMUL_CONTINUE)
1085		return rc;
1086	return read_emulated(ctxt, linear, data, size);
1087}
1088
1089static int segmented_write(struct x86_emulate_ctxt *ctxt,
1090			   struct segmented_address addr,
1091			   const void *data,
1092			   unsigned size)
1093{
1094	int rc;
1095	ulong linear;
1096
1097	rc = linearize(ctxt, addr, size, true, &linear);
1098	if (rc != X86EMUL_CONTINUE)
1099		return rc;
1100	return ctxt->ops->write_emulated(ctxt, linear, data, size,
1101					 &ctxt->exception);
1102}
1103
1104static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1105			     struct segmented_address addr,
1106			     const void *orig_data, const void *data,
1107			     unsigned size)
1108{
1109	int rc;
1110	ulong linear;
1111
1112	rc = linearize(ctxt, addr, size, true, &linear);
1113	if (rc != X86EMUL_CONTINUE)
1114		return rc;
1115	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1116					   size, &ctxt->exception);
1117}
1118
1119static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1120			   unsigned int size, unsigned short port,
1121			   void *dest)
1122{
1123	struct read_cache *rc = &ctxt->io_read;
1124
1125	if (rc->pos == rc->end) { /* refill pio read ahead */
1126		unsigned int in_page, n;
1127		unsigned int count = ctxt->rep_prefix ?
1128			address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
1129		in_page = (ctxt->eflags & EFLG_DF) ?
1130			offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
1131			PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
1132		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1133			count);
1134		if (n == 0)
1135			n = 1;
1136		rc->pos = rc->end = 0;
1137		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1138			return 0;
1139		rc->end = n * size;
1140	}
1141
1142	memcpy(dest, rc->data + rc->pos, size);
1143	rc->pos += size;
1144	return 1;
1145}
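/*
 * The refill above batches port reads for rep string I/O: e.g. a
 * "rep insb" with RCX == 100 and a page-aligned buffer can pull up to
 * 100 bytes from the port in a single ->pio_in_emulated() call and then
 * satisfy each iteration from rc->data.
 */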
1146
1147static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1148				     u16 selector, struct desc_ptr *dt)
1149{
1150	struct x86_emulate_ops *ops = ctxt->ops;
1151
1152	if (selector & 1 << 2) {
1153		struct desc_struct desc;
1154		u16 sel;
1155
 1156		memset(dt, 0, sizeof *dt);
1157		if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
1158			return;
1159
1160		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1161		dt->address = get_desc_base(&desc);
1162	} else
1163		ops->get_gdt(ctxt, dt);
1164}
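/*
 * Bit 2 of a selector is the table indicator: e.g. selector 0x000f
 * (index 1, TI == 1, RPL == 3) is resolved against the LDT above,
 * while 0x0008 is looked up in the GDT.
 */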
1165
 1166/* allowed just for 8-byte segment descriptors */
1167static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1168				   u16 selector, struct desc_struct *desc)
1169{
1170	struct desc_ptr dt;
1171	u16 index = selector >> 3;
1172	ulong addr;
1173
1174	get_descriptor_table_ptr(ctxt, selector, &dt);
1175
1176	if (dt.size < index * 8 + 7)
1177		return emulate_gp(ctxt, selector & 0xfffc);
1178
1179	addr = dt.address + index * 8;
1180	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1181				   &ctxt->exception);
1182}
1183
 1184/* allowed just for 8-byte segment descriptors */
1185static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1186				    u16 selector, struct desc_struct *desc)
1187{
1188	struct desc_ptr dt;
1189	u16 index = selector >> 3;
1190	ulong addr;
1191
1192	get_descriptor_table_ptr(ctxt, selector, &dt);
1193
1194	if (dt.size < index * 8 + 7)
1195		return emulate_gp(ctxt, selector & 0xfffc);
1196
1197	addr = dt.address + index * 8;
1198	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1199				    &ctxt->exception);
1200}
1201
1202/* Does not support long mode */
1203static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1204				   u16 selector, int seg)
1205{
1206	struct desc_struct seg_desc;
1207	u8 dpl, rpl, cpl;
1208	unsigned err_vec = GP_VECTOR;
1209	u32 err_code = 0;
1210	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1211	int ret;
1212
1213	memset(&seg_desc, 0, sizeof seg_desc);
1214
1215	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
1216	    || ctxt->mode == X86EMUL_MODE_REAL) {
1217		/* set real mode segment descriptor */
1218		set_desc_base(&seg_desc, selector << 4);
1219		set_desc_limit(&seg_desc, 0xffff);
1220		seg_desc.type = 3;
1221		seg_desc.p = 1;
1222		seg_desc.s = 1;
1223		goto load;
1224	}
1225
1226	/* NULL selector is not valid for TR, CS and SS */
1227	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
1228	    && null_selector)
1229		goto exception;
1230
1231	/* TR should be in GDT only */
1232	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1233		goto exception;
1234
1235	if (null_selector) /* for NULL selector skip all following checks */
1236		goto load;
1237
1238	ret = read_segment_descriptor(ctxt, selector, &seg_desc);
1239	if (ret != X86EMUL_CONTINUE)
1240		return ret;
1241
1242	err_code = selector & 0xfffc;
1243	err_vec = GP_VECTOR;
1244
 1245	/* can't load system descriptor into segment selector */
1246	if (seg <= VCPU_SREG_GS && !seg_desc.s)
1247		goto exception;
1248
1249	if (!seg_desc.p) {
1250		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1251		goto exception;
1252	}
1253
1254	rpl = selector & 3;
1255	dpl = seg_desc.dpl;
1256	cpl = ctxt->ops->cpl(ctxt);
1257
1258	switch (seg) {
1259	case VCPU_SREG_SS:
1260		/*
 1261		 * segment is not a writable data segment, or the segment
 1262		 * selector's RPL != CPL, or its DPL != CPL
1263		 */
1264		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1265			goto exception;
1266		break;
1267	case VCPU_SREG_CS:
1268		if (!(seg_desc.type & 8))
1269			goto exception;
1270
1271		if (seg_desc.type & 4) {
1272			/* conforming */
1273			if (dpl > cpl)
1274				goto exception;
1275		} else {
1276			/* nonconforming */
1277			if (rpl > cpl || dpl != cpl)
1278				goto exception;
1279		}
1280		/* CS(RPL) <- CPL */
1281		selector = (selector & 0xfffc) | cpl;
1282		break;
1283	case VCPU_SREG_TR:
1284		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1285			goto exception;
1286		break;
1287	case VCPU_SREG_LDTR:
1288		if (seg_desc.s || seg_desc.type != 2)
1289			goto exception;
1290		break;
1291	default: /*  DS, ES, FS, or GS */
1292		/*
1293		 * segment is not a data or readable code segment or
1294		 * ((segment is a data or nonconforming code segment)
1295		 * and (both RPL and CPL > DPL))
1296		 */
1297		if ((seg_desc.type & 0xa) == 0x8 ||
1298		    (((seg_desc.type & 0xc) != 0xc) &&
1299		     (rpl > dpl && cpl > dpl)))
1300			goto exception;
1301		break;
1302	}
1303
1304	if (seg_desc.s) {
1305		/* mark segment as accessed */
1306		seg_desc.type |= 1;
1307		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1308		if (ret != X86EMUL_CONTINUE)
1309			return ret;
1310	}
1311load:
1312	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
1313	return X86EMUL_CONTINUE;
1314exception:
1315	emulate_exception(ctxt, err_vec, err_code, true);
1316	return X86EMUL_PROPAGATE_FAULT;
1317}
1318
1319static void write_register_operand(struct operand *op)
1320{
1321	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1322	switch (op->bytes) {
1323	case 1:
1324		*(u8 *)op->addr.reg = (u8)op->val;
1325		break;
1326	case 2:
1327		*(u16 *)op->addr.reg = (u16)op->val;
1328		break;
1329	case 4:
1330		*op->addr.reg = (u32)op->val;
1331		break;	/* 64b: zero-extend */
1332	case 8:
1333		*op->addr.reg = op->val;
1334		break;
1335	}
1336}
1337
1338static int writeback(struct x86_emulate_ctxt *ctxt)
1339{
1340	int rc;
1341
1342	switch (ctxt->dst.type) {
1343	case OP_REG:
1344		write_register_operand(&ctxt->dst);
1345		break;
1346	case OP_MEM:
1347		if (ctxt->lock_prefix)
1348			rc = segmented_cmpxchg(ctxt,
1349					       ctxt->dst.addr.mem,
1350					       &ctxt->dst.orig_val,
1351					       &ctxt->dst.val,
1352					       ctxt->dst.bytes);
1353		else
1354			rc = segmented_write(ctxt,
1355					     ctxt->dst.addr.mem,
1356					     &ctxt->dst.val,
1357					     ctxt->dst.bytes);
1358		if (rc != X86EMUL_CONTINUE)
1359			return rc;
1360		break;
1361	case OP_XMM:
1362		write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
1363		break;
1364	case OP_NONE:
1365		/* no writeback */
1366		break;
1367	default:
1368		break;
1369	}
1370	return X86EMUL_CONTINUE;
1371}
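/*
 * Note the OP_MEM case above: a locked instruction is written back via
 * cmpxchg against the operand's original value, so e.g. "lock incl"
 * on memory remains atomic with respect to other vcpus instead of
 * degenerating into a plain read-modify-write.
 */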
1372
1373static int em_push(struct x86_emulate_ctxt *ctxt)
1374{
1375	struct segmented_address addr;
1376
1377	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
1378	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1379	addr.seg = VCPU_SREG_SS;
1380
1381	/* Disable writeback. */
1382	ctxt->dst.type = OP_NONE;
1383	return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
1384}
1385
1386static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1387		       void *dest, int len)
1388{
1389	int rc;
1390	struct segmented_address addr;
1391
1392	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1393	addr.seg = VCPU_SREG_SS;
1394	rc = segmented_read(ctxt, addr, dest, len);
1395	if (rc != X86EMUL_CONTINUE)
1396		return rc;
1397
1398	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
1399	return rc;
1400}
1401
1402static int em_pop(struct x86_emulate_ctxt *ctxt)
1403{
1404	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1405}
1406
1407static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1408			void *dest, int len)
1409{
1410	int rc;
1411	unsigned long val, change_mask;
1412	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1413	int cpl = ctxt->ops->cpl(ctxt);
1414
1415	rc = emulate_pop(ctxt, &val, len);
1416	if (rc != X86EMUL_CONTINUE)
1417		return rc;
1418
1419	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1420		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1421
1422	switch(ctxt->mode) {
1423	case X86EMUL_MODE_PROT64:
1424	case X86EMUL_MODE_PROT32:
1425	case X86EMUL_MODE_PROT16:
1426		if (cpl == 0)
1427			change_mask |= EFLG_IOPL;
1428		if (cpl <= iopl)
1429			change_mask |= EFLG_IF;
1430		break;
1431	case X86EMUL_MODE_VM86:
1432		if (iopl < 3)
1433			return emulate_gp(ctxt, 0);
1434		change_mask |= EFLG_IF;
1435		break;
1436	default: /* real mode */
1437		change_mask |= (EFLG_IOPL | EFLG_IF);
1438		break;
1439	}
1440
1441	*(unsigned long *)dest =
1442		(ctxt->eflags & ~change_mask) | (val & change_mask);
1443
1444	return rc;
1445}
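/*
 * Example of the mask logic above: popf at CPL 3 with IOPL 0 may alter
 * the arithmetic flags but neither IOPL nor IF; at CPL 0 all of them
 * become writable.
 */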
1446
1447static int em_popf(struct x86_emulate_ctxt *ctxt)
1448{
1449	ctxt->dst.type = OP_REG;
1450	ctxt->dst.addr.reg = &ctxt->eflags;
1451	ctxt->dst.bytes = ctxt->op_bytes;
1452	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1453}
1454
1455static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
1456{
1457	ctxt->src.val = get_segment_selector(ctxt, seg);
1458
1459	return em_push(ctxt);
1460}
1461
1462static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, int seg)
1463{
1464	unsigned long selector;
1465	int rc;
1466
1467	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1468	if (rc != X86EMUL_CONTINUE)
1469		return rc;
1470
1471	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1472	return rc;
1473}
1474
1475static int em_pusha(struct x86_emulate_ctxt *ctxt)
1476{
1477	unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
1478	int rc = X86EMUL_CONTINUE;
1479	int reg = VCPU_REGS_RAX;
1480
1481	while (reg <= VCPU_REGS_RDI) {
1482		(reg == VCPU_REGS_RSP) ?
1483		(ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);
1484
1485		rc = em_push(ctxt);
1486		if (rc != X86EMUL_CONTINUE)
1487			return rc;
1488
1489		++reg;
1490	}
1491
1492	return rc;
1493}
1494
1495static int em_pushf(struct x86_emulate_ctxt *ctxt)
1496{
 1497	ctxt->src.val = (unsigned long)ctxt->eflags;
1498	return em_push(ctxt);
1499}
1500
1501static int em_popa(struct x86_emulate_ctxt *ctxt)
1502{
1503	int rc = X86EMUL_CONTINUE;
1504	int reg = VCPU_REGS_RDI;
1505
1506	while (reg >= VCPU_REGS_RAX) {
1507		if (reg == VCPU_REGS_RSP) {
1508			register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
1509							ctxt->op_bytes);
1510			--reg;
1511		}
1512
1513		rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
1514		if (rc != X86EMUL_CONTINUE)
1515			break;
1516		--reg;
1517	}
1518	return rc;
1519}
1520
1521int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1522{
1523	struct x86_emulate_ops *ops = ctxt->ops;
1524	int rc;
1525	struct desc_ptr dt;
1526	gva_t cs_addr;
1527	gva_t eip_addr;
1528	u16 cs, eip;
1529
1530	/* TODO: Add limit checks */
1531	ctxt->src.val = ctxt->eflags;
1532	rc = em_push(ctxt);
1533	if (rc != X86EMUL_CONTINUE)
1534		return rc;
1535
1536	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1537
1538	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1539	rc = em_push(ctxt);
1540	if (rc != X86EMUL_CONTINUE)
1541		return rc;
1542
1543	ctxt->src.val = ctxt->_eip;
1544	rc = em_push(ctxt);
1545	if (rc != X86EMUL_CONTINUE)
1546		return rc;
1547
1548	ops->get_idt(ctxt, &dt);
1549
1550	eip_addr = dt.address + (irq << 2);
1551	cs_addr = dt.address + (irq << 2) + 2;
1552
1553	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1554	if (rc != X86EMUL_CONTINUE)
1555		return rc;
1556
1557	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1558	if (rc != X86EMUL_CONTINUE)
1559		return rc;
1560
1561	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1562	if (rc != X86EMUL_CONTINUE)
1563		return rc;
1564
1565	ctxt->_eip = eip;
1566
1567	return rc;
1568}
1569
1570static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1571{
1572	switch(ctxt->mode) {
1573	case X86EMUL_MODE_REAL:
1574		return emulate_int_real(ctxt, irq);
1575	case X86EMUL_MODE_VM86:
1576	case X86EMUL_MODE_PROT16:
1577	case X86EMUL_MODE_PROT32:
1578	case X86EMUL_MODE_PROT64:
1579	default:
 1580		/* Protected-mode interrupts are not implemented yet */
1581		return X86EMUL_UNHANDLEABLE;
1582	}
1583}
1584
1585static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1586{
1587	int rc = X86EMUL_CONTINUE;
1588	unsigned long temp_eip = 0;
1589	unsigned long temp_eflags = 0;
1590	unsigned long cs = 0;
1591	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1592			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1593			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1594	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1595
1596	/* TODO: Add stack limit check */
1597
1598	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1599
1600	if (rc != X86EMUL_CONTINUE)
1601		return rc;
1602
1603	if (temp_eip & ~0xffff)
1604		return emulate_gp(ctxt, 0);
1605
1606	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1607
1608	if (rc != X86EMUL_CONTINUE)
1609		return rc;
1610
1611	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1612
1613	if (rc != X86EMUL_CONTINUE)
1614		return rc;
1615
1616	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1617
1618	if (rc != X86EMUL_CONTINUE)
1619		return rc;
1620
1621	ctxt->_eip = temp_eip;
1622
1623
1624	if (ctxt->op_bytes == 4)
1625		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1626	else if (ctxt->op_bytes == 2) {
1627		ctxt->eflags &= ~0xffff;
1628		ctxt->eflags |= temp_eflags;
1629	}
1630
1631	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1632	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1633
1634	return rc;
1635}
1636
1637static int em_iret(struct x86_emulate_ctxt *ctxt)
1638{
1639	switch(ctxt->mode) {
1640	case X86EMUL_MODE_REAL:
1641		return emulate_iret_real(ctxt);
1642	case X86EMUL_MODE_VM86:
1643	case X86EMUL_MODE_PROT16:
1644	case X86EMUL_MODE_PROT32:
1645	case X86EMUL_MODE_PROT64:
1646	default:
 1647		/* iret from protected mode is not implemented yet */
1648		return X86EMUL_UNHANDLEABLE;
1649	}
1650}
1651
1652static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1653{
1654	int rc;
1655	unsigned short sel;
1656
1657	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1658
1659	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
1660	if (rc != X86EMUL_CONTINUE)
1661		return rc;
1662
1663	ctxt->_eip = 0;
1664	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
1665	return X86EMUL_CONTINUE;
1666}
1667
1668static int em_grp1a(struct x86_emulate_ctxt *ctxt)
1669{
1670	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
1671}
1672
1673static int em_grp2(struct x86_emulate_ctxt *ctxt)
1674{
1675	switch (ctxt->modrm_reg) {
1676	case 0:	/* rol */
1677		emulate_2op_SrcB("rol", ctxt->src, ctxt->dst, ctxt->eflags);
1678		break;
1679	case 1:	/* ror */
1680		emulate_2op_SrcB("ror", ctxt->src, ctxt->dst, ctxt->eflags);
1681		break;
1682	case 2:	/* rcl */
1683		emulate_2op_SrcB("rcl", ctxt->src, ctxt->dst, ctxt->eflags);
1684		break;
1685	case 3:	/* rcr */
1686		emulate_2op_SrcB("rcr", ctxt->src, ctxt->dst, ctxt->eflags);
1687		break;
1688	case 4:	/* sal/shl */
1689	case 6:	/* sal/shl */
1690		emulate_2op_SrcB("sal", ctxt->src, ctxt->dst, ctxt->eflags);
1691		break;
1692	case 5:	/* shr */
1693		emulate_2op_SrcB("shr", ctxt->src, ctxt->dst, ctxt->eflags);
1694		break;
1695	case 7:	/* sar */
1696		emulate_2op_SrcB("sar", ctxt->src, ctxt->dst, ctxt->eflags);
1697		break;
1698	}
1699	return X86EMUL_CONTINUE;
1700}
1701
1702static int em_grp3(struct x86_emulate_ctxt *ctxt)
1703{
1704	unsigned long *rax = &ctxt->regs[VCPU_REGS_RAX];
1705	unsigned long *rdx = &ctxt->regs[VCPU_REGS_RDX];
1706	u8 de = 0;
1707
1708	switch (ctxt->modrm_reg) {
1709	case 0 ... 1:	/* test */
1710		emulate_2op_SrcV("test", ctxt->src, ctxt->dst, ctxt->eflags);
1711		break;
1712	case 2:	/* not */
1713		ctxt->dst.val = ~ctxt->dst.val;
1714		break;
1715	case 3:	/* neg */
1716		emulate_1op("neg", ctxt->dst, ctxt->eflags);
1717		break;
1718	case 4: /* mul */
1719		emulate_1op_rax_rdx("mul", ctxt->src, *rax, *rdx, ctxt->eflags);
1720		break;
1721	case 5: /* imul */
1722		emulate_1op_rax_rdx("imul", ctxt->src, *rax, *rdx, ctxt->eflags);
1723		break;
1724	case 6: /* div */
1725		emulate_1op_rax_rdx_ex("div", ctxt->src, *rax, *rdx,
1726				       ctxt->eflags, de);
1727		break;
1728	case 7: /* idiv */
1729		emulate_1op_rax_rdx_ex("idiv", ctxt->src, *rax, *rdx,
1730				       ctxt->eflags, de);
1731		break;
1732	default:
1733		return X86EMUL_UNHANDLEABLE;
1734	}
1735	if (de)
1736		return emulate_de(ctxt);
1737	return X86EMUL_CONTINUE;
1738}
1739
1740static int em_grp45(struct x86_emulate_ctxt *ctxt)
1741{
1742	int rc = X86EMUL_CONTINUE;
1743
1744	switch (ctxt->modrm_reg) {
1745	case 0:	/* inc */
1746		emulate_1op("inc", ctxt->dst, ctxt->eflags);
1747		break;
1748	case 1:	/* dec */
1749		emulate_1op("dec", ctxt->dst, ctxt->eflags);
1750		break;
1751	case 2: /* call near abs */ {
1752		long int old_eip;
1753		old_eip = ctxt->_eip;
1754		ctxt->_eip = ctxt->src.val;
1755		ctxt->src.val = old_eip;
1756		rc = em_push(ctxt);
1757		break;
1758	}
1759	case 4: /* jmp abs */
1760		ctxt->_eip = ctxt->src.val;
1761		break;
1762	case 5: /* jmp far */
1763		rc = em_jmp_far(ctxt);
1764		break;
1765	case 6:	/* push */
1766		rc = em_push(ctxt);
1767		break;
1768	}
1769	return rc;
1770}
1771
1772static int em_grp9(struct x86_emulate_ctxt *ctxt)
1773{
1774	u64 old = ctxt->dst.orig_val64;
1775
1776	if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
1777	    ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
1778		ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1779		ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1780		ctxt->eflags &= ~EFLG_ZF;
1781	} else {
1782		ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
1783			(u32) ctxt->regs[VCPU_REGS_RBX];
1784
1785		ctxt->eflags |= EFLG_ZF;
1786	}
1787	return X86EMUL_CONTINUE;
1788}
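/*
 * This is cmpxchg8b: if EDX:EAX matches the 64-bit destination, the
 * destination is replaced by ECX:EBX and ZF is set; otherwise the old
 * value is returned in EDX:EAX and ZF is cleared.
 */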
1789
1790static int em_ret(struct x86_emulate_ctxt *ctxt)
1791{
1792	ctxt->dst.type = OP_REG;
1793	ctxt->dst.addr.reg = &ctxt->_eip;
1794	ctxt->dst.bytes = ctxt->op_bytes;
1795	return em_pop(ctxt);
1796}
1797
1798static int em_ret_far(struct x86_emulate_ctxt *ctxt)
1799{
1800	int rc;
1801	unsigned long cs;
1802
1803	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
1804	if (rc != X86EMUL_CONTINUE)
1805		return rc;
1806	if (ctxt->op_bytes == 4)
1807		ctxt->_eip = (u32)ctxt->_eip;
1808	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1809	if (rc != X86EMUL_CONTINUE)
1810		return rc;
1811	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
 
 
 
 
 
 
 
 
 
1812	return rc;
1813}
1814
static int emulate_load_segment(struct x86_emulate_ctxt *ctxt, int seg)
{
	unsigned short sel;
	int rc;

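	/* A far pointer operand is the offset followed by a 16-bit selector. */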
	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	u16 selector;

	memset(cs, 0, sizeof(struct desc_struct));
	ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}

static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
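	/* STAR[47:32] holds the SYSCALL CS selector; SS is loaded as CS + 8. */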
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	ctxt->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}

static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

1981
1982	if ((ctxt->rex_prefix & 0x8) != 0x0)
1983		usermode = X86EMUL_MODE_PROT64;
1984	else
1985		usermode = X86EMUL_MODE_PROT32;
1986
 
 
 
1987	cs.dpl = 3;
1988	ss.dpl = 3;
1989	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
1990	switch (usermode) {
1991	case X86EMUL_MODE_PROT32:
1992		cs_sel = (u16)(msr_data + 16);
1993		if ((msr_data & 0xfffc) == 0x0)
1994			return emulate_gp(ctxt, 0);
1995		ss_sel = (u16)(msr_data + 24);
 
 
1996		break;
1997	case X86EMUL_MODE_PROT64:
1998		cs_sel = (u16)(msr_data + 32);
1999		if (msr_data == 0x0)
2000			return emulate_gp(ctxt, 0);
2001		ss_sel = cs_sel + 8;
2002		cs.d = 0;
2003		cs.l = 1;
 
 
 
2004		break;
2005	}
2006	cs_sel |= SELECTOR_RPL_MASK;
2007	ss_sel |= SELECTOR_RPL_MASK;
2008
2009	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2010	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2011
2012	ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
2013	ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
 
2014
2015	return X86EMUL_CONTINUE;
2016}
2017
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

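/*
 * Consult the I/O permission bitmap in the TSS: access to a port is
 * allowed only if every bit covering [port, port + len) is clear.  The
 * 16-bit bitmap offset lives at byte 102 of the TSS, which is why the
 * TSS limit must be at least 103.
 */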
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

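	/* Remember a successful check; it need not be repeated for this insn. */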
	ctxt->perm_ok = true;

	return true;
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = ctxt->regs[VCPU_REGS_RAX];
	tss->cx = ctxt->regs[VCPU_REGS_RCX];
	tss->dx = ctxt->regs[VCPU_REGS_RDX];
	tss->bx = ctxt->regs[VCPU_REGS_RBX];
	tss->sp = ctxt->regs[VCPU_REGS_RSP];
	tss->bp = ctxt->regs[VCPU_REGS_RBP];
	tss->si = ctxt->regs[VCPU_REGS_RSI];
	tss->di = ctxt->regs[VCPU_REGS_RDI];

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	ctxt->regs[VCPU_REGS_RAX] = tss->ax;
	ctxt->regs[VCPU_REGS_RCX] = tss->cx;
	ctxt->regs[VCPU_REGS_RDX] = tss->dx;
	ctxt->regs[VCPU_REGS_RBX] = tss->bx;
	ctxt->regs[VCPU_REGS_RSP] = tss->sp;
	ctxt->regs[VCPU_REGS_RBP] = tss->bp;
	ctxt->regs[VCPU_REGS_RSI] = tss->si;
	ctxt->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	/*
	 * Now load the segment descriptors.  If a fault happens at this
	 * stage, it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = ctxt->regs[VCPU_REGS_RAX];
	tss->ecx = ctxt->regs[VCPU_REGS_RCX];
	tss->edx = ctxt->regs[VCPU_REGS_RDX];
	tss->ebx = ctxt->regs[VCPU_REGS_RBX];
	tss->esp = ctxt->regs[VCPU_REGS_RSP];
	tss->ebp = ctxt->regs[VCPU_REGS_RBP];
	tss->esi = ctxt->regs[VCPU_REGS_RSI];
	tss->edi = ctxt->regs[VCPU_REGS_RDI];

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	ctxt->regs[VCPU_REGS_RAX] = tss->eax;
	ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
	ctxt->regs[VCPU_REGS_RDX] = tss->edx;
	ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
	ctxt->regs[VCPU_REGS_RSP] = tss->esp;
	ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
	ctxt->regs[VCPU_REGS_RSI] = tss->esi;
	ctxt->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * Now load the segment descriptors.  If a fault happens at this
	 * stage, it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

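/*
 * Common task-switch machinery: validate the new TSS descriptor
 * (present, limit, privilege), update busy flags, EFLAGS.NT and the
 * new TSS back link as the switch reason requires, then hand off to
 * the 16-bit or 32-bit TSS loader above.
 */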
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt) > next_tss_desc.dpl)
			return emulate_gp(ctxt, 0);
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags;
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE)
		ctxt->eip = ctxt->_eip;

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
			    int reg, struct operand *op)
{
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
	op->addr.mem.seg = seg;
}

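/*
 * DAS: decimal adjust AL after subtraction, per the SDM pseudocode.
 * The "al >= 250" test detects the borrow (wrap) caused by the low
 * nibble adjustment.
 */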
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	emulate_2op_SrcV("or", ctxt->src, ctxt->dst, ctxt->eflags);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

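/*
 * Far CALL: load the new CS first, then push the old CS and IP (in
 * that order) so that a far RET can restore them.
 */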
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;

	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	old_eip = ctxt->_eip;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;

	ctxt->_eip = 0;
	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = old_eip;
	return em_push(ctxt);
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	ctxt->dst.bytes = ctxt->op_bytes;
	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_add(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV("add", ctxt->src, ctxt->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}

static int em_or(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV("or", ctxt->src, ctxt->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}

static int em_adc(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV("adc", ctxt->src, ctxt->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}

static int em_sbb(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV("sbb", ctxt->src, ctxt->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}

static int em_and(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV("and", ctxt->src, ctxt->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}

static int em_sub(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV("sub", ctxt->src, ctxt->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}

static int em_xor(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV("xor", ctxt->src, ctxt->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}

static int em_cmp(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV("cmp", ctxt->src, ctxt->dst, ctxt->eflags);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_test(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV("test", ctxt->src, ctxt->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte("imul", ctxt->src, ctxt->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return em_imul(ctxt);
}

static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
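	/* Replicate the source's sign bit: dst = sign ? ~0 : 0. */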
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
	ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src.val;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_movdqu(struct x86_emulate_ctxt *ctxt)
{
	memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
	return X86EMUL_CONTINUE;
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
		return X86EMUL_UNHANDLEABLE;

	rc = ctxt->ops->fix_hypercall(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_gdt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = ctxt->ops->fix_hypercall(ctxt);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return rc;
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
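	/*
	 * LOOP (0xe2) ignores ZF; LOOPNE (0xe0) and LOOPE (0xe1) map via
	 * b ^ 0x5 onto the JNE/JE condition codes tested by test_cc().
	 */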
	if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

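/*
 * CR writes: reject reserved bits per control register, then apply the
 * CR0/CR3/CR4 consistency checks for which the CPU would raise #GP.
 */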
static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
			rsvd = CR3_PAE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
			rsvd = CR3_NONPAE_RESERVED_BITS;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		u64 cr4;

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.GD (general detect, bit 13) is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt))
		return emulate_db(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = ctxt->regs[VCPU_REGS_RAX];

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = ctxt->regs[VCPU_REGS_RCX];

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    (rcx > 3))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

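/*
 * Opcode table construction helpers: D() declares a decode-only entry,
 * I() attaches an ->execute handler, G()/GD() indirect through a group
 * (or mod3/mod012 dual group) table keyed on ModRM, GP() through a
 * mandatory-prefix table, and the *I*/*IP variants additionally attach
 * an intercept and/or a ->check_perm hook.
 */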
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
		      .check_perm = (_p) }
#define N    D(0)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
	  .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)

#define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

static struct opcode group7_rm1[] = {
	DI(SrcNone | ModRM | Priv, monitor),
	DI(SrcNone | ModRM | Priv, mwait),
	N, N, N, N, N, N,
};

static struct opcode group7_rm3[] = {
	DIP(SrcNone | ModRM | Prot | Priv, vmrun,   check_svme_pa),
	II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
	DIP(SrcNone | ModRM | Prot | Priv, vmload,  check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, vmsave,  check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, stgi,    check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, clgi,    check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, skinit,  check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
};

static struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static struct opcode group1[] = {
	I(Lock, em_add),
	I(Lock, em_or),
	I(Lock, em_adc),
	I(Lock, em_sbb),
	I(Lock, em_and),
	I(Lock, em_sub),
	I(Lock, em_xor),
	I(0, em_cmp),
};

static struct opcode group1A[] = {
	D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
};

static struct opcode group3[] = {
	D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	X4(D(SrcMem | ModRM)),
};

static struct opcode group4[] = {
	D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
	N, N, N, N, N, N,
};

static struct opcode group5[] = {
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	D(SrcMem | ModRM | Stack),
	I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
	D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
	D(SrcMem | ModRM | Stack), N,
};

static struct opcode group6[] = {
	DI(ModRM | Prot,        sldt),
	DI(ModRM | Prot,        str),
	DI(ModRM | Prot | Priv, lldt),
	DI(ModRM | Prot | Priv, ltr),
	N, N, N, N,
};

static struct group_dual group7 = { {
	DI(ModRM | Mov | DstMem | Priv, sgdt),
	DI(ModRM | Mov | DstMem | Priv, sidt),
	II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
	II(ModRM | SrcMem | Priv, em_lidt, lidt),
	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
	II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
	I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
} };

static struct opcode group8[] = {
	N, N, N, N,
	D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
	D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
};

static struct group_dual group9 = { {
	N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static struct opcode group11[] = {
	I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
};

static struct gprefix pfx_0f_6f_0f_7f = {
	N, N, N, I(Sse, em_movdqu),
};

static struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	I6ALU(Lock, em_add),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x08 - 0x0F */
	I6ALU(Lock, em_or),
	D(ImplicitOps | Stack | No64), N,
	/* 0x10 - 0x17 */
	I6ALU(Lock, em_adc),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x18 - 0x1F */
	I6ALU(Lock, em_sbb),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x20 - 0x27 */
	I6ALU(Lock, em_and), N, N,
	/* 0x28 - 0x2F */
	I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	I6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	I6ALU(0, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
	D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
	G(DstMem | SrcImm | ModRM | Group, group1),
	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
	G(DstMem | SrcImmByte | ModRM | Group, group1),
	I2bv(DstMem | SrcReg | ModRM, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf), N, N,
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstDI | String, em_cmp),
	/* 0xA8 - 0xAF */
	I2bv(DstAcc | SrcImm, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	I2bv(SrcAcc | DstDI | String, em_cmp),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	D2bv(DstMem | SrcImmByte | ModRM),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | Stack, em_ret),
	D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	N, N, N, I(ImplicitOps | Stack, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
	N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte, em_loop)),
	I(SrcImmByte, em_jcxz),
	D2bvIP(SrcImmUByte | DstAcc, in,  check_perm_in),
	D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
	/* 0xE8 - 0xEF */
	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
	D2bvIP(SrcDX | DstAcc, in,  check_perm_in),
	D2bvIP(SrcAcc | DstDX, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

static struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | VendorSpecific, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
	DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
	DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
	N, N, N, N,
	N, N, N, N, N, N, N, N,
	/* 0x30 - 0x3F */
	DI(ImplicitOps | Priv, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	DI(ImplicitOps | Priv, rdmsr),
	DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
	I(ImplicitOps | VendorSpecific, em_sysenter),
	I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM | Mov)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
	/* 0xA8 - 0xAF */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM),
	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xCF */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I6ALU

static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, 1, ctxt->_eip);
		break;
	case 2:
		op->val = insn_fetch(s16, 2, ctxt->_eip);
		break;
	case 4:
		op->val = insn_fetch(s32, 4, ctxt->_eip);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

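/*
 * Decode a single instruction: legacy and REX prefixes, the opcode
 * (following group, dual-group and mandatory-prefix indirection), the
 * ModRM/SIB bytes, and finally the source, second-source and
 * destination operands dictated by the opcode flags.
 */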
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	struct opcode opcode;
	struct operand memop = { .type = OP_NONE }, *memopp = NULL;

	ctxt->_eip = ctxt->eip;
	ctxt->fetch.start = ctxt->_eip;
	ctxt->fetch.end = ctxt->fetch.start + insn_len;
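	/* Prime the fetch cache with the bytes the caller already read. */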
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, 1, ctxt->_eip)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(ctxt, (ctxt->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(ctxt, ctxt->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->twobyte = 1;
		ctxt->b = insn_fetch(u8, 1, ctxt->_eip);
		opcode = twobyte_table[ctxt->b];
	}
	ctxt->d = opcode.flags;

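	/*
	 * Resolve indirections until we land on a concrete entry: group
	 * tables are keyed on ModRM.reg, dual groups also on ModRM.mod,
	 * RM extensions on ModRM.rm, and prefix tables on 0x66/0xf2/0xf3.
	 */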
	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
			--ctxt->_eip;
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
			--ctxt->_eip;
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return X86EMUL_UNHANDLEABLE;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		default:
			return X86EMUL_UNHANDLEABLE;
		}

		ctxt->d &= ~GroupMask;
		ctxt->d |= opcode.flags;
	}

	ctxt->execute = opcode.u.execute;
	ctxt->check_perm = opcode.check_perm;
	ctxt->intercept = opcode.intercept;

	/* Unrecognised? */
	if (ctxt->d == 0 || (ctxt->d & Undefined))
		return -1;

	if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
		return -1;

	if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
		ctxt->op_bytes = 8;

	if (ctxt->d & Op3264) {
		if (mode == X86EMUL_MODE_PROT64)
			ctxt->op_bytes = 8;
		else
			ctxt->op_bytes = 4;
	}

	if (ctxt->d & Sse)
		ctxt->op_bytes = 16;

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &memop);
		if (!ctxt->has_seg_override)
			set_seg_override(ctxt, ctxt->modrm_seg);
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!ctxt->has_seg_override)
		set_seg_override(ctxt, VCPU_SREG_DS);

	memop.addr.mem.seg = seg_override(ctxt);

	if (memop.type == OP_MEM && ctxt->ad_bytes != 8)
		memop.addr.mem.ea = (u32)memop.addr.mem.ea;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (ctxt->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(ctxt, &ctxt->src, 0);
		break;
	case SrcMem16:
		memop.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		memop.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	srcmem_common:
		ctxt->src = memop;
		memopp = &ctxt->src;
		break;
	case SrcImmU16:
		rc = decode_imm(ctxt, &ctxt->src, 2, false);
		break;
	case SrcImm:
		rc = decode_imm(ctxt, &ctxt->src, imm_size(ctxt), true);
		break;
	case SrcImmU:
		rc = decode_imm(ctxt, &ctxt->src, imm_size(ctxt), false);
		break;
	case SrcImmByte:
		rc = decode_imm(ctxt, &ctxt->src, 1, true);
		break;
	case SrcImmUByte:
		rc = decode_imm(ctxt, &ctxt->src, 1, false);
		break;
	case SrcAcc:
		ctxt->src.type = OP_REG;
		ctxt->src.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		ctxt->src.addr.reg = &ctxt->regs[VCPU_REGS_RAX];
		fetch_register_operand(&ctxt->src);
		break;
	case SrcOne:
		ctxt->src.bytes = 1;
		ctxt->src.val = 1;
		break;
	case SrcSI:
		ctxt->src.type = OP_MEM;
		ctxt->src.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		ctxt->src.addr.mem.ea =
			register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
		ctxt->src.addr.mem.seg = seg_override(ctxt);
		ctxt->src.val = 0;
		break;
	case SrcImmFAddr:
		ctxt->src.type = OP_IMM;
		ctxt->src.addr.mem.ea = ctxt->_eip;
		ctxt->src.bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(ctxt->src.valptr, ctxt->src.bytes, ctxt->_eip);
		break;
	case SrcMemFAddr:
		memop.bytes = ctxt->op_bytes + 2;
		goto srcmem_common;
		break;
	case SrcDX:
		ctxt->src.type = OP_REG;
		ctxt->src.bytes = 2;
		ctxt->src.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
		fetch_register_operand(&ctxt->src);
		break;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (ctxt->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		ctxt->src2.bytes = 1;
		ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
		break;
	case Src2ImmByte:
		rc = decode_imm(ctxt, &ctxt->src2, 1, true);
		break;
	case Src2One:
		ctxt->src2.bytes = 1;
		ctxt->src2.val = 1;
		break;
	case Src2Imm:
		rc = decode_imm(ctxt, &ctxt->src2, imm_size(ctxt), true);
		break;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

3623	/* Decode and fetch the destination operand: register or memory. */
3624	switch (ctxt->d & DstMask) {
3625	case DstReg:
3626		decode_register_operand(ctxt, &ctxt->dst,
3627			 ctxt->twobyte && (ctxt->b == 0xb6 || ctxt->b == 0xb7));
3628		break;
3629	case DstImmUByte:
3630		ctxt->dst.type = OP_IMM;
3631		ctxt->dst.addr.mem.ea = ctxt->_eip;
3632		ctxt->dst.bytes = 1;
3633		ctxt->dst.val = insn_fetch(u8, 1, ctxt->_eip);
3634		break;
3635	case DstMem:
3636	case DstMem64:
3637		ctxt->dst = memop;
3638		memopp = &ctxt->dst;
3639		if ((ctxt->d & DstMask) == DstMem64)
3640			ctxt->dst.bytes = 8;
3641		else
3642			ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3643		if (ctxt->d & BitOp)
3644			fetch_bit_operand(ctxt);
3645		ctxt->dst.orig_val = ctxt->dst.val;
3646		break;
3647	case DstAcc:
3648		ctxt->dst.type = OP_REG;
3649		ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3650		ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RAX];
3651		fetch_register_operand(&ctxt->dst);
3652		ctxt->dst.orig_val = ctxt->dst.val;
3653		break;
3654	case DstDI:
3655		ctxt->dst.type = OP_MEM;
3656		ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3657		ctxt->dst.addr.mem.ea =
3658			register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
3659		ctxt->dst.addr.mem.seg = VCPU_SREG_ES;
3660		ctxt->dst.val = 0;
3661		break;
3662	case DstDX:
3663		ctxt->dst.type = OP_REG;
3664		ctxt->dst.bytes = 2;
3665		ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
3666		fetch_register_operand(&ctxt->dst);
3667		break;
3668	case ImplicitOps:
3669		/* Special instructions do their own operand decoding. */
3670	default:
3671		ctxt->dst.type = OP_NONE; /* Disable writeback. */
3672		break;
3673	}
3674
3675done:
3676	if (memopp && memopp->type == OP_MEM && ctxt->rip_relative)
3677		memopp->addr.mem.ea += ctxt->_eip;
3678
3679	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3680}
3681
3682static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
3683{
3684	/* The second termination condition only applies for REPE
3685	 * and REPNE. Test if the repeat string operation prefix is
3686	 * REPE/REPZ or REPNE/REPNZ and, if so, check the
3687	 * corresponding termination condition:
3688	 * 	- if REPE/REPZ and ZF = 0 then done
3689	 * 	- if REPNE/REPNZ and ZF = 1 then done
3690	 */
3691	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
3692	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
3693	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
3694		 ((ctxt->eflags & EFLG_ZF) == 0))
3695		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
3696		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
3697		return true;
3698
3699	return false;
3700}
3701
3702int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3703{
3704	struct x86_emulate_ops *ops = ctxt->ops;
3705	u64 msr_data;
3706	int rc = X86EMUL_CONTINUE;
3707	int saved_dst_type = ctxt->dst.type;
3708
3709	ctxt->mem_read.pos = 0;
3710
3711	if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
3712		rc = emulate_ud(ctxt);
3713		goto done;
3714	}
3715
3716	/* LOCK prefix is allowed only with some instructions */
3717	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
3718		rc = emulate_ud(ctxt);
3719		goto done;
3720	}
3721
3722	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
3723		rc = emulate_ud(ctxt);
3724		goto done;
3725	}
3726
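	/* SSE: #UD if CR0.EM is set or CR4.OSFXSR is clear; #NM if CR0.TS is set. */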
3727	if ((ctxt->d & Sse)
3728	    && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
3729		|| !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
3730		rc = emulate_ud(ctxt);
3731		goto done;
3732	}
3733
3734	if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
3735		rc = emulate_nm(ctxt);
3736		goto done;
3737	}
3738
3739	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3740		rc = emulator_check_intercept(ctxt, ctxt->intercept,
3741					      X86_ICPT_PRE_EXCEPT);
3742		if (rc != X86EMUL_CONTINUE)
3743			goto done;
3744	}
3745
3746	/* Privileged instruction can be executed only in CPL=0 */
3747	if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
3748		rc = emulate_gp(ctxt, 0);
3749		goto done;
3750	}
3751
3752	/* Instruction can only be executed in protected mode */
3753	if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
3754		rc = emulate_ud(ctxt);
3755		goto done;
3756	}
3757
3758	/* Do instruction specific permission checks */
3759	if (ctxt->check_perm) {
3760		rc = ctxt->check_perm(ctxt);
3761		if (rc != X86EMUL_CONTINUE)
3762			goto done;
3763	}
3764
3765	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3766		rc = emulator_check_intercept(ctxt, ctxt->intercept,
3767					      X86_ICPT_POST_EXCEPT);
3768		if (rc != X86EMUL_CONTINUE)
3769			goto done;
3770	}
3771
3772	if (ctxt->rep_prefix && (ctxt->d & String)) {
3773		/* All REP prefixes have the same first termination condition */
3774		if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
3775			ctxt->eip = ctxt->_eip;
3776			goto done;
3777		}
3778	}
3779
3780	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
3781		rc = segmented_read(ctxt, ctxt->src.addr.mem,
3782				    ctxt->src.valptr, ctxt->src.bytes);
3783		if (rc != X86EMUL_CONTINUE)
3784			goto done;
3785		ctxt->src.orig_val64 = ctxt->src.val64;
3786	}
3787
3788	if (ctxt->src2.type == OP_MEM) {
3789		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
3790				    &ctxt->src2.val, ctxt->src2.bytes);
3791		if (rc != X86EMUL_CONTINUE)
3792			goto done;
3793	}
3794
3795	if ((ctxt->d & DstMask) == ImplicitOps)
3796		goto special_insn;
3797
3799	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
3800		/* optimisation - avoid slow emulated read if Mov */
3801		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
3802				   &ctxt->dst.val, ctxt->dst.bytes);
3803		if (rc != X86EMUL_CONTINUE)
3804			goto done;
3805	}
3806	ctxt->dst.orig_val = ctxt->dst.val;
3807
3808special_insn:
3809
3810	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3811		rc = emulator_check_intercept(ctxt, ctxt->intercept,
3812					      X86_ICPT_POST_MEMACCESS);
3813		if (rc != X86EMUL_CONTINUE)
3814			goto done;
3815	}
3816
3817	if (ctxt->execute) {
3818		rc = ctxt->execute(ctxt);
3819		if (rc != X86EMUL_CONTINUE)
3820			goto done;
3821		goto writeback;
3822	}
3823
3824	if (ctxt->twobyte)
3825		goto twobyte_insn;
3826
3827	switch (ctxt->b) {
3828	case 0x06:		/* push es */
3829		rc = emulate_push_sreg(ctxt, VCPU_SREG_ES);
3830		break;
3831	case 0x07:		/* pop es */
3832		rc = emulate_pop_sreg(ctxt, VCPU_SREG_ES);
3833		break;
3834	case 0x0e:		/* push cs */
3835		rc = emulate_push_sreg(ctxt, VCPU_SREG_CS);
3836		break;
3837	case 0x16:		/* push ss */
3838		rc = emulate_push_sreg(ctxt, VCPU_SREG_SS);
3839		break;
3840	case 0x17:		/* pop ss */
3841		rc = emulate_pop_sreg(ctxt, VCPU_SREG_SS);
3842		break;
3843	case 0x1e:		/* push ds */
3844		rc = emulate_push_sreg(ctxt, VCPU_SREG_DS);
3845		break;
3846	case 0x1f:		/* pop ds */
3847		rc = emulate_pop_sreg(ctxt, VCPU_SREG_DS);
3848		break;
3849	case 0x40 ... 0x47: /* inc r16/r32 */
3850		emulate_1op("inc", ctxt->dst, ctxt->eflags);
3851		break;
3852	case 0x48 ... 0x4f: /* dec r16/r32 */
3853		emulate_1op("dec", ctxt->dst, ctxt->eflags);
3854		break;
3855	case 0x63:		/* movsxd */
3856		if (ctxt->mode != X86EMUL_MODE_PROT64)
3857			goto cannot_emulate;
3858		ctxt->dst.val = (s32) ctxt->src.val;
3859		break;
3860	case 0x6c:		/* insb */
3861	case 0x6d:		/* insw/insd */
3862		ctxt->src.val = ctxt->regs[VCPU_REGS_RDX];
3863		goto do_io_in;
3864	case 0x6e:		/* outsb */
3865	case 0x6f:		/* outsw/outsd */
3866		ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX];
3867		goto do_io_out;
3869	case 0x70 ... 0x7f: /* jcc (short) */
3870		if (test_cc(ctxt->b, ctxt->eflags))
3871			jmp_rel(ctxt, ctxt->src.val);
3872		break;
3873	case 0x8d: /* lea r16/r32, m */
3874		ctxt->dst.val = ctxt->src.addr.mem.ea;
3875		break;
3876	case 0x8f:		/* pop (sole member of Grp1a) */
3877		rc = em_grp1a(ctxt);
3878		break;
3879	case 0x90 ... 0x97: /* nop / xchg reg, rax */
3880		if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
3881			break;
3882		rc = em_xchg(ctxt);
3883		break;
3884	case 0x98: /* cbw/cwde/cdqe */
3885		switch (ctxt->op_bytes) {
3886		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
3887		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
3888		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
3889		}
3890		break;
3891	case 0xc0 ... 0xc1:
3892		rc = em_grp2(ctxt);
3893		break;
3894	case 0xc4:		/* les */
3895		rc = emulate_load_segment(ctxt, VCPU_SREG_ES);
3896		break;
3897	case 0xc5:		/* lds */
3898		rc = emulate_load_segment(ctxt, VCPU_SREG_DS);
3899		break;
3900	case 0xcc:		/* int3 */
3901		rc = emulate_int(ctxt, 3);
3902		break;
3903	case 0xcd:		/* int n */
3904		rc = emulate_int(ctxt, ctxt->src.val);
3905		break;
3906	case 0xce:		/* into */
3907		if (ctxt->eflags & EFLG_OF)
3908			rc = emulate_int(ctxt, 4);
3909		break;
3910	case 0xd0 ... 0xd1:	/* Grp2 */
3911		rc = em_grp2(ctxt);
3912		break;
3913	case 0xd2 ... 0xd3:	/* Grp2 */
3914		ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
3915		rc = em_grp2(ctxt);
3916		break;
3917	case 0xe4: 	/* inb */
3918	case 0xe5: 	/* in */
3919		goto do_io_in;
3920	case 0xe6: /* outb */
3921	case 0xe7: /* out */
3922		goto do_io_out;
3923	case 0xe8: /* call (near) */ {
3924		long int rel = ctxt->src.val;
3925		ctxt->src.val = (unsigned long) ctxt->_eip;
3926		jmp_rel(ctxt, rel);
3927		rc = em_push(ctxt);
3928		break;
3929	}
3930	case 0xe9: /* jmp rel */
3931	case 0xeb: /* jmp rel short */
3932		jmp_rel(ctxt, ctxt->src.val);
3933		ctxt->dst.type = OP_NONE; /* Disable writeback. */
3934		break;
3935	case 0xec: /* in al,dx */
3936	case 0xed: /* in (e/r)ax,dx */
3937	do_io_in:
3938		if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3939				     &ctxt->dst.val))
3940			goto done; /* IO is needed */
3941		break;
3942	case 0xee: /* out dx,al */
3943	case 0xef: /* out dx,(e/r)ax */
3944	do_io_out:
3945		ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3946				      &ctxt->src.val, 1);
3947		ctxt->dst.type = OP_NONE;	/* Disable writeback. */
3948		break;
3949	case 0xf4:              /* hlt */
3950		ctxt->ops->halt(ctxt);
3951		break;
3952	case 0xf5:	/* cmc */
3953		/* complement carry flag from eflags reg */
3954		ctxt->eflags ^= EFLG_CF;
3955		break;
3956	case 0xf6 ... 0xf7:	/* Grp3 */
3957		rc = em_grp3(ctxt);
3958		break;
3959	case 0xf8: /* clc */
3960		ctxt->eflags &= ~EFLG_CF;
3961		break;
3962	case 0xf9: /* stc */
3963		ctxt->eflags |= EFLG_CF;
3964		break;
3965	case 0xfc: /* cld */
3966		ctxt->eflags &= ~EFLG_DF;
3967		break;
3968	case 0xfd: /* std */
3969		ctxt->eflags |= EFLG_DF;
3970		break;
3971	case 0xfe: /* Grp4 */
3972		rc = em_grp45(ctxt);
3973		break;
3974	case 0xff: /* Grp5 */
3975		rc = em_grp45(ctxt);
3976		break;
3977	default:
3978		goto cannot_emulate;
3979	}
3980
3981	if (rc != X86EMUL_CONTINUE)
3982		goto done;
3983
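/* Commit the instruction's results, then handle rep/string bookkeeping. */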
3984writeback:
3985	rc = writeback(ctxt);
3986	if (rc != X86EMUL_CONTINUE)
3987		goto done;
3988
3989	/*
3990	 * Restore dst type in case the decoding will be reused
3991	 * (happens for string instructions).
3992	 */
3993	ctxt->dst.type = saved_dst_type;
3994
3995	if ((ctxt->d & SrcMask) == SrcSI)
3996		string_addr_inc(ctxt, seg_override(ctxt),
3997				VCPU_REGS_RSI, &ctxt->src);
3998
3999	if ((ctxt->d & DstMask) == DstDI)
4000		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
4001				&ctxt->dst);
4002
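	/* One iteration of a rep-prefixed string instruction has completed. */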
4003	if (ctxt->rep_prefix && (ctxt->d & String)) {
4004		struct read_cache *r = &ctxt->io_read;
4005		register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
4006
4007		if (!string_insn_completed(ctxt)) {
4008			/*
4009			 * Re-enter the guest when the pio read-ahead buffer is
4010			 * empty or, if it is not used, after every 1024 iterations.
4011			 */
4012			if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
4013			    (r->end == 0 || r->end != r->pos)) {
4014				/*
4015				 * Reset read cache. Usually happens before
4016				 * decode, but since instruction is restarted
4017				 * we have to do it here.
4018				 */
4019				ctxt->mem_read.end = 0;
4020				return EMULATION_RESTART;
4021			}
4022			goto done; /* skip rip writeback */
4023		}
4024	}
4025
4026	ctxt->eip = ctxt->_eip;
4027
4028done:
4029	if (rc == X86EMUL_PROPAGATE_FAULT)
4030		ctxt->have_exception = true;
4031	if (rc == X86EMUL_INTERCEPTED)
4032		return EMULATION_INTERCEPTED;
4033
4034	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
4035
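/* Open-coded handlers for 0x0f-prefixed (two-byte) opcodes. */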
4036twobyte_insn:
4037	switch (ctxt->b) {
4038	case 0x09:		/* wbinvd */
4039		(ctxt->ops->wbinvd)(ctxt);
4040		break;
4041	case 0x08:		/* invd */
4042	case 0x0d:		/* GrpP (prefetch) */
4043	case 0x18:		/* Grp16 (prefetch/nop) */
4044		break;
4045	case 0x20: /* mov cr, reg */
4046		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
4047		break;
4048	case 0x21: /* mov from dr to reg */
4049		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
4050		break;
4051	case 0x22: /* mov reg, cr */
4052		if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) {
4053			emulate_gp(ctxt, 0);
4054			rc = X86EMUL_PROPAGATE_FAULT;
4055			goto done;
4056		}
4057		ctxt->dst.type = OP_NONE;
4058		break;
4059	case 0x23: /* mov from reg to dr */
4060		if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val &
4061				((ctxt->mode == X86EMUL_MODE_PROT64) ?
4062				 ~0ULL : ~0U)) < 0) {
4063			/* #UD condition is already handled by the code above */
4064			emulate_gp(ctxt, 0);
4065			rc = X86EMUL_PROPAGATE_FAULT;
4066			goto done;
4067		}
4068
4069		ctxt->dst.type = OP_NONE;	/* no writeback */
4070		break;
4071	case 0x30:
4072		/* wrmsr */
4073		msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
4074			| ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
4075		if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) {
4076			emulate_gp(ctxt, 0);
4077			rc = X86EMUL_PROPAGATE_FAULT;
4078			goto done;
4079		}
4080		rc = X86EMUL_CONTINUE;
4081		break;
4082	case 0x32:
4083		/* rdmsr */
4084		if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) {
4085			emulate_gp(ctxt, 0);
4086			rc = X86EMUL_PROPAGATE_FAULT;
4087			goto done;
4088		} else {
4089			ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
4090			ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
4091		}
4092		rc = X86EMUL_CONTINUE;
4093		break;
4094	case 0x40 ... 0x4f:	/* cmov */
4095		ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
4096		if (!test_cc(ctxt->b, ctxt->eflags))
4097			ctxt->dst.type = OP_NONE; /* no writeback */
4098		break;
4099	case 0x80 ... 0x8f: /* jnz rel, etc*/
4100		if (test_cc(ctxt->b, ctxt->eflags))
4101			jmp_rel(ctxt, ctxt->src.val);
4102		break;
4103	case 0x90 ... 0x9f:     /* setcc r/m8 */
4104		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
4105		break;
4106	case 0xa0:	  /* push fs */
4107		rc = emulate_push_sreg(ctxt, VCPU_SREG_FS);
4108		break;
4109	case 0xa1:	 /* pop fs */
4110		rc = emulate_pop_sreg(ctxt, VCPU_SREG_FS);
4111		break;
4112	case 0xa3:
4113	      bt:		/* bt */
4114		ctxt->dst.type = OP_NONE;
4115		/* only subword offset */
4116		ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
4117		emulate_2op_SrcV_nobyte("bt", ctxt->src, ctxt->dst, ctxt->eflags);
4118		break;
4119	case 0xa4: /* shld imm8, r, r/m */
4120	case 0xa5: /* shld cl, r, r/m */
4121		emulate_2op_cl("shld", ctxt->src2, ctxt->src, ctxt->dst, ctxt->eflags);
4122		break;
4123	case 0xa8:	/* push gs */
4124		rc = emulate_push_sreg(ctxt, VCPU_SREG_GS);
4125		break;
4126	case 0xa9:	/* pop gs */
4127		rc = emulate_pop_sreg(ctxt, VCPU_SREG_GS);
4128		break;
4129	case 0xab:
4130	      bts:		/* bts */
4131		emulate_2op_SrcV_nobyte("bts", ctxt->src, ctxt->dst, ctxt->eflags);
4132		break;
4133	case 0xac: /* shrd imm8, r, r/m */
4134	case 0xad: /* shrd cl, r, r/m */
4135		emulate_2op_cl("shrd", ctxt->src2, ctxt->src, ctxt->dst, ctxt->eflags);
4136		break;
4137	case 0xae:              /* clflush */
4138		break;
4139	case 0xb0 ... 0xb1:	/* cmpxchg */
4140		/*
4141		 * Save real source value, then compare EAX against
4142		 * destination.
4143		 */
4144		ctxt->src.orig_val = ctxt->src.val;
4145		ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
4146		emulate_2op_SrcV("cmp", ctxt->src, ctxt->dst, ctxt->eflags);
4147		if (ctxt->eflags & EFLG_ZF) {
4148			/* Success: write back to memory. */
4149			ctxt->dst.val = ctxt->src.orig_val;
4150		} else {
4151			/* Failure: write the value we saw to EAX. */
4152			ctxt->dst.type = OP_REG;
4153			ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
4154		}
4155		break;
4156	case 0xb2:		/* lss */
4157		rc = emulate_load_segment(ctxt, VCPU_SREG_SS);
4158		break;
4159	case 0xb3:
4160	      btr:		/* btr */
4161		emulate_2op_SrcV_nobyte("btr", ctxt->src, ctxt->dst, ctxt->eflags);
4162		break;
4163	case 0xb4:		/* lfs */
4164		rc = emulate_load_segment(ctxt, VCPU_SREG_FS);
4165		break;
4166	case 0xb5:		/* lgs */
4167		rc = emulate_load_segment(ctxt, VCPU_SREG_GS);
4168		break;
4169	case 0xb6 ... 0xb7:	/* movzx */
4170		ctxt->dst.bytes = ctxt->op_bytes;
4171		ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
4172						       : (u16) ctxt->src.val;
4173		break;
4174	case 0xba:		/* Grp8 */
4175		switch (ctxt->modrm_reg & 3) {
4176		case 0:
4177			goto bt;
4178		case 1:
4179			goto bts;
4180		case 2:
4181			goto btr;
4182		case 3:
4183			goto btc;
4184		}
4185		break;
4186	case 0xbb:
4187	      btc:		/* btc */
4188		emulate_2op_SrcV_nobyte("btc", ctxt->src, ctxt->dst, ctxt->eflags);
4189		break;
4190	case 0xbc: {		/* bsf */
4191		u8 zf;
4192		__asm__ ("bsf %2, %0; setz %1"
4193			 : "=r"(ctxt->dst.val), "=q"(zf)
4194			 : "r"(ctxt->src.val));
4195		ctxt->eflags &= ~X86_EFLAGS_ZF;
4196		if (zf) {
4197			ctxt->eflags |= X86_EFLAGS_ZF;
4198			ctxt->dst.type = OP_NONE;	/* Disable writeback. */
4199		}
4200		break;
4201	}
4202	case 0xbd: {		/* bsr */
4203		u8 zf;
4204		__asm__ ("bsr %2, %0; setz %1"
4205			 : "=r"(ctxt->dst.val), "=q"(zf)
4206			 : "r"(ctxt->src.val));
4207		ctxt->eflags &= ~X86_EFLAGS_ZF;
4208		if (zf) {
4209			ctxt->eflags |= X86_EFLAGS_ZF;
4210			ctxt->dst.type = OP_NONE;	/* Disable writeback. */
4211		}
4212		break;
4213	}
4214	case 0xbe ... 0xbf:	/* movsx */
4215		ctxt->dst.bytes = ctxt->op_bytes;
4216		ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
4217							(s16) ctxt->src.val;
4218		break;
4219	case 0xc0 ... 0xc1:	/* xadd */
4220		emulate_2op_SrcV("add", ctxt->src, ctxt->dst, ctxt->eflags);
4221		/* Write back the register source. */
4222		ctxt->src.val = ctxt->dst.orig_val;
4223		write_register_operand(&ctxt->src);
4224		break;
4225	case 0xc3:		/* movnti */
4226		ctxt->dst.bytes = ctxt->op_bytes;
4227		ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
4228							(u64) ctxt->src.val;
4229		break;
4230	case 0xc7:		/* Grp9 (cmpxchg8b) */
4231		rc = em_grp9(ctxt);
4232		break;
4233	default:
4234		goto cannot_emulate;
4235	}
4236
4237	if (rc != X86EMUL_CONTINUE)
4238		goto done;
4239
4240	goto writeback;
4241
4242cannot_emulate:
4243	return EMULATION_FAILED;
4244}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/******************************************************************************
   3 * emulate.c
   4 *
   5 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
   6 *
   7 * Copyright (c) 2005 Keir Fraser
   8 *
   9 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
  10 * privileged instructions:
  11 *
  12 * Copyright (C) 2006 Qumranet
  13 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  14 *
  15 *   Avi Kivity <avi@qumranet.com>
  16 *   Yaniv Kamay <yaniv@qumranet.com>
  17 *
  18 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
  19 */
  20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  21
  22#include <linux/kvm_host.h>
  23#include "kvm_cache_regs.h"
  24#include "kvm_emulate.h"
  25#include <linux/stringify.h>
  26#include <asm/debugreg.h>
  27#include <asm/nospec-branch.h>
  28#include <asm/ibt.h>
  29
  30#include "x86.h"
  31#include "tss.h"
  32#include "mmu.h"
  33#include "pmu.h"
  34
  35/*
  36 * Operand types
  37 */
  38#define OpNone             0ull
  39#define OpImplicit         1ull  /* No generic decode */
  40#define OpReg              2ull  /* Register */
  41#define OpMem              3ull  /* Memory */
  42#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
  43#define OpDI               5ull  /* ES:DI/EDI/RDI */
  44#define OpMem64            6ull  /* Memory, 64-bit */
  45#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
  46#define OpDX               8ull  /* DX register */
  47#define OpCL               9ull  /* CL register (for shifts) */
  48#define OpImmByte         10ull  /* 8-bit sign extended immediate */
  49#define OpOne             11ull  /* Implied 1 */
  50#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
  51#define OpMem16           13ull  /* Memory operand (16-bit). */
  52#define OpMem32           14ull  /* Memory operand (32-bit). */
  53#define OpImmU            15ull  /* Immediate operand, zero extended */
  54#define OpSI              16ull  /* SI/ESI/RSI */
  55#define OpImmFAddr        17ull  /* Immediate far address */
  56#define OpMemFAddr        18ull  /* Far address in memory */
  57#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
  58#define OpES              20ull  /* ES */
  59#define OpCS              21ull  /* CS */
  60#define OpSS              22ull  /* SS */
  61#define OpDS              23ull  /* DS */
  62#define OpFS              24ull  /* FS */
  63#define OpGS              25ull  /* GS */
  64#define OpMem8            26ull  /* 8-bit zero extended memory operand */
  65#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
  66#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
  67#define OpAccLo           29ull  /* Low part of extended acc (AL/AX/EAX/RAX) */
  68#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */
  69
  70#define OpBits             5  /* Width of operand field */
  71#define OpMask             ((1ull << OpBits) - 1)
  72
  73/*
  74 * Opcode effective-address decode tables.
  75 * Note that we only emulate instructions that have at least one memory
  76 * operand (excluding implicit stack references). We assume that stack
  77 * references and instruction fetches will never occur in special memory
  78 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
  79 * not be handled.
  80 */
  81
  82/* Operand sizes: 8-bit operands or specified/overridden size. */
  83#define ByteOp      (1<<0)	/* 8-bit operands. */
  84/* Destination operand type. */
  85#define DstShift    1
  86#define ImplicitOps (OpImplicit << DstShift)
  87#define DstReg      (OpReg << DstShift)
  88#define DstMem      (OpMem << DstShift)
  89#define DstAcc      (OpAcc << DstShift)
  90#define DstDI       (OpDI << DstShift)
  91#define DstMem64    (OpMem64 << DstShift)
  92#define DstMem16    (OpMem16 << DstShift)
  93#define DstImmUByte (OpImmUByte << DstShift)
  94#define DstDX       (OpDX << DstShift)
  95#define DstAccLo    (OpAccLo << DstShift)
  96#define DstMask     (OpMask << DstShift)
  97/* Source operand type. */
  98#define SrcShift    6
  99#define SrcNone     (OpNone << SrcShift)
 100#define SrcReg      (OpReg << SrcShift)
 101#define SrcMem      (OpMem << SrcShift)
 102#define SrcMem16    (OpMem16 << SrcShift)
 103#define SrcMem32    (OpMem32 << SrcShift)
 104#define SrcImm      (OpImm << SrcShift)
 105#define SrcImmByte  (OpImmByte << SrcShift)
 106#define SrcOne      (OpOne << SrcShift)
 107#define SrcImmUByte (OpImmUByte << SrcShift)
 108#define SrcImmU     (OpImmU << SrcShift)
 109#define SrcSI       (OpSI << SrcShift)
 110#define SrcXLat     (OpXLat << SrcShift)
 111#define SrcImmFAddr (OpImmFAddr << SrcShift)
 112#define SrcMemFAddr (OpMemFAddr << SrcShift)
 113#define SrcAcc      (OpAcc << SrcShift)
 114#define SrcImmU16   (OpImmU16 << SrcShift)
 115#define SrcImm64    (OpImm64 << SrcShift)
 116#define SrcDX       (OpDX << SrcShift)
 117#define SrcMem8     (OpMem8 << SrcShift)
 118#define SrcAccHi    (OpAccHi << SrcShift)
 119#define SrcMask     (OpMask << SrcShift)
 120#define BitOp       (1<<11)
 121#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
 122#define String      (1<<13)     /* String instruction (rep capable) */
 123#define Stack       (1<<14)     /* Stack instruction (push/pop) */
 124#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
 125#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
 126#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
 127#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
 128#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
 129#define Escape      (5<<15)     /* Escape to coprocessor instruction */
 130#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
 131#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
 132#define Sse         (1<<18)     /* SSE Vector instruction */
 133/* Generic ModRM decode. */
 134#define ModRM       (1<<19)
 135/* Destination is only written; never read. */
 136#define Mov         (1<<20)
 137/* Misc flags */
 138#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
 139#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
 140#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
 141#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
 142#define Undefined   (1<<25) /* No Such Instruction */
 143#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
 144#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
 145#define No64	    (1<<28)
 146#define PageTable   (1 << 29)   /* instruction used to write page table */
 147#define NotImpl     (1 << 30)   /* instruction is not implemented */
 148/* Source 2 operand type */
 149#define Src2Shift   (31)
 150#define Src2None    (OpNone << Src2Shift)
 151#define Src2Mem     (OpMem << Src2Shift)
 152#define Src2CL      (OpCL << Src2Shift)
 153#define Src2ImmByte (OpImmByte << Src2Shift)
 154#define Src2One     (OpOne << Src2Shift)
 155#define Src2Imm     (OpImm << Src2Shift)
 156#define Src2ES      (OpES << Src2Shift)
 157#define Src2CS      (OpCS << Src2Shift)
 158#define Src2SS      (OpSS << Src2Shift)
 159#define Src2DS      (OpDS << Src2Shift)
 160#define Src2FS      (OpFS << Src2Shift)
 161#define Src2GS      (OpGS << Src2Shift)
 162#define Src2Mask    (OpMask << Src2Shift)
 163#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
 164#define AlignMask   ((u64)7 << 41)
 165#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
 166#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
 167#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
 168#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
 169#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
 170#define NoWrite     ((u64)1 << 45)  /* No writeback */
 171#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
 172#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
 173#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
 174#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
 175#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
 176#define NearBranch  ((u64)1 << 52)  /* Near branches */
 177#define No16	    ((u64)1 << 53)  /* No 16 bit operand */
 178#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
 179#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */
 180#define IsBranch    ((u64)1 << 56)  /* Instruction is considered a branch. */
 181
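/* Extended accumulator (e.g. DX:AX), used by widening mul/div-style ops. */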
 182#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
 183
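/* X2..X16 replicate an opcode-table entry across a range of opcodes. */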
 184#define X2(x...) x, x
 185#define X3(x...) X2(x), x
 186#define X4(x...) X2(x), X2(x)
 187#define X5(x...) X4(x), x
 188#define X6(x...) X4(x), X2(x)
 189#define X7(x...) X4(x), X3(x)
 190#define X8(x...) X4(x), X4(x)
 191#define X16(x...) X8(x), X8(x)
 192
 193struct opcode {
 194	u64 flags;
 195	u8 intercept;
 196	u8 pad[7];
 197	union {
 198		int (*execute)(struct x86_emulate_ctxt *ctxt);
 199		const struct opcode *group;
 200		const struct group_dual *gdual;
 201		const struct gprefix *gprefix;
 202		const struct escape *esc;
 203		const struct instr_dual *idual;
 204		const struct mode_dual *mdual;
 205		void (*fastop)(struct fastop *fake);
 206	} u;
 207	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
 208};
 209
 210struct group_dual {
 211	struct opcode mod012[8];
 212	struct opcode mod3[8];
 213};
 214
 215struct gprefix {
 216	struct opcode pfx_no;
 217	struct opcode pfx_66;
 218	struct opcode pfx_f2;
 219	struct opcode pfx_f3;
 220};
 221
 222struct escape {
 223	struct opcode op[8];
 224	struct opcode high[64];
 225};
 226
 227struct instr_dual {
 228	struct opcode mod012;
 229	struct opcode mod3;
 230};
 231
 232struct mode_dual {
 233	struct opcode mode32;
 234	struct opcode mode64;
 235};
 236
 237#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
 238
 239enum x86_transfer_type {
 240	X86_TRANSFER_NONE,
 241	X86_TRANSFER_CALL_JMP,
 242	X86_TRANSFER_RET,
 243	X86_TRANSFER_TASK_SWITCH,
 244};
 245
 246static void writeback_registers(struct x86_emulate_ctxt *ctxt)
 247{
 248	unsigned long dirty = ctxt->regs_dirty;
 249	unsigned reg;
 250
 251	for_each_set_bit(reg, &dirty, NR_EMULATOR_GPRS)
 252		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
 253}
 254
 255static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
 256{
 257	ctxt->regs_dirty = 0;
 258	ctxt->regs_valid = 0;
 259}
 260
 261/*
 262 * These EFLAGS bits are restored from saved value during emulation, and
 263 * any changes are written back to the saved value after emulation.
 264 */
 265#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
 266		     X86_EFLAGS_PF|X86_EFLAGS_CF)
 267
 268#ifdef CONFIG_X86_64
 269#define ON64(x) x
 270#else
 271#define ON64(x)
 272#endif
 273
 274/*
 275 * fastop functions have a special calling convention:
 276 *
 277 * dst:    rax        (in/out)
 278 * src:    rdx        (in/out)
 279 * src2:   rcx        (in)
 280 * flags:  rflags     (in/out)
 281 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 282 *
 283 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 284 * different operand sizes can be reached by calculation, rather than a jump
 285 * table (which would be bigger than the code).
 286 *
 287 * The 16 byte alignment, considering 5 bytes for the RET thunk, 3 for ENDBR
 288 * and 1 for the straight line speculation INT3, leaves 7 bytes for the
 289 * body of the function.  Currently none is larger than 4.
 290 */
 291static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
 292
 293#define FASTOP_SIZE	16
 294
 295#define __FOP_FUNC(name) \
 296	".align " __stringify(FASTOP_SIZE) " \n\t" \
 297	".type " name ", @function \n\t" \
 298	name ":\n\t" \
 299	ASM_ENDBR \
 300	IBT_NOSEAL(name)
 301
 302#define FOP_FUNC(name) \
 303	__FOP_FUNC(#name)
 304
 305#define __FOP_RET(name) \
 306	"11: " ASM_RET \
 307	".size " name ", .-" name "\n\t"
 308
 309#define FOP_RET(name) \
 310	__FOP_RET(#name)
 311
 312#define __FOP_START(op, align) \
 313	extern void em_##op(struct fastop *fake); \
 314	asm(".pushsection .text, \"ax\" \n\t" \
 315	    ".global em_" #op " \n\t" \
 316	    ".align " __stringify(align) " \n\t" \
 317	    "em_" #op ":\n\t"
 318
 319#define FOP_START(op) __FOP_START(op, FASTOP_SIZE)
 320
 321#define FOP_END \
 322	    ".popsection")
 323
 324#define __FOPNOP(name) \
 325	__FOP_FUNC(name) \
 326	__FOP_RET(name)
 327
 328#define FOPNOP() \
 329	__FOPNOP(__stringify(__UNIQUE_ID(nop)))
 330
 331#define FOP1E(op,  dst) \
 332	__FOP_FUNC(#op "_" #dst) \
 333	"10: " #op " %" #dst " \n\t" \
 334	__FOP_RET(#op "_" #dst)
 335
 336#define FOP1EEX(op,  dst) \
 337	FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)
 338
 339#define FASTOP1(op) \
 340	FOP_START(op) \
 341	FOP1E(op##b, al) \
 342	FOP1E(op##w, ax) \
 343	FOP1E(op##l, eax) \
 344	ON64(FOP1E(op##q, rax))	\
 345	FOP_END
 346
 347/* 1-operand, using src2 (for MUL/DIV r/m) */
 348#define FASTOP1SRC2(op, name) \
 349	FOP_START(name) \
 350	FOP1E(op, cl) \
 351	FOP1E(op, cx) \
 352	FOP1E(op, ecx) \
 353	ON64(FOP1E(op, rcx)) \
 354	FOP_END
 355
 356/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
 357#define FASTOP1SRC2EX(op, name) \
 358	FOP_START(name) \
 359	FOP1EEX(op, cl) \
 360	FOP1EEX(op, cx) \
 361	FOP1EEX(op, ecx) \
 362	ON64(FOP1EEX(op, rcx)) \
 363	FOP_END
 364
 365#define FOP2E(op,  dst, src)	   \
 366	__FOP_FUNC(#op "_" #dst "_" #src) \
 367	#op " %" #src ", %" #dst " \n\t" \
 368	__FOP_RET(#op "_" #dst "_" #src)
 369
 370#define FASTOP2(op) \
 371	FOP_START(op) \
 372	FOP2E(op##b, al, dl) \
 373	FOP2E(op##w, ax, dx) \
 374	FOP2E(op##l, eax, edx) \
 375	ON64(FOP2E(op##q, rax, rdx)) \
 376	FOP_END
 377
 378/* 2 operand, word only */
 379#define FASTOP2W(op) \
 380	FOP_START(op) \
 381	FOPNOP() \
 382	FOP2E(op##w, ax, dx) \
 383	FOP2E(op##l, eax, edx) \
 384	ON64(FOP2E(op##q, rax, rdx)) \
 385	FOP_END
 386
 387/* 2 operand, src is CL */
 388#define FASTOP2CL(op) \
 389	FOP_START(op) \
 390	FOP2E(op##b, al, cl) \
 391	FOP2E(op##w, ax, cl) \
 392	FOP2E(op##l, eax, cl) \
 393	ON64(FOP2E(op##q, rax, cl)) \
 394	FOP_END
 395
 396/* 2 operand, src and dest are reversed */
 397#define FASTOP2R(op, name) \
 398	FOP_START(name) \
 399	FOP2E(op##b, dl, al) \
 400	FOP2E(op##w, dx, ax) \
 401	FOP2E(op##l, edx, eax) \
 402	ON64(FOP2E(op##q, rdx, rax)) \
 403	FOP_END
 404
 405#define FOP3E(op,  dst, src, src2) \
 406	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
 407	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
 408	__FOP_RET(#op "_" #dst "_" #src "_" #src2)
 409
 410/* 3-operand, word-only, src2=cl */
 411#define FASTOP3WCL(op) \
 412	FOP_START(op) \
 413	FOPNOP() \
 414	FOP3E(op##w, ax, dx, cl) \
 415	FOP3E(op##l, eax, edx, cl) \
 416	ON64(FOP3E(op##q, rax, rdx, cl)) \
 417	FOP_END
 418
 419/* Special case for SETcc - 1 instruction per cc */
 420#define FOP_SETCC(op) \
 421	FOP_FUNC(op) \
 422	#op " %al \n\t" \
 423	FOP_RET(op)
 424
 425FOP_START(setcc)
 426FOP_SETCC(seto)
 427FOP_SETCC(setno)
 428FOP_SETCC(setc)
 429FOP_SETCC(setnc)
 430FOP_SETCC(setz)
 431FOP_SETCC(setnz)
 432FOP_SETCC(setbe)
 433FOP_SETCC(setnbe)
 434FOP_SETCC(sets)
 435FOP_SETCC(setns)
 436FOP_SETCC(setp)
 437FOP_SETCC(setnp)
 438FOP_SETCC(setl)
 439FOP_SETCC(setnl)
 440FOP_SETCC(setle)
 441FOP_SETCC(setnle)
 442FOP_END;
 443
 444FOP_START(salc)
 445FOP_FUNC(salc)
 446"pushf; sbb %al, %al; popf \n\t"
 447FOP_RET(salc)
 448FOP_END;
 449
 450/*
 451 * XXX: inoutclob user must know where the argument is being expanded.
 452 *      Using asm goto would allow us to remove _fault.
 453 */
 454#define asm_safe(insn, inoutclob...) \
 455({ \
 456	int _fault = 0; \
 457 \
 458	asm volatile("1:" insn "\n" \
 459	             "2:\n" \
 460		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
 461	             : [_fault] "+r"(_fault) inoutclob ); \
 462 \
 463	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
 464})
 465
 466static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
 467				    enum x86_intercept intercept,
 468				    enum x86_intercept_stage stage)
 469{
 470	struct x86_instruction_info info = {
 471		.intercept  = intercept,
 472		.rep_prefix = ctxt->rep_prefix,
 473		.modrm_mod  = ctxt->modrm_mod,
 474		.modrm_reg  = ctxt->modrm_reg,
 475		.modrm_rm   = ctxt->modrm_rm,
 476		.src_val    = ctxt->src.val64,
 477		.dst_val    = ctxt->dst.val64,
 478		.src_bytes  = ctxt->src.bytes,
 479		.dst_bytes  = ctxt->dst.bytes,
 480		.ad_bytes   = ctxt->ad_bytes,
 481		.next_rip   = ctxt->eip,
 482	};
 483
 484	return ctxt->ops->intercept(ctxt, &info, stage);
 485}
 486
 487static void assign_masked(ulong *dest, ulong src, ulong mask)
 488{
 489	*dest = (*dest & ~mask) | (src & mask);
 490}
 491
 492static void assign_register(unsigned long *reg, u64 val, int bytes)
 493{
 494	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
 495	switch (bytes) {
 496	case 1:
 497		*(u8 *)reg = (u8)val;
 498		break;
 499	case 2:
 500		*(u16 *)reg = (u16)val;
 501		break;
 502	case 4:
 503		*reg = (u32)val;
 504		break;	/* 64b: zero-extend */
 505	case 8:
 506		*reg = val;
 507		break;
 508	}
 509}
 510
 511static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
 512{
 513	return (1UL << (ctxt->ad_bytes << 3)) - 1;
 514}
 515
 516static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
 517{
 518	u16 sel;
 519	struct desc_struct ss;
 520
 521	if (ctxt->mode == X86EMUL_MODE_PROT64)
 522		return ~0UL;
 523	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
 524	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
 525}
 526
 527static int stack_size(struct x86_emulate_ctxt *ctxt)
 528{
 529	return (__fls(stack_mask(ctxt)) + 1) >> 3;
 530}
 531
 532/* Access/update address held in a register, based on addressing mode. */
 533static inline unsigned long
 534address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
 535{
 536	if (ctxt->ad_bytes == sizeof(unsigned long))
 537		return reg;
 538	else
 539		return reg & ad_mask(ctxt);
 540}
 541
 542static inline unsigned long
 543register_address(struct x86_emulate_ctxt *ctxt, int reg)
 544{
 545	return address_mask(ctxt, reg_read(ctxt, reg));
 546}
 547
 548static void masked_increment(ulong *reg, ulong mask, int inc)
 549{
 550	assign_masked(reg, *reg + inc, mask);
 551}
 552
 553static inline void
 554register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
 555{
 556	ulong *preg = reg_rmw(ctxt, reg);
 557
 558	assign_register(preg, *preg + inc, ctxt->ad_bytes);
 559}
 560
 561static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
 562{
 563	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
 564}
 565
 566static u32 desc_limit_scaled(struct desc_struct *desc)
 567{
 568	u32 limit = get_desc_limit(desc);
 569
 570	return desc->g ? (limit << 12) | 0xfff : limit;
 571}
 572
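/* In long mode, only FS and GS can have a non-zero segment base. */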
 573static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
 574{
 575	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
 576		return 0;
 577
 578	return ctxt->ops->get_cached_segment_base(ctxt, seg);
 579}
 580
 581static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
 582			     u32 error, bool valid)
 583{
 584	if (KVM_EMULATOR_BUG_ON(vec > 0x1f, ctxt))
 585		return X86EMUL_UNHANDLEABLE;
 586
 587	ctxt->exception.vector = vec;
 588	ctxt->exception.error_code = error;
 589	ctxt->exception.error_code_valid = valid;
 590	return X86EMUL_PROPAGATE_FAULT;
 591}
 592
 593static int emulate_db(struct x86_emulate_ctxt *ctxt)
 594{
 595	return emulate_exception(ctxt, DB_VECTOR, 0, false);
 596}
 597
 598static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
 599{
 600	return emulate_exception(ctxt, GP_VECTOR, err, true);
 601}
 602
 603static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
 604{
 605	return emulate_exception(ctxt, SS_VECTOR, err, true);
 606}
 607
 608static int emulate_ud(struct x86_emulate_ctxt *ctxt)
 609{
 610	return emulate_exception(ctxt, UD_VECTOR, 0, false);
 611}
 612
 613static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
 614{
 615	return emulate_exception(ctxt, TS_VECTOR, err, true);
 616}
 617
 618static int emulate_de(struct x86_emulate_ctxt *ctxt)
 619{
 620	return emulate_exception(ctxt, DE_VECTOR, 0, false);
 621}
 622
 623static int emulate_nm(struct x86_emulate_ctxt *ctxt)
 624{
 625	return emulate_exception(ctxt, NM_VECTOR, 0, false);
 626}
 627
 628static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
 629{
 630	u16 selector;
 631	struct desc_struct desc;
 632
 633	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
 634	return selector;
 635}
 636
 637static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
 638				 unsigned seg)
 639{
 640	u16 dummy;
 641	u32 base3;
 642	struct desc_struct desc;
 643
 644	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
 645	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
 646}
 647
 648static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
 649{
 650	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
 651}
 652
 653static inline bool emul_is_noncanonical_address(u64 la,
 654						struct x86_emulate_ctxt *ctxt)
 655{
 656	return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
 657}
 658
 659/*
 660 * x86 defines three classes of vector instructions: explicitly
 661 * aligned, explicitly unaligned, and the rest, which change behaviour
 662 * depending on whether they're AVX encoded or not.
 663 *
 664 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 665 * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
 666 * 512 bytes of data must be aligned to a 16 byte boundary.
 667 */
 668static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
 669{
 670	u64 alignment = ctxt->d & AlignMask;
 671
 672	if (likely(size < 16))
 673		return 1;
 674
 675	switch (alignment) {
 676	case Unaligned:
 677	case Avx:
 678		return 1;
 679	case Aligned16:
 680		return 16;
 681	case Aligned:
 682	default:
 683		return size;
 684	}
 685}
 686
 687static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 688				       struct segmented_address addr,
 689				       unsigned *max_size, unsigned size,
 690				       enum x86emul_mode mode, ulong *linear,
 691				       unsigned int flags)
 692{
 693	struct desc_struct desc;
 694	bool usable;
 695	ulong la;
 696	u32 lim;
 697	u16 sel;
 698	u8  va_bits;
 699
 700	la = seg_base(ctxt, addr.seg) + addr.ea;
 701	*max_size = 0;
 702	switch (mode) {
 703	case X86EMUL_MODE_PROT64:
 704		*linear = la = ctxt->ops->get_untagged_addr(ctxt, la, flags);
 705		va_bits = ctxt_virt_addr_bits(ctxt);
 706		if (!__is_canonical_address(la, va_bits))
 707			goto bad;
 708
 709		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
 710		if (size > *max_size)
 711			goto bad;
 712		break;
 713	default:
 714		*linear = la = (u32)la;
 715		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
 716						addr.seg);
 717		if (!usable)
 718			goto bad;
 719		/* code segment in protected mode or read-only data segment */
 720		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) &&
 721		    (flags & X86EMUL_F_WRITE))
 722			goto bad;
 723		/* unreadable code segment */
 724		if (!(flags & X86EMUL_F_FETCH) && (desc.type & 8) && !(desc.type & 2))
 725			goto bad;
 726		lim = desc_limit_scaled(&desc);
 727		if (!(desc.type & 8) && (desc.type & 4)) {
 728			/* expand-down segment */
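			/* Valid offsets lie strictly above the limit. */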
 729			if (addr.ea <= lim)
 730				goto bad;
 731			lim = desc.d ? 0xffffffff : 0xffff;
 732		}
 733		if (addr.ea > lim)
 734			goto bad;
 735		if (lim == 0xffffffff)
 736			*max_size = ~0u;
 737		else {
 738			*max_size = (u64)lim + 1 - addr.ea;
 739			if (size > *max_size)
 740				goto bad;
 741		}
 742		break;
 743	}
 744	if (la & (insn_alignment(ctxt, size) - 1))
 745		return emulate_gp(ctxt, 0);
 746	return X86EMUL_CONTINUE;
 747bad:
 748	if (addr.seg == VCPU_SREG_SS)
 749		return emulate_ss(ctxt, 0);
 750	else
 751		return emulate_gp(ctxt, 0);
 752}
 753
 754static int linearize(struct x86_emulate_ctxt *ctxt,
 755		     struct segmented_address addr,
 756		     unsigned size, bool write,
 757		     ulong *linear)
 758{
 759	unsigned max_size;
 760	return __linearize(ctxt, addr, &max_size, size, ctxt->mode, linear,
 761			   write ? X86EMUL_F_WRITE : 0);
 762}
 763
 764static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
 765{
 766	ulong linear;
 767	int rc;
 768	unsigned max_size;
 769	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 770					   .ea = dst };
 771
 772	if (ctxt->op_bytes != sizeof(unsigned long))
 773		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
 774	rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear,
 775			 X86EMUL_F_FETCH);
 776	if (rc == X86EMUL_CONTINUE)
 777		ctxt->_eip = addr.ea;
 778	return rc;
 779}
 780
 781static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
 782{
 783	u64 efer;
 784	struct desc_struct cs;
 785	u16 selector;
 786	u32 base3;
 787
 788	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
 789
 790	if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
 791		/* Real mode. cpu must not have long mode active */
 792		if (efer & EFER_LMA)
 793			return X86EMUL_UNHANDLEABLE;
 794		ctxt->mode = X86EMUL_MODE_REAL;
 795		return X86EMUL_CONTINUE;
 796	}
 797
 798	if (ctxt->eflags & X86_EFLAGS_VM) {
 799		/* Protected/VM86 mode. cpu must not have long mode active */
 800		if (efer & EFER_LMA)
 801			return X86EMUL_UNHANDLEABLE;
 802		ctxt->mode = X86EMUL_MODE_VM86;
 803		return X86EMUL_CONTINUE;
 804	}
 805
 806	if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
 807		return X86EMUL_UNHANDLEABLE;
 808
 809	if (efer & EFER_LMA) {
 810		if (cs.l) {
 811			/* Proper long mode */
 812			ctxt->mode = X86EMUL_MODE_PROT64;
 813		} else if (cs.d) {
 814			/* 32 bit compatibility mode */
 815			ctxt->mode = X86EMUL_MODE_PROT32;
 816		} else {
 817			ctxt->mode = X86EMUL_MODE_PROT16;
 818		}
 819	} else {
 820		/* Legacy 32 bit / 16 bit mode */
 821		ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
 822	}
 823
 824	return X86EMUL_CONTINUE;
 825}
 826
 827static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
 828{
 829	return assign_eip(ctxt, dst);
 830}
 831
 832static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
 833{
 834	int rc = emulator_recalc_and_set_mode(ctxt);
 835
 836	if (rc != X86EMUL_CONTINUE)
 837		return rc;
 838
 839	return assign_eip(ctxt, dst);
 840}
 841
 842static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
 843{
 844	return assign_eip_near(ctxt, ctxt->_eip + rel);
 845}
 846
 847static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
 848			      void *data, unsigned size)
 849{
 850	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
 851}
 852
 853static int linear_write_system(struct x86_emulate_ctxt *ctxt,
 854			       ulong linear, void *data,
 855			       unsigned int size)
 856{
 857	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
 858}
 859
 860static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 861			      struct segmented_address addr,
 862			      void *data,
 863			      unsigned size)
 864{
 865	int rc;
 866	ulong linear;
 867
 868	rc = linearize(ctxt, addr, size, false, &linear);
 869	if (rc != X86EMUL_CONTINUE)
 870		return rc;
 871	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
 872}
 873
 874static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
 875			       struct segmented_address addr,
 876			       void *data,
 877			       unsigned int size)
 878{
 879	int rc;
 880	ulong linear;
 881
 882	rc = linearize(ctxt, addr, size, true, &linear);
 883	if (rc != X86EMUL_CONTINUE)
 884		return rc;
 885	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
 886}
 887
 888/*
 889 * Prefetch the remaining bytes of the instruction without crossing page
 890 * boundary if they are not in fetch_cache yet.
 891 */
 892static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 893{
 894	int rc;
 895	unsigned size, max_size;
 896	unsigned long linear;
 897	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
 898	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 899					   .ea = ctxt->eip + cur_size };
 900
 901	/*
 902	 * We do not know exactly how many bytes will be needed, and
 903	 * __linearize is expensive, so fetch as much as possible.  We
 904	 * just have to avoid going beyond the 15 byte limit, the end
 905	 * of the segment, or the end of the page.
 906	 *
 907	 * __linearize is called with size 0 so that it does not do any
 908	 * boundary check itself.  Instead, we use max_size to check
 909	 * against op_size.
 910	 */
 911	rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear,
 912			 X86EMUL_F_FETCH);
 913	if (unlikely(rc != X86EMUL_CONTINUE))
 914		return rc;
 915
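	/*
	 * cur_size <= 15, so "15 ^ cur_size" equals "15 - cur_size":
	 * the room left below the 15-byte instruction-length limit.
	 */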
 916	size = min_t(unsigned, 15UL ^ cur_size, max_size);
 917	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
 918
 919	/*
 920	 * One instruction can only straddle two pages,
 921	 * and one has been loaded at the beginning of
 922	 * x86_decode_insn.  So, if there still are not
 923	 * enough bytes, we must have hit the 15-byte limit.
 924	 */
 925	if (unlikely(size < op_size))
 926		return emulate_gp(ctxt, 0);
 927
 928	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
 929			      size, &ctxt->exception);
 930	if (unlikely(rc != X86EMUL_CONTINUE))
 931		return rc;
 932	ctxt->fetch.end += size;
 933	return X86EMUL_CONTINUE;
 934}
 935
 936static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
 937					       unsigned size)
 938{
 939	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
 940
 941	if (unlikely(done_size < size))
 942		return __do_insn_fetch_bytes(ctxt, size - done_size);
 943	else
 944		return X86EMUL_CONTINUE;
 945}
 946
 947/* Fetch next part of the instruction being emulated. */
 948#define insn_fetch(_type, _ctxt)					\
 949({	_type _x;							\
 950									\
 951	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
 952	if (rc != X86EMUL_CONTINUE)					\
 953		goto done;						\
 954	ctxt->_eip += sizeof(_type);					\
 955	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
 956	ctxt->fetch.ptr += sizeof(_type);				\
 957	_x;								\
 958})
 959
 960#define insn_fetch_arr(_arr, _size, _ctxt)				\
 961({									\
 962	rc = do_insn_fetch_bytes(_ctxt, _size);				\
 963	if (rc != X86EMUL_CONTINUE)					\
 964		goto done;						\
 965	ctxt->_eip += (_size);						\
 966	memcpy(_arr, ctxt->fetch.ptr, _size);				\
 967	ctxt->fetch.ptr += (_size);					\
 968})
 969
 970/*
 971 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 972 * pointer into the block that addresses the relevant register.
 973 * @byteop specifies whether to decode AH,CH,DH,BH (only without a REX prefix).
 974 */
 975static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
 976			     int byteop)
 977{
 978	void *p;
 979	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
 980
 981	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
 982		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
 983	else
 984		p = reg_rmw(ctxt, modrm_reg);
 985	return p;
 986}
 987
 988static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 989			   struct segmented_address addr,
 990			   u16 *size, unsigned long *address, int op_bytes)
 991{
 992	int rc;
 993
 994	if (op_bytes == 2)
 995		op_bytes = 3;
 996	*address = 0;
 997	rc = segmented_read_std(ctxt, addr, size, 2);
 998	if (rc != X86EMUL_CONTINUE)
 999		return rc;
1000	addr.ea += 2;
1001	rc = segmented_read_std(ctxt, addr, address, op_bytes);
1002	return rc;
1003}
1004
1005FASTOP2(add);
1006FASTOP2(or);
1007FASTOP2(adc);
1008FASTOP2(sbb);
1009FASTOP2(and);
1010FASTOP2(sub);
1011FASTOP2(xor);
1012FASTOP2(cmp);
1013FASTOP2(test);
1014
1015FASTOP1SRC2(mul, mul_ex);
1016FASTOP1SRC2(imul, imul_ex);
1017FASTOP1SRC2EX(div, div_ex);
1018FASTOP1SRC2EX(idiv, idiv_ex);
1019
1020FASTOP3WCL(shld);
1021FASTOP3WCL(shrd);
1022
1023FASTOP2W(imul);
1024
1025FASTOP1(not);
1026FASTOP1(neg);
1027FASTOP1(inc);
1028FASTOP1(dec);
1029
1030FASTOP2CL(rol);
1031FASTOP2CL(ror);
1032FASTOP2CL(rcl);
1033FASTOP2CL(rcr);
1034FASTOP2CL(shl);
1035FASTOP2CL(shr);
1036FASTOP2CL(sar);
1037
1038FASTOP2W(bsf);
1039FASTOP2W(bsr);
1040FASTOP2W(bt);
1041FASTOP2W(bts);
1042FASTOP2W(btr);
1043FASTOP2W(btc);
1044
1045FASTOP2(xadd);
1046
1047FASTOP2R(cmp, cmp_r);
1048
1049static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1050{
1051	/* If src is zero, do not writeback, but update flags */
1052	if (ctxt->src.val == 0)
1053		ctxt->dst.type = OP_NONE;
1054	return fastop(ctxt, em_bsf);
1055}
1056
1057static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1058{
1059	/* If src is zero, do not writeback, but update flags */
1060	if (ctxt->src.val == 0)
1061		ctxt->dst.type = OP_NONE;
1062	return fastop(ctxt, em_bsr);
1063}
1064
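/* Evaluate a condition code by calling the matching setcc fastop stub. */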
1065static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1066{
1067	u8 rc;
1068	void (*fop)(void) = (void *)em_setcc + FASTOP_SIZE * (condition & 0xf);
1069
1070	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1071	asm("push %[flags]; popf; " CALL_NOSPEC
1072	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1073	return rc;
1074}
1075
1076static void fetch_register_operand(struct operand *op)
1077{
1078	switch (op->bytes) {
1079	case 1:
1080		op->val = *(u8 *)op->addr.reg;
1081		break;
1082	case 2:
1083		op->val = *(u16 *)op->addr.reg;
1084		break;
1085	case 4:
1086		op->val = *(u32 *)op->addr.reg;
1087		break;
1088	case 8:
1089		op->val = *(u64 *)op->addr.reg;
1090		break;
1091	}
1092}
1093
1094static int em_fninit(struct x86_emulate_ctxt *ctxt)
1095{
1096	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1097		return emulate_nm(ctxt);
1098
1099	kvm_fpu_get();
1100	asm volatile("fninit");
1101	kvm_fpu_put();
1102	return X86EMUL_CONTINUE;
1103}
1104
1105static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1106{
1107	u16 fcw;
1108
1109	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1110		return emulate_nm(ctxt);
1111
1112	kvm_fpu_get();
1113	asm volatile("fnstcw %0": "+m"(fcw));
1114	kvm_fpu_put();
1115
1116	ctxt->dst.val = fcw;
1117
1118	return X86EMUL_CONTINUE;
1119}
1120
1121static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1122{
1123	u16 fsw;
1124
1125	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1126		return emulate_nm(ctxt);
1127
1128	kvm_fpu_get();
1129	asm volatile("fnstsw %0": "+m"(fsw));
1130	kvm_fpu_put();
1131
1132	ctxt->dst.val = fsw;
1133
1134	return X86EMUL_CONTINUE;
1135}
1136
1137static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1138				    struct operand *op)
1139{
1140	unsigned int reg;
1141
1142	if (ctxt->d & ModRM)
1143		reg = ctxt->modrm_reg;
1144	else
1145		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1146
1147	if (ctxt->d & Sse) {
1148		op->type = OP_XMM;
1149		op->bytes = 16;
1150		op->addr.xmm = reg;
1151		kvm_read_sse_reg(reg, &op->vec_val);
1152		return;
1153	}
1154	if (ctxt->d & Mmx) {
1155		reg &= 7;
1156		op->type = OP_MM;
1157		op->bytes = 8;
1158		op->addr.mm = reg;
1159		return;
1160	}
1161
1162	op->type = OP_REG;
1163	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1164	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1165
1166	fetch_register_operand(op);
1167	op->orig_val = op->val;
1168}
1169
1170static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1171{
1172	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1173		ctxt->modrm_seg = VCPU_SREG_SS;
1174}
1175
1176static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1177			struct operand *op)
1178{
1179	u8 sib;
1180	int index_reg, base_reg, scale;
1181	int rc = X86EMUL_CONTINUE;
1182	ulong modrm_ea = 0;
1183
1184	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1185	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1186	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1187
1188	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1189	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1190	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1191	ctxt->modrm_seg = VCPU_SREG_DS;
1192
1193	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1194		op->type = OP_REG;
1195		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1196		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1197				ctxt->d & ByteOp);
1198		if (ctxt->d & Sse) {
1199			op->type = OP_XMM;
1200			op->bytes = 16;
1201			op->addr.xmm = ctxt->modrm_rm;
1202			kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
1203			return rc;
1204		}
1205		if (ctxt->d & Mmx) {
1206			op->type = OP_MM;
1207			op->bytes = 8;
1208			op->addr.mm = ctxt->modrm_rm & 7;
1209			return rc;
1210		}
1211		fetch_register_operand(op);
1212		return rc;
1213	}
1214
1215	op->type = OP_MEM;
1216
1217	if (ctxt->ad_bytes == 2) {
1218		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1219		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1220		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1221		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1222
1223		/* 16-bit ModR/M decode. */
1224		switch (ctxt->modrm_mod) {
1225		case 0:
1226			if (ctxt->modrm_rm == 6)
1227				modrm_ea += insn_fetch(u16, ctxt);
1228			break;
1229		case 1:
1230			modrm_ea += insn_fetch(s8, ctxt);
1231			break;
1232		case 2:
1233			modrm_ea += insn_fetch(u16, ctxt);
1234			break;
1235		}
1236		switch (ctxt->modrm_rm) {
1237		case 0:
1238			modrm_ea += bx + si;
1239			break;
1240		case 1:
1241			modrm_ea += bx + di;
1242			break;
1243		case 2:
1244			modrm_ea += bp + si;
1245			break;
1246		case 3:
1247			modrm_ea += bp + di;
1248			break;
1249		case 4:
1250			modrm_ea += si;
1251			break;
1252		case 5:
1253			modrm_ea += di;
1254			break;
1255		case 6:
1256			if (ctxt->modrm_mod != 0)
1257				modrm_ea += bp;
1258			break;
1259		case 7:
1260			modrm_ea += bx;
1261			break;
1262		}
1263		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1264		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1265			ctxt->modrm_seg = VCPU_SREG_SS;
1266		modrm_ea = (u16)modrm_ea;
1267	} else {
1268		/* 32/64-bit ModR/M decode. */
1269		if ((ctxt->modrm_rm & 7) == 4) {
1270			sib = insn_fetch(u8, ctxt);
1271			index_reg |= (sib >> 3) & 7;
1272			base_reg |= sib & 7;
1273			scale = sib >> 6;
1274
1275			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1276				modrm_ea += insn_fetch(s32, ctxt);
1277			else {
1278				modrm_ea += reg_read(ctxt, base_reg);
1279				adjust_modrm_seg(ctxt, base_reg);
1280				/* Increment ESP on POP [ESP] */
1281				if ((ctxt->d & IncSP) &&
1282				    base_reg == VCPU_REGS_RSP)
1283					modrm_ea += ctxt->op_bytes;
1284			}
1285			if (index_reg != 4)
1286				modrm_ea += reg_read(ctxt, index_reg) << scale;
1287		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1288			modrm_ea += insn_fetch(s32, ctxt);
1289			if (ctxt->mode == X86EMUL_MODE_PROT64)
1290				ctxt->rip_relative = 1;
1291		} else {
1292			base_reg = ctxt->modrm_rm;
1293			modrm_ea += reg_read(ctxt, base_reg);
1294			adjust_modrm_seg(ctxt, base_reg);
1295		}
1296		switch (ctxt->modrm_mod) {
1297		case 1:
1298			modrm_ea += insn_fetch(s8, ctxt);
1299			break;
1300		case 2:
1301			modrm_ea += insn_fetch(s32, ctxt);
1302			break;
1303		}
1304	}
1305	op->addr.mem.ea = modrm_ea;
1306	if (ctxt->ad_bytes != 8)
1307		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1308
1309done:
1310	return rc;
1311}
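
/*
 * Worked ModRM decode (illustrative): the byte 0x44 splits as
 *
 *	mod = 0x44 >> 6       = 1	(disp8 follows)
 *	reg = (0x44 >> 3) & 7 = 0
 *	rm  = 0x44 & 7        = 4	(a SIB byte follows)
 *
 * so the effective address becomes base + index * 2^scale + disp8 as
 * encoded in the SIB byte; REX.R/REX.X/REX.B extend reg, index and
 * base to r8-r15 in 64-bit mode, which is what the shifts of
 * ctxt->rex_prefix above fold in.
 */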
1312
1313static int decode_abs(struct x86_emulate_ctxt *ctxt,
1314		      struct operand *op)
1315{
1316	int rc = X86EMUL_CONTINUE;
1317
1318	op->type = OP_MEM;
1319	switch (ctxt->ad_bytes) {
1320	case 2:
1321		op->addr.mem.ea = insn_fetch(u16, ctxt);
1322		break;
1323	case 4:
1324		op->addr.mem.ea = insn_fetch(u32, ctxt);
1325		break;
1326	case 8:
1327		op->addr.mem.ea = insn_fetch(u64, ctxt);
1328		break;
1329	}
1330done:
1331	return rc;
1332}
1333
1334static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1335{
1336	long sv = 0, mask;
1337
1338	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1339		mask = ~((long)ctxt->dst.bytes * 8 - 1);
1340
1341		if (ctxt->src.bytes == 2)
1342			sv = (s16)ctxt->src.val & (s16)mask;
1343		else if (ctxt->src.bytes == 4)
1344			sv = (s32)ctxt->src.val & (s32)mask;
1345		else
1346			sv = (s64)ctxt->src.val & (s64)mask;
1347
1348		ctxt->dst.addr.mem.ea = address_mask(ctxt,
1349					   ctxt->dst.addr.mem.ea + (sv >> 3));
1350	}
1351
1352	/* only subword offset */
1353	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1354}
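
/*
 * Bit-operand adjustment example (illustrative): for a BT on a 4-byte
 * memory destination with a register bit offset of 100,
 *
 *	mask = ~(4 * 8 - 1) = ~31
 *	sv   = 100 & ~31    = 96	(dword-aligned bit offset)
 *	ea  += 96 >> 3      = 12	(bytes past the original address)
 *
 * leaving src.val = 100 & 31 = 4, i.e. bit 4 of the dword at mem + 12
 * is the one actually tested.
 */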
1355
1356static int read_emulated(struct x86_emulate_ctxt *ctxt,
1357			 unsigned long addr, void *dest, unsigned size)
1358{
1359	int rc;
1360	struct read_cache *mc = &ctxt->mem_read;
1361
1362	if (mc->pos < mc->end)
1363		goto read_cached;
1364
1365	if (KVM_EMULATOR_BUG_ON((mc->end + size) >= sizeof(mc->data), ctxt))
1366		return X86EMUL_UNHANDLEABLE;
1367
1368	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1369				      &ctxt->exception);
1370	if (rc != X86EMUL_CONTINUE)
1371		return rc;
1372
1373	mc->end += size;
1374
1375read_cached:
1376	memcpy(dest, mc->data + mc->pos, size);
1377	mc->pos += size;
1378	return X86EMUL_CONTINUE;
1379}
1380
1381static int segmented_read(struct x86_emulate_ctxt *ctxt,
1382			  struct segmented_address addr,
1383			  void *data,
1384			  unsigned size)
1385{
1386	int rc;
1387	ulong linear;
1388
1389	rc = linearize(ctxt, addr, size, false, &linear);
1390	if (rc != X86EMUL_CONTINUE)
1391		return rc;
1392	return read_emulated(ctxt, linear, data, size);
1393}
1394
1395static int segmented_write(struct x86_emulate_ctxt *ctxt,
1396			   struct segmented_address addr,
1397			   const void *data,
1398			   unsigned size)
1399{
1400	int rc;
1401	ulong linear;
1402
1403	rc = linearize(ctxt, addr, size, true, &linear);
1404	if (rc != X86EMUL_CONTINUE)
1405		return rc;
1406	return ctxt->ops->write_emulated(ctxt, linear, data, size,
1407					 &ctxt->exception);
1408}
1409
1410static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1411			     struct segmented_address addr,
1412			     const void *orig_data, const void *data,
1413			     unsigned size)
1414{
1415	int rc;
1416	ulong linear;
1417
1418	rc = linearize(ctxt, addr, size, true, &linear);
1419	if (rc != X86EMUL_CONTINUE)
1420		return rc;
1421	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1422					   size, &ctxt->exception);
1423}
1424
1425static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1426			   unsigned int size, unsigned short port,
1427			   void *dest)
1428{
1429	struct read_cache *rc = &ctxt->io_read;
1430
1431	if (rc->pos == rc->end) { /* refill pio read ahead */
1432		unsigned int in_page, n;
1433		unsigned int count = ctxt->rep_prefix ?
1434			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1435		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1436			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1437			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1438		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1439		if (n == 0)
1440			n = 1;
1441		rc->pos = rc->end = 0;
1442		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1443			return 0;
1444		rc->end = n * size;
1445	}
1446
1447	if (ctxt->rep_prefix && (ctxt->d & String) &&
1448	    !(ctxt->eflags & X86_EFLAGS_DF)) {
1449		ctxt->dst.data = rc->data + rc->pos;
1450		ctxt->dst.type = OP_MEM_STR;
1451		ctxt->dst.count = (rc->end - rc->pos) / size;
1452		rc->pos = rc->end;
1453	} else {
1454		memcpy(dest, rc->data + rc->pos, size);
1455		rc->pos += size;
1456	}
1457	return 1;
1458}
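
/*
 * Read-ahead example (illustrative): "rep insb" with ECX = 512, DF
 * clear and RDI 100 bytes below a page boundary computes
 *
 *	n = min3(100, (unsigned int)sizeof(rc->data) / 1, 512) = 100
 *
 * so one pio_in_emulated() callback fetches 100 bytes; later
 * iterations refill the cache, and the page clamp keeps each batch
 * inside a single guest page.
 */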
1459
1460static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1461				     u16 index, struct desc_struct *desc)
1462{
1463	struct desc_ptr dt;
1464	ulong addr;
1465
1466	ctxt->ops->get_idt(ctxt, &dt);
1467
1468	if (dt.size < index * 8 + 7)
1469		return emulate_gp(ctxt, index << 3 | 0x2);
1470
1471	addr = dt.address + index * 8;
1472	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1473}
1474
1475static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1476				     u16 selector, struct desc_ptr *dt)
1477{
1478	const struct x86_emulate_ops *ops = ctxt->ops;
1479	u32 base3 = 0;
1480
1481	if (selector & 1 << 2) {
1482		struct desc_struct desc;
1483		u16 sel;
1484
1485		memset(dt, 0, sizeof(*dt));
1486		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1487				      VCPU_SREG_LDTR))
1488			return;
1489
1490		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1491		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1492	} else
1493		ops->get_gdt(ctxt, dt);
1494}
1495
1496static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1497			      u16 selector, ulong *desc_addr_p)
1498{
1499	struct desc_ptr dt;
1500	u16 index = selector >> 3;
1501	ulong addr;
1502
1503	get_descriptor_table_ptr(ctxt, selector, &dt);
1504
1505	if (dt.size < index * 8 + 7)
1506		return emulate_gp(ctxt, selector & 0xfffc);
1507
1508	addr = dt.address + index * 8;
1509
1510#ifdef CONFIG_X86_64
1511	if (addr >> 32 != 0) {
1512		u64 efer = 0;
1513
1514		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1515		if (!(efer & EFER_LMA))
1516			addr &= (u32)-1;
1517	}
1518#endif
1519
1520	*desc_addr_p = addr;
1521	return X86EMUL_CONTINUE;
1522}
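
/*
 * Selector arithmetic used above (illustrative): for selector 0x2b,
 *
 *	index = 0x2b >> 3 = 5	(descriptor slot)
 *	TI    = 0x2b & 4	(set, so the LDT is used)
 *	RPL   = 0x2b & 3  = 3
 *
 * the descriptor sits at dt.address + 5 * 8 and the limit check
 * demands dt.size >= 5 * 8 + 7 = 47.
 */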
1523
1524/* allowed just for 8-byte segments */
1525static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1526				   u16 selector, struct desc_struct *desc,
1527				   ulong *desc_addr_p)
1528{
1529	int rc;
1530
1531	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1532	if (rc != X86EMUL_CONTINUE)
1533		return rc;
1534
1535	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1536}
1537
1538/* allowed just for 8-byte segments */
1539static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1540				    u16 selector, struct desc_struct *desc)
1541{
1542	int rc;
1543	ulong addr;
1544
1545	rc = get_descriptor_ptr(ctxt, selector, &addr);
1546	if (rc != X86EMUL_CONTINUE)
1547		return rc;
1548
1549	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1550}
1551
1552static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1553				     u16 selector, int seg, u8 cpl,
1554				     enum x86_transfer_type transfer,
1555				     struct desc_struct *desc)
1556{
1557	struct desc_struct seg_desc, old_desc;
1558	u8 dpl, rpl;
1559	unsigned err_vec = GP_VECTOR;
1560	u32 err_code = 0;
1561	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1562	ulong desc_addr;
1563	int ret;
1564	u16 dummy;
1565	u32 base3 = 0;
1566
1567	memset(&seg_desc, 0, sizeof(seg_desc));
1568
1569	if (ctxt->mode == X86EMUL_MODE_REAL) {
1570		/* set real mode segment descriptor (keep limit etc. for
1571		 * unreal mode) */
1572		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1573		set_desc_base(&seg_desc, selector << 4);
1574		goto load;
1575	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1576		/* VM86 needs a clean new segment descriptor */
1577		set_desc_base(&seg_desc, selector << 4);
1578		set_desc_limit(&seg_desc, 0xffff);
1579		seg_desc.type = 3;
1580		seg_desc.p = 1;
1581		seg_desc.s = 1;
1582		seg_desc.dpl = 3;
1583		goto load;
1584	}
1585
1586	rpl = selector & 3;
1587
1588	/* TR should be in GDT only */
1589	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1590		goto exception;
1591
1592	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
1593	if (null_selector) {
1594		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1595			goto exception;
1596
1597		if (seg == VCPU_SREG_SS) {
1598			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1599				goto exception;
1600
1601			/*
1602			 * ctxt->ops->set_segment expects the CPL to be in
1603			 * SS.DPL, so fake an expand-up 32-bit data segment.
1604			 */
1605			seg_desc.type = 3;
1606			seg_desc.p = 1;
1607			seg_desc.s = 1;
1608			seg_desc.dpl = cpl;
1609			seg_desc.d = 1;
1610			seg_desc.g = 1;
1611		}
1612
1613		/* Skip all following checks */
1614		goto load;
1615	}
1616
1617	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1618	if (ret != X86EMUL_CONTINUE)
1619		return ret;
1620
1621	err_code = selector & 0xfffc;
1622	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1623							   GP_VECTOR;
1624
1625	/* can't load system descriptor into segment selector */
1626	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1627		if (transfer == X86_TRANSFER_CALL_JMP)
1628			return X86EMUL_UNHANDLEABLE;
1629		goto exception;
1630	}
1631
1632	dpl = seg_desc.dpl;
1633
1634	switch (seg) {
1635	case VCPU_SREG_SS:
1636		/*
1637		 * segment is not a writable data segment or segment
1638		 * selector's RPL != CPL or DPL != CPL
1639		 */
1640		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1641			goto exception;
1642		break;
1643	case VCPU_SREG_CS:
1644		/*
1645		 * KVM uses "none" when loading CS as part of emulating Real
1646		 * Mode exceptions and IRET (handled above).  In all other
1647		 * cases, loading CS without a control transfer is a KVM bug.
1648		 */
1649		if (WARN_ON_ONCE(transfer == X86_TRANSFER_NONE))
1650			goto exception;
1651
1652		if (!(seg_desc.type & 8))
1653			goto exception;
1654
1655		if (transfer == X86_TRANSFER_RET) {
1656			/* RET can never return to an inner privilege level. */
1657			if (rpl < cpl)
1658				goto exception;
1659			/* Outer-privilege level return is not implemented */
1660			if (rpl > cpl)
1661				return X86EMUL_UNHANDLEABLE;
1662		}
1663		if (transfer == X86_TRANSFER_RET || transfer == X86_TRANSFER_TASK_SWITCH) {
1664			if (seg_desc.type & 4) {
1665				/* conforming */
1666				if (dpl > rpl)
1667					goto exception;
1668			} else {
1669				/* nonconforming */
1670				if (dpl != rpl)
1671					goto exception;
1672			}
1673		} else { /* X86_TRANSFER_CALL_JMP */
1674			if (seg_desc.type & 4) {
1675				/* conforming */
1676				if (dpl > cpl)
1677					goto exception;
1678			} else {
1679				/* nonconforming */
1680				if (rpl > cpl || dpl != cpl)
1681					goto exception;
1682			}
1683		}
1684		/* in long-mode d/b must be clear if l is set */
1685		if (seg_desc.d && seg_desc.l) {
1686			u64 efer = 0;
1687
1688			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1689			if (efer & EFER_LMA)
1690				goto exception;
1691		}
1692
1693		/* CS(RPL) <- CPL */
1694		selector = (selector & 0xfffc) | cpl;
1695		break;
1696	case VCPU_SREG_TR:
1697		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1698			goto exception;
1699		break;
1700	case VCPU_SREG_LDTR:
1701		if (seg_desc.s || seg_desc.type != 2)
1702			goto exception;
1703		break;
1704	default: /*  DS, ES, FS, or GS */
1705		/*
1706		 * segment is not a data or readable code segment or
1707		 * ((segment is a data or nonconforming code segment)
1708		 * and ((RPL > DPL) or (CPL > DPL)))
1709		 */
1710		if ((seg_desc.type & 0xa) == 0x8 ||
1711		    (((seg_desc.type & 0xc) != 0xc) &&
1712		     (rpl > dpl || cpl > dpl)))
1713			goto exception;
1714		break;
1715	}
1716
1717	if (!seg_desc.p) {
1718		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1719		goto exception;
1720	}
1721
1722	if (seg_desc.s) {
1723		/* mark segment as accessed */
1724		if (!(seg_desc.type & 1)) {
1725			seg_desc.type |= 1;
1726			ret = write_segment_descriptor(ctxt, selector,
1727						       &seg_desc);
1728			if (ret != X86EMUL_CONTINUE)
1729				return ret;
1730		}
1731	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1732		ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1733		if (ret != X86EMUL_CONTINUE)
1734			return ret;
1735		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1736						 ((u64)base3 << 32), ctxt))
1737			return emulate_gp(ctxt, err_code);
1738	}
1739
1740	if (seg == VCPU_SREG_TR) {
1741		old_desc = seg_desc;
1742		seg_desc.type |= 2; /* busy */
1743		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1744						  sizeof(seg_desc), &ctxt->exception);
1745		if (ret != X86EMUL_CONTINUE)
1746			return ret;
1747	}
1748load:
1749	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1750	if (desc)
1751		*desc = seg_desc;
1752	return X86EMUL_CONTINUE;
1753exception:
1754	return emulate_exception(ctxt, err_vec, err_code, true);
1755}
1756
1757static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1758				   u16 selector, int seg)
1759{
1760	u8 cpl = ctxt->ops->cpl(ctxt);
1761
1762	/*
1763	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1764	 * they can load it at CPL<3 (Intel's manual says only LSS can,
1765	 * but it's wrong).
1766	 *
1767	 * However, the Intel manual says that putting IST=1/DPL=3 in
1768	 * an interrupt gate will result in SS=3 (the AMD manual instead
1769	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1770	 * and only forbid it here.
1771	 */
1772	if (seg == VCPU_SREG_SS && selector == 3 &&
1773	    ctxt->mode == X86EMUL_MODE_PROT64)
1774		return emulate_exception(ctxt, GP_VECTOR, 0, true);
1775
1776	return __load_segment_descriptor(ctxt, selector, seg, cpl,
1777					 X86_TRANSFER_NONE, NULL);
1778}
1779
1780static void write_register_operand(struct operand *op)
1781{
1782	return assign_register(op->addr.reg, op->val, op->bytes);
1783}
1784
1785static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1786{
1787	switch (op->type) {
1788	case OP_REG:
1789		write_register_operand(op);
1790		break;
1791	case OP_MEM:
1792		if (ctxt->lock_prefix)
1793			return segmented_cmpxchg(ctxt,
1794						 op->addr.mem,
1795						 &op->orig_val,
1796						 &op->val,
1797						 op->bytes);
1798		else
1799			return segmented_write(ctxt,
1800					       op->addr.mem,
1801					       &op->val,
1802					       op->bytes);
1803	case OP_MEM_STR:
1804		return segmented_write(ctxt,
1805				       op->addr.mem,
1806				       op->data,
1807				       op->bytes * op->count);
1808	case OP_XMM:
1809		kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
1810		break;
1811	case OP_MM:
1812		kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
1813		break;
1814	case OP_NONE:
1815		/* no writeback */
1816		break;
1817	default:
1818		break;
1819	}
1820	return X86EMUL_CONTINUE;
1821}
1822
1823static int emulate_push(struct x86_emulate_ctxt *ctxt, const void *data, int len)
1824{
1825	struct segmented_address addr;
1826
1827	rsp_increment(ctxt, -len);
1828	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1829	addr.seg = VCPU_SREG_SS;
1830
1831	return segmented_write(ctxt, addr, data, len);
1832}
1833
1834static int em_push(struct x86_emulate_ctxt *ctxt)
1835{
1836	/* Disable writeback. */
1837	ctxt->dst.type = OP_NONE;
1838	return emulate_push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1839}
1840
1841static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1842		       void *dest, int len)
1843{
1844	int rc;
1845	struct segmented_address addr;
1846
1847	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1848	addr.seg = VCPU_SREG_SS;
1849	rc = segmented_read(ctxt, addr, dest, len);
1850	if (rc != X86EMUL_CONTINUE)
1851		return rc;
1852
1853	rsp_increment(ctxt, len);
1854	return rc;
1855}
1856
1857static int em_pop(struct x86_emulate_ctxt *ctxt)
1858{
1859	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1860}
1861
1862static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1863			void *dest, int len)
1864{
1865	int rc;
1866	unsigned long val = 0;
1867	unsigned long change_mask;
1868	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1869	int cpl = ctxt->ops->cpl(ctxt);
1870
1871	rc = emulate_pop(ctxt, &val, len);
1872	if (rc != X86EMUL_CONTINUE)
1873		return rc;
1874
1875	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1876		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1877		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1878		      X86_EFLAGS_AC | X86_EFLAGS_ID;
1879
1880	switch(ctxt->mode) {
1881	case X86EMUL_MODE_PROT64:
1882	case X86EMUL_MODE_PROT32:
1883	case X86EMUL_MODE_PROT16:
1884		if (cpl == 0)
1885			change_mask |= X86_EFLAGS_IOPL;
1886		if (cpl <= iopl)
1887			change_mask |= X86_EFLAGS_IF;
1888		break;
1889	case X86EMUL_MODE_VM86:
1890		if (iopl < 3)
1891			return emulate_gp(ctxt, 0);
1892		change_mask |= X86_EFLAGS_IF;
1893		break;
1894	default: /* real mode */
1895		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1896		break;
1897	}
1898
1899	*(unsigned long *)dest =
1900		(ctxt->eflags & ~change_mask) | (val & change_mask);
1901
1902	return rc;
1903}
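
/*
 * POPF masking example (illustrative): at CPL 3 with IOPL 0 in
 * protected mode neither branch above widens change_mask, so
 *
 *	*dest = (old_eflags & ~change_mask) | (popped & change_mask)
 *
 * updates only the arithmetic/control bits (CF/PF/AF/ZF/SF/OF, TF,
 * DF, NT, AC, ID) while IF and IOPL silently keep their old values,
 * matching hardware behaviour.
 */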
1904
1905static int em_popf(struct x86_emulate_ctxt *ctxt)
1906{
1907	ctxt->dst.type = OP_REG;
1908	ctxt->dst.addr.reg = &ctxt->eflags;
1909	ctxt->dst.bytes = ctxt->op_bytes;
1910	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1911}
1912
1913static int em_enter(struct x86_emulate_ctxt *ctxt)
1914{
1915	int rc;
1916	unsigned frame_size = ctxt->src.val;
1917	unsigned nesting_level = ctxt->src2.val & 31;
1918	ulong rbp;
1919
1920	if (nesting_level)
1921		return X86EMUL_UNHANDLEABLE;
1922
1923	rbp = reg_read(ctxt, VCPU_REGS_RBP);
1924	rc = emulate_push(ctxt, &rbp, stack_size(ctxt));
1925	if (rc != X86EMUL_CONTINUE)
1926		return rc;
1927	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1928		      stack_mask(ctxt));
1929	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1930		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1931		      stack_mask(ctxt));
1932	return X86EMUL_CONTINUE;
1933}
1934
1935static int em_leave(struct x86_emulate_ctxt *ctxt)
1936{
1937	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1938		      stack_mask(ctxt));
1939	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1940}
1941
1942static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1943{
1944	int seg = ctxt->src2.val;
1945
1946	ctxt->src.val = get_segment_selector(ctxt, seg);
1947	if (ctxt->op_bytes == 4) {
1948		rsp_increment(ctxt, -2);
1949		ctxt->op_bytes = 2;
1950	}
1951
1952	return em_push(ctxt);
1953}
1954
1955static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1956{
1957	int seg = ctxt->src2.val;
1958	unsigned long selector = 0;
1959	int rc;
1960
1961	rc = emulate_pop(ctxt, &selector, 2);
1962	if (rc != X86EMUL_CONTINUE)
1963		return rc;
1964
1965	if (seg == VCPU_SREG_SS)
1966		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1967	if (ctxt->op_bytes > 2)
1968		rsp_increment(ctxt, ctxt->op_bytes - 2);
1969
1970	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1971	return rc;
1972}
1973
1974static int em_pusha(struct x86_emulate_ctxt *ctxt)
1975{
1976	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1977	int rc = X86EMUL_CONTINUE;
1978	int reg = VCPU_REGS_RAX;
1979
1980	while (reg <= VCPU_REGS_RDI) {
1981		ctxt->src.val = (reg == VCPU_REGS_RSP) ? old_esp :
1982						reg_read(ctxt, reg);
1983
1984		rc = em_push(ctxt);
1985		if (rc != X86EMUL_CONTINUE)
1986			return rc;
1987
1988		++reg;
1989	}
1990
1991	return rc;
1992}
1993
1994static int em_pushf(struct x86_emulate_ctxt *ctxt)
1995{
1996	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
1997	return em_push(ctxt);
1998}
1999
2000static int em_popa(struct x86_emulate_ctxt *ctxt)
2001{
2002	int rc = X86EMUL_CONTINUE;
2003	int reg = VCPU_REGS_RDI;
2004	u32 val = 0;
2005
2006	while (reg >= VCPU_REGS_RAX) {
2007		if (reg == VCPU_REGS_RSP) {
2008			rsp_increment(ctxt, ctxt->op_bytes);
2009			--reg;
2010		}
2011
2012		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2013		if (rc != X86EMUL_CONTINUE)
2014			break;
2015		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2016		--reg;
2017	}
2018	return rc;
2019}
2020
2021static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2022{
2023	const struct x86_emulate_ops *ops = ctxt->ops;
2024	int rc;
2025	struct desc_ptr dt;
2026	gva_t cs_addr;
2027	gva_t eip_addr;
2028	u16 cs, eip;
2029
2030	/* TODO: Add limit checks */
2031	ctxt->src.val = ctxt->eflags;
2032	rc = em_push(ctxt);
2033	if (rc != X86EMUL_CONTINUE)
2034		return rc;
2035
2036	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2037
2038	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2039	rc = em_push(ctxt);
2040	if (rc != X86EMUL_CONTINUE)
2041		return rc;
2042
2043	ctxt->src.val = ctxt->_eip;
2044	rc = em_push(ctxt);
2045	if (rc != X86EMUL_CONTINUE)
2046		return rc;
2047
2048	ops->get_idt(ctxt, &dt);
2049
2050	eip_addr = dt.address + (irq << 2);
2051	cs_addr = dt.address + (irq << 2) + 2;
2052
2053	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2054	if (rc != X86EMUL_CONTINUE)
2055		return rc;
2056
2057	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2058	if (rc != X86EMUL_CONTINUE)
2059		return rc;
2060
2061	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2062	if (rc != X86EMUL_CONTINUE)
2063		return rc;
2064
2065	ctxt->_eip = eip;
2066
2067	return rc;
2068}
2069
2070int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2071{
2072	int rc;
2073
2074	invalidate_registers(ctxt);
2075	rc = __emulate_int_real(ctxt, irq);
2076	if (rc == X86EMUL_CONTINUE)
2077		writeback_registers(ctxt);
2078	return rc;
2079}
2080
2081static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2082{
2083	switch(ctxt->mode) {
2084	case X86EMUL_MODE_REAL:
2085		return __emulate_int_real(ctxt, irq);
2086	case X86EMUL_MODE_VM86:
2087	case X86EMUL_MODE_PROT16:
2088	case X86EMUL_MODE_PROT32:
2089	case X86EMUL_MODE_PROT64:
2090	default:
2091		/* Protected mode interrupts are not implemented yet */
2092		return X86EMUL_UNHANDLEABLE;
2093	}
2094}
2095
2096static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2097{
2098	int rc = X86EMUL_CONTINUE;
2099	unsigned long temp_eip = 0;
2100	unsigned long temp_eflags = 0;
2101	unsigned long cs = 0;
2102	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2103			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2104			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2105			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2106			     X86_EFLAGS_AC | X86_EFLAGS_ID |
2107			     X86_EFLAGS_FIXED;
2108	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2109				  X86_EFLAGS_VIP;
2110
2111	/* TODO: Add stack limit check */
2112
2113	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2114
2115	if (rc != X86EMUL_CONTINUE)
2116		return rc;
2117
2118	if (temp_eip & ~0xffff)
2119		return emulate_gp(ctxt, 0);
2120
2121	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2122
2123	if (rc != X86EMUL_CONTINUE)
2124		return rc;
2125
2126	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2127
2128	if (rc != X86EMUL_CONTINUE)
2129		return rc;
2130
2131	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2132
2133	if (rc != X86EMUL_CONTINUE)
2134		return rc;
2135
2136	ctxt->_eip = temp_eip;
2137
2138	if (ctxt->op_bytes == 4)
2139		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2140	else if (ctxt->op_bytes == 2) {
2141		ctxt->eflags &= ~0xffff;
2142		ctxt->eflags |= temp_eflags;
2143	}
2144
2145	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2146	ctxt->eflags |= X86_EFLAGS_FIXED;
2147	ctxt->ops->set_nmi_mask(ctxt, false);
2148
2149	return rc;
2150}
2151
2152static int em_iret(struct x86_emulate_ctxt *ctxt)
2153{
2154	switch(ctxt->mode) {
2155	case X86EMUL_MODE_REAL:
2156		return emulate_iret_real(ctxt);
2157	case X86EMUL_MODE_VM86:
2158	case X86EMUL_MODE_PROT16:
2159	case X86EMUL_MODE_PROT32:
2160	case X86EMUL_MODE_PROT64:
2161	default:
2162		/* iret from protected mode is not implemented yet */
2163		return X86EMUL_UNHANDLEABLE;
2164	}
2165}
2166
2167static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2168{
2169	int rc;
2170	unsigned short sel;
2171	struct desc_struct new_desc;
2172	u8 cpl = ctxt->ops->cpl(ctxt);
2173
2174	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2175
2176	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2177				       X86_TRANSFER_CALL_JMP,
2178				       &new_desc);
2179	if (rc != X86EMUL_CONTINUE)
2180		return rc;
2181
2182	rc = assign_eip_far(ctxt, ctxt->src.val);
2183	/* Error handling is not implemented. */
2184	if (rc != X86EMUL_CONTINUE)
2185		return X86EMUL_UNHANDLEABLE;
2186
2187	return rc;
2188}
2189
2190static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2191{
2192	return assign_eip_near(ctxt, ctxt->src.val);
2193}
2194
2195static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2196{
2197	int rc;
2198	long int old_eip;
2199
2200	old_eip = ctxt->_eip;
2201	rc = assign_eip_near(ctxt, ctxt->src.val);
2202	if (rc != X86EMUL_CONTINUE)
2203		return rc;
2204	ctxt->src.val = old_eip;
2205	rc = em_push(ctxt);
2206	return rc;
2207}
2208
2209static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2210{
2211	u64 old = ctxt->dst.orig_val64;
2212
2213	if (ctxt->dst.bytes == 16)
2214		return X86EMUL_UNHANDLEABLE;
2215
2216	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2217	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2218		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2219		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2220		ctxt->eflags &= ~X86_EFLAGS_ZF;
2221	} else {
2222		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2223			(u32) reg_read(ctxt, VCPU_REGS_RBX);
2224
2225		ctxt->eflags |= X86_EFLAGS_ZF;
2226	}
2227	return X86EMUL_CONTINUE;
2228}
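
/*
 * CMPXCHG8B semantics implemented above: if the 64-bit memory operand
 * equals EDX:EAX, ECX:EBX is written back and ZF is set; otherwise
 * the memory value is loaded into EDX:EAX and ZF is cleared.  The
 * 16-byte CMPXCHG16B case is deliberately punted as
 * X86EMUL_UNHANDLEABLE.
 */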
2229
2230static int em_ret(struct x86_emulate_ctxt *ctxt)
2231{
2232	int rc;
2233	unsigned long eip = 0;
2234
2235	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2236	if (rc != X86EMUL_CONTINUE)
2237		return rc;
2238
2239	return assign_eip_near(ctxt, eip);
2240}
2241
2242static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2243{
2244	int rc;
2245	unsigned long eip = 0;
2246	unsigned long cs = 0;
2247	int cpl = ctxt->ops->cpl(ctxt);
2248	struct desc_struct new_desc;
2249
2250	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2251	if (rc != X86EMUL_CONTINUE)
2252		return rc;
2253	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2254	if (rc != X86EMUL_CONTINUE)
2255		return rc;
2256	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2257				       X86_TRANSFER_RET,
2258				       &new_desc);
2259	if (rc != X86EMUL_CONTINUE)
2260		return rc;
2261	rc = assign_eip_far(ctxt, eip);
2262	/* Error handling is not implemented. */
2263	if (rc != X86EMUL_CONTINUE)
2264		return X86EMUL_UNHANDLEABLE;
2265
2266	return rc;
2267}
2268
2269static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2270{
2271	int rc;
2272
2273	rc = em_ret_far(ctxt);
2274	if (rc != X86EMUL_CONTINUE)
2275		return rc;
2276	rsp_increment(ctxt, ctxt->src.val);
2277	return X86EMUL_CONTINUE;
2278}
2279
2280static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2281{
2282	/* Save real source value, then compare EAX against destination. */
2283	ctxt->dst.orig_val = ctxt->dst.val;
2284	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2285	ctxt->src.orig_val = ctxt->src.val;
2286	ctxt->src.val = ctxt->dst.orig_val;
2287	fastop(ctxt, em_cmp);
2288
2289	if (ctxt->eflags & X86_EFLAGS_ZF) {
2290		/* Success: write back to memory; no update of EAX */
2291		ctxt->src.type = OP_NONE;
2292		ctxt->dst.val = ctxt->src.orig_val;
2293	} else {
2294		/* Failure: write the value we saw to EAX. */
2295		ctxt->src.type = OP_REG;
2296		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2297		ctxt->src.val = ctxt->dst.orig_val;
2298		/* Create write-cycle to dest by writing the same value */
2299		ctxt->dst.val = ctxt->dst.orig_val;
2300	}
2301	return X86EMUL_CONTINUE;
2302}
2303
2304static int em_lseg(struct x86_emulate_ctxt *ctxt)
2305{
2306	int seg = ctxt->src2.val;
2307	unsigned short sel;
2308	int rc;
2309
2310	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2311
2312	rc = load_segment_descriptor(ctxt, sel, seg);
2313	if (rc != X86EMUL_CONTINUE)
2314		return rc;
2315
2316	ctxt->dst.val = ctxt->src.val;
2317	return rc;
2318}
2319
2320static int em_rsm(struct x86_emulate_ctxt *ctxt)
2321{
2322	if (!ctxt->ops->is_smm(ctxt))
2323		return emulate_ud(ctxt);
2324
2325	if (ctxt->ops->leave_smm(ctxt))
2326		ctxt->ops->triple_fault(ctxt);
2327
2328	return emulator_recalc_and_set_mode(ctxt);
2329}
2330
2331static void
2332setup_syscalls_segments(struct desc_struct *cs, struct desc_struct *ss)
2333{
2334	cs->l = 0;		/* will be adjusted later */
2335	set_desc_base(cs, 0);	/* flat segment */
2336	cs->g = 1;		/* 4kb granularity */
2337	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
2338	cs->type = 0x0b;	/* Read, Execute, Accessed */
2339	cs->s = 1;
2340	cs->dpl = 0;		/* will be adjusted later */
2341	cs->p = 1;
2342	cs->d = 1;
2343	cs->avl = 0;
2344
2345	set_desc_base(ss, 0);	/* flat segment */
2346	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
2347	ss->g = 1;		/* 4kb granularity */
2348	ss->s = 1;
2349	ss->type = 0x03;	/* Read/Write, Accessed */
2350	ss->d = 1;		/* 32bit stack segment */
2351	ss->dpl = 0;
2352	ss->p = 1;
2353	ss->l = 0;
2354	ss->avl = 0;
2355}
2356
2357static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2358{
2359	u32 eax, ebx, ecx, edx;
2360
2361	eax = ecx = 0;
2362	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2363	return is_guest_vendor_intel(ebx, ecx, edx);
2364}
2365
2366static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2367{
2368	const struct x86_emulate_ops *ops = ctxt->ops;
2369	u32 eax, ebx, ecx, edx;
2370
2371	/*
2372	 * SYSCALL is always enabled in long mode, so the check only needs
2373	 * to become vendor-specific (via CPUID) when other modes are active.
2374	 */
2375	if (ctxt->mode == X86EMUL_MODE_PROT64)
2376		return true;
2377
2378	eax = 0x00000000;
2379	ecx = 0x00000000;
2380	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2381	/*
2382	 * Remark: Intel CPUs only support "syscall" in 64-bit long mode. A
2383	 * 32-bit compat application on a 64-bit Intel guest will #UD. While
2384	 * this behaviour could be emulated to match the AMD response, AMD
2385	 * CPUs cannot be made to behave like Intel ones.
2386	 */
2387	if (is_guest_vendor_intel(ebx, ecx, edx))
2388		return false;
2389
2390	if (is_guest_vendor_amd(ebx, ecx, edx) ||
2391	    is_guest_vendor_hygon(ebx, ecx, edx))
2392		return true;
2393
2394	/*
2395	 * default: (not Intel, not AMD, not Hygon), apply Intel's
2396	 * stricter rules...
2397	 */
2398	return false;
2399}
2400
2401static int em_syscall(struct x86_emulate_ctxt *ctxt)
2402{
2403	const struct x86_emulate_ops *ops = ctxt->ops;
2404	struct desc_struct cs, ss;
2405	u64 msr_data;
2406	u16 cs_sel, ss_sel;
2407	u64 efer = 0;
2408
2409	/* syscall is not available in real mode */
2410	if (ctxt->mode == X86EMUL_MODE_REAL ||
2411	    ctxt->mode == X86EMUL_MODE_VM86)
2412		return emulate_ud(ctxt);
2413
2414	if (!(em_syscall_is_enabled(ctxt)))
2415		return emulate_ud(ctxt);
2416
2417	ops->get_msr(ctxt, MSR_EFER, &efer);
2418	if (!(efer & EFER_SCE))
2419		return emulate_ud(ctxt);
2420
2421	setup_syscalls_segments(&cs, &ss);
2422	ops->get_msr(ctxt, MSR_STAR, &msr_data);
2423	msr_data >>= 32;
2424	cs_sel = (u16)(msr_data & 0xfffc);
2425	ss_sel = (u16)(msr_data + 8);
2426
2427	if (efer & EFER_LMA) {
2428		cs.d = 0;
2429		cs.l = 1;
2430	}
2431	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2432	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2433
2434	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2435	if (efer & EFER_LMA) {
2436#ifdef CONFIG_X86_64
2437		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2438
2439		ops->get_msr(ctxt,
2440			     ctxt->mode == X86EMUL_MODE_PROT64 ?
2441			     MSR_LSTAR : MSR_CSTAR, &msr_data);
2442		ctxt->_eip = msr_data;
2443
2444		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2445		ctxt->eflags &= ~msr_data;
2446		ctxt->eflags |= X86_EFLAGS_FIXED;
2447#endif
2448	} else {
2449		/* legacy mode */
2450		ops->get_msr(ctxt, MSR_STAR, &msr_data);
2451		ctxt->_eip = (u32)msr_data;
2452
2453		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2454	}
2455
2456	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2457	return X86EMUL_CONTINUE;
2458}
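
/*
 * MSR_STAR layout consumed above (illustrative value): bits 47:32
 * hold the SYSCALL CS selector base.  With STAR[47:32] = 0x0010,
 *
 *	cs_sel = 0x0010 & 0xfffc = 0x0010
 *	ss_sel = 0x0010 + 8      = 0x0018
 *
 * i.e. SS is the GDT entry immediately after CS, as the SYSCALL
 * architecture requires.
 */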
2459
2460static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2461{
2462	const struct x86_emulate_ops *ops = ctxt->ops;
2463	struct desc_struct cs, ss;
2464	u64 msr_data;
2465	u16 cs_sel, ss_sel;
2466	u64 efer = 0;
2467
2468	ops->get_msr(ctxt, MSR_EFER, &efer);
2469	/* inject #GP if in real mode */
2470	if (ctxt->mode == X86EMUL_MODE_REAL)
2471		return emulate_gp(ctxt, 0);
2472
2473	/*
2474	 * Not recognized on AMD in compat mode (but is recognized in legacy
2475	 * mode).
2476	 */
2477	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2478	    && !vendor_intel(ctxt))
2479		return emulate_ud(ctxt);
2480
2481	/* sysenter/sysexit have not been tested in 64bit mode. */
2482	if (ctxt->mode == X86EMUL_MODE_PROT64)
2483		return X86EMUL_UNHANDLEABLE;
2484
2485	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2486	if ((msr_data & 0xfffc) == 0x0)
2487		return emulate_gp(ctxt, 0);
2488
2489	setup_syscalls_segments(&cs, &ss);
2490	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2491	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2492	ss_sel = cs_sel + 8;
2493	if (efer & EFER_LMA) {
2494		cs.d = 0;
2495		cs.l = 1;
2496	}
2497
2498	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2499	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2500
2501	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2502	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2503
2504	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2505	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2506							      (u32)msr_data;
2507	if (efer & EFER_LMA)
2508		ctxt->mode = X86EMUL_MODE_PROT64;
2509
2510	return X86EMUL_CONTINUE;
2511}
2512
2513static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2514{
2515	const struct x86_emulate_ops *ops = ctxt->ops;
2516	struct desc_struct cs, ss;
2517	u64 msr_data, rcx, rdx;
2518	int usermode;
2519	u16 cs_sel = 0, ss_sel = 0;
2520
2521	/* inject #GP if in real mode or Virtual 8086 mode */
2522	if (ctxt->mode == X86EMUL_MODE_REAL ||
2523	    ctxt->mode == X86EMUL_MODE_VM86)
2524		return emulate_gp(ctxt, 0);
2525
2526	setup_syscalls_segments(&cs, &ss);
2527
2528	if ((ctxt->rex_prefix & 0x8) != 0x0)
2529		usermode = X86EMUL_MODE_PROT64;
2530	else
2531		usermode = X86EMUL_MODE_PROT32;
2532
2533	rcx = reg_read(ctxt, VCPU_REGS_RCX);
2534	rdx = reg_read(ctxt, VCPU_REGS_RDX);
2535
2536	cs.dpl = 3;
2537	ss.dpl = 3;
2538	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2539	switch (usermode) {
2540	case X86EMUL_MODE_PROT32:
2541		cs_sel = (u16)(msr_data + 16);
2542		if ((msr_data & 0xfffc) == 0x0)
2543			return emulate_gp(ctxt, 0);
2544		ss_sel = (u16)(msr_data + 24);
2545		rcx = (u32)rcx;
2546		rdx = (u32)rdx;
2547		break;
2548	case X86EMUL_MODE_PROT64:
2549		cs_sel = (u16)(msr_data + 32);
2550		if (msr_data == 0x0)
2551			return emulate_gp(ctxt, 0);
2552		ss_sel = cs_sel + 8;
2553		cs.d = 0;
2554		cs.l = 1;
2555		if (emul_is_noncanonical_address(rcx, ctxt) ||
2556		    emul_is_noncanonical_address(rdx, ctxt))
2557			return emulate_gp(ctxt, 0);
2558		break;
2559	}
2560	cs_sel |= SEGMENT_RPL_MASK;
2561	ss_sel |= SEGMENT_RPL_MASK;
2562
2563	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2564	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2565
2566	ctxt->_eip = rdx;
2567	ctxt->mode = usermode;
2568	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2569
2570	return X86EMUL_CONTINUE;
2571}
2572
2573static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2574{
2575	int iopl;
2576	if (ctxt->mode == X86EMUL_MODE_REAL)
2577		return false;
2578	if (ctxt->mode == X86EMUL_MODE_VM86)
2579		return true;
2580	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2581	return ctxt->ops->cpl(ctxt) > iopl;
2582}
2583
2584#define VMWARE_PORT_VMPORT	(0x5658)
2585#define VMWARE_PORT_VMRPC	(0x5659)
2586
2587static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2588					    u16 port, u16 len)
2589{
2590	const struct x86_emulate_ops *ops = ctxt->ops;
2591	struct desc_struct tr_seg;
2592	u32 base3;
2593	int r;
2594	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2595	unsigned mask = (1 << len) - 1;
2596	unsigned long base;
2597
2598	/*
2599	 * VMware allows access to these ports even if denied
2600	 * by the TSS I/O permission bitmap. Mimic that behavior.
2601	 */
2602	if (enable_vmware_backdoor &&
2603	    ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2604		return true;
2605
2606	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2607	if (!tr_seg.p)
2608		return false;
2609	if (desc_limit_scaled(&tr_seg) < 103)
2610		return false;
2611	base = get_desc_base(&tr_seg);
2612#ifdef CONFIG_X86_64
2613	base |= ((u64)base3) << 32;
2614#endif
2615	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2616	if (r != X86EMUL_CONTINUE)
2617		return false;
2618	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2619		return false;
2620	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2621	if (r != X86EMUL_CONTINUE)
2622		return false;
2623	if ((perm >> bit_idx) & mask)
2624		return false;
2625	return true;
2626}
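
/*
 * I/O bitmap lookup example (illustrative): a 1-byte access to port
 * 0x3f8 uses
 *
 *	byte offset = 0x3f8 / 8 = 127	(into the bitmap)
 *	bit_idx     = 0x3f8 & 7 = 0
 *	mask        = (1 << 1) - 1 = 1
 *
 * and is allowed iff bit 0 of the 16-bit word at
 * base + io_bitmap_ptr + 127 is clear; reading two bytes lets an
 * access that straddles a byte boundary be checked in one go.
 */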
2627
2628static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2629				  u16 port, u16 len)
2630{
2631	if (ctxt->perm_ok)
2632		return true;
2633
2634	if (emulator_bad_iopl(ctxt))
2635		if (!emulator_io_port_access_allowed(ctxt, port, len))
2636			return false;
2637
2638	ctxt->perm_ok = true;
2639
2640	return true;
2641}
2642
2643static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2644{
2645	/*
2646	 * Intel CPUs mask the counter and pointers in quite a strange
2647	 * manner when ECX is zero, due to REP-string optimizations.
2648	 */
2649#ifdef CONFIG_X86_64
2650	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2651		return;
2652
2653	*reg_write(ctxt, VCPU_REGS_RCX) = 0;
2654
2655	switch (ctxt->b) {
2656	case 0xa4:	/* movsb */
2657	case 0xa5:	/* movsd/w */
2658		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2659		fallthrough;
2660	case 0xaa:	/* stosb */
2661	case 0xab:	/* stosd/w */
2662		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2663	}
2664#endif
2665}
2666
2667static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2668				struct tss_segment_16 *tss)
2669{
2670	tss->ip = ctxt->_eip;
2671	tss->flag = ctxt->eflags;
2672	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2673	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2674	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2675	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2676	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2677	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2678	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2679	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2680
2681	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2682	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2683	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2684	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2685	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2686}
2687
2688static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2689				 struct tss_segment_16 *tss)
2690{
2691	int ret;
2692	u8 cpl;
2693
2694	ctxt->_eip = tss->ip;
2695	ctxt->eflags = tss->flag | 2;
2696	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2697	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2698	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2699	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2700	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2701	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2702	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2703	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2704
2705	/*
2706	 * SDM says that segment selectors are loaded before segment
2707	 * descriptors
2708	 */
2709	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2710	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2711	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2712	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2713	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2714
2715	cpl = tss->cs & 3;
2716
2717	/*
2718	 * Now load segment descriptors. If a fault happens at this stage,
2719	 * it is handled in the context of the new task.
2720	 */
2721	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2722					X86_TRANSFER_TASK_SWITCH, NULL);
2723	if (ret != X86EMUL_CONTINUE)
2724		return ret;
2725	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2726					X86_TRANSFER_TASK_SWITCH, NULL);
2727	if (ret != X86EMUL_CONTINUE)
2728		return ret;
2729	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2730					X86_TRANSFER_TASK_SWITCH, NULL);
2731	if (ret != X86EMUL_CONTINUE)
2732		return ret;
2733	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2734					X86_TRANSFER_TASK_SWITCH, NULL);
2735	if (ret != X86EMUL_CONTINUE)
2736		return ret;
2737	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2738					X86_TRANSFER_TASK_SWITCH, NULL);
2739	if (ret != X86EMUL_CONTINUE)
2740		return ret;
2741
2742	return X86EMUL_CONTINUE;
2743}
2744
2745static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
2746			  ulong old_tss_base, struct desc_struct *new_desc)
2747{
2748	struct tss_segment_16 tss_seg;
2749	int ret;
2750	u32 new_tss_base = get_desc_base(new_desc);
2751
2752	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2753	if (ret != X86EMUL_CONTINUE)
2754		return ret;
2755
2756	save_state_to_tss16(ctxt, &tss_seg);
2757
2758	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2759	if (ret != X86EMUL_CONTINUE)
2760		return ret;
2761
2762	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
2763	if (ret != X86EMUL_CONTINUE)
2764		return ret;
2765
2766	if (old_tss_sel != 0xffff) {
2767		tss_seg.prev_task_link = old_tss_sel;
2768
2769		ret = linear_write_system(ctxt, new_tss_base,
2770					  &tss_seg.prev_task_link,
2771					  sizeof(tss_seg.prev_task_link));
2772		if (ret != X86EMUL_CONTINUE)
2773			return ret;
2774	}
2775
2776	return load_state_from_tss16(ctxt, &tss_seg);
2777}
2778
2779static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2780				struct tss_segment_32 *tss)
2781{
2782	/* CR3 and the LDT selector are intentionally not saved */
2783	tss->eip = ctxt->_eip;
2784	tss->eflags = ctxt->eflags;
2785	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2786	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2787	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2788	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2789	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2790	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2791	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2792	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2793
2794	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2795	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2796	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2797	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2798	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2799	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2800}
2801
2802static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2803				 struct tss_segment_32 *tss)
2804{
2805	int ret;
2806	u8 cpl;
2807
2808	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2809		return emulate_gp(ctxt, 0);
2810	ctxt->_eip = tss->eip;
2811	ctxt->eflags = tss->eflags | 2;
2812
2813	/* General purpose registers */
2814	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2815	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2816	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2817	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2818	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2819	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2820	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2821	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2822
2823	/*
2824	 * SDM says that segment selectors are loaded before segment
2825	 * descriptors.  This is important because CPL checks will
2826	 * use CS.RPL.
2827	 */
2828	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2829	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2830	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2831	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2832	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2833	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2834	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2835
2836	/*
2837	 * If we're switching between Protected Mode and VM86, we need to make
2838	 * sure to update the mode before loading the segment descriptors so
2839	 * that the selectors are interpreted correctly.
2840	 */
2841	if (ctxt->eflags & X86_EFLAGS_VM) {
2842		ctxt->mode = X86EMUL_MODE_VM86;
2843		cpl = 3;
2844	} else {
2845		ctxt->mode = X86EMUL_MODE_PROT32;
2846		cpl = tss->cs & 3;
2847	}
2848
2849	/*
2850	 * Now load segment descriptors. If a fault happens at this stage,
2851	 * it is handled in the context of the new task.
2852	 */
2853	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2854					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
2855	if (ret != X86EMUL_CONTINUE)
2856		return ret;
2857	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2858					X86_TRANSFER_TASK_SWITCH, NULL);
2859	if (ret != X86EMUL_CONTINUE)
2860		return ret;
2861	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2862					X86_TRANSFER_TASK_SWITCH, NULL);
2863	if (ret != X86EMUL_CONTINUE)
2864		return ret;
2865	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2866					X86_TRANSFER_TASK_SWITCH, NULL);
2867	if (ret != X86EMUL_CONTINUE)
2868		return ret;
2869	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2870					X86_TRANSFER_TASK_SWITCH, NULL);
2871	if (ret != X86EMUL_CONTINUE)
2872		return ret;
2873	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2874					X86_TRANSFER_TASK_SWITCH, NULL);
2875	if (ret != X86EMUL_CONTINUE)
2876		return ret;
2877	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2878					X86_TRANSFER_TASK_SWITCH, NULL);
2879
2880	return ret;
2881}
2882
2883static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
2884			  ulong old_tss_base, struct desc_struct *new_desc)
2885{
2886	struct tss_segment_32 tss_seg;
2887	int ret;
2888	u32 new_tss_base = get_desc_base(new_desc);
2889	u32 eip_offset = offsetof(struct tss_segment_32, eip);
2890	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2891
2892	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2893	if (ret != X86EMUL_CONTINUE)
2894		return ret;
2895
2896	save_state_to_tss32(ctxt, &tss_seg);
2897
2898	/* Only GP registers and segment selectors are saved */
2899	ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2900				  ldt_sel_offset - eip_offset);
2901	if (ret != X86EMUL_CONTINUE)
2902		return ret;
2903
2904	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
2905	if (ret != X86EMUL_CONTINUE)
2906		return ret;
2907
2908	if (old_tss_sel != 0xffff) {
2909		tss_seg.prev_task_link = old_tss_sel;
2910
2911		ret = linear_write_system(ctxt, new_tss_base,
2912					  &tss_seg.prev_task_link,
2913					  sizeof(tss_seg.prev_task_link));
2914		if (ret != X86EMUL_CONTINUE)
2915			return ret;
2916	}
2917
2918	return load_state_from_tss32(ctxt, &tss_seg);
2919}
2920
2921static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2922				   u16 tss_selector, int idt_index, int reason,
2923				   bool has_error_code, u32 error_code)
2924{
2925	const struct x86_emulate_ops *ops = ctxt->ops;
2926	struct desc_struct curr_tss_desc, next_tss_desc;
2927	int ret;
2928	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2929	ulong old_tss_base =
2930		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2931	u32 desc_limit;
2932	ulong desc_addr, dr7;
2933
2934	/* FIXME: old_tss_base == ~0 ? */
2935
2936	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2937	if (ret != X86EMUL_CONTINUE)
2938		return ret;
2939	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2940	if (ret != X86EMUL_CONTINUE)
2941		return ret;
2942
2943	/* FIXME: check that next_tss_desc is tss */
2944
2945	/*
2946	 * Check privileges. The three cases are task switch caused by...
2947	 *
2948	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2949	 * 2. Exception/IRQ/iret: No check is performed
2950	 * 3. jmp/call to TSS/task-gate: No check is performed since the
2951	 *    hardware checks it before exiting.
2952	 */
2953	if (reason == TASK_SWITCH_GATE) {
2954		if (idt_index != -1) {
2955			/* Software interrupts */
2956			struct desc_struct task_gate_desc;
2957			int dpl;
2958
2959			ret = read_interrupt_descriptor(ctxt, idt_index,
2960							&task_gate_desc);
2961			if (ret != X86EMUL_CONTINUE)
2962				return ret;
2963
2964			dpl = task_gate_desc.dpl;
2965			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2966				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2967		}
2968	}
2969
2970	desc_limit = desc_limit_scaled(&next_tss_desc);
2971	if (!next_tss_desc.p ||
2972	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2973	     desc_limit < 0x2b)) {
2974		return emulate_ts(ctxt, tss_selector & 0xfffc);
2975	}
2976
2977	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2978		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2979		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2980	}
2981
2982	if (reason == TASK_SWITCH_IRET)
2983		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2984
2985	/* Set back link to prev task only if NT bit is set in eflags;
2986	 * note that old_tss_sel is not used after this point. */
2987	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2988		old_tss_sel = 0xffff;
2989
2990	if (next_tss_desc.type & 8)
2991		ret = task_switch_32(ctxt, old_tss_sel, old_tss_base, &next_tss_desc);
2992	else
2993		ret = task_switch_16(ctxt, old_tss_sel,
2994				     old_tss_base, &next_tss_desc);
2995	if (ret != X86EMUL_CONTINUE)
2996		return ret;
2997
2998	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2999		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3000
3001	if (reason != TASK_SWITCH_IRET) {
3002		next_tss_desc.type |= (1 << 1); /* set busy flag */
3003		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3004	}
3005
3006	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
3007	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3008
3009	if (has_error_code) {
3010		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3011		ctxt->lock_prefix = 0;
3012		ctxt->src.val = (unsigned long) error_code;
3013		ret = em_push(ctxt);
3014	}
3015
3016	dr7 = ops->get_dr(ctxt, 7);
3017	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3018
3019	return ret;
3020}
3021
3022int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3023			 u16 tss_selector, int idt_index, int reason,
3024			 bool has_error_code, u32 error_code)
3025{
3026	int rc;
3027
3028	invalidate_registers(ctxt);
3029	ctxt->_eip = ctxt->eip;
3030	ctxt->dst.type = OP_NONE;
3031
3032	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3033				     has_error_code, error_code);
3034
3035	if (rc == X86EMUL_CONTINUE) {
3036		ctxt->eip = ctxt->_eip;
3037		writeback_registers(ctxt);
3038	}
3039
3040	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3041}
3042
3043static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3044		struct operand *op)
3045{
3046	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3047
3048	register_address_increment(ctxt, reg, df * op->bytes);
3049	op->addr.mem.ea = register_address(ctxt, reg);
3050}
3051
3052static int em_das(struct x86_emulate_ctxt *ctxt)
3053{
3054	u8 al, old_al;
3055	bool af, cf, old_cf;
3056
3057	cf = ctxt->eflags & X86_EFLAGS_CF;
3058	al = ctxt->dst.val;
3059
3060	old_al = al;
3061	old_cf = cf;
3062	cf = false;
3063	af = ctxt->eflags & X86_EFLAGS_AF;
3064	if ((al & 0x0f) > 9 || af) {
3065		al -= 6;
3066		cf = old_cf | (al >= 250);
3067		af = true;
3068	} else {
3069		af = false;
3070	}
3071	if (old_al > 0x99 || old_cf) {
3072		al -= 0x60;
3073		cf = true;
3074	}
3075
3076	ctxt->dst.val = al;
3077	/* Set PF, ZF, SF */
3078	ctxt->src.type = OP_IMM;
3079	ctxt->src.val = 0;
3080	ctxt->src.bytes = 1;
3081	fastop(ctxt, em_or);
3082	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3083	if (cf)
3084		ctxt->eflags |= X86_EFLAGS_CF;
3085	if (af)
3086		ctxt->eflags |= X86_EFLAGS_AF;
3087	return X86EMUL_CONTINUE;
3088}
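
/*
 * DAS worked example (illustrative): the packed-BCD subtract
 * 0x42 - 0x28 leaves AL = 0x1a with AF set, and the code above then
 * computes
 *
 *	low nibble 0xa > 9 (or AF)	->  AL = 0x1a - 6 = 0x14
 *	old AL 0x1a <= 0x99, CF clear	->  no 0x60 correction
 *
 * giving AL = 0x14, the correct BCD encoding of 42 - 28 = 14.
 */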
3089
3090static int em_aam(struct x86_emulate_ctxt *ctxt)
3091{
3092	u8 al, ah;
3093
3094	if (ctxt->src.val == 0)
3095		return emulate_de(ctxt);
3096
3097	al = ctxt->dst.val & 0xff;
3098	ah = al / ctxt->src.val;
3099	al %= ctxt->src.val;
3100
3101	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3102
3103	/* Set PF, ZF, SF */
3104	ctxt->src.type = OP_IMM;
3105	ctxt->src.val = 0;
3106	ctxt->src.bytes = 1;
3107	fastop(ctxt, em_or);
3108
3109	return X86EMUL_CONTINUE;
3110}
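
/*
 * AAM worked example (illustrative): with AL = 123 (0x7b) and the
 * default immediate 10,
 *
 *	AH = 123 / 10 = 12
 *	AL = 123 % 10 = 3
 *
 * so AX becomes 0x0c03; a zero immediate raises #DE, checked first.
 */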
3111
3112static int em_aad(struct x86_emulate_ctxt *ctxt)
3113{
3114	u8 al = ctxt->dst.val & 0xff;
3115	u8 ah = (ctxt->dst.val >> 8) & 0xff;
3116
3117	al = (al + (ah * ctxt->src.val)) & 0xff;
3118
3119	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3120
3121	/* Set PF, ZF, SF */
3122	ctxt->src.type = OP_IMM;
3123	ctxt->src.val = 0;
3124	ctxt->src.bytes = 1;
3125	fastop(ctxt, em_or);
3126
3127	return X86EMUL_CONTINUE;
3128}
3129
3130static int em_call(struct x86_emulate_ctxt *ctxt)
3131{
3132	int rc;
3133	long rel = ctxt->src.val;
3134
3135	ctxt->src.val = (unsigned long)ctxt->_eip;
3136	rc = jmp_rel(ctxt, rel);
3137	if (rc != X86EMUL_CONTINUE)
3138		return rc;
3139	return em_push(ctxt);
3140}
3141
3142static int em_call_far(struct x86_emulate_ctxt *ctxt)
3143{
3144	u16 sel, old_cs;
3145	ulong old_eip;
3146	int rc;
3147	struct desc_struct old_desc, new_desc;
3148	const struct x86_emulate_ops *ops = ctxt->ops;
3149	int cpl = ctxt->ops->cpl(ctxt);
3150	enum x86emul_mode prev_mode = ctxt->mode;
3151
3152	old_eip = ctxt->_eip;
3153	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3154
3155	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3156	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3157				       X86_TRANSFER_CALL_JMP, &new_desc);
3158	if (rc != X86EMUL_CONTINUE)
3159		return rc;
3160
3161	rc = assign_eip_far(ctxt, ctxt->src.val);
3162	if (rc != X86EMUL_CONTINUE)
3163		goto fail;
3164
3165	ctxt->src.val = old_cs;
3166	rc = em_push(ctxt);
3167	if (rc != X86EMUL_CONTINUE)
3168		goto fail;
3169
3170	ctxt->src.val = old_eip;
3171	rc = em_push(ctxt);
3172	/* If we failed, we tainted the memory, but at the very least we
3173	   should restore cs */
3174	if (rc != X86EMUL_CONTINUE) {
3175		pr_warn_once("faulting far call emulation tainted memory\n");
3176		goto fail;
3177	}
3178	return rc;
3179fail:
3180	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3181	ctxt->mode = prev_mode;
3182	return rc;
3183
3184}
3185
3186static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3187{
3188	int rc;
3189	unsigned long eip = 0;
3190
3191	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3192	if (rc != X86EMUL_CONTINUE)
3193		return rc;
3194	rc = assign_eip_near(ctxt, eip);
3195	if (rc != X86EMUL_CONTINUE)
3196		return rc;
3197	rsp_increment(ctxt, ctxt->src.val);
3198	return X86EMUL_CONTINUE;
3199}
3200
3201static int em_xchg(struct x86_emulate_ctxt *ctxt)
3202{
3203	/* Write back the register source. */
3204	ctxt->src.val = ctxt->dst.val;
3205	write_register_operand(&ctxt->src);
3206
3207	/* Write back the memory destination with implicit LOCK prefix. */
3208	ctxt->dst.val = ctxt->src.orig_val;
3209	ctxt->lock_prefix = 1;
3210	return X86EMUL_CONTINUE;
3211}
3212
3213static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3214{
3215	ctxt->dst.val = ctxt->src2.val;
3216	return fastop(ctxt, em_imul);
3217}
3218
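/*
 * CWD/CDQ/CQO: replicate rAX's sign bit throughout rDX.  The expression
 * below yields all ones when the sign bit is set ((1 - 1) == 0, ~0 == -1)
 * and zero otherwise.
 */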
3219static int em_cwd(struct x86_emulate_ctxt *ctxt)
3220{
3221	ctxt->dst.type = OP_REG;
3222	ctxt->dst.bytes = ctxt->src.bytes;
3223	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3224	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3225
3226	return X86EMUL_CONTINUE;
3227}
3228
3229static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3230{
3231	u64 tsc_aux = 0;
3232
3233	if (!ctxt->ops->guest_has_rdpid(ctxt))
3234		return emulate_ud(ctxt);
3235
3236	ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux);
3237	ctxt->dst.val = tsc_aux;
3238	return X86EMUL_CONTINUE;
3239}
3240
3241static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3242{
3243	u64 tsc = 0;
3244
3245	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3246	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3247	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3248	return X86EMUL_CONTINUE;
3249}
3250
3251static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3252{
3253	u64 pmc;
3254
3255	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3256		return emulate_gp(ctxt, 0);
3257	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3258	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3259	return X86EMUL_CONTINUE;
3260}
3261
3262static int em_mov(struct x86_emulate_ctxt *ctxt)
3263{
3264	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3265	return X86EMUL_CONTINUE;
3266}
3267
3268static int em_movbe(struct x86_emulate_ctxt *ctxt)
3269{
3270	u16 tmp;
3271
3272	if (!ctxt->ops->guest_has_movbe(ctxt))
3273		return emulate_ud(ctxt);
3274
3275	switch (ctxt->op_bytes) {
3276	case 2:
3277		/*
3278		 * From MOVBE definition: "...When the operand size is 16 bits,
3279		 * the upper word of the destination register remains unchanged
3280		 * ..."
3281		 *
3282		 * Casting either ->valptr or ->val to u16 breaks strict aliasing
3283		 * rules, so we have to do the operation almost by hand.
3284		 */
3285		tmp = (u16)ctxt->src.val;
3286		ctxt->dst.val &= ~0xffffUL;
3287		ctxt->dst.val |= (unsigned long)swab16(tmp);
3288		break;
3289	case 4:
3290		ctxt->dst.val = swab32((u32)ctxt->src.val);
3291		break;
3292	case 8:
3293		ctxt->dst.val = swab64(ctxt->src.val);
3294		break;
3295	default:
3296		BUG();
3297	}
3298	return X86EMUL_CONTINUE;
3299}
3300
3301static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3302{
3303	int cr_num = ctxt->modrm_reg;
3304	int r;
3305
3306	if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
3307		return emulate_gp(ctxt, 0);
3308
3309	/* Disable writeback. */
3310	ctxt->dst.type = OP_NONE;
3311
3312	if (cr_num == 0) {
3313		/*
3314		 * CR0 write might have updated CR0.PE and/or CR0.PG
3315		 * which can affect the cpu's execution mode.
3316		 */
3317		r = emulator_recalc_and_set_mode(ctxt);
3318		if (r != X86EMUL_CONTINUE)
3319			return r;
3320	}
3321
3322	return X86EMUL_CONTINUE;
3323}
3324
3325static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3326{
3327	unsigned long val;
3328
3329	if (ctxt->mode == X86EMUL_MODE_PROT64)
3330		val = ctxt->src.val & ~0ULL;
3331	else
3332		val = ctxt->src.val & ~0U;
3333
3334	/* #UD condition is already handled. */
3335	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3336		return emulate_gp(ctxt, 0);
3337
3338	/* Disable writeback. */
3339	ctxt->dst.type = OP_NONE;
3340	return X86EMUL_CONTINUE;
3341}
3342
3343static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3344{
3345	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3346	u64 msr_data;
3347	int r;
3348
3349	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3350		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3351	r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data);
3352
3353	if (r == X86EMUL_PROPAGATE_FAULT)
3354		return emulate_gp(ctxt, 0);
3355
3356	return r;
3357}
3358
3359static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3360{
3361	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3362	u64 msr_data;
3363	int r;
3364
3365	r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data);
3366
3367	if (r == X86EMUL_PROPAGATE_FAULT)
3368		return emulate_gp(ctxt, 0);
3369
3370	if (r == X86EMUL_CONTINUE) {
3371		*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3372		*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3373	}
3374	return r;
3375}
3376
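/*
 * Store a segment or system selector.  CR4.UMIP makes the system-selector
 * reads (SLDT/STR, i.e. anything above the six user segment registers)
 * privileged; plain MOV r/m, Sreg is unaffected.
 */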
3377static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3378{
3379	if (segment > VCPU_SREG_GS &&
3380	    (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3381	    ctxt->ops->cpl(ctxt) > 0)
3382		return emulate_gp(ctxt, 0);
3383
3384	ctxt->dst.val = get_segment_selector(ctxt, segment);
3385	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3386		ctxt->dst.bytes = 2;
3387	return X86EMUL_CONTINUE;
3388}
3389
3390static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3391{
3392	if (ctxt->modrm_reg > VCPU_SREG_GS)
3393		return emulate_ud(ctxt);
3394
3395	return em_store_sreg(ctxt, ctxt->modrm_reg);
3396}
3397
3398static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3399{
3400	u16 sel = ctxt->src.val;
3401
3402	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3403		return emulate_ud(ctxt);
3404
3405	if (ctxt->modrm_reg == VCPU_SREG_SS)
3406		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3407
3408	/* Disable writeback. */
3409	ctxt->dst.type = OP_NONE;
3410	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3411}
3412
3413static int em_sldt(struct x86_emulate_ctxt *ctxt)
3414{
3415	return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3416}
3417
3418static int em_lldt(struct x86_emulate_ctxt *ctxt)
3419{
3420	u16 sel = ctxt->src.val;
3421
3422	/* Disable writeback. */
3423	ctxt->dst.type = OP_NONE;
3424	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3425}
3426
3427static int em_str(struct x86_emulate_ctxt *ctxt)
3428{
3429	return em_store_sreg(ctxt, VCPU_SREG_TR);
3430}
3431
3432static int em_ltr(struct x86_emulate_ctxt *ctxt)
3433{
3434	u16 sel = ctxt->src.val;
3435
3436	/* Disable writeback. */
3437	ctxt->dst.type = OP_NONE;
3438	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3439}
3440
3441static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3442{
3443	int rc;
3444	ulong linear;
3445	unsigned int max_size;
3446
3447	rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, ctxt->mode,
3448			 &linear, X86EMUL_F_INVLPG);
3449	if (rc == X86EMUL_CONTINUE)
3450		ctxt->ops->invlpg(ctxt, linear);
3451	/* Disable writeback. */
3452	ctxt->dst.type = OP_NONE;
3453	return X86EMUL_CONTINUE;
3454}
3455
3456static int em_clts(struct x86_emulate_ctxt *ctxt)
3457{
3458	ulong cr0;
3459
3460	cr0 = ctxt->ops->get_cr(ctxt, 0);
3461	cr0 &= ~X86_CR0_TS;
3462	ctxt->ops->set_cr(ctxt, 0, cr0);
3463	return X86EMUL_CONTINUE;
3464}
3465
3466static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3467{
3468	int rc = ctxt->ops->fix_hypercall(ctxt);
3469
3470	if (rc != X86EMUL_CONTINUE)
3471		return rc;
3472
3473	/* Let the processor re-execute the fixed hypercall */
3474	ctxt->_eip = ctxt->eip;
3475	/* Disable writeback. */
3476	ctxt->dst.type = OP_NONE;
3477	return X86EMUL_CONTINUE;
3478}
3479
3480static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3481				  void (*get)(struct x86_emulate_ctxt *ctxt,
3482					      struct desc_ptr *ptr))
3483{
3484	struct desc_ptr desc_ptr;
3485
3486	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3487	    ctxt->ops->cpl(ctxt) > 0)
3488		return emulate_gp(ctxt, 0);
3489
3490	if (ctxt->mode == X86EMUL_MODE_PROT64)
3491		ctxt->op_bytes = 8;
3492	get(ctxt, &desc_ptr);
3493	if (ctxt->op_bytes == 2) {
3494		ctxt->op_bytes = 4;
3495		desc_ptr.address &= 0x00ffffff;
3496	}
3497	/* Disable writeback. */
3498	ctxt->dst.type = OP_NONE;
3499	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3500				   &desc_ptr, 2 + ctxt->op_bytes);
3501}
3502
3503static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3504{
3505	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3506}
3507
3508static int em_sidt(struct x86_emulate_ctxt *ctxt)
3509{
3510	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3511}
3512
3513static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3514{
3515	struct desc_ptr desc_ptr;
3516	int rc;
3517
3518	if (ctxt->mode == X86EMUL_MODE_PROT64)
3519		ctxt->op_bytes = 8;
3520	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3521			     &desc_ptr.size, &desc_ptr.address,
3522			     ctxt->op_bytes);
3523	if (rc != X86EMUL_CONTINUE)
3524		return rc;
3525	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3526	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
3527		return emulate_gp(ctxt, 0);
3528	if (lgdt)
3529		ctxt->ops->set_gdt(ctxt, &desc_ptr);
3530	else
3531		ctxt->ops->set_idt(ctxt, &desc_ptr);
3532	/* Disable writeback. */
3533	ctxt->dst.type = OP_NONE;
3534	return X86EMUL_CONTINUE;
3535}
3536
3537static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3538{
3539	return em_lgdt_lidt(ctxt, true);
3540}
3541
3542static int em_lidt(struct x86_emulate_ctxt *ctxt)
3543{
3544	return em_lgdt_lidt(ctxt, false);
3545}
3546
3547static int em_smsw(struct x86_emulate_ctxt *ctxt)
3548{
3549	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3550	    ctxt->ops->cpl(ctxt) > 0)
3551		return emulate_gp(ctxt, 0);
3552
3553	if (ctxt->dst.type == OP_MEM)
3554		ctxt->dst.bytes = 2;
3555	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3556	return X86EMUL_CONTINUE;
3557}
3558
3559static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3560{
3561	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3562			  | (ctxt->src.val & 0x0f));
3563	ctxt->dst.type = OP_NONE;
3564	return X86EMUL_CONTINUE;
3565}
3566
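/*
 * LOOP (0xe2) branches whenever the decremented rCX is non-zero;
 * LOOPE/LOOPNE (0xe1/0xe0) additionally test ZF.  XORing the opcode
 * with 5 maps 0xe1/0xe0 to condition codes 4/5 ("ZF set"/"ZF clear")
 * for test_cc().
 */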
3567static int em_loop(struct x86_emulate_ctxt *ctxt)
3568{
3569	int rc = X86EMUL_CONTINUE;
3570
3571	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3572	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3573	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3574		rc = jmp_rel(ctxt, ctxt->src.val);
3575
3576	return rc;
3577}
3578
3579static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3580{
3581	int rc = X86EMUL_CONTINUE;
3582
3583	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3584		rc = jmp_rel(ctxt, ctxt->src.val);
3585
3586	return rc;
3587}
3588
3589static int em_in(struct x86_emulate_ctxt *ctxt)
3590{
3591	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3592			     &ctxt->dst.val))
3593		return X86EMUL_IO_NEEDED;
3594
3595	return X86EMUL_CONTINUE;
3596}
3597
3598static int em_out(struct x86_emulate_ctxt *ctxt)
3599{
3600	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3601				    &ctxt->src.val, 1);
3602	/* Disable writeback. */
3603	ctxt->dst.type = OP_NONE;
3604	return X86EMUL_CONTINUE;
3605}
3606
3607static int em_cli(struct x86_emulate_ctxt *ctxt)
3608{
3609	if (emulator_bad_iopl(ctxt))
3610		return emulate_gp(ctxt, 0);
3611
3612	ctxt->eflags &= ~X86_EFLAGS_IF;
3613	return X86EMUL_CONTINUE;
3614}
3615
3616static int em_sti(struct x86_emulate_ctxt *ctxt)
3617{
3618	if (emulator_bad_iopl(ctxt))
3619		return emulate_gp(ctxt, 0);
3620
3621	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3622	ctxt->eflags |= X86_EFLAGS_IF;
3623	return X86EMUL_CONTINUE;
3624}
3625
3626static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3627{
3628	u32 eax, ebx, ecx, edx;
3629	u64 msr = 0;
3630
3631	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3632	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3633	    ctxt->ops->cpl(ctxt)) {
3634		return emulate_gp(ctxt, 0);
3635	}
3636
3637	eax = reg_read(ctxt, VCPU_REGS_RAX);
3638	ecx = reg_read(ctxt, VCPU_REGS_RCX);
3639	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3640	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
3641	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3642	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3643	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
3644	return X86EMUL_CONTINUE;
3645}
3646
3647static int em_sahf(struct x86_emulate_ctxt *ctxt)
3648{
3649	u32 flags;
3650
3651	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3652		X86_EFLAGS_SF;
3653	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3654
3655	ctxt->eflags &= ~0xffUL;
3656	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3657	return X86EMUL_CONTINUE;
3658}
3659
3660static int em_lahf(struct x86_emulate_ctxt *ctxt)
3661{
3662	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3663	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3664	return X86EMUL_CONTINUE;
3665}
3666
3667static int em_bswap(struct x86_emulate_ctxt *ctxt)
3668{
3669	switch (ctxt->op_bytes) {
3670#ifdef CONFIG_X86_64
3671	case 8:
3672		asm("bswap %0" : "+r"(ctxt->dst.val));
3673		break;
3674#endif
3675	default:
3676		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3677		break;
3678	}
3679	return X86EMUL_CONTINUE;
3680}
3681
3682static int em_clflush(struct x86_emulate_ctxt *ctxt)
3683{
3684	/* emulating clflush regardless of cpuid */
3685	return X86EMUL_CONTINUE;
3686}
3687
3688static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
3689{
3690	/* emulating clflushopt regardless of cpuid */
3691	return X86EMUL_CONTINUE;
3692}
3693
3694static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3695{
3696	ctxt->dst.val = (s32) ctxt->src.val;
3697	return X86EMUL_CONTINUE;
3698}
3699
3700static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3701{
3702	if (!ctxt->ops->guest_has_fxsr(ctxt))
3703		return emulate_ud(ctxt);
3704
3705	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3706		return emulate_nm(ctxt);
3707
3708	/*
3709	 * Rather than work around a lack of fxsave64/fxrstor64 on old
3710	 * compilers, don't emulate a case that should never be hit.
3711	 */
3712	if (ctxt->mode >= X86EMUL_MODE_PROT64)
3713		return X86EMUL_UNHANDLEABLE;
3714
3715	return X86EMUL_CONTINUE;
3716}
3717
3718/*
3719 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
3720 * and restore MXCSR.
3721 */
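/*
 * The XMM area starts at byte 160 of the 512-byte FXSAVE image, so this
 * works out to 160, 288 and 416 bytes for 0, 8 and 16 XMM registers
 * respectively; the rest of the image is ignored here.
 */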
3722static size_t __fxstate_size(int nregs)
3723{
3724	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
3725}
3726
3727static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
3728{
3729	bool cr4_osfxsr;
3730	if (ctxt->mode == X86EMUL_MODE_PROT64)
3731		return __fxstate_size(16);
3732
3733	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
3734	return __fxstate_size(cr4_osfxsr ? 8 : 0);
3735}
3736
3737/*
3738 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
3739 *  1) 16 bit mode
3740 *  2) 32 bit mode
3741 *     - like (1), but FIP and FDP are only 16 bit.  At least Intel CPUs
3742 *       preserve the whole 32 bit values, though, so (1) and (2) are the
3743 *       same wrt. save and restore
3744 *  3) 64-bit mode with REX.W prefix
3745 *     - like (2), but XMM 8-15 are being saved and restored
3746 *  4) 64-bit mode without REX.W prefix
3747 *     - like (3), but FIP and FDP are 64 bit
3748 *
3749 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
3750 * desired result.  (4) is not emulated.
3751 *
3752 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
3753 * and FPU DS) should match.
3754 */
3755static int em_fxsave(struct x86_emulate_ctxt *ctxt)
3756{
3757	struct fxregs_state fx_state;
3758	int rc;
3759
3760	rc = check_fxsr(ctxt);
3761	if (rc != X86EMUL_CONTINUE)
3762		return rc;
3763
3764	kvm_fpu_get();
3765
3766	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
3767
3768	kvm_fpu_put();
3769
3770	if (rc != X86EMUL_CONTINUE)
3771		return rc;
3772
3773	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
3774				   fxstate_size(ctxt));
3775}
3776
3777/*
3778 * FXRSTOR might restore XMM registers not provided by the guest. Fill
3779 * in the host registers (via FXSAVE) instead, so they won't be modified.
3780 * (preemption has to stay disabled until FXRSTOR).
3781 *
3782 * Use noinline to keep the stack for other functions called by callers small.
3783 */
3784static noinline int fxregs_fixup(struct fxregs_state *fx_state,
3785				 const size_t used_size)
3786{
3787	struct fxregs_state fx_tmp;
3788	int rc;
3789
3790	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
3791	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
3792	       __fxstate_size(16) - used_size);
3793
3794	return rc;
3795}
3796
3797static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
3798{
3799	struct fxregs_state fx_state;
3800	int rc;
3801	size_t size;
3802
3803	rc = check_fxsr(ctxt);
3804	if (rc != X86EMUL_CONTINUE)
3805		return rc;
3806
3807	size = fxstate_size(ctxt);
3808	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
3809	if (rc != X86EMUL_CONTINUE)
3810		return rc;
3811
3812	kvm_fpu_get();
3813
3814	if (size < __fxstate_size(16)) {
3815		rc = fxregs_fixup(&fx_state, size);
3816		if (rc != X86EMUL_CONTINUE)
3817			goto out;
3818	}
3819
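	/* FXRSTOR #GPs if any of the reserved MXCSR bits (31:16) are set. */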
3820	if (fx_state.mxcsr >> 16) {
3821		rc = emulate_gp(ctxt, 0);
3822		goto out;
3823	}
3824
3825	if (rc == X86EMUL_CONTINUE)
3826		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
3827
3828out:
3829	kvm_fpu_put();
3830
3831	return rc;
3832}
3833
3834static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
3835{
3836	u32 eax, ecx, edx;
3837
3838	if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
3839		return emulate_ud(ctxt);
3840
3841	eax = reg_read(ctxt, VCPU_REGS_RAX);
3842	edx = reg_read(ctxt, VCPU_REGS_RDX);
3843	ecx = reg_read(ctxt, VCPU_REGS_RCX);
3844
3845	if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
3846		return emulate_gp(ctxt, 0);
3847
3848	return X86EMUL_CONTINUE;
3849}
3850
3851static bool valid_cr(int nr)
3852{
3853	switch (nr) {
3854	case 0:
3855	case 2 ... 4:
3856	case 8:
3857		return true;
3858	default:
3859		return false;
3860	}
3861}
3862
3863static int check_cr_access(struct x86_emulate_ctxt *ctxt)
3864{
3865	if (!valid_cr(ctxt->modrm_reg))
3866		return emulate_ud(ctxt);
3867
3868	return X86EMUL_CONTINUE;
3869}
3870
3871static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3872{
3873	int dr = ctxt->modrm_reg;
3874	u64 cr4;
3875
3876	if (dr > 7)
3877		return emulate_ud(ctxt);
3878
3879	cr4 = ctxt->ops->get_cr(ctxt, 4);
3880	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3881		return emulate_ud(ctxt);
3882
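	/*
	 * DR7.GD (general detect) turns any debug-register access into #DB
	 * with DR6.BD set, so a debugger can intercept attempts to touch
	 * DR0-DR7.
	 */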
3883	if (ctxt->ops->get_dr(ctxt, 7) & DR7_GD) {
3884		ulong dr6;
3885
3886		dr6 = ctxt->ops->get_dr(ctxt, 6);
3887		dr6 &= ~DR_TRAP_BITS;
3888		dr6 |= DR6_BD | DR6_ACTIVE_LOW;
3889		ctxt->ops->set_dr(ctxt, 6, dr6);
3890		return emulate_db(ctxt);
3891	}
3892
3893	return X86EMUL_CONTINUE;
3894}
3895
3896static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3897{
3898	u64 new_val = ctxt->src.val64;
3899	int dr = ctxt->modrm_reg;
3900
3901	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3902		return emulate_gp(ctxt, 0);
3903
3904	return check_dr_read(ctxt);
3905}
3906
3907static int check_svme(struct x86_emulate_ctxt *ctxt)
3908{
3909	u64 efer = 0;
3910
3911	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3912
3913	if (!(efer & EFER_SVME))
3914		return emulate_ud(ctxt);
3915
3916	return X86EMUL_CONTINUE;
3917}
3918
3919static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3920{
3921	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3922
3923	/* Valid physical address? */
3924	if (rax & 0xffff000000000000ULL)
3925		return emulate_gp(ctxt, 0);
3926
3927	return check_svme(ctxt);
3928}
3929
3930static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3931{
3932	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3933
3934	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3935		return emulate_gp(ctxt, 0);
3936
3937	return X86EMUL_CONTINUE;
3938}
3939
3940static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3941{
3942	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3943	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3944
3945	/*
3946	 * VMware allows access to these pseudo-PMCs even when read via RDPMC
3947	 * in Ring3 when CR4.PCE=0.
3948	 */
3949	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
3950		return X86EMUL_CONTINUE;
3951
3952	/*
3953	 * Unless CR4.PCE is set, the SDM requires CPL=0 or CR0.PE=0.  The CR0.PE
3954	 * check however is unnecessary because CPL is always 0 outside
3955	 * protected mode.
3956	 */
3957	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3958	    ctxt->ops->check_rdpmc_early(ctxt, rcx))
3959		return emulate_gp(ctxt, 0);
3960
3961	return X86EMUL_CONTINUE;
3962}
3963
3964static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3965{
3966	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3967	if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3968		return emulate_gp(ctxt, 0);
3969
3970	return X86EMUL_CONTINUE;
3971}
3972
3973static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3974{
3975	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3976	if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3977		return emulate_gp(ctxt, 0);
3978
3979	return X86EMUL_CONTINUE;
3980}
3981
3982#define D(_y) { .flags = (_y) }
3983#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3984#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3985		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
3986#define N    D(NotImpl)
3987#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3988#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3989#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3990#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
3991#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
3992#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3993#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3994#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3995#define II(_f, _e, _i) \
3996	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3997#define IIP(_f, _e, _i, _p) \
3998	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3999	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
4000#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
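/*
 * Shorthand for opcode-table entries: D() carries decode flags only,
 * I()/F() attach an execute/fastop callback, DI()/II() add an intercept
 * point and the *P variants a ->check_perm hook.  G/GD/EXT/E/GP/ID/MD
 * redirect decode through group, group-dual, ModRM-extension, escape,
 * SIMD-prefix, instruction-dual and mode-dual subtables; N marks an
 * unimplemented entry.
 */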
4001
4002#define D2bv(_f)      D((_f) | ByteOp), D(_f)
4003#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4004#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
4005#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
4006#define I2bvIP(_f, _e, _i, _p) \
4007	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4008
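/*
 * F6ALU() emits the six classic ALU encodings in opcode order:
 * r/m8,r8; r/m,r; r8,r/m8; r,r/m; AL,imm8; eAX,imm.  Only the
 * memory-destination forms keep the Lock flag.
 */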
4009#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
4010		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
4011		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4012
4013static const struct opcode group7_rm0[] = {
4014	N,
4015	I(SrcNone | Priv | EmulateOnUD,	em_hypercall),
4016	N, N, N, N, N, N,
4017};
4018
4019static const struct opcode group7_rm1[] = {
4020	DI(SrcNone | Priv, monitor),
4021	DI(SrcNone | Priv, mwait),
4022	N, N, N, N, N, N,
4023};
4024
4025static const struct opcode group7_rm2[] = {
4026	N,
4027	II(ImplicitOps | Priv,			em_xsetbv,	xsetbv),
4028	N, N, N, N, N, N,
4029};
4030
4031static const struct opcode group7_rm3[] = {
4032	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
4033	II(SrcNone  | Prot | EmulateOnUD,	em_hypercall,	vmmcall),
4034	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
4035	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
4036	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
4037	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
4038	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
4039	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
4040};
4041
4042static const struct opcode group7_rm7[] = {
4043	N,
4044	DIP(SrcNone, rdtscp, check_rdtsc),
4045	N, N, N, N, N, N,
4046};
4047
4048static const struct opcode group1[] = {
4049	F(Lock, em_add),
4050	F(Lock | PageTable, em_or),
4051	F(Lock, em_adc),
4052	F(Lock, em_sbb),
4053	F(Lock | PageTable, em_and),
4054	F(Lock, em_sub),
4055	F(Lock, em_xor),
4056	F(NoWrite, em_cmp),
4057};
4058
4059static const struct opcode group1A[] = {
4060	I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4061};
4062
4063static const struct opcode group2[] = {
4064	F(DstMem | ModRM, em_rol),
4065	F(DstMem | ModRM, em_ror),
4066	F(DstMem | ModRM, em_rcl),
4067	F(DstMem | ModRM, em_rcr),
4068	F(DstMem | ModRM, em_shl),
4069	F(DstMem | ModRM, em_shr),
4070	F(DstMem | ModRM, em_shl),
4071	F(DstMem | ModRM, em_sar),
4072};
4073
4074static const struct opcode group3[] = {
4075	F(DstMem | SrcImm | NoWrite, em_test),
4076	F(DstMem | SrcImm | NoWrite, em_test),
4077	F(DstMem | SrcNone | Lock, em_not),
4078	F(DstMem | SrcNone | Lock, em_neg),
4079	F(DstXacc | Src2Mem, em_mul_ex),
4080	F(DstXacc | Src2Mem, em_imul_ex),
4081	F(DstXacc | Src2Mem, em_div_ex),
4082	F(DstXacc | Src2Mem, em_idiv_ex),
4083};
4084
4085static const struct opcode group4[] = {
4086	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4087	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4088	N, N, N, N, N, N,
4089};
4090
4091static const struct opcode group5[] = {
4092	F(DstMem | SrcNone | Lock,		em_inc),
4093	F(DstMem | SrcNone | Lock,		em_dec),
4094	I(SrcMem | NearBranch | IsBranch,       em_call_near_abs),
4095	I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far),
4096	I(SrcMem | NearBranch | IsBranch,       em_jmp_abs),
4097	I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far),
4098	I(SrcMem | Stack | TwoMemOp,		em_push), D(Undefined),
4099};
4100
4101static const struct opcode group6[] = {
4102	II(Prot | DstMem,	   em_sldt, sldt),
4103	II(Prot | DstMem,	   em_str, str),
4104	II(Prot | Priv | SrcMem16, em_lldt, lldt),
4105	II(Prot | Priv | SrcMem16, em_ltr, ltr),
4106	N, N, N, N,
4107};
4108
4109static const struct group_dual group7 = { {
4110	II(Mov | DstMem,			em_sgdt, sgdt),
4111	II(Mov | DstMem,			em_sidt, sidt),
4112	II(SrcMem | Priv,			em_lgdt, lgdt),
4113	II(SrcMem | Priv,			em_lidt, lidt),
4114	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4115	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4116	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
4117}, {
4118	EXT(0, group7_rm0),
4119	EXT(0, group7_rm1),
4120	EXT(0, group7_rm2),
4121	EXT(0, group7_rm3),
4122	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4123	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4124	EXT(0, group7_rm7),
4125} };
4126
4127static const struct opcode group8[] = {
4128	N, N, N, N,
4129	F(DstMem | SrcImmByte | NoWrite,		em_bt),
4130	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
4131	F(DstMem | SrcImmByte | Lock,			em_btr),
4132	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
4133};
4134
4135/*
4136 * The "memory" destination is actually always a register, since we come
4137 * from the register case of group9.
4138 */
4139static const struct gprefix pfx_0f_c7_7 = {
4140	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
4141};
4142
4143
4144static const struct group_dual group9 = { {
4145	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4146}, {
4147	N, N, N, N, N, N, N,
4148	GP(0, &pfx_0f_c7_7),
4149} };
4150
4151static const struct opcode group11[] = {
4152	I(DstMem | SrcImm | Mov | PageTable, em_mov),
4153	X7(D(Undefined)),
4154};
4155
4156static const struct gprefix pfx_0f_ae_7 = {
4157	I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
4158};
4159
4160static const struct group_dual group15 = { {
4161	I(ModRM | Aligned16, em_fxsave),
4162	I(ModRM | Aligned16, em_fxrstor),
4163	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4164}, {
4165	N, N, N, N, N, N, N, N,
4166} };
4167
4168static const struct gprefix pfx_0f_6f_0f_7f = {
4169	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4170};
4171
4172static const struct instr_dual instr_dual_0f_2b = {
4173	I(0, em_mov), N
4174};
4175
4176static const struct gprefix pfx_0f_2b = {
4177	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4178};
4179
4180static const struct gprefix pfx_0f_10_0f_11 = {
4181	I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4182};
4183
4184static const struct gprefix pfx_0f_28_0f_29 = {
4185	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4186};
4187
4188static const struct gprefix pfx_0f_e7 = {
4189	N, I(Sse, em_mov), N, N,
4190};
4191
4192static const struct escape escape_d9 = { {
4193	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4194}, {
4195	/* 0xC0 - 0xC7 */
4196	N, N, N, N, N, N, N, N,
4197	/* 0xC8 - 0xCF */
4198	N, N, N, N, N, N, N, N,
4199	/* 0xD0 - 0xD7 */
4200	N, N, N, N, N, N, N, N,
4201	/* 0xD8 - 0xDF */
4202	N, N, N, N, N, N, N, N,
4203	/* 0xE0 - 0xE7 */
4204	N, N, N, N, N, N, N, N,
4205	/* 0xE8 - 0xEF */
4206	N, N, N, N, N, N, N, N,
4207	/* 0xF0 - 0xF7 */
4208	N, N, N, N, N, N, N, N,
4209	/* 0xF8 - 0xFF */
4210	N, N, N, N, N, N, N, N,
4211} };
4212
4213static const struct escape escape_db = { {
4214	N, N, N, N, N, N, N, N,
4215}, {
4216	/* 0xC0 - 0xC7 */
4217	N, N, N, N, N, N, N, N,
4218	/* 0xC8 - 0xCF */
4219	N, N, N, N, N, N, N, N,
4220	/* 0xD0 - 0xD7 */
4221	N, N, N, N, N, N, N, N,
4222	/* 0xD8 - 0xDF */
4223	N, N, N, N, N, N, N, N,
4224	/* 0xE0 - 0xE7 */
4225	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4226	/* 0xE8 - 0xEF */
4227	N, N, N, N, N, N, N, N,
4228	/* 0xF0 - 0xF7 */
4229	N, N, N, N, N, N, N, N,
4230	/* 0xF8 - 0xFF */
4231	N, N, N, N, N, N, N, N,
4232} };
4233
4234static const struct escape escape_dd = { {
4235	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4236}, {
4237	/* 0xC0 - 0xC7 */
4238	N, N, N, N, N, N, N, N,
4239	/* 0xC8 - 0xCF */
4240	N, N, N, N, N, N, N, N,
4241	/* 0xD0 - 0xD7 */
4242	N, N, N, N, N, N, N, N,
4243	/* 0xD8 - 0xDF */
4244	N, N, N, N, N, N, N, N,
4245	/* 0xE0 - 0xE7 */
4246	N, N, N, N, N, N, N, N,
4247	/* 0xE8 - 0xEF */
4248	N, N, N, N, N, N, N, N,
4249	/* 0xF0 - 0xF7 */
4250	N, N, N, N, N, N, N, N,
4251	/* 0xF8 - 0xFF */
4252	N, N, N, N, N, N, N, N,
4253} };
4254
4255static const struct instr_dual instr_dual_0f_c3 = {
4256	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4257};
4258
4259static const struct mode_dual mode_dual_63 = {
4260	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4261};
4262
4263static const struct instr_dual instr_dual_8d = {
4264	D(DstReg | SrcMem | ModRM | NoAccess), N
4265};
4266
4267static const struct opcode opcode_table[256] = {
4268	/* 0x00 - 0x07 */
4269	F6ALU(Lock, em_add),
4270	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4271	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4272	/* 0x08 - 0x0F */
4273	F6ALU(Lock | PageTable, em_or),
4274	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4275	N,
4276	/* 0x10 - 0x17 */
4277	F6ALU(Lock, em_adc),
4278	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4279	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4280	/* 0x18 - 0x1F */
4281	F6ALU(Lock, em_sbb),
4282	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4283	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4284	/* 0x20 - 0x27 */
4285	F6ALU(Lock | PageTable, em_and), N, N,
4286	/* 0x28 - 0x2F */
4287	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4288	/* 0x30 - 0x37 */
4289	F6ALU(Lock, em_xor), N, N,
4290	/* 0x38 - 0x3F */
4291	F6ALU(NoWrite, em_cmp), N, N,
4292	/* 0x40 - 0x4F */
4293	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4294	/* 0x50 - 0x57 */
4295	X8(I(SrcReg | Stack, em_push)),
4296	/* 0x58 - 0x5F */
4297	X8(I(DstReg | Stack, em_pop)),
4298	/* 0x60 - 0x67 */
4299	I(ImplicitOps | Stack | No64, em_pusha),
4300	I(ImplicitOps | Stack | No64, em_popa),
4301	N, MD(ModRM, &mode_dual_63),
4302	N, N, N, N,
4303	/* 0x68 - 0x6F */
4304	I(SrcImm | Mov | Stack, em_push),
4305	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4306	I(SrcImmByte | Mov | Stack, em_push),
4307	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4308	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4309	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4310	/* 0x70 - 0x7F */
4311	X16(D(SrcImmByte | NearBranch | IsBranch)),
4312	/* 0x80 - 0x87 */
4313	G(ByteOp | DstMem | SrcImm, group1),
4314	G(DstMem | SrcImm, group1),
4315	G(ByteOp | DstMem | SrcImm | No64, group1),
4316	G(DstMem | SrcImmByte, group1),
4317	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4318	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4319	/* 0x88 - 0x8F */
4320	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4321	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4322	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4323	ID(0, &instr_dual_8d),
4324	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4325	G(0, group1A),
4326	/* 0x90 - 0x97 */
4327	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4328	/* 0x98 - 0x9F */
4329	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4330	I(SrcImmFAddr | No64 | IsBranch, em_call_far), N,
4331	II(ImplicitOps | Stack, em_pushf, pushf),
4332	II(ImplicitOps | Stack, em_popf, popf),
4333	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4334	/* 0xA0 - 0xA7 */
4335	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4336	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4337	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4338	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4339	/* 0xA8 - 0xAF */
4340	F2bv(DstAcc | SrcImm | NoWrite, em_test),
4341	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4342	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4343	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4344	/* 0xB0 - 0xB7 */
4345	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4346	/* 0xB8 - 0xBF */
4347	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4348	/* 0xC0 - 0xC7 */
4349	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4350	I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch, em_ret_near_imm),
4351	I(ImplicitOps | NearBranch | IsBranch, em_ret),
4352	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4353	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4354	G(ByteOp, group11), G(0, group11),
4355	/* 0xC8 - 0xCF */
4356	I(Stack | SrcImmU16 | Src2ImmByte | IsBranch, em_enter),
4357	I(Stack | IsBranch, em_leave),
4358	I(ImplicitOps | SrcImmU16 | IsBranch, em_ret_far_imm),
4359	I(ImplicitOps | IsBranch, em_ret_far),
4360	D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn),
4361	D(ImplicitOps | No64 | IsBranch),
4362	II(ImplicitOps | IsBranch, em_iret, iret),
4363	/* 0xD0 - 0xD7 */
4364	G(Src2One | ByteOp, group2), G(Src2One, group2),
4365	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4366	I(DstAcc | SrcImmUByte | No64, em_aam),
4367	I(DstAcc | SrcImmUByte | No64, em_aad),
4368	F(DstAcc | ByteOp | No64, em_salc),
4369	I(DstAcc | SrcXLat | ByteOp, em_mov),
4370	/* 0xD8 - 0xDF */
4371	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4372	/* 0xE0 - 0xE7 */
4373	X3(I(SrcImmByte | NearBranch | IsBranch, em_loop)),
4374	I(SrcImmByte | NearBranch | IsBranch, em_jcxz),
4375	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
4376	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4377	/* 0xE8 - 0xEF */
4378	I(SrcImm | NearBranch | IsBranch, em_call),
4379	D(SrcImm | ImplicitOps | NearBranch | IsBranch),
4380	I(SrcImmFAddr | No64 | IsBranch, em_jmp_far),
4381	D(SrcImmByte | ImplicitOps | NearBranch | IsBranch),
4382	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
4383	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4384	/* 0xF0 - 0xF7 */
4385	N, DI(ImplicitOps, icebp), N, N,
4386	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4387	G(ByteOp, group3), G(0, group3),
4388	/* 0xF8 - 0xFF */
4389	D(ImplicitOps), D(ImplicitOps),
4390	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4391	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4392};
4393
4394static const struct opcode twobyte_table[256] = {
4395	/* 0x00 - 0x0F */
4396	G(0, group6), GD(0, &group7), N, N,
4397	N, I(ImplicitOps | EmulateOnUD | IsBranch, em_syscall),
4398	II(ImplicitOps | Priv, em_clts, clts), N,
4399	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4400	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4401	/* 0x10 - 0x1F */
4402	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4403	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4404	N, N, N, N, N, N,
4405	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
4406	D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4407	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4408	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4409	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4410	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
4411	/* 0x20 - 0x2F */
4412	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
4413	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4414	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4415						check_cr_access),
4416	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4417						check_dr_write),
4418	N, N, N, N,
4419	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4420	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4421	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4422	N, N, N, N,
4423	/* 0x30 - 0x3F */
4424	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4425	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4426	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4427	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4428	I(ImplicitOps | EmulateOnUD | IsBranch, em_sysenter),
4429	I(ImplicitOps | Priv | EmulateOnUD | IsBranch, em_sysexit),
4430	N, N,
4431	N, N, N, N, N, N, N, N,
4432	/* 0x40 - 0x4F */
4433	X16(D(DstReg | SrcMem | ModRM)),
4434	/* 0x50 - 0x5F */
4435	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4436	/* 0x60 - 0x6F */
4437	N, N, N, N,
4438	N, N, N, N,
4439	N, N, N, N,
4440	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4441	/* 0x70 - 0x7F */
4442	N, N, N, N,
4443	N, N, N, N,
4444	N, N, N, N,
4445	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4446	/* 0x80 - 0x8F */
4447	X16(D(SrcImm | NearBranch | IsBranch)),
4448	/* 0x90 - 0x9F */
4449	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4450	/* 0xA0 - 0xA7 */
4451	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4452	II(ImplicitOps, em_cpuid, cpuid),
4453	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4454	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4455	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4456	/* 0xA8 - 0xAF */
4457	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4458	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4459	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4460	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4461	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4462	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4463	/* 0xB0 - 0xB7 */
4464	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4465	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4466	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4467	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4468	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4469	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4470	/* 0xB8 - 0xBF */
4471	N, N,
4472	G(BitOp, group8),
4473	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4474	I(DstReg | SrcMem | ModRM, em_bsf_c),
4475	I(DstReg | SrcMem | ModRM, em_bsr_c),
4476	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4477	/* 0xC0 - 0xC7 */
4478	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4479	N, ID(0, &instr_dual_0f_c3),
4480	N, N, N, GD(0, &group9),
4481	/* 0xC8 - 0xCF */
4482	X8(I(DstReg, em_bswap)),
4483	/* 0xD0 - 0xDF */
4484	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4485	/* 0xE0 - 0xEF */
4486	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4487	N, N, N, N, N, N, N, N,
4488	/* 0xF0 - 0xFF */
4489	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4490};
4491
4492static const struct instr_dual instr_dual_0f_38_f0 = {
4493	I(DstReg | SrcMem | Mov, em_movbe), N
4494};
4495
4496static const struct instr_dual instr_dual_0f_38_f1 = {
4497	I(DstMem | SrcReg | Mov, em_movbe), N
4498};
4499
4500static const struct gprefix three_byte_0f_38_f0 = {
4501	ID(0, &instr_dual_0f_38_f0), ID(0, &instr_dual_0f_38_f0), N, N
4502};
4503
4504static const struct gprefix three_byte_0f_38_f1 = {
4505	ID(0, &instr_dual_0f_38_f1), ID(0, &instr_dual_0f_38_f1), N, N
4506};
4507
4508/*
4509 * The insns below are selected by the mandatory prefix and indexed by the
4510 * third opcode byte.
4511 */
4512static const struct opcode opcode_map_0f_38[256] = {
4513	/* 0x00 - 0x7f */
4514	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4515	/* 0x80 - 0xef */
4516	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4517	/* 0xf0 - 0xf1 */
4518	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4519	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4520	/* 0xf2 - 0xff */
4521	N, N, X4(N), X8(N)
4522};
4523
4524#undef D
4525#undef N
4526#undef G
4527#undef GD
4528#undef I
4529#undef GP
4530#undef EXT
4531#undef MD
4532#undef ID
4533
4534#undef D2bv
4535#undef D2bvIP
4536#undef I2bv
4537#undef I2bvIP
4538#undef F6ALU
4539
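/*
 * Immediates are at most 4 bytes even with a 64-bit operand size, since
 * 64-bit forms sign-extend a 32-bit immediate.  The sole exception,
 * MOV r64, imm64, is decoded via OpImm64 and bypasses this helper.
 */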
4540static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4541{
4542	unsigned size;
4543
4544	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4545	if (size == 8)
4546		size = 4;
4547	return size;
4548}
4549
4550static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4551		      unsigned size, bool sign_extension)
4552{
4553	int rc = X86EMUL_CONTINUE;
4554
4555	op->type = OP_IMM;
4556	op->bytes = size;
4557	op->addr.mem.ea = ctxt->_eip;
4558	/* NB. Immediates are sign-extended as necessary. */
4559	switch (op->bytes) {
4560	case 1:
4561		op->val = insn_fetch(s8, ctxt);
4562		break;
4563	case 2:
4564		op->val = insn_fetch(s16, ctxt);
4565		break;
4566	case 4:
4567		op->val = insn_fetch(s32, ctxt);
4568		break;
4569	case 8:
4570		op->val = insn_fetch(s64, ctxt);
4571		break;
4572	}
4573	if (!sign_extension) {
4574		switch (op->bytes) {
4575		case 1:
4576			op->val &= 0xff;
4577			break;
4578		case 2:
4579			op->val &= 0xffff;
4580			break;
4581		case 4:
4582			op->val &= 0xffffffff;
4583			break;
4584		}
4585	}
4586done:
4587	return rc;
4588}
4589
4590static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4591			  unsigned d)
4592{
4593	int rc = X86EMUL_CONTINUE;
4594
4595	switch (d) {
4596	case OpReg:
4597		decode_register_operand(ctxt, op);
4598		break;
4599	case OpImmUByte:
4600		rc = decode_imm(ctxt, op, 1, false);
4601		break;
4602	case OpMem:
4603		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4604	mem_common:
4605		*op = ctxt->memop;
4606		ctxt->memopp = op;
4607		if (ctxt->d & BitOp)
4608			fetch_bit_operand(ctxt);
4609		op->orig_val = op->val;
4610		break;
4611	case OpMem64:
4612		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4613		goto mem_common;
4614	case OpAcc:
4615		op->type = OP_REG;
4616		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4617		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4618		fetch_register_operand(op);
4619		op->orig_val = op->val;
4620		break;
4621	case OpAccLo:
4622		op->type = OP_REG;
4623		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4624		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4625		fetch_register_operand(op);
4626		op->orig_val = op->val;
4627		break;
4628	case OpAccHi:
4629		if (ctxt->d & ByteOp) {
4630			op->type = OP_NONE;
4631			break;
4632		}
4633		op->type = OP_REG;
4634		op->bytes = ctxt->op_bytes;
4635		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4636		fetch_register_operand(op);
4637		op->orig_val = op->val;
4638		break;
4639	case OpDI:
4640		op->type = OP_MEM;
4641		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4642		op->addr.mem.ea =
4643			register_address(ctxt, VCPU_REGS_RDI);
4644		op->addr.mem.seg = VCPU_SREG_ES;
4645		op->val = 0;
4646		op->count = 1;
4647		break;
4648	case OpDX:
4649		op->type = OP_REG;
4650		op->bytes = 2;
4651		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4652		fetch_register_operand(op);
4653		break;
4654	case OpCL:
4655		op->type = OP_IMM;
4656		op->bytes = 1;
4657		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4658		break;
4659	case OpImmByte:
4660		rc = decode_imm(ctxt, op, 1, true);
4661		break;
4662	case OpOne:
4663		op->type = OP_IMM;
4664		op->bytes = 1;
4665		op->val = 1;
4666		break;
4667	case OpImm:
4668		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4669		break;
4670	case OpImm64:
4671		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4672		break;
4673	case OpMem8:
4674		ctxt->memop.bytes = 1;
4675		if (ctxt->memop.type == OP_REG) {
4676			ctxt->memop.addr.reg = decode_register(ctxt,
4677					ctxt->modrm_rm, true);
4678			fetch_register_operand(&ctxt->memop);
4679		}
4680		goto mem_common;
4681	case OpMem16:
4682		ctxt->memop.bytes = 2;
4683		goto mem_common;
4684	case OpMem32:
4685		ctxt->memop.bytes = 4;
4686		goto mem_common;
4687	case OpImmU16:
4688		rc = decode_imm(ctxt, op, 2, false);
4689		break;
4690	case OpImmU:
4691		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4692		break;
4693	case OpSI:
4694		op->type = OP_MEM;
4695		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4696		op->addr.mem.ea =
4697			register_address(ctxt, VCPU_REGS_RSI);
4698		op->addr.mem.seg = ctxt->seg_override;
4699		op->val = 0;
4700		op->count = 1;
4701		break;
4702	case OpXLat:
4703		op->type = OP_MEM;
4704		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4705		op->addr.mem.ea =
4706			address_mask(ctxt,
4707				reg_read(ctxt, VCPU_REGS_RBX) +
4708				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4709		op->addr.mem.seg = ctxt->seg_override;
4710		op->val = 0;
4711		break;
4712	case OpImmFAddr:
4713		op->type = OP_IMM;
4714		op->addr.mem.ea = ctxt->_eip;
4715		op->bytes = ctxt->op_bytes + 2;
4716		insn_fetch_arr(op->valptr, op->bytes, ctxt);
4717		break;
4718	case OpMemFAddr:
4719		ctxt->memop.bytes = ctxt->op_bytes + 2;
4720		goto mem_common;
4721	case OpES:
4722		op->type = OP_IMM;
4723		op->val = VCPU_SREG_ES;
4724		break;
4725	case OpCS:
4726		op->type = OP_IMM;
4727		op->val = VCPU_SREG_CS;
4728		break;
4729	case OpSS:
4730		op->type = OP_IMM;
4731		op->val = VCPU_SREG_SS;
4732		break;
4733	case OpDS:
4734		op->type = OP_IMM;
4735		op->val = VCPU_SREG_DS;
4736		break;
4737	case OpFS:
4738		op->type = OP_IMM;
4739		op->val = VCPU_SREG_FS;
4740		break;
4741	case OpGS:
4742		op->type = OP_IMM;
4743		op->val = VCPU_SREG_GS;
4744		break;
4745	case OpImplicit:
4746		/* Special instructions do their own operand decoding. */
4747	default:
4748		op->type = OP_NONE; /* Disable writeback. */
4749		break;
4750	}
4751
4752done:
4753	return rc;
4754}
4755
4756int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
4757{
4758	int rc = X86EMUL_CONTINUE;
4759	int mode = ctxt->mode;
4760	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4761	bool op_prefix = false;
4762	bool has_seg_override = false;
4763	struct opcode opcode;
4764	u16 dummy;
4765	struct desc_struct desc;
4766
4767	ctxt->memop.type = OP_NONE;
4768	ctxt->memopp = NULL;
4769	ctxt->_eip = ctxt->eip;
4770	ctxt->fetch.ptr = ctxt->fetch.data;
4771	ctxt->fetch.end = ctxt->fetch.data + insn_len;
4772	ctxt->opcode_len = 1;
4773	ctxt->intercept = x86_intercept_none;
4774	if (insn_len > 0)
4775		memcpy(ctxt->fetch.data, insn, insn_len);
4776	else {
4777		rc = __do_insn_fetch_bytes(ctxt, 1);
4778		if (rc != X86EMUL_CONTINUE)
4779			goto done;
4780	}
4781
4782	switch (mode) {
4783	case X86EMUL_MODE_REAL:
4784	case X86EMUL_MODE_VM86:
4785		def_op_bytes = def_ad_bytes = 2;
4786		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
4787		if (desc.d)
4788			def_op_bytes = def_ad_bytes = 4;
4789		break;
4790	case X86EMUL_MODE_PROT16:
4791		def_op_bytes = def_ad_bytes = 2;
4792		break;
4793	case X86EMUL_MODE_PROT32:
4794		def_op_bytes = def_ad_bytes = 4;
4795		break;
4796#ifdef CONFIG_X86_64
4797	case X86EMUL_MODE_PROT64:
4798		def_op_bytes = 4;
4799		def_ad_bytes = 8;
4800		break;
4801#endif
4802	default:
4803		return EMULATION_FAILED;
4804	}
4805
4806	ctxt->op_bytes = def_op_bytes;
4807	ctxt->ad_bytes = def_ad_bytes;
4808
4809	/* Legacy prefixes. */
4810	for (;;) {
4811		switch (ctxt->b = insn_fetch(u8, ctxt)) {
4812		case 0x66:	/* operand-size override */
4813			op_prefix = true;
4814			/* switch between 2/4 bytes */
4815			ctxt->op_bytes = def_op_bytes ^ 6;
4816			break;
4817		case 0x67:	/* address-size override */
4818			if (mode == X86EMUL_MODE_PROT64)
4819				/* switch between 4/8 bytes */
4820				ctxt->ad_bytes = def_ad_bytes ^ 12;
4821			else
4822				/* switch between 2/4 bytes */
4823				ctxt->ad_bytes = def_ad_bytes ^ 6;
4824			break;
4825		case 0x26:	/* ES override */
4826			has_seg_override = true;
4827			ctxt->seg_override = VCPU_SREG_ES;
4828			break;
4829		case 0x2e:	/* CS override */
4830			has_seg_override = true;
4831			ctxt->seg_override = VCPU_SREG_CS;
4832			break;
4833		case 0x36:	/* SS override */
4834			has_seg_override = true;
4835			ctxt->seg_override = VCPU_SREG_SS;
4836			break;
4837		case 0x3e:	/* DS override */
4838			has_seg_override = true;
4839			ctxt->seg_override = VCPU_SREG_DS;
4840			break;
4841		case 0x64:	/* FS override */
4842			has_seg_override = true;
4843			ctxt->seg_override = VCPU_SREG_FS;
4844			break;
4845		case 0x65:	/* GS override */
4846			has_seg_override = true;
4847			ctxt->seg_override = VCPU_SREG_GS;
4848			break;
4849		case 0x40 ... 0x4f: /* REX */
4850			if (mode != X86EMUL_MODE_PROT64)
4851				goto done_prefixes;
4852			ctxt->rex_prefix = ctxt->b;
4853			continue;
4854		case 0xf0:	/* LOCK */
4855			ctxt->lock_prefix = 1;
4856			break;
4857		case 0xf2:	/* REPNE/REPNZ */
4858		case 0xf3:	/* REP/REPE/REPZ */
4859			ctxt->rep_prefix = ctxt->b;
4860			break;
4861		default:
4862			goto done_prefixes;
4863		}
4864
4865		/* Any legacy prefix after a REX prefix nullifies its effect. */
4866
4867		ctxt->rex_prefix = 0;
4868	}
4869
4870done_prefixes:
4871
4872	/* REX prefix. */
4873	if (ctxt->rex_prefix & 8)
4874		ctxt->op_bytes = 8;	/* REX.W */
4875
4876	/* Opcode byte(s). */
4877	opcode = opcode_table[ctxt->b];
4878	/* Two-byte opcode? */
4879	if (ctxt->b == 0x0f) {
4880		ctxt->opcode_len = 2;
4881		ctxt->b = insn_fetch(u8, ctxt);
4882		opcode = twobyte_table[ctxt->b];
4883
4884		/* 0F_38 opcode map */
4885		if (ctxt->b == 0x38) {
4886			ctxt->opcode_len = 3;
4887			ctxt->b = insn_fetch(u8, ctxt);
4888			opcode = opcode_map_0f_38[ctxt->b];
4889		}
4890	}
4891	ctxt->d = opcode.flags;
4892
4893	if (ctxt->d & ModRM)
4894		ctxt->modrm = insn_fetch(u8, ctxt);
4895
4896	/* vex-prefix instructions are not implemented */
4897	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4898	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4899		ctxt->d = NotImpl;
4900	}
4901
4902	while (ctxt->d & GroupMask) {
4903		switch (ctxt->d & GroupMask) {
4904		case Group:
4905			goffset = (ctxt->modrm >> 3) & 7;
4906			opcode = opcode.u.group[goffset];
4907			break;
4908		case GroupDual:
4909			goffset = (ctxt->modrm >> 3) & 7;
4910			if ((ctxt->modrm >> 6) == 3)
4911				opcode = opcode.u.gdual->mod3[goffset];
4912			else
4913				opcode = opcode.u.gdual->mod012[goffset];
4914			break;
4915		case RMExt:
4916			goffset = ctxt->modrm & 7;
4917			opcode = opcode.u.group[goffset];
4918			break;
4919		case Prefix:
4920			if (ctxt->rep_prefix && op_prefix)
4921				return EMULATION_FAILED;
4922			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4923			switch (simd_prefix) {
4924			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4925			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4926			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4927			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4928			}
4929			break;
4930		case Escape:
4931			if (ctxt->modrm > 0xbf) {
4932				size_t size = ARRAY_SIZE(opcode.u.esc->high);
4933				u32 index = array_index_nospec(
4934					ctxt->modrm - 0xc0, size);
4935
4936				opcode = opcode.u.esc->high[index];
4937			} else {
4938				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4939			}
4940			break;
4941		case InstrDual:
4942			if ((ctxt->modrm >> 6) == 3)
4943				opcode = opcode.u.idual->mod3;
4944			else
4945				opcode = opcode.u.idual->mod012;
4946			break;
4947		case ModeDual:
4948			if (ctxt->mode == X86EMUL_MODE_PROT64)
4949				opcode = opcode.u.mdual->mode64;
4950			else
4951				opcode = opcode.u.mdual->mode32;
4952			break;
4953		default:
4954			return EMULATION_FAILED;
4955		}
4956
4957		ctxt->d &= ~(u64)GroupMask;
4958		ctxt->d |= opcode.flags;
4959	}
4960
4961	ctxt->is_branch = opcode.flags & IsBranch;
4962
4963	/* Unrecognised? */
4964	if (ctxt->d == 0)
4965		return EMULATION_FAILED;
4966
4967	ctxt->execute = opcode.u.execute;
4968
4969	if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
4970	    likely(!(ctxt->d & EmulateOnUD)))
4971		return EMULATION_FAILED;
4972
4973	if (unlikely(ctxt->d &
4974	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4975	     No16))) {
4976		/*
4977		 * These are copied unconditionally here, and checked unconditionally
4978		 * in x86_emulate_insn.
4979		 */
4980		ctxt->check_perm = opcode.check_perm;
4981		ctxt->intercept = opcode.intercept;
4982
4983		if (ctxt->d & NotImpl)
4984			return EMULATION_FAILED;
4985
4986		if (mode == X86EMUL_MODE_PROT64) {
4987			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4988				ctxt->op_bytes = 8;
4989			else if (ctxt->d & NearBranch)
4990				ctxt->op_bytes = 8;
4991		}
4992
4993		if (ctxt->d & Op3264) {
4994			if (mode == X86EMUL_MODE_PROT64)
4995				ctxt->op_bytes = 8;
4996			else
4997				ctxt->op_bytes = 4;
4998		}
4999
5000		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5001			ctxt->op_bytes = 4;
5002
5003		if (ctxt->d & Sse)
5004			ctxt->op_bytes = 16;
5005		else if (ctxt->d & Mmx)
5006			ctxt->op_bytes = 8;
5007	}
5008
5009	/* ModRM and SIB bytes. */
5010	if (ctxt->d & ModRM) {
5011		rc = decode_modrm(ctxt, &ctxt->memop);
5012		if (!has_seg_override) {
5013			has_seg_override = true;
5014			ctxt->seg_override = ctxt->modrm_seg;
5015		}
5016	} else if (ctxt->d & MemAbs)
5017		rc = decode_abs(ctxt, &ctxt->memop);
5018	if (rc != X86EMUL_CONTINUE)
5019		goto done;
5020
5021	if (!has_seg_override)
5022		ctxt->seg_override = VCPU_SREG_DS;
5023
5024	ctxt->memop.addr.mem.seg = ctxt->seg_override;
5025
5026	/*
5027	 * Decode and fetch the source operand: register, memory
5028	 * or immediate.
5029	 */
5030	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5031	if (rc != X86EMUL_CONTINUE)
5032		goto done;
5033
5034	/*
5035	 * Decode and fetch the second source operand: register, memory
5036	 * or immediate.
5037	 */
5038	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5039	if (rc != X86EMUL_CONTINUE)
5040		goto done;
5041
5042	/* Decode and fetch the destination operand: register or memory. */
5043	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5044
5045	if (ctxt->rip_relative && likely(ctxt->memopp))
5046		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5047					ctxt->memopp->addr.mem.ea + ctxt->_eip);
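	/*
	 * Worked example (illustrative only): for "mov rax, [rip+0x10]"
	 * the ModRM decode leaves ea == 0x10, and the fixup above adds
	 * _eip, which already points past the instruction, so the
	 * effective address becomes next_rip + 0x10.
	 */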
5048
5049done:
5050	if (rc == X86EMUL_PROPAGATE_FAULT)
5051		ctxt->have_exception = true;
5052	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5053}
5054
5055bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5056{
5057	return ctxt->d & PageTable;
5058}
5059
5060static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5061{
5062	/* The second termination condition applies only to REPE
5063	 * and REPNE. Test whether the repeat string operation prefix
5064	 * is REPE/REPZ or REPNE/REPNZ and, if so, check the
5065	 * corresponding termination condition:
5066	 * 	- if REPE/REPZ and ZF = 0 then done
5067	 * 	- if REPNE/REPNZ and ZF = 1 then done
5068	 */
5069	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5070	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5071	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
5072		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5073		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
5074		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5075		return true;
5076
5077	return false;
5078}
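
/*
 * Worked example (illustrative only): "repe cmpsb" is opcode 0xa6 with
 * rep_prefix == REPE_PREFIX.  Each iteration compares one byte and, if
 * the comparison clears ZF, the test above reports completion:
 *
 *	REPE/REPZ:   done when ZF == 0
 *	REPNE/REPNZ: done when ZF == 1
 *
 * Running out of the RCX count is the first termination condition and
 * is checked separately in x86_emulate_insn().
 */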
5079
5080static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5081{
5082	int rc;
5083
5084	kvm_fpu_get();
5085	rc = asm_safe("fwait");
5086	kvm_fpu_put();
5087
5088	if (unlikely(rc != X86EMUL_CONTINUE))
5089		return emulate_exception(ctxt, MF_VECTOR, 0, false);
5090
5091	return X86EMUL_CONTINUE;
5092}
5093
5094static void fetch_possible_mmx_operand(struct operand *op)
5095{
5096	if (op->type == OP_MM)
5097		kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
5098}
5099
5100static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
5101{
5102	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5103
5104	if (!(ctxt->d & ByteOp))
5105		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5106
5107	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5108	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5109	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5110	    : "c"(ctxt->src2.val));
5111
5112	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5113	if (!fop) /* exception is returned in fop variable */
5114		return emulate_de(ctxt);
5115	return X86EMUL_CONTINUE;
5116}
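
/*
 * Illustrative sketch (not used by the code above): a fastop table packs
 * one size-specific handler every FASTOP_SIZE bytes, so the adjustment
 * "fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE" selects:
 *
 *	dst.bytes == 2  ->  fop + 1 * FASTOP_SIZE	(word)
 *	dst.bytes == 4  ->  fop + 2 * FASTOP_SIZE	(dword)
 *	dst.bytes == 8  ->  fop + 3 * FASTOP_SIZE	(qword)
 *
 * ByteOp instructions skip the adjustment and run the byte handler at
 * offset 0.
 */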
5117
5118void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5119{
5120	/* Clear fields that are set conditionally but read without a guard. */
5121	ctxt->rip_relative = false;
5122	ctxt->rex_prefix = 0;
5123	ctxt->lock_prefix = 0;
5124	ctxt->rep_prefix = 0;
5125	ctxt->regs_valid = 0;
5126	ctxt->regs_dirty = 0;
5127
5128	ctxt->io_read.pos = 0;
5129	ctxt->io_read.end = 0;
5130	ctxt->mem_read.end = 0;
5131}
5132
5133int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5134{
5135	const struct x86_emulate_ops *ops = ctxt->ops;
5136	int rc = X86EMUL_CONTINUE;
5137	int saved_dst_type = ctxt->dst.type;
5138	bool is_guest_mode = ctxt->ops->is_guest_mode(ctxt);
5139
5140	ctxt->mem_read.pos = 0;
5141
5142	/* The LOCK prefix is allowed only for certain instructions with a memory destination */
5143	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5144		rc = emulate_ud(ctxt);
5145		goto done;
5146	}
5147
5148	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5149		rc = emulate_ud(ctxt);
5150		goto done;
5151	}
5152
5153	if (unlikely(ctxt->d &
5154		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5155		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5156				(ctxt->d & Undefined)) {
5157			rc = emulate_ud(ctxt);
5158			goto done;
5159		}
5160
5161		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5162		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5163			rc = emulate_ud(ctxt);
5164			goto done;
5165		}
5166
5167		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5168			rc = emulate_nm(ctxt);
5169			goto done;
5170		}
5171
5172		if (ctxt->d & Mmx) {
5173			rc = flush_pending_x87_faults(ctxt);
5174			if (rc != X86EMUL_CONTINUE)
5175				goto done;
5176			/*
5177			 * Now that we know the FPU is exception-safe, we can fetch
5178			 * operands from it.
5179			 */
5180			fetch_possible_mmx_operand(&ctxt->src);
5181			fetch_possible_mmx_operand(&ctxt->src2);
5182			if (!(ctxt->d & Mov))
5183				fetch_possible_mmx_operand(&ctxt->dst);
5184		}
5185
5186		if (unlikely(is_guest_mode) && ctxt->intercept) {
5187			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5188						      X86_ICPT_PRE_EXCEPT);
5189			if (rc != X86EMUL_CONTINUE)
5190				goto done;
5191		}
5192
5193		/* Instruction can be executed only in protected mode */
5194		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5195			rc = emulate_ud(ctxt);
5196			goto done;
5197		}
5198
5199		/* Privileged instructions can be executed only at CPL 0 */
5200		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5201			if (ctxt->d & PrivUD)
5202				rc = emulate_ud(ctxt);
5203			else
5204				rc = emulate_gp(ctxt, 0);
5205			goto done;
5206		}
5207
5208		/* Do instruction specific permission checks */
5209		if (ctxt->d & CheckPerm) {
5210			rc = ctxt->check_perm(ctxt);
5211			if (rc != X86EMUL_CONTINUE)
5212				goto done;
5213		}
5214
5215		if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) {
5216			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5217						      X86_ICPT_POST_EXCEPT);
5218			if (rc != X86EMUL_CONTINUE)
5219				goto done;
5220		}
5221
5222		if (ctxt->rep_prefix && (ctxt->d & String)) {
5223			/* All REP prefixes have the same first termination condition */
5224			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5225				string_registers_quirk(ctxt);
5226				ctxt->eip = ctxt->_eip;
5227				ctxt->eflags &= ~X86_EFLAGS_RF;
5228				goto done;
5229			}
5230		}
5231	}
5232
5233	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5234		rc = segmented_read(ctxt, ctxt->src.addr.mem,
5235				    ctxt->src.valptr, ctxt->src.bytes);
5236		if (rc != X86EMUL_CONTINUE)
5237			goto done;
5238		ctxt->src.orig_val64 = ctxt->src.val64;
5239	}
5240
5241	if (ctxt->src2.type == OP_MEM) {
5242		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5243				    &ctxt->src2.val, ctxt->src2.bytes);
5244		if (rc != X86EMUL_CONTINUE)
5245			goto done;
5246	}
5247
5248	if ((ctxt->d & DstMask) == ImplicitOps)
5249		goto special_insn;
5250
5251
5252	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5253		/* optimisation - avoid the slow emulated read when dst is only written (Mov) */
5254		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5255				   &ctxt->dst.val, ctxt->dst.bytes);
5256		if (rc != X86EMUL_CONTINUE) {
5257			if (!(ctxt->d & NoWrite) &&
5258			    rc == X86EMUL_PROPAGATE_FAULT &&
5259			    ctxt->exception.vector == PF_VECTOR)
5260				ctxt->exception.error_code |= PFERR_WRITE_MASK;
5261			goto done;
5262		}
5263	}
5264	/* Copy full 64-bit value for CMPXCHG8B.  */
5265	ctxt->dst.orig_val64 = ctxt->dst.val64;
5266
5267special_insn:
5268
5269	if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) {
5270		rc = emulator_check_intercept(ctxt, ctxt->intercept,
5271					      X86_ICPT_POST_MEMACCESS);
5272		if (rc != X86EMUL_CONTINUE)
5273			goto done;
5274	}
5275
5276	if (ctxt->rep_prefix && (ctxt->d & String))
5277		ctxt->eflags |= X86_EFLAGS_RF;
5278	else
5279		ctxt->eflags &= ~X86_EFLAGS_RF;
5280
5281	if (ctxt->execute) {
5282		if (ctxt->d & Fastop)
5283			rc = fastop(ctxt, ctxt->fop);
5284		else
5285			rc = ctxt->execute(ctxt);
5286		if (rc != X86EMUL_CONTINUE)
5287			goto done;
5288		goto writeback;
5289	}
5290
5291	if (ctxt->opcode_len == 2)
5292		goto twobyte_insn;
5293	else if (ctxt->opcode_len == 3)
5294		goto threebyte_insn;
5295
5296	switch (ctxt->b) {
5297	case 0x70 ... 0x7f: /* jcc (short) */
5298		if (test_cc(ctxt->b, ctxt->eflags))
5299			rc = jmp_rel(ctxt, ctxt->src.val);
5300		break;
5301	case 0x8d: /* lea r16/r32, m */
5302		ctxt->dst.val = ctxt->src.addr.mem.ea;
5303		break;
5304	case 0x90 ... 0x97: /* nop / xchg reg, rax */
5305		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5306			ctxt->dst.type = OP_NONE;
5307		else
5308			rc = em_xchg(ctxt);
5309		break;
5310	case 0x98: /* cbw/cwde/cdqe */
5311		switch (ctxt->op_bytes) {
5312		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5313		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5314		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5315		}
5316		break;
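	/*
	 * Worked example (illustrative only): with op_bytes == 2 the 0x98
	 * case above implements CBW, sign-extending AL into AX, so
	 * AL == 0x80 becomes AX == 0xff80; op_bytes == 4 (CWDE) and
	 * op_bytes == 8 (CDQE) widen AX and EAX the same way.
	 */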
5317	case 0xcc:		/* int3 */
5318		rc = emulate_int(ctxt, 3);
5319		break;
5320	case 0xcd:		/* int n */
5321		rc = emulate_int(ctxt, ctxt->src.val);
5322		break;
5323	case 0xce:		/* into */
5324		if (ctxt->eflags & X86_EFLAGS_OF)
5325			rc = emulate_int(ctxt, 4);
5326		break;
5327	case 0xe9: /* jmp rel */
5328	case 0xeb: /* jmp rel short */
5329		rc = jmp_rel(ctxt, ctxt->src.val);
5330		ctxt->dst.type = OP_NONE; /* Disable writeback. */
5331		break;
5332	case 0xf4:              /* hlt */
5333		ctxt->ops->halt(ctxt);
5334		break;
5335	case 0xf5:	/* cmc */
5336		/* complement the carry flag in EFLAGS */
5337		ctxt->eflags ^= X86_EFLAGS_CF;
5338		break;
5339	case 0xf8: /* clc */
5340		ctxt->eflags &= ~X86_EFLAGS_CF;
5341		break;
5342	case 0xf9: /* stc */
5343		ctxt->eflags |= X86_EFLAGS_CF;
5344		break;
5345	case 0xfc: /* cld */
5346		ctxt->eflags &= ~X86_EFLAGS_DF;
5347		break;
5348	case 0xfd: /* std */
5349		ctxt->eflags |= X86_EFLAGS_DF;
5350		break;
5351	default:
5352		goto cannot_emulate;
5353	}
5354
5355	if (rc != X86EMUL_CONTINUE)
5356		goto done;
5357
5358writeback:
5359	if (ctxt->d & SrcWrite) {
5360		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5361		rc = writeback(ctxt, &ctxt->src);
5362		if (rc != X86EMUL_CONTINUE)
5363			goto done;
5364	}
5365	if (!(ctxt->d & NoWrite)) {
5366		rc = writeback(ctxt, &ctxt->dst);
5367		if (rc != X86EMUL_CONTINUE)
5368			goto done;
5369	}
5370
5371	/*
5372	 * Restore dst type in case the decode will be reused
5373	 * (happens for string instructions).
5374	 */
5375	ctxt->dst.type = saved_dst_type;
5376
5377	if ((ctxt->d & SrcMask) == SrcSI)
5378		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5379
5380	if ((ctxt->d & DstMask) == DstDI)
5381		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5382
5383	if (ctxt->rep_prefix && (ctxt->d & String)) {
5384		unsigned int count;
5385		struct read_cache *r = &ctxt->io_read;
5386		if ((ctxt->d & SrcMask) == SrcSI)
5387			count = ctxt->src.count;
5388		else
5389			count = ctxt->dst.count;
5390		register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5391
5392		if (!string_insn_completed(ctxt)) {
5393			/*
5394			 * Re-enter the guest when the pio read-ahead buffer is
5395			 * drained or, if it is not used, after every 1024 iterations.
5396			 */
5397			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5398			    (r->end == 0 || r->end != r->pos)) {
5399				/*
5400				 * Reset read cache. Usually happens before
5401				 * decode, but since instruction is restarted
5402				 * we have to do it here.
5403				 */
5404				ctxt->mem_read.end = 0;
5405				writeback_registers(ctxt);
5406				return EMULATION_RESTART;
5407			}
5408			goto done; /* skip rip writeback */
5409		}
5410		ctxt->eflags &= ~X86_EFLAGS_RF;
5411	}
5412
5413	ctxt->eip = ctxt->_eip;
5414	if (ctxt->mode != X86EMUL_MODE_PROT64)
5415		ctxt->eip = (u32)ctxt->_eip;
5416
5417done:
5418	if (rc == X86EMUL_PROPAGATE_FAULT) {
5419		if (KVM_EMULATOR_BUG_ON(ctxt->exception.vector > 0x1f, ctxt))
5420			return EMULATION_FAILED;
5421		ctxt->have_exception = true;
5422	}
5423	if (rc == X86EMUL_INTERCEPTED)
5424		return EMULATION_INTERCEPTED;
5425
5426	if (rc == X86EMUL_CONTINUE)
5427		writeback_registers(ctxt);
5428
5429	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5430
5431twobyte_insn:
5432	switch (ctxt->b) {
5433	case 0x09:		/* wbinvd */
5434		(ctxt->ops->wbinvd)(ctxt);
5435		break;
5436	case 0x08:		/* invd */
5437	case 0x0d:		/* GrpP (prefetch) */
5438	case 0x18:		/* Grp16 (prefetch/nop) */
5439	case 0x1f:		/* nop */
5440		break;
5441	case 0x20: /* mov cr, reg */
5442		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5443		break;
5444	case 0x21: /* mov from dr to reg */
5445		ctxt->dst.val = ops->get_dr(ctxt, ctxt->modrm_reg);
5446		break;
5447	case 0x40 ... 0x4f:	/* cmov */
5448		if (test_cc(ctxt->b, ctxt->eflags))
5449			ctxt->dst.val = ctxt->src.val;
5450		else if (ctxt->op_bytes != 4)
5451			ctxt->dst.type = OP_NONE; /* no writeback */
5452		break;
5453	case 0x80 ... 0x8f: /* jcc (near) */
5454		if (test_cc(ctxt->b, ctxt->eflags))
5455			rc = jmp_rel(ctxt, ctxt->src.val);
5456		break;
5457	case 0x90 ... 0x9f:     /* setcc r/m8 */
5458		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5459		break;
5460	case 0xb6 ... 0xb7:	/* movzx */
5461		ctxt->dst.bytes = ctxt->op_bytes;
5462		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5463						       : (u16) ctxt->src.val;
5464		break;
5465	case 0xbe ... 0xbf:	/* movsx */
5466		ctxt->dst.bytes = ctxt->op_bytes;
5467		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5468							(s16) ctxt->src.val;
5469		break;
5470	default:
5471		goto cannot_emulate;
5472	}
5473
5474threebyte_insn:
5475
5476	if (rc != X86EMUL_CONTINUE)
5477		goto done;
5478
5479	goto writeback;
5480
5481cannot_emulate:
5482	return EMULATION_FAILED;
5483}
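
/*
 * Hypothetical caller sketch (for illustration only; the real KVM call
 * site in x86.c is more involved): EMULATION_RESTART above asks for the
 * string instruction to be re-run without re-entering the guest:
 *
 *	rc = x86_decode_insn(ctxt, insn, insn_len, emul_type);
 *	if (rc == EMULATION_OK) {
 *		do {
 *			rc = x86_emulate_insn(ctxt);
 *		} while (rc == EMULATION_RESTART);
 *	}
 */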
5484
5485void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5486{
5487	invalidate_registers(ctxt);
5488}
5489
5490void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5491{
5492	writeback_registers(ctxt);
5493}
5494
5495bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5496{
5497	if (ctxt->rep_prefix && (ctxt->d & String))
5498		return false;	/* string ops touch a new address every iteration */
5499
5500	if (ctxt->d & TwoMemOp)
5501		return false;	/* two memory operands mean two distinct GPAs */
5502
5503	return true;
5504}
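
/*
 * Worked example (illustrative only): "rep movsb" fails both tests
 * above, being a String instruction under a REP prefix and having two
 * memory operands, so the caller must translate a fresh GPA for every
 * access instead of reusing a cached one.
 */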