   1// SPDX-License-Identifier: GPL-2.0-only
   2/******************************************************************************
   3 * emulate.c
   4 *
   5 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
   6 *
   7 * Copyright (c) 2005 Keir Fraser
   8 *
   9 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
  10 * privileged instructions:
  11 *
  12 * Copyright (C) 2006 Qumranet
  13 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  14 *
  15 *   Avi Kivity <avi@qumranet.com>
  16 *   Yaniv Kamay <yaniv@qumranet.com>
  17 *
  18 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
  19 */
  20
  21#include <linux/kvm_host.h>
  22#include "kvm_cache_regs.h"
  23#include "kvm_emulate.h"
  24#include <linux/stringify.h>
  25#include <asm/debugreg.h>
  26#include <asm/nospec-branch.h>
  27#include <asm/ibt.h>
  28
  29#include "x86.h"
  30#include "tss.h"
  31#include "mmu.h"
  32#include "pmu.h"
  33
  34/*
  35 * Operand types
  36 */
  37#define OpNone             0ull
  38#define OpImplicit         1ull  /* No generic decode */
  39#define OpReg              2ull  /* Register */
  40#define OpMem              3ull  /* Memory */
  41#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
  42#define OpDI               5ull  /* ES:DI/EDI/RDI */
  43#define OpMem64            6ull  /* Memory, 64-bit */
  44#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
  45#define OpDX               8ull  /* DX register */
  46#define OpCL               9ull  /* CL register (for shifts) */
  47#define OpImmByte         10ull  /* 8-bit sign extended immediate */
  48#define OpOne             11ull  /* Implied 1 */
  49#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
  50#define OpMem16           13ull  /* Memory operand (16-bit). */
  51#define OpMem32           14ull  /* Memory operand (32-bit). */
  52#define OpImmU            15ull  /* Immediate operand, zero extended */
  53#define OpSI              16ull  /* SI/ESI/RSI */
  54#define OpImmFAddr        17ull  /* Immediate far address */
  55#define OpMemFAddr        18ull  /* Far address in memory */
  56#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
  57#define OpES              20ull  /* ES */
  58#define OpCS              21ull  /* CS */
  59#define OpSS              22ull  /* SS */
  60#define OpDS              23ull  /* DS */
  61#define OpFS              24ull  /* FS */
  62#define OpGS              25ull  /* GS */
  63#define OpMem8            26ull  /* 8-bit zero extended memory operand */
  64#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
  65#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
  66#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
  67#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */
  68
  69#define OpBits             5  /* Width of operand field */
  70#define OpMask             ((1ull << OpBits) - 1)
  71
  72/*
  73 * Opcode effective-address decode tables.
  74 * Note that we only emulate instructions that have at least one memory
  75 * operand (excluding implicit stack references). We assume that stack
  76 * references and instruction fetches will never occur in special memory
  77 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
  78 * not be handled.
  79 */
  80
  81/* Operand sizes: 8-bit operands or specified/overridden size. */
  82#define ByteOp      (1<<0)	/* 8-bit operands. */
  83/* Destination operand type. */
  84#define DstShift    1
  85#define ImplicitOps (OpImplicit << DstShift)
  86#define DstReg      (OpReg << DstShift)
  87#define DstMem      (OpMem << DstShift)
  88#define DstAcc      (OpAcc << DstShift)
  89#define DstDI       (OpDI << DstShift)
  90#define DstMem64    (OpMem64 << DstShift)
  91#define DstMem16    (OpMem16 << DstShift)
  92#define DstImmUByte (OpImmUByte << DstShift)
  93#define DstDX       (OpDX << DstShift)
  94#define DstAccLo    (OpAccLo << DstShift)
  95#define DstMask     (OpMask << DstShift)
  96/* Source operand type. */
  97#define SrcShift    6
  98#define SrcNone     (OpNone << SrcShift)
  99#define SrcReg      (OpReg << SrcShift)
 100#define SrcMem      (OpMem << SrcShift)
 101#define SrcMem16    (OpMem16 << SrcShift)
 102#define SrcMem32    (OpMem32 << SrcShift)
 103#define SrcImm      (OpImm << SrcShift)
 104#define SrcImmByte  (OpImmByte << SrcShift)
 105#define SrcOne      (OpOne << SrcShift)
 106#define SrcImmUByte (OpImmUByte << SrcShift)
 107#define SrcImmU     (OpImmU << SrcShift)
 108#define SrcSI       (OpSI << SrcShift)
 109#define SrcXLat     (OpXLat << SrcShift)
 110#define SrcImmFAddr (OpImmFAddr << SrcShift)
 111#define SrcMemFAddr (OpMemFAddr << SrcShift)
 112#define SrcAcc      (OpAcc << SrcShift)
 113#define SrcImmU16   (OpImmU16 << SrcShift)
 114#define SrcImm64    (OpImm64 << SrcShift)
 115#define SrcDX       (OpDX << SrcShift)
 116#define SrcMem8     (OpMem8 << SrcShift)
 117#define SrcAccHi    (OpAccHi << SrcShift)
 118#define SrcMask     (OpMask << SrcShift)
 119#define BitOp       (1<<11)
 120#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
 121#define String      (1<<13)     /* String instruction (rep capable) */
 122#define Stack       (1<<14)     /* Stack instruction (push/pop) */
 123#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
 124#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
 125#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
 126#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
 127#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
 128#define Escape      (5<<15)     /* Escape to coprocessor instruction */
 129#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
 130#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
 131#define Sse         (1<<18)     /* SSE Vector instruction */
 132/* Generic ModRM decode. */
 133#define ModRM       (1<<19)
 134/* Destination is only written; never read. */
 135#define Mov         (1<<20)
 136/* Misc flags */
 137#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
 138#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
 139#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
 140#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
 141#define Undefined   (1<<25) /* No Such Instruction */
 142#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
 143#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
 144#define No64	    (1<<28)
 145#define PageTable   (1 << 29)   /* instruction used to write page table */
 146#define NotImpl     (1 << 30)   /* instruction is not implemented */
 147/* Source 2 operand type */
 148#define Src2Shift   (31)
 149#define Src2None    (OpNone << Src2Shift)
 150#define Src2Mem     (OpMem << Src2Shift)
 151#define Src2CL      (OpCL << Src2Shift)
 152#define Src2ImmByte (OpImmByte << Src2Shift)
 153#define Src2One     (OpOne << Src2Shift)
 154#define Src2Imm     (OpImm << Src2Shift)
 155#define Src2ES      (OpES << Src2Shift)
 156#define Src2CS      (OpCS << Src2Shift)
 157#define Src2SS      (OpSS << Src2Shift)
 158#define Src2DS      (OpDS << Src2Shift)
 159#define Src2FS      (OpFS << Src2Shift)
 160#define Src2GS      (OpGS << Src2Shift)
 161#define Src2Mask    (OpMask << Src2Shift)
 162#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
 163#define AlignMask   ((u64)7 << 41)
 164#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
 165#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
 166#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
 167#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
 168#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
 169#define NoWrite     ((u64)1 << 45)  /* No writeback */
 170#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
 171#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
 172#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
 173#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
 174#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
 175#define NearBranch  ((u64)1 << 52)  /* Near branches */
 176#define No16	    ((u64)1 << 53)  /* No 16 bit operand */
 177#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
  178#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */
 179#define IsBranch    ((u64)1 << 56)  /* Instruction is considered a branch. */
 180
 181#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
 182
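/*
 * Illustrative sketch, not part of the original file: each of the
 * Dst/Src/Src2 descriptors above is a 5-bit OpXxx value packed into the
 * u64 flags word, so decode recovers each field with a shift and OpMask:
 *
 *	u64 d = DstReg | SrcMem | Src2CL;
 *	unsigned dst  = (d >> DstShift)  & OpMask;	// == OpReg
 *	unsigned src  = (d >> SrcShift)  & OpMask;	// == OpMem
 *	unsigned src2 = (d >> Src2Shift) & OpMask;	// == OpCL
 */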
 183#define X2(x...) x, x
 184#define X3(x...) X2(x), x
 185#define X4(x...) X2(x), X2(x)
 186#define X5(x...) X4(x), x
 187#define X6(x...) X4(x), X2(x)
 188#define X7(x...) X4(x), X3(x)
 189#define X8(x...) X4(x), X4(x)
 190#define X16(x...) X8(x), X8(x)
 191
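/*
 * Illustrative expansion, not part of the original file: the X* macros
 * replicate opcode-table entries, e.g.
 *
 *	X4(DstReg | SrcMem)
 * becomes
 *	DstReg | SrcMem, DstReg | SrcMem, DstReg | SrcMem, DstReg | SrcMem
 *
 * which keeps rows of identical entries (such as the sixteen Jcc slots)
 * readable in the opcode tables later in this file.
 */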
 192struct opcode {
 193	u64 flags;
 194	u8 intercept;
 195	u8 pad[7];
 196	union {
 197		int (*execute)(struct x86_emulate_ctxt *ctxt);
 198		const struct opcode *group;
 199		const struct group_dual *gdual;
 200		const struct gprefix *gprefix;
 201		const struct escape *esc;
 202		const struct instr_dual *idual;
 203		const struct mode_dual *mdual;
 204		void (*fastop)(struct fastop *fake);
 205	} u;
 206	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
 207};
 208
 209struct group_dual {
 210	struct opcode mod012[8];
 211	struct opcode mod3[8];
 212};
 213
 214struct gprefix {
 215	struct opcode pfx_no;
 216	struct opcode pfx_66;
 217	struct opcode pfx_f2;
 218	struct opcode pfx_f3;
 219};
 220
 221struct escape {
 222	struct opcode op[8];
 223	struct opcode high[64];
 224};
 225
 226struct instr_dual {
 227	struct opcode mod012;
 228	struct opcode mod3;
 229};
 230
 231struct mode_dual {
 232	struct opcode mode32;
 233	struct opcode mode64;
 234};
 235
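/*
 * Illustrative sketch, not part of the original file: a GroupDual entry
 * is resolved by the ModRM byte during decode, roughly
 *
 *	unsigned goffset = (modrm >> 3) & 7;		// reg field
 *	op = (modrm >> 6) == 3 ? gdual->mod3[goffset]
 *			       : gdual->mod012[goffset];
 *
 * while a struct gprefix is indexed by which of the none/66/f2/f3
 * prefixes preceded the opcode.
 */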
 236#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
 237
 238enum x86_transfer_type {
 239	X86_TRANSFER_NONE,
 240	X86_TRANSFER_CALL_JMP,
 241	X86_TRANSFER_RET,
 242	X86_TRANSFER_TASK_SWITCH,
 243};
 244
 245static void writeback_registers(struct x86_emulate_ctxt *ctxt)
 246{
 247	unsigned long dirty = ctxt->regs_dirty;
 248	unsigned reg;
 249
 250	for_each_set_bit(reg, &dirty, NR_EMULATOR_GPRS)
 251		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
 252}
 253
 254static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
 255{
 256	ctxt->regs_dirty = 0;
 257	ctxt->regs_valid = 0;
 258}
 259
 260/*
 261 * These EFLAGS bits are restored from saved value during emulation, and
 262 * any changes are written back to the saved value after emulation.
 263 */
 264#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
 265		     X86_EFLAGS_PF|X86_EFLAGS_CF)
 266
 267#ifdef CONFIG_X86_64
 268#define ON64(x) x
 269#else
 270#define ON64(x)
 271#endif
 272
 273/*
 274 * fastop functions have a special calling convention:
 275 *
 276 * dst:    rax        (in/out)
 277 * src:    rdx        (in/out)
 278 * src2:   rcx        (in)
 279 * flags:  rflags     (in/out)
 280 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 281 *
 282 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 283 * different operand sizes can be reached by calculation, rather than a jump
 284 * table (which would be bigger than the code).
 285 *
 286 * The 16 byte alignment, considering 5 bytes for the RET thunk, 3 for ENDBR
 287 * and 1 for the straight line speculation INT3, leaves 7 bytes for the
 288 * body of the function.  Currently none is larger than 4.
 289 */
 290static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
 291
 292#define FASTOP_SIZE	16
 293
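/*
 * Illustrative sketch, not part of the original file: because each size
 * variant occupies exactly FASTOP_SIZE bytes, fastop() can index into an
 * em_##op block by operand size instead of using a jump table, roughly:
 *
 *	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;	// 1/2/4/8 -> slot 0/1/2/3
 */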
 294#define __FOP_FUNC(name) \
 295	".align " __stringify(FASTOP_SIZE) " \n\t" \
 296	".type " name ", @function \n\t" \
 297	name ":\n\t" \
 298	ASM_ENDBR \
 299	IBT_NOSEAL(name)
 300
 301#define FOP_FUNC(name) \
 302	__FOP_FUNC(#name)
 303
 304#define __FOP_RET(name) \
 305	"11: " ASM_RET \
 306	".size " name ", .-" name "\n\t"
 307
 308#define FOP_RET(name) \
 309	__FOP_RET(#name)
 310
 311#define __FOP_START(op, align) \
 312	extern void em_##op(struct fastop *fake); \
 313	asm(".pushsection .text, \"ax\" \n\t" \
 314	    ".global em_" #op " \n\t" \
 315	    ".align " __stringify(align) " \n\t" \
 316	    "em_" #op ":\n\t"
 317
 318#define FOP_START(op) __FOP_START(op, FASTOP_SIZE)
 319
 320#define FOP_END \
 321	    ".popsection")
 322
 323#define __FOPNOP(name) \
 324	__FOP_FUNC(name) \
 325	__FOP_RET(name)
 326
 327#define FOPNOP() \
 328	__FOPNOP(__stringify(__UNIQUE_ID(nop)))
 329
 330#define FOP1E(op,  dst) \
 331	__FOP_FUNC(#op "_" #dst) \
 332	"10: " #op " %" #dst " \n\t" \
 333	__FOP_RET(#op "_" #dst)
 334
 335#define FOP1EEX(op,  dst) \
 336	FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)
 337
 338#define FASTOP1(op) \
 339	FOP_START(op) \
 340	FOP1E(op##b, al) \
 341	FOP1E(op##w, ax) \
 342	FOP1E(op##l, eax) \
 343	ON64(FOP1E(op##q, rax))	\
 344	FOP_END
 345
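/*
 * Illustrative expansion, not part of the original file: FASTOP1(not)
 * emits four FASTOP_SIZE-spaced entry points, roughly equivalent to
 *
 *	em_not+ 0:	notb %al;  ret
 *	em_not+16:	notw %ax;  ret
 *	em_not+32:	notl %eax; ret
 *	em_not+48:	notq %rax; ret		(64-bit builds only)
 */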
 346/* 1-operand, using src2 (for MUL/DIV r/m) */
 347#define FASTOP1SRC2(op, name) \
 348	FOP_START(name) \
 349	FOP1E(op, cl) \
 350	FOP1E(op, cx) \
 351	FOP1E(op, ecx) \
 352	ON64(FOP1E(op, rcx)) \
 353	FOP_END
 354
 355/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
 356#define FASTOP1SRC2EX(op, name) \
 357	FOP_START(name) \
 358	FOP1EEX(op, cl) \
 359	FOP1EEX(op, cx) \
 360	FOP1EEX(op, ecx) \
 361	ON64(FOP1EEX(op, rcx)) \
 362	FOP_END
 363
 364#define FOP2E(op,  dst, src)	   \
 365	__FOP_FUNC(#op "_" #dst "_" #src) \
 366	#op " %" #src ", %" #dst " \n\t" \
 367	__FOP_RET(#op "_" #dst "_" #src)
 368
 369#define FASTOP2(op) \
 370	FOP_START(op) \
 371	FOP2E(op##b, al, dl) \
 372	FOP2E(op##w, ax, dx) \
 373	FOP2E(op##l, eax, edx) \
 374	ON64(FOP2E(op##q, rax, rdx)) \
 375	FOP_END
 376
 377/* 2 operand, word only */
 378#define FASTOP2W(op) \
 379	FOP_START(op) \
 380	FOPNOP() \
 381	FOP2E(op##w, ax, dx) \
 382	FOP2E(op##l, eax, edx) \
 383	ON64(FOP2E(op##q, rax, rdx)) \
 384	FOP_END
 385
 386/* 2 operand, src is CL */
 387#define FASTOP2CL(op) \
 388	FOP_START(op) \
 389	FOP2E(op##b, al, cl) \
 390	FOP2E(op##w, ax, cl) \
 391	FOP2E(op##l, eax, cl) \
 392	ON64(FOP2E(op##q, rax, cl)) \
 393	FOP_END
 394
 395/* 2 operand, src and dest are reversed */
 396#define FASTOP2R(op, name) \
 397	FOP_START(name) \
 398	FOP2E(op##b, dl, al) \
 399	FOP2E(op##w, dx, ax) \
 400	FOP2E(op##l, edx, eax) \
 401	ON64(FOP2E(op##q, rdx, rax)) \
 402	FOP_END
 403
 404#define FOP3E(op,  dst, src, src2) \
 405	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
 406	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
 407	__FOP_RET(#op "_" #dst "_" #src "_" #src2)
 408
 409/* 3-operand, word-only, src2=cl */
 410#define FASTOP3WCL(op) \
 411	FOP_START(op) \
 412	FOPNOP() \
 413	FOP3E(op##w, ax, dx, cl) \
 414	FOP3E(op##l, eax, edx, cl) \
 415	ON64(FOP3E(op##q, rax, rdx, cl)) \
 416	FOP_END
 417
 418/* Special case for SETcc - 1 instruction per cc */
 419#define FOP_SETCC(op) \
 420	FOP_FUNC(op) \
 421	#op " %al \n\t" \
 422	FOP_RET(op)
 423
 424FOP_START(setcc)
 425FOP_SETCC(seto)
 426FOP_SETCC(setno)
 427FOP_SETCC(setc)
 428FOP_SETCC(setnc)
 429FOP_SETCC(setz)
 430FOP_SETCC(setnz)
 431FOP_SETCC(setbe)
 432FOP_SETCC(setnbe)
 433FOP_SETCC(sets)
 434FOP_SETCC(setns)
 435FOP_SETCC(setp)
 436FOP_SETCC(setnp)
 437FOP_SETCC(setl)
 438FOP_SETCC(setnl)
 439FOP_SETCC(setle)
 440FOP_SETCC(setnle)
 441FOP_END;
 442
 443FOP_START(salc)
 444FOP_FUNC(salc)
 445"pushf; sbb %al, %al; popf \n\t"
 446FOP_RET(salc)
 447FOP_END;
 448
 449/*
 450 * XXX: inoutclob user must know where the argument is being expanded.
 451 *      Using asm goto would allow us to remove _fault.
 452 */
 453#define asm_safe(insn, inoutclob...) \
 454({ \
 455	int _fault = 0; \
 456 \
 457	asm volatile("1:" insn "\n" \
 458	             "2:\n" \
 459		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
 460	             : [_fault] "+r"(_fault) inoutclob ); \
 461 \
 462	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
 463})
 464
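/*
 * Illustrative usage, not part of this excerpt: the FXSAVE/FXRSTOR
 * emulation later in this file wraps a potentially faulting instruction
 * as
 *
 *	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
 *
 * so a fault inside the instruction yields X86EMUL_UNHANDLEABLE instead
 * of an unhandled kernel exception.
 */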
 465static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
 466				    enum x86_intercept intercept,
 467				    enum x86_intercept_stage stage)
 468{
 469	struct x86_instruction_info info = {
 470		.intercept  = intercept,
 471		.rep_prefix = ctxt->rep_prefix,
 472		.modrm_mod  = ctxt->modrm_mod,
 473		.modrm_reg  = ctxt->modrm_reg,
 474		.modrm_rm   = ctxt->modrm_rm,
 475		.src_val    = ctxt->src.val64,
 476		.dst_val    = ctxt->dst.val64,
 477		.src_bytes  = ctxt->src.bytes,
 478		.dst_bytes  = ctxt->dst.bytes,
 479		.ad_bytes   = ctxt->ad_bytes,
 480		.next_rip   = ctxt->eip,
 481	};
 482
 483	return ctxt->ops->intercept(ctxt, &info, stage);
 484}
 485
 486static void assign_masked(ulong *dest, ulong src, ulong mask)
 487{
 488	*dest = (*dest & ~mask) | (src & mask);
 489}
 490
 491static void assign_register(unsigned long *reg, u64 val, int bytes)
 492{
 493	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
 494	switch (bytes) {
 495	case 1:
 496		*(u8 *)reg = (u8)val;
 497		break;
 498	case 2:
 499		*(u16 *)reg = (u16)val;
 500		break;
 501	case 4:
 502		*reg = (u32)val;
 503		break;	/* 64b: zero-extend */
 504	case 8:
 505		*reg = val;
 506		break;
 507	}
 508}
 509
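/*
 * Worked example, not part of the original file: with *reg initially
 * 0xffffffffffffffff, assign_register(reg, 0x1234, 4) leaves
 * 0x0000000000001234 (32-bit writes zero-extend the destination), while
 * assign_register(reg, 0x1234, 2) leaves 0xffffffffffff1234 (8- and
 * 16-bit writes merge into the old value).
 */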
 510static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
 511{
 512	return (1UL << (ctxt->ad_bytes << 3)) - 1;
 513}
 514
 515static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
 516{
 517	u16 sel;
 518	struct desc_struct ss;
 519
 520	if (ctxt->mode == X86EMUL_MODE_PROT64)
 521		return ~0UL;
 522	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
 523	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
 524}
 525
 526static int stack_size(struct x86_emulate_ctxt *ctxt)
 527{
 528	return (__fls(stack_mask(ctxt)) + 1) >> 3;
 529}
 530
 531/* Access/update address held in a register, based on addressing mode. */
 532static inline unsigned long
 533address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
 534{
 535	if (ctxt->ad_bytes == sizeof(unsigned long))
 536		return reg;
 537	else
 538		return reg & ad_mask(ctxt);
 539}
 540
 541static inline unsigned long
 542register_address(struct x86_emulate_ctxt *ctxt, int reg)
 543{
 544	return address_mask(ctxt, reg_read(ctxt, reg));
 545}
 546
 547static void masked_increment(ulong *reg, ulong mask, int inc)
 548{
 549	assign_masked(reg, *reg + inc, mask);
 550}
 551
 552static inline void
 553register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
 554{
 555	ulong *preg = reg_rmw(ctxt, reg);
 556
 557	assign_register(preg, *preg + inc, ctxt->ad_bytes);
 558}
 559
 560static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
 561{
 562	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
 563}
 564
 565static u32 desc_limit_scaled(struct desc_struct *desc)
 566{
 567	u32 limit = get_desc_limit(desc);
 568
 569	return desc->g ? (limit << 12) | 0xfff : limit;
 570}
 571
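/*
 * Worked example, not part of the original file: with the granularity
 * bit set, a raw limit of 0x0ffff scales to (0xffff << 12) | 0xfff ==
 * 0xffffffff, i.e. a 4 GiB segment built from 4 KiB pages.
 */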
 572static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
 573{
 574	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
 575		return 0;
 576
 577	return ctxt->ops->get_cached_segment_base(ctxt, seg);
 578}
 579
 580static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
 581			     u32 error, bool valid)
 582{
 583	if (KVM_EMULATOR_BUG_ON(vec > 0x1f, ctxt))
 584		return X86EMUL_UNHANDLEABLE;
 585
 586	ctxt->exception.vector = vec;
 587	ctxt->exception.error_code = error;
 588	ctxt->exception.error_code_valid = valid;
 589	return X86EMUL_PROPAGATE_FAULT;
 590}
 591
 592static int emulate_db(struct x86_emulate_ctxt *ctxt)
 593{
 594	return emulate_exception(ctxt, DB_VECTOR, 0, false);
 595}
 596
 597static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
 598{
 599	return emulate_exception(ctxt, GP_VECTOR, err, true);
 600}
 601
 602static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
 603{
 604	return emulate_exception(ctxt, SS_VECTOR, err, true);
 605}
 606
 607static int emulate_ud(struct x86_emulate_ctxt *ctxt)
 608{
 609	return emulate_exception(ctxt, UD_VECTOR, 0, false);
 610}
 611
 612static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
 613{
 614	return emulate_exception(ctxt, TS_VECTOR, err, true);
 615}
 616
 617static int emulate_de(struct x86_emulate_ctxt *ctxt)
 618{
 619	return emulate_exception(ctxt, DE_VECTOR, 0, false);
 620}
 621
 622static int emulate_nm(struct x86_emulate_ctxt *ctxt)
 623{
 624	return emulate_exception(ctxt, NM_VECTOR, 0, false);
 625}
 626
 627static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
 628{
 629	u16 selector;
 630	struct desc_struct desc;
 631
 632	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
 633	return selector;
 634}
 635
 636static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
 637				 unsigned seg)
 638{
 639	u16 dummy;
 640	u32 base3;
 641	struct desc_struct desc;
 642
 643	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
 644	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
 645}
 646
 647static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
 648{
 649	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
 650}
 651
 652static inline bool emul_is_noncanonical_address(u64 la,
 653						struct x86_emulate_ctxt *ctxt)
 654{
 655	return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
 656}
 657
 658/*
 659 * x86 defines three classes of vector instructions: explicitly
 660 * aligned, explicitly unaligned, and the rest, which change behaviour
 661 * depending on whether they're AVX encoded or not.
 662 *
 663 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 664 * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
 665 * 512 bytes of data must be aligned to a 16 byte boundary.
 666 */
 667static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
 668{
 669	u64 alignment = ctxt->d & AlignMask;
 670
 671	if (likely(size < 16))
 672		return 1;
 673
 674	switch (alignment) {
 675	case Unaligned:
 676	case Avx:
 677		return 1;
 678	case Aligned16:
 679		return 16;
 680	case Aligned:
 681	default:
 682		return size;
 683	}
 684}
 685
 686static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 687				       struct segmented_address addr,
 688				       unsigned *max_size, unsigned size,
 689				       bool write, bool fetch,
 690				       enum x86emul_mode mode, ulong *linear)
 691{
 692	struct desc_struct desc;
 693	bool usable;
 694	ulong la;
 695	u32 lim;
 696	u16 sel;
 697	u8  va_bits;
 698
 699	la = seg_base(ctxt, addr.seg) + addr.ea;
 700	*max_size = 0;
 701	switch (mode) {
 702	case X86EMUL_MODE_PROT64:
 703		*linear = la;
 704		va_bits = ctxt_virt_addr_bits(ctxt);
 705		if (!__is_canonical_address(la, va_bits))
 706			goto bad;
 707
 708		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
 709		if (size > *max_size)
 710			goto bad;
 711		break;
 712	default:
 713		*linear = la = (u32)la;
 714		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
 715						addr.seg);
 716		if (!usable)
 717			goto bad;
 718		/* code segment in protected mode or read-only data segment */
 719		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
 720					|| !(desc.type & 2)) && write)
 721			goto bad;
 722		/* unreadable code segment */
 723		if (!fetch && (desc.type & 8) && !(desc.type & 2))
 724			goto bad;
 725		lim = desc_limit_scaled(&desc);
 726		if (!(desc.type & 8) && (desc.type & 4)) {
 727			/* expand-down segment */
 728			if (addr.ea <= lim)
 729				goto bad;
 730			lim = desc.d ? 0xffffffff : 0xffff;
 731		}
 732		if (addr.ea > lim)
 733			goto bad;
 734		if (lim == 0xffffffff)
 735			*max_size = ~0u;
 736		else {
 737			*max_size = (u64)lim + 1 - addr.ea;
 738			if (size > *max_size)
 739				goto bad;
 740		}
 741		break;
 742	}
 743	if (la & (insn_alignment(ctxt, size) - 1))
 744		return emulate_gp(ctxt, 0);
 745	return X86EMUL_CONTINUE;
 746bad:
 747	if (addr.seg == VCPU_SREG_SS)
 748		return emulate_ss(ctxt, 0);
 749	else
 750		return emulate_gp(ctxt, 0);
 751}
 752
 753static int linearize(struct x86_emulate_ctxt *ctxt,
 754		     struct segmented_address addr,
 755		     unsigned size, bool write,
 756		     ulong *linear)
 757{
 758	unsigned max_size;
 759	return __linearize(ctxt, addr, &max_size, size, write, false,
 760			   ctxt->mode, linear);
 761}
 762
 763static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
 764{
 765	ulong linear;
 766	int rc;
 767	unsigned max_size;
 768	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 769					   .ea = dst };
 770
 771	if (ctxt->op_bytes != sizeof(unsigned long))
 772		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
 773	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
 774	if (rc == X86EMUL_CONTINUE)
 775		ctxt->_eip = addr.ea;
 776	return rc;
 777}
 778
 779static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
 780{
 781	u64 efer;
 782	struct desc_struct cs;
 783	u16 selector;
 784	u32 base3;
 785
 786	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
 787
 788	if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
 789		/* Real mode. cpu must not have long mode active */
 790		if (efer & EFER_LMA)
 791			return X86EMUL_UNHANDLEABLE;
 792		ctxt->mode = X86EMUL_MODE_REAL;
 793		return X86EMUL_CONTINUE;
 794	}
 795
 796	if (ctxt->eflags & X86_EFLAGS_VM) {
 797		/* Protected/VM86 mode. cpu must not have long mode active */
 798		if (efer & EFER_LMA)
 799			return X86EMUL_UNHANDLEABLE;
 800		ctxt->mode = X86EMUL_MODE_VM86;
 801		return X86EMUL_CONTINUE;
 802	}
 803
 804	if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
 805		return X86EMUL_UNHANDLEABLE;
 806
 807	if (efer & EFER_LMA) {
 808		if (cs.l) {
 809			/* Proper long mode */
 810			ctxt->mode = X86EMUL_MODE_PROT64;
 811		} else if (cs.d) {
  812			/* 32 bit compatibility mode */
 813			ctxt->mode = X86EMUL_MODE_PROT32;
 814		} else {
 815			ctxt->mode = X86EMUL_MODE_PROT16;
 816		}
 817	} else {
 818		/* Legacy 32 bit / 16 bit mode */
 819		ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
 820	}
 821
 822	return X86EMUL_CONTINUE;
 823}
 824
 825static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
 826{
 827	return assign_eip(ctxt, dst);
 828}
 829
 830static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
 831{
 832	int rc = emulator_recalc_and_set_mode(ctxt);
 833
 834	if (rc != X86EMUL_CONTINUE)
 835		return rc;
 836
 837	return assign_eip(ctxt, dst);
 838}
 839
 840static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
 841{
 842	return assign_eip_near(ctxt, ctxt->_eip + rel);
 843}
 844
 845static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
 846			      void *data, unsigned size)
 847{
 848	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
 849}
 850
 851static int linear_write_system(struct x86_emulate_ctxt *ctxt,
 852			       ulong linear, void *data,
 853			       unsigned int size)
 854{
 855	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
 856}
 857
 858static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 859			      struct segmented_address addr,
 860			      void *data,
 861			      unsigned size)
 862{
 863	int rc;
 864	ulong linear;
 865
 866	rc = linearize(ctxt, addr, size, false, &linear);
 867	if (rc != X86EMUL_CONTINUE)
 868		return rc;
 869	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
 870}
 871
 872static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
 873			       struct segmented_address addr,
 874			       void *data,
 875			       unsigned int size)
 876{
 877	int rc;
 878	ulong linear;
 879
 880	rc = linearize(ctxt, addr, size, true, &linear);
 881	if (rc != X86EMUL_CONTINUE)
 882		return rc;
 883	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
 884}
 885
 886/*
 887 * Prefetch the remaining bytes of the instruction without crossing page
 888 * boundary if they are not in fetch_cache yet.
 889 */
 890static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 891{
 892	int rc;
 893	unsigned size, max_size;
 894	unsigned long linear;
 895	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
 896	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 897					   .ea = ctxt->eip + cur_size };
 898
 899	/*
 900	 * We do not know exactly how many bytes will be needed, and
 901	 * __linearize is expensive, so fetch as much as possible.  We
 902	 * just have to avoid going beyond the 15 byte limit, the end
 903	 * of the segment, or the end of the page.
 904	 *
 905	 * __linearize is called with size 0 so that it does not do any
 906	 * boundary check itself.  Instead, we use max_size to check
 907	 * against op_size.
 908	 */
 909	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
 910			 &linear);
 911	if (unlikely(rc != X86EMUL_CONTINUE))
 912		return rc;
 913
 914	size = min_t(unsigned, 15UL ^ cur_size, max_size);
 915	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
 916
 917	/*
 918	 * One instruction can only straddle two pages,
 919	 * and one has been loaded at the beginning of
  920	 * x86_decode_insn.  So, if there still are not enough
  921	 * bytes, we must have hit the 15-byte limit.
 922	 */
 923	if (unlikely(size < op_size))
 924		return emulate_gp(ctxt, 0);
 925
 926	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
 927			      size, &ctxt->exception);
 928	if (unlikely(rc != X86EMUL_CONTINUE))
 929		return rc;
 930	ctxt->fetch.end += size;
 931	return X86EMUL_CONTINUE;
 932}
 933
 934static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
 935					       unsigned size)
 936{
 937	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
 938
 939	if (unlikely(done_size < size))
 940		return __do_insn_fetch_bytes(ctxt, size - done_size);
 941	else
 942		return X86EMUL_CONTINUE;
 943}
 944
 945/* Fetch next part of the instruction being emulated. */
 946#define insn_fetch(_type, _ctxt)					\
 947({	_type _x;							\
 948									\
 949	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
 950	if (rc != X86EMUL_CONTINUE)					\
 951		goto done;						\
 952	ctxt->_eip += sizeof(_type);					\
 953	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
 954	ctxt->fetch.ptr += sizeof(_type);				\
 955	_x;								\
 956})
 957
 958#define insn_fetch_arr(_arr, _size, _ctxt)				\
 959({									\
 960	rc = do_insn_fetch_bytes(_ctxt, _size);				\
 961	if (rc != X86EMUL_CONTINUE)					\
 962		goto done;						\
 963	ctxt->_eip += (_size);						\
 964	memcpy(_arr, ctxt->fetch.ptr, _size);				\
 965	ctxt->fetch.ptr += (_size);					\
 966})
 967
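/*
 * Illustrative usage, not part of the original file: insn_fetch assumes
 * the caller declares a local 'rc' and a 'done:' label, as decode_modrm
 * below does:
 *
 *	sib = insn_fetch(u8, ctxt);	// branches to done on fetch failure
 */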
 968/*
 969 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 970 * pointer into the block that addresses the relevant register.
 971 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 972 */
 973static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
 974			     int byteop)
 975{
 976	void *p;
 977	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
 978
 979	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
 980		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
 981	else
 982		p = reg_rmw(ctxt, modrm_reg);
 983	return p;
 984}
 985
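/*
 * Worked example, not part of the original file: without a REX prefix, a
 * byte-op reg field of 7 selects BH, i.e. byte 1 of the RBX slot
 * (modrm_reg & 3 == 3, plus one); with any REX prefix the same encoding
 * selects DIL, the low byte of RDI, instead.
 */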
 986static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 987			   struct segmented_address addr,
 988			   u16 *size, unsigned long *address, int op_bytes)
 989{
 990	int rc;
 991
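	/* With a 16-bit operand size, LGDT/LIDT still load a 24-bit base. */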
 992	if (op_bytes == 2)
 993		op_bytes = 3;
 994	*address = 0;
 995	rc = segmented_read_std(ctxt, addr, size, 2);
 996	if (rc != X86EMUL_CONTINUE)
 997		return rc;
 998	addr.ea += 2;
 999	rc = segmented_read_std(ctxt, addr, address, op_bytes);
1000	return rc;
1001}
1002
1003FASTOP2(add);
1004FASTOP2(or);
1005FASTOP2(adc);
1006FASTOP2(sbb);
1007FASTOP2(and);
1008FASTOP2(sub);
1009FASTOP2(xor);
1010FASTOP2(cmp);
1011FASTOP2(test);
1012
1013FASTOP1SRC2(mul, mul_ex);
1014FASTOP1SRC2(imul, imul_ex);
1015FASTOP1SRC2EX(div, div_ex);
1016FASTOP1SRC2EX(idiv, idiv_ex);
1017
1018FASTOP3WCL(shld);
1019FASTOP3WCL(shrd);
1020
1021FASTOP2W(imul);
1022
1023FASTOP1(not);
1024FASTOP1(neg);
1025FASTOP1(inc);
1026FASTOP1(dec);
1027
1028FASTOP2CL(rol);
1029FASTOP2CL(ror);
1030FASTOP2CL(rcl);
1031FASTOP2CL(rcr);
1032FASTOP2CL(shl);
1033FASTOP2CL(shr);
1034FASTOP2CL(sar);
1035
1036FASTOP2W(bsf);
1037FASTOP2W(bsr);
1038FASTOP2W(bt);
1039FASTOP2W(bts);
1040FASTOP2W(btr);
1041FASTOP2W(btc);
1042
1043FASTOP2(xadd);
1044
1045FASTOP2R(cmp, cmp_r);
1046
1047static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1048{
1049	/* If src is zero, do not writeback, but update flags */
1050	if (ctxt->src.val == 0)
1051		ctxt->dst.type = OP_NONE;
1052	return fastop(ctxt, em_bsf);
1053}
1054
1055static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1056{
1057	/* If src is zero, do not writeback, but update flags */
1058	if (ctxt->src.val == 0)
1059		ctxt->dst.type = OP_NONE;
1060	return fastop(ctxt, em_bsr);
1061}
1062
1063static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1064{
1065	u8 rc;
1066	void (*fop)(void) = (void *)em_setcc + FASTOP_SIZE * (condition & 0xf);
1067
1068	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1069	asm("push %[flags]; popf; " CALL_NOSPEC
1070	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1071	return rc;
1072}
1073
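/*
 * Illustrative usage, not part of this excerpt: the Jcc handlers feed
 * the low nibble of the opcode byte straight into the setcc table, e.g.
 *
 *	if (test_cc(ctxt->b, ctxt->eflags))
 *		rc = jmp_rel(ctxt, ctxt->src.val);
 */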
1074static void fetch_register_operand(struct operand *op)
1075{
1076	switch (op->bytes) {
1077	case 1:
1078		op->val = *(u8 *)op->addr.reg;
1079		break;
1080	case 2:
1081		op->val = *(u16 *)op->addr.reg;
1082		break;
1083	case 4:
1084		op->val = *(u32 *)op->addr.reg;
1085		break;
1086	case 8:
1087		op->val = *(u64 *)op->addr.reg;
1088		break;
1089	}
1090}
1091
1092static int em_fninit(struct x86_emulate_ctxt *ctxt)
1093{
1094	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1095		return emulate_nm(ctxt);
1096
1097	kvm_fpu_get();
1098	asm volatile("fninit");
1099	kvm_fpu_put();
1100	return X86EMUL_CONTINUE;
1101}
1102
1103static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1104{
1105	u16 fcw;
1106
1107	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1108		return emulate_nm(ctxt);
1109
1110	kvm_fpu_get();
1111	asm volatile("fnstcw %0": "+m"(fcw));
1112	kvm_fpu_put();
1113
1114	ctxt->dst.val = fcw;
1115
1116	return X86EMUL_CONTINUE;
1117}
1118
1119static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1120{
1121	u16 fsw;
1122
1123	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1124		return emulate_nm(ctxt);
1125
1126	kvm_fpu_get();
1127	asm volatile("fnstsw %0": "+m"(fsw));
1128	kvm_fpu_put();
1129
1130	ctxt->dst.val = fsw;
1131
1132	return X86EMUL_CONTINUE;
1133}
1134
1135static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1136				    struct operand *op)
1137{
1138	unsigned int reg;
1139
1140	if (ctxt->d & ModRM)
1141		reg = ctxt->modrm_reg;
1142	else
1143		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1144
1145	if (ctxt->d & Sse) {
1146		op->type = OP_XMM;
1147		op->bytes = 16;
1148		op->addr.xmm = reg;
1149		kvm_read_sse_reg(reg, &op->vec_val);
1150		return;
1151	}
1152	if (ctxt->d & Mmx) {
1153		reg &= 7;
1154		op->type = OP_MM;
1155		op->bytes = 8;
1156		op->addr.mm = reg;
1157		return;
1158	}
1159
1160	op->type = OP_REG;
1161	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1162	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1163
1164	fetch_register_operand(op);
1165	op->orig_val = op->val;
1166}
1167
1168static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1169{
1170	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1171		ctxt->modrm_seg = VCPU_SREG_SS;
1172}
1173
1174static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1175			struct operand *op)
1176{
1177	u8 sib;
1178	int index_reg, base_reg, scale;
1179	int rc = X86EMUL_CONTINUE;
1180	ulong modrm_ea = 0;
1181
1182	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1183	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1184	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1185
1186	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1187	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1188	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1189	ctxt->modrm_seg = VCPU_SREG_DS;
1190
1191	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1192		op->type = OP_REG;
1193		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1194		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1195				ctxt->d & ByteOp);
1196		if (ctxt->d & Sse) {
1197			op->type = OP_XMM;
1198			op->bytes = 16;
1199			op->addr.xmm = ctxt->modrm_rm;
1200			kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
1201			return rc;
1202		}
1203		if (ctxt->d & Mmx) {
1204			op->type = OP_MM;
1205			op->bytes = 8;
1206			op->addr.mm = ctxt->modrm_rm & 7;
1207			return rc;
1208		}
1209		fetch_register_operand(op);
1210		return rc;
1211	}
1212
1213	op->type = OP_MEM;
1214
1215	if (ctxt->ad_bytes == 2) {
1216		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1217		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1218		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1219		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1220
1221		/* 16-bit ModR/M decode. */
1222		switch (ctxt->modrm_mod) {
1223		case 0:
1224			if (ctxt->modrm_rm == 6)
1225				modrm_ea += insn_fetch(u16, ctxt);
1226			break;
1227		case 1:
1228			modrm_ea += insn_fetch(s8, ctxt);
1229			break;
1230		case 2:
1231			modrm_ea += insn_fetch(u16, ctxt);
1232			break;
1233		}
1234		switch (ctxt->modrm_rm) {
1235		case 0:
1236			modrm_ea += bx + si;
1237			break;
1238		case 1:
1239			modrm_ea += bx + di;
1240			break;
1241		case 2:
1242			modrm_ea += bp + si;
1243			break;
1244		case 3:
1245			modrm_ea += bp + di;
1246			break;
1247		case 4:
1248			modrm_ea += si;
1249			break;
1250		case 5:
1251			modrm_ea += di;
1252			break;
1253		case 6:
1254			if (ctxt->modrm_mod != 0)
1255				modrm_ea += bp;
1256			break;
1257		case 7:
1258			modrm_ea += bx;
1259			break;
1260		}
1261		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1262		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1263			ctxt->modrm_seg = VCPU_SREG_SS;
1264		modrm_ea = (u16)modrm_ea;
1265	} else {
1266		/* 32/64-bit ModR/M decode. */
1267		if ((ctxt->modrm_rm & 7) == 4) {
1268			sib = insn_fetch(u8, ctxt);
1269			index_reg |= (sib >> 3) & 7;
1270			base_reg |= sib & 7;
1271			scale = sib >> 6;
1272
1273			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1274				modrm_ea += insn_fetch(s32, ctxt);
1275			else {
1276				modrm_ea += reg_read(ctxt, base_reg);
1277				adjust_modrm_seg(ctxt, base_reg);
1278				/* Increment ESP on POP [ESP] */
1279				if ((ctxt->d & IncSP) &&
1280				    base_reg == VCPU_REGS_RSP)
1281					modrm_ea += ctxt->op_bytes;
1282			}
1283			if (index_reg != 4)
1284				modrm_ea += reg_read(ctxt, index_reg) << scale;
1285		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1286			modrm_ea += insn_fetch(s32, ctxt);
1287			if (ctxt->mode == X86EMUL_MODE_PROT64)
1288				ctxt->rip_relative = 1;
1289		} else {
1290			base_reg = ctxt->modrm_rm;
1291			modrm_ea += reg_read(ctxt, base_reg);
1292			adjust_modrm_seg(ctxt, base_reg);
1293		}
1294		switch (ctxt->modrm_mod) {
1295		case 1:
1296			modrm_ea += insn_fetch(s8, ctxt);
1297			break;
1298		case 2:
1299			modrm_ea += insn_fetch(s32, ctxt);
1300			break;
1301		}
1302	}
1303	op->addr.mem.ea = modrm_ea;
1304	if (ctxt->ad_bytes != 8)
1305		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1306
1307done:
1308	return rc;
1309}
1310
1311static int decode_abs(struct x86_emulate_ctxt *ctxt,
1312		      struct operand *op)
1313{
1314	int rc = X86EMUL_CONTINUE;
1315
1316	op->type = OP_MEM;
1317	switch (ctxt->ad_bytes) {
1318	case 2:
1319		op->addr.mem.ea = insn_fetch(u16, ctxt);
1320		break;
1321	case 4:
1322		op->addr.mem.ea = insn_fetch(u32, ctxt);
1323		break;
1324	case 8:
1325		op->addr.mem.ea = insn_fetch(u64, ctxt);
1326		break;
1327	}
1328done:
1329	return rc;
1330}
1331
1332static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1333{
1334	long sv = 0, mask;
1335
1336	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1337		mask = ~((long)ctxt->dst.bytes * 8 - 1);
1338
1339		if (ctxt->src.bytes == 2)
1340			sv = (s16)ctxt->src.val & (s16)mask;
1341		else if (ctxt->src.bytes == 4)
1342			sv = (s32)ctxt->src.val & (s32)mask;
1343		else
1344			sv = (s64)ctxt->src.val & (s64)mask;
1345
1346		ctxt->dst.addr.mem.ea = address_mask(ctxt,
1347					   ctxt->dst.addr.mem.ea + (sv >> 3));
1348	}
1349
1350	/* only subword offset */
1351	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1352}
1353
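/*
 * Worked example, not part of the original file: for a dword
 * "bt mem, reg" with a source bit offset of 100, the code above adds
 * (100 & ~31) / 8 == 12 to the effective address and leaves
 * 100 % 32 == 4 as the in-dword bit offset.
 */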
1354static int read_emulated(struct x86_emulate_ctxt *ctxt,
1355			 unsigned long addr, void *dest, unsigned size)
1356{
1357	int rc;
1358	struct read_cache *mc = &ctxt->mem_read;
1359
1360	if (mc->pos < mc->end)
1361		goto read_cached;
1362
1363	if (KVM_EMULATOR_BUG_ON((mc->end + size) >= sizeof(mc->data), ctxt))
1364		return X86EMUL_UNHANDLEABLE;
1365
1366	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1367				      &ctxt->exception);
1368	if (rc != X86EMUL_CONTINUE)
1369		return rc;
1370
1371	mc->end += size;
1372
1373read_cached:
1374	memcpy(dest, mc->data + mc->pos, size);
1375	mc->pos += size;
1376	return X86EMUL_CONTINUE;
1377}
1378
1379static int segmented_read(struct x86_emulate_ctxt *ctxt,
1380			  struct segmented_address addr,
1381			  void *data,
1382			  unsigned size)
1383{
1384	int rc;
1385	ulong linear;
1386
1387	rc = linearize(ctxt, addr, size, false, &linear);
1388	if (rc != X86EMUL_CONTINUE)
1389		return rc;
1390	return read_emulated(ctxt, linear, data, size);
1391}
1392
1393static int segmented_write(struct x86_emulate_ctxt *ctxt,
1394			   struct segmented_address addr,
1395			   const void *data,
1396			   unsigned size)
1397{
1398	int rc;
1399	ulong linear;
1400
1401	rc = linearize(ctxt, addr, size, true, &linear);
1402	if (rc != X86EMUL_CONTINUE)
1403		return rc;
1404	return ctxt->ops->write_emulated(ctxt, linear, data, size,
1405					 &ctxt->exception);
1406}
1407
1408static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1409			     struct segmented_address addr,
1410			     const void *orig_data, const void *data,
1411			     unsigned size)
1412{
1413	int rc;
1414	ulong linear;
1415
1416	rc = linearize(ctxt, addr, size, true, &linear);
1417	if (rc != X86EMUL_CONTINUE)
1418		return rc;
1419	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1420					   size, &ctxt->exception);
1421}
1422
1423static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1424			   unsigned int size, unsigned short port,
1425			   void *dest)
1426{
1427	struct read_cache *rc = &ctxt->io_read;
1428
1429	if (rc->pos == rc->end) { /* refill pio read ahead */
1430		unsigned int in_page, n;
1431		unsigned int count = ctxt->rep_prefix ?
1432			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1433		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1434			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1435			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1436		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1437		if (n == 0)
1438			n = 1;
1439		rc->pos = rc->end = 0;
1440		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1441			return 0;
1442		rc->end = n * size;
1443	}
1444
1445	if (ctxt->rep_prefix && (ctxt->d & String) &&
1446	    !(ctxt->eflags & X86_EFLAGS_DF)) {
1447		ctxt->dst.data = rc->data + rc->pos;
1448		ctxt->dst.type = OP_MEM_STR;
1449		ctxt->dst.count = (rc->end - rc->pos) / size;
1450		rc->pos = rc->end;
1451	} else {
1452		memcpy(dest, rc->data + rc->pos, size);
1453		rc->pos += size;
1454	}
1455	return 1;
1456}
1457
1458static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1459				     u16 index, struct desc_struct *desc)
1460{
1461	struct desc_ptr dt;
1462	ulong addr;
1463
1464	ctxt->ops->get_idt(ctxt, &dt);
1465
1466	if (dt.size < index * 8 + 7)
1467		return emulate_gp(ctxt, index << 3 | 0x2);
1468
1469	addr = dt.address + index * 8;
1470	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1471}
1472
1473static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1474				     u16 selector, struct desc_ptr *dt)
1475{
1476	const struct x86_emulate_ops *ops = ctxt->ops;
1477	u32 base3 = 0;
1478
1479	if (selector & 1 << 2) {
1480		struct desc_struct desc;
1481		u16 sel;
1482
1483		memset(dt, 0, sizeof(*dt));
1484		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1485				      VCPU_SREG_LDTR))
1486			return;
1487
1488		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1489		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1490	} else
1491		ops->get_gdt(ctxt, dt);
1492}
1493
1494static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1495			      u16 selector, ulong *desc_addr_p)
1496{
1497	struct desc_ptr dt;
1498	u16 index = selector >> 3;
1499	ulong addr;
1500
1501	get_descriptor_table_ptr(ctxt, selector, &dt);
1502
1503	if (dt.size < index * 8 + 7)
1504		return emulate_gp(ctxt, selector & 0xfffc);
1505
1506	addr = dt.address + index * 8;
1507
1508#ifdef CONFIG_X86_64
1509	if (addr >> 32 != 0) {
1510		u64 efer = 0;
1511
1512		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1513		if (!(efer & EFER_LMA))
1514			addr &= (u32)-1;
1515	}
1516#endif
1517
1518	*desc_addr_p = addr;
1519	return X86EMUL_CONTINUE;
1520}
1521
1522/* allowed just for 8 bytes segments */
1523static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1524				   u16 selector, struct desc_struct *desc,
1525				   ulong *desc_addr_p)
1526{
1527	int rc;
1528
1529	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1530	if (rc != X86EMUL_CONTINUE)
1531		return rc;
1532
1533	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1534}
1535
1536/* allowed just for 8 bytes segments */
1537static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1538				    u16 selector, struct desc_struct *desc)
1539{
1540	int rc;
1541	ulong addr;
1542
1543	rc = get_descriptor_ptr(ctxt, selector, &addr);
1544	if (rc != X86EMUL_CONTINUE)
1545		return rc;
1546
1547	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1548}
1549
1550static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1551				     u16 selector, int seg, u8 cpl,
1552				     enum x86_transfer_type transfer,
1553				     struct desc_struct *desc)
1554{
1555	struct desc_struct seg_desc, old_desc;
1556	u8 dpl, rpl;
1557	unsigned err_vec = GP_VECTOR;
1558	u32 err_code = 0;
1559	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1560	ulong desc_addr;
1561	int ret;
1562	u16 dummy;
1563	u32 base3 = 0;
1564
1565	memset(&seg_desc, 0, sizeof(seg_desc));
1566
1567	if (ctxt->mode == X86EMUL_MODE_REAL) {
1568		/* set real mode segment descriptor (keep limit etc. for
1569		 * unreal mode) */
1570		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1571		set_desc_base(&seg_desc, selector << 4);
1572		goto load;
1573	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1574		/* VM86 needs a clean new segment descriptor */
1575		set_desc_base(&seg_desc, selector << 4);
1576		set_desc_limit(&seg_desc, 0xffff);
1577		seg_desc.type = 3;
1578		seg_desc.p = 1;
1579		seg_desc.s = 1;
1580		seg_desc.dpl = 3;
1581		goto load;
1582	}
1583
1584	rpl = selector & 3;
1585
1586	/* TR should be in GDT only */
1587	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1588		goto exception;
1589
1590	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
1591	if (null_selector) {
1592		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1593			goto exception;
1594
1595		if (seg == VCPU_SREG_SS) {
1596			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1597				goto exception;
1598
1599			/*
1600			 * ctxt->ops->set_segment expects the CPL to be in
1601			 * SS.DPL, so fake an expand-up 32-bit data segment.
1602			 */
1603			seg_desc.type = 3;
1604			seg_desc.p = 1;
1605			seg_desc.s = 1;
1606			seg_desc.dpl = cpl;
1607			seg_desc.d = 1;
1608			seg_desc.g = 1;
1609		}
1610
1611		/* Skip all following checks */
1612		goto load;
1613	}
1614
1615	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1616	if (ret != X86EMUL_CONTINUE)
1617		return ret;
1618
1619	err_code = selector & 0xfffc;
1620	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1621							   GP_VECTOR;
1622
1623	/* can't load system descriptor into segment selector */
1624	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1625		if (transfer == X86_TRANSFER_CALL_JMP)
1626			return X86EMUL_UNHANDLEABLE;
1627		goto exception;
1628	}
1629
1630	dpl = seg_desc.dpl;
1631
1632	switch (seg) {
1633	case VCPU_SREG_SS:
1634		/*
 1635		 * segment is not a writable data segment, or the segment
 1636		 * selector's RPL != CPL, or the descriptor's DPL != CPL
1637		 */
1638		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1639			goto exception;
1640		break;
1641	case VCPU_SREG_CS:
1642		if (!(seg_desc.type & 8))
1643			goto exception;
1644
1645		if (transfer == X86_TRANSFER_RET) {
1646			/* RET can never return to an inner privilege level. */
1647			if (rpl < cpl)
1648				goto exception;
1649			/* Outer-privilege level return is not implemented */
1650			if (rpl > cpl)
1651				return X86EMUL_UNHANDLEABLE;
1652		}
1653		if (transfer == X86_TRANSFER_RET || transfer == X86_TRANSFER_TASK_SWITCH) {
1654			if (seg_desc.type & 4) {
1655				/* conforming */
1656				if (dpl > rpl)
1657					goto exception;
1658			} else {
1659				/* nonconforming */
1660				if (dpl != rpl)
1661					goto exception;
1662			}
1663		} else { /* X86_TRANSFER_CALL_JMP */
1664			if (seg_desc.type & 4) {
1665				/* conforming */
1666				if (dpl > cpl)
1667					goto exception;
1668			} else {
1669				/* nonconforming */
1670				if (rpl > cpl || dpl != cpl)
1671					goto exception;
1672			}
1673		}
1674		/* in long-mode d/b must be clear if l is set */
1675		if (seg_desc.d && seg_desc.l) {
1676			u64 efer = 0;
1677
1678			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1679			if (efer & EFER_LMA)
1680				goto exception;
1681		}
1682
1683		/* CS(RPL) <- CPL */
1684		selector = (selector & 0xfffc) | cpl;
1685		break;
1686	case VCPU_SREG_TR:
1687		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1688			goto exception;
1689		break;
1690	case VCPU_SREG_LDTR:
1691		if (seg_desc.s || seg_desc.type != 2)
1692			goto exception;
1693		break;
1694	default: /*  DS, ES, FS, or GS */
1695		/*
1696		 * segment is not a data or readable code segment or
1697		 * ((segment is a data or nonconforming code segment)
1698		 * and (both RPL and CPL > DPL))
1699		 */
1700		if ((seg_desc.type & 0xa) == 0x8 ||
1701		    (((seg_desc.type & 0xc) != 0xc) &&
1702		     (rpl > dpl && cpl > dpl)))
1703			goto exception;
1704		break;
1705	}
1706
1707	if (!seg_desc.p) {
1708		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1709		goto exception;
1710	}
1711
1712	if (seg_desc.s) {
1713		/* mark segment as accessed */
1714		if (!(seg_desc.type & 1)) {
1715			seg_desc.type |= 1;
1716			ret = write_segment_descriptor(ctxt, selector,
1717						       &seg_desc);
1718			if (ret != X86EMUL_CONTINUE)
1719				return ret;
1720		}
1721	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1722		ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1723		if (ret != X86EMUL_CONTINUE)
1724			return ret;
1725		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1726						 ((u64)base3 << 32), ctxt))
1727			return emulate_gp(ctxt, err_code);
1728	}
1729
1730	if (seg == VCPU_SREG_TR) {
1731		old_desc = seg_desc;
1732		seg_desc.type |= 2; /* busy */
1733		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1734						  sizeof(seg_desc), &ctxt->exception);
1735		if (ret != X86EMUL_CONTINUE)
1736			return ret;
1737	}
1738load:
1739	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1740	if (desc)
1741		*desc = seg_desc;
1742	return X86EMUL_CONTINUE;
1743exception:
1744	return emulate_exception(ctxt, err_vec, err_code, true);
1745}
1746
1747static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1748				   u16 selector, int seg)
1749{
1750	u8 cpl = ctxt->ops->cpl(ctxt);
1751
1752	/*
1753	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1754	 * they can load it at CPL<3 (Intel's manual says only LSS can,
1755	 * but it's wrong).
1756	 *
1757	 * However, the Intel manual says that putting IST=1/DPL=3 in
1758	 * an interrupt gate will result in SS=3 (the AMD manual instead
1759	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1760	 * and only forbid it here.
1761	 */
1762	if (seg == VCPU_SREG_SS && selector == 3 &&
1763	    ctxt->mode == X86EMUL_MODE_PROT64)
1764		return emulate_exception(ctxt, GP_VECTOR, 0, true);
1765
1766	return __load_segment_descriptor(ctxt, selector, seg, cpl,
1767					 X86_TRANSFER_NONE, NULL);
1768}
1769
1770static void write_register_operand(struct operand *op)
1771{
1772	return assign_register(op->addr.reg, op->val, op->bytes);
1773}
1774
1775static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1776{
1777	switch (op->type) {
1778	case OP_REG:
1779		write_register_operand(op);
1780		break;
1781	case OP_MEM:
1782		if (ctxt->lock_prefix)
1783			return segmented_cmpxchg(ctxt,
1784						 op->addr.mem,
1785						 &op->orig_val,
1786						 &op->val,
1787						 op->bytes);
1788		else
1789			return segmented_write(ctxt,
1790					       op->addr.mem,
1791					       &op->val,
1792					       op->bytes);
1793		break;
1794	case OP_MEM_STR:
1795		return segmented_write(ctxt,
1796				       op->addr.mem,
1797				       op->data,
1798				       op->bytes * op->count);
1799		break;
1800	case OP_XMM:
1801		kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
1802		break;
1803	case OP_MM:
1804		kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
1805		break;
1806	case OP_NONE:
1807		/* no writeback */
1808		break;
1809	default:
1810		break;
1811	}
1812	return X86EMUL_CONTINUE;
1813}
1814
1815static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1816{
1817	struct segmented_address addr;
1818
1819	rsp_increment(ctxt, -bytes);
1820	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1821	addr.seg = VCPU_SREG_SS;
1822
1823	return segmented_write(ctxt, addr, data, bytes);
1824}
1825
1826static int em_push(struct x86_emulate_ctxt *ctxt)
1827{
1828	/* Disable writeback. */
1829	ctxt->dst.type = OP_NONE;
1830	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1831}
1832
1833static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1834		       void *dest, int len)
1835{
1836	int rc;
1837	struct segmented_address addr;
1838
1839	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1840	addr.seg = VCPU_SREG_SS;
1841	rc = segmented_read(ctxt, addr, dest, len);
1842	if (rc != X86EMUL_CONTINUE)
1843		return rc;
1844
1845	rsp_increment(ctxt, len);
1846	return rc;
1847}
1848
1849static int em_pop(struct x86_emulate_ctxt *ctxt)
1850{
1851	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1852}
1853
1854static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1855			void *dest, int len)
1856{
1857	int rc;
1858	unsigned long val, change_mask;
1859	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1860	int cpl = ctxt->ops->cpl(ctxt);
1861
1862	rc = emulate_pop(ctxt, &val, len);
1863	if (rc != X86EMUL_CONTINUE)
1864		return rc;
1865
1866	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1867		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1868		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1869		      X86_EFLAGS_AC | X86_EFLAGS_ID;
1870
1871	switch(ctxt->mode) {
1872	case X86EMUL_MODE_PROT64:
1873	case X86EMUL_MODE_PROT32:
1874	case X86EMUL_MODE_PROT16:
1875		if (cpl == 0)
1876			change_mask |= X86_EFLAGS_IOPL;
1877		if (cpl <= iopl)
1878			change_mask |= X86_EFLAGS_IF;
1879		break;
1880	case X86EMUL_MODE_VM86:
1881		if (iopl < 3)
1882			return emulate_gp(ctxt, 0);
1883		change_mask |= X86_EFLAGS_IF;
1884		break;
1885	default: /* real mode */
1886		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1887		break;
1888	}
1889
1890	*(unsigned long *)dest =
1891		(ctxt->eflags & ~change_mask) | (val & change_mask);
1892
1893	return rc;
1894}
1895
1896static int em_popf(struct x86_emulate_ctxt *ctxt)
1897{
1898	ctxt->dst.type = OP_REG;
1899	ctxt->dst.addr.reg = &ctxt->eflags;
1900	ctxt->dst.bytes = ctxt->op_bytes;
1901	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1902}
1903
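/*
 * ENTER: only a nesting level of zero is handled; non-zero levels would
 * require copying the display area of BP values, which is left unemulated.
 */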
1904static int em_enter(struct x86_emulate_ctxt *ctxt)
1905{
1906	int rc;
1907	unsigned frame_size = ctxt->src.val;
1908	unsigned nesting_level = ctxt->src2.val & 31;
1909	ulong rbp;
1910
1911	if (nesting_level)
1912		return X86EMUL_UNHANDLEABLE;
1913
1914	rbp = reg_read(ctxt, VCPU_REGS_RBP);
1915	rc = push(ctxt, &rbp, stack_size(ctxt));
1916	if (rc != X86EMUL_CONTINUE)
1917		return rc;
1918	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1919		      stack_mask(ctxt));
1920	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1921		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1922		      stack_mask(ctxt));
1923	return X86EMUL_CONTINUE;
1924}
1925
1926static int em_leave(struct x86_emulate_ctxt *ctxt)
1927{
1928	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1929		      stack_mask(ctxt));
1930	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1931}
1932
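/*
 * PUSH of a segment register with a 32-bit operand size writes only the
 * low 16 bits of the stack slot, as real CPUs do, so shrink the write to
 * two bytes and account for the remaining stack adjustment by hand.
 */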
1933static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1934{
1935	int seg = ctxt->src2.val;
1936
1937	ctxt->src.val = get_segment_selector(ctxt, seg);
1938	if (ctxt->op_bytes == 4) {
1939		rsp_increment(ctxt, -2);
1940		ctxt->op_bytes = 2;
1941	}
1942
1943	return em_push(ctxt);
1944}
1945
1946static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1947{
1948	int seg = ctxt->src2.val;
1949	unsigned long selector;
1950	int rc;
1951
1952	rc = emulate_pop(ctxt, &selector, 2);
1953	if (rc != X86EMUL_CONTINUE)
1954		return rc;
1955
1956	if (seg == VCPU_SREG_SS)
1957		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1958	if (ctxt->op_bytes > 2)
1959		rsp_increment(ctxt, ctxt->op_bytes - 2);
1960
1961	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1962	return rc;
1963}
1964
1965static int em_pusha(struct x86_emulate_ctxt *ctxt)
1966{
1967	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1968	int rc = X86EMUL_CONTINUE;
1969	int reg = VCPU_REGS_RAX;
1970
1971	while (reg <= VCPU_REGS_RDI) {
1972		ctxt->src.val = (reg == VCPU_REGS_RSP) ? old_esp :
1973				reg_read(ctxt, reg);
1974
1975		rc = em_push(ctxt);
1976		if (rc != X86EMUL_CONTINUE)
1977			return rc;
1978
1979		++reg;
1980	}
1981
1982	return rc;
1983}
1984
1985static int em_pushf(struct x86_emulate_ctxt *ctxt)
1986{
1987	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
1988	return em_push(ctxt);
1989}
1990
1991static int em_popa(struct x86_emulate_ctxt *ctxt)
1992{
1993	int rc = X86EMUL_CONTINUE;
1994	int reg = VCPU_REGS_RDI;
1995	u32 val;
1996
1997	while (reg >= VCPU_REGS_RAX) {
1998		if (reg == VCPU_REGS_RSP) {
1999			rsp_increment(ctxt, ctxt->op_bytes);
2000			--reg;
2001		}
2002
2003		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2004		if (rc != X86EMUL_CONTINUE)
2005			break;
2006		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2007		--reg;
2008	}
2009	return rc;
2010}
2011
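/*
 * Real-mode interrupt delivery: push FLAGS, CS and IP, clear IF/TF/AC,
 * then fetch the new IP:CS pair from the IVT entry at vector * 4.
 */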
2012static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2013{
2014	const struct x86_emulate_ops *ops = ctxt->ops;
2015	int rc;
2016	struct desc_ptr dt;
2017	gva_t cs_addr;
2018	gva_t eip_addr;
2019	u16 cs, eip;
2020
2021	/* TODO: Add limit checks */
2022	ctxt->src.val = ctxt->eflags;
2023	rc = em_push(ctxt);
2024	if (rc != X86EMUL_CONTINUE)
2025		return rc;
2026
2027	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2028
2029	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2030	rc = em_push(ctxt);
2031	if (rc != X86EMUL_CONTINUE)
2032		return rc;
2033
2034	ctxt->src.val = ctxt->_eip;
2035	rc = em_push(ctxt);
2036	if (rc != X86EMUL_CONTINUE)
2037		return rc;
2038
2039	ops->get_idt(ctxt, &dt);
2040
2041	eip_addr = dt.address + (irq << 2);
2042	cs_addr = dt.address + (irq << 2) + 2;
2043
2044	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2045	if (rc != X86EMUL_CONTINUE)
2046		return rc;
2047
2048	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2049	if (rc != X86EMUL_CONTINUE)
2050		return rc;
2051
2052	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2053	if (rc != X86EMUL_CONTINUE)
2054		return rc;
2055
2056	ctxt->_eip = eip;
2057
2058	return rc;
2059}
2060
2061int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2062{
2063	int rc;
2064
2065	invalidate_registers(ctxt);
2066	rc = __emulate_int_real(ctxt, irq);
2067	if (rc == X86EMUL_CONTINUE)
2068		writeback_registers(ctxt);
2069	return rc;
2070}
2071
2072static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2073{
2074	switch (ctxt->mode) {
2075	case X86EMUL_MODE_REAL:
2076		return __emulate_int_real(ctxt, irq);
2077	case X86EMUL_MODE_VM86:
2078	case X86EMUL_MODE_PROT16:
2079	case X86EMUL_MODE_PROT32:
2080	case X86EMUL_MODE_PROT64:
2081	default:
2082		/* Protected mode interrupts are not implemented yet */
2083		return X86EMUL_UNHANDLEABLE;
2084	}
2085}
2086
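/*
 * Real-mode IRET: pop IP, CS and FLAGS.  For a 32-bit pop only the bits in
 * @mask are taken from the popped value and VM/VIF/VIP are preserved; a
 * 16-bit pop replaces the low word of EFLAGS.  IRET also unmasks NMIs.
 */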
2087static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2088{
2089	int rc = X86EMUL_CONTINUE;
2090	unsigned long temp_eip = 0;
2091	unsigned long temp_eflags = 0;
2092	unsigned long cs = 0;
2093	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2094			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2095			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2096			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2097			     X86_EFLAGS_AC | X86_EFLAGS_ID |
2098			     X86_EFLAGS_FIXED;
2099	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2100				  X86_EFLAGS_VIP;
2101
2102	/* TODO: Add stack limit check */
2103
2104	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2105
2106	if (rc != X86EMUL_CONTINUE)
2107		return rc;
2108
2109	if (temp_eip & ~0xffff)
2110		return emulate_gp(ctxt, 0);
2111
2112	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2113
2114	if (rc != X86EMUL_CONTINUE)
2115		return rc;
2116
2117	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2118
2119	if (rc != X86EMUL_CONTINUE)
2120		return rc;
2121
2122	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2123
2124	if (rc != X86EMUL_CONTINUE)
2125		return rc;
2126
2127	ctxt->_eip = temp_eip;
2128
2129	if (ctxt->op_bytes == 4)
2130		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2131	else if (ctxt->op_bytes == 2) {
2132		ctxt->eflags &= ~0xffff;
2133		ctxt->eflags |= temp_eflags;
2134	}
2135
2136	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2137	ctxt->eflags |= X86_EFLAGS_FIXED;
2138	ctxt->ops->set_nmi_mask(ctxt, false);
2139
2140	return rc;
2141}
2142
2143static int em_iret(struct x86_emulate_ctxt *ctxt)
2144{
2145	switch (ctxt->mode) {
2146	case X86EMUL_MODE_REAL:
2147		return emulate_iret_real(ctxt);
2148	case X86EMUL_MODE_VM86:
2149	case X86EMUL_MODE_PROT16:
2150	case X86EMUL_MODE_PROT32:
2151	case X86EMUL_MODE_PROT64:
2152	default:
2153		/* IRET from protected mode is not implemented yet */
2154		return X86EMUL_UNHANDLEABLE;
2155	}
2156}
2157
2158static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2159{
2160	int rc;
2161	unsigned short sel;
2162	struct desc_struct new_desc;
2163	u8 cpl = ctxt->ops->cpl(ctxt);
2164
2165	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2166
2167	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2168				       X86_TRANSFER_CALL_JMP,
2169				       &new_desc);
2170	if (rc != X86EMUL_CONTINUE)
2171		return rc;
2172
2173	rc = assign_eip_far(ctxt, ctxt->src.val);
2174	/* Error handling is not implemented. */
2175	if (rc != X86EMUL_CONTINUE)
2176		return X86EMUL_UNHANDLEABLE;
2177
2178	return rc;
2179}
2180
2181static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2182{
2183	return assign_eip_near(ctxt, ctxt->src.val);
2184}
2185
2186static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2187{
2188	int rc;
2189	long int old_eip;
2190
2191	old_eip = ctxt->_eip;
2192	rc = assign_eip_near(ctxt, ctxt->src.val);
2193	if (rc != X86EMUL_CONTINUE)
2194		return rc;
2195	ctxt->src.val = old_eip;
2196	rc = em_push(ctxt);
2197	return rc;
2198}
2199
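/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit destination.  On a mismatch
 * the old value is loaded into EDX:EAX and ZF is cleared; on a match
 * ECX:EBX is written back and ZF is set.  CMPXCHG16B (a 16-byte
 * destination) is not handled.
 */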
2200static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2201{
2202	u64 old = ctxt->dst.orig_val64;
2203
2204	if (ctxt->dst.bytes == 16)
2205		return X86EMUL_UNHANDLEABLE;
2206
2207	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2208	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2209		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2210		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2211		ctxt->eflags &= ~X86_EFLAGS_ZF;
2212	} else {
2213		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2214			(u32) reg_read(ctxt, VCPU_REGS_RBX);
2215
2216		ctxt->eflags |= X86_EFLAGS_ZF;
2217	}
2218	return X86EMUL_CONTINUE;
2219}
2220
2221static int em_ret(struct x86_emulate_ctxt *ctxt)
2222{
2223	int rc;
2224	unsigned long eip;
2225
2226	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2227	if (rc != X86EMUL_CONTINUE)
2228		return rc;
2229
2230	return assign_eip_near(ctxt, eip);
2231}
2232
2233static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2234{
2235	int rc;
2236	unsigned long eip, cs;
2237	int cpl = ctxt->ops->cpl(ctxt);
2238	struct desc_struct new_desc;
2239
2240	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2241	if (rc != X86EMUL_CONTINUE)
2242		return rc;
2243	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2244	if (rc != X86EMUL_CONTINUE)
2245		return rc;
2246	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2247				       X86_TRANSFER_RET,
2248				       &new_desc);
2249	if (rc != X86EMUL_CONTINUE)
2250		return rc;
2251	rc = assign_eip_far(ctxt, eip);
2252	/* Error handling is not implemented. */
2253	if (rc != X86EMUL_CONTINUE)
2254		return X86EMUL_UNHANDLEABLE;
2255
2256	return rc;
2257}
2258
2259static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2260{
2261        int rc;
2262
2263        rc = em_ret_far(ctxt);
2264        if (rc != X86EMUL_CONTINUE)
2265                return rc;
2266        rsp_increment(ctxt, ctxt->src.val);
2267        return X86EMUL_CONTINUE;
2268}
2269
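/*
 * CMPXCHG: compare (R|E)AX with the destination.  The operands are rigged
 * so that the normal writeback pass does the work: on success the source
 * value is written to the destination; on failure the destination is
 * rewritten with its old value (x86 always generates the write cycle) and
 * the observed value lands in (R|E)AX.
 */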
2270static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2271{
2272	/* Save real source value, then compare EAX against destination. */
2273	ctxt->dst.orig_val = ctxt->dst.val;
2274	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2275	ctxt->src.orig_val = ctxt->src.val;
2276	ctxt->src.val = ctxt->dst.orig_val;
2277	fastop(ctxt, em_cmp);
2278
2279	if (ctxt->eflags & X86_EFLAGS_ZF) {
2280		/* Success: write back to memory; no update of EAX */
2281		ctxt->src.type = OP_NONE;
2282		ctxt->dst.val = ctxt->src.orig_val;
2283	} else {
2284		/* Failure: write the value we saw to EAX. */
2285		ctxt->src.type = OP_REG;
2286		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2287		ctxt->src.val = ctxt->dst.orig_val;
2288		/* Create write-cycle to dest by writing the same value */
2289		ctxt->dst.val = ctxt->dst.orig_val;
2290	}
2291	return X86EMUL_CONTINUE;
2292}
2293
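/*
 * LES/LSS/LDS/LFS/LGS: the far pointer was fetched as offset followed by
 * selector, so the selector sits at src.valptr + op_bytes.
 */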
2294static int em_lseg(struct x86_emulate_ctxt *ctxt)
2295{
2296	int seg = ctxt->src2.val;
2297	unsigned short sel;
2298	int rc;
2299
2300	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2301
2302	rc = load_segment_descriptor(ctxt, sel, seg);
2303	if (rc != X86EMUL_CONTINUE)
2304		return rc;
2305
2306	ctxt->dst.val = ctxt->src.val;
2307	return rc;
2308}
2309
2310static int em_rsm(struct x86_emulate_ctxt *ctxt)
2311{
2312	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2313		return emulate_ud(ctxt);
2314
2315	if (ctxt->ops->leave_smm(ctxt))
2316		ctxt->ops->triple_fault(ctxt);
2317
2318	return emulator_recalc_and_set_mode(ctxt);
2319}
2320
2321static void
2322setup_syscalls_segments(struct desc_struct *cs, struct desc_struct *ss)
2323{
2324	cs->l = 0;		/* will be adjusted later */
2325	set_desc_base(cs, 0);	/* flat segment */
2326	cs->g = 1;		/* 4kb granularity */
2327	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
2328	cs->type = 0x0b;	/* Read, Execute, Accessed */
2329	cs->s = 1;
2330	cs->dpl = 0;		/* will be adjusted later */
2331	cs->p = 1;
2332	cs->d = 1;
2333	cs->avl = 0;
2334
2335	set_desc_base(ss, 0);	/* flat segment */
2336	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
2337	ss->g = 1;		/* 4kb granularity */
2338	ss->s = 1;
2339	ss->type = 0x03;	/* Read/Write, Accessed */
2340	ss->d = 1;		/* 32bit stack segment */
2341	ss->dpl = 0;
2342	ss->p = 1;
2343	ss->l = 0;
2344	ss->avl = 0;
2345}
2346
2347static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2348{
2349	u32 eax, ebx, ecx, edx;
2350
2351	eax = ecx = 0;
2352	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2353	return is_guest_vendor_intel(ebx, ecx, edx);
2354}
2355
2356static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2357{
2358	const struct x86_emulate_ops *ops = ctxt->ops;
2359	u32 eax, ebx, ecx, edx;
2360
2361	/*
2362	 * SYSCALL is always enabled in long mode, so the check only needs to
2363	 * become vendor specific (via CPUID) when other modes are active.
2364	 */
2365	if (ctxt->mode == X86EMUL_MODE_PROT64)
2366		return true;
2367
2368	eax = 0x00000000;
2369	ecx = 0x00000000;
2370	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2371	/*
2372	 * Remark: Intel CPUs only support "syscall" in 64-bit long mode, so
2373	 * even a 32-bit compat application on a 64-bit guest will #UD.  While
2374	 * this behaviour could be fixed up (by emulating the AMD response),
2375	 * AMD CPUs can't be made to behave like Intel.
2376	 */
2377	if (is_guest_vendor_intel(ebx, ecx, edx))
2378		return false;
2379
2380	if (is_guest_vendor_amd(ebx, ecx, edx) ||
2381	    is_guest_vendor_hygon(ebx, ecx, edx))
2382		return true;
2383
2384	/*
2385	 * default: (not Intel, not AMD, not Hygon), apply Intel's
2386	 * stricter rules...
2387	 */
2388	return false;
2389}
2390
2391static int em_syscall(struct x86_emulate_ctxt *ctxt)
2392{
2393	const struct x86_emulate_ops *ops = ctxt->ops;
2394	struct desc_struct cs, ss;
2395	u64 msr_data;
2396	u16 cs_sel, ss_sel;
2397	u64 efer = 0;
2398
2399	/* syscall is not available in real mode */
2400	if (ctxt->mode == X86EMUL_MODE_REAL ||
2401	    ctxt->mode == X86EMUL_MODE_VM86)
2402		return emulate_ud(ctxt);
2403
2404	if (!(em_syscall_is_enabled(ctxt)))
2405		return emulate_ud(ctxt);
2406
2407	ops->get_msr(ctxt, MSR_EFER, &efer);
2408	if (!(efer & EFER_SCE))
2409		return emulate_ud(ctxt);
2410
2411	setup_syscalls_segments(&cs, &ss);
2412	ops->get_msr(ctxt, MSR_STAR, &msr_data);
2413	msr_data >>= 32;
2414	cs_sel = (u16)(msr_data & 0xfffc);
2415	ss_sel = (u16)(msr_data + 8);
2416
2417	if (efer & EFER_LMA) {
2418		cs.d = 0;
2419		cs.l = 1;
2420	}
2421	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2422	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2423
2424	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2425	if (efer & EFER_LMA) {
2426#ifdef CONFIG_X86_64
2427		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2428
2429		ops->get_msr(ctxt,
2430			     ctxt->mode == X86EMUL_MODE_PROT64 ?
2431			     MSR_LSTAR : MSR_CSTAR, &msr_data);
2432		ctxt->_eip = msr_data;
2433
2434		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2435		ctxt->eflags &= ~msr_data;
2436		ctxt->eflags |= X86_EFLAGS_FIXED;
2437#endif
2438	} else {
2439		/* legacy mode */
2440		ops->get_msr(ctxt, MSR_STAR, &msr_data);
2441		ctxt->_eip = (u32)msr_data;
2442
2443		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2444	}
2445
2446	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2447	return X86EMUL_CONTINUE;
2448}
2449
2450static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2451{
2452	const struct x86_emulate_ops *ops = ctxt->ops;
2453	struct desc_struct cs, ss;
2454	u64 msr_data;
2455	u16 cs_sel, ss_sel;
2456	u64 efer = 0;
2457
2458	ops->get_msr(ctxt, MSR_EFER, &efer);
2459	/* inject #GP if in real mode */
2460	if (ctxt->mode == X86EMUL_MODE_REAL)
2461		return emulate_gp(ctxt, 0);
2462
2463	/*
2464	 * Not recognized on AMD in compat mode (but is recognized in legacy
2465	 * mode).
2466	 */
2467	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA) &&
2468	    !vendor_intel(ctxt))
2469		return emulate_ud(ctxt);
2470
2471	/* sysenter/sysexit have not been tested in 64bit mode. */
2472	if (ctxt->mode == X86EMUL_MODE_PROT64)
2473		return X86EMUL_UNHANDLEABLE;
2474
2475	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2476	if ((msr_data & 0xfffc) == 0x0)
2477		return emulate_gp(ctxt, 0);
2478
2479	setup_syscalls_segments(&cs, &ss);
2480	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2481	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2482	ss_sel = cs_sel + 8;
2483	if (efer & EFER_LMA) {
2484		cs.d = 0;
2485		cs.l = 1;
2486	}
2487
2488	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2489	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2490
2491	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2492	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2493
2494	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2495	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2496							      (u32)msr_data;
2497	if (efer & EFER_LMA)
2498		ctxt->mode = X86EMUL_MODE_PROT64;
2499
2500	return X86EMUL_CONTINUE;
2501}
2502
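/*
 * SYSEXIT: the return CS/SS selectors are derived from MSR_IA32_SYSENTER_CS
 * at fixed offsets (+16/+24 for a 32-bit return, +32/+40 for 64-bit) with
 * RPL forced to 3; RDX supplies the return RIP and RCX the return RSP.
 */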
2503static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2504{
2505	const struct x86_emulate_ops *ops = ctxt->ops;
2506	struct desc_struct cs, ss;
2507	u64 msr_data, rcx, rdx;
2508	int usermode;
2509	u16 cs_sel = 0, ss_sel = 0;
2510
2511	/* inject #GP if in real mode or Virtual 8086 mode */
2512	if (ctxt->mode == X86EMUL_MODE_REAL ||
2513	    ctxt->mode == X86EMUL_MODE_VM86)
2514		return emulate_gp(ctxt, 0);
2515
2516	setup_syscalls_segments(&cs, &ss);
2517
2518	if ((ctxt->rex_prefix & 0x8) != 0x0)
2519		usermode = X86EMUL_MODE_PROT64;
2520	else
2521		usermode = X86EMUL_MODE_PROT32;
2522
2523	rcx = reg_read(ctxt, VCPU_REGS_RCX);
2524	rdx = reg_read(ctxt, VCPU_REGS_RDX);
2525
2526	cs.dpl = 3;
2527	ss.dpl = 3;
2528	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2529	switch (usermode) {
2530	case X86EMUL_MODE_PROT32:
2531		cs_sel = (u16)(msr_data + 16);
2532		if ((msr_data & 0xfffc) == 0x0)
2533			return emulate_gp(ctxt, 0);
2534		ss_sel = (u16)(msr_data + 24);
2535		rcx = (u32)rcx;
2536		rdx = (u32)rdx;
2537		break;
2538	case X86EMUL_MODE_PROT64:
2539		cs_sel = (u16)(msr_data + 32);
2540		if (msr_data == 0x0)
2541			return emulate_gp(ctxt, 0);
2542		ss_sel = cs_sel + 8;
2543		cs.d = 0;
2544		cs.l = 1;
2545		if (emul_is_noncanonical_address(rcx, ctxt) ||
2546		    emul_is_noncanonical_address(rdx, ctxt))
2547			return emulate_gp(ctxt, 0);
2548		break;
2549	}
2550	cs_sel |= SEGMENT_RPL_MASK;
2551	ss_sel |= SEGMENT_RPL_MASK;
2552
2553	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2554	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2555
2556	ctxt->_eip = rdx;
2557	ctxt->mode = usermode;
2558	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2559
2560	return X86EMUL_CONTINUE;
2561}
2562
2563static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2564{
2565	int iopl;
2566	if (ctxt->mode == X86EMUL_MODE_REAL)
2567		return false;
2568	if (ctxt->mode == X86EMUL_MODE_VM86)
2569		return true;
2570	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2571	return ctxt->ops->cpl(ctxt) > iopl;
2572}
2573
2574#define VMWARE_PORT_VMPORT	(0x5658)
2575#define VMWARE_PORT_VMRPC	(0x5659)
2576
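/*
 * Consult the TSS I/O permission bitmap: the bitmap offset is read from
 * the TSS at byte 102, and all @len bits starting at @port must be clear
 * for the access to be allowed.
 */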
2577static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2578					    u16 port, u16 len)
2579{
2580	const struct x86_emulate_ops *ops = ctxt->ops;
2581	struct desc_struct tr_seg;
2582	u32 base3;
2583	int r;
2584	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2585	unsigned mask = (1 << len) - 1;
2586	unsigned long base;
2587
2588	/*
2589	 * VMware allows access to these ports even if denied
2590	 * by TSS I/O permission bitmap. Mimic behavior.
2591	 */
2592	if (enable_vmware_backdoor &&
2593	    ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2594		return true;
2595
2596	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2597	if (!tr_seg.p)
2598		return false;
2599	if (desc_limit_scaled(&tr_seg) < 103)
2600		return false;
2601	base = get_desc_base(&tr_seg);
2602#ifdef CONFIG_X86_64
2603	base |= ((u64)base3) << 32;
2604#endif
2605	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2606	if (r != X86EMUL_CONTINUE)
2607		return false;
2608	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2609		return false;
2610	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2611	if (r != X86EMUL_CONTINUE)
2612		return false;
2613	if ((perm >> bit_idx) & mask)
2614		return false;
2615	return true;
2616}
2617
2618static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2619				 u16 port, u16 len)
2620{
2621	if (ctxt->perm_ok)
2622		return true;
2623
2624	if (emulator_bad_iopl(ctxt))
2625		if (!emulator_io_port_access_allowed(ctxt, port, len))
2626			return false;
2627
2628	ctxt->perm_ok = true;
2629
2630	return true;
2631}
2632
2633static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2634{
2635	/*
2636	 * Intel CPUs mask the counter and pointers in quite a strange
2637	 * manner when ECX is zero, due to REP-string optimizations.
2638	 */
2639#ifdef CONFIG_X86_64
2640	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2641		return;
2642
2643	*reg_write(ctxt, VCPU_REGS_RCX) = 0;
2644
2645	switch (ctxt->b) {
2646	case 0xa4:	/* movsb */
2647	case 0xa5:	/* movsd/w */
2648		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2649		fallthrough;
2650	case 0xaa:	/* stosb */
2651	case 0xab:	/* stosd/w */
2652		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2653	}
2654#endif
2655}
2656
2657static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2658				struct tss_segment_16 *tss)
2659{
2660	tss->ip = ctxt->_eip;
2661	tss->flag = ctxt->eflags;
2662	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2663	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2664	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2665	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2666	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2667	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2668	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2669	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2670
2671	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2672	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2673	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2674	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2675	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2676}
2677
2678static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2679				 struct tss_segment_16 *tss)
2680{
2681	int ret;
2682	u8 cpl;
2683
2684	ctxt->_eip = tss->ip;
2685	ctxt->eflags = tss->flag | 2;
2686	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2687	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2688	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2689	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2690	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2691	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2692	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2693	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2694
2695	/*
2696	 * The SDM says that segment selectors are loaded before segment
2697	 * descriptors.
2698	 */
2699	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2700	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2701	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2702	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2703	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2704
2705	cpl = tss->cs & 3;
2706
2707	/*
2708	 * Now load the segment descriptors. If a fault happens at this
2709	 * stage, it is handled in the context of the new task.
2710	 */
2711	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2712					X86_TRANSFER_TASK_SWITCH, NULL);
2713	if (ret != X86EMUL_CONTINUE)
2714		return ret;
2715	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2716					X86_TRANSFER_TASK_SWITCH, NULL);
2717	if (ret != X86EMUL_CONTINUE)
2718		return ret;
2719	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2720					X86_TRANSFER_TASK_SWITCH, NULL);
2721	if (ret != X86EMUL_CONTINUE)
2722		return ret;
2723	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2724					X86_TRANSFER_TASK_SWITCH, NULL);
2725	if (ret != X86EMUL_CONTINUE)
2726		return ret;
2727	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2728					X86_TRANSFER_TASK_SWITCH, NULL);
2729	if (ret != X86EMUL_CONTINUE)
2730		return ret;
2731
2732	return X86EMUL_CONTINUE;
2733}
2734
2735static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
2736			  ulong old_tss_base, struct desc_struct *new_desc)
2737{
2738	struct tss_segment_16 tss_seg;
2739	int ret;
2740	u32 new_tss_base = get_desc_base(new_desc);
2741
2742	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2743	if (ret != X86EMUL_CONTINUE)
2744		return ret;
2745
2746	save_state_to_tss16(ctxt, &tss_seg);
2747
2748	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2749	if (ret != X86EMUL_CONTINUE)
2750		return ret;
2751
2752	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
2753	if (ret != X86EMUL_CONTINUE)
2754		return ret;
2755
2756	if (old_tss_sel != 0xffff) {
2757		tss_seg.prev_task_link = old_tss_sel;
2758
2759		ret = linear_write_system(ctxt, new_tss_base,
2760					  &tss_seg.prev_task_link,
2761					  sizeof(tss_seg.prev_task_link));
2762		if (ret != X86EMUL_CONTINUE)
2763			return ret;
2764	}
2765
2766	return load_state_from_tss16(ctxt, &tss_seg);
2767}
2768
2769static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2770				struct tss_segment_32 *tss)
2771{
2772	/* CR3 and ldt selector are not saved intentionally */
2773	tss->eip = ctxt->_eip;
2774	tss->eflags = ctxt->eflags;
2775	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2776	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2777	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2778	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2779	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2780	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2781	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2782	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2783
2784	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2785	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2786	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2787	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2788	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2789	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2790}
2791
2792static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2793				 struct tss_segment_32 *tss)
2794{
2795	int ret;
2796	u8 cpl;
2797
2798	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2799		return emulate_gp(ctxt, 0);
2800	ctxt->_eip = tss->eip;
2801	ctxt->eflags = tss->eflags | 2;
2802
2803	/* General purpose registers */
2804	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2805	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2806	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2807	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2808	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2809	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2810	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2811	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2812
2813	/*
2814	 * SDM says that segment selectors are loaded before segment
2815	 * descriptors.  This is important because CPL checks will
2816	 * use CS.RPL.
2817	 */
2818	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2819	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2820	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2821	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2822	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2823	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2824	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2825
2826	/*
2827	 * If we're switching between Protected Mode and VM86, we need to make
2828	 * sure to update the mode before loading the segment descriptors so
2829	 * that the selectors are interpreted correctly.
2830	 */
2831	if (ctxt->eflags & X86_EFLAGS_VM) {
2832		ctxt->mode = X86EMUL_MODE_VM86;
2833		cpl = 3;
2834	} else {
2835		ctxt->mode = X86EMUL_MODE_PROT32;
2836		cpl = tss->cs & 3;
2837	}
2838
2839	/*
2840	 * Now load the segment descriptors. If a fault happens at this
2841	 * stage, it is handled in the context of the new task.
2842	 */
2843	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2844					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
2845	if (ret != X86EMUL_CONTINUE)
2846		return ret;
2847	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2848					X86_TRANSFER_TASK_SWITCH, NULL);
2849	if (ret != X86EMUL_CONTINUE)
2850		return ret;
2851	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2852					X86_TRANSFER_TASK_SWITCH, NULL);
2853	if (ret != X86EMUL_CONTINUE)
2854		return ret;
2855	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2856					X86_TRANSFER_TASK_SWITCH, NULL);
2857	if (ret != X86EMUL_CONTINUE)
2858		return ret;
2859	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2860					X86_TRANSFER_TASK_SWITCH, NULL);
2861	if (ret != X86EMUL_CONTINUE)
2862		return ret;
2863	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2864					X86_TRANSFER_TASK_SWITCH, NULL);
2865	if (ret != X86EMUL_CONTINUE)
2866		return ret;
2867	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2868					X86_TRANSFER_TASK_SWITCH, NULL);
2869
2870	return ret;
2871}
2872
2873static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
2874			  ulong old_tss_base, struct desc_struct *new_desc)
2875{
2876	struct tss_segment_32 tss_seg;
2877	int ret;
2878	u32 new_tss_base = get_desc_base(new_desc);
2879	u32 eip_offset = offsetof(struct tss_segment_32, eip);
2880	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2881
2882	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2883	if (ret != X86EMUL_CONTINUE)
2884		return ret;
2885
2886	save_state_to_tss32(ctxt, &tss_seg);
2887
2888	/* Only GP registers and segment selectors are saved */
2889	ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2890				  ldt_sel_offset - eip_offset);
2891	if (ret != X86EMUL_CONTINUE)
2892		return ret;
2893
2894	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
2895	if (ret != X86EMUL_CONTINUE)
2896		return ret;
2897
2898	if (old_tss_sel != 0xffff) {
2899		tss_seg.prev_task_link = old_tss_sel;
2900
2901		ret = linear_write_system(ctxt, new_tss_base,
2902					  &tss_seg.prev_task_link,
2903					  sizeof(tss_seg.prev_task_link));
2904		if (ret != X86EMUL_CONTINUE)
2905			return ret;
2906	}
2907
2908	return load_state_from_tss32(ctxt, &tss_seg);
2909}
2910
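/*
 * Common task-switch implementation: validate the target TSS descriptor,
 * save the outgoing state into the old TSS, load the incoming state from
 * the new one (16- or 32-bit format), and keep the busy bits, NT flag and
 * back link consistent with the hardware task-switch semantics.
 */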
2911static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2912				   u16 tss_selector, int idt_index, int reason,
2913				   bool has_error_code, u32 error_code)
2914{
2915	const struct x86_emulate_ops *ops = ctxt->ops;
2916	struct desc_struct curr_tss_desc, next_tss_desc;
2917	int ret;
2918	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2919	ulong old_tss_base =
2920		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2921	u32 desc_limit;
2922	ulong desc_addr, dr7;
2923
2924	/* FIXME: old_tss_base == ~0 ? */
2925
2926	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2927	if (ret != X86EMUL_CONTINUE)
2928		return ret;
2929	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2930	if (ret != X86EMUL_CONTINUE)
2931		return ret;
2932
2933	/* FIXME: check that next_tss_desc is tss */
2934
2935	/*
2936	 * Check privileges. The three cases are task switch caused by...
2937	 *
2938	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2939	 * 2. Exception/IRQ/iret: No check is performed
2940	 * 3. jmp/call to TSS/task-gate: No check is performed since the
2941	 *    hardware checks it before exiting.
2942	 */
2943	if (reason == TASK_SWITCH_GATE) {
2944		if (idt_index != -1) {
2945			/* Software interrupts */
2946			struct desc_struct task_gate_desc;
2947			int dpl;
2948
2949			ret = read_interrupt_descriptor(ctxt, idt_index,
2950							&task_gate_desc);
2951			if (ret != X86EMUL_CONTINUE)
2952				return ret;
2953
2954			dpl = task_gate_desc.dpl;
2955			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2956				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2957		}
2958	}
2959
2960	desc_limit = desc_limit_scaled(&next_tss_desc);
2961	if (!next_tss_desc.p ||
2962	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2963	     desc_limit < 0x2b)) {
2964		return emulate_ts(ctxt, tss_selector & 0xfffc);
2965	}
2966
2967	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2968		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2969		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2970	}
2971
2972	if (reason == TASK_SWITCH_IRET)
2973		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2974
2975	/* Set the back link to the previous task only if the NT bit is set in
2976	   EFLAGS; note that old_tss_sel is not used after this point. */
2977	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2978		old_tss_sel = 0xffff;
2979
2980	if (next_tss_desc.type & 8)
2981		ret = task_switch_32(ctxt, old_tss_sel, old_tss_base, &next_tss_desc);
2982	else
2983		ret = task_switch_16(ctxt, old_tss_sel,
2984				     old_tss_base, &next_tss_desc);
2985	if (ret != X86EMUL_CONTINUE)
2986		return ret;
2987
2988	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2989		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2990
2991	if (reason != TASK_SWITCH_IRET) {
2992		next_tss_desc.type |= (1 << 1); /* set busy flag */
2993		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2994	}
2995
2996	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
2997	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2998
2999	if (has_error_code) {
3000		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3001		ctxt->lock_prefix = 0;
3002		ctxt->src.val = (unsigned long) error_code;
3003		ret = em_push(ctxt);
3004	}
3005
3006	ops->get_dr(ctxt, 7, &dr7);
3007	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3008
3009	return ret;
3010}
3011
3012int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3013			 u16 tss_selector, int idt_index, int reason,
3014			 bool has_error_code, u32 error_code)
3015{
3016	int rc;
3017
3018	invalidate_registers(ctxt);
3019	ctxt->_eip = ctxt->eip;
3020	ctxt->dst.type = OP_NONE;
3021
3022	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3023				     has_error_code, error_code);
3024
3025	if (rc == X86EMUL_CONTINUE) {
3026		ctxt->eip = ctxt->_eip;
3027		writeback_registers(ctxt);
3028	}
3029
3030	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3031}
3032
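/*
 * Advance a string-op pointer register by the processed element count in
 * the DF-selected direction and refresh the operand's effective address.
 */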
3033static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3034		struct operand *op)
3035{
3036	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3037
3038	register_address_increment(ctxt, reg, df * op->bytes);
3039	op->addr.mem.ea = register_address(ctxt, reg);
3040}
3041
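/*
 * DAS: decimal-adjust AL after subtraction.  Subtract 6 if the low nibble
 * is above 9 (or AF was set) and 0x60 if AL was above 0x99 (or CF was
 * set), updating AF/CF; PF/ZF/SF are recomputed via a dummy OR with zero.
 */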
3042static int em_das(struct x86_emulate_ctxt *ctxt)
3043{
3044	u8 al, old_al;
3045	bool af, cf, old_cf;
3046
3047	cf = ctxt->eflags & X86_EFLAGS_CF;
3048	al = ctxt->dst.val;
3049
3050	old_al = al;
3051	old_cf = cf;
3052	cf = false;
3053	af = ctxt->eflags & X86_EFLAGS_AF;
3054	if ((al & 0x0f) > 9 || af) {
3055		al -= 6;
3056		cf = old_cf | (al >= 250);
3057		af = true;
3058	} else {
3059		af = false;
3060	}
3061	if (old_al > 0x99 || old_cf) {
3062		al -= 0x60;
3063		cf = true;
3064	}
3065
3066	ctxt->dst.val = al;
3067	/* Set PF, ZF, SF */
3068	ctxt->src.type = OP_IMM;
3069	ctxt->src.val = 0;
3070	ctxt->src.bytes = 1;
3071	fastop(ctxt, em_or);
3072	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3073	if (cf)
3074		ctxt->eflags |= X86_EFLAGS_CF;
3075	if (af)
3076		ctxt->eflags |= X86_EFLAGS_AF;
3077	return X86EMUL_CONTINUE;
3078}
3079
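/*
 * AAM: split AL by the immediate divisor into AH (quotient) and AL
 * (remainder); a divisor of zero raises #DE.
 */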
3080static int em_aam(struct x86_emulate_ctxt *ctxt)
3081{
3082	u8 al, ah;
3083
3084	if (ctxt->src.val == 0)
3085		return emulate_de(ctxt);
3086
3087	al = ctxt->dst.val & 0xff;
3088	ah = al / ctxt->src.val;
3089	al %= ctxt->src.val;
3090
3091	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3092
3093	/* Set PF, ZF, SF */
3094	ctxt->src.type = OP_IMM;
3095	ctxt->src.val = 0;
3096	ctxt->src.bytes = 1;
3097	fastop(ctxt, em_or);
3098
3099	return X86EMUL_CONTINUE;
3100}
3101
3102static int em_aad(struct x86_emulate_ctxt *ctxt)
3103{
3104	u8 al = ctxt->dst.val & 0xff;
3105	u8 ah = (ctxt->dst.val >> 8) & 0xff;
3106
3107	al = (al + (ah * ctxt->src.val)) & 0xff;
3108
3109	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3110
3111	/* Set PF, ZF, SF */
3112	ctxt->src.type = OP_IMM;
3113	ctxt->src.val = 0;
3114	ctxt->src.bytes = 1;
3115	fastop(ctxt, em_or);
3116
3117	return X86EMUL_CONTINUE;
3118}
3119
3120static int em_call(struct x86_emulate_ctxt *ctxt)
3121{
3122	int rc;
3123	long rel = ctxt->src.val;
3124
3125	ctxt->src.val = (unsigned long)ctxt->_eip;
3126	rc = jmp_rel(ctxt, rel);
3127	if (rc != X86EMUL_CONTINUE)
3128		return rc;
3129	return em_push(ctxt);
3130}
3131
3132static int em_call_far(struct x86_emulate_ctxt *ctxt)
3133{
3134	u16 sel, old_cs;
3135	ulong old_eip;
3136	int rc;
3137	struct desc_struct old_desc, new_desc;
3138	const struct x86_emulate_ops *ops = ctxt->ops;
3139	int cpl = ctxt->ops->cpl(ctxt);
3140	enum x86emul_mode prev_mode = ctxt->mode;
3141
3142	old_eip = ctxt->_eip;
3143	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3144
3145	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3146	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3147				       X86_TRANSFER_CALL_JMP, &new_desc);
3148	if (rc != X86EMUL_CONTINUE)
3149		return rc;
3150
3151	rc = assign_eip_far(ctxt, ctxt->src.val);
3152	if (rc != X86EMUL_CONTINUE)
3153		goto fail;
3154
3155	ctxt->src.val = old_cs;
3156	rc = em_push(ctxt);
3157	if (rc != X86EMUL_CONTINUE)
3158		goto fail;
3159
3160	ctxt->src.val = old_eip;
3161	rc = em_push(ctxt);
3162	/* If we failed, we tainted the memory, but at the very least we
3163	   should restore CS. */
3164	if (rc != X86EMUL_CONTINUE) {
3165		pr_warn_once("faulting far call emulation tainted memory\n");
3166		goto fail;
3167	}
3168	return rc;
3169fail:
3170	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3171	ctxt->mode = prev_mode;
3172	return rc;
3173
3174}
3175
3176static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3177{
3178	int rc;
3179	unsigned long eip;
3180
3181	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3182	if (rc != X86EMUL_CONTINUE)
3183		return rc;
3184	rc = assign_eip_near(ctxt, eip);
3185	if (rc != X86EMUL_CONTINUE)
3186		return rc;
3187	rsp_increment(ctxt, ctxt->src.val);
3188	return X86EMUL_CONTINUE;
3189}
3190
3191static int em_xchg(struct x86_emulate_ctxt *ctxt)
3192{
3193	/* Write back the register source. */
3194	ctxt->src.val = ctxt->dst.val;
3195	write_register_operand(&ctxt->src);
3196
3197	/* Write back the memory destination with implicit LOCK prefix. */
3198	ctxt->dst.val = ctxt->src.orig_val;
3199	ctxt->lock_prefix = 1;
3200	return X86EMUL_CONTINUE;
3201}
3202
3203static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3204{
3205	ctxt->dst.val = ctxt->src2.val;
3206	return fastop(ctxt, em_imul);
3207}
3208
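/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit throughout the
 * corresponding (R|E)DX destination.
 */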
3209static int em_cwd(struct x86_emulate_ctxt *ctxt)
3210{
3211	ctxt->dst.type = OP_REG;
3212	ctxt->dst.bytes = ctxt->src.bytes;
3213	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3214	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3215
3216	return X86EMUL_CONTINUE;
3217}
3218
3219static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3220{
3221	u64 tsc_aux = 0;
3222
3223	if (!ctxt->ops->guest_has_rdpid(ctxt))
3224		return emulate_ud(ctxt);
3225
3226	ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux);
3227	ctxt->dst.val = tsc_aux;
3228	return X86EMUL_CONTINUE;
3229}
3230
3231static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3232{
3233	u64 tsc = 0;
3234
3235	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3236	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3237	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3238	return X86EMUL_CONTINUE;
3239}
3240
3241static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3242{
3243	u64 pmc;
3244
3245	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3246		return emulate_gp(ctxt, 0);
3247	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3248	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3249	return X86EMUL_CONTINUE;
3250}
3251
3252static int em_mov(struct x86_emulate_ctxt *ctxt)
3253{
3254	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3255	return X86EMUL_CONTINUE;
3256}
3257
3258static int em_movbe(struct x86_emulate_ctxt *ctxt)
3259{
3260	u16 tmp;
3261
3262	if (!ctxt->ops->guest_has_movbe(ctxt))
3263		return emulate_ud(ctxt);
3264
3265	switch (ctxt->op_bytes) {
3266	case 2:
3267		/*
3268		 * From MOVBE definition: "...When the operand size is 16 bits,
3269		 * the upper word of the destination register remains unchanged
3270		 * ..."
3271		 *
3272		 * Casting either ->valptr or ->val to u16 would break
3273		 * strict-aliasing rules, so we have to do the operation almost
3274		 * by hand.
3274		 */
3275		tmp = (u16)ctxt->src.val;
3276		ctxt->dst.val &= ~0xffffUL;
3277		ctxt->dst.val |= (unsigned long)swab16(tmp);
3278		break;
3279	case 4:
3280		ctxt->dst.val = swab32((u32)ctxt->src.val);
3281		break;
3282	case 8:
3283		ctxt->dst.val = swab64(ctxt->src.val);
3284		break;
3285	default:
3286		BUG();
3287	}
3288	return X86EMUL_CONTINUE;
3289}
3290
3291static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3292{
3293	int cr_num = ctxt->modrm_reg;
3294	int r;
3295
3296	if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
3297		return emulate_gp(ctxt, 0);
3298
3299	/* Disable writeback. */
3300	ctxt->dst.type = OP_NONE;
3301
3302	if (cr_num == 0) {
3303		/*
3304		 * CR0 write might have updated CR0.PE and/or CR0.PG
3305		 * which can affect the cpu's execution mode.
3306		 */
3307		r = emulator_recalc_and_set_mode(ctxt);
3308		if (r != X86EMUL_CONTINUE)
3309			return r;
3310	}
3311
3312	return X86EMUL_CONTINUE;
3313}
3314
3315static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3316{
3317	unsigned long val;
3318
3319	if (ctxt->mode == X86EMUL_MODE_PROT64)
3320		val = ctxt->src.val & ~0ULL;
3321	else
3322		val = ctxt->src.val & ~0U;
3323
3324	/* #UD condition is already handled. */
3325	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3326		return emulate_gp(ctxt, 0);
3327
3328	/* Disable writeback. */
3329	ctxt->dst.type = OP_NONE;
3330	return X86EMUL_CONTINUE;
3331}
3332
3333static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3334{
3335	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3336	u64 msr_data;
3337	int r;
3338
3339	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3340		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3341	r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data);
3342
3343	if (r == X86EMUL_PROPAGATE_FAULT)
3344		return emulate_gp(ctxt, 0);
3345
3346	return r;
3347}
3348
3349static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3350{
3351	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3352	u64 msr_data;
3353	int r;
3354
3355	r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data);
3356
3357	if (r == X86EMUL_PROPAGATE_FAULT)
3358		return emulate_gp(ctxt, 0);
3359
3360	if (r == X86EMUL_CONTINUE) {
3361		*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3362		*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3363	}
3364	return r;
3365}
3366
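/*
 * Store a segment or system register selector.  UMIP only restricts the
 * system registers (anything above GS, i.e. SLDT/STR), and a 32-bit store
 * to memory is shrunk to 16 bits as on real hardware.
 */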
3367static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3368{
3369	if (segment > VCPU_SREG_GS &&
3370	    (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3371	    ctxt->ops->cpl(ctxt) > 0)
3372		return emulate_gp(ctxt, 0);
3373
3374	ctxt->dst.val = get_segment_selector(ctxt, segment);
3375	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3376		ctxt->dst.bytes = 2;
3377	return X86EMUL_CONTINUE;
3378}
3379
3380static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3381{
3382	if (ctxt->modrm_reg > VCPU_SREG_GS)
3383		return emulate_ud(ctxt);
3384
3385	return em_store_sreg(ctxt, ctxt->modrm_reg);
3386}
3387
3388static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3389{
3390	u16 sel = ctxt->src.val;
3391
3392	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3393		return emulate_ud(ctxt);
3394
3395	if (ctxt->modrm_reg == VCPU_SREG_SS)
3396		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3397
3398	/* Disable writeback. */
3399	ctxt->dst.type = OP_NONE;
3400	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3401}
3402
3403static int em_sldt(struct x86_emulate_ctxt *ctxt)
3404{
3405	return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3406}
3407
3408static int em_lldt(struct x86_emulate_ctxt *ctxt)
3409{
3410	u16 sel = ctxt->src.val;
3411
3412	/* Disable writeback. */
3413	ctxt->dst.type = OP_NONE;
3414	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3415}
3416
3417static int em_str(struct x86_emulate_ctxt *ctxt)
3418{
3419	return em_store_sreg(ctxt, VCPU_SREG_TR);
3420}
3421
3422static int em_ltr(struct x86_emulate_ctxt *ctxt)
3423{
3424	u16 sel = ctxt->src.val;
3425
3426	/* Disable writeback. */
3427	ctxt->dst.type = OP_NONE;
3428	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3429}
3430
3431static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3432{
3433	int rc;
3434	ulong linear;
3435
3436	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3437	if (rc == X86EMUL_CONTINUE)
3438		ctxt->ops->invlpg(ctxt, linear);
3439	/* Disable writeback. */
3440	ctxt->dst.type = OP_NONE;
3441	return X86EMUL_CONTINUE;
3442}
3443
3444static int em_clts(struct x86_emulate_ctxt *ctxt)
3445{
3446	ulong cr0;
3447
3448	cr0 = ctxt->ops->get_cr(ctxt, 0);
3449	cr0 &= ~X86_CR0_TS;
3450	ctxt->ops->set_cr(ctxt, 0, cr0);
3451	return X86EMUL_CONTINUE;
3452}
3453
3454static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3455{
3456	int rc = ctxt->ops->fix_hypercall(ctxt);
3457
3458	if (rc != X86EMUL_CONTINUE)
3459		return rc;
3460
3461	/* Let the processor re-execute the fixed hypercall */
3462	ctxt->_eip = ctxt->eip;
3463	/* Disable writeback. */
3464	ctxt->dst.type = OP_NONE;
3465	return X86EMUL_CONTINUE;
3466}
3467
3468static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3469				  void (*get)(struct x86_emulate_ctxt *ctxt,
3470					      struct desc_ptr *ptr))
3471{
3472	struct desc_ptr desc_ptr;
3473
3474	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3475	    ctxt->ops->cpl(ctxt) > 0)
3476		return emulate_gp(ctxt, 0);
3477
3478	if (ctxt->mode == X86EMUL_MODE_PROT64)
3479		ctxt->op_bytes = 8;
3480	get(ctxt, &desc_ptr);
3481	if (ctxt->op_bytes == 2) {
3482		ctxt->op_bytes = 4;
3483		desc_ptr.address &= 0x00ffffff;
3484	}
3485	/* Disable writeback. */
3486	ctxt->dst.type = OP_NONE;
3487	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3488				   &desc_ptr, 2 + ctxt->op_bytes);
3489}
3490
3491static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3492{
3493	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3494}
3495
3496static int em_sidt(struct x86_emulate_ctxt *ctxt)
3497{
3498	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3499}
3500
3501static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3502{
3503	struct desc_ptr desc_ptr;
3504	int rc;
3505
3506	if (ctxt->mode == X86EMUL_MODE_PROT64)
3507		ctxt->op_bytes = 8;
3508	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3509			     &desc_ptr.size, &desc_ptr.address,
3510			     ctxt->op_bytes);
3511	if (rc != X86EMUL_CONTINUE)
3512		return rc;
3513	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3514	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
3515		return emulate_gp(ctxt, 0);
3516	if (lgdt)
3517		ctxt->ops->set_gdt(ctxt, &desc_ptr);
3518	else
3519		ctxt->ops->set_idt(ctxt, &desc_ptr);
3520	/* Disable writeback. */
3521	ctxt->dst.type = OP_NONE;
3522	return X86EMUL_CONTINUE;
3523}
3524
3525static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3526{
3527	return em_lgdt_lidt(ctxt, true);
3528}
3529
3530static int em_lidt(struct x86_emulate_ctxt *ctxt)
3531{
3532	return em_lgdt_lidt(ctxt, false);
3533}
3534
3535static int em_smsw(struct x86_emulate_ctxt *ctxt)
3536{
3537	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3538	    ctxt->ops->cpl(ctxt) > 0)
3539		return emulate_gp(ctxt, 0);
3540
3541	if (ctxt->dst.type == OP_MEM)
3542		ctxt->dst.bytes = 2;
3543	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3544	return X86EMUL_CONTINUE;
3545}
3546
3547static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3548{
3549	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3550			  | (ctxt->src.val & 0x0f));
3551	ctxt->dst.type = OP_NONE;
3552	return X86EMUL_CONTINUE;
3553}
3554
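/*
 * LOOP/LOOPE/LOOPNE (0xe2/0xe1/0xe0): decrement (E)CX and branch while it
 * is non-zero; for 0xe0/0xe1 the ZF condition is folded in via test_cc(),
 * since b ^ 0x5 maps those opcodes onto the JNE/JE condition codes.
 */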
3555static int em_loop(struct x86_emulate_ctxt *ctxt)
3556{
3557	int rc = X86EMUL_CONTINUE;
3558
3559	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3560	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3561	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3562		rc = jmp_rel(ctxt, ctxt->src.val);
3563
3564	return rc;
3565}
3566
3567static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3568{
3569	int rc = X86EMUL_CONTINUE;
3570
3571	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3572		rc = jmp_rel(ctxt, ctxt->src.val);
3573
3574	return rc;
3575}
3576
3577static int em_in(struct x86_emulate_ctxt *ctxt)
3578{
3579	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3580			     &ctxt->dst.val))
3581		return X86EMUL_IO_NEEDED;
3582
3583	return X86EMUL_CONTINUE;
3584}
3585
3586static int em_out(struct x86_emulate_ctxt *ctxt)
3587{
3588	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3589				    &ctxt->src.val, 1);
3590	/* Disable writeback. */
3591	ctxt->dst.type = OP_NONE;
3592	return X86EMUL_CONTINUE;
3593}
3594
3595static int em_cli(struct x86_emulate_ctxt *ctxt)
3596{
3597	if (emulator_bad_iopl(ctxt))
3598		return emulate_gp(ctxt, 0);
3599
3600	ctxt->eflags &= ~X86_EFLAGS_IF;
3601	return X86EMUL_CONTINUE;
3602}
3603
3604static int em_sti(struct x86_emulate_ctxt *ctxt)
3605{
3606	if (emulator_bad_iopl(ctxt))
3607		return emulate_gp(ctxt, 0);
3608
3609	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3610	ctxt->eflags |= X86_EFLAGS_IF;
3611	return X86EMUL_CONTINUE;
3612}
3613
3614static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3615{
3616	u32 eax, ebx, ecx, edx;
3617	u64 msr = 0;
3618
3619	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3620	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3621	    ctxt->ops->cpl(ctxt)) {
3622		return emulate_gp(ctxt, 0);
3623	}
3624
3625	eax = reg_read(ctxt, VCPU_REGS_RAX);
3626	ecx = reg_read(ctxt, VCPU_REGS_RCX);
3627	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3628	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
3629	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3630	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3631	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
3632	return X86EMUL_CONTINUE;
3633}
3634
3635static int em_sahf(struct x86_emulate_ctxt *ctxt)
3636{
3637	u32 flags;
3638
3639	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3640		X86_EFLAGS_SF;
3641	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3642
3643	ctxt->eflags &= ~0xffUL;
3644	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3645	return X86EMUL_CONTINUE;
3646}
3647
3648static int em_lahf(struct x86_emulate_ctxt *ctxt)
3649{
3650	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3651	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3652	return X86EMUL_CONTINUE;
3653}
3654
3655static int em_bswap(struct x86_emulate_ctxt *ctxt)
3656{
3657	switch (ctxt->op_bytes) {
3658#ifdef CONFIG_X86_64
3659	case 8:
3660		asm("bswap %0" : "+r"(ctxt->dst.val));
3661		break;
3662#endif
3663	default:
3664		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3665		break;
3666	}
3667	return X86EMUL_CONTINUE;
3668}
3669
3670static int em_clflush(struct x86_emulate_ctxt *ctxt)
3671{
3672	/* emulating clflush regardless of cpuid */
3673	return X86EMUL_CONTINUE;
3674}
3675
3676static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
3677{
3678	/* emulating clflushopt regardless of cpuid */
3679	return X86EMUL_CONTINUE;
3680}
3681
3682static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3683{
3684	ctxt->dst.val = (s32) ctxt->src.val;
3685	return X86EMUL_CONTINUE;
3686}
3687
3688static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3689{
3690	if (!ctxt->ops->guest_has_fxsr(ctxt))
3691		return emulate_ud(ctxt);
3692
3693	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3694		return emulate_nm(ctxt);
3695
3696	/*
3697	 * Rather than work around the lack of fxsave64/fxrstor64 on old
3698	 * compilers, don't emulate a case that should never be hit.
3699	 */
3700	if (ctxt->mode >= X86EMUL_MODE_PROT64)
3701		return X86EMUL_UNHANDLEABLE;
3702
3703	return X86EMUL_CONTINUE;
3704}
3705
3706/*
3707 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
3708 * and restore MXCSR.
3709 */
3710static size_t __fxstate_size(int nregs)
3711{
3712	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
3713}
3714
3715static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
3716{
3717	bool cr4_osfxsr;
3718	if (ctxt->mode == X86EMUL_MODE_PROT64)
3719		return __fxstate_size(16);
3720
3721	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
3722	return __fxstate_size(cr4_osfxsr ? 8 : 0);
3723}
3724
3725/*
3726 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
3727 *  1) 16 bit mode
3728 *  2) 32 bit mode
3729 *     - like (1), but FIP and FDP are only 16 bit.  At least Intel CPUs
3730 *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
3731 *       save and restore
3732 *  3) 64-bit mode with REX.W prefix
3733 *     - like (2), but XMM 8-15 are being saved and restored
3734 *  4) 64-bit mode without REX.W prefix
3735 *     - like (3), but FIP and FDP are 64 bit
3736 *
3737 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
3738 * desired result.  (4) is not emulated.
3739 *
3740 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
3741 * and FPU DS) should match.
3742 */
3743static int em_fxsave(struct x86_emulate_ctxt *ctxt)
3744{
3745	struct fxregs_state fx_state;
3746	int rc;
3747
3748	rc = check_fxsr(ctxt);
3749	if (rc != X86EMUL_CONTINUE)
3750		return rc;
3751
3752	kvm_fpu_get();
3753
3754	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
3755
3756	kvm_fpu_put();
3757
3758	if (rc != X86EMUL_CONTINUE)
3759		return rc;
3760
3761	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
3762		                   fxstate_size(ctxt));
3763}
3764
3765/*
3766 * FXRSTOR might restore XMM registers not provided by the guest. Fill
3767 * in the host registers (via FXSAVE) instead, so they won't be modified.
3768 * (preemption has to stay disabled until FXRSTOR).
3769 *
3770 * Use noinline to keep the stack for other functions called by callers small.
3771 */
3772static noinline int fxregs_fixup(struct fxregs_state *fx_state,
3773				 const size_t used_size)
3774{
3775	struct fxregs_state fx_tmp;
3776	int rc;
3777
3778	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
3779	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
3780	       __fxstate_size(16) - used_size);
3781
3782	return rc;
3783}
3784
3785static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
3786{
3787	struct fxregs_state fx_state;
3788	int rc;
3789	size_t size;
3790
3791	rc = check_fxsr(ctxt);
3792	if (rc != X86EMUL_CONTINUE)
3793		return rc;
3794
3795	size = fxstate_size(ctxt);
3796	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
3797	if (rc != X86EMUL_CONTINUE)
3798		return rc;
3799
3800	kvm_fpu_get();
3801
3802	if (size < __fxstate_size(16)) {
3803		rc = fxregs_fixup(&fx_state, size);
3804		if (rc != X86EMUL_CONTINUE)
3805			goto out;
3806	}
3807
3808	if (fx_state.mxcsr >> 16) {
3809		rc = emulate_gp(ctxt, 0);
3810		goto out;
3811	}
3812
3813	if (rc == X86EMUL_CONTINUE)
3814		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
3815
3816out:
3817	kvm_fpu_put();
3818
3819	return rc;
3820}
3821
3822static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
3823{
3824	u32 eax, ecx, edx;
3825
3826	if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
3827		return emulate_ud(ctxt);
3828
3829	eax = reg_read(ctxt, VCPU_REGS_RAX);
3830	edx = reg_read(ctxt, VCPU_REGS_RDX);
3831	ecx = reg_read(ctxt, VCPU_REGS_RCX);
3832
3833	if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
3834		return emulate_gp(ctxt, 0);
3835
3836	return X86EMUL_CONTINUE;
3837}
3838
3839static bool valid_cr(int nr)
3840{
3841	switch (nr) {
3842	case 0:
3843	case 2 ... 4:
3844	case 8:
3845		return true;
3846	default:
3847		return false;
3848	}
3849}
3850
3851static int check_cr_access(struct x86_emulate_ctxt *ctxt)
3852{
3853	if (!valid_cr(ctxt->modrm_reg))
3854		return emulate_ud(ctxt);
3855
3856	return X86EMUL_CONTINUE;
3857}
3858
3859static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3860{
3861	unsigned long dr7;
3862
3863	ctxt->ops->get_dr(ctxt, 7, &dr7);
3864
3865	return dr7 & DR7_GD;
3866}
3867
3868static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3869{
3870	int dr = ctxt->modrm_reg;
3871	u64 cr4;
3872
3873	if (dr > 7)
3874		return emulate_ud(ctxt);
3875
3876	cr4 = ctxt->ops->get_cr(ctxt, 4);
3877	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3878		return emulate_ud(ctxt);
3879
3880	if (check_dr7_gd(ctxt)) {
3881		ulong dr6;
3882
3883		ctxt->ops->get_dr(ctxt, 6, &dr6);
3884		dr6 &= ~DR_TRAP_BITS;
3885		dr6 |= DR6_BD | DR6_ACTIVE_LOW;
3886		ctxt->ops->set_dr(ctxt, 6, dr6);
3887		return emulate_db(ctxt);
3888	}
3889
3890	return X86EMUL_CONTINUE;
3891}
3892
3893static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3894{
3895	u64 new_val = ctxt->src.val64;
3896	int dr = ctxt->modrm_reg;
3897
3898	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3899		return emulate_gp(ctxt, 0);
3900
3901	return check_dr_read(ctxt);
3902}
3903
3904static int check_svme(struct x86_emulate_ctxt *ctxt)
3905{
3906	u64 efer = 0;
3907
3908	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3909
3910	if (!(efer & EFER_SVME))
3911		return emulate_ud(ctxt);
3912
3913	return X86EMUL_CONTINUE;
3914}
3915
3916static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3917{
3918	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3919
3920	/* Valid physical address? */
3921	if (rax & 0xffff000000000000ULL)
3922		return emulate_gp(ctxt, 0);
3923
3924	return check_svme(ctxt);
3925}
3926
3927static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3928{
3929	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3930
3931	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3932		return emulate_gp(ctxt, 0);
3933
3934	return X86EMUL_CONTINUE;
3935}
3936
3937static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3938{
3939	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3940	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3941
3942	/*
3943	 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
3944	 * in Ring3 when CR4.PCE=0.
3945	 */
3946	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
3947		return X86EMUL_CONTINUE;
3948
3949	/*
3950	 * If CR4.PCE is clear, the SDM requires CPL=0 or CR0.PE=0.  The CR0.PE
3951	 * check, however, is unnecessary because CPL is always 0 outside
3952	 * protected mode.
3953	 */
3954	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3955	    ctxt->ops->check_pmc(ctxt, rcx))
3956		return emulate_gp(ctxt, 0);
3957
3958	return X86EMUL_CONTINUE;
3959}
3960
3961static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3962{
3963	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3964	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3965		return emulate_gp(ctxt, 0);
3966
3967	return X86EMUL_CONTINUE;
3968}
3969
3970static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3971{
3972	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3973	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3974		return emulate_gp(ctxt, 0);
3975
3976	return X86EMUL_CONTINUE;
3977}
3978
3979#define D(_y) { .flags = (_y) }
3980#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3981#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3982		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
3983#define N    D(NotImpl)
3984#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3985#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3986#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3987#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
3988#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
3989#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3990#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3991#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3992#define II(_f, _e, _i) \
3993	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3994#define IIP(_f, _e, _i, _p) \
3995	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3996	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
3997#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3998
3999#define D2bv(_f)      D((_f) | ByteOp), D(_f)
4000#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4001#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
4002#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
4003#define I2bvIP(_f, _e, _i, _p) \
4004	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4005
4006#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
4007		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
4008		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
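/*
 * A legend for the tables below, as defined by the macros above: D() is a
 * decode-flags-only entry and N an unimplemented one; I()/F() attach an
 * execute or fastop callback; DI()/II() additionally record an intercept,
 * and DIP()/IIP() an intercept plus a permission check.  G(), GD(), EXT(),
 * ID(), MD(), E() and GP() redirect decoding through group, group-dual,
 * ModRM.rm-extension, instruction-dual, mode-dual, escape and prefix
 * tables.  The *2bv macros emit a ByteOp entry followed by its word/long
 * twin, and F6ALU() emits the six classic ALU forms: r/m,reg and reg,r/m
 * (byte and full size) plus accumulator,imm (byte and full size).
 */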
4009
4010static const struct opcode group7_rm0[] = {
4011	N,
4012	I(SrcNone | Priv | EmulateOnUD,	em_hypercall),
4013	N, N, N, N, N, N,
4014};
4015
4016static const struct opcode group7_rm1[] = {
4017	DI(SrcNone | Priv, monitor),
4018	DI(SrcNone | Priv, mwait),
4019	N, N, N, N, N, N,
4020};
4021
4022static const struct opcode group7_rm2[] = {
4023	N,
4024	II(ImplicitOps | Priv,			em_xsetbv,	xsetbv),
4025	N, N, N, N, N, N,
4026};
4027
4028static const struct opcode group7_rm3[] = {
4029	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
4030	II(SrcNone  | Prot | EmulateOnUD,	em_hypercall,	vmmcall),
4031	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
4032	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
4033	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
4034	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
4035	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
4036	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
4037};
4038
4039static const struct opcode group7_rm7[] = {
4040	N,
4041	DIP(SrcNone, rdtscp, check_rdtsc),
4042	N, N, N, N, N, N,
4043};
4044
4045static const struct opcode group1[] = {
4046	F(Lock, em_add),
4047	F(Lock | PageTable, em_or),
4048	F(Lock, em_adc),
4049	F(Lock, em_sbb),
4050	F(Lock | PageTable, em_and),
4051	F(Lock, em_sub),
4052	F(Lock, em_xor),
4053	F(NoWrite, em_cmp),
4054};
4055
4056static const struct opcode group1A[] = {
4057	I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4058};
4059
4060static const struct opcode group2[] = {
4061	F(DstMem | ModRM, em_rol),
4062	F(DstMem | ModRM, em_ror),
4063	F(DstMem | ModRM, em_rcl),
4064	F(DstMem | ModRM, em_rcr),
4065	F(DstMem | ModRM, em_shl),
4066	F(DstMem | ModRM, em_shr),
4067	F(DstMem | ModRM, em_shl),
4068	F(DstMem | ModRM, em_sar),
4069};
4070
4071static const struct opcode group3[] = {
4072	F(DstMem | SrcImm | NoWrite, em_test),
4073	F(DstMem | SrcImm | NoWrite, em_test),
4074	F(DstMem | SrcNone | Lock, em_not),
4075	F(DstMem | SrcNone | Lock, em_neg),
4076	F(DstXacc | Src2Mem, em_mul_ex),
4077	F(DstXacc | Src2Mem, em_imul_ex),
4078	F(DstXacc | Src2Mem, em_div_ex),
4079	F(DstXacc | Src2Mem, em_idiv_ex),
4080};
4081
4082static const struct opcode group4[] = {
4083	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4084	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4085	N, N, N, N, N, N,
4086};
4087
4088static const struct opcode group5[] = {
4089	F(DstMem | SrcNone | Lock,		em_inc),
4090	F(DstMem | SrcNone | Lock,		em_dec),
4091	I(SrcMem | NearBranch | IsBranch,       em_call_near_abs),
4092	I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far),
4093	I(SrcMem | NearBranch | IsBranch,       em_jmp_abs),
4094	I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far),
4095	I(SrcMem | Stack | TwoMemOp,		em_push), D(Undefined),
4096};
4097
4098static const struct opcode group6[] = {
4099	II(Prot | DstMem,	   em_sldt, sldt),
4100	II(Prot | DstMem,	   em_str, str),
4101	II(Prot | Priv | SrcMem16, em_lldt, lldt),
4102	II(Prot | Priv | SrcMem16, em_ltr, ltr),
4103	N, N, N, N,
4104};
4105
4106static const struct group_dual group7 = { {
4107	II(Mov | DstMem,			em_sgdt, sgdt),
4108	II(Mov | DstMem,			em_sidt, sidt),
4109	II(SrcMem | Priv,			em_lgdt, lgdt),
4110	II(SrcMem | Priv,			em_lidt, lidt),
4111	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4112	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4113	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
4114}, {
4115	EXT(0, group7_rm0),
4116	EXT(0, group7_rm1),
4117	EXT(0, group7_rm2),
4118	EXT(0, group7_rm3),
4119	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4120	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4121	EXT(0, group7_rm7),
4122} };
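/*
 * group7 illustrates the GroupDual mechanism: the first table applies when
 * ModRM.mod selects a memory operand (mod != 3), the second when it selects
 * a register (mod == 3); there the EXT() slots re-dispatch on ModRM.rm
 * through the group7_rm* tables above.
 */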
4123
4124static const struct opcode group8[] = {
4125	N, N, N, N,
4126	F(DstMem | SrcImmByte | NoWrite,		em_bt),
4127	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
4128	F(DstMem | SrcImmByte | Lock,			em_btr),
4129	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
4130};
4131
4132/*
4133 * The "memory" destination is actually always a register, since we come
4134 * from the register case of group9.
4135 */
4136static const struct gprefix pfx_0f_c7_7 = {
4137	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
4138};
4139
4140
4141static const struct group_dual group9 = { {
4142	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4143}, {
4144	N, N, N, N, N, N, N,
4145	GP(0, &pfx_0f_c7_7),
4146} };
4147
4148static const struct opcode group11[] = {
4149	I(DstMem | SrcImm | Mov | PageTable, em_mov),
4150	X7(D(Undefined)),
4151};
4152
4153static const struct gprefix pfx_0f_ae_7 = {
4154	I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
4155};
4156
4157static const struct group_dual group15 = { {
4158	I(ModRM | Aligned16, em_fxsave),
4159	I(ModRM | Aligned16, em_fxrstor),
4160	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4161}, {
4162	N, N, N, N, N, N, N, N,
4163} };
4164
4165static const struct gprefix pfx_0f_6f_0f_7f = {
4166	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4167};
4168
4169static const struct instr_dual instr_dual_0f_2b = {
4170	I(0, em_mov), N
4171};
4172
4173static const struct gprefix pfx_0f_2b = {
4174	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4175};
4176
4177static const struct gprefix pfx_0f_10_0f_11 = {
4178	I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4179};
4180
4181static const struct gprefix pfx_0f_28_0f_29 = {
4182	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4183};
4184
4185static const struct gprefix pfx_0f_e7 = {
4186	N, I(Sse, em_mov), N, N,
4187};
4188
4189static const struct escape escape_d9 = { {
4190	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4191}, {
4192	/* 0xC0 - 0xC7 */
4193	N, N, N, N, N, N, N, N,
4194	/* 0xC8 - 0xCF */
4195	N, N, N, N, N, N, N, N,
4196	/* 0xD0 - 0xD7 */
4197	N, N, N, N, N, N, N, N,
4198	/* 0xD8 - 0xDF */
4199	N, N, N, N, N, N, N, N,
4200	/* 0xE0 - 0xE7 */
4201	N, N, N, N, N, N, N, N,
4202	/* 0xE8 - 0xEF */
4203	N, N, N, N, N, N, N, N,
4204	/* 0xF0 - 0xF7 */
4205	N, N, N, N, N, N, N, N,
4206	/* 0xF8 - 0xFF */
4207	N, N, N, N, N, N, N, N,
4208} };
4209
4210static const struct escape escape_db = { {
4211	N, N, N, N, N, N, N, N,
4212}, {
4213	/* 0xC0 - 0xC7 */
4214	N, N, N, N, N, N, N, N,
4215	/* 0xC8 - 0xCF */
4216	N, N, N, N, N, N, N, N,
4217	/* 0xD0 - 0xD7 */
4218	N, N, N, N, N, N, N, N,
4219	/* 0xD8 - 0xDF */
4220	N, N, N, N, N, N, N, N,
4221	/* 0xE0 - 0xE7 */
4222	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4223	/* 0xE8 - 0xEF */
4224	N, N, N, N, N, N, N, N,
4225	/* 0xF0 - 0xF7 */
4226	N, N, N, N, N, N, N, N,
4227	/* 0xF8 - 0xFF */
4228	N, N, N, N, N, N, N, N,
4229} };
4230
4231static const struct escape escape_dd = { {
4232	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4233}, {
4234	/* 0xC0 - 0xC7 */
4235	N, N, N, N, N, N, N, N,
4236	/* 0xC8 - 0xCF */
4237	N, N, N, N, N, N, N, N,
4238	/* 0xD0 - 0xD7 */
4239	N, N, N, N, N, N, N, N,
4240	/* 0xD8 - 0xDF */
4241	N, N, N, N, N, N, N, N,
4242	/* 0xE0 - 0xE7 */
4243	N, N, N, N, N, N, N, N,
4244	/* 0xE8 - 0xEF */
4245	N, N, N, N, N, N, N, N,
4246	/* 0xF0 - 0xF7 */
4247	N, N, N, N, N, N, N, N,
4248	/* 0xF8 - 0xFF */
4249	N, N, N, N, N, N, N, N,
4250} };
4251
4252static const struct instr_dual instr_dual_0f_c3 = {
4253	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4254};
4255
4256static const struct mode_dual mode_dual_63 = {
4257	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4258};
4259
4260static const struct instr_dual instr_dual_8d = {
4261	D(DstReg | SrcMem | ModRM | NoAccess), N
4262};
4263
4264static const struct opcode opcode_table[256] = {
4265	/* 0x00 - 0x07 */
4266	F6ALU(Lock, em_add),
4267	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4268	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4269	/* 0x08 - 0x0F */
4270	F6ALU(Lock | PageTable, em_or),
4271	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4272	N,
4273	/* 0x10 - 0x17 */
4274	F6ALU(Lock, em_adc),
4275	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4276	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4277	/* 0x18 - 0x1F */
4278	F6ALU(Lock, em_sbb),
4279	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4280	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4281	/* 0x20 - 0x27 */
4282	F6ALU(Lock | PageTable, em_and), N, N,
4283	/* 0x28 - 0x2F */
4284	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4285	/* 0x30 - 0x37 */
4286	F6ALU(Lock, em_xor), N, N,
4287	/* 0x38 - 0x3F */
4288	F6ALU(NoWrite, em_cmp), N, N,
4289	/* 0x40 - 0x4F */
4290	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4291	/* 0x50 - 0x57 */
4292	X8(I(SrcReg | Stack, em_push)),
4293	/* 0x58 - 0x5F */
4294	X8(I(DstReg | Stack, em_pop)),
4295	/* 0x60 - 0x67 */
4296	I(ImplicitOps | Stack | No64, em_pusha),
4297	I(ImplicitOps | Stack | No64, em_popa),
4298	N, MD(ModRM, &mode_dual_63),
4299	N, N, N, N,
4300	/* 0x68 - 0x6F */
4301	I(SrcImm | Mov | Stack, em_push),
4302	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4303	I(SrcImmByte | Mov | Stack, em_push),
4304	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4305	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4306	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4307	/* 0x70 - 0x7F */
4308	X16(D(SrcImmByte | NearBranch | IsBranch)),
4309	/* 0x80 - 0x87 */
4310	G(ByteOp | DstMem | SrcImm, group1),
4311	G(DstMem | SrcImm, group1),
4312	G(ByteOp | DstMem | SrcImm | No64, group1),
4313	G(DstMem | SrcImmByte, group1),
4314	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4315	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4316	/* 0x88 - 0x8F */
4317	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4318	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4319	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4320	ID(0, &instr_dual_8d),
4321	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4322	G(0, group1A),
4323	/* 0x90 - 0x97 */
4324	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4325	/* 0x98 - 0x9F */
4326	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4327	I(SrcImmFAddr | No64 | IsBranch, em_call_far), N,
4328	II(ImplicitOps | Stack, em_pushf, pushf),
4329	II(ImplicitOps | Stack, em_popf, popf),
4330	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4331	/* 0xA0 - 0xA7 */
4332	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4333	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4334	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4335	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4336	/* 0xA8 - 0xAF */
4337	F2bv(DstAcc | SrcImm | NoWrite, em_test),
4338	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4339	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4340	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4341	/* 0xB0 - 0xB7 */
4342	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4343	/* 0xB8 - 0xBF */
4344	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4345	/* 0xC0 - 0xC7 */
4346	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4347	I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch, em_ret_near_imm),
4348	I(ImplicitOps | NearBranch | IsBranch, em_ret),
4349	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4350	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4351	G(ByteOp, group11), G(0, group11),
4352	/* 0xC8 - 0xCF */
4353	I(Stack | SrcImmU16 | Src2ImmByte | IsBranch, em_enter),
4354	I(Stack | IsBranch, em_leave),
4355	I(ImplicitOps | SrcImmU16 | IsBranch, em_ret_far_imm),
4356	I(ImplicitOps | IsBranch, em_ret_far),
4357	D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn),
4358	D(ImplicitOps | No64 | IsBranch),
4359	II(ImplicitOps | IsBranch, em_iret, iret),
4360	/* 0xD0 - 0xD7 */
4361	G(Src2One | ByteOp, group2), G(Src2One, group2),
4362	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4363	I(DstAcc | SrcImmUByte | No64, em_aam),
4364	I(DstAcc | SrcImmUByte | No64, em_aad),
4365	F(DstAcc | ByteOp | No64, em_salc),
4366	I(DstAcc | SrcXLat | ByteOp, em_mov),
4367	/* 0xD8 - 0xDF */
4368	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4369	/* 0xE0 - 0xE7 */
4370	X3(I(SrcImmByte | NearBranch | IsBranch, em_loop)),
4371	I(SrcImmByte | NearBranch | IsBranch, em_jcxz),
4372	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
4373	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4374	/* 0xE8 - 0xEF */
4375	I(SrcImm | NearBranch | IsBranch, em_call),
4376	D(SrcImm | ImplicitOps | NearBranch | IsBranch),
4377	I(SrcImmFAddr | No64 | IsBranch, em_jmp_far),
4378	D(SrcImmByte | ImplicitOps | NearBranch | IsBranch),
4379	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
4380	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4381	/* 0xF0 - 0xF7 */
4382	N, DI(ImplicitOps, icebp), N, N,
4383	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4384	G(ByteOp, group3), G(0, group3),
4385	/* 0xF8 - 0xFF */
4386	D(ImplicitOps), D(ImplicitOps),
4387	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4388	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4389};
4390
4391static const struct opcode twobyte_table[256] = {
4392	/* 0x00 - 0x0F */
4393	G(0, group6), GD(0, &group7), N, N,
4394	N, I(ImplicitOps | EmulateOnUD | IsBranch, em_syscall),
4395	II(ImplicitOps | Priv, em_clts, clts), N,
4396	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4397	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4398	/* 0x10 - 0x1F */
4399	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4400	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4401	N, N, N, N, N, N,
4402	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
4403	D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4404	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4405	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4406	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4407	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
4408	/* 0x20 - 0x2F */
4409	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
4410	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4411	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4412						check_cr_access),
4413	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4414						check_dr_write),
4415	N, N, N, N,
4416	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4417	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4418	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4419	N, N, N, N,
4420	/* 0x30 - 0x3F */
4421	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4422	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4423	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4424	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4425	I(ImplicitOps | EmulateOnUD | IsBranch, em_sysenter),
4426	I(ImplicitOps | Priv | EmulateOnUD | IsBranch, em_sysexit),
4427	N, N,
4428	N, N, N, N, N, N, N, N,
4429	/* 0x40 - 0x4F */
4430	X16(D(DstReg | SrcMem | ModRM)),
4431	/* 0x50 - 0x5F */
4432	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4433	/* 0x60 - 0x6F */
4434	N, N, N, N,
4435	N, N, N, N,
4436	N, N, N, N,
4437	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4438	/* 0x70 - 0x7F */
4439	N, N, N, N,
4440	N, N, N, N,
4441	N, N, N, N,
4442	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4443	/* 0x80 - 0x8F */
4444	X16(D(SrcImm | NearBranch | IsBranch)),
4445	/* 0x90 - 0x9F */
4446	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4447	/* 0xA0 - 0xA7 */
4448	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4449	II(ImplicitOps, em_cpuid, cpuid),
4450	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4451	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4452	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4453	/* 0xA8 - 0xAF */
4454	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4455	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4456	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4457	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4458	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4459	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4460	/* 0xB0 - 0xB7 */
4461	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4462	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4463	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4464	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4465	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4466	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4467	/* 0xB8 - 0xBF */
4468	N, N,
4469	G(BitOp, group8),
4470	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4471	I(DstReg | SrcMem | ModRM, em_bsf_c),
4472	I(DstReg | SrcMem | ModRM, em_bsr_c),
4473	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4474	/* 0xC0 - 0xC7 */
4475	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4476	N, ID(0, &instr_dual_0f_c3),
4477	N, N, N, GD(0, &group9),
4478	/* 0xC8 - 0xCF */
4479	X8(I(DstReg, em_bswap)),
4480	/* 0xD0 - 0xDF */
4481	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4482	/* 0xE0 - 0xEF */
4483	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4484	N, N, N, N, N, N, N, N,
4485	/* 0xF0 - 0xFF */
4486	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4487};
4488
4489static const struct instr_dual instr_dual_0f_38_f0 = {
4490	I(DstReg | SrcMem | Mov, em_movbe), N
4491};
4492
4493static const struct instr_dual instr_dual_0f_38_f1 = {
4494	I(DstMem | SrcReg | Mov, em_movbe), N
4495};
4496
4497static const struct gprefix three_byte_0f_38_f0 = {
4498	ID(0, &instr_dual_0f_38_f0), N, N, N
4499};
4500
4501static const struct gprefix three_byte_0f_38_f1 = {
4502	ID(0, &instr_dual_0f_38_f1), N, N, N
4503};
4504
4505/*
4506 * The insns below are selected by the 66/F2/F3 prefix; the table itself
4507 * is indexed by the third opcode byte.
4508 */
4509static const struct opcode opcode_map_0f_38[256] = {
4510	/* 0x00 - 0x7f */
4511	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4512	/* 0x80 - 0xef */
4513	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4514	/* 0xf0 - 0xf1 */
4515	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4516	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4517	/* 0xf2 - 0xff */
4518	N, N, X4(N), X8(N)
4519};
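/*
 * For example, decoding "0f 38 f0" lands on the GP() entry above; with no
 * mandatory prefix that selects ID(0, &instr_dual_0f_38_f0), which in turn
 * yields MOVBE reg, mem for mod != 3 and leaves mod == 3 undefined.
 */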
4520
4521#undef D
4522#undef N
4523#undef G
4524#undef GD
4525#undef I
4526#undef GP
4527#undef EXT
4528#undef MD
4529#undef ID
4530
4531#undef D2bv
4532#undef D2bvIP
4533#undef I2bv
4534#undef I2bvIP
4535#undef I6ALU
4536
4537static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4538{
4539	unsigned size;
4540
4541	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4542	if (size == 8)
4543		size = 4;
4544	return size;
4545}
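/*
 * Even 8-byte operations fetch at most a 4-byte immediate here (it is
 * sign-extended by decode_imm()); the one exception, the 64-bit form of
 * MOV reg, imm (opcodes 0xb8-0xbf, SrcImm64), bypasses imm_size() via
 * OpImm64 and fetches a full 8-byte immediate.
 */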
4546
4547static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4548		      unsigned size, bool sign_extension)
4549{
4550	int rc = X86EMUL_CONTINUE;
4551
4552	op->type = OP_IMM;
4553	op->bytes = size;
4554	op->addr.mem.ea = ctxt->_eip;
4555	/* NB. Immediates are sign-extended as necessary. */
4556	switch (op->bytes) {
4557	case 1:
4558		op->val = insn_fetch(s8, ctxt);
4559		break;
4560	case 2:
4561		op->val = insn_fetch(s16, ctxt);
4562		break;
4563	case 4:
4564		op->val = insn_fetch(s32, ctxt);
4565		break;
4566	case 8:
4567		op->val = insn_fetch(s64, ctxt);
4568		break;
4569	}
4570	if (!sign_extension) {
4571		switch (op->bytes) {
4572		case 1:
4573			op->val &= 0xff;
4574			break;
4575		case 2:
4576			op->val &= 0xffff;
4577			break;
4578		case 4:
4579			op->val &= 0xffffffff;
4580			break;
4581		}
4582	}
4583done:
4584	return rc;
4585}
4586
4587static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4588			  unsigned d)
4589{
4590	int rc = X86EMUL_CONTINUE;
4591
4592	switch (d) {
4593	case OpReg:
4594		decode_register_operand(ctxt, op);
4595		break;
4596	case OpImmUByte:
4597		rc = decode_imm(ctxt, op, 1, false);
4598		break;
4599	case OpMem:
4600		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4601	mem_common:
4602		*op = ctxt->memop;
4603		ctxt->memopp = op;
4604		if (ctxt->d & BitOp)
4605			fetch_bit_operand(ctxt);
4606		op->orig_val = op->val;
4607		break;
4608	case OpMem64:
4609		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4610		goto mem_common;
4611	case OpAcc:
4612		op->type = OP_REG;
4613		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4614		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4615		fetch_register_operand(op);
4616		op->orig_val = op->val;
4617		break;
4618	case OpAccLo:
4619		op->type = OP_REG;
4620		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4621		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4622		fetch_register_operand(op);
4623		op->orig_val = op->val;
4624		break;
4625	case OpAccHi:
4626		if (ctxt->d & ByteOp) {
4627			op->type = OP_NONE;
4628			break;
4629		}
4630		op->type = OP_REG;
4631		op->bytes = ctxt->op_bytes;
4632		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4633		fetch_register_operand(op);
4634		op->orig_val = op->val;
4635		break;
4636	case OpDI:
4637		op->type = OP_MEM;
4638		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4639		op->addr.mem.ea =
4640			register_address(ctxt, VCPU_REGS_RDI);
4641		op->addr.mem.seg = VCPU_SREG_ES;
4642		op->val = 0;
4643		op->count = 1;
4644		break;
4645	case OpDX:
4646		op->type = OP_REG;
4647		op->bytes = 2;
4648		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4649		fetch_register_operand(op);
4650		break;
4651	case OpCL:
4652		op->type = OP_IMM;
4653		op->bytes = 1;
4654		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4655		break;
4656	case OpImmByte:
4657		rc = decode_imm(ctxt, op, 1, true);
4658		break;
4659	case OpOne:
4660		op->type = OP_IMM;
4661		op->bytes = 1;
4662		op->val = 1;
4663		break;
4664	case OpImm:
4665		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4666		break;
4667	case OpImm64:
4668		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4669		break;
4670	case OpMem8:
4671		ctxt->memop.bytes = 1;
4672		if (ctxt->memop.type == OP_REG) {
4673			ctxt->memop.addr.reg = decode_register(ctxt,
4674					ctxt->modrm_rm, true);
4675			fetch_register_operand(&ctxt->memop);
4676		}
4677		goto mem_common;
4678	case OpMem16:
4679		ctxt->memop.bytes = 2;
4680		goto mem_common;
4681	case OpMem32:
4682		ctxt->memop.bytes = 4;
4683		goto mem_common;
4684	case OpImmU16:
4685		rc = decode_imm(ctxt, op, 2, false);
4686		break;
4687	case OpImmU:
4688		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4689		break;
4690	case OpSI:
4691		op->type = OP_MEM;
4692		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4693		op->addr.mem.ea =
4694			register_address(ctxt, VCPU_REGS_RSI);
4695		op->addr.mem.seg = ctxt->seg_override;
4696		op->val = 0;
4697		op->count = 1;
4698		break;
4699	case OpXLat:
4700		op->type = OP_MEM;
4701		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4702		op->addr.mem.ea =
4703			address_mask(ctxt,
4704				reg_read(ctxt, VCPU_REGS_RBX) +
4705				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4706		op->addr.mem.seg = ctxt->seg_override;
4707		op->val = 0;
4708		break;
4709	case OpImmFAddr:
4710		op->type = OP_IMM;
4711		op->addr.mem.ea = ctxt->_eip;
4712		op->bytes = ctxt->op_bytes + 2;
4713		insn_fetch_arr(op->valptr, op->bytes, ctxt);
4714		break;
4715	case OpMemFAddr:
4716		ctxt->memop.bytes = ctxt->op_bytes + 2;
4717		goto mem_common;
4718	case OpES:
4719		op->type = OP_IMM;
4720		op->val = VCPU_SREG_ES;
4721		break;
4722	case OpCS:
4723		op->type = OP_IMM;
4724		op->val = VCPU_SREG_CS;
4725		break;
4726	case OpSS:
4727		op->type = OP_IMM;
4728		op->val = VCPU_SREG_SS;
4729		break;
4730	case OpDS:
4731		op->type = OP_IMM;
4732		op->val = VCPU_SREG_DS;
4733		break;
4734	case OpFS:
4735		op->type = OP_IMM;
4736		op->val = VCPU_SREG_FS;
4737		break;
4738	case OpGS:
4739		op->type = OP_IMM;
4740		op->val = VCPU_SREG_GS;
4741		break;
4742	case OpImplicit:
4743		/* Special instructions do their own operand decoding. */
4744	default:
4745		op->type = OP_NONE; /* Disable writeback. */
4746		break;
4747	}
4748
4749done:
4750	return rc;
4751}
4752
4753int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
4754{
4755	int rc = X86EMUL_CONTINUE;
4756	int mode = ctxt->mode;
4757	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4758	bool op_prefix = false;
4759	bool has_seg_override = false;
4760	struct opcode opcode;
4761	u16 dummy;
4762	struct desc_struct desc;
4763
4764	ctxt->memop.type = OP_NONE;
4765	ctxt->memopp = NULL;
4766	ctxt->_eip = ctxt->eip;
4767	ctxt->fetch.ptr = ctxt->fetch.data;
4768	ctxt->fetch.end = ctxt->fetch.data + insn_len;
4769	ctxt->opcode_len = 1;
4770	ctxt->intercept = x86_intercept_none;
4771	if (insn_len > 0)
4772		memcpy(ctxt->fetch.data, insn, insn_len);
4773	else {
4774		rc = __do_insn_fetch_bytes(ctxt, 1);
4775		if (rc != X86EMUL_CONTINUE)
4776			goto done;
4777	}
4778
4779	switch (mode) {
4780	case X86EMUL_MODE_REAL:
4781	case X86EMUL_MODE_VM86:
4782		def_op_bytes = def_ad_bytes = 2;
4783		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
4784		if (desc.d)
4785			def_op_bytes = def_ad_bytes = 4;
4786		break;
4787	case X86EMUL_MODE_PROT16:
4788		def_op_bytes = def_ad_bytes = 2;
4789		break;
4790	case X86EMUL_MODE_PROT32:
4791		def_op_bytes = def_ad_bytes = 4;
4792		break;
4793#ifdef CONFIG_X86_64
4794	case X86EMUL_MODE_PROT64:
4795		def_op_bytes = 4;
4796		def_ad_bytes = 8;
4797		break;
4798#endif
4799	default:
4800		return EMULATION_FAILED;
4801	}
4802
4803	ctxt->op_bytes = def_op_bytes;
4804	ctxt->ad_bytes = def_ad_bytes;
4805
4806	/* Legacy prefixes. */
4807	for (;;) {
4808		switch (ctxt->b = insn_fetch(u8, ctxt)) {
4809		case 0x66:	/* operand-size override */
4810			op_prefix = true;
4811			/* switch between 2/4 bytes */
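			/* 2 ^ 6 == 4 and 4 ^ 6 == 2: the XOR toggles the width (likewise ^ 12 for 4/8). */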
4812			ctxt->op_bytes = def_op_bytes ^ 6;
4813			break;
4814		case 0x67:	/* address-size override */
4815			if (mode == X86EMUL_MODE_PROT64)
4816				/* switch between 4/8 bytes */
4817				ctxt->ad_bytes = def_ad_bytes ^ 12;
4818			else
4819				/* switch between 2/4 bytes */
4820				ctxt->ad_bytes = def_ad_bytes ^ 6;
4821			break;
4822		case 0x26:	/* ES override */
4823			has_seg_override = true;
4824			ctxt->seg_override = VCPU_SREG_ES;
4825			break;
4826		case 0x2e:	/* CS override */
4827			has_seg_override = true;
4828			ctxt->seg_override = VCPU_SREG_CS;
4829			break;
4830		case 0x36:	/* SS override */
4831			has_seg_override = true;
4832			ctxt->seg_override = VCPU_SREG_SS;
4833			break;
4834		case 0x3e:	/* DS override */
4835			has_seg_override = true;
4836			ctxt->seg_override = VCPU_SREG_DS;
4837			break;
4838		case 0x64:	/* FS override */
4839			has_seg_override = true;
4840			ctxt->seg_override = VCPU_SREG_FS;
4841			break;
4842		case 0x65:	/* GS override */
4843			has_seg_override = true;
4844			ctxt->seg_override = VCPU_SREG_GS;
4845			break;
4846		case 0x40 ... 0x4f: /* REX */
4847			if (mode != X86EMUL_MODE_PROT64)
4848				goto done_prefixes;
4849			ctxt->rex_prefix = ctxt->b;
4850			continue;
4851		case 0xf0:	/* LOCK */
4852			ctxt->lock_prefix = 1;
4853			break;
4854		case 0xf2:	/* REPNE/REPNZ */
4855		case 0xf3:	/* REP/REPE/REPZ */
4856			ctxt->rep_prefix = ctxt->b;
4857			break;
4858		default:
4859			goto done_prefixes;
4860		}
4861
4862		/* Any legacy prefix after a REX prefix nullifies its effect. */
4863
4864		ctxt->rex_prefix = 0;
4865	}
4866
4867done_prefixes:
4868
4869	/* REX prefix. */
4870	if (ctxt->rex_prefix & 8)
4871		ctxt->op_bytes = 8;	/* REX.W */
4872
4873	/* Opcode byte(s). */
4874	opcode = opcode_table[ctxt->b];
4875	/* Two-byte opcode? */
4876	if (ctxt->b == 0x0f) {
4877		ctxt->opcode_len = 2;
4878		ctxt->b = insn_fetch(u8, ctxt);
4879		opcode = twobyte_table[ctxt->b];
4880
4881		/* 0F_38 opcode map */
4882		if (ctxt->b == 0x38) {
4883			ctxt->opcode_len = 3;
4884			ctxt->b = insn_fetch(u8, ctxt);
4885			opcode = opcode_map_0f_38[ctxt->b];
4886		}
4887	}
4888	ctxt->d = opcode.flags;
4889
4890	if (ctxt->d & ModRM)
4891		ctxt->modrm = insn_fetch(u8, ctxt);
4892
4893	/* vex-prefix instructions are not implemented */
4894	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4895	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4896		ctxt->d = NotImpl;
4897	}
4898
4899	while (ctxt->d & GroupMask) {
4900		switch (ctxt->d & GroupMask) {
4901		case Group:
4902			goffset = (ctxt->modrm >> 3) & 7;
4903			opcode = opcode.u.group[goffset];
4904			break;
4905		case GroupDual:
4906			goffset = (ctxt->modrm >> 3) & 7;
4907			if ((ctxt->modrm >> 6) == 3)
4908				opcode = opcode.u.gdual->mod3[goffset];
4909			else
4910				opcode = opcode.u.gdual->mod012[goffset];
4911			break;
4912		case RMExt:
4913			goffset = ctxt->modrm & 7;
4914			opcode = opcode.u.group[goffset];
4915			break;
4916		case Prefix:
4917			if (ctxt->rep_prefix && op_prefix)
4918				return EMULATION_FAILED;
4919			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4920			switch (simd_prefix) {
4921			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4922			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4923			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4924			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4925			}
4926			break;
4927		case Escape:
4928			if (ctxt->modrm > 0xbf) {
4929				size_t size = ARRAY_SIZE(opcode.u.esc->high);
4930				u32 index = array_index_nospec(
4931					ctxt->modrm - 0xc0, size);
4932
4933				opcode = opcode.u.esc->high[index];
4934			} else {
4935				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4936			}
4937			break;
4938		case InstrDual:
4939			if ((ctxt->modrm >> 6) == 3)
4940				opcode = opcode.u.idual->mod3;
4941			else
4942				opcode = opcode.u.idual->mod012;
4943			break;
4944		case ModeDual:
4945			if (ctxt->mode == X86EMUL_MODE_PROT64)
4946				opcode = opcode.u.mdual->mode64;
4947			else
4948				opcode = opcode.u.mdual->mode32;
4949			break;
4950		default:
4951			return EMULATION_FAILED;
4952		}
4953
4954		ctxt->d &= ~(u64)GroupMask;
4955		ctxt->d |= opcode.flags;
4956	}
4957
4958	ctxt->is_branch = opcode.flags & IsBranch;
4959
4960	/* Unrecognised? */
4961	if (ctxt->d == 0)
4962		return EMULATION_FAILED;
4963
4964	ctxt->execute = opcode.u.execute;
4965
4966	if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
4967	    likely(!(ctxt->d & EmulateOnUD)))
4968		return EMULATION_FAILED;
4969
4970	if (unlikely(ctxt->d &
4971	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4972	     No16))) {
4973		/*
4974		 * These are copied unconditionally here, and checked unconditionally
4975		 * in x86_emulate_insn.
4976		 */
4977		ctxt->check_perm = opcode.check_perm;
4978		ctxt->intercept = opcode.intercept;
4979
4980		if (ctxt->d & NotImpl)
4981			return EMULATION_FAILED;
4982
4983		if (mode == X86EMUL_MODE_PROT64) {
4984			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4985				ctxt->op_bytes = 8;
4986			else if (ctxt->d & NearBranch)
4987				ctxt->op_bytes = 8;
4988		}
4989
4990		if (ctxt->d & Op3264) {
4991			if (mode == X86EMUL_MODE_PROT64)
4992				ctxt->op_bytes = 8;
4993			else
4994				ctxt->op_bytes = 4;
4995		}
4996
4997		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4998			ctxt->op_bytes = 4;
4999
5000		if (ctxt->d & Sse)
5001			ctxt->op_bytes = 16;
5002		else if (ctxt->d & Mmx)
5003			ctxt->op_bytes = 8;
5004	}
5005
5006	/* ModRM and SIB bytes. */
5007	if (ctxt->d & ModRM) {
5008		rc = decode_modrm(ctxt, &ctxt->memop);
5009		if (!has_seg_override) {
5010			has_seg_override = true;
5011			ctxt->seg_override = ctxt->modrm_seg;
5012		}
5013	} else if (ctxt->d & MemAbs)
5014		rc = decode_abs(ctxt, &ctxt->memop);
5015	if (rc != X86EMUL_CONTINUE)
5016		goto done;
5017
5018	if (!has_seg_override)
5019		ctxt->seg_override = VCPU_SREG_DS;
5020
5021	ctxt->memop.addr.mem.seg = ctxt->seg_override;
5022
5023	/*
5024	 * Decode and fetch the source operand: register, memory
5025	 * or immediate.
5026	 */
5027	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5028	if (rc != X86EMUL_CONTINUE)
5029		goto done;
5030
5031	/*
5032	 * Decode and fetch the second source operand: register, memory
5033	 * or immediate.
5034	 */
5035	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5036	if (rc != X86EMUL_CONTINUE)
5037		goto done;
5038
5039	/* Decode and fetch the destination operand: register or memory. */
5040	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5041
5042	if (ctxt->rip_relative && likely(ctxt->memopp))
5043		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5044					ctxt->memopp->addr.mem.ea + ctxt->_eip);
5045
5046done:
5047	if (rc == X86EMUL_PROPAGATE_FAULT)
5048		ctxt->have_exception = true;
5049	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5050}
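/*
 * A worked example (illustrative only): for "66 0f b6 ca" in 32-bit
 * protected mode, the 0x66 prefix switches op_bytes from 4 to 2, 0x0f
 * selects twobyte_table, and 0xb6 decodes as MOVZX (DstReg | SrcMem8 |
 * ModRM | Mov); ModRM 0xca (mod == 3, reg == 1, rm == 2) then picks
 * register operands, i.e. "movzx cx, dl".
 */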
5051
5052bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5053{
5054	return ctxt->d & PageTable;
5055}
5056
5057static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5058{
5059	/* The second termination condition applies only to REPE
5060	 * and REPNE. If the repeat string operation prefix is
5061	 * REPE/REPZ or REPNE/REPNZ, test the corresponding
5062	 * termination condition:
5063	 * 	- if REPE/REPZ and ZF = 0 then done
5064	 * 	- if REPNE/REPNZ and ZF = 1 then done
5065	 */
5066	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5067	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5068	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
5069		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5070		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
5071		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5072		return true;
5073
5074	return false;
5075}
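/*
 * E.g. a "repe cmpsb" ends either when RCX reaches zero (the first
 * condition, common to all REP prefixes and checked in x86_emulate_insn())
 * or as soon as a comparison clears ZF, per the second condition above.
 */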
5076
5077static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5078{
5079	int rc;
5080
5081	kvm_fpu_get();
5082	rc = asm_safe("fwait");
5083	kvm_fpu_put();
5084
5085	if (unlikely(rc != X86EMUL_CONTINUE))
5086		return emulate_exception(ctxt, MF_VECTOR, 0, false);
5087
5088	return X86EMUL_CONTINUE;
5089}
5090
5091static void fetch_possible_mmx_operand(struct operand *op)
5092{
5093	if (op->type == OP_MM)
5094		kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
5095}
5096
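/*
 * Fastop calling convention, as set up by the asm below: dst.val is passed
 * in RAX, src.val in RDX, src2.val in RCX, and the pointer to the
 * size-specific handler (fop + log2(dst.bytes) * FASTOP_SIZE, see the
 * FASTOP machinery earlier in this file) in RSI.  The guest's arithmetic
 * flags are installed with push/popf around the indirect call and read
 * back with pushf/pop afterwards.
 */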
5097static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
5098{
5099	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5100
5101	if (!(ctxt->d & ByteOp))
5102		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5103
5104	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5105	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5106	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5107	    : "c"(ctxt->src2.val));
5108
5109	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5110	if (!fop) /* exception is returned in fop variable */
5111		return emulate_de(ctxt);
5112	return X86EMUL_CONTINUE;
5113}
5114
5115void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5116{
5117	/* Clear fields that are set conditionally but read without a guard. */
5118	ctxt->rip_relative = false;
5119	ctxt->rex_prefix = 0;
5120	ctxt->lock_prefix = 0;
5121	ctxt->rep_prefix = 0;
5122	ctxt->regs_valid = 0;
5123	ctxt->regs_dirty = 0;
5124
5125	ctxt->io_read.pos = 0;
5126	ctxt->io_read.end = 0;
5127	ctxt->mem_read.end = 0;
5128}
5129
5130int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5131{
5132	const struct x86_emulate_ops *ops = ctxt->ops;
5133	int rc = X86EMUL_CONTINUE;
5134	int saved_dst_type = ctxt->dst.type;
5135	unsigned emul_flags;
5136
5137	ctxt->mem_read.pos = 0;
5138
5139	/* LOCK prefix is allowed only with some instructions */
5140	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5141		rc = emulate_ud(ctxt);
5142		goto done;
5143	}
5144
5145	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5146		rc = emulate_ud(ctxt);
5147		goto done;
5148	}
5149
5150	emul_flags = ctxt->ops->get_hflags(ctxt);
5151	if (unlikely(ctxt->d &
5152		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5153		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5154				(ctxt->d & Undefined)) {
5155			rc = emulate_ud(ctxt);
5156			goto done;
5157		}
5158
5159		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5160		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5161			rc = emulate_ud(ctxt);
5162			goto done;
5163		}
5164
5165		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5166			rc = emulate_nm(ctxt);
5167			goto done;
5168		}
5169
5170		if (ctxt->d & Mmx) {
5171			rc = flush_pending_x87_faults(ctxt);
5172			if (rc != X86EMUL_CONTINUE)
5173				goto done;
5174			/*
5175			 * Now that we know the fpu is exception safe, we can fetch
5176			 * operands from it.
5177			 */
5178			fetch_possible_mmx_operand(&ctxt->src);
5179			fetch_possible_mmx_operand(&ctxt->src2);
5180			if (!(ctxt->d & Mov))
5181				fetch_possible_mmx_operand(&ctxt->dst);
5182		}
5183
5184		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5185			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5186						      X86_ICPT_PRE_EXCEPT);
5187			if (rc != X86EMUL_CONTINUE)
5188				goto done;
5189		}
5190
5191		/* Instruction can only be executed in protected mode */
5192		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5193			rc = emulate_ud(ctxt);
5194			goto done;
5195		}
5196
5197		/* Privileged instruction can be executed only in CPL=0 */
5198		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5199			if (ctxt->d & PrivUD)
5200				rc = emulate_ud(ctxt);
5201			else
5202				rc = emulate_gp(ctxt, 0);
5203			goto done;
5204		}
5205
5206		/* Do instruction specific permission checks */
5207		if (ctxt->d & CheckPerm) {
5208			rc = ctxt->check_perm(ctxt);
5209			if (rc != X86EMUL_CONTINUE)
5210				goto done;
5211		}
5212
5213		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5214			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5215						      X86_ICPT_POST_EXCEPT);
5216			if (rc != X86EMUL_CONTINUE)
5217				goto done;
5218		}
5219
5220		if (ctxt->rep_prefix && (ctxt->d & String)) {
5221			/* All REP prefixes have the same first termination condition */
5222			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5223				string_registers_quirk(ctxt);
5224				ctxt->eip = ctxt->_eip;
5225				ctxt->eflags &= ~X86_EFLAGS_RF;
5226				goto done;
5227			}
5228		}
5229	}
5230
5231	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5232		rc = segmented_read(ctxt, ctxt->src.addr.mem,
5233				    ctxt->src.valptr, ctxt->src.bytes);
5234		if (rc != X86EMUL_CONTINUE)
5235			goto done;
5236		ctxt->src.orig_val64 = ctxt->src.val64;
5237	}
5238
5239	if (ctxt->src2.type == OP_MEM) {
5240		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5241				    &ctxt->src2.val, ctxt->src2.bytes);
5242		if (rc != X86EMUL_CONTINUE)
5243			goto done;
5244	}
5245
5246	if ((ctxt->d & DstMask) == ImplicitOps)
5247		goto special_insn;
5248
5249
5250	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5251		/* optimisation - avoid slow emulated read if Mov */
5252		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5253				   &ctxt->dst.val, ctxt->dst.bytes);
5254		if (rc != X86EMUL_CONTINUE) {
5255			if (!(ctxt->d & NoWrite) &&
5256			    rc == X86EMUL_PROPAGATE_FAULT &&
5257			    ctxt->exception.vector == PF_VECTOR)
5258				ctxt->exception.error_code |= PFERR_WRITE_MASK;
5259			goto done;
5260		}
5261	}
5262	/* Copy full 64-bit value for CMPXCHG8B.  */
5263	ctxt->dst.orig_val64 = ctxt->dst.val64;
5264
5265special_insn:
5266
5267	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5268		rc = emulator_check_intercept(ctxt, ctxt->intercept,
5269					      X86_ICPT_POST_MEMACCESS);
5270		if (rc != X86EMUL_CONTINUE)
5271			goto done;
5272	}
5273
5274	if (ctxt->rep_prefix && (ctxt->d & String))
5275		ctxt->eflags |= X86_EFLAGS_RF;
5276	else
5277		ctxt->eflags &= ~X86_EFLAGS_RF;
5278
5279	if (ctxt->execute) {
5280		if (ctxt->d & Fastop)
5281			rc = fastop(ctxt, ctxt->fop);
5282		else
5283			rc = ctxt->execute(ctxt);
5284		if (rc != X86EMUL_CONTINUE)
5285			goto done;
5286		goto writeback;
5287	}
5288
5289	if (ctxt->opcode_len == 2)
5290		goto twobyte_insn;
5291	else if (ctxt->opcode_len == 3)
5292		goto threebyte_insn;
5293
5294	switch (ctxt->b) {
5295	case 0x70 ... 0x7f: /* jcc (short) */
5296		if (test_cc(ctxt->b, ctxt->eflags))
5297			rc = jmp_rel(ctxt, ctxt->src.val);
5298		break;
5299	case 0x8d: /* lea r16/r32, m */
5300		ctxt->dst.val = ctxt->src.addr.mem.ea;
5301		break;
5302	case 0x90 ... 0x97: /* nop / xchg reg, rax */
5303		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5304			ctxt->dst.type = OP_NONE;
5305		else
5306			rc = em_xchg(ctxt);
5307		break;
5308	case 0x98: /* cbw/cwde/cdqe */
5309		switch (ctxt->op_bytes) {
5310		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5311		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5312		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5313		}
5314		break;
5315	case 0xcc:		/* int3 */
5316		rc = emulate_int(ctxt, 3);
5317		break;
5318	case 0xcd:		/* int n */
5319		rc = emulate_int(ctxt, ctxt->src.val);
5320		break;
5321	case 0xce:		/* into */
5322		if (ctxt->eflags & X86_EFLAGS_OF)
5323			rc = emulate_int(ctxt, 4);
5324		break;
5325	case 0xe9: /* jmp rel */
5326	case 0xeb: /* jmp rel short */
5327		rc = jmp_rel(ctxt, ctxt->src.val);
5328		ctxt->dst.type = OP_NONE; /* Disable writeback. */
5329		break;
5330	case 0xf4:              /* hlt */
5331		ctxt->ops->halt(ctxt);
5332		break;
5333	case 0xf5:	/* cmc */
5334		/* complement carry flag from eflags reg */
5335		ctxt->eflags ^= X86_EFLAGS_CF;
5336		break;
5337	case 0xf8: /* clc */
5338		ctxt->eflags &= ~X86_EFLAGS_CF;
5339		break;
5340	case 0xf9: /* stc */
5341		ctxt->eflags |= X86_EFLAGS_CF;
5342		break;
5343	case 0xfc: /* cld */
5344		ctxt->eflags &= ~X86_EFLAGS_DF;
5345		break;
5346	case 0xfd: /* std */
5347		ctxt->eflags |= X86_EFLAGS_DF;
5348		break;
5349	default:
5350		goto cannot_emulate;
5351	}
5352
5353	if (rc != X86EMUL_CONTINUE)
5354		goto done;
5355
5356writeback:
5357	if (ctxt->d & SrcWrite) {
5358		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5359		rc = writeback(ctxt, &ctxt->src);
5360		if (rc != X86EMUL_CONTINUE)
5361			goto done;
5362	}
5363	if (!(ctxt->d & NoWrite)) {
5364		rc = writeback(ctxt, &ctxt->dst);
5365		if (rc != X86EMUL_CONTINUE)
5366			goto done;
5367	}
5368
5369	/*
5370	 * Restore the dst type in case the decoding is reused
5371	 * (happens for string instructions).
5372	 */
5373	ctxt->dst.type = saved_dst_type;
5374
5375	if ((ctxt->d & SrcMask) == SrcSI)
5376		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5377
5378	if ((ctxt->d & DstMask) == DstDI)
5379		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5380
5381	if (ctxt->rep_prefix && (ctxt->d & String)) {
5382		unsigned int count;
5383		struct read_cache *r = &ctxt->io_read;
5384		if ((ctxt->d & SrcMask) == SrcSI)
5385			count = ctxt->src.count;
5386		else
5387			count = ctxt->dst.count;
5388		register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5389
5390		if (!string_insn_completed(ctxt)) {
5391			/*
5392			 * Re-enter the guest when the pio read-ahead buffer is
5393			 * empty or, if it is not used, after every 1024 iterations.
5394			 */
5395			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5396			    (r->end == 0 || r->end != r->pos)) {
5397				/*
5398				 * Reset read cache. Usually happens before
5399				 * decode, but since instruction is restarted
5400				 * we have to do it here.
5401				 */
5402				ctxt->mem_read.end = 0;
5403				writeback_registers(ctxt);
5404				return EMULATION_RESTART;
5405			}
5406			goto done; /* skip rip writeback */
5407		}
5408		ctxt->eflags &= ~X86_EFLAGS_RF;
5409	}
5410
5411	ctxt->eip = ctxt->_eip;
5412	if (ctxt->mode != X86EMUL_MODE_PROT64)
5413		ctxt->eip = (u32)ctxt->_eip;
5414
5415done:
5416	if (rc == X86EMUL_PROPAGATE_FAULT) {
5417		if (KVM_EMULATOR_BUG_ON(ctxt->exception.vector > 0x1f, ctxt))
5418			return EMULATION_FAILED;
5419		ctxt->have_exception = true;
5420	}
5421	if (rc == X86EMUL_INTERCEPTED)
5422		return EMULATION_INTERCEPTED;
5423
5424	if (rc == X86EMUL_CONTINUE)
5425		writeback_registers(ctxt);
5426
5427	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5428
5429twobyte_insn:
5430	switch (ctxt->b) {
5431	case 0x09:		/* wbinvd */
5432		(ctxt->ops->wbinvd)(ctxt);
5433		break;
5434	case 0x08:		/* invd */
5435	case 0x0d:		/* GrpP (prefetch) */
5436	case 0x18:		/* Grp16 (prefetch/nop) */
5437	case 0x1f:		/* nop */
5438		break;
5439	case 0x20: /* mov cr, reg */
5440		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5441		break;
5442	case 0x21: /* mov from dr to reg */
5443		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5444		break;
5445	case 0x40 ... 0x4f:	/* cmov */
5446		if (test_cc(ctxt->b, ctxt->eflags))
5447			ctxt->dst.val = ctxt->src.val;
5448		else if (ctxt->op_bytes != 4)
5449			ctxt->dst.type = OP_NONE; /* no writeback */
5450		break;
5451	case 0x80 ... 0x8f: /* jnz rel, etc*/
5452		if (test_cc(ctxt->b, ctxt->eflags))
5453			rc = jmp_rel(ctxt, ctxt->src.val);
5454		break;
5455	case 0x90 ... 0x9f:     /* setcc r/m8 */
5456		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5457		break;
5458	case 0xb6 ... 0xb7:	/* movzx */
5459		ctxt->dst.bytes = ctxt->op_bytes;
5460		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5461						       : (u16) ctxt->src.val;
5462		break;
5463	case 0xbe ... 0xbf:	/* movsx */
5464		ctxt->dst.bytes = ctxt->op_bytes;
5465		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5466							(s16) ctxt->src.val;
5467		break;
5468	default:
5469		goto cannot_emulate;
5470	}
5471
5472threebyte_insn:
5473
5474	if (rc != X86EMUL_CONTINUE)
5475		goto done;
5476
5477	goto writeback;
5478
5479cannot_emulate:
5480	return EMULATION_FAILED;
5481}
5482
5483void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5484{
5485	invalidate_registers(ctxt);
5486}
5487
5488void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5489{
5490	writeback_registers(ctxt);
5491}
5492
5493bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5494{
5495	if (ctxt->rep_prefix && (ctxt->d & String))
5496		return false;
5497
5498	if (ctxt->d & TwoMemOp)
5499		return false;
5500
5501	return true;
5502}
v4.6
 
   1/******************************************************************************
   2 * emulate.c
   3 *
   4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
   5 *
   6 * Copyright (c) 2005 Keir Fraser
   7 *
   8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
   9 * privileged instructions:
  10 *
  11 * Copyright (C) 2006 Qumranet
  12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  13 *
  14 *   Avi Kivity <avi@qumranet.com>
  15 *   Yaniv Kamay <yaniv@qumranet.com>
  16 *
  17 * This work is licensed under the terms of the GNU GPL, version 2.  See
  18 * the COPYING file in the top-level directory.
  19 *
  20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
  21 */
  22
  23#include <linux/kvm_host.h>
  24#include "kvm_cache_regs.h"
  25#include <linux/module.h>
  26#include <asm/kvm_emulate.h>
  27#include <linux/stringify.h>
  28#include <asm/debugreg.h>
 
 
  29
  30#include "x86.h"
  31#include "tss.h"
 
 
  32
  33/*
  34 * Operand types
  35 */
  36#define OpNone             0ull
  37#define OpImplicit         1ull  /* No generic decode */
  38#define OpReg              2ull  /* Register */
  39#define OpMem              3ull  /* Memory */
  40#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
  41#define OpDI               5ull  /* ES:DI/EDI/RDI */
  42#define OpMem64            6ull  /* Memory, 64-bit */
  43#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
  44#define OpDX               8ull  /* DX register */
  45#define OpCL               9ull  /* CL register (for shifts) */
  46#define OpImmByte         10ull  /* 8-bit sign extended immediate */
  47#define OpOne             11ull  /* Implied 1 */
  48#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
  49#define OpMem16           13ull  /* Memory operand (16-bit). */
  50#define OpMem32           14ull  /* Memory operand (32-bit). */
  51#define OpImmU            15ull  /* Immediate operand, zero extended */
  52#define OpSI              16ull  /* SI/ESI/RSI */
  53#define OpImmFAddr        17ull  /* Immediate far address */
  54#define OpMemFAddr        18ull  /* Far address in memory */
  55#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
  56#define OpES              20ull  /* ES */
  57#define OpCS              21ull  /* CS */
  58#define OpSS              22ull  /* SS */
  59#define OpDS              23ull  /* DS */
  60#define OpFS              24ull  /* FS */
  61#define OpGS              25ull  /* GS */
  62#define OpMem8            26ull  /* 8-bit zero extended memory operand */
  63#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
  64#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
  65#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
  66#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */
  67
  68#define OpBits             5  /* Width of operand field */
  69#define OpMask             ((1ull << OpBits) - 1)
  70
  71/*
  72 * Opcode effective-address decode tables.
  73 * Note that we only emulate instructions that have at least one memory
  74 * operand (excluding implicit stack references). We assume that stack
  75 * references and instruction fetches will never occur in special memory
  76 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
  77 * not be handled.
  78 */
  79
  80/* Operand sizes: 8-bit operands or specified/overridden size. */
  81#define ByteOp      (1<<0)	/* 8-bit operands. */
  82/* Destination operand type. */
  83#define DstShift    1
  84#define ImplicitOps (OpImplicit << DstShift)
  85#define DstReg      (OpReg << DstShift)
  86#define DstMem      (OpMem << DstShift)
  87#define DstAcc      (OpAcc << DstShift)
  88#define DstDI       (OpDI << DstShift)
  89#define DstMem64    (OpMem64 << DstShift)
  90#define DstMem16    (OpMem16 << DstShift)
  91#define DstImmUByte (OpImmUByte << DstShift)
  92#define DstDX       (OpDX << DstShift)
  93#define DstAccLo    (OpAccLo << DstShift)
  94#define DstMask     (OpMask << DstShift)
  95/* Source operand type. */
  96#define SrcShift    6
  97#define SrcNone     (OpNone << SrcShift)
  98#define SrcReg      (OpReg << SrcShift)
  99#define SrcMem      (OpMem << SrcShift)
 100#define SrcMem16    (OpMem16 << SrcShift)
 101#define SrcMem32    (OpMem32 << SrcShift)
 102#define SrcImm      (OpImm << SrcShift)
 103#define SrcImmByte  (OpImmByte << SrcShift)
 104#define SrcOne      (OpOne << SrcShift)
 105#define SrcImmUByte (OpImmUByte << SrcShift)
 106#define SrcImmU     (OpImmU << SrcShift)
 107#define SrcSI       (OpSI << SrcShift)
 108#define SrcXLat     (OpXLat << SrcShift)
 109#define SrcImmFAddr (OpImmFAddr << SrcShift)
 110#define SrcMemFAddr (OpMemFAddr << SrcShift)
 111#define SrcAcc      (OpAcc << SrcShift)
 112#define SrcImmU16   (OpImmU16 << SrcShift)
 113#define SrcImm64    (OpImm64 << SrcShift)
 114#define SrcDX       (OpDX << SrcShift)
 115#define SrcMem8     (OpMem8 << SrcShift)
 116#define SrcAccHi    (OpAccHi << SrcShift)
 117#define SrcMask     (OpMask << SrcShift)
 118#define BitOp       (1<<11)
 119#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
 120#define String      (1<<13)     /* String instruction (rep capable) */
 121#define Stack       (1<<14)     /* Stack instruction (push/pop) */
 122#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
 123#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
 124#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
 125#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
 126#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
 127#define Escape      (5<<15)     /* Escape to coprocessor instruction */
 128#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
 129#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
 130#define Sse         (1<<18)     /* SSE Vector instruction */
 131/* Generic ModRM decode. */
 132#define ModRM       (1<<19)
 133/* Destination is only written; never read. */
 134#define Mov         (1<<20)
 135/* Misc flags */
 136#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
 137#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
 138#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
 139#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
 140#define Undefined   (1<<25) /* No Such Instruction */
 141#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
 142#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
 143#define No64	    (1<<28)
 144#define PageTable   (1 << 29)   /* instruction used to write page table */
 145#define NotImpl     (1 << 30)   /* instruction is not implemented */
 146/* Source 2 operand type */
 147#define Src2Shift   (31)
 148#define Src2None    (OpNone << Src2Shift)
 149#define Src2Mem     (OpMem << Src2Shift)
 150#define Src2CL      (OpCL << Src2Shift)
 151#define Src2ImmByte (OpImmByte << Src2Shift)
 152#define Src2One     (OpOne << Src2Shift)
 153#define Src2Imm     (OpImm << Src2Shift)
 154#define Src2ES      (OpES << Src2Shift)
 155#define Src2CS      (OpCS << Src2Shift)
 156#define Src2SS      (OpSS << Src2Shift)
 157#define Src2DS      (OpDS << Src2Shift)
 158#define Src2FS      (OpFS << Src2Shift)
 159#define Src2GS      (OpGS << Src2Shift)
 160#define Src2Mask    (OpMask << Src2Shift)
 161#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
 162#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
 163#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
 164#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
 165#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
 166#define NoWrite     ((u64)1 << 45)  /* No writeback */
 167#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
 168#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
 169#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
 170#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
 171#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
 172#define NearBranch  ((u64)1 << 52)  /* Near branches */
 173#define No16	    ((u64)1 << 53)  /* No 16 bit operand */
 174#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
 175
 176#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
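/*
 * Illustrative sketch, not in the original source: the decoder pulls
 * the three operand descriptors back out of the packed flags word with
 * the shift/mask pairs defined above, roughly:
 *
 *	u64 dst  = (ctxt->d >> DstShift)  & OpMask;
 *	u64 src  = (ctxt->d >> SrcShift)  & OpMask;
 *	u64 src2 = (ctxt->d >> Src2Shift) & OpMask;
 *
 * so an opcode tagged DstReg|SrcMem yields dst == OpReg, src == OpMem.
 */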
 177
 178#define X2(x...) x, x
 179#define X3(x...) X2(x), x
 180#define X4(x...) X2(x), X2(x)
 181#define X5(x...) X4(x), x
 182#define X6(x...) X4(x), X2(x)
 183#define X7(x...) X4(x), X3(x)
 184#define X8(x...) X4(x), X4(x)
 185#define X16(x...) X8(x), X8(x)
 186
 187#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
 188#define FASTOP_SIZE 8
 189
 190/*
 191 * fastop functions have a special calling convention:
 192 *
 193 * dst:    rax        (in/out)
 194 * src:    rdx        (in/out)
 195 * src2:   rcx        (in)
 196 * flags:  rflags     (in/out)
 197 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 198 *
 199 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 200 * different operand sizes can be reached by calculation, rather than a jump
 201 * table (which would be bigger than the code).
 202 *
 203 * fastop functions are declared as taking a never-defined fastop parameter,
 204 * so they can't be called from C directly.
 205 */
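/*
 * Illustrative sketch, not in the original source: given the layout
 * described above, the dispatcher can compute the size-specific entry
 * point instead of looking it up, in essence:
 *
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * i.e. operand sizes 2/4/8 select the stubs at +8/+16/+24.
 */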
 206
 207struct fastop;
 208
 209struct opcode {
 210	u64 flags : 56;
 211	u64 intercept : 8;
 212	union {
 213		int (*execute)(struct x86_emulate_ctxt *ctxt);
 214		const struct opcode *group;
 215		const struct group_dual *gdual;
 216		const struct gprefix *gprefix;
 217		const struct escape *esc;
 218		const struct instr_dual *idual;
 219		const struct mode_dual *mdual;
 220		void (*fastop)(struct fastop *fake);
 221	} u;
 222	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
 223};
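/*
 * Illustrative note, not in the original source: which union member is
 * live follows from the flags word -- Group selects u.group, GroupDual
 * u.gdual, Prefix u.gprefix, Escape u.esc, InstrDual u.idual and
 * ModeDual u.mdual; otherwise u.execute (or u.fastop, when the Fastop
 * flag is set) handles the instruction directly.
 */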
 224
 225struct group_dual {
 226	struct opcode mod012[8];
 227	struct opcode mod3[8];
 228};
 229
 230struct gprefix {
 231	struct opcode pfx_no;
 232	struct opcode pfx_66;
 233	struct opcode pfx_f2;
 234	struct opcode pfx_f3;
 235};
 236
 237struct escape {
 238	struct opcode op[8];
 239	struct opcode high[64];
 240};
 241
 242struct instr_dual {
 243	struct opcode mod012;
 244	struct opcode mod3;
 245};
 246
 247struct mode_dual {
 248	struct opcode mode32;
 249	struct opcode mode64;
 250};
 251
 252#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
 253
 254enum x86_transfer_type {
 255	X86_TRANSFER_NONE,
 256	X86_TRANSFER_CALL_JMP,
 257	X86_TRANSFER_RET,
 258	X86_TRANSFER_TASK_SWITCH,
 259};
 260
 261static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
 262{
 263	if (!(ctxt->regs_valid & (1 << nr))) {
 264		ctxt->regs_valid |= 1 << nr;
 265		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
 266	}
 267	return ctxt->_regs[nr];
 268}
 269
 270static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
 271{
 272	ctxt->regs_valid |= 1 << nr;
 273	ctxt->regs_dirty |= 1 << nr;
 274	return &ctxt->_regs[nr];
 275}
 276
 277static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
 278{
 279	reg_read(ctxt, nr);
 280	return reg_write(ctxt, nr);
 281}
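/*
 * Illustrative usage sketch, not in the original source: a typical
 * read-modify-write of a GPR goes through this cache and only reaches
 * the vcpu when writeback_registers() runs:
 *
 *	ulong *rax = reg_rmw(ctxt, VCPU_REGS_RAX);
 *	*rax += 1;	-- now marked both valid and dirty
 */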
 282
 283static void writeback_registers(struct x86_emulate_ctxt *ctxt)
 284{
 285	unsigned reg;
 286
 287	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
 288		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
 289}
 290
 291static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
 292{
 293	ctxt->regs_dirty = 0;
 294	ctxt->regs_valid = 0;
 295}
 296
 297/*
 298 * These EFLAGS bits are restored from saved value during emulation, and
 299 * any changes are written back to the saved value after emulation.
 300 */
 301#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
 302		     X86_EFLAGS_PF|X86_EFLAGS_CF)
 303
 304#ifdef CONFIG_X86_64
 305#define ON64(x) x
 306#else
 307#define ON64(x)
 308#endif
 309
 310static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
 311
 312#define FOP_FUNC(name) \
 313	".align " __stringify(FASTOP_SIZE) " \n\t" \
 314	".type " name ", @function \n\t" \
 315	name ":\n\t"
 316
 317#define FOP_RET   "ret \n\t"
 318
 319#define FOP_START(op) \
 320	extern void em_##op(struct fastop *fake); \
 321	asm(".pushsection .text, \"ax\" \n\t" \
 322	    ".global em_" #op " \n\t" \
 323	    FOP_FUNC("em_" #op)
 324
 325#define FOP_END \
 326	    ".popsection")
 327
 328#define FOPNOP() \
 329	FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
 330	FOP_RET
 331
 332#define FOP1E(op,  dst) \
 333	FOP_FUNC(#op "_" #dst) \
 334	"10: " #op " %" #dst " \n\t" FOP_RET
 335
 336#define FOP1EEX(op,  dst) \
 337	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
 338
 339#define FASTOP1(op) \
 340	FOP_START(op) \
 341	FOP1E(op##b, al) \
 342	FOP1E(op##w, ax) \
 343	FOP1E(op##l, eax) \
 344	ON64(FOP1E(op##q, rax))	\
 345	FOP_END
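/*
 * Illustrative expansion, not in the original source: FASTOP1(not)
 * emits four FASTOP_SIZE-aligned stubs behind the em_not symbol:
 *
 *	notb %al  ; ret
 *	notw %ax  ; ret
 *	notl %eax ; ret
 *	notq %rax ; ret		(64-bit kernels only)
 */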
 346
 347/* 1-operand, using src2 (for MUL/DIV r/m) */
 348#define FASTOP1SRC2(op, name) \
 349	FOP_START(name) \
 350	FOP1E(op, cl) \
 351	FOP1E(op, cx) \
 352	FOP1E(op, ecx) \
 353	ON64(FOP1E(op, rcx)) \
 354	FOP_END
 355
 356/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
 357#define FASTOP1SRC2EX(op, name) \
 358	FOP_START(name) \
 359	FOP1EEX(op, cl) \
 360	FOP1EEX(op, cx) \
 361	FOP1EEX(op, ecx) \
 362	ON64(FOP1EEX(op, rcx)) \
 363	FOP_END
 364
 365#define FOP2E(op,  dst, src)	   \
 366	FOP_FUNC(#op "_" #dst "_" #src) \
 367	#op " %" #src ", %" #dst " \n\t" FOP_RET
 368
 369#define FASTOP2(op) \
 370	FOP_START(op) \
 371	FOP2E(op##b, al, dl) \
 372	FOP2E(op##w, ax, dx) \
 373	FOP2E(op##l, eax, edx) \
 374	ON64(FOP2E(op##q, rax, rdx)) \
 375	FOP_END
 376
 377/* 2 operand, word only */
 378#define FASTOP2W(op) \
 379	FOP_START(op) \
 380	FOPNOP() \
 381	FOP2E(op##w, ax, dx) \
 382	FOP2E(op##l, eax, edx) \
 383	ON64(FOP2E(op##q, rax, rdx)) \
 384	FOP_END
 385
 386/* 2 operand, src is CL */
 387#define FASTOP2CL(op) \
 388	FOP_START(op) \
 389	FOP2E(op##b, al, cl) \
 390	FOP2E(op##w, ax, cl) \
 391	FOP2E(op##l, eax, cl) \
 392	ON64(FOP2E(op##q, rax, cl)) \
 393	FOP_END
 394
 395/* 2 operand, src and dest are reversed */
 396#define FASTOP2R(op, name) \
 397	FOP_START(name) \
 398	FOP2E(op##b, dl, al) \
 399	FOP2E(op##w, dx, ax) \
 400	FOP2E(op##l, edx, eax) \
 401	ON64(FOP2E(op##q, rdx, rax)) \
 402	FOP_END
 403
 404#define FOP3E(op,  dst, src, src2) \
 405	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
 406	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
 407
 408/* 3-operand, word-only, src2=cl */
 409#define FASTOP3WCL(op) \
 410	FOP_START(op) \
 411	FOPNOP() \
 412	FOP3E(op##w, ax, dx, cl) \
 413	FOP3E(op##l, eax, edx, cl) \
 414	ON64(FOP3E(op##q, rax, rdx, cl)) \
 415	FOP_END
 416
 417/* Special case for SETcc - 1 instruction per cc */
 418#define FOP_SETCC(op) \
 419	".align 4 \n\t" \
 420	".type " #op ", @function \n\t" \
 421	#op ": \n\t" \
 422	#op " %al \n\t" \
 423	FOP_RET
 424
 425asm(".global kvm_fastop_exception \n"
 426    "kvm_fastop_exception: xor %esi, %esi; ret");
 427
 428FOP_START(setcc)
 429FOP_SETCC(seto)
 430FOP_SETCC(setno)
 431FOP_SETCC(setc)
 432FOP_SETCC(setnc)
 433FOP_SETCC(setz)
 434FOP_SETCC(setnz)
 435FOP_SETCC(setbe)
 436FOP_SETCC(setnbe)
 437FOP_SETCC(sets)
 438FOP_SETCC(setns)
 439FOP_SETCC(setp)
 440FOP_SETCC(setnp)
 441FOP_SETCC(setl)
 442FOP_SETCC(setnl)
 443FOP_SETCC(setle)
 444FOP_SETCC(setnle)
 445FOP_END;
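/*
 * Illustrative note: the sixteen stubs above are laid out four bytes
 * apart (".align 4") in condition-code order, so test_cc() below can
 * reach any of them as em_setcc + 4 * (condition & 0xf).
 */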
 446
 447FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
 448FOP_END;
 449
 450static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
 451				    enum x86_intercept intercept,
 452				    enum x86_intercept_stage stage)
 453{
 454	struct x86_instruction_info info = {
 455		.intercept  = intercept,
 456		.rep_prefix = ctxt->rep_prefix,
 457		.modrm_mod  = ctxt->modrm_mod,
 458		.modrm_reg  = ctxt->modrm_reg,
 459		.modrm_rm   = ctxt->modrm_rm,
 460		.src_val    = ctxt->src.val64,
 461		.dst_val    = ctxt->dst.val64,
 462		.src_bytes  = ctxt->src.bytes,
 463		.dst_bytes  = ctxt->dst.bytes,
 464		.ad_bytes   = ctxt->ad_bytes,
 465		.next_rip   = ctxt->eip,
 466	};
 467
 468	return ctxt->ops->intercept(ctxt, &info, stage);
 469}
 470
 471static void assign_masked(ulong *dest, ulong src, ulong mask)
 472{
 473	*dest = (*dest & ~mask) | (src & mask);
 474}
 475
 476static void assign_register(unsigned long *reg, u64 val, int bytes)
 477{
 478	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
 479	switch (bytes) {
 480	case 1:
 481		*(u8 *)reg = (u8)val;
 482		break;
 483	case 2:
 484		*(u16 *)reg = (u16)val;
 485		break;
 486	case 4:
 487		*reg = (u32)val;
 488		break;	/* 64b: zero-extend */
 489	case 8:
 490		*reg = val;
 491		break;
 492	}
 493}
 494
 495static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
 496{
 497	return (1UL << (ctxt->ad_bytes << 3)) - 1;
 498}
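/*
 * Illustrative note, not in the original source: ad_bytes == 2 yields
 * 0xffff and ad_bytes == 4 yields 0xffffffff.  Callers such as
 * address_mask() below special-case ad_bytes == sizeof(unsigned long),
 * where the shift here would be undefined.
 */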
 499
 500static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
 501{
 502	u16 sel;
 503	struct desc_struct ss;
 504
 505	if (ctxt->mode == X86EMUL_MODE_PROT64)
 506		return ~0UL;
 507	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
 508	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
 509}
 510
 511static int stack_size(struct x86_emulate_ctxt *ctxt)
 512{
 513	return (__fls(stack_mask(ctxt)) + 1) >> 3;
 514}
 515
 516/* Access/update address held in a register, based on addressing mode. */
 517static inline unsigned long
 518address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
 519{
 520	if (ctxt->ad_bytes == sizeof(unsigned long))
 521		return reg;
 522	else
 523		return reg & ad_mask(ctxt);
 524}
 525
 526static inline unsigned long
 527register_address(struct x86_emulate_ctxt *ctxt, int reg)
 528{
 529	return address_mask(ctxt, reg_read(ctxt, reg));
 530}
 531
 532static void masked_increment(ulong *reg, ulong mask, int inc)
 533{
 534	assign_masked(reg, *reg + inc, mask);
 535}
 536
 537static inline void
 538register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
 539{
 540	ulong *preg = reg_rmw(ctxt, reg);
 541
 542	assign_register(preg, *preg + inc, ctxt->ad_bytes);
 543}
 544
 545static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
 546{
 547	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
 548}
 549
 550static u32 desc_limit_scaled(struct desc_struct *desc)
 551{
 552	u32 limit = get_desc_limit(desc);
 553
 554	return desc->g ? (limit << 12) | 0xfff : limit;
 555}
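/*
 * Illustrative example, not in the original source: with the
 * granularity bit set, a raw limit of 0x0001f scales to 0x1ffff --
 * page-granular limits always cover whole 4KiB pages.
 */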
 556
 557static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
 558{
 559	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
 560		return 0;
 561
 562	return ctxt->ops->get_cached_segment_base(ctxt, seg);
 563}
 564
 565static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
 566			     u32 error, bool valid)
 567{
 568	WARN_ON(vec > 0x1f);
 569	ctxt->exception.vector = vec;
 570	ctxt->exception.error_code = error;
 571	ctxt->exception.error_code_valid = valid;
 572	return X86EMUL_PROPAGATE_FAULT;
 573}
 574
 575static int emulate_db(struct x86_emulate_ctxt *ctxt)
 576{
 577	return emulate_exception(ctxt, DB_VECTOR, 0, false);
 578}
 579
 580static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
 581{
 582	return emulate_exception(ctxt, GP_VECTOR, err, true);
 583}
 584
 585static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
 586{
 587	return emulate_exception(ctxt, SS_VECTOR, err, true);
 588}
 589
 590static int emulate_ud(struct x86_emulate_ctxt *ctxt)
 591{
 592	return emulate_exception(ctxt, UD_VECTOR, 0, false);
 593}
 594
 595static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
 596{
 597	return emulate_exception(ctxt, TS_VECTOR, err, true);
 598}
 599
 600static int emulate_de(struct x86_emulate_ctxt *ctxt)
 601{
 602	return emulate_exception(ctxt, DE_VECTOR, 0, false);
 603}
 604
 605static int emulate_nm(struct x86_emulate_ctxt *ctxt)
 606{
 607	return emulate_exception(ctxt, NM_VECTOR, 0, false);
 608}
 609
 610static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
 611{
 612	u16 selector;
 613	struct desc_struct desc;
 614
 615	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
 616	return selector;
 617}
 618
 619static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
 620				 unsigned seg)
 621{
 622	u16 dummy;
 623	u32 base3;
 624	struct desc_struct desc;
 625
 626	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
 627	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
 628}
 629
 630/*
 631 * x86 defines three classes of vector instructions: explicitly
 632 * aligned, explicitly unaligned, and the rest, which change behaviour
 633 * depending on whether they're AVX encoded or not.
 634 *
 635 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 636 * subject to the same check.
 637 */
 638static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
 639{
 640	if (likely(size < 16))
 641		return false;
 642
 643	if (ctxt->d & Aligned)
 644		return true;
 645	else if (ctxt->d & Unaligned)
 646		return false;
 647	else if (ctxt->d & Avx)
 648		return false;
 649	else
 650		return true;
 651}
 652
 653static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 654				       struct segmented_address addr,
 655				       unsigned *max_size, unsigned size,
 656				       bool write, bool fetch,
 657				       enum x86emul_mode mode, ulong *linear)
 658{
 659	struct desc_struct desc;
 660	bool usable;
 661	ulong la;
 662	u32 lim;
 663	u16 sel;
 664
 665	la = seg_base(ctxt, addr.seg) + addr.ea;
 666	*max_size = 0;
 667	switch (mode) {
 668	case X86EMUL_MODE_PROT64:
 669		*linear = la;
 670		if (is_noncanonical_address(la))
 671			goto bad;
 672
 673		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
 674		if (size > *max_size)
 675			goto bad;
 676		break;
 677	default:
 678		*linear = la = (u32)la;
 679		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
 680						addr.seg);
 681		if (!usable)
 682			goto bad;
 683		/* code segment in protected mode or read-only data segment */
 684		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
 685					|| !(desc.type & 2)) && write)
 686			goto bad;
 687		/* unreadable code segment */
 688		if (!fetch && (desc.type & 8) && !(desc.type & 2))
 689			goto bad;
 690		lim = desc_limit_scaled(&desc);
 691		if (!(desc.type & 8) && (desc.type & 4)) {
 692			/* expand-down segment */
 693			if (addr.ea <= lim)
 694				goto bad;
 695			lim = desc.d ? 0xffffffff : 0xffff;
 696		}
 697		if (addr.ea > lim)
 698			goto bad;
 699		if (lim == 0xffffffff)
 700			*max_size = ~0u;
 701		else {
 702			*max_size = (u64)lim + 1 - addr.ea;
 703			if (size > *max_size)
 704				goto bad;
 705		}
 706		break;
 707	}
 708	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
 709		return emulate_gp(ctxt, 0);
 710	return X86EMUL_CONTINUE;
 711bad:
 712	if (addr.seg == VCPU_SREG_SS)
 713		return emulate_ss(ctxt, 0);
 714	else
 715		return emulate_gp(ctxt, 0);
 716}
 717
 718static int linearize(struct x86_emulate_ctxt *ctxt,
 719		     struct segmented_address addr,
 720		     unsigned size, bool write,
 721		     ulong *linear)
 722{
 723	unsigned max_size;
 724	return __linearize(ctxt, addr, &max_size, size, write, false,
 725			   ctxt->mode, linear);
 726}
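/*
 * Illustrative usage sketch, not in the original source: callers
 * translate a segmented address before touching guest memory, e.g.
 *
 *	rc = linearize(ctxt, addr, size, false, &linear);
 *	if (rc != X86EMUL_CONTINUE)
 *		return rc;
 *	return ctxt->ops->read_std(ctxt, linear, data, size,
 *				   &ctxt->exception);
 *
 * which is exactly the shape of segmented_read_std() below.
 */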
 727
 728static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
 729			     enum x86emul_mode mode)
 730{
 731	ulong linear;
 732	int rc;
 733	unsigned max_size;
 734	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 735					   .ea = dst };
 736
 737	if (ctxt->op_bytes != sizeof(unsigned long))
 738		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
 739	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
 740	if (rc == X86EMUL_CONTINUE)
 741		ctxt->_eip = addr.ea;
 742	return rc;
 743}
 744
 745static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
 746{
 747	return assign_eip(ctxt, dst, ctxt->mode);
 748}
 749
 750static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
 751			  const struct desc_struct *cs_desc)
 752{
 753	enum x86emul_mode mode = ctxt->mode;
 754	int rc;
 755
 756#ifdef CONFIG_X86_64
 757	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
 758		if (cs_desc->l) {
 759			u64 efer = 0;
 760
 761			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
 762			if (efer & EFER_LMA)
 763				mode = X86EMUL_MODE_PROT64;
 764		} else
 765			mode = X86EMUL_MODE_PROT32; /* temporary value */
 766	}
 767#endif
 768	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
 769		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
 770	rc = assign_eip(ctxt, dst, mode);
 771	if (rc == X86EMUL_CONTINUE)
 772		ctxt->mode = mode;
 773	return rc;
 774}
 775
 776static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
 777{
 778	return assign_eip_near(ctxt, ctxt->_eip + rel);
 779}
 780
 781static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 782			      struct segmented_address addr,
 783			      void *data,
 784			      unsigned size)
 785{
 786	int rc;
 787	ulong linear;
 788
 789	rc = linearize(ctxt, addr, size, false, &linear);
 790	if (rc != X86EMUL_CONTINUE)
 791		return rc;
 792	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
 793}
 794
 795/*
 796 * Prefetch the remaining bytes of the instruction without crossing page
 797 * boundary if they are not in fetch_cache yet.
 798 */
 799static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 800{
 801	int rc;
 802	unsigned size, max_size;
 803	unsigned long linear;
 804	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
 805	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 806					   .ea = ctxt->eip + cur_size };
 807
 808	/*
 809	 * We do not know exactly how many bytes will be needed, and
 810	 * __linearize is expensive, so fetch as much as possible.  We
 811	 * just have to avoid going beyond the 15 byte limit, the end
 812	 * of the segment, or the end of the page.
 813	 *
 814	 * __linearize is called with size 0 so that it does not do any
 815	 * boundary check itself.  Instead, we use max_size to check
 816	 * against op_size.
 817	 */
 818	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
 819			 &linear);
 820	if (unlikely(rc != X86EMUL_CONTINUE))
 821		return rc;
 822
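	/*
	 * Illustrative note: cur_size never exceeds 15 (the maximum
	 * instruction length), so "15UL ^ cur_size" below is simply
	 * 15 - cur_size computed without a borrow.
	 */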
 823	size = min_t(unsigned, 15UL ^ cur_size, max_size);
 824	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
 825
 826	/*
 827	 * One instruction can only straddle two pages,
 828	 * and one has been loaded at the beginning of
  829	 * x86_decode_insn.  So, if we still do not have
  830	 * enough bytes, we must have hit the 15-byte limit.
 831	 */
 832	if (unlikely(size < op_size))
 833		return emulate_gp(ctxt, 0);
 834
 835	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
 836			      size, &ctxt->exception);
 837	if (unlikely(rc != X86EMUL_CONTINUE))
 838		return rc;
 839	ctxt->fetch.end += size;
 840	return X86EMUL_CONTINUE;
 841}
 842
 843static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
 844					       unsigned size)
 845{
 846	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
 847
 848	if (unlikely(done_size < size))
 849		return __do_insn_fetch_bytes(ctxt, size - done_size);
 850	else
 851		return X86EMUL_CONTINUE;
 852}
 853
 854/* Fetch next part of the instruction being emulated. */
 855#define insn_fetch(_type, _ctxt)					\
 856({	_type _x;							\
 857									\
 858	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
 859	if (rc != X86EMUL_CONTINUE)					\
 860		goto done;						\
 861	ctxt->_eip += sizeof(_type);					\
 862	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
 863	ctxt->fetch.ptr += sizeof(_type);				\
 864	_x;								\
 865})
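/*
 * Illustrative usage sketch, not in the original source: the decoder
 * pulls ModRM bytes, displacements and immediates through this macro,
 * e.g.
 *
 *	ctxt->modrm = insn_fetch(u8, ctxt);
 *
 * On a failed fetch the macro jumps to a local "done" label, which
 * every caller must therefore provide (see decode_modrm below).
 */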
 866
 867#define insn_fetch_arr(_arr, _size, _ctxt)				\
 868({									\
 869	rc = do_insn_fetch_bytes(_ctxt, _size);				\
 870	if (rc != X86EMUL_CONTINUE)					\
 871		goto done;						\
 872	ctxt->_eip += (_size);						\
 873	memcpy(_arr, ctxt->fetch.ptr, _size);				\
 874	ctxt->fetch.ptr += (_size);					\
 875})
 876
 877/*
 878 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 879 * pointer into the block that addresses the relevant register.
  880 * @byteop selects byte registers; absent a REX prefix, 4-7 are AH,CH,DH,BH.
 881 */
 882static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
 883			     int byteop)
 884{
 885	void *p;
 886	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
 887
 888	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
 889		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
 890	else
 891		p = reg_rmw(ctxt, modrm_reg);
 892	return p;
 893}
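/*
 * Illustrative example, not in the original source: without a REX
 * prefix, byte register 4 decodes to AH, i.e. one past the low byte
 * of RAX: (unsigned char *)reg_rmw(ctxt, 0) + 1.  With any REX prefix
 * present, 4 decodes to SPL instead.
 */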
 894
 895static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 896			   struct segmented_address addr,
 897			   u16 *size, unsigned long *address, int op_bytes)
 898{
 899	int rc;
 900
 901	if (op_bytes == 2)
 902		op_bytes = 3;
 903	*address = 0;
 904	rc = segmented_read_std(ctxt, addr, size, 2);
 905	if (rc != X86EMUL_CONTINUE)
 906		return rc;
 907	addr.ea += 2;
 908	rc = segmented_read_std(ctxt, addr, address, op_bytes);
 909	return rc;
 910}
 911
 912FASTOP2(add);
 913FASTOP2(or);
 914FASTOP2(adc);
 915FASTOP2(sbb);
 916FASTOP2(and);
 917FASTOP2(sub);
 918FASTOP2(xor);
 919FASTOP2(cmp);
 920FASTOP2(test);
 921
 922FASTOP1SRC2(mul, mul_ex);
 923FASTOP1SRC2(imul, imul_ex);
 924FASTOP1SRC2EX(div, div_ex);
 925FASTOP1SRC2EX(idiv, idiv_ex);
 926
 927FASTOP3WCL(shld);
 928FASTOP3WCL(shrd);
 929
 930FASTOP2W(imul);
 931
 932FASTOP1(not);
 933FASTOP1(neg);
 934FASTOP1(inc);
 935FASTOP1(dec);
 936
 937FASTOP2CL(rol);
 938FASTOP2CL(ror);
 939FASTOP2CL(rcl);
 940FASTOP2CL(rcr);
 941FASTOP2CL(shl);
 942FASTOP2CL(shr);
 943FASTOP2CL(sar);
 944
 945FASTOP2W(bsf);
 946FASTOP2W(bsr);
 947FASTOP2W(bt);
 948FASTOP2W(bts);
 949FASTOP2W(btr);
 950FASTOP2W(btc);
 951
 952FASTOP2(xadd);
 953
 954FASTOP2R(cmp, cmp_r);
 955
 956static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
 957{
 958	/* If src is zero, do not writeback, but update flags */
 959	if (ctxt->src.val == 0)
 960		ctxt->dst.type = OP_NONE;
 961	return fastop(ctxt, em_bsf);
 962}
 963
 964static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
 965{
 966	/* If src is zero, do not writeback, but update flags */
 967	if (ctxt->src.val == 0)
 968		ctxt->dst.type = OP_NONE;
 969	return fastop(ctxt, em_bsr);
 970}
 971
 972static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
 973{
 974	u8 rc;
 975	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
 976
 977	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
 978	asm("push %[flags]; popf; call *%[fastop]"
 979	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
 980	return rc;
 981}
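/*
 * Illustrative example, not in the original source: for condition
 * code 0x4 (JZ/SETZ), test_cc() calls the stub at em_setcc + 16 --
 * the "setz" entry -- with the guest's arithmetic flags installed,
 * so %al comes back nonzero exactly when ZF is set.
 */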
 982
 983static void fetch_register_operand(struct operand *op)
 984{
 985	switch (op->bytes) {
 986	case 1:
 987		op->val = *(u8 *)op->addr.reg;
 988		break;
 989	case 2:
 990		op->val = *(u16 *)op->addr.reg;
 991		break;
 992	case 4:
 993		op->val = *(u32 *)op->addr.reg;
 994		break;
 995	case 8:
 996		op->val = *(u64 *)op->addr.reg;
 997		break;
 998	}
 999}
1000
1001static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1002{
1003	ctxt->ops->get_fpu(ctxt);
1004	switch (reg) {
1005	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1006	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1007	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1008	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1009	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1010	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1011	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1012	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1013#ifdef CONFIG_X86_64
1014	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1015	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1016	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1017	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1018	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1019	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1020	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1021	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1022#endif
1023	default: BUG();
1024	}
1025	ctxt->ops->put_fpu(ctxt);
1026}
1027
1028static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1029			  int reg)
1030{
1031	ctxt->ops->get_fpu(ctxt);
1032	switch (reg) {
1033	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1034	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1035	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1036	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1037	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1038	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1039	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1040	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1041#ifdef CONFIG_X86_64
1042	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1043	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1044	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1045	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1046	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1047	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1048	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1049	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1050#endif
1051	default: BUG();
1052	}
1053	ctxt->ops->put_fpu(ctxt);
1054}
1055
1056static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1057{
1058	ctxt->ops->get_fpu(ctxt);
1059	switch (reg) {
1060	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1061	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1062	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1063	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1064	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1065	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1066	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1067	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1068	default: BUG();
1069	}
1070	ctxt->ops->put_fpu(ctxt);
1071}
1072
1073static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1074{
1075	ctxt->ops->get_fpu(ctxt);
1076	switch (reg) {
1077	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1078	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1079	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1080	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1081	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1082	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1083	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1084	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1085	default: BUG();
1086	}
1087	ctxt->ops->put_fpu(ctxt);
1088}
1089
1090static int em_fninit(struct x86_emulate_ctxt *ctxt)
1091{
1092	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1093		return emulate_nm(ctxt);
1094
1095	ctxt->ops->get_fpu(ctxt);
1096	asm volatile("fninit");
1097	ctxt->ops->put_fpu(ctxt);
1098	return X86EMUL_CONTINUE;
1099}
1100
1101static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1102{
1103	u16 fcw;
1104
1105	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1106		return emulate_nm(ctxt);
1107
1108	ctxt->ops->get_fpu(ctxt);
1109	asm volatile("fnstcw %0": "+m"(fcw));
1110	ctxt->ops->put_fpu(ctxt);
1111
1112	ctxt->dst.val = fcw;
1113
1114	return X86EMUL_CONTINUE;
1115}
1116
1117static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1118{
1119	u16 fsw;
1120
1121	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1122		return emulate_nm(ctxt);
1123
1124	ctxt->ops->get_fpu(ctxt);
1125	asm volatile("fnstsw %0": "+m"(fsw));
1126	ctxt->ops->put_fpu(ctxt);
1127
1128	ctxt->dst.val = fsw;
1129
1130	return X86EMUL_CONTINUE;
1131}
1132
1133static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1134				    struct operand *op)
1135{
1136	unsigned reg = ctxt->modrm_reg;
1137
1138	if (!(ctxt->d & ModRM))
1139		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1140
1141	if (ctxt->d & Sse) {
1142		op->type = OP_XMM;
1143		op->bytes = 16;
1144		op->addr.xmm = reg;
1145		read_sse_reg(ctxt, &op->vec_val, reg);
1146		return;
1147	}
1148	if (ctxt->d & Mmx) {
1149		reg &= 7;
1150		op->type = OP_MM;
1151		op->bytes = 8;
1152		op->addr.mm = reg;
1153		return;
1154	}
1155
1156	op->type = OP_REG;
1157	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1158	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1159
1160	fetch_register_operand(op);
1161	op->orig_val = op->val;
1162}
1163
1164static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1165{
1166	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1167		ctxt->modrm_seg = VCPU_SREG_SS;
1168}
1169
1170static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1171			struct operand *op)
1172{
1173	u8 sib;
1174	int index_reg, base_reg, scale;
1175	int rc = X86EMUL_CONTINUE;
1176	ulong modrm_ea = 0;
1177
1178	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1179	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1180	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1181
1182	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1183	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1184	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1185	ctxt->modrm_seg = VCPU_SREG_DS;
1186
1187	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1188		op->type = OP_REG;
1189		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1190		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1191				ctxt->d & ByteOp);
1192		if (ctxt->d & Sse) {
1193			op->type = OP_XMM;
1194			op->bytes = 16;
1195			op->addr.xmm = ctxt->modrm_rm;
1196			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1197			return rc;
1198		}
1199		if (ctxt->d & Mmx) {
1200			op->type = OP_MM;
1201			op->bytes = 8;
1202			op->addr.mm = ctxt->modrm_rm & 7;
1203			return rc;
1204		}
1205		fetch_register_operand(op);
1206		return rc;
1207	}
1208
1209	op->type = OP_MEM;
1210
1211	if (ctxt->ad_bytes == 2) {
1212		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1213		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1214		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1215		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1216
1217		/* 16-bit ModR/M decode. */
1218		switch (ctxt->modrm_mod) {
1219		case 0:
1220			if (ctxt->modrm_rm == 6)
1221				modrm_ea += insn_fetch(u16, ctxt);
1222			break;
1223		case 1:
1224			modrm_ea += insn_fetch(s8, ctxt);
1225			break;
1226		case 2:
1227			modrm_ea += insn_fetch(u16, ctxt);
1228			break;
1229		}
1230		switch (ctxt->modrm_rm) {
1231		case 0:
1232			modrm_ea += bx + si;
1233			break;
1234		case 1:
1235			modrm_ea += bx + di;
1236			break;
1237		case 2:
1238			modrm_ea += bp + si;
1239			break;
1240		case 3:
1241			modrm_ea += bp + di;
1242			break;
1243		case 4:
1244			modrm_ea += si;
1245			break;
1246		case 5:
1247			modrm_ea += di;
1248			break;
1249		case 6:
1250			if (ctxt->modrm_mod != 0)
1251				modrm_ea += bp;
1252			break;
1253		case 7:
1254			modrm_ea += bx;
1255			break;
1256		}
1257		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1258		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1259			ctxt->modrm_seg = VCPU_SREG_SS;
1260		modrm_ea = (u16)modrm_ea;
1261	} else {
1262		/* 32/64-bit ModR/M decode. */
1263		if ((ctxt->modrm_rm & 7) == 4) {
1264			sib = insn_fetch(u8, ctxt);
1265			index_reg |= (sib >> 3) & 7;
1266			base_reg |= sib & 7;
1267			scale = sib >> 6;
1268
1269			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1270				modrm_ea += insn_fetch(s32, ctxt);
1271			else {
1272				modrm_ea += reg_read(ctxt, base_reg);
1273				adjust_modrm_seg(ctxt, base_reg);
1274				/* Increment ESP on POP [ESP] */
1275				if ((ctxt->d & IncSP) &&
1276				    base_reg == VCPU_REGS_RSP)
1277					modrm_ea += ctxt->op_bytes;
1278			}
1279			if (index_reg != 4)
1280				modrm_ea += reg_read(ctxt, index_reg) << scale;
1281		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1282			modrm_ea += insn_fetch(s32, ctxt);
1283			if (ctxt->mode == X86EMUL_MODE_PROT64)
1284				ctxt->rip_relative = 1;
1285		} else {
1286			base_reg = ctxt->modrm_rm;
1287			modrm_ea += reg_read(ctxt, base_reg);
1288			adjust_modrm_seg(ctxt, base_reg);
1289		}
1290		switch (ctxt->modrm_mod) {
1291		case 1:
1292			modrm_ea += insn_fetch(s8, ctxt);
1293			break;
1294		case 2:
1295			modrm_ea += insn_fetch(s32, ctxt);
1296			break;
1297		}
1298	}
1299	op->addr.mem.ea = modrm_ea;
1300	if (ctxt->ad_bytes != 8)
1301		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1302
1303done:
1304	return rc;
1305}
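/*
 * Illustrative worked example, not in the original source: in 16-bit
 * addressing, ModRM 0x46 followed by displacement byte 0x10 decodes
 * as mod=1, reg=0, rm=6, giving ea = BP + 0x10 with SS as the default
 * segment -- the rm == 6, mod != 0 case above.
 */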
1306
1307static int decode_abs(struct x86_emulate_ctxt *ctxt,
1308		      struct operand *op)
1309{
1310	int rc = X86EMUL_CONTINUE;
1311
1312	op->type = OP_MEM;
1313	switch (ctxt->ad_bytes) {
1314	case 2:
1315		op->addr.mem.ea = insn_fetch(u16, ctxt);
1316		break;
1317	case 4:
1318		op->addr.mem.ea = insn_fetch(u32, ctxt);
1319		break;
1320	case 8:
1321		op->addr.mem.ea = insn_fetch(u64, ctxt);
1322		break;
1323	}
1324done:
1325	return rc;
1326}
1327
1328static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1329{
1330	long sv = 0, mask;
1331
1332	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1333		mask = ~((long)ctxt->dst.bytes * 8 - 1);
1334
1335		if (ctxt->src.bytes == 2)
1336			sv = (s16)ctxt->src.val & (s16)mask;
1337		else if (ctxt->src.bytes == 4)
1338			sv = (s32)ctxt->src.val & (s32)mask;
1339		else
1340			sv = (s64)ctxt->src.val & (s64)mask;
1341
1342		ctxt->dst.addr.mem.ea = address_mask(ctxt,
1343					   ctxt->dst.addr.mem.ea + (sv >> 3));
1344	}
1345
1346	/* only subword offset */
1347	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1348}
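/*
 * Illustrative worked example, not in the original source: BT with a
 * 16-bit memory destination and a register bit index of 17 gives
 * mask = ~15 and sv = 16, so the effective address advances by
 * 16 >> 3 = 2 bytes and the in-word bit offset becomes 17 & 15 = 1.
 */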
1349
1350static int read_emulated(struct x86_emulate_ctxt *ctxt,
1351			 unsigned long addr, void *dest, unsigned size)
1352{
1353	int rc;
1354	struct read_cache *mc = &ctxt->mem_read;
1355
1356	if (mc->pos < mc->end)
1357		goto read_cached;
1358
1359	WARN_ON((mc->end + size) >= sizeof(mc->data));
1360
1361	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1362				      &ctxt->exception);
1363	if (rc != X86EMUL_CONTINUE)
1364		return rc;
1365
1366	mc->end += size;
1367
1368read_cached:
1369	memcpy(dest, mc->data + mc->pos, size);
1370	mc->pos += size;
1371	return X86EMUL_CONTINUE;
1372}
1373
1374static int segmented_read(struct x86_emulate_ctxt *ctxt,
1375			  struct segmented_address addr,
1376			  void *data,
1377			  unsigned size)
1378{
1379	int rc;
1380	ulong linear;
1381
1382	rc = linearize(ctxt, addr, size, false, &linear);
1383	if (rc != X86EMUL_CONTINUE)
1384		return rc;
1385	return read_emulated(ctxt, linear, data, size);
1386}
1387
1388static int segmented_write(struct x86_emulate_ctxt *ctxt,
1389			   struct segmented_address addr,
1390			   const void *data,
1391			   unsigned size)
1392{
1393	int rc;
1394	ulong linear;
1395
1396	rc = linearize(ctxt, addr, size, true, &linear);
1397	if (rc != X86EMUL_CONTINUE)
1398		return rc;
1399	return ctxt->ops->write_emulated(ctxt, linear, data, size,
1400					 &ctxt->exception);
1401}
1402
1403static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1404			     struct segmented_address addr,
1405			     const void *orig_data, const void *data,
1406			     unsigned size)
1407{
1408	int rc;
1409	ulong linear;
1410
1411	rc = linearize(ctxt, addr, size, true, &linear);
1412	if (rc != X86EMUL_CONTINUE)
1413		return rc;
1414	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1415					   size, &ctxt->exception);
1416}
1417
1418static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1419			   unsigned int size, unsigned short port,
1420			   void *dest)
1421{
1422	struct read_cache *rc = &ctxt->io_read;
1423
1424	if (rc->pos == rc->end) { /* refill pio read ahead */
1425		unsigned int in_page, n;
1426		unsigned int count = ctxt->rep_prefix ?
1427			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1428		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1429			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1430			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1431		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1432		if (n == 0)
1433			n = 1;
1434		rc->pos = rc->end = 0;
1435		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1436			return 0;
1437		rc->end = n * size;
1438	}
1439
1440	if (ctxt->rep_prefix && (ctxt->d & String) &&
1441	    !(ctxt->eflags & X86_EFLAGS_DF)) {
1442		ctxt->dst.data = rc->data + rc->pos;
1443		ctxt->dst.type = OP_MEM_STR;
1444		ctxt->dst.count = (rc->end - rc->pos) / size;
1445		rc->pos = rc->end;
1446	} else {
1447		memcpy(dest, rc->data + rc->pos, size);
1448		rc->pos += size;
1449	}
1450	return 1;
1451}
1452
1453static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1454				     u16 index, struct desc_struct *desc)
1455{
1456	struct desc_ptr dt;
1457	ulong addr;
1458
1459	ctxt->ops->get_idt(ctxt, &dt);
1460
1461	if (dt.size < index * 8 + 7)
1462		return emulate_gp(ctxt, index << 3 | 0x2);
1463
1464	addr = dt.address + index * 8;
1465	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1466				   &ctxt->exception);
1467}
1468
1469static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1470				     u16 selector, struct desc_ptr *dt)
1471{
1472	const struct x86_emulate_ops *ops = ctxt->ops;
1473	u32 base3 = 0;
1474
1475	if (selector & 1 << 2) {
1476		struct desc_struct desc;
1477		u16 sel;
1478
 1479		memset(dt, 0, sizeof(*dt));
1480		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1481				      VCPU_SREG_LDTR))
1482			return;
1483
1484		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1485		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1486	} else
1487		ops->get_gdt(ctxt, dt);
1488}
1489
1490static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1491			      u16 selector, ulong *desc_addr_p)
1492{
1493	struct desc_ptr dt;
1494	u16 index = selector >> 3;
1495	ulong addr;
1496
1497	get_descriptor_table_ptr(ctxt, selector, &dt);
1498
1499	if (dt.size < index * 8 + 7)
1500		return emulate_gp(ctxt, selector & 0xfffc);
1501
1502	addr = dt.address + index * 8;
1503
1504#ifdef CONFIG_X86_64
1505	if (addr >> 32 != 0) {
1506		u64 efer = 0;
1507
1508		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1509		if (!(efer & EFER_LMA))
1510			addr &= (u32)-1;
1511	}
1512#endif
1513
1514	*desc_addr_p = addr;
1515	return X86EMUL_CONTINUE;
1516}
1517
1518/* allowed just for 8 bytes segments */
1519static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1520				   u16 selector, struct desc_struct *desc,
1521				   ulong *desc_addr_p)
1522{
1523	int rc;
1524
1525	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1526	if (rc != X86EMUL_CONTINUE)
1527		return rc;
1528
1529	return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
1530				   &ctxt->exception);
1531}
1532
1533/* allowed just for 8 bytes segments */
1534static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1535				    u16 selector, struct desc_struct *desc)
1536{
1537	int rc;
1538	ulong addr;
1539
1540	rc = get_descriptor_ptr(ctxt, selector, &addr);
1541	if (rc != X86EMUL_CONTINUE)
1542		return rc;
1543
1544	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1545				    &ctxt->exception);
1546}
1547
1548/* Does not support long mode */
1549static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1550				     u16 selector, int seg, u8 cpl,
1551				     enum x86_transfer_type transfer,
1552				     struct desc_struct *desc)
1553{
1554	struct desc_struct seg_desc, old_desc;
1555	u8 dpl, rpl;
1556	unsigned err_vec = GP_VECTOR;
1557	u32 err_code = 0;
1558	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1559	ulong desc_addr;
1560	int ret;
1561	u16 dummy;
1562	u32 base3 = 0;
1563
1564	memset(&seg_desc, 0, sizeof seg_desc);
1565
1566	if (ctxt->mode == X86EMUL_MODE_REAL) {
1567		/* set real mode segment descriptor (keep limit etc. for
1568		 * unreal mode) */
1569		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1570		set_desc_base(&seg_desc, selector << 4);
1571		goto load;
1572	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1573		/* VM86 needs a clean new segment descriptor */
1574		set_desc_base(&seg_desc, selector << 4);
1575		set_desc_limit(&seg_desc, 0xffff);
1576		seg_desc.type = 3;
1577		seg_desc.p = 1;
1578		seg_desc.s = 1;
1579		seg_desc.dpl = 3;
1580		goto load;
1581	}
1582
1583	rpl = selector & 3;
1584
1585	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
1586	if ((seg == VCPU_SREG_CS
1587	     || (seg == VCPU_SREG_SS
1588		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1589	     || seg == VCPU_SREG_TR)
1590	    && null_selector)
1591		goto exception;
1592
1593	/* TR should be in GDT only */
1594	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1595		goto exception;
1596
1597	if (null_selector) /* for NULL selector skip all following checks */
1598		goto load;
1599
1600	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1601	if (ret != X86EMUL_CONTINUE)
1602		return ret;
1603
1604	err_code = selector & 0xfffc;
1605	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1606							   GP_VECTOR;
1607
1608	/* can't load system descriptor into segment selector */
1609	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1610		if (transfer == X86_TRANSFER_CALL_JMP)
1611			return X86EMUL_UNHANDLEABLE;
1612		goto exception;
1613	}
1614
1615	if (!seg_desc.p) {
1616		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1617		goto exception;
1618	}
1619
1620	dpl = seg_desc.dpl;
1621
1622	switch (seg) {
1623	case VCPU_SREG_SS:
1624		/*
1625		 * segment is not a writable data segment or segment
 1626		 * selector's RPL != CPL or descriptor's DPL != CPL
1627		 */
1628		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1629			goto exception;
1630		break;
1631	case VCPU_SREG_CS:
1632		if (!(seg_desc.type & 8))
1633			goto exception;
1634
1635		if (seg_desc.type & 4) {
1636			/* conforming */
1637			if (dpl > cpl)
1638				goto exception;
1639		} else {
1640			/* nonconforming */
1641			if (rpl > cpl || dpl != cpl)
1642				goto exception;
1643		}
1644		/* in long-mode d/b must be clear if l is set */
1645		if (seg_desc.d && seg_desc.l) {
1646			u64 efer = 0;
1647
1648			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1649			if (efer & EFER_LMA)
1650				goto exception;
1651		}
1652
1653		/* CS(RPL) <- CPL */
1654		selector = (selector & 0xfffc) | cpl;
1655		break;
1656	case VCPU_SREG_TR:
1657		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1658			goto exception;
1659		old_desc = seg_desc;
1660		seg_desc.type |= 2; /* busy */
1661		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1662						  sizeof(seg_desc), &ctxt->exception);
1663		if (ret != X86EMUL_CONTINUE)
1664			return ret;
1665		break;
1666	case VCPU_SREG_LDTR:
1667		if (seg_desc.s || seg_desc.type != 2)
1668			goto exception;
1669		break;
1670	default: /*  DS, ES, FS, or GS */
1671		/*
1672		 * segment is not a data or readable code segment or
1673		 * ((segment is a data or nonconforming code segment)
1674		 * and (both RPL and CPL > DPL))
1675		 */
1676		if ((seg_desc.type & 0xa) == 0x8 ||
1677		    (((seg_desc.type & 0xc) != 0xc) &&
1678		     (rpl > dpl && cpl > dpl)))
1679			goto exception;
1680		break;
1681	}
1682
1683	if (seg_desc.s) {
1684		/* mark segment as accessed */
1685		if (!(seg_desc.type & 1)) {
1686			seg_desc.type |= 1;
1687			ret = write_segment_descriptor(ctxt, selector,
1688						       &seg_desc);
1689			if (ret != X86EMUL_CONTINUE)
1690				return ret;
1691		}
1692	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1693		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1694				sizeof(base3), &ctxt->exception);
1695		if (ret != X86EMUL_CONTINUE)
1696			return ret;
1697		if (is_noncanonical_address(get_desc_base(&seg_desc) |
1698					     ((u64)base3 << 32)))
1699			return emulate_gp(ctxt, 0);
1700	}
1701load:
1702	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1703	if (desc)
1704		*desc = seg_desc;
1705	return X86EMUL_CONTINUE;
1706exception:
1707	return emulate_exception(ctxt, err_vec, err_code, true);
1708}
1709
1710static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1711				   u16 selector, int seg)
1712{
1713	u8 cpl = ctxt->ops->cpl(ctxt);
1714	return __load_segment_descriptor(ctxt, selector, seg, cpl,
1715					 X86_TRANSFER_NONE, NULL);
1716}
1717
1718static void write_register_operand(struct operand *op)
1719{
1720	return assign_register(op->addr.reg, op->val, op->bytes);
1721}
1722
1723static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1724{
1725	switch (op->type) {
1726	case OP_REG:
1727		write_register_operand(op);
1728		break;
1729	case OP_MEM:
1730		if (ctxt->lock_prefix)
1731			return segmented_cmpxchg(ctxt,
1732						 op->addr.mem,
1733						 &op->orig_val,
1734						 &op->val,
1735						 op->bytes);
1736		else
1737			return segmented_write(ctxt,
1738					       op->addr.mem,
1739					       &op->val,
1740					       op->bytes);
1741		break;
1742	case OP_MEM_STR:
1743		return segmented_write(ctxt,
1744				       op->addr.mem,
1745				       op->data,
1746				       op->bytes * op->count);
1747		break;
1748	case OP_XMM:
1749		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1750		break;
1751	case OP_MM:
1752		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1753		break;
1754	case OP_NONE:
1755		/* no writeback */
1756		break;
1757	default:
1758		break;
1759	}
1760	return X86EMUL_CONTINUE;
1761}
1762
1763static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1764{
1765	struct segmented_address addr;
1766
1767	rsp_increment(ctxt, -bytes);
1768	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1769	addr.seg = VCPU_SREG_SS;
1770
1771	return segmented_write(ctxt, addr, data, bytes);
1772}
1773
1774static int em_push(struct x86_emulate_ctxt *ctxt)
1775{
1776	/* Disable writeback. */
1777	ctxt->dst.type = OP_NONE;
1778	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1779}
1780
1781static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1782		       void *dest, int len)
1783{
1784	int rc;
1785	struct segmented_address addr;
1786
1787	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1788	addr.seg = VCPU_SREG_SS;
1789	rc = segmented_read(ctxt, addr, dest, len);
1790	if (rc != X86EMUL_CONTINUE)
1791		return rc;
1792
1793	rsp_increment(ctxt, len);
1794	return rc;
1795}
1796
1797static int em_pop(struct x86_emulate_ctxt *ctxt)
1798{
1799	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1800}
1801
1802static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1803			void *dest, int len)
1804{
1805	int rc;
1806	unsigned long val, change_mask;
1807	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1808	int cpl = ctxt->ops->cpl(ctxt);
1809
1810	rc = emulate_pop(ctxt, &val, len);
1811	if (rc != X86EMUL_CONTINUE)
1812		return rc;
1813
1814	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1815		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1816		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1817		      X86_EFLAGS_AC | X86_EFLAGS_ID;
1818
1819	switch(ctxt->mode) {
1820	case X86EMUL_MODE_PROT64:
1821	case X86EMUL_MODE_PROT32:
1822	case X86EMUL_MODE_PROT16:
1823		if (cpl == 0)
1824			change_mask |= X86_EFLAGS_IOPL;
1825		if (cpl <= iopl)
1826			change_mask |= X86_EFLAGS_IF;
1827		break;
1828	case X86EMUL_MODE_VM86:
1829		if (iopl < 3)
1830			return emulate_gp(ctxt, 0);
1831		change_mask |= X86_EFLAGS_IF;
1832		break;
1833	default: /* real mode */
1834		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1835		break;
1836	}
1837
1838	*(unsigned long *)dest =
1839		(ctxt->eflags & ~change_mask) | (val & change_mask);
1840
1841	return rc;
1842}
1843
1844static int em_popf(struct x86_emulate_ctxt *ctxt)
1845{
1846	ctxt->dst.type = OP_REG;
1847	ctxt->dst.addr.reg = &ctxt->eflags;
1848	ctxt->dst.bytes = ctxt->op_bytes;
1849	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1850}
1851
1852static int em_enter(struct x86_emulate_ctxt *ctxt)
1853{
1854	int rc;
1855	unsigned frame_size = ctxt->src.val;
1856	unsigned nesting_level = ctxt->src2.val & 31;
1857	ulong rbp;
1858
1859	if (nesting_level)
1860		return X86EMUL_UNHANDLEABLE;
1861
1862	rbp = reg_read(ctxt, VCPU_REGS_RBP);
1863	rc = push(ctxt, &rbp, stack_size(ctxt));
1864	if (rc != X86EMUL_CONTINUE)
1865		return rc;
1866	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1867		      stack_mask(ctxt));
1868	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1869		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1870		      stack_mask(ctxt));
1871	return X86EMUL_CONTINUE;
1872}
1873
1874static int em_leave(struct x86_emulate_ctxt *ctxt)
1875{
1876	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1877		      stack_mask(ctxt));
1878	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1879}
1880
1881static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1882{
1883	int seg = ctxt->src2.val;
1884
1885	ctxt->src.val = get_segment_selector(ctxt, seg);
1886	if (ctxt->op_bytes == 4) {
1887		rsp_increment(ctxt, -2);
1888		ctxt->op_bytes = 2;
1889	}
1890
1891	return em_push(ctxt);
1892}
1893
1894static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1895{
1896	int seg = ctxt->src2.val;
1897	unsigned long selector;
1898	int rc;
1899
1900	rc = emulate_pop(ctxt, &selector, 2);
1901	if (rc != X86EMUL_CONTINUE)
1902		return rc;
1903
1904	if (ctxt->modrm_reg == VCPU_SREG_SS)
1905		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1906	if (ctxt->op_bytes > 2)
1907		rsp_increment(ctxt, ctxt->op_bytes - 2);
1908
1909	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1910	return rc;
1911}
1912
1913static int em_pusha(struct x86_emulate_ctxt *ctxt)
1914{
1915	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1916	int rc = X86EMUL_CONTINUE;
1917	int reg = VCPU_REGS_RAX;
1918
1919	while (reg <= VCPU_REGS_RDI) {
 1920		ctxt->src.val = (reg == VCPU_REGS_RSP) ?
 1921				old_esp : reg_read(ctxt, reg);
1922
1923		rc = em_push(ctxt);
1924		if (rc != X86EMUL_CONTINUE)
1925			return rc;
1926
1927		++reg;
1928	}
1929
1930	return rc;
1931}
1932
1933static int em_pushf(struct x86_emulate_ctxt *ctxt)
1934{
1935	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
1936	return em_push(ctxt);
1937}
1938
1939static int em_popa(struct x86_emulate_ctxt *ctxt)
1940{
1941	int rc = X86EMUL_CONTINUE;
1942	int reg = VCPU_REGS_RDI;
1943	u32 val;
1944
1945	while (reg >= VCPU_REGS_RAX) {
1946		if (reg == VCPU_REGS_RSP) {
1947			rsp_increment(ctxt, ctxt->op_bytes);
1948			--reg;
1949		}
1950
1951		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
1952		if (rc != X86EMUL_CONTINUE)
1953			break;
1954		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
1955		--reg;
1956	}
1957	return rc;
1958}
1959
1960static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1961{
1962	const struct x86_emulate_ops *ops = ctxt->ops;
1963	int rc;
1964	struct desc_ptr dt;
1965	gva_t cs_addr;
1966	gva_t eip_addr;
1967	u16 cs, eip;
1968
1969	/* TODO: Add limit checks */
1970	ctxt->src.val = ctxt->eflags;
1971	rc = em_push(ctxt);
1972	if (rc != X86EMUL_CONTINUE)
1973		return rc;
1974
1975	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
1976
1977	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1978	rc = em_push(ctxt);
1979	if (rc != X86EMUL_CONTINUE)
1980		return rc;
1981
1982	ctxt->src.val = ctxt->_eip;
1983	rc = em_push(ctxt);
1984	if (rc != X86EMUL_CONTINUE)
1985		return rc;
1986
1987	ops->get_idt(ctxt, &dt);
1988
1989	eip_addr = dt.address + (irq << 2);
1990	cs_addr = dt.address + (irq << 2) + 2;
1991
1992	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1993	if (rc != X86EMUL_CONTINUE)
1994		return rc;
1995
1996	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1997	if (rc != X86EMUL_CONTINUE)
1998		return rc;
1999
2000	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2001	if (rc != X86EMUL_CONTINUE)
2002		return rc;
2003
2004	ctxt->_eip = eip;
2005
2006	return rc;
2007}
2008
2009int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2010{
2011	int rc;
2012
2013	invalidate_registers(ctxt);
2014	rc = __emulate_int_real(ctxt, irq);
2015	if (rc == X86EMUL_CONTINUE)
2016		writeback_registers(ctxt);
2017	return rc;
2018}
2019
2020static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2021{
2022	switch(ctxt->mode) {
2023	case X86EMUL_MODE_REAL:
2024		return __emulate_int_real(ctxt, irq);
2025	case X86EMUL_MODE_VM86:
2026	case X86EMUL_MODE_PROT16:
2027	case X86EMUL_MODE_PROT32:
2028	case X86EMUL_MODE_PROT64:
2029	default:
2030		/* Protected-mode interrupts are not implemented yet */
2031		return X86EMUL_UNHANDLEABLE;
2032	}
2033}
2034
2035static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2036{
2037	int rc = X86EMUL_CONTINUE;
2038	unsigned long temp_eip = 0;
2039	unsigned long temp_eflags = 0;
2040	unsigned long cs = 0;
2041	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2042			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2043			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2044			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2045			     X86_EFLAGS_AC | X86_EFLAGS_ID |
2046			     X86_EFLAGS_FIXED;
2047	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2048				  X86_EFLAGS_VIP;
2049
2050	/* TODO: Add stack limit check */
2051
2052	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2053
2054	if (rc != X86EMUL_CONTINUE)
2055		return rc;
2056
2057	if (temp_eip & ~0xffff)
2058		return emulate_gp(ctxt, 0);
2059
2060	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2061
2062	if (rc != X86EMUL_CONTINUE)
2063		return rc;
2064
2065	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2066
2067	if (rc != X86EMUL_CONTINUE)
2068		return rc;
2069
2070	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2071
2072	if (rc != X86EMUL_CONTINUE)
2073		return rc;
2074
2075	ctxt->_eip = temp_eip;
2076
2077	if (ctxt->op_bytes == 4)
2078		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2079	else if (ctxt->op_bytes == 2) {
2080		ctxt->eflags &= ~0xffff;
2081		ctxt->eflags |= temp_eflags;
2082	}
2083
2084	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2085	ctxt->eflags |= X86_EFLAGS_FIXED;
2086	ctxt->ops->set_nmi_mask(ctxt, false);
2087
2088	return rc;
2089}
2090
2091static int em_iret(struct x86_emulate_ctxt *ctxt)
2092{
2093	switch(ctxt->mode) {
2094	case X86EMUL_MODE_REAL:
2095		return emulate_iret_real(ctxt);
2096	case X86EMUL_MODE_VM86:
2097	case X86EMUL_MODE_PROT16:
2098	case X86EMUL_MODE_PROT32:
2099	case X86EMUL_MODE_PROT64:
2100	default:
2101		/* IRET from protected mode is not implemented yet */
2102		return X86EMUL_UNHANDLEABLE;
2103	}
2104}
2105
2106static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2107{
2108	int rc;
2109	unsigned short sel, old_sel;
2110	struct desc_struct old_desc, new_desc;
2111	const struct x86_emulate_ops *ops = ctxt->ops;
2112	u8 cpl = ctxt->ops->cpl(ctxt);
2113
2114	/* Assignment of RIP may only fail in 64-bit mode */
2115	if (ctxt->mode == X86EMUL_MODE_PROT64)
2116		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2117				 VCPU_SREG_CS);
2118
2119	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2120
2121	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2122				       X86_TRANSFER_CALL_JMP,
2123				       &new_desc);
2124	if (rc != X86EMUL_CONTINUE)
2125		return rc;
2126
2127	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2128	if (rc != X86EMUL_CONTINUE) {
2129		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2130		/* assigning eip failed; restore the old cs */
2131		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2132		return rc;
2133	}
2134	return rc;
2135}
2136
2137static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2138{
2139	return assign_eip_near(ctxt, ctxt->src.val);
2140}
2141
2142static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2143{
2144	int rc;
2145	long int old_eip;
2146
2147	old_eip = ctxt->_eip;
2148	rc = assign_eip_near(ctxt, ctxt->src.val);
2149	if (rc != X86EMUL_CONTINUE)
2150		return rc;
2151	ctxt->src.val = old_eip;
2152	rc = em_push(ctxt);
2153	return rc;
2154}
2155
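/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a
 * match, set ZF and store ECX:EBX to memory, otherwise clear ZF and
 * load the operand into EDX:EAX.
 */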
2156static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2157{
2158	u64 old = ctxt->dst.orig_val64;
2159
2160	if (ctxt->dst.bytes == 16)
2161		return X86EMUL_UNHANDLEABLE;
2162
2163	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2164	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2165		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2166		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2167		ctxt->eflags &= ~X86_EFLAGS_ZF;
2168	} else {
2169		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2170			(u32) reg_read(ctxt, VCPU_REGS_RBX);
2171
2172		ctxt->eflags |= X86_EFLAGS_ZF;
2173	}
2174	return X86EMUL_CONTINUE;
2175}
2176
2177static int em_ret(struct x86_emulate_ctxt *ctxt)
2178{
2179	int rc;
2180	unsigned long eip;
2181
2182	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2183	if (rc != X86EMUL_CONTINUE)
2184		return rc;
2185
2186	return assign_eip_near(ctxt, eip);
2187}
2188
2189static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2190{
2191	int rc;
2192	unsigned long eip, cs;
2193	u16 old_cs;
2194	int cpl = ctxt->ops->cpl(ctxt);
2195	struct desc_struct old_desc, new_desc;
2196	const struct x86_emulate_ops *ops = ctxt->ops;
2197
2198	if (ctxt->mode == X86EMUL_MODE_PROT64)
2199		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2200				 VCPU_SREG_CS);
2201
2202	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2203	if (rc != X86EMUL_CONTINUE)
2204		return rc;
2205	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2206	if (rc != X86EMUL_CONTINUE)
2207		return rc;
2208	/* Outer-privilege level return is not implemented */
2209	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2210		return X86EMUL_UNHANDLEABLE;
2211	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2212				       X86_TRANSFER_RET,
2213				       &new_desc);
2214	if (rc != X86EMUL_CONTINUE)
2215		return rc;
2216	rc = assign_eip_far(ctxt, eip, &new_desc);
2217	if (rc != X86EMUL_CONTINUE) {
2218		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2219		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2220	}
2221	return rc;
2222}
2223
2224static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2225{
2226	int rc;
2227
2228	rc = em_ret_far(ctxt);
2229	if (rc != X86EMUL_CONTINUE)
2230		return rc;
2231	rsp_increment(ctxt, ctxt->src.val);
2232	return X86EMUL_CONTINUE;
2233}
2234
2235static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2236{
2237	/* Save real source value, then compare EAX against destination. */
2238	ctxt->dst.orig_val = ctxt->dst.val;
2239	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2240	ctxt->src.orig_val = ctxt->src.val;
2241	ctxt->src.val = ctxt->dst.orig_val;
2242	fastop(ctxt, em_cmp);
2243
2244	if (ctxt->eflags & X86_EFLAGS_ZF) {
2245		/* Success: write back to memory; no update of EAX */
2246		ctxt->src.type = OP_NONE;
2247		ctxt->dst.val = ctxt->src.orig_val;
2248	} else {
2249		/* Failure: write the value we saw to EAX. */
2250		ctxt->src.type = OP_REG;
2251		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2252		ctxt->src.val = ctxt->dst.orig_val;
2253		/* Create write-cycle to dest by writing the same value */
2254		ctxt->dst.val = ctxt->dst.orig_val;
2255	}
2256	return X86EMUL_CONTINUE;
2257}
2258
2259static int em_lseg(struct x86_emulate_ctxt *ctxt)
2260{
2261	int seg = ctxt->src2.val;
2262	unsigned short sel;
2263	int rc;
2264
2265	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2266
2267	rc = load_segment_descriptor(ctxt, sel, seg);
2268	if (rc != X86EMUL_CONTINUE)
2269		return rc;
2270
2271	ctxt->dst.val = ctxt->src.val;
2272	return rc;
2273}
2274
2275static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2276{
2277	u32 eax, ebx, ecx, edx;
2278
2279	eax = 0x80000001;
2280	ecx = 0;
2281	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2282	return edx & bit(X86_FEATURE_LM);
2283}
2284
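/*
 * Read a value of the given type from the SMRAM save state area.
 * Note the non-local control flow: a failed read makes the *calling*
 * function return X86EMUL_UNHANDLEABLE.
 */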
2285#define GET_SMSTATE(type, smbase, offset)				  \
2286	({								  \
2287	 type __val;							  \
2288	 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val,      \
2289				      sizeof(__val));			  \
2290	 if (r != X86EMUL_CONTINUE)					  \
2291		 return X86EMUL_UNHANDLEABLE;				  \
2292	 __val;								  \
2293	})
2294
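/*
 * Unpack segment attributes from their packed SMRAM representation,
 * which mirrors the high word of a native descriptor: G at bit 23,
 * D/B at 22, L at 21, AVL at 20, P at 15, DPL at 14:13, S at 12 and
 * the type field at 11:8.
 */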
2295static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2296{
2297	desc->g    = (flags >> 23) & 1;
2298	desc->d    = (flags >> 22) & 1;
2299	desc->l    = (flags >> 21) & 1;
2300	desc->avl  = (flags >> 20) & 1;
2301	desc->p    = (flags >> 15) & 1;
2302	desc->dpl  = (flags >> 13) & 3;
2303	desc->s    = (flags >> 12) & 1;
2304	desc->type = (flags >>  8) & 15;
2305}
2306
2307static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2308{
2309	struct desc_struct desc;
2310	int offset;
2311	u16 selector;
2312
2313	selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);
2314
2315	if (n < 3)
2316		offset = 0x7f84 + n * 12;
2317	else
2318		offset = 0x7f2c + (n - 3) * 12;
2319
2320	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
2321	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
2322	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
2323	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2324	return X86EMUL_CONTINUE;
2325}
2326
2327static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
2328{
2329	struct desc_struct desc;
2330	int offset;
2331	u16 selector;
2332	u32 base3;
2333
2334	offset = 0x7e00 + n * 16;
2335
2336	selector =                GET_SMSTATE(u16, smbase, offset);
2337	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
2338	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
2339	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
2340	base3 =                   GET_SMSTATE(u32, smbase, offset + 12);
2341
2342	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2343	return X86EMUL_CONTINUE;
2344}
2345
2346static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2347				     u64 cr0, u64 cr4)
2348{
2349	int bad;
2350
2351	/*
2352	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2353	 * Then enable protected mode.	However, PCID cannot be enabled
2354	 * if EFER.LMA=0, so set it separately.
2355	 */
2356	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2357	if (bad)
2358		return X86EMUL_UNHANDLEABLE;
2359
2360	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2361	if (bad)
2362		return X86EMUL_UNHANDLEABLE;
2363
2364	if (cr4 & X86_CR4_PCIDE) {
2365		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2366		if (bad)
2367			return X86EMUL_UNHANDLEABLE;
2368	}
2369
2370	return X86EMUL_CONTINUE;
2371}
2372
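/*
 * Load CPU state from the 32-bit SMRAM save state map.  The offsets
 * below are relative to the smbase argument, which em_rsm() passes in
 * as the actual SMBASE plus 0x8000.
 */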
2373static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
2374{
2375	struct desc_struct desc;
2376	struct desc_ptr dt;
2377	u16 selector;
2378	u32 val, cr0, cr4;
2379	int i;
2380
2381	cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
2382	ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
2383	ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
2384	ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
2385
2386	for (i = 0; i < 8; i++)
2387		*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);
2388
2389	val = GET_SMSTATE(u32, smbase, 0x7fcc);
2390	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2391	val = GET_SMSTATE(u32, smbase, 0x7fc8);
2392	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2393
2394	selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
2395	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
2396	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
2397	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
2398	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2399
2400	selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
2401	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
2402	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
2403	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
2404	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2405
2406	dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
2407	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
2408	ctxt->ops->set_gdt(ctxt, &dt);
2409
2410	dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
2411	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
2412	ctxt->ops->set_idt(ctxt, &dt);
2413
2414	for (i = 0; i < 6; i++) {
2415		int r = rsm_load_seg_32(ctxt, smbase, i);
2416		if (r != X86EMUL_CONTINUE)
2417			return r;
2418	}
2419
2420	cr4 = GET_SMSTATE(u32, smbase, 0x7f14);
2421
2422	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
2423
2424	return rsm_enter_protected_mode(ctxt, cr0, cr4);
2425}
2426
2427static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
2428{
2429	struct desc_struct desc;
2430	struct desc_ptr dt;
2431	u64 val, cr0, cr4;
2432	u32 base3;
2433	u16 selector;
2434	int i, r;
2435
2436	for (i = 0; i < 16; i++)
2437		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
2438
2439	ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
2440	ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;
2441
2442	val = GET_SMSTATE(u32, smbase, 0x7f68);
2443	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2444	val = GET_SMSTATE(u32, smbase, 0x7f60);
2445	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2446
2447	cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
2448	ctxt->ops->set_cr(ctxt, 3,  GET_SMSTATE(u64, smbase, 0x7f50));
2449	cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
2450	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
2451	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
2452	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2453
2454	selector =                  GET_SMSTATE(u32, smbase, 0x7e90);
2455	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
2456	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e94));
2457	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e98));
2458	base3 =                     GET_SMSTATE(u32, smbase, 0x7e9c);
2459	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2460
2461	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
2462	dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
2463	ctxt->ops->set_idt(ctxt, &dt);
2464
2465	selector =                  GET_SMSTATE(u32, smbase, 0x7e70);
2466	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
2467	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e74));
2468	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e78));
2469	base3 =                     GET_SMSTATE(u32, smbase, 0x7e7c);
2470	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2471
2472	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
2473	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
2474	ctxt->ops->set_gdt(ctxt, &dt);
2475
2476	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
2477	if (r != X86EMUL_CONTINUE)
2478		return r;
2479
2480	for (i = 0; i < 6; i++) {
2481		r = rsm_load_seg_64(ctxt, smbase, i);
2482		if (r != X86EMUL_CONTINUE)
2483			return r;
2484	}
2485
2486	return X86EMUL_CONTINUE;
2487}
2488
2489static int em_rsm(struct x86_emulate_ctxt *ctxt)
2490{
2491	unsigned long cr0, cr4, efer;
2492	u64 smbase;
2493	int ret;
2494
2495	if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
2496		return emulate_ud(ctxt);
2497
2498	/*
2499	 * Get back to real mode, to prepare a safe state in which to load
2500	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
2501	 * supports long mode.
2502	 */
2503	cr4 = ctxt->ops->get_cr(ctxt, 4);
2504	if (emulator_has_longmode(ctxt)) {
2505		struct desc_struct cs_desc;
2506
2507		/* Zero CR4.PCIDE before CR0.PG.  */
2508		if (cr4 & X86_CR4_PCIDE) {
2509			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2510			cr4 &= ~X86_CR4_PCIDE;
2511		}
2512
2513		/* A 32-bit code segment is required to clear EFER.LMA.  */
2514		memset(&cs_desc, 0, sizeof(cs_desc));
2515		cs_desc.type = 0xb;
2516		cs_desc.s = cs_desc.g = cs_desc.p = 1;
2517		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2518	}
2519
2520	/* For the 64-bit case, this will clear EFER.LMA.  */
2521	cr0 = ctxt->ops->get_cr(ctxt, 0);
2522	if (cr0 & X86_CR0_PE)
2523		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2524
2525	/* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
2526	if (cr4 & X86_CR4_PAE)
2527		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2528
2529	/* And finally go back to 32-bit mode.  */
2530	efer = 0;
2531	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2532
2533	smbase = ctxt->ops->get_smbase(ctxt);
2534	if (emulator_has_longmode(ctxt))
2535		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
2536	else
2537		ret = rsm_load_state_32(ctxt, smbase + 0x8000);
2538
2539	if (ret != X86EMUL_CONTINUE) {
2540		/* FIXME: should triple fault */
2541		return X86EMUL_UNHANDLEABLE;
2542	}
2543
2544	if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2545		ctxt->ops->set_nmi_mask(ctxt, false);
2546
2547	ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
2548	ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
2549	return X86EMUL_CONTINUE;
2550}
2551
2552static void
2553setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2554			struct desc_struct *cs, struct desc_struct *ss)
2555{
2556	cs->l = 0;		/* will be adjusted later */
2557	set_desc_base(cs, 0);	/* flat segment */
2558	cs->g = 1;		/* 4kb granularity */
2559	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
2560	cs->type = 0x0b;	/* Read, Execute, Accessed */
2561	cs->s = 1;
2562	cs->dpl = 0;		/* will be adjusted later */
2563	cs->p = 1;
2564	cs->d = 1;
2565	cs->avl = 0;
2566
2567	set_desc_base(ss, 0);	/* flat segment */
2568	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
2569	ss->g = 1;		/* 4kb granularity */
2570	ss->s = 1;
2571	ss->type = 0x03;	/* Read/Write, Accessed */
2572	ss->d = 1;		/* 32bit stack segment */
2573	ss->dpl = 0;
2574	ss->p = 1;
2575	ss->l = 0;
2576	ss->avl = 0;
2577}
2578
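/*
 * CPUID leaf 0 returns the vendor string in EBX:EDX:ECX; on Intel
 * parts the three registers hold "Genu", "ineI" and "ntel".
 */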
2579static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2580{
2581	u32 eax, ebx, ecx, edx;
2582
2583	eax = ecx = 0;
2584	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2585	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2586		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2587		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2588}
2589
2590static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2591{
2592	const struct x86_emulate_ops *ops = ctxt->ops;
2593	u32 eax, ebx, ecx, edx;
2594
2595	/*
2596	 * SYSCALL is always enabled in long mode, so the check only
2597	 * becomes vendor-specific (via CPUID) when other modes are active...
2598	 */
2599	if (ctxt->mode == X86EMUL_MODE_PROT64)
2600		return true;
2601
2602	eax = 0x00000000;
2603	ecx = 0x00000000;
2604	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2605	/*
2606	 * Intel ("GenuineIntel")
2607	 * Remark: Intel CPUs only support "syscall" in 64-bit long mode,
2608	 * so a 64-bit guest running a 32-bit compat app will take a #UD.
2609	 * This behaviour could be hidden by emulating the AMD response
2610	 * instead, but AMD CPUs cannot be made to behave like Intel ones,
2611	 * so Intel semantics are kept here.
2612	 */
2613	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2614	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2615	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2616		return false;
2617
2618	/* AMD ("AuthenticAMD") */
2619	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2620	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2621	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2622		return true;
2623
2624	/* AMD ("AMDisbetter!") */
2625	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2626	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2627	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2628		return true;
2629
2630	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
2631	return false;
2632}
2633
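/*
 * SYSCALL: the target CS selector comes from MSR_STAR bits 47:32 and
 * SS is architecturally CS + 8.  The target RIP comes from MSR_LSTAR
 * in 64-bit mode, MSR_CSTAR in compatibility mode, or the low half of
 * MSR_STAR in legacy mode.
 */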
2634static int em_syscall(struct x86_emulate_ctxt *ctxt)
2635{
2636	const struct x86_emulate_ops *ops = ctxt->ops;
2637	struct desc_struct cs, ss;
2638	u64 msr_data;
2639	u16 cs_sel, ss_sel;
2640	u64 efer = 0;
2641
2642	/* syscall is not available in real mode */
2643	if (ctxt->mode == X86EMUL_MODE_REAL ||
2644	    ctxt->mode == X86EMUL_MODE_VM86)
2645		return emulate_ud(ctxt);
2646
2647	if (!(em_syscall_is_enabled(ctxt)))
2648		return emulate_ud(ctxt);
2649
2650	ops->get_msr(ctxt, MSR_EFER, &efer);
2651	setup_syscalls_segments(ctxt, &cs, &ss);
2652
2653	if (!(efer & EFER_SCE))
2654		return emulate_ud(ctxt);
2655
2656	ops->get_msr(ctxt, MSR_STAR, &msr_data);
2657	msr_data >>= 32;
2658	cs_sel = (u16)(msr_data & 0xfffc);
2659	ss_sel = (u16)(msr_data + 8);
2660
2661	if (efer & EFER_LMA) {
2662		cs.d = 0;
2663		cs.l = 1;
2664	}
2665	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2666	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2667
2668	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2669	if (efer & EFER_LMA) {
2670#ifdef CONFIG_X86_64
2671		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2672
2673		ops->get_msr(ctxt,
2674			     ctxt->mode == X86EMUL_MODE_PROT64 ?
2675			     MSR_LSTAR : MSR_CSTAR, &msr_data);
2676		ctxt->_eip = msr_data;
2677
2678		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2679		ctxt->eflags &= ~msr_data;
2680		ctxt->eflags |= X86_EFLAGS_FIXED;
2681#endif
2682	} else {
2683		/* legacy mode */
2684		ops->get_msr(ctxt, MSR_STAR, &msr_data);
2685		ctxt->_eip = (u32)msr_data;
2686
2687		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2688	}
2689
2690	return X86EMUL_CONTINUE;
2691}
2692
2693static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2694{
2695	const struct x86_emulate_ops *ops = ctxt->ops;
2696	struct desc_struct cs, ss;
2697	u64 msr_data;
2698	u16 cs_sel, ss_sel;
2699	u64 efer = 0;
2700
2701	ops->get_msr(ctxt, MSR_EFER, &efer);
2702	/* inject #GP if in real mode */
2703	if (ctxt->mode == X86EMUL_MODE_REAL)
2704		return emulate_gp(ctxt, 0);
2705
2706	/*
2707	 * Not recognized on AMD in compat mode (but is recognized in legacy
2708	 * mode).
2709	 */
2710	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2711	    && !vendor_intel(ctxt))
2712		return emulate_ud(ctxt);
2713
2714	/* sysenter/sysexit have not been tested in 64bit mode. */
2715	if (ctxt->mode == X86EMUL_MODE_PROT64)
2716		return X86EMUL_UNHANDLEABLE;
2717
2718	setup_syscalls_segments(ctxt, &cs, &ss);
2719
2720	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2721	if ((msr_data & 0xfffc) == 0x0)
2722		return emulate_gp(ctxt, 0);
2723
2724	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2725	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2726	ss_sel = cs_sel + 8;
2727	if (efer & EFER_LMA) {
2728		cs.d = 0;
2729		cs.l = 1;
2730	}
2731
2732	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2733	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2734
2735	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2736	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2737
2738	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2739	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2740							      (u32)msr_data;
2741
2742	return X86EMUL_CONTINUE;
2743}
2744
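/*
 * SYSEXIT derives its selectors from MSR_IA32_SYSENTER_CS: for a
 * 32-bit return, user CS is at +16 and SS at +24; for a 64-bit return
 * (selected by REX.W), CS is at +32 with SS at CS + 8.
 */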
2745static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2746{
2747	const struct x86_emulate_ops *ops = ctxt->ops;
2748	struct desc_struct cs, ss;
2749	u64 msr_data, rcx, rdx;
2750	int usermode;
2751	u16 cs_sel = 0, ss_sel = 0;
2752
2753	/* inject #GP if in real mode or Virtual 8086 mode */
2754	if (ctxt->mode == X86EMUL_MODE_REAL ||
2755	    ctxt->mode == X86EMUL_MODE_VM86)
2756		return emulate_gp(ctxt, 0);
2757
2758	setup_syscalls_segments(ctxt, &cs, &ss);
2759
2760	if ((ctxt->rex_prefix & 0x8) != 0x0)
2761		usermode = X86EMUL_MODE_PROT64;
2762	else
2763		usermode = X86EMUL_MODE_PROT32;
2764
2765	rcx = reg_read(ctxt, VCPU_REGS_RCX);
2766	rdx = reg_read(ctxt, VCPU_REGS_RDX);
2767
2768	cs.dpl = 3;
2769	ss.dpl = 3;
2770	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2771	switch (usermode) {
2772	case X86EMUL_MODE_PROT32:
2773		cs_sel = (u16)(msr_data + 16);
2774		if ((msr_data & 0xfffc) == 0x0)
2775			return emulate_gp(ctxt, 0);
2776		ss_sel = (u16)(msr_data + 24);
2777		rcx = (u32)rcx;
2778		rdx = (u32)rdx;
2779		break;
2780	case X86EMUL_MODE_PROT64:
2781		cs_sel = (u16)(msr_data + 32);
2782		if (msr_data == 0x0)
2783			return emulate_gp(ctxt, 0);
2784		ss_sel = cs_sel + 8;
2785		cs.d = 0;
2786		cs.l = 1;
2787		if (is_noncanonical_address(rcx) ||
2788		    is_noncanonical_address(rdx))
2789			return emulate_gp(ctxt, 0);
2790		break;
2791	}
2792	cs_sel |= SEGMENT_RPL_MASK;
2793	ss_sel |= SEGMENT_RPL_MASK;
2794
2795	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2796	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2797
2798	ctxt->_eip = rdx;
2799	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2800
2801	return X86EMUL_CONTINUE;
2802}
2803
2804static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2805{
2806	int iopl;
2807	if (ctxt->mode == X86EMUL_MODE_REAL)
2808		return false;
2809	if (ctxt->mode == X86EMUL_MODE_VM86)
2810		return true;
2811	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2812	return ctxt->ops->cpl(ctxt) > iopl;
2813}
2814
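/*
 * Consult the I/O permission bitmap in the TSS.  The 16-bit I/O map
 * base is stored at offset 102, so the TSS limit must be at least
 * 103; port N is governed by bit (N & 7) of byte (map base + N / 8),
 * and every bit covering the access must be clear.
 */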
2815static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2816					    u16 port, u16 len)
2817{
2818	const struct x86_emulate_ops *ops = ctxt->ops;
2819	struct desc_struct tr_seg;
2820	u32 base3;
2821	int r;
2822	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2823	unsigned mask = (1 << len) - 1;
2824	unsigned long base;
2825
2826	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2827	if (!tr_seg.p)
2828		return false;
2829	if (desc_limit_scaled(&tr_seg) < 103)
2830		return false;
2831	base = get_desc_base(&tr_seg);
2832#ifdef CONFIG_X86_64
2833	base |= ((u64)base3) << 32;
2834#endif
2835	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2836	if (r != X86EMUL_CONTINUE)
2837		return false;
2838	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2839		return false;
2840	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2841	if (r != X86EMUL_CONTINUE)
2842		return false;
2843	if ((perm >> bit_idx) & mask)
2844		return false;
2845	return true;
2846}
2847
2848static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2849				 u16 port, u16 len)
2850{
2851	if (ctxt->perm_ok)
2852		return true;
2853
2854	if (emulator_bad_iopl(ctxt))
2855		if (!emulator_io_port_access_allowed(ctxt, port, len))
2856			return false;
2857
2858	ctxt->perm_ok = true;
2859
2860	return true;
2861}
2862
2863static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2864{
2865	/*
2866	 * Intel CPUs mask the counter and pointers in a rather strange
2867	 * manner when ECX is zero due to REP-string optimizations.
2868	 */
2869#ifdef CONFIG_X86_64
2870	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2871		return;
2872
2873	*reg_write(ctxt, VCPU_REGS_RCX) = 0;
2874
2875	switch (ctxt->b) {
2876	case 0xa4:	/* movsb */
2877	case 0xa5:	/* movsd/w */
2878		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2879		/* fall through */
2880	case 0xaa:	/* stosb */
2881	case 0xab:	/* stosd/w */
2882		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2883	}
2884#endif
2885}
2886
2887static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2888				struct tss_segment_16 *tss)
2889{
2890	tss->ip = ctxt->_eip;
2891	tss->flag = ctxt->eflags;
2892	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2893	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2894	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2895	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2896	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2897	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2898	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2899	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2900
2901	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2902	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2903	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2904	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2905	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2906}
2907
2908static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2909				 struct tss_segment_16 *tss)
2910{
2911	int ret;
2912	u8 cpl;
2913
2914	ctxt->_eip = tss->ip;
2915	ctxt->eflags = tss->flag | 2;
2916	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2917	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2918	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2919	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2920	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2921	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2922	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2923	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2924
2925	/*
2926	 * SDM says that segment selectors are loaded before segment
2927	 * descriptors
2928	 */
2929	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2930	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2931	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2932	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2933	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2934
2935	cpl = tss->cs & 3;
2936
2937	/*
2938	 * Now load the segment descriptors. If a fault happens at this
2939	 * stage, it is handled in the context of the new task.
2940	 */
2941	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2942					X86_TRANSFER_TASK_SWITCH, NULL);
2943	if (ret != X86EMUL_CONTINUE)
2944		return ret;
2945	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2946					X86_TRANSFER_TASK_SWITCH, NULL);
2947	if (ret != X86EMUL_CONTINUE)
2948		return ret;
2949	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2950					X86_TRANSFER_TASK_SWITCH, NULL);
2951	if (ret != X86EMUL_CONTINUE)
2952		return ret;
2953	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2954					X86_TRANSFER_TASK_SWITCH, NULL);
2955	if (ret != X86EMUL_CONTINUE)
2956		return ret;
2957	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2958					X86_TRANSFER_TASK_SWITCH, NULL);
2959	if (ret != X86EMUL_CONTINUE)
2960		return ret;
2961
2962	return X86EMUL_CONTINUE;
2963}
2964
2965static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2966			  u16 tss_selector, u16 old_tss_sel,
2967			  ulong old_tss_base, struct desc_struct *new_desc)
2968{
2969	const struct x86_emulate_ops *ops = ctxt->ops;
2970	struct tss_segment_16 tss_seg;
2971	int ret;
2972	u32 new_tss_base = get_desc_base(new_desc);
2973
2974	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2975			    &ctxt->exception);
2976	if (ret != X86EMUL_CONTINUE)
2977		return ret;
2978
2979	save_state_to_tss16(ctxt, &tss_seg);
2980
2981	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2982			     &ctxt->exception);
2983	if (ret != X86EMUL_CONTINUE)
2984		return ret;
2985
2986	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2987			    &ctxt->exception);
2988	if (ret != X86EMUL_CONTINUE)
2989		return ret;
2990
2991	if (old_tss_sel != 0xffff) {
2992		tss_seg.prev_task_link = old_tss_sel;
2993
2994		ret = ops->write_std(ctxt, new_tss_base,
2995				     &tss_seg.prev_task_link,
2996				     sizeof tss_seg.prev_task_link,
2997				     &ctxt->exception);
2998		if (ret != X86EMUL_CONTINUE)
2999			return ret;
3000	}
3001
3002	return load_state_from_tss16(ctxt, &tss_seg);
3003}
3004
3005static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3006				struct tss_segment_32 *tss)
3007{
3008	/* CR3 and the LDT selector are intentionally not saved */
3009	tss->eip = ctxt->_eip;
3010	tss->eflags = ctxt->eflags;
3011	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3012	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3013	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3014	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3015	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3016	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3017	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3018	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3019
3020	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3021	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3022	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3023	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3024	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3025	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3026}
3027
3028static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3029				 struct tss_segment_32 *tss)
3030{
3031	int ret;
3032	u8 cpl;
3033
3034	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3035		return emulate_gp(ctxt, 0);
3036	ctxt->_eip = tss->eip;
3037	ctxt->eflags = tss->eflags | 2;
3038
3039	/* General purpose registers */
3040	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3041	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3042	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3043	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3044	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3045	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3046	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3047	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3048
3049	/*
3050	 * SDM says that segment selectors are loaded before segment
3051	 * descriptors.  This is important because CPL checks will
3052	 * use CS.RPL.
3053	 */
3054	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3055	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3056	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3057	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3058	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3059	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3060	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3061
3062	/*
3063	 * If we're switching between Protected Mode and VM86, we need to make
3064	 * sure to update the mode before loading the segment descriptors so
3065	 * that the selectors are interpreted correctly.
3066	 */
3067	if (ctxt->eflags & X86_EFLAGS_VM) {
3068		ctxt->mode = X86EMUL_MODE_VM86;
3069		cpl = 3;
3070	} else {
3071		ctxt->mode = X86EMUL_MODE_PROT32;
3072		cpl = tss->cs & 3;
3073	}
3074
3075	/*
3076	 * Now load the segment descriptors. If a fault happens at this
3077	 * stage, it is handled in the context of the new task.
3078	 */
3079	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3080					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3081	if (ret != X86EMUL_CONTINUE)
3082		return ret;
3083	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3084					X86_TRANSFER_TASK_SWITCH, NULL);
3085	if (ret != X86EMUL_CONTINUE)
3086		return ret;
3087	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3088					X86_TRANSFER_TASK_SWITCH, NULL);
3089	if (ret != X86EMUL_CONTINUE)
3090		return ret;
3091	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3092					X86_TRANSFER_TASK_SWITCH, NULL);
3093	if (ret != X86EMUL_CONTINUE)
3094		return ret;
3095	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3096					X86_TRANSFER_TASK_SWITCH, NULL);
3097	if (ret != X86EMUL_CONTINUE)
3098		return ret;
3099	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3100					X86_TRANSFER_TASK_SWITCH, NULL);
3101	if (ret != X86EMUL_CONTINUE)
3102		return ret;
3103	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3104					X86_TRANSFER_TASK_SWITCH, NULL);
3105
3106	return ret;
3107}
3108
3109static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3110			  u16 tss_selector, u16 old_tss_sel,
3111			  ulong old_tss_base, struct desc_struct *new_desc)
3112{
3113	const struct x86_emulate_ops *ops = ctxt->ops;
3114	struct tss_segment_32 tss_seg;
3115	int ret;
3116	u32 new_tss_base = get_desc_base(new_desc);
3117	u32 eip_offset = offsetof(struct tss_segment_32, eip);
3118	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3119
3120	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
3121			    &ctxt->exception);
3122	if (ret != X86EMUL_CONTINUE)
3123		return ret;
3124
3125	save_state_to_tss32(ctxt, &tss_seg);
3126
3127	/* Only GP registers and segment selectors are saved */
3128	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3129			     ldt_sel_offset - eip_offset, &ctxt->exception);
3130	if (ret != X86EMUL_CONTINUE)
3131		return ret;
3132
3133	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
3134			    &ctxt->exception);
3135	if (ret != X86EMUL_CONTINUE)
3136		return ret;
3137
3138	if (old_tss_sel != 0xffff) {
3139		tss_seg.prev_task_link = old_tss_sel;
3140
3141		ret = ops->write_std(ctxt, new_tss_base,
3142				     &tss_seg.prev_task_link,
3143				     sizeof tss_seg.prev_task_link,
3144				     &ctxt->exception);
3145		if (ret != X86EMUL_CONTINUE)
3146			return ret;
3147	}
3148
3149	return load_state_from_tss32(ctxt, &tss_seg);
3150}
3151
3152static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3153				   u16 tss_selector, int idt_index, int reason,
3154				   bool has_error_code, u32 error_code)
3155{
3156	const struct x86_emulate_ops *ops = ctxt->ops;
3157	struct desc_struct curr_tss_desc, next_tss_desc;
3158	int ret;
3159	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3160	ulong old_tss_base =
3161		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3162	u32 desc_limit;
3163	ulong desc_addr, dr7;
3164
3165	/* FIXME: old_tss_base == ~0 ? */
3166
3167	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3168	if (ret != X86EMUL_CONTINUE)
3169		return ret;
3170	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3171	if (ret != X86EMUL_CONTINUE)
3172		return ret;
3173
3174	/* FIXME: check that next_tss_desc is tss */
3175
3176	/*
3177	 * Check privileges. The three cases are task switch caused by...
3178	 *
3179	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3180	 * 2. Exception/IRQ/iret: No check is performed
3181	 * 3. jmp/call to TSS/task-gate: No check is performed since the
3182	 *    hardware checks it before exiting.
3183	 */
3184	if (reason == TASK_SWITCH_GATE) {
3185		if (idt_index != -1) {
3186			/* Software interrupts */
3187			struct desc_struct task_gate_desc;
3188			int dpl;
3189
3190			ret = read_interrupt_descriptor(ctxt, idt_index,
3191							&task_gate_desc);
3192			if (ret != X86EMUL_CONTINUE)
3193				return ret;
3194
3195			dpl = task_gate_desc.dpl;
3196			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3197				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3198		}
3199	}
3200
3201	desc_limit = desc_limit_scaled(&next_tss_desc);
3202	if (!next_tss_desc.p ||
3203	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3204	     desc_limit < 0x2b)) {
3205		return emulate_ts(ctxt, tss_selector & 0xfffc);
3206	}
3207
3208	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3209		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3210		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3211	}
3212
3213	if (reason == TASK_SWITCH_IRET)
3214		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3215
3216	/* Set the back link to the previous task only if the NT bit is set
3217	   in EFLAGS; note that old_tss_sel is not used after this point. */
3218	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3219		old_tss_sel = 0xffff;
3220
3221	if (next_tss_desc.type & 8)
3222		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3223				     old_tss_base, &next_tss_desc);
3224	else
3225		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3226				     old_tss_base, &next_tss_desc);
3227	if (ret != X86EMUL_CONTINUE)
3228		return ret;
3229
3230	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3231		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3232
3233	if (reason != TASK_SWITCH_IRET) {
3234		next_tss_desc.type |= (1 << 1); /* set busy flag */
3235		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3236	}
3237
3238	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
3239	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3240
3241	if (has_error_code) {
3242		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3243		ctxt->lock_prefix = 0;
3244		ctxt->src.val = (unsigned long) error_code;
3245		ret = em_push(ctxt);
3246	}
3247
3248	ops->get_dr(ctxt, 7, &dr7);
3249	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3250
3251	return ret;
3252}
3253
3254int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3255			 u16 tss_selector, int idt_index, int reason,
3256			 bool has_error_code, u32 error_code)
3257{
3258	int rc;
3259
3260	invalidate_registers(ctxt);
3261	ctxt->_eip = ctxt->eip;
3262	ctxt->dst.type = OP_NONE;
3263
3264	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3265				     has_error_code, error_code);
3266
3267	if (rc == X86EMUL_CONTINUE) {
3268		ctxt->eip = ctxt->_eip;
3269		writeback_registers(ctxt);
3270	}
3271
3272	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3273}
3274
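/*
 * Advance a string-instruction pointer register (SI/DI) by the
 * operand size for each iteration, moving backwards if EFLAGS.DF is
 * set.
 */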
3275static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3276		struct operand *op)
3277{
3278	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3279
3280	register_address_increment(ctxt, reg, df * op->bytes);
3281	op->addr.mem.ea = register_address(ctxt, reg);
3282}
3283
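/*
 * DAS adjusts AL after a subtraction of packed BCD operands: subtract
 * 6 if the low nibble exceeds 9 (or AF was set), then subtract 0x60
 * if the original AL exceeded 0x99 (or CF was set).
 */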
3284static int em_das(struct x86_emulate_ctxt *ctxt)
3285{
3286	u8 al, old_al;
3287	bool af, cf, old_cf;
3288
3289	cf = ctxt->eflags & X86_EFLAGS_CF;
3290	al = ctxt->dst.val;
3291
3292	old_al = al;
3293	old_cf = cf;
3294	cf = false;
3295	af = ctxt->eflags & X86_EFLAGS_AF;
3296	if ((al & 0x0f) > 9 || af) {
3297		al -= 6;
3298		cf = old_cf | (al >= 250);
3299		af = true;
3300	} else {
3301		af = false;
3302	}
3303	if (old_al > 0x99 || old_cf) {
3304		al -= 0x60;
3305		cf = true;
3306	}
3307
3308	ctxt->dst.val = al;
3309	/* Set PF, ZF, SF */
3310	ctxt->src.type = OP_IMM;
3311	ctxt->src.val = 0;
3312	ctxt->src.bytes = 1;
3313	fastop(ctxt, em_or);
3314	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3315	if (cf)
3316		ctxt->eflags |= X86_EFLAGS_CF;
3317	if (af)
3318		ctxt->eflags |= X86_EFLAGS_AF;
3319	return X86EMUL_CONTINUE;
3320}
3321
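/*
 * AAM: divide AL by the immediate (normally 10), putting the quotient
 * in AH and the remainder in AL; an immediate of zero raises #DE.
 */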
3322static int em_aam(struct x86_emulate_ctxt *ctxt)
3323{
3324	u8 al, ah;
3325
3326	if (ctxt->src.val == 0)
3327		return emulate_de(ctxt);
3328
3329	al = ctxt->dst.val & 0xff;
3330	ah = al / ctxt->src.val;
3331	al %= ctxt->src.val;
3332
3333	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3334
3335	/* Set PF, ZF, SF */
3336	ctxt->src.type = OP_IMM;
3337	ctxt->src.val = 0;
3338	ctxt->src.bytes = 1;
3339	fastop(ctxt, em_or);
3340
3341	return X86EMUL_CONTINUE;
3342}
3343
3344static int em_aad(struct x86_emulate_ctxt *ctxt)
3345{
3346	u8 al = ctxt->dst.val & 0xff;
3347	u8 ah = (ctxt->dst.val >> 8) & 0xff;
3348
3349	al = (al + (ah * ctxt->src.val)) & 0xff;
3350
3351	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3352
3353	/* Set PF, ZF, SF */
3354	ctxt->src.type = OP_IMM;
3355	ctxt->src.val = 0;
3356	ctxt->src.bytes = 1;
3357	fastop(ctxt, em_or);
3358
3359	return X86EMUL_CONTINUE;
3360}
3361
3362static int em_call(struct x86_emulate_ctxt *ctxt)
3363{
3364	int rc;
3365	long rel = ctxt->src.val;
3366
3367	ctxt->src.val = (unsigned long)ctxt->_eip;
3368	rc = jmp_rel(ctxt, rel);
3369	if (rc != X86EMUL_CONTINUE)
3370		return rc;
3371	return em_push(ctxt);
3372}
3373
3374static int em_call_far(struct x86_emulate_ctxt *ctxt)
3375{
3376	u16 sel, old_cs;
3377	ulong old_eip;
3378	int rc;
3379	struct desc_struct old_desc, new_desc;
3380	const struct x86_emulate_ops *ops = ctxt->ops;
3381	int cpl = ctxt->ops->cpl(ctxt);
3382	enum x86emul_mode prev_mode = ctxt->mode;
3383
3384	old_eip = ctxt->_eip;
3385	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3386
3387	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3388	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3389				       X86_TRANSFER_CALL_JMP, &new_desc);
3390	if (rc != X86EMUL_CONTINUE)
3391		return rc;
3392
3393	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3394	if (rc != X86EMUL_CONTINUE)
3395		goto fail;
3396
3397	ctxt->src.val = old_cs;
3398	rc = em_push(ctxt);
3399	if (rc != X86EMUL_CONTINUE)
3400		goto fail;
3401
3402	ctxt->src.val = old_eip;
3403	rc = em_push(ctxt);
3404	/* If we failed, we tainted the memory, but at the very least we
3405	   should restore cs. */
3406	if (rc != X86EMUL_CONTINUE) {
3407		pr_warn_once("faulting far call emulation tainted memory\n");
3408		goto fail;
3409	}
3410	return rc;
3411fail:
3412	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3413	ctxt->mode = prev_mode;
3414	return rc;
3415
3416}
3417
3418static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3419{
3420	int rc;
3421	unsigned long eip;
3422
3423	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3424	if (rc != X86EMUL_CONTINUE)
3425		return rc;
3426	rc = assign_eip_near(ctxt, eip);
3427	if (rc != X86EMUL_CONTINUE)
3428		return rc;
3429	rsp_increment(ctxt, ctxt->src.val);
3430	return X86EMUL_CONTINUE;
3431}
3432
3433static int em_xchg(struct x86_emulate_ctxt *ctxt)
3434{
3435	/* Write back the register source. */
3436	ctxt->src.val = ctxt->dst.val;
3437	write_register_operand(&ctxt->src);
3438
3439	/* Write back the memory destination with implicit LOCK prefix. */
3440	ctxt->dst.val = ctxt->src.orig_val;
3441	ctxt->lock_prefix = 1;
3442	return X86EMUL_CONTINUE;
3443}
3444
3445static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3446{
3447	ctxt->dst.val = ctxt->src2.val;
3448	return fastop(ctxt, em_imul);
3449}
3450
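/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit throughout
 * DX/EDX/RDX.  Subtracting one from the isolated sign bit and
 * complementing the result yields all ones for a negative value and
 * zero otherwise.
 */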
3451static int em_cwd(struct x86_emulate_ctxt *ctxt)
3452{
3453	ctxt->dst.type = OP_REG;
3454	ctxt->dst.bytes = ctxt->src.bytes;
3455	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3456	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3457
3458	return X86EMUL_CONTINUE;
3459}
3460
3461static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3462{
3463	u64 tsc = 0;
3464
3465	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3466	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3467	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3468	return X86EMUL_CONTINUE;
3469}
3470
3471static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3472{
3473	u64 pmc;
3474
3475	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3476		return emulate_gp(ctxt, 0);
3477	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3478	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3479	return X86EMUL_CONTINUE;
3480}
3481
3482static int em_mov(struct x86_emulate_ctxt *ctxt)
3483{
3484	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3485	return X86EMUL_CONTINUE;
3486}
3487
3488#define FFL(x) bit(X86_FEATURE_##x)
3489
3490static int em_movbe(struct x86_emulate_ctxt *ctxt)
3491{
3492	u32 ebx, ecx, edx, eax = 1;
3493	u16 tmp;
3494
3495	/*
3496	 * Check MOVBE is set in the guest-visible CPUID leaf.
3497	 */
3498	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3499	if (!(ecx & FFL(MOVBE)))
3500		return emulate_ud(ctxt);
3501
3502	switch (ctxt->op_bytes) {
3503	case 2:
3504		/*
3505		 * From MOVBE definition: "...When the operand size is 16 bits,
3506		 * the upper word of the destination register remains unchanged
3507		 * ..."
3508		 *
3509		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3510		 * rules, so we have to do the operation almost by hand.
3511		 */
3512		tmp = (u16)ctxt->src.val;
3513		ctxt->dst.val &= ~0xffffUL;
3514		ctxt->dst.val |= (unsigned long)swab16(tmp);
3515		break;
3516	case 4:
3517		ctxt->dst.val = swab32((u32)ctxt->src.val);
3518		break;
3519	case 8:
3520		ctxt->dst.val = swab64(ctxt->src.val);
3521		break;
3522	default:
3523		BUG();
3524	}
3525	return X86EMUL_CONTINUE;
3526}
3527
3528static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3529{
3530	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3531		return emulate_gp(ctxt, 0);
3532
3533	/* Disable writeback. */
3534	ctxt->dst.type = OP_NONE;
3535	return X86EMUL_CONTINUE;
3536}
3537
3538static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3539{
3540	unsigned long val;
3541
3542	if (ctxt->mode == X86EMUL_MODE_PROT64)
3543		val = ctxt->src.val & ~0ULL;
3544	else
3545		val = ctxt->src.val & ~0U;
3546
3547	/* #UD condition is already handled. */
3548	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3549		return emulate_gp(ctxt, 0);
3550
3551	/* Disable writeback. */
3552	ctxt->dst.type = OP_NONE;
3553	return X86EMUL_CONTINUE;
3554}
3555
3556static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3557{
3558	u64 msr_data;
3559
3560	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3561		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3562	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3563		return emulate_gp(ctxt, 0);
3564
3565	return X86EMUL_CONTINUE;
3566}
3567
3568static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3569{
3570	u64 msr_data;
3571
3572	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3573		return emulate_gp(ctxt, 0);
3574
3575	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3576	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3577	return X86EMUL_CONTINUE;
3578}
3579
3580static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3581{
3582	if (ctxt->modrm_reg > VCPU_SREG_GS)
3583		return emulate_ud(ctxt);
3584
3585	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3586	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3587		ctxt->dst.bytes = 2;
3588	return X86EMUL_CONTINUE;
3589}
3590
3591static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3592{
3593	u16 sel = ctxt->src.val;
3594
3595	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3596		return emulate_ud(ctxt);
3597
3598	if (ctxt->modrm_reg == VCPU_SREG_SS)
3599		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3600
3601	/* Disable writeback. */
3602	ctxt->dst.type = OP_NONE;
3603	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3604}
3605
3606static int em_lldt(struct x86_emulate_ctxt *ctxt)
3607{
3608	u16 sel = ctxt->src.val;
3609
3610	/* Disable writeback. */
3611	ctxt->dst.type = OP_NONE;
3612	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3613}
3614
3615static int em_ltr(struct x86_emulate_ctxt *ctxt)
3616{
3617	u16 sel = ctxt->src.val;
3618
3619	/* Disable writeback. */
3620	ctxt->dst.type = OP_NONE;
3621	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3622}
3623
3624static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3625{
3626	int rc;
3627	ulong linear;
3628
3629	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3630	if (rc == X86EMUL_CONTINUE)
3631		ctxt->ops->invlpg(ctxt, linear);
3632	/* Disable writeback. */
3633	ctxt->dst.type = OP_NONE;
3634	return X86EMUL_CONTINUE;
3635}
3636
3637static int em_clts(struct x86_emulate_ctxt *ctxt)
3638{
3639	ulong cr0;
3640
3641	cr0 = ctxt->ops->get_cr(ctxt, 0);
3642	cr0 &= ~X86_CR0_TS;
3643	ctxt->ops->set_cr(ctxt, 0, cr0);
3644	return X86EMUL_CONTINUE;
3645}
3646
3647static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3648{
3649	int rc = ctxt->ops->fix_hypercall(ctxt);
3650
3651	if (rc != X86EMUL_CONTINUE)
3652		return rc;
3653
3654	/* Let the processor re-execute the fixed hypercall */
3655	ctxt->_eip = ctxt->eip;
3656	/* Disable writeback. */
3657	ctxt->dst.type = OP_NONE;
3658	return X86EMUL_CONTINUE;
3659}
3660
3661static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3662				  void (*get)(struct x86_emulate_ctxt *ctxt,
3663					      struct desc_ptr *ptr))
3664{
3665	struct desc_ptr desc_ptr;
3666
3667	if (ctxt->mode == X86EMUL_MODE_PROT64)
3668		ctxt->op_bytes = 8;
3669	get(ctxt, &desc_ptr);
3670	if (ctxt->op_bytes == 2) {
3671		ctxt->op_bytes = 4;
3672		desc_ptr.address &= 0x00ffffff;
3673	}
3674	/* Disable writeback. */
3675	ctxt->dst.type = OP_NONE;
3676	return segmented_write(ctxt, ctxt->dst.addr.mem,
3677			       &desc_ptr, 2 + ctxt->op_bytes);
3678}
3679
3680static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3681{
3682	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3683}
3684
3685static int em_sidt(struct x86_emulate_ctxt *ctxt)
3686{
3687	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3688}
3689
3690static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3691{
3692	struct desc_ptr desc_ptr;
3693	int rc;
3694
3695	if (ctxt->mode == X86EMUL_MODE_PROT64)
3696		ctxt->op_bytes = 8;
3697	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3698			     &desc_ptr.size, &desc_ptr.address,
3699			     ctxt->op_bytes);
3700	if (rc != X86EMUL_CONTINUE)
3701		return rc;
3702	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3703	    is_noncanonical_address(desc_ptr.address))
3704		return emulate_gp(ctxt, 0);
3705	if (lgdt)
3706		ctxt->ops->set_gdt(ctxt, &desc_ptr);
3707	else
3708		ctxt->ops->set_idt(ctxt, &desc_ptr);
3709	/* Disable writeback. */
3710	ctxt->dst.type = OP_NONE;
3711	return X86EMUL_CONTINUE;
3712}
3713
3714static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3715{
3716	return em_lgdt_lidt(ctxt, true);
3717}
3718
3719static int em_lidt(struct x86_emulate_ctxt *ctxt)
3720{
3721	return em_lgdt_lidt(ctxt, false);
3722}
3723
3724static int em_smsw(struct x86_emulate_ctxt *ctxt)
3725{
3726	if (ctxt->dst.type == OP_MEM)
3727		ctxt->dst.bytes = 2;
3728	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3729	return X86EMUL_CONTINUE;
3730}
3731
3732static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3733{
3734	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3735			  | (ctxt->src.val & 0x0f));
3736	ctxt->dst.type = OP_NONE;
3737	return X86EMUL_CONTINUE;
3738}
3739
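/*
 * LOOP (0xe2), LOOPE (0xe1) and LOOPNE (0xe0): decrement (E)CX and
 * branch while it is non-zero.  XORing the opcode with 5 maps LOOPE
 * onto condition code 4 (ZF set) and LOOPNE onto 5 (ZF clear) for
 * test_cc().
 */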
3740static int em_loop(struct x86_emulate_ctxt *ctxt)
3741{
3742	int rc = X86EMUL_CONTINUE;
3743
3744	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3745	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3746	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3747		rc = jmp_rel(ctxt, ctxt->src.val);
3748
3749	return rc;
3750}
3751
3752static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3753{
3754	int rc = X86EMUL_CONTINUE;
3755
3756	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3757		rc = jmp_rel(ctxt, ctxt->src.val);
3758
3759	return rc;
3760}
3761
3762static int em_in(struct x86_emulate_ctxt *ctxt)
3763{
3764	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3765			     &ctxt->dst.val))
3766		return X86EMUL_IO_NEEDED;
3767
3768	return X86EMUL_CONTINUE;
3769}
3770
3771static int em_out(struct x86_emulate_ctxt *ctxt)
3772{
3773	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3774				    &ctxt->src.val, 1);
3775	/* Disable writeback. */
3776	ctxt->dst.type = OP_NONE;
3777	return X86EMUL_CONTINUE;
3778}
3779
3780static int em_cli(struct x86_emulate_ctxt *ctxt)
3781{
3782	if (emulator_bad_iopl(ctxt))
3783		return emulate_gp(ctxt, 0);
3784
3785	ctxt->eflags &= ~X86_EFLAGS_IF;
3786	return X86EMUL_CONTINUE;
3787}
3788
3789static int em_sti(struct x86_emulate_ctxt *ctxt)
3790{
3791	if (emulator_bad_iopl(ctxt))
3792		return emulate_gp(ctxt, 0);
3793
3794	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3795	ctxt->eflags |= X86_EFLAGS_IF;
3796	return X86EMUL_CONTINUE;
3797}
3798
3799static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3800{
3801	u32 eax, ebx, ecx, edx;
3802
3803	eax = reg_read(ctxt, VCPU_REGS_RAX);
3804	ecx = reg_read(ctxt, VCPU_REGS_RCX);
3805	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3806	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
3807	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3808	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3809	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
3810	return X86EMUL_CONTINUE;
3811}
3812
3813static int em_sahf(struct x86_emulate_ctxt *ctxt)
3814{
3815	u32 flags;
3816
3817	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3818		X86_EFLAGS_SF;
3819	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3820
3821	ctxt->eflags &= ~0xffUL;
3822	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3823	return X86EMUL_CONTINUE;
3824}
3825
3826static int em_lahf(struct x86_emulate_ctxt *ctxt)
3827{
3828	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3829	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3830	return X86EMUL_CONTINUE;
3831}
3832
3833static int em_bswap(struct x86_emulate_ctxt *ctxt)
3834{
3835	switch (ctxt->op_bytes) {
3836#ifdef CONFIG_X86_64
3837	case 8:
3838		asm("bswap %0" : "+r"(ctxt->dst.val));
3839		break;
3840#endif
3841	default:
3842		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3843		break;
3844	}
3845	return X86EMUL_CONTINUE;
3846}
3847
3848static int em_clflush(struct x86_emulate_ctxt *ctxt)
3849{
3850	/* CLFLUSH is emulated as a no-op, regardless of CPUID */
3851	return X86EMUL_CONTINUE;
3852}
3853
3854static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3855{
3856	ctxt->dst.val = (s32) ctxt->src.val;
3857	return X86EMUL_CONTINUE;
3858}
3859
3860static bool valid_cr(int nr)
3861{
3862	switch (nr) {
3863	case 0:
3864	case 2 ... 4:
3865	case 8:
3866		return true;
3867	default:
3868		return false;
3869	}
3870}
3871
3872static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3873{
3874	if (!valid_cr(ctxt->modrm_reg))
3875		return emulate_ud(ctxt);
3876
3877	return X86EMUL_CONTINUE;
3878}
3879
3880static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3881{
3882	u64 new_val = ctxt->src.val64;
3883	int cr = ctxt->modrm_reg;
3884	u64 efer = 0;
3885
3886	static u64 cr_reserved_bits[] = {
3887		0xffffffff00000000ULL,
3888		0, 0, 0, /* CR3 checked later */
3889		CR4_RESERVED_BITS,
3890		0, 0, 0,
3891		CR8_RESERVED_BITS,
3892	};
3893
3894	if (!valid_cr(cr))
3895		return emulate_ud(ctxt);
3896
3897	if (new_val & cr_reserved_bits[cr])
3898		return emulate_gp(ctxt, 0);
3899
3900	switch (cr) {
3901	case 0: {
3902		u64 cr4;
3903		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3904		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3905			return emulate_gp(ctxt, 0);
3906
3907		cr4 = ctxt->ops->get_cr(ctxt, 4);
3908		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3909
3910		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3911		    !(cr4 & X86_CR4_PAE))
3912			return emulate_gp(ctxt, 0);
3913
3914		break;
3915		}
3916	case 3: {
3917		u64 rsvd = 0;
3918
3919		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3920		if (efer & EFER_LMA)
3921			rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
3922
3923		if (new_val & rsvd)
3924			return emulate_gp(ctxt, 0);
3925
3926		break;
3927		}
3928	case 4: {
3929		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3930
3931		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3932			return emulate_gp(ctxt, 0);
3933
3934		break;
3935		}
3936	}
3937
3938	return X86EMUL_CONTINUE;
3939}
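/*
 * The checks above mirror the architectural #GP(0) conditions for
 * MOV to CRn: CR0.PG without CR0.PE, CR0.NW without CR0.CD, enabling
 * paging with EFER.LME set but CR4.PAE clear, reserved CR3 bits in
 * long mode, and clearing CR4.PAE while EFER.LMA is set.
 */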
3940
3941static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3942{
3943	unsigned long dr7;
3944
3945	ctxt->ops->get_dr(ctxt, 7, &dr7);
3946
3947	/* Check if DR7.Global_Enable is set */
3948	return dr7 & (1 << 13);
3949}
3950
3951static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3952{
3953	int dr = ctxt->modrm_reg;
3954	u64 cr4;
3955
3956	if (dr > 7)
3957		return emulate_ud(ctxt);
3958
3959	cr4 = ctxt->ops->get_cr(ctxt, 4);
3960	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3961		return emulate_ud(ctxt);
3962
3963	if (check_dr7_gd(ctxt)) {
3964		ulong dr6;
3965
3966		ctxt->ops->get_dr(ctxt, 6, &dr6);
3967		dr6 &= ~15;
3968		dr6 |= DR6_BD | DR6_RTM;
3969		ctxt->ops->set_dr(ctxt, 6, dr6);
3970		return emulate_db(ctxt);
3971	}
3972
3973	return X86EMUL_CONTINUE;
3974}
3975
3976static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3977{
3978	u64 new_val = ctxt->src.val64;
3979	int dr = ctxt->modrm_reg;
3980
3981	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3982		return emulate_gp(ctxt, 0);
3983
3984	return check_dr_read(ctxt);
3985}
3986
3987static int check_svme(struct x86_emulate_ctxt *ctxt)
3988{
3989	u64 efer;
3990
3991	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3992
3993	if (!(efer & EFER_SVME))
3994		return emulate_ud(ctxt);
3995
3996	return X86EMUL_CONTINUE;
3997}
3998
3999static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4000{
4001	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4002
4003	/* Valid physical address? */
4004	if (rax & 0xffff000000000000ULL)
4005		return emulate_gp(ctxt, 0);
4006
4007	return check_svme(ctxt);
4008}
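/*
 * The bits 63:48 test above is a simplified physical-address check for
 * the SVM instructions that take an address in RAX; a real CPU raises
 * #GP for anything above MAXPHYADDR, which may be lower than 48 bits.
 */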
4009
4010static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4011{
4012	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4013
4014	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4015		return emulate_ud(ctxt);
4016
4017	return X86EMUL_CONTINUE;
4018}
4019
4020static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4021{
4022	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4023	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4024
4025	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4026	    ctxt->ops->check_pmc(ctxt, rcx))
4027		return emulate_gp(ctxt, 0);
4028
4029	return X86EMUL_CONTINUE;
4030}
4031
4032static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4033{
4034	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4035	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4036		return emulate_gp(ctxt, 0);
4037
4038	return X86EMUL_CONTINUE;
4039}
4040
4041static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4042{
4043	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4044	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4045		return emulate_gp(ctxt, 0);
4046
4047	return X86EMUL_CONTINUE;
4048}
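/*
 * Both directions cap the access size at 4 bytes before consulting the
 * I/O permission bitmap, since IN/OUT transfer at most a dword per
 * iteration.
 */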
4049
4050#define D(_y) { .flags = (_y) }
4051#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4052#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4053		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
4054#define N    D(NotImpl)
4055#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4056#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4057#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4058#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4059#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4060#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4061#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4062#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4063#define II(_f, _e, _i) \
4064	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4065#define IIP(_f, _e, _i, _p) \
4066	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4067	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
4068#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4069
4070#define D2bv(_f)      D((_f) | ByteOp), D(_f)
4071#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4072#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
4073#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
4074#define I2bvIP(_f, _e, _i, _p) \
4075	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4076
4077#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
4078		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
4079		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
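/*
 * The macros above build the decode tables: D() is a decode-only entry,
 * I()/F() attach an execute callback or fastop, G()/GD()/E() redirect
 * through group, group-dual and escape tables, and the *2bv variants
 * emit a ByteOp entry followed by its word/dword/qword twin.  F6ALU()
 * thus expands to the six classic ALU encodings: r/m,reg and reg,r/m
 * in both widths, plus accumulator,immediate.
 */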
4080
4081static const struct opcode group7_rm0[] = {
4082	N,
4083	I(SrcNone | Priv | EmulateOnUD,	em_hypercall),
4084	N, N, N, N, N, N,
4085};
4086
4087static const struct opcode group7_rm1[] = {
4088	DI(SrcNone | Priv, monitor),
4089	DI(SrcNone | Priv, mwait),
4090	N, N, N, N, N, N,
4091};
4092
4093static const struct opcode group7_rm3[] = {
4094	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
4095	II(SrcNone  | Prot | EmulateOnUD,	em_hypercall,	vmmcall),
4096	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
4097	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
4098	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
4099	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
4100	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
4101	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
4102};
4103
4104static const struct opcode group7_rm7[] = {
4105	N,
4106	DIP(SrcNone, rdtscp, check_rdtsc),
4107	N, N, N, N, N, N,
4108};
4109
4110static const struct opcode group1[] = {
4111	F(Lock, em_add),
4112	F(Lock | PageTable, em_or),
4113	F(Lock, em_adc),
4114	F(Lock, em_sbb),
4115	F(Lock | PageTable, em_and),
4116	F(Lock, em_sub),
4117	F(Lock, em_xor),
4118	F(NoWrite, em_cmp),
4119};
4120
4121static const struct opcode group1A[] = {
4122	I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
4123};
4124
4125static const struct opcode group2[] = {
4126	F(DstMem | ModRM, em_rol),
4127	F(DstMem | ModRM, em_ror),
4128	F(DstMem | ModRM, em_rcl),
4129	F(DstMem | ModRM, em_rcr),
4130	F(DstMem | ModRM, em_shl),
4131	F(DstMem | ModRM, em_shr),
4132	F(DstMem | ModRM, em_shl),
4133	F(DstMem | ModRM, em_sar),
4134};
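/* /6 of the shift group is an undocumented alias of /4, hence em_shl twice. */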
4135
4136static const struct opcode group3[] = {
4137	F(DstMem | SrcImm | NoWrite, em_test),
4138	F(DstMem | SrcImm | NoWrite, em_test),
4139	F(DstMem | SrcNone | Lock, em_not),
4140	F(DstMem | SrcNone | Lock, em_neg),
4141	F(DstXacc | Src2Mem, em_mul_ex),
4142	F(DstXacc | Src2Mem, em_imul_ex),
4143	F(DstXacc | Src2Mem, em_div_ex),
4144	F(DstXacc | Src2Mem, em_idiv_ex),
4145};
4146
4147static const struct opcode group4[] = {
4148	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4149	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4150	N, N, N, N, N, N,
4151};
4152
4153static const struct opcode group5[] = {
4154	F(DstMem | SrcNone | Lock,		em_inc),
4155	F(DstMem | SrcNone | Lock,		em_dec),
4156	I(SrcMem | NearBranch,			em_call_near_abs),
4157	I(SrcMemFAddr | ImplicitOps,		em_call_far),
4158	I(SrcMem | NearBranch,			em_jmp_abs),
4159	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
4160	I(SrcMem | Stack,			em_push), D(Undefined),
4161};
4162
4163static const struct opcode group6[] = {
4164	DI(Prot | DstMem,	sldt),
4165	DI(Prot | DstMem,	str),
4166	II(Prot | Priv | SrcMem16, em_lldt, lldt),
4167	II(Prot | Priv | SrcMem16, em_ltr, ltr),
4168	N, N, N, N,
4169};
4170
4171static const struct group_dual group7 = { {
4172	II(Mov | DstMem,			em_sgdt, sgdt),
4173	II(Mov | DstMem,			em_sidt, sidt),
4174	II(SrcMem | Priv,			em_lgdt, lgdt),
4175	II(SrcMem | Priv,			em_lidt, lidt),
4176	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4177	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4178	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
4179}, {
4180	EXT(0, group7_rm0),
4181	EXT(0, group7_rm1),
4182	N, EXT(0, group7_rm3),
4183	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
4184	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
4185	EXT(0, group7_rm7),
4186} };
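/*
 * A group_dual table has two halves: mod012 entries are used when the
 * ModRM byte encodes a memory operand, mod3 entries when it encodes a
 * register.  For group 7 the register forms are decoded further via
 * RMExt tables (group7_rm0..group7_rm7) indexed by modrm.rm, which is
 * where the SVM instructions (VMRUN, VMMCALL, ...) live.
 */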
4187
4188static const struct opcode group8[] = {
4189	N, N, N, N,
4190	F(DstMem | SrcImmByte | NoWrite,		em_bt),
4191	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
4192	F(DstMem | SrcImmByte | Lock,			em_btr),
4193	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
4194};
4195
4196static const struct group_dual group9 = { {
4197	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4198}, {
4199	N, N, N, N, N, N, N, N,
4200} };
4201
4202static const struct opcode group11[] = {
4203	I(DstMem | SrcImm | Mov | PageTable, em_mov),
4204	X7(D(Undefined)),
4205};
4206
4207static const struct gprefix pfx_0f_ae_7 = {
4208	I(SrcMem | ByteOp, em_clflush), N, N, N,
4209};
4210
4211static const struct group_dual group15 = { {
4212	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4213}, {
4214	N, N, N, N, N, N, N, N,
4215} };
4216
4217static const struct gprefix pfx_0f_6f_0f_7f = {
4218	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4219};
4220
4221static const struct instr_dual instr_dual_0f_2b = {
4222	I(0, em_mov), N
4223};
4224
4225static const struct gprefix pfx_0f_2b = {
4226	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4227};
4228
4229static const struct gprefix pfx_0f_28_0f_29 = {
4230	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4231};
4232
4233static const struct gprefix pfx_0f_e7 = {
4234	N, I(Sse, em_mov), N, N,
4235};
4236
4237static const struct escape escape_d9 = { {
4238	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4239}, {
4240	/* 0xC0 - 0xC7 */
4241	N, N, N, N, N, N, N, N,
4242	/* 0xC8 - 0xCF */
4243	N, N, N, N, N, N, N, N,
4244	/* 0xD0 - 0xD7 */
4245	N, N, N, N, N, N, N, N,
4246	/* 0xD8 - 0xDF */
4247	N, N, N, N, N, N, N, N,
4248	/* 0xE0 - 0xE7 */
4249	N, N, N, N, N, N, N, N,
4250	/* 0xE8 - 0xEF */
4251	N, N, N, N, N, N, N, N,
4252	/* 0xF0 - 0xF7 */
4253	N, N, N, N, N, N, N, N,
4254	/* 0xF8 - 0xFF */
4255	N, N, N, N, N, N, N, N,
4256} };
4257
4258static const struct escape escape_db = { {
4259	N, N, N, N, N, N, N, N,
4260}, {
4261	/* 0xC0 - 0xC7 */
4262	N, N, N, N, N, N, N, N,
4263	/* 0xC8 - 0xCF */
4264	N, N, N, N, N, N, N, N,
4265	/* 0xD0 - 0xD7 */
4266	N, N, N, N, N, N, N, N,
4267	/* 0xD8 - 0xDF */
4268	N, N, N, N, N, N, N, N,
4269	/* 0xE0 - 0xE7 */
4270	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4271	/* 0xE8 - 0xEF */
4272	N, N, N, N, N, N, N, N,
4273	/* 0xF0 - 0xF7 */
4274	N, N, N, N, N, N, N, N,
4275	/* 0xF8 - 0xFF */
4276	N, N, N, N, N, N, N, N,
4277} };
4278
4279static const struct escape escape_dd = { {
4280	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4281}, {
4282	/* 0xC0 - 0xC7 */
4283	N, N, N, N, N, N, N, N,
4284	/* 0xC8 - 0xCF */
4285	N, N, N, N, N, N, N, N,
4286	/* 0xD0 - 0xD7 */
4287	N, N, N, N, N, N, N, N,
4288	/* 0xD8 - 0xDF */
4289	N, N, N, N, N, N, N, N,
4290	/* 0xE0 - 0xE7 */
4291	N, N, N, N, N, N, N, N,
4292	/* 0xE8 - 0xEF */
4293	N, N, N, N, N, N, N, N,
4294	/* 0xF0 - 0xF7 */
4295	N, N, N, N, N, N, N, N,
4296	/* 0xF8 - 0xFF */
4297	N, N, N, N, N, N, N, N,
4298} };
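/*
 * The escape tables cover the x87 opcodes 0xd8-0xdf.  For modrm below
 * 0xc0 the reg field indexes op[]; otherwise high[modrm - 0xc0] is
 * used.  Only FNSTCW, FNINIT and FNSTSW are emulated here.
 */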
4299
4300static const struct instr_dual instr_dual_0f_c3 = {
4301	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4302};
4303
4304static const struct mode_dual mode_dual_63 = {
4305	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4306};
4307
4308static const struct opcode opcode_table[256] = {
4309	/* 0x00 - 0x07 */
4310	F6ALU(Lock, em_add),
4311	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4312	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4313	/* 0x08 - 0x0F */
4314	F6ALU(Lock | PageTable, em_or),
4315	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4316	N,
4317	/* 0x10 - 0x17 */
4318	F6ALU(Lock, em_adc),
4319	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4320	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4321	/* 0x18 - 0x1F */
4322	F6ALU(Lock, em_sbb),
4323	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4324	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4325	/* 0x20 - 0x27 */
4326	F6ALU(Lock | PageTable, em_and), N, N,
4327	/* 0x28 - 0x2F */
4328	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4329	/* 0x30 - 0x37 */
4330	F6ALU(Lock, em_xor), N, N,
4331	/* 0x38 - 0x3F */
4332	F6ALU(NoWrite, em_cmp), N, N,
4333	/* 0x40 - 0x4F */
4334	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4335	/* 0x50 - 0x57 */
4336	X8(I(SrcReg | Stack, em_push)),
4337	/* 0x58 - 0x5F */
4338	X8(I(DstReg | Stack, em_pop)),
4339	/* 0x60 - 0x67 */
4340	I(ImplicitOps | Stack | No64, em_pusha),
4341	I(ImplicitOps | Stack | No64, em_popa),
4342	N, MD(ModRM, &mode_dual_63),
4343	N, N, N, N,
4344	/* 0x68 - 0x6F */
4345	I(SrcImm | Mov | Stack, em_push),
4346	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4347	I(SrcImmByte | Mov | Stack, em_push),
4348	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4349	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4350	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4351	/* 0x70 - 0x7F */
4352	X16(D(SrcImmByte | NearBranch)),
4353	/* 0x80 - 0x87 */
4354	G(ByteOp | DstMem | SrcImm, group1),
4355	G(DstMem | SrcImm, group1),
4356	G(ByteOp | DstMem | SrcImm | No64, group1),
4357	G(DstMem | SrcImmByte, group1),
4358	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4359	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4360	/* 0x88 - 0x8F */
4361	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4362	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4363	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4364	D(ModRM | SrcMem | NoAccess | DstReg),
4365	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4366	G(0, group1A),
4367	/* 0x90 - 0x97 */
4368	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4369	/* 0x98 - 0x9F */
4370	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4371	I(SrcImmFAddr | No64, em_call_far), N,
4372	II(ImplicitOps | Stack, em_pushf, pushf),
4373	II(ImplicitOps | Stack, em_popf, popf),
4374	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4375	/* 0xA0 - 0xA7 */
4376	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4377	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4378	I2bv(SrcSI | DstDI | Mov | String, em_mov),
4379	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
4380	/* 0xA8 - 0xAF */
4381	F2bv(DstAcc | SrcImm | NoWrite, em_test),
4382	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4383	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4384	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4385	/* 0xB0 - 0xB7 */
4386	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4387	/* 0xB8 - 0xBF */
4388	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4389	/* 0xC0 - 0xC7 */
4390	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4391	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4392	I(ImplicitOps | NearBranch, em_ret),
4393	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4394	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4395	G(ByteOp, group11), G(0, group11),
4396	/* 0xC8 - 0xCF */
4397	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4398	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4399	I(ImplicitOps, em_ret_far),
4400	D(ImplicitOps), DI(SrcImmByte, intn),
4401	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4402	/* 0xD0 - 0xD7 */
4403	G(Src2One | ByteOp, group2), G(Src2One, group2),
4404	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4405	I(DstAcc | SrcImmUByte | No64, em_aam),
4406	I(DstAcc | SrcImmUByte | No64, em_aad),
4407	F(DstAcc | ByteOp | No64, em_salc),
4408	I(DstAcc | SrcXLat | ByteOp, em_mov),
4409	/* 0xD8 - 0xDF */
4410	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4411	/* 0xE0 - 0xE7 */
4412	X3(I(SrcImmByte | NearBranch, em_loop)),
4413	I(SrcImmByte | NearBranch, em_jcxz),
4414	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
4415	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4416	/* 0xE8 - 0xEF */
4417	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4418	I(SrcImmFAddr | No64, em_jmp_far),
4419	D(SrcImmByte | ImplicitOps | NearBranch),
4420	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
4421	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4422	/* 0xF0 - 0xF7 */
4423	N, DI(ImplicitOps, icebp), N, N,
4424	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4425	G(ByteOp, group3), G(0, group3),
4426	/* 0xF8 - 0xFF */
4427	D(ImplicitOps), D(ImplicitOps),
4428	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4429	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4430};
4431
4432static const struct opcode twobyte_table[256] = {
4433	/* 0x00 - 0x0F */
4434	G(0, group6), GD(0, &group7), N, N,
4435	N, I(ImplicitOps | EmulateOnUD, em_syscall),
4436	II(ImplicitOps | Priv, em_clts, clts), N,
4437	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4438	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4439	/* 0x10 - 0x1F */
4440	N, N, N, N, N, N, N, N,
4441	D(ImplicitOps | ModRM | SrcMem | NoAccess),
4442	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4443	/* 0x20 - 0x2F */
4444	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4445	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4446	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4447						check_cr_write),
4448	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4449						check_dr_write),
4450	N, N, N, N,
4451	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4452	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4453	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4454	N, N, N, N,
4455	/* 0x30 - 0x3F */
4456	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4457	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4458	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4459	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4460	I(ImplicitOps | EmulateOnUD, em_sysenter),
4461	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4462	N, N,
4463	N, N, N, N, N, N, N, N,
4464	/* 0x40 - 0x4F */
4465	X16(D(DstReg | SrcMem | ModRM)),
4466	/* 0x50 - 0x5F */
4467	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4468	/* 0x60 - 0x6F */
4469	N, N, N, N,
4470	N, N, N, N,
4471	N, N, N, N,
4472	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4473	/* 0x70 - 0x7F */
4474	N, N, N, N,
4475	N, N, N, N,
4476	N, N, N, N,
4477	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4478	/* 0x80 - 0x8F */
4479	X16(D(SrcImm | NearBranch)),
4480	/* 0x90 - 0x9F */
4481	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4482	/* 0xA0 - 0xA7 */
4483	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4484	II(ImplicitOps, em_cpuid, cpuid),
4485	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4486	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4487	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4488	/* 0xA8 - 0xAF */
4489	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4490	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4491	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4492	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4493	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4494	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4495	/* 0xB0 - 0xB7 */
4496	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4497	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4498	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4499	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4500	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4501	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4502	/* 0xB8 - 0xBF */
4503	N, N,
4504	G(BitOp, group8),
4505	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4506	I(DstReg | SrcMem | ModRM, em_bsf_c),
4507	I(DstReg | SrcMem | ModRM, em_bsr_c),
4508	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4509	/* 0xC0 - 0xC7 */
4510	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4511	N, ID(0, &instr_dual_0f_c3),
4512	N, N, N, GD(0, &group9),
4513	/* 0xC8 - 0xCF */
4514	X8(I(DstReg, em_bswap)),
4515	/* 0xD0 - 0xDF */
4516	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4517	/* 0xE0 - 0xEF */
4518	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4519	N, N, N, N, N, N, N, N,
4520	/* 0xF0 - 0xFF */
4521	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4522};
4523
4524static const struct instr_dual instr_dual_0f_38_f0 = {
4525	I(DstReg | SrcMem | Mov, em_movbe), N
4526};
4527
4528static const struct instr_dual instr_dual_0f_38_f1 = {
4529	I(DstMem | SrcReg | Mov, em_movbe), N
4530};
4531
4532static const struct gprefix three_byte_0f_38_f0 = {
4533	ID(0, &instr_dual_0f_38_f0), N, N, N
4534};
4535
4536static const struct gprefix three_byte_0f_38_f1 = {
4537	ID(0, &instr_dual_0f_38_f1), N, N, N
4538};
4539
4540/*
4541 * Insns below are indexed by the third opcode byte and then selected by
4542 * the SIMD prefix.
4543 */
4544static const struct opcode opcode_map_0f_38[256] = {
4545	/* 0x00 - 0x7f */
4546	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4547	/* 0x80 - 0xef */
4548	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4549	/* 0xf0 - 0xf1 */
4550	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4551	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4552	/* 0xf2 - 0xff */
4553	N, N, X4(N), X8(N)
4554};
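/*
 * Of the 0f 38 map only MOVBE (f0/f1) is handled, and only with no SIMD
 * prefix and a memory operand (MOVBE has no reg,reg form); the
 * f2-prefixed encodings (CRC32) decode to N.
 */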
4555
4556#undef D
4557#undef N
4558#undef G
4559#undef GD
4560#undef I
4561#undef GP
4562#undef EXT
4563#undef MD
4564#undef ID
4565
4566#undef D2bv
4567#undef D2bvIP
4568#undef I2bv
4569#undef I2bvIP
4570#undef I6ALU
4571
4572static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4573{
4574	unsigned size;
4575
4576	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4577	if (size == 8)
4578		size = 4;
4579	return size;
4580}
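/*
 * Immediates never exceed 4 bytes even with REX.W; they are
 * sign-extended to 64 bits.  The one exception, MOV r64, imm64, is
 * decoded as OpImm64 and passes ctxt->op_bytes through unchanged.
 */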
4581
4582static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4583		      unsigned size, bool sign_extension)
4584{
4585	int rc = X86EMUL_CONTINUE;
4586
4587	op->type = OP_IMM;
4588	op->bytes = size;
4589	op->addr.mem.ea = ctxt->_eip;
4590	/* NB. Immediates are sign-extended as necessary. */
4591	switch (op->bytes) {
4592	case 1:
4593		op->val = insn_fetch(s8, ctxt);
4594		break;
4595	case 2:
4596		op->val = insn_fetch(s16, ctxt);
4597		break;
4598	case 4:
4599		op->val = insn_fetch(s32, ctxt);
4600		break;
4601	case 8:
4602		op->val = insn_fetch(s64, ctxt);
4603		break;
4604	}
4605	if (!sign_extension) {
4606		switch (op->bytes) {
4607		case 1:
4608			op->val &= 0xff;
4609			break;
4610		case 2:
4611			op->val &= 0xffff;
4612			break;
4613		case 4:
4614			op->val &= 0xffffffff;
4615			break;
4616		}
4617	}
4618done:
4619	return rc;
4620}
4621
4622static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4623			  unsigned d)
4624{
4625	int rc = X86EMUL_CONTINUE;
4626
4627	switch (d) {
4628	case OpReg:
4629		decode_register_operand(ctxt, op);
4630		break;
4631	case OpImmUByte:
4632		rc = decode_imm(ctxt, op, 1, false);
4633		break;
4634	case OpMem:
4635		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4636	mem_common:
4637		*op = ctxt->memop;
4638		ctxt->memopp = op;
4639		if (ctxt->d & BitOp)
4640			fetch_bit_operand(ctxt);
4641		op->orig_val = op->val;
4642		break;
4643	case OpMem64:
4644		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4645		goto mem_common;
4646	case OpAcc:
4647		op->type = OP_REG;
4648		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4649		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4650		fetch_register_operand(op);
4651		op->orig_val = op->val;
4652		break;
4653	case OpAccLo:
4654		op->type = OP_REG;
4655		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4656		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4657		fetch_register_operand(op);
4658		op->orig_val = op->val;
4659		break;
4660	case OpAccHi:
4661		if (ctxt->d & ByteOp) {
4662			op->type = OP_NONE;
4663			break;
4664		}
4665		op->type = OP_REG;
4666		op->bytes = ctxt->op_bytes;
4667		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4668		fetch_register_operand(op);
4669		op->orig_val = op->val;
4670		break;
4671	case OpDI:
4672		op->type = OP_MEM;
4673		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4674		op->addr.mem.ea =
4675			register_address(ctxt, VCPU_REGS_RDI);
4676		op->addr.mem.seg = VCPU_SREG_ES;
4677		op->val = 0;
4678		op->count = 1;
4679		break;
4680	case OpDX:
4681		op->type = OP_REG;
4682		op->bytes = 2;
4683		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4684		fetch_register_operand(op);
4685		break;
4686	case OpCL:
4687		op->type = OP_IMM;
4688		op->bytes = 1;
4689		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4690		break;
4691	case OpImmByte:
4692		rc = decode_imm(ctxt, op, 1, true);
4693		break;
4694	case OpOne:
4695		op->type = OP_IMM;
4696		op->bytes = 1;
4697		op->val = 1;
4698		break;
4699	case OpImm:
4700		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4701		break;
4702	case OpImm64:
4703		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4704		break;
4705	case OpMem8:
4706		ctxt->memop.bytes = 1;
4707		if (ctxt->memop.type == OP_REG) {
4708			ctxt->memop.addr.reg = decode_register(ctxt,
4709					ctxt->modrm_rm, true);
4710			fetch_register_operand(&ctxt->memop);
4711		}
4712		goto mem_common;
4713	case OpMem16:
4714		ctxt->memop.bytes = 2;
4715		goto mem_common;
4716	case OpMem32:
4717		ctxt->memop.bytes = 4;
4718		goto mem_common;
4719	case OpImmU16:
4720		rc = decode_imm(ctxt, op, 2, false);
4721		break;
4722	case OpImmU:
4723		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4724		break;
4725	case OpSI:
4726		op->type = OP_MEM;
4727		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4728		op->addr.mem.ea =
4729			register_address(ctxt, VCPU_REGS_RSI);
4730		op->addr.mem.seg = ctxt->seg_override;
4731		op->val = 0;
4732		op->count = 1;
4733		break;
4734	case OpXLat:
4735		op->type = OP_MEM;
4736		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4737		op->addr.mem.ea =
4738			address_mask(ctxt,
4739				reg_read(ctxt, VCPU_REGS_RBX) +
4740				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4741		op->addr.mem.seg = ctxt->seg_override;
4742		op->val = 0;
4743		break;
4744	case OpImmFAddr:
4745		op->type = OP_IMM;
4746		op->addr.mem.ea = ctxt->_eip;
4747		op->bytes = ctxt->op_bytes + 2;
4748		insn_fetch_arr(op->valptr, op->bytes, ctxt);
4749		break;
4750	case OpMemFAddr:
4751		ctxt->memop.bytes = ctxt->op_bytes + 2;
4752		goto mem_common;
4753	case OpES:
4754		op->type = OP_IMM;
4755		op->val = VCPU_SREG_ES;
4756		break;
4757	case OpCS:
4758		op->type = OP_IMM;
4759		op->val = VCPU_SREG_CS;
4760		break;
4761	case OpSS:
4762		op->type = OP_IMM;
4763		op->val = VCPU_SREG_SS;
4764		break;
4765	case OpDS:
4766		op->type = OP_IMM;
4767		op->val = VCPU_SREG_DS;
4768		break;
4769	case OpFS:
4770		op->type = OP_IMM;
4771		op->val = VCPU_SREG_FS;
4772		break;
4773	case OpGS:
4774		op->type = OP_IMM;
4775		op->val = VCPU_SREG_GS;
4776		break;
4777	case OpImplicit:
4778		/* Special instructions do their own operand decoding. */
4779	default:
4780		op->type = OP_NONE; /* Disable writeback. */
4781		break;
4782	}
4783
4784done:
4785	return rc;
4786}
4787
4788int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4789{
4790	int rc = X86EMUL_CONTINUE;
4791	int mode = ctxt->mode;
4792	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4793	bool op_prefix = false;
4794	bool has_seg_override = false;
4795	struct opcode opcode;
4796
4797	ctxt->memop.type = OP_NONE;
4798	ctxt->memopp = NULL;
4799	ctxt->_eip = ctxt->eip;
4800	ctxt->fetch.ptr = ctxt->fetch.data;
4801	ctxt->fetch.end = ctxt->fetch.data + insn_len;
4802	ctxt->opcode_len = 1;
4803	if (insn_len > 0)
4804		memcpy(ctxt->fetch.data, insn, insn_len);
4805	else {
4806		rc = __do_insn_fetch_bytes(ctxt, 1);
4807		if (rc != X86EMUL_CONTINUE)
4808			return rc;
4809	}
4810
4811	switch (mode) {
4812	case X86EMUL_MODE_REAL:
4813	case X86EMUL_MODE_VM86:
4814	case X86EMUL_MODE_PROT16:
4815		def_op_bytes = def_ad_bytes = 2;
4816		break;
4817	case X86EMUL_MODE_PROT32:
4818		def_op_bytes = def_ad_bytes = 4;
4819		break;
4820#ifdef CONFIG_X86_64
4821	case X86EMUL_MODE_PROT64:
4822		def_op_bytes = 4;
4823		def_ad_bytes = 8;
4824		break;
4825#endif
4826	default:
4827		return EMULATION_FAILED;
4828	}
4829
4830	ctxt->op_bytes = def_op_bytes;
4831	ctxt->ad_bytes = def_ad_bytes;
4832
4833	/* Legacy prefixes. */
4834	for (;;) {
4835		switch (ctxt->b = insn_fetch(u8, ctxt)) {
4836		case 0x66:	/* operand-size override */
4837			op_prefix = true;
4838			/* switch between 2/4 bytes */
4839			ctxt->op_bytes = def_op_bytes ^ 6;
4840			break;
4841		case 0x67:	/* address-size override */
4842			if (mode == X86EMUL_MODE_PROT64)
4843				/* switch between 4/8 bytes */
4844				ctxt->ad_bytes = def_ad_bytes ^ 12;
4845			else
4846				/* switch between 2/4 bytes */
4847				ctxt->ad_bytes = def_ad_bytes ^ 6;
4848			break;
4849		case 0x26:	/* ES override */
4850		case 0x2e:	/* CS override */
4851		case 0x36:	/* SS override */
4852		case 0x3e:	/* DS override */
4853			has_seg_override = true;
4854			ctxt->seg_override = (ctxt->b >> 3) & 3;
4855			break;
4856		case 0x64:	/* FS override */
4857		case 0x65:	/* GS override */
4858			has_seg_override = true;
4859			ctxt->seg_override = ctxt->b & 7;
4860			break;
4861		case 0x40 ... 0x4f: /* REX */
4862			if (mode != X86EMUL_MODE_PROT64)
4863				goto done_prefixes;
4864			ctxt->rex_prefix = ctxt->b;
4865			continue;
4866		case 0xf0:	/* LOCK */
4867			ctxt->lock_prefix = 1;
4868			break;
4869		case 0xf2:	/* REPNE/REPNZ */
4870		case 0xf3:	/* REP/REPE/REPZ */
4871			ctxt->rep_prefix = ctxt->b;
4872			break;
4873		default:
4874			goto done_prefixes;
4875		}
4876
4877		/* Any legacy prefix after a REX prefix nullifies its effect. */
4878
4879		ctxt->rex_prefix = 0;
4880	}
4881
4882done_prefixes:
4883
4884	/* REX prefix. */
4885	if (ctxt->rex_prefix & 8)
4886		ctxt->op_bytes = 8;	/* REX.W */
4887
4888	/* Opcode byte(s). */
4889	opcode = opcode_table[ctxt->b];
4890	/* Two-byte opcode? */
4891	if (ctxt->b == 0x0f) {
4892		ctxt->opcode_len = 2;
4893		ctxt->b = insn_fetch(u8, ctxt);
4894		opcode = twobyte_table[ctxt->b];
4895
4896		/* 0F_38 opcode map */
4897		if (ctxt->b == 0x38) {
4898			ctxt->opcode_len = 3;
4899			ctxt->b = insn_fetch(u8, ctxt);
4900			opcode = opcode_map_0f_38[ctxt->b];
4901		}
4902	}
4903	ctxt->d = opcode.flags;
4904
4905	if (ctxt->d & ModRM)
4906		ctxt->modrm = insn_fetch(u8, ctxt);
4907
4908	/* vex-prefix instructions are not implemented */
4909	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4910	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4911		ctxt->d = NotImpl;
4912	}
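	/*
	 * In 64-bit mode 0xc4/0xc5 are always VEX; outside it they denote
	 * VEX only when modrm.mod == 3 and otherwise decode as LES/LDS,
	 * hence the mode/modrm test above.
	 */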
4913
4914	while (ctxt->d & GroupMask) {
4915		switch (ctxt->d & GroupMask) {
4916		case Group:
4917			goffset = (ctxt->modrm >> 3) & 7;
4918			opcode = opcode.u.group[goffset];
4919			break;
4920		case GroupDual:
4921			goffset = (ctxt->modrm >> 3) & 7;
4922			if ((ctxt->modrm >> 6) == 3)
4923				opcode = opcode.u.gdual->mod3[goffset];
4924			else
4925				opcode = opcode.u.gdual->mod012[goffset];
4926			break;
4927		case RMExt:
4928			goffset = ctxt->modrm & 7;
4929			opcode = opcode.u.group[goffset];
4930			break;
4931		case Prefix:
4932			if (ctxt->rep_prefix && op_prefix)
4933				return EMULATION_FAILED;
4934			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4935			switch (simd_prefix) {
4936			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4937			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4938			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4939			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4940			}
4941			break;
4942		case Escape:
4943			if (ctxt->modrm > 0xbf)
4944				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4945			else
4946				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4947			break;
4948		case InstrDual:
4949			if ((ctxt->modrm >> 6) == 3)
4950				opcode = opcode.u.idual->mod3;
4951			else
4952				opcode = opcode.u.idual->mod012;
4953			break;
4954		case ModeDual:
4955			if (ctxt->mode == X86EMUL_MODE_PROT64)
4956				opcode = opcode.u.mdual->mode64;
4957			else
4958				opcode = opcode.u.mdual->mode32;
4959			break;
4960		default:
4961			return EMULATION_FAILED;
4962		}
4963
4964		ctxt->d &= ~(u64)GroupMask;
4965		ctxt->d |= opcode.flags;
4966	}
4967
4968	/* Unrecognised? */
4969	if (ctxt->d == 0)
4970		return EMULATION_FAILED;
4971
4972	ctxt->execute = opcode.u.execute;
4973
4974	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4975		return EMULATION_FAILED;
4976
4977	if (unlikely(ctxt->d &
4978	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4979	     No16))) {
4980		/*
4981		 * These are copied unconditionally here, and checked unconditionally
4982		 * in x86_emulate_insn.
4983		 */
4984		ctxt->check_perm = opcode.check_perm;
4985		ctxt->intercept = opcode.intercept;
4986
4987		if (ctxt->d & NotImpl)
4988			return EMULATION_FAILED;
4989
4990		if (mode == X86EMUL_MODE_PROT64) {
4991			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4992				ctxt->op_bytes = 8;
4993			else if (ctxt->d & NearBranch)
4994				ctxt->op_bytes = 8;
4995		}
4996
4997		if (ctxt->d & Op3264) {
4998			if (mode == X86EMUL_MODE_PROT64)
4999				ctxt->op_bytes = 8;
5000			else
5001				ctxt->op_bytes = 4;
5002		}
5003
5004		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5005			ctxt->op_bytes = 4;
5006
5007		if (ctxt->d & Sse)
5008			ctxt->op_bytes = 16;
5009		else if (ctxt->d & Mmx)
5010			ctxt->op_bytes = 8;
5011	}
5012
5013	/* ModRM and SIB bytes. */
5014	if (ctxt->d & ModRM) {
5015		rc = decode_modrm(ctxt, &ctxt->memop);
5016		if (!has_seg_override) {
5017			has_seg_override = true;
5018			ctxt->seg_override = ctxt->modrm_seg;
5019		}
5020	} else if (ctxt->d & MemAbs)
5021		rc = decode_abs(ctxt, &ctxt->memop);
5022	if (rc != X86EMUL_CONTINUE)
5023		goto done;
5024
5025	if (!has_seg_override)
5026		ctxt->seg_override = VCPU_SREG_DS;
5027
5028	ctxt->memop.addr.mem.seg = ctxt->seg_override;
5029
5030	/*
5031	 * Decode and fetch the source operand: register, memory
5032	 * or immediate.
5033	 */
5034	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5035	if (rc != X86EMUL_CONTINUE)
5036		goto done;
5037
5038	/*
5039	 * Decode and fetch the second source operand: register, memory
5040	 * or immediate.
5041	 */
5042	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5043	if (rc != X86EMUL_CONTINUE)
5044		goto done;
5045
5046	/* Decode and fetch the destination operand: register or memory. */
5047	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5048
5049	if (ctxt->rip_relative)
5050		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5051					ctxt->memopp->addr.mem.ea + ctxt->_eip);
5052
5053done:
5054	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5055}
5056
5057bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5058{
5059	return ctxt->d & PageTable;
5060}
5061
5062static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5063{
5064	/* The second termination condition only applies to REPE
5065	 * and REPNE. If the repeat string operation prefix is
5066	 * REPE/REPZ or REPNE/REPNZ, test the corresponding
5067	 * termination condition:
5068	 * 	- if REPE/REPZ and ZF = 0 then done
5069	 * 	- if REPNE/REPNZ and ZF = 1 then done
5070	 */
5071	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5072	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5073	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
5074		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5075		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
5076		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5077		return true;
5078
5079	return false;
5080}
5081
5082static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5083{
5084	bool fault = false;
5085
5086	ctxt->ops->get_fpu(ctxt);
5087	asm volatile("1: fwait \n\t"
5088		     "2: \n\t"
5089		     ".pushsection .fixup,\"ax\" \n\t"
5090		     "3: \n\t"
5091		     "movb $1, %[fault] \n\t"
5092		     "jmp 2b \n\t"
5093		     ".popsection \n\t"
5094		     _ASM_EXTABLE(1b, 3b)
5095		     : [fault]"+qm"(fault));
5096	ctxt->ops->put_fpu(ctxt);
5097
5098	if (unlikely(fault))
5099		return emulate_exception(ctxt, MF_VECTOR, 0, false);
5100
5101	return X86EMUL_CONTINUE;
5102}
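/*
 * fwait forces any pending x87 exception to be delivered; the exception
 * table fixup turns it into fault = true, which is then reflected into
 * the guest as #MF instead of being handled by the host.
 */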
5103
5104static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5105				       struct operand *op)
5106{
5107	if (op->type == OP_MM)
5108		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5109}
5110
5111static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5112{
5113	register void *__sp asm(_ASM_SP);
5114	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5115
5116	if (!(ctxt->d & ByteOp))
5117		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5118
5119	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
5120	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5121	      [fastop]"+S"(fop), "+r"(__sp)
5122	    : "c"(ctxt->src2.val));
5123
5124	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5125	if (!fop) /* exception is returned in fop variable */
5126		return emulate_de(ctxt);
5127	return X86EMUL_CONTINUE;
5128}
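/*
 * Fastop calling convention: dst in rax, src in rdx, src2 in rcx,
 * flags in rdi and the stub pointer in rsi.  The 1/2/4/8-byte variants
 * of each op are laid out FASTOP_SIZE bytes apart, so __ffs(dst.bytes)
 * selects the right one.  A stub that takes an exception (e.g. division
 * by zero) comes back with fop == NULL, which is turned into #DE above.
 */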
5129
5130void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5131{
5132	memset(&ctxt->rip_relative, 0,
5133	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5134
5135	ctxt->io_read.pos = 0;
5136	ctxt->io_read.end = 0;
5137	ctxt->mem_read.end = 0;
5138}
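/*
 * The memset relies on the layout of struct x86_emulate_ctxt:
 * everything from rip_relative up to (but not including) modrm is
 * per-instruction decode state, so it can be cleared in one go.
 */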
5139
5140int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5141{
5142	const struct x86_emulate_ops *ops = ctxt->ops;
5143	int rc = X86EMUL_CONTINUE;
5144	int saved_dst_type = ctxt->dst.type;
5145
5146	ctxt->mem_read.pos = 0;
5147
5148	/* LOCK prefix is allowed only with some instructions */
5149	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5150		rc = emulate_ud(ctxt);
5151		goto done;
5152	}
5153
5154	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5155		rc = emulate_ud(ctxt);
5156		goto done;
5157	}
5158
5159	if (unlikely(ctxt->d &
5160		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5161		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5162				(ctxt->d & Undefined)) {
5163			rc = emulate_ud(ctxt);
5164			goto done;
5165		}
5166
5167		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5168		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5169			rc = emulate_ud(ctxt);
5170			goto done;
5171		}
5172
5173		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5174			rc = emulate_nm(ctxt);
5175			goto done;
5176		}
5177
5178		if (ctxt->d & Mmx) {
5179			rc = flush_pending_x87_faults(ctxt);
5180			if (rc != X86EMUL_CONTINUE)
5181				goto done;
5182			/*
5183			 * Now that we know the fpu is exception safe, we can fetch
5184			 * operands from it.
5185			 */
5186			fetch_possible_mmx_operand(ctxt, &ctxt->src);
5187			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5188			if (!(ctxt->d & Mov))
5189				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5190		}
5191
5192		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5193			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5194						      X86_ICPT_PRE_EXCEPT);
5195			if (rc != X86EMUL_CONTINUE)
5196				goto done;
5197		}
5198
5199		/* Instruction can only be executed in protected mode */
5200		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5201			rc = emulate_ud(ctxt);
5202			goto done;
5203		}
5204
5205		/* Privileged instruction can be executed only in CPL=0 */
5206		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5207			if (ctxt->d & PrivUD)
5208				rc = emulate_ud(ctxt);
5209			else
5210				rc = emulate_gp(ctxt, 0);
5211			goto done;
5212		}
5213
5214		/* Do instruction specific permission checks */
5215		if (ctxt->d & CheckPerm) {
5216			rc = ctxt->check_perm(ctxt);
5217			if (rc != X86EMUL_CONTINUE)
5218				goto done;
5219		}
5220
5221		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5222			rc = emulator_check_intercept(ctxt, ctxt->intercept,
5223						      X86_ICPT_POST_EXCEPT);
5224			if (rc != X86EMUL_CONTINUE)
5225				goto done;
5226		}
5227
5228		if (ctxt->rep_prefix && (ctxt->d & String)) {
5229			/* All REP prefixes have the same first termination condition */
5230			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5231				string_registers_quirk(ctxt);
5232				ctxt->eip = ctxt->_eip;
5233				ctxt->eflags &= ~X86_EFLAGS_RF;
5234				goto done;
5235			}
5236		}
5237	}
5238
5239	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5240		rc = segmented_read(ctxt, ctxt->src.addr.mem,
5241				    ctxt->src.valptr, ctxt->src.bytes);
5242		if (rc != X86EMUL_CONTINUE)
5243			goto done;
5244		ctxt->src.orig_val64 = ctxt->src.val64;
5245	}
5246
5247	if (ctxt->src2.type == OP_MEM) {
5248		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5249				    &ctxt->src2.val, ctxt->src2.bytes);
5250		if (rc != X86EMUL_CONTINUE)
5251			goto done;
5252	}
5253
5254	if ((ctxt->d & DstMask) == ImplicitOps)
5255		goto special_insn;
5256
5258	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5259		/* optimisation - avoid slow emulated read if Mov */
5260		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5261				   &ctxt->dst.val, ctxt->dst.bytes);
5262		if (rc != X86EMUL_CONTINUE) {
5263			if (!(ctxt->d & NoWrite) &&
5264			    rc == X86EMUL_PROPAGATE_FAULT &&
5265			    ctxt->exception.vector == PF_VECTOR)
5266				ctxt->exception.error_code |= PFERR_WRITE_MASK;
5267			goto done;
5268		}
5269	}
5270	/* Copy full 64-bit value for CMPXCHG8B.  */
5271	ctxt->dst.orig_val64 = ctxt->dst.val64;
5272
5273special_insn:
5274
5275	if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5276		rc = emulator_check_intercept(ctxt, ctxt->intercept,
5277					      X86_ICPT_POST_MEMACCESS);
5278		if (rc != X86EMUL_CONTINUE)
5279			goto done;
5280	}
5281
5282	if (ctxt->rep_prefix && (ctxt->d & String))
5283		ctxt->eflags |= X86_EFLAGS_RF;
5284	else
5285		ctxt->eflags &= ~X86_EFLAGS_RF;
5286
5287	if (ctxt->execute) {
5288		if (ctxt->d & Fastop) {
5289			void (*fop)(struct fastop *) = (void *)ctxt->execute;
5290			rc = fastop(ctxt, fop);
5291			if (rc != X86EMUL_CONTINUE)
5292				goto done;
5293			goto writeback;
5294		}
5295		rc = ctxt->execute(ctxt);
5296		if (rc != X86EMUL_CONTINUE)
5297			goto done;
5298		goto writeback;
5299	}
5300
5301	if (ctxt->opcode_len == 2)
5302		goto twobyte_insn;
5303	else if (ctxt->opcode_len == 3)
5304		goto threebyte_insn;
5305
5306	switch (ctxt->b) {
5307	case 0x70 ... 0x7f: /* jcc (short) */
5308		if (test_cc(ctxt->b, ctxt->eflags))
5309			rc = jmp_rel(ctxt, ctxt->src.val);
5310		break;
5311	case 0x8d: /* lea r16/r32, m */
5312		ctxt->dst.val = ctxt->src.addr.mem.ea;
5313		break;
5314	case 0x90 ... 0x97: /* nop / xchg reg, rax */
5315		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5316			ctxt->dst.type = OP_NONE;
5317		else
5318			rc = em_xchg(ctxt);
5319		break;
5320	case 0x98: /* cbw/cwde/cdqe */
5321		switch (ctxt->op_bytes) {
5322		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5323		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5324		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5325		}
5326		break;
5327	case 0xcc:		/* int3 */
5328		rc = emulate_int(ctxt, 3);
5329		break;
5330	case 0xcd:		/* int n */
5331		rc = emulate_int(ctxt, ctxt->src.val);
5332		break;
5333	case 0xce:		/* into */
5334		if (ctxt->eflags & X86_EFLAGS_OF)
5335			rc = emulate_int(ctxt, 4);
5336		break;
5337	case 0xe9: /* jmp rel */
5338	case 0xeb: /* jmp rel short */
5339		rc = jmp_rel(ctxt, ctxt->src.val);
5340		ctxt->dst.type = OP_NONE; /* Disable writeback. */
5341		break;
5342	case 0xf4:              /* hlt */
5343		ctxt->ops->halt(ctxt);
5344		break;
5345	case 0xf5:	/* cmc */
5346		/* complement carry flag from eflags reg */
5347		ctxt->eflags ^= X86_EFLAGS_CF;
5348		break;
5349	case 0xf8: /* clc */
5350		ctxt->eflags &= ~X86_EFLAGS_CF;
5351		break;
5352	case 0xf9: /* stc */
5353		ctxt->eflags |= X86_EFLAGS_CF;
5354		break;
5355	case 0xfc: /* cld */
5356		ctxt->eflags &= ~X86_EFLAGS_DF;
5357		break;
5358	case 0xfd: /* std */
5359		ctxt->eflags |= X86_EFLAGS_DF;
5360		break;
5361	default:
5362		goto cannot_emulate;
5363	}
5364
5365	if (rc != X86EMUL_CONTINUE)
5366		goto done;
5367
5368writeback:
5369	if (ctxt->d & SrcWrite) {
5370		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5371		rc = writeback(ctxt, &ctxt->src);
5372		if (rc != X86EMUL_CONTINUE)
5373			goto done;
5374	}
5375	if (!(ctxt->d & NoWrite)) {
5376		rc = writeback(ctxt, &ctxt->dst);
5377		if (rc != X86EMUL_CONTINUE)
5378			goto done;
5379	}
5380
5381	/*
5382	 * Restore dst type in case the decoding is reused
5383	 * (happens for string instructions).
5384	 */
5385	ctxt->dst.type = saved_dst_type;
5386
5387	if ((ctxt->d & SrcMask) == SrcSI)
5388		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5389
5390	if ((ctxt->d & DstMask) == DstDI)
5391		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5392
5393	if (ctxt->rep_prefix && (ctxt->d & String)) {
5394		unsigned int count;
5395		struct read_cache *r = &ctxt->io_read;
5396		if ((ctxt->d & SrcMask) == SrcSI)
5397			count = ctxt->src.count;
5398		else
5399			count = ctxt->dst.count;
5400		register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5401
5402		if (!string_insn_completed(ctxt)) {
5403			/*
5404			 * Re-enter guest when pio read ahead buffer is empty
5405	 * or, if it is not used, after every 1024 iterations.
5406			 */
5407			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5408			    (r->end == 0 || r->end != r->pos)) {
5409				/*
5410				 * Reset read cache. Usually happens before
5411				 * decode, but since instruction is restarted
5412				 * we have to do it here.
5413				 */
5414				ctxt->mem_read.end = 0;
5415				writeback_registers(ctxt);
5416				return EMULATION_RESTART;
5417			}
5418			goto done; /* skip rip writeback */
5419		}
5420		ctxt->eflags &= ~X86_EFLAGS_RF;
5421	}
5422
5423	ctxt->eip = ctxt->_eip;
5424
5425done:
5426	if (rc == X86EMUL_PROPAGATE_FAULT) {
5427		WARN_ON(ctxt->exception.vector > 0x1f);
5428		ctxt->have_exception = true;
5429	}
5430	if (rc == X86EMUL_INTERCEPTED)
5431		return EMULATION_INTERCEPTED;
5432
5433	if (rc == X86EMUL_CONTINUE)
5434		writeback_registers(ctxt);
5435
5436	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5437
5438twobyte_insn:
5439	switch (ctxt->b) {
5440	case 0x09:		/* wbinvd */
5441		(ctxt->ops->wbinvd)(ctxt);
5442		break;
5443	case 0x08:		/* invd */
5444	case 0x0d:		/* GrpP (prefetch) */
5445	case 0x18:		/* Grp16 (prefetch/nop) */
5446	case 0x1f:		/* nop */
5447		break;
5448	case 0x20: /* mov cr, reg */
5449		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5450		break;
5451	case 0x21: /* mov from dr to reg */
5452		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5453		break;
5454	case 0x40 ... 0x4f:	/* cmov */
5455		if (test_cc(ctxt->b, ctxt->eflags))
5456			ctxt->dst.val = ctxt->src.val;
5457		else if (ctxt->op_bytes != 4)
5458			ctxt->dst.type = OP_NONE; /* no writeback */
5459		break;
5460	case 0x80 ... 0x8f: /* jnz rel, etc */
5461		if (test_cc(ctxt->b, ctxt->eflags))
5462			rc = jmp_rel(ctxt, ctxt->src.val);
5463		break;
5464	case 0x90 ... 0x9f:     /* setcc r/m8 */
5465		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5466		break;
5467	case 0xb6 ... 0xb7:	/* movzx */
5468		ctxt->dst.bytes = ctxt->op_bytes;
5469		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5470						       : (u16) ctxt->src.val;
5471		break;
5472	case 0xbe ... 0xbf:	/* movsx */
5473		ctxt->dst.bytes = ctxt->op_bytes;
5474		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5475							(s16) ctxt->src.val;
5476		break;
5477	default:
5478		goto cannot_emulate;
5479	}
5480
5481threebyte_insn:
5482
5483	if (rc != X86EMUL_CONTINUE)
5484		goto done;
5485
5486	goto writeback;
5487
5488cannot_emulate:
5489	return EMULATION_FAILED;
5490}
5491
5492void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5493{
5494	invalidate_registers(ctxt);
5495}
5496
5497void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5498{
5499	writeback_registers(ctxt);
5500}