/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * The MMU code needs to access 32-bit, 64-bit and EPT guest ptes, so the
 * code in this file is compiled multiple times, once per guest pte format
 * (see the note after the PTTYPE macro block below).
 */

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
	#define CMPXCHG cmpxchg
#elif PTTYPE == PTTYPE_EPT
	#define pt_element_t u64
	#define guest_walker guest_walkerEPT
	#define FNAME(name) ept_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#define PT_GUEST_DIRTY_SHIFT 9
	#define PT_GUEST_ACCESSED_SHIFT 8
	#define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 4
#else
	#error Invalid PTTYPE value
#endif
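
/*
 * Added note (not in the original source): this header is meant to be
 * included from mmu.c with PTTYPE defined beforehand, roughly like:
 *
 *	#define PTTYPE PTTYPE_EPT
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 * so FNAME(walk_addr) expands to ept_walk_addr, paging64_walk_addr or
 * paging32_walk_addr depending on the instantiation (the exact include
 * order in mmu.c is assumed here).
 */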
  76
  77#define PT_GUEST_DIRTY_MASK    (1 << PT_GUEST_DIRTY_SHIFT)
  78#define PT_GUEST_ACCESSED_MASK (1 << PT_GUEST_ACCESSED_SHIFT)
  79
  80#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
  81#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
  82
  83/*
  84 * The guest_walker structure emulates the behavior of the hardware page
  85 * table walker.
  86 */
  87struct guest_walker {
  88	int level;
  89	unsigned max_level;
  90	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
  91	pt_element_t ptes[PT_MAX_FULL_LEVELS];
  92	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
  93	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
  94	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
  95	bool pte_writable[PT_MAX_FULL_LEVELS];
  96	unsigned pt_access;
  97	unsigned pte_access;
  98	gfn_t gfn;
  99	struct x86_exception fault;
 100};
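
/*
 * Added note: the per-level arrays above (table_gfn, ptes, pte_gpa,
 * ptep_user, pte_writable) are indexed by walker level minus one, with
 * level 1 being the 4K PTE level; walk_addr_generic() fills them from
 * max_level down to the final level of the walk.
 */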

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
					     unsigned gpte)
{
	unsigned mask;

	/* dirty bit is not supported, so no need to track it */
	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
		return;

	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);

	mask = (unsigned)~ACC_WRITE_MASK;
	/* Allow write access to dirty gptes */
	mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
		PT_WRITABLE_MASK;
	*access &= mask;
}
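
/*
 * Added note: the shift above moves the guest dirty bit down into the
 * writable bit position, so write permission is removed only from clean
 * gptes. For PTTYPE == 64/32, for example, PT_GUEST_DIRTY_SHIFT is
 * PT_DIRTY_SHIFT (bit 6) and PT_WRITABLE_SHIFT is bit 1, so the
 * expression becomes (gpte >> 5) & PT_WRITABLE_MASK (bit positions
 * assumed from the standard x86 pte layout).
 */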

static inline int FNAME(is_present_gpte)(unsigned long pte)
{
#if PTTYPE != PTTYPE_EPT
	return pte & PT_PRESENT_MASK;
#else
	return pte & 7;
#endif
}
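
/*
 * Added note: EPT entries have no present bit; an entry is usable if any
 * of the read/write/execute permission bits (bits 2:0) is set, hence the
 * "pte & 7" above.
 */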

static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	int npages;
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
	/* Check if the user is doing something meaningless. */
	if (unlikely(npages != 1))
		return -EFAULT;

	table = kmap_atomic(page);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}
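
/*
 * Added note on the return value: 0 means the cmpxchg installed new_pte,
 * 1 means the gpte changed under us (the caller restarts the walk), and
 * -EFAULT means the user mapping of the guest page table was not
 * available.
 */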

static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *spte,
				  u64 gpte)
{
	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
		goto no_present;

	if (!FNAME(is_present_gpte)(gpte))
		goto no_present;

	/* if the accessed bit is not supported, prefetch non-accessed gptes */
	if (PT_HAVE_ACCESSED_DIRTY(&vcpu->arch.mmu) && !(gpte & PT_GUEST_ACCESSED_MASK))
		goto no_present;

	return false;

no_present:
	drop_spte(vcpu->kvm, spte);
	return true;
}

/*
 * For PTTYPE_EPT, a page table can be executable but not readable
 * on supported processors. Therefore, set_spte does not automatically
 * set bit 0 if execute only is supported. Here, we repurpose ACC_USER_MASK
 * to signify readability since it isn't used in the EPT case.
 */
static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
{
	unsigned access;
#if PTTYPE == PTTYPE_EPT
	access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
		((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
		((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
#else
	BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
	BUILD_BUG_ON(ACC_EXEC_MASK != 1);
	access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
	/* Combine NX with P (which is set here) to get ACC_EXEC_MASK.  */
	access ^= (gpte >> PT64_NX_SHIFT);
#endif

	return access;
}
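
/*
 * Added worked example for the non-EPT branch above: ACC_EXEC_MASK equals
 * PT_PRESENT_MASK (bit 0), which is always set for a present gpte, so
 * XOR-ing in (gpte >> PT64_NX_SHIFT) clears the exec bit exactly when NX
 * (bit 63) is set in the gpte and leaves it set otherwise. E.g. a
 * present, writable, NX gpte yields access = (P|W) ^ 1 = W only.
 */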

static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     struct guest_walker *walker,
					     int write_fault)
{
	unsigned level, index;
	pt_element_t pte, orig_pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	int ret;

	/* dirty/accessed bits are not supported, so no need to update them */
	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
		return 0;

	for (level = walker->max_level; level >= walker->level; --level) {
		pte = orig_pte = walker->ptes[level - 1];
		table_gfn = walker->table_gfn[level - 1];
		ptep_user = walker->ptep_user[level - 1];
		index = offset_in_page(ptep_user) / sizeof(pt_element_t);
		if (!(pte & PT_GUEST_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
			pte |= PT_GUEST_ACCESSED_MASK;
		}
		if (level == walker->level && write_fault &&
				!(pte & PT_GUEST_DIRTY_MASK)) {
			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
			if (kvm_arch_write_log_dirty(vcpu))
				return -EINVAL;
#endif
			pte |= PT_GUEST_DIRTY_MASK;
		}
		if (pte == orig_pte)
			continue;

		/*
		 * If the slot is read-only, simply do not process the accessed
		 * and dirty bits.  This is the correct thing to do if the slot
		 * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
		 * are only supported if the accessed and dirty bits are already
		 * set in the ROM (so that MMIO writes are never needed).
		 *
		 * Note that NPT does not allow this at all and faults, since
		 * it always wants nested page table entries for the guest
		 * page tables to be writable.  And EPT works but will simply
		 * overwrite the read-only memory to set the accessed and dirty
		 * bits.
		 */
		if (unlikely(!walker->pte_writable[level - 1]))
			continue;

		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
		if (ret)
			return ret;

		kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
		walker->ptes[level - 1] = pte;
	}
	return 0;
}
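
/*
 * Added note on the return value: 0 means all needed accessed/dirty bits
 * were already set or have been updated, a positive value means a gpte
 * changed under us (walk_addr_generic retries the walk), and a negative
 * value is propagated as an error.
 */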

static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
{
	unsigned pkeys = 0;
#if PTTYPE == 64
	pte_t pte = {.pte = gpte};

	pkeys = pte_flags_pkey(pte_flags(pte));
#endif
	return pkeys;
}
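
/*
 * Added note: protection keys only exist in the 64-bit guest pte format,
 * where the key occupies bits 62:59 of the pte (bit positions assumed
 * from the x86 protection-key definition); the 32-bit and EPT
 * instantiations always report key 0.
 */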

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gva_t addr, u32 access)
{
	int ret;
	pt_element_t pte;
	pt_element_t __user *uninitialized_var(ptep_user);
	gfn_t table_gfn;
	u64 pt_access, pte_access;
	unsigned index, accessed_dirty, pte_pkey;
	unsigned nested_access;
	gpa_t pte_gpa;
	bool have_ad;
	int offset;
	u64 walk_nx_mask = 0;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault  = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;
	u16 errcode = 0;
	gpa_t real_gpa;
	gfn_t gfn;

	trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
	walker->level = mmu->root_level;
	pte           = mmu->get_cr3(vcpu);
	have_ad       = PT_HAVE_ACCESSED_DIRTY(mmu);

#if PTTYPE == 64
	walk_nx_mask = 1ULL << PT64_NX_SHIFT;
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!FNAME(is_present_gpte)(pte))
			goto error;
		--walker->level;
	}
#endif
	walker->max_level = walker->level;
	ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));

	/*
	 * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
	 * by the MOV to CR instruction are treated as reads and do not cause the
	 * processor to set the dirty flag in any EPT paging-structure entry.
	 */
	nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;

	pte_access = ~0;
	++walker->level;

	do {
		gfn_t real_gfn;
		unsigned long host_addr;

		pt_access = pte_access;
		--walker->level;

		index = PT_INDEX(addr, walker->level);
		table_gfn = gpte_to_gfn(pte);
		offset    = index * sizeof(pt_element_t);
		pte_gpa   = gfn_to_gpa(table_gfn) + offset;

		BUG_ON(walker->level < 1);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      nested_access,
					      &walker->fault);

		/*
		 * FIXME: This can happen if emulation (e.g. of an INS/OUTS
		 * instruction) triggers a nested page fault.  The exit
		 * qualification / exit info field will incorrectly have
		 * "guest page access" as the nested page fault's cause,
		 * instead of "guest page structure access".  To fix this,
		 * the x86_exception struct should be augmented with enough
		 * information to fix the exit_qualification or exit_info_1
		 * fields.
		 */
		if (unlikely(real_gfn == UNMAPPED_GVA))
			return 0;

		real_gfn = gpa_to_gfn(real_gfn);

		host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn,
					    &walker->pte_writable[walker->level - 1]);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
			goto error;
		walker->ptep_user[walker->level - 1] = ptep_user;

		trace_kvm_mmu_paging_element(pte, walker->level);

		/*
		 * Inverting NX lets us AND it like the other
		 * permission bits.
		 */
		pte_access = pt_access & (pte ^ walk_nx_mask);

		if (unlikely(!FNAME(is_present_gpte)(pte)))
			goto error;

		if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
			errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		walker->ptes[walker->level - 1] = pte;
	} while (!is_last_gpte(mmu, walker->level, pte));

	pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
	accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;

	/* Convert to ACC_*_MASK flags for struct guest_walker.  */
	walker->pt_access = FNAME(gpte_access)(vcpu, pt_access ^ walk_nx_mask);
	walker->pte_access = FNAME(gpte_access)(vcpu, pte_access ^ walk_nx_mask);
	errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
	if (unlikely(errcode))
		goto error;

	gfn = gpte_to_gfn_lvl(pte, walker->level);
	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

	if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
		gfn += pse36_gfn_delta(pte);

	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
	if (real_gpa == UNMAPPED_GVA)
		return 0;

	walker->gfn = real_gpa >> PAGE_SHIFT;

	if (!write_fault)
		FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
	else
		/*
		 * On a write fault, fold the dirty bit into accessed_dirty.
		 * For modes without A/D bits support, accessed_dirty will
		 * always be clear.
		 */
		accessed_dirty &= pte >>
			(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);

	if (unlikely(!accessed_dirty)) {
		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
			goto retry_walk;
	}

	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, walker->pte_access, walker->pt_access);
	return 1;

error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (mmu->nx ||
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;

#if PTTYPE == PTTYPE_EPT
	/*
	 * Use PFERR_RSVD_MASK in error_code to tell whether an EPT
	 * misconfiguration needs to be injected. The detection is
	 * done by is_rsvd_bits_set() above.
	 *
	 * We set up the value of exit_qualification to inject:
	 * [2:0] - Derived from the access bits. The exit_qualification might be
	 *         out of date if it is serving an EPT misconfiguration.
	 * [5:3] - Calculated by the page walk of the guest EPT page tables
	 * [8:7] - Derived from [8:7] of the real exit_qualification
	 *
	 * The other bits are set to 0.
	 */
	if (!(errcode & PFERR_RSVD_MASK)) {
		vcpu->arch.exit_qualification &= 0x180;
		if (write_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
		if (user_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ;
		if (fetch_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR;
		vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
	}
#endif
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}
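
/*
 * Added note on the return value of walk_addr_generic(): 1 means the
 * translation succeeded and walker->gfn/pte_access are valid; 0 means
 * the walk faulted and the details were recorded in walker->fault (or,
 * for a nested translation failure, in the fault filled in by
 * mmu->translate_gpa).
 */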

static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
					access);
}

#if PTTYPE != PTTYPE_EPT
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}
#endif

static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
{
	unsigned pte_access;
	gfn_t gfn;
	kvm_pfn_t pfn;

	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
		return false;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);

	gfn = gpte_to_gfn(gpte);
	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
	FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte);
	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
			no_dirty_log && (pte_access & ACC_WRITE_MASK));
	if (is_error_pfn(pfn))
		return false;

	/*
	 * we call mmu_set_spte() with host_writable = true because
	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
	 */
	mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn,
		     true, true);

	return true;
}

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte = *(const pt_element_t *)pte;

	FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
}

static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int r, index;

	if (level == PT_PAGE_TABLE_LEVEL) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
				  &curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}
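
/*
 * Added note: at the last (4K) level, gpte_changed() reads a whole
 * PTE_PREFETCH_NUM-entry aligned group of gptes into gw->prefetch_ptes
 * rather than a single entry, so that FNAME(pte_prefetch) below can
 * reuse the batch without extra guest memory reads.
 */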

static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = page_header(__pa(sptep));

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (spte == sptep)
			continue;

		if (is_shadow_present_pte(*spte))
			continue;

		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
			break;
	}
}
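
/*
 * Added note: the index is rounded down to a PTE_PREFETCH_NUM-aligned
 * group, so the loop walks the sptes surrounding the faulting one and
 * fills in any that are not yet present from the gptes batched by
 * gpte_changed(), stopping at the first gpte that cannot be prefetched.
 */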

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation; return RET_PF_EMULATE to indicate this case.
 */
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int write_fault, int hlevel,
			 kvm_pfn_t pfn, bool map_writable, bool prefault)
{
	struct kvm_mmu_page *sp = NULL;
	struct kvm_shadow_walk_iterator it;
	unsigned direct_access, access = gw->pt_access;
	int top_level, ret;

	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu.root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		clear_sp_write_flooding_count(it.sptep);
		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access);
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(vcpu, it.sptep, sp);
	}

	for (;
	     shadow_walk_okay(&it) && it.level > hlevel;
	     shadow_walk_next(&it)) {
		gfn_t direct_gfn;

		clear_sp_write_flooding_count(it.sptep);
		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (is_shadow_present_pte(*it.sptep))
			continue;

		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
				      true, direct_access);
		link_shadow_page(vcpu, it.sptep, sp);
	}

	clear_sp_write_flooding_count(it.sptep);
	ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
			   it.level, gw->gfn, pfn, prefault, map_writable);
	FNAME(pte_prefetch)(vcpu, gw, it.sptep);

	return ret;

out_gpte_changed:
	kvm_release_pfn_clean(pfn);
	return RET_PF_RETRY;
}
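
/*
 * Added note: the first loop above shadows the guest's own page-table
 * levels (indirect shadow pages, one per guest table), while the second
 * loop builds direct shadow pages for the levels below the guest's last
 * gpte, which is how a guest large page gets mapped with smaller host
 * pages down to hlevel.
 */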

/*
 * Check whether the mapped gfn can write its own page tables in the
 * current mapping.
 *
 * This is a helper for FNAME(page_fault). When the guest uses a large
 * page to map a writable gfn that is itself used as a guest page table,
 * we should force kvm to map it with small pages, because a new shadow
 * page will be created when kvm establishes the shadow page table, which
 * would stop kvm from using the large page anyway. Doing this early
 * avoids unnecessary #PFs and emulation.
 *
 * @write_fault_to_shadow_pgtable is set to true if the faulting gfn is
 * currently used as one of its own page tables.
 *
 * Note: the PDPT is not checked for PAE-32 bit guests. This is fine
 * because the PDPT is always shadowed, which means we can never use a
 * large page to map the gfn that holds the PDPT.
 */
static bool
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
			      struct guest_walker *walker, int user_fault,
			      bool *write_fault_to_shadow_pgtable)
{
	int level;
	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
	bool self_changed = false;

	if (!(walker->pte_access & ACC_WRITE_MASK ||
	      (!is_write_protection(vcpu) && !user_fault)))
		return false;

	for (level = walker->level; level <= walker->max_level; level++) {
		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];

		self_changed |= !(gfn & mask);
		*write_fault_to_shadow_pgtable |= !gfn;
	}

	return self_changed;
}
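
/*
 * Added note on the XOR test above: walker->gfn ^ table_gfn is zero only
 * when the faulting gfn is exactly one of the guest page-table gfns
 * (write_fault_to_shadow_pgtable), while masking with the huge-page mask
 * detects the weaker condition that both gfns fall inside the same
 * large-page region (self_changed).
 */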

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
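/*
 * Added note: in this version the 0/1 convention above corresponds to the
 * RET_PF_RETRY / RET_PF_EMULATE values used in the body below (assuming
 * the usual RET_PF_* definitions in mmu.c, where RET_PF_RETRY is 0 and
 * RET_PF_EMULATE is 1).
 */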
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
			     bool prefault)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	int r;
	kvm_pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	bool force_pt_level = false;
	unsigned long mmu_seq;
	bool map_writable, is_self_change_mapping;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * If PFEC.RSVD is set, this is a shadow page fault.
	 * The bit needs to be cleared before walking guest page tables.
	 */
	error_code &= ~PFERR_RSVD_MASK;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		if (!prefault)
			inject_page_fault(vcpu, &walker.fault);

		return RET_PF_RETRY;
	}

	if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
		shadow_page_table_clear_flood(vcpu, addr);
		return RET_PF_EMULATE;
	}

	vcpu->arch.write_fault_to_shadow_pgtable = false;

	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);

	if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) {
		level = mapping_level(vcpu, walker.gfn, &force_pt_level);
		if (likely(!force_pt_level)) {
			level = min(walker.level, level);
			walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
		}
	} else
		force_pt_level = true;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
			 &map_writable))
		return RET_PF_RETRY;

	if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
		return r;

	/*
	 * Do not change pte_access if the pfn is a mmio page, otherwise
	 * we will cache the incorrect access into mmio spte.
	 */
	if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
	     !is_write_protection(vcpu) && !user_fault &&
	      !is_noslot_pfn(pfn)) {
		walker.pte_access |= ACC_WRITE_MASK;
		walker.pte_access &= ~ACC_USER_MASK;

		/*
		 * If we converted a user page to a kernel page so that the
		 * kernel can write to it when cr0.wp=0, then we should
		 * prevent the kernel from executing it if SMEP is enabled.
		 */
		if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
			walker.pte_access &= ~ACC_EXEC_MASK;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;

	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	if (make_mmu_pages_available(vcpu) < 0)
		goto out_unlock;
	if (!force_pt_level)
		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
			 level, pfn, map_writable, prefault);
	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return RET_PF_RETRY;
}

static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
	int offset = 0;

	WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
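
/*
 * Added note (assumption about the 32-bit case): a 32-bit guest page
 * table holds more entries than one shadow page covers, so a single
 * guest table is shadowed by several shadow pages and sp->role.quadrant
 * selects which portion this one maps; the quadrant therefore
 * contributes an offset into the guest page when reconstructing the gpa
 * of the first gpte shadowed by this page.
 */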

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int level;
	u64 *sptep;

	vcpu_clear_mmio_info(vcpu, gva);

	/*
	 * No need to check the return value here; if it fails,
	 * rmap_can_add() will let us skip the pte prefetch later.
	 */
	mmu_topup_memory_caches(vcpu);

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			pt_element_t gpte;
			gpa_t pte_gpa;

			if (!sp->unsync)
				break;

			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
				kvm_flush_remote_tlbs(vcpu->kvm);

			if (!rmap_can_add(vcpu))
				break;

			if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
						       sizeof(pt_element_t)))
				break;

			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}

#if PTTYPE != PTTYPE_EPT
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
#endif

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 *   We should flush all tlbs if an spte is dropped even though the guest is
 *   responsible for it.  If we don't, kvm_mmu_notifier_invalidate_page and
 *   kvm_mmu_notifier_invalidate_range_start will see that the page is no
 *   longer mapped for the guest and skip the flush, and the guest could keep
 *   accessing the freed page through stale tlb entries.  We increase
 *   kvm->tlbs_dirty here to delay that flush instead of doing it immediately.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, nr_present = 0;
	bool host_writable;
	gpa_t first_pte_gpa;

	/* A direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!sp->spt[i])
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
					       sizeof(pt_element_t)))
			return 0;

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			/*
			 * Update the spte before increasing tlbs_dirty to make
			 * sure no tlb flush is lost after the spte is zapped;
			 * see the comments in kvm_flush_remote_tlbs().
			 */
			smp_wmb();
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= FNAME(gpte_access)(vcpu, gpte);
		FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte);

		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
		      &nr_present))
			continue;

		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			/*
			 * Same as above where prefetch_invalid_gpte() drops
			 * the spte.
			 */
			smp_wmb();
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		nr_present++;

		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

		set_spte(vcpu, &sp->spt[i], pte_access,
			 PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 host_writable);
	}

	return nr_present;
}
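
/*
 * Added note on the return value: nr_present counts the sptes that are
 * still backed by a valid gpte after the resync; sptes whose gpte went
 * away or changed gfn are dropped along the way, and a failed guest read
 * makes the function bail out with 0, in which case the caller is
 * expected to zap the shadow page (behavior of __kvm_sync_page in mmu.c
 * assumed).
 */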

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG
#undef PT_GUEST_ACCESSED_MASK
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT
#undef PT_HAVE_ACCESSED_DIRTY