v3.15
   1/*P:700
   2 * The pagetable code, on the other hand, still shows the scars of
   3 * previous encounters.  It's functional, and as neat as it can be in the
   4 * circumstances, but be wary, for these things are subtle and break easily.
   5 * The Guest provides a virtual to physical mapping, but we can neither trust
   6 * it nor use it: we verify and convert it here then point the CPU to the
   7 * converted Guest pages when running the Guest.
   8:*/
   9
  10/* Copyright (C) Rusty Russell IBM Corporation 2013.
  11 * GPL v2 and any later version */
  12#include <linux/mm.h>
  13#include <linux/gfp.h>
  14#include <linux/types.h>
  15#include <linux/spinlock.h>
  16#include <linux/random.h>
  17#include <linux/percpu.h>
  18#include <asm/tlbflush.h>
  19#include <asm/uaccess.h>
  20#include "lg.h"
  21
  22/*M:008
  23 * We hold reference to pages, which prevents them from being swapped.
  24 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
  25 * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
  26 * could probably consider launching Guests as non-root.
  27:*/
  28
  29/*H:300
  30 * The Page Table Code
  31 *
  32 * We use two-level page tables for the Guest, or three-level with PAE.  If
  33 * you're not entirely comfortable with virtual addresses, physical addresses
  34 * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page
  35 * Table Handling" (with diagrams!).
  36 *
  37 * The Guest keeps page tables, but we maintain the actual ones here: these are
  38 * called "shadow" page tables.  Which is a very Guest-centric name: these are
  39 * the real page tables the CPU uses, although we keep them up to date to
  40 * reflect the Guest's.  (See what I mean about weird naming?  Since when do
  41 * shadows reflect anything?)
  42 *
  43 * Anyway, this is the most complicated part of the Host code.  There are seven
  44 * parts to this:
  45 *  (i) Looking up a page table entry when the Guest faults,
  46 *  (ii) Making sure the Guest stack is mapped,
  47 *  (iii) Setting up a page table entry when the Guest tells us one has changed,
  48 *  (iv) Switching page tables,
  49 *  (v) Flushing (throwing away) page tables,
  50 *  (vi) Mapping the Switcher when the Guest is about to run,
  51 *  (vii) Setting up the page tables initially.
  52:*/
  53
  54/*
  55 * The Switcher uses the complete top PTE page.  That's 1024 PTE entries (4MB)
  56 * or 512 PTE entries with PAE (2MB).
  57 */
  58#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
  59
  60/*
  61 * For PAE we need the PMD index as well. We use the last 2MB, so we
  62 * will need the last pmd entry of the last pmd page.
  63 */
  64#ifdef CONFIG_X86_PAE
  65#define CHECK_GPGD_MASK		_PAGE_PRESENT
  66#else
  67#define CHECK_GPGD_MASK		_PAGE_TABLE
  68#endif
  69
  70/*H:320
  71 * The page table code is curly enough to need helper functions to keep it
  72 * clear and clean.  The kernel itself provides many of them; one advantage
  73 * of insisting that the Guest and Host use the same CONFIG_X86_PAE setting.
  74 *
  75 * There are two functions which return pointers to the shadow (aka "real")
  76 * page tables.
  77 *
  78 * spgd_addr() takes the virtual address and returns a pointer to the top-level
  79 * page directory entry (PGD) for that address.  Since we keep track of several
  80 * page tables, the "i" argument tells us which one we're interested in (it's
  81 * usually the current one).
  82 */
  83static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
  84{
  85	unsigned int index = pgd_index(vaddr);
  86
   87	/* Return a pointer to the index'th pgd entry for the i'th page table. */
  88	return &cpu->lg->pgdirs[i].pgdir[index];
  89}
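/*
 * For concreteness: without PAE, pgd_index() is just the top 10 bits of
 * the address, so pgd_index(0xC0000000) == 768.  With PAE it's the top
 * two bits, and the same address lands in entry 3 of the 4-entry PGD.
 */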
  90
  91#ifdef CONFIG_X86_PAE
  92/*
  93 * This routine then takes the PGD entry given above, which contains the
  94 * address of the PMD page.  It then returns a pointer to the PMD entry for the
  95 * given address.
  96 */
  97static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
  98{
  99	unsigned int index = pmd_index(vaddr);
 100	pmd_t *page;
 101
 102	/* You should never call this if the PGD entry wasn't valid */
 103	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
 104	page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
 105
 106	return &page[index];
 107}
 108#endif
 109
 110/*
 111 * This routine then takes the page directory entry returned above, which
 112 * contains the address of the page table entry (PTE) page.  It then returns a
 113 * pointer to the PTE entry for the given address.
 114 */
 115static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
 116{
 117#ifdef CONFIG_X86_PAE
 118	pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
 119	pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);
 120
 121	/* You should never call this if the PMD entry wasn't valid */
 122	BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
 123#else
 124	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
 125	/* You should never call this if the PGD entry wasn't valid */
 126	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
 127#endif
 128
 129	return &page[pte_index(vaddr)];
 130}
 131
 132/*
 133 * These functions are just like the above, except they access the Guest
 134 * page tables.  Hence they return a Guest address.
 135 */
 136static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
 137{
 138	unsigned int index = vaddr >> (PGDIR_SHIFT);
 139	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
 140}
 141
 142#ifdef CONFIG_X86_PAE
 143/* Follow the PGD to the PMD. */
 144static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
 145{
 146	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
 147	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
 148	return gpage + pmd_index(vaddr) * sizeof(pmd_t);
 149}
 150
 151/* Follow the PMD to the PTE. */
 152static unsigned long gpte_addr(struct lg_cpu *cpu,
 153			       pmd_t gpmd, unsigned long vaddr)
 154{
 155	unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
 156
 157	BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
 158	return gpage + pte_index(vaddr) * sizeof(pte_t);
 159}
 160#else
 161/* Follow the PGD to the PTE (no mid-level for !PAE). */
 162static unsigned long gpte_addr(struct lg_cpu *cpu,
 163				pgd_t gpgd, unsigned long vaddr)
 164{
 165	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
 166
 167	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
 168	return gpage + pte_index(vaddr) * sizeof(pte_t);
 169}
 170#endif
 171/*:*/
 172
 173/*M:007
 174 * get_pfn is slow: we could probably try to grab batches of pages here as
 175 * an optimization (ie. pre-faulting).
 176:*/
 177
 178/*H:350
 179 * This routine takes a page number given by the Guest and converts it to
 180 * an actual, physical page number.  It can fail for several reasons: the
 181 * virtual address might not be mapped by the Launcher, the write flag is set
 182 * and the page is read-only, or the write flag was set and the page was
 183 * shared so had to be copied, but we ran out of memory.
 184 *
 185 * This holds a reference to the page, so release_pte() is careful to put that
 186 * back.
 187 */
 188static unsigned long get_pfn(unsigned long virtpfn, int write)
 189{
 190	struct page *page;
 191
 192	/* gup me one page at this address please! */
 193	if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
 194		return page_to_pfn(page);
 195
 196	/* This value indicates failure. */
 197	return -1UL;
 198}
 199
 200/*H:340
 201 * Converting a Guest page table entry to a shadow (ie. real) page table
 202 * entry can be a little tricky.  The flags are (almost) the same, but the
 203 * Guest PTE contains a virtual page number: the CPU needs the real page
 204 * number.
 205 */
 206static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
 207{
 208	unsigned long pfn, base, flags;
 209
 210	/*
 211	 * The Guest sets the global flag, because it thinks that it is using
 212	 * PGE.  We only told it to use PGE so it would tell us whether it was
 213	 * flushing a kernel mapping or a userspace mapping.  We don't actually
 214	 * use the global bit, so throw it away.
 215	 */
 216	flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);
 217
 218	/* The Guest's pages are offset inside the Launcher. */
 219	base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;
 220
 221	/*
 222	 * We need a temporary "unsigned long" variable to hold the answer from
 223	 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
 224	 * fit in spte.pfn.  get_pfn() finds the real physical number of the
 225	 * page, given the virtual number.
 226	 */
 227	pfn = get_pfn(base + pte_pfn(gpte), write);
 228	if (pfn == -1UL) {
 229		kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
 230		/*
 231		 * When we destroy the Guest, we'll go through the shadow page
 232		 * tables and release_pte() them.  Make sure we don't think
 233		 * this one is valid!
 234		 */
 235		flags = 0;
 236	}
 237	/* Now we assemble our shadow PTE from the page number and flags. */
 238	return pfn_pte(pfn, __pgprot(flags));
 239}
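/*
 * A concrete example, with a made-up mem_base: if the Launcher had mapped
 * Guest memory at 0x10000000, "base" above is 0x10000 pages, so a Guest
 * PTE naming guest page 5 really refers to Launcher virtual page 0x10005.
 * get_pfn() pins that page and returns the physical frame number which
 * ends up in the shadow PTE.
 */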
 240
 241/*H:460 And to complete the chain, release_pte() looks like this: */
 242static void release_pte(pte_t pte)
 243{
 244	/*
 245	 * Remember that get_user_pages_fast() took a reference to the page, in
 246	 * get_pfn()?  We have to put it back now.
 247	 */
 248	if (pte_flags(pte) & _PAGE_PRESENT)
 249		put_page(pte_page(pte));
 250}
 251/*:*/
  252
 253static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
 254{
 255	if ((pte_flags(gpte) & _PAGE_PSE) ||
 256	    pte_pfn(gpte) >= cpu->lg->pfn_limit) {
 257		kill_guest(cpu, "bad page table entry");
 258		return false;
 259	}
 260	return true;
 261}
 262
 263static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
 264{
 265	if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
 266	    (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) {
 267		kill_guest(cpu, "bad page directory entry");
 268		return false;
 269	}
 270	return true;
 271}
 272
 273#ifdef CONFIG_X86_PAE
 274static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
 275{
 276	if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
 277	    (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) {
 278		kill_guest(cpu, "bad page middle directory entry");
 279		return false;
 280	}
 281	return true;
 282}
 283#endif
 284
 285/*H:331
 286 * This is the core routine to walk the shadow page tables and find the page
 287 * table entry for a specific address.
 288 *
 289 * If allocate is set, then we allocate any missing levels, setting the flags
 290 * on the new page directory and mid-level directories using the arguments
 291 * (which are copied from the Guest's page table entries).
 292 */
 293static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
 294			int pgd_flags, int pmd_flags)
 295{
 296	pgd_t *spgd;
 297	/* Mid level for PAE. */
 298#ifdef CONFIG_X86_PAE
 299	pmd_t *spmd;
 300#endif
 301
 302	/* Get top level entry. */
 303	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
 304	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
 305		/* No shadow entry: allocate a new shadow PTE page. */
 306		unsigned long ptepage;
 307
 308		/* If they didn't want us to allocate anything, stop. */
 309		if (!allocate)
 310			return NULL;
 311
 312		ptepage = get_zeroed_page(GFP_KERNEL);
 313		/*
 314		 * This is not really the Guest's fault, but killing it is
 315		 * simple for this corner case.
 316		 */
 317		if (!ptepage) {
 318			kill_guest(cpu, "out of memory allocating pte page");
 319			return NULL;
 320		}
 321		/*
 322		 * And we copy the flags to the shadow PGD entry.  The page
 323		 * number in the shadow PGD is the page we just allocated.
 324		 */
 325		set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags));
 326	}
 327
 328	/*
 329	 * Intel's Physical Address Extension actually uses three levels of
 330	 * page tables, so we need to look in the mid-level.
 331	 */
 332#ifdef CONFIG_X86_PAE
 333	/* Now look at the mid-level shadow entry. */
 334	spmd = spmd_addr(cpu, *spgd, vaddr);
 335
 336	if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
 337		/* No shadow entry: allocate a new shadow PTE page. */
 338		unsigned long ptepage;
 339
 340		/* If they didn't want us to allocate anything, stop. */
 341		if (!allocate)
 342			return NULL;
 343
 344		ptepage = get_zeroed_page(GFP_KERNEL);
 345
 346		/*
 347		 * This is not really the Guest's fault, but killing it is
 348		 * simple for this corner case.
 349		 */
 350		if (!ptepage) {
 351			kill_guest(cpu, "out of memory allocating pmd page");
 352			return NULL;
 353		}
 354
 355		/*
 356		 * And we copy the flags to the shadow PMD entry.  The page
 357		 * number in the shadow PMD is the page we just allocated.
 358		 */
 359		set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags));
 360	}
 361#endif
 362
 363	/* Get the pointer to the shadow PTE entry we're going to set. */
 364	return spte_addr(cpu, *spgd, vaddr);
 365}
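/*
 * Callers come in two flavours, both visible in this file: demand_page()
 * and allocate_switcher_mapping() pass allocate == true along with the
 * flags to use for any levels they create, while page_writable() and the
 * Switcher remapping code pass "false, 0, 0" and simply get NULL back if
 * a level is missing.
 */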
 366
 367/*H:330
 368 * (i) Looking up a page table entry when the Guest faults.
 369 *
 370 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 371 * come here.  That's because we only set up the shadow page tables lazily as
 372 * they're needed, so we get page faults all the time and quietly fix them up
 373 * and return to the Guest without it knowing.
 374 *
 375 * If we fixed up the fault (ie. we mapped the address), this routine returns
  376 * true.  Otherwise, it was a real fault and we need to tell the Guest.
 377 */
  378bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 379{
 380	unsigned long gpte_ptr;
 381	pte_t gpte;
 382	pte_t *spte;
 383	pmd_t gpmd;
 384	pgd_t gpgd;
  385
 386	/* We never demand page the Switcher, so trying is a mistake. */
 387	if (vaddr >= switcher_addr)
 388		return false;
 389
 390	/* First step: get the top-level Guest page table entry. */
 391	if (unlikely(cpu->linear_pages)) {
 392		/* Faking up a linear mapping. */
 393		gpgd = __pgd(CHECK_GPGD_MASK);
 394	} else {
 395		gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
 396		/* Toplevel not present?  We can't map it in. */
 397		if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
 398			return false;
 399
 400		/* 
 401		 * This kills the Guest if it has weird flags or tries to
 402		 * refer to a "physical" address outside the bounds.
 403		 */
 404		if (!check_gpgd(cpu, gpgd))
 405			return false;
 406	}
 407
 408	/* This "mid-level" entry is only used for non-linear, PAE mode. */
 409	gpmd = __pmd(_PAGE_TABLE);
 410
 411#ifdef CONFIG_X86_PAE
 412	if (likely(!cpu->linear_pages)) {
 413		gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
 414		/* Middle level not present?  We can't map it in. */
 415		if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
 416			return false;
 417
 418		/* 
 419		 * This kills the Guest if it has weird flags or tries to
 420		 * refer to a "physical" address outside the bounds.
 421		 */
 422		if (!check_gpmd(cpu, gpmd))
 423			return false;
 424	}
 425
 426	/*
 427	 * OK, now we look at the lower level in the Guest page table: keep its
 428	 * address, because we might update it later.
 429	 */
 430	gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
 431#else
 432	/*
 433	 * OK, now we look at the lower level in the Guest page table: keep its
 434	 * address, because we might update it later.
 435	 */
 436	gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
 437#endif
 438
 439	if (unlikely(cpu->linear_pages)) {
 440		/* Linear?  Make up a PTE which points to same page. */
 441		gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
 442	} else {
 443		/* Read the actual PTE value. */
 444		gpte = lgread(cpu, gpte_ptr, pte_t);
 445	}
 446
 447	/* If this page isn't in the Guest page tables, we can't page it in. */
 448	if (!(pte_flags(gpte) & _PAGE_PRESENT))
 449		return false;
 450
 451	/*
 452	 * Check they're not trying to write to a page the Guest wants
 453	 * read-only (bit 2 of errcode == write).
 454	 */
 455	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
 456		return false;
 457
 458	/* User access to a kernel-only page? (bit 3 == user access) */
 459	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
 460		return false;
  461
 462	/*
 463	 * Check that the Guest PTE flags are OK, and the page number is below
 464	 * the pfn_limit (ie. not mapping the Launcher binary).
 465	 */
 466	if (!check_gpte(cpu, gpte))
 467		return false;
 468
 469	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
 470	gpte = pte_mkyoung(gpte);
 471	if (errcode & 2)
 472		gpte = pte_mkdirty(gpte);
 473
 474	/* Get the pointer to the shadow PTE entry we're going to set. */
 475	spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd));
 476	if (!spte)
 477		return false;
 478
 479	/*
 480	 * If there was a valid shadow PTE entry here before, we release it.
 481	 * This can happen with a write to a previously read-only entry.
 482	 */
 483	release_pte(*spte);
 484
 485	/*
 486	 * If this is a write, we insist that the Guest page is writable (the
 487	 * final arg to gpte_to_spte()).
 488	 */
 489	if (pte_dirty(gpte))
 490		*spte = gpte_to_spte(cpu, gpte, 1);
 491	else
 492		/*
 493		 * If this is a read, don't set the "writable" bit in the page
 494		 * table entry, even if the Guest says it's writable.  That way
 495		 * we will come back here when a write does actually occur, so
 496		 * we can update the Guest's _PAGE_DIRTY flag.
 497		 */
 498		set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
 499
 500	/*
 501	 * Finally, we write the Guest PTE entry back: we've set the
 502	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
 503	 */
 504	if (likely(!cpu->linear_pages))
 505		lgwrite(cpu, gpte_ptr, pte_t, gpte);
 506
 507	/*
 508	 * The fault is fixed, the page table is populated, the mapping
 509	 * manipulated, the result returned and the code complete.  A small
 510	 * delay and a trace of alliteration are the only indications the Guest
 511	 * has that a page fault occurred at all.
 512	 */
 513	return true;
 514}
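/*
 * For orientation: the arch-specific trap handler (x86/core.c) calls this
 * roughly as sketched here (a paraphrase, not a verbatim quote).  On a
 * Guest page fault (trap 14) it tries demand_page() first, and only
 * reflects the fault into the Guest when that fails:
 *
 *	if (demand_page(cpu, cpu->arch.last_pagefault, cpu->regs->errcode))
 *		return;
 *	deliver_trap(cpu, 14);
 */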
 515
 516/*H:360
 517 * (ii) Making sure the Guest stack is mapped.
 518 *
 519 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
 520 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
 521 * we've seen that logic is quite long, and usually the stack pages are already
 522 * mapped, so it's overkill.
 523 *
 524 * This is a quick version which answers the question: is this virtual address
 525 * mapped by the shadow page tables, and is it writable?
 526 */
 527static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
 528{
 529	pte_t *spte;
 530	unsigned long flags;
 531
 532	/* You can't put your stack in the Switcher! */
 533	if (vaddr >= switcher_addr)
 534		return false;
 535
 536	/* If there's no shadow PTE, it's not writable. */
 537	spte = find_spte(cpu, vaddr, false, 0, 0);
 538	if (!spte)
 539		return false;
 540
 541	/*
 542	 * Check the flags on the pte entry itself: it must be present and
 543	 * writable.
 544	 */
 545	flags = pte_flags(*spte);
 546	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
 547}
 548
 549/*
 550 * So, when pin_stack_pages() asks us to pin a page, we check if it's already
 551 * in the page tables, and if not, we call demand_page() with error code 2
 552 * (meaning "write").
 553 */
 554void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
 555{
  556	if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
 557		kill_guest(cpu, "bad stack page %#lx", vaddr);
 558}
 559/*:*/
 560
 561#ifdef CONFIG_X86_PAE
 562static void release_pmd(pmd_t *spmd)
 563{
 564	/* If the entry's not present, there's nothing to release. */
 565	if (pmd_flags(*spmd) & _PAGE_PRESENT) {
 566		unsigned int i;
 567		pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
 568		/* For each entry in the page, we might need to release it. */
 569		for (i = 0; i < PTRS_PER_PTE; i++)
 570			release_pte(ptepage[i]);
 571		/* Now we can free the page of PTEs */
 572		free_page((long)ptepage);
 573		/* And zero out the PMD entry so we never release it twice. */
 574		set_pmd(spmd, __pmd(0));
 575	}
 576}
 577
 578static void release_pgd(pgd_t *spgd)
 579{
 580	/* If the entry's not present, there's nothing to release. */
 581	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
 582		unsigned int i;
 583		pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
 584
 585		for (i = 0; i < PTRS_PER_PMD; i++)
 586			release_pmd(&pmdpage[i]);
 587
 588		/* Now we can free the page of PMDs */
 589		free_page((long)pmdpage);
 590		/* And zero out the PGD entry so we never release it twice. */
 591		set_pgd(spgd, __pgd(0));
 592	}
 593}
 594
 595#else /* !CONFIG_X86_PAE */
 596/*H:450
 597 * If we chase down the release_pgd() code, the non-PAE version looks like
 598 * this.  The PAE version is almost identical, but instead of calling
 599 * release_pte it calls release_pmd(), which looks much like this.
 600 */
 601static void release_pgd(pgd_t *spgd)
 602{
 603	/* If the entry's not present, there's nothing to release. */
 604	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
 605		unsigned int i;
 606		/*
 607		 * Converting the pfn to find the actual PTE page is easy: turn
 608		 * the page number into a physical address, then convert to a
 609		 * virtual address (easy for kernel pages like this one).
 610		 */
 611		pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
 612		/* For each entry in the page, we might need to release it. */
 613		for (i = 0; i < PTRS_PER_PTE; i++)
 614			release_pte(ptepage[i]);
 615		/* Now we can free the page of PTEs */
 616		free_page((long)ptepage);
 617		/* And zero out the PGD entry so we never release it twice. */
 618		*spgd = __pgd(0);
 619	}
 620}
 621#endif
 622
 623/*H:445
 624 * We saw flush_user_mappings() twice: once from the flush_user_mappings()
 625 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
 626 * It simply releases every PTE page from 0 up to the Guest's kernel address.
 627 */
 628static void flush_user_mappings(struct lguest *lg, int idx)
 629{
 630	unsigned int i;
 631	/* Release every pgd entry up to the kernel's address. */
 632	for (i = 0; i < pgd_index(lg->kernel_address); i++)
 633		release_pgd(lg->pgdirs[idx].pgdir + i);
 634}
 635
 636/*H:440
 637 * (v) Flushing (throwing away) page tables,
 638 *
 639 * The Guest has a hypercall to throw away the page tables: it's used when a
 640 * large number of mappings have been changed.
 641 */
 642void guest_pagetable_flush_user(struct lg_cpu *cpu)
 643{
 644	/* Drop the userspace part of the current page table. */
 645	flush_user_mappings(cpu->lg, cpu->cpu_pgd);
 646}
 647/*:*/
 648
 649/* We walk down the guest page tables to get a guest-physical address */
 650unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
 651{
 652	pgd_t gpgd;
 653	pte_t gpte;
 654#ifdef CONFIG_X86_PAE
 655	pmd_t gpmd;
 656#endif
 657
 658	/* Still not set up?  Just map 1:1. */
 659	if (unlikely(cpu->linear_pages))
  660		return vaddr;
 661
 662	/* First step: get the top-level Guest page table entry. */
 663	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
 664	/* Toplevel not present?  We can't map it in. */
 665	if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
 666		kill_guest(cpu, "Bad address %#lx", vaddr);
 667		return -1UL;
 668	}
 669
 670#ifdef CONFIG_X86_PAE
 671	gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
 672	if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) {
 673		kill_guest(cpu, "Bad address %#lx", vaddr);
 674		return -1UL;
 675	}
 676	gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
 677#else
 678	gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
 679#endif
 680	if (!(pte_flags(gpte) & _PAGE_PRESENT))
  681		kill_guest(cpu, "Bad address %#lx", vaddr);
 682
  683	return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
 684}
 685
 686/*
 687 * We keep several page tables.  This is a simple routine to find the page
 688 * table (if any) corresponding to this top-level address the Guest has given
 689 * us.
 690 */
 691static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
 692{
 693	unsigned int i;
 694	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
 695		if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
 696			break;
 697	return i;
 698}
 699
 700/*H:435
 701 * And this is us, creating the new page directory.  If we really do
 702 * allocate a new one (and so the kernel parts are not there), we set
 703 * blank_pgdir.
 704 */
 705static unsigned int new_pgdir(struct lg_cpu *cpu,
 706			      unsigned long gpgdir,
 707			      int *blank_pgdir)
 708{
 709	unsigned int next;
 710
 711	/*
 712	 * We pick one entry at random to throw out.  Choosing the Least
 713	 * Recently Used might be better, but this is easy.
 714	 */
 715	next = prandom_u32() % ARRAY_SIZE(cpu->lg->pgdirs);
 716	/* If it's never been allocated at all before, try now. */
 717	if (!cpu->lg->pgdirs[next].pgdir) {
 718		cpu->lg->pgdirs[next].pgdir =
 719					(pgd_t *)get_zeroed_page(GFP_KERNEL);
 720		/* If the allocation fails, just keep using the one we have */
 721		if (!cpu->lg->pgdirs[next].pgdir)
 722			next = cpu->cpu_pgd;
 723		else {
 724			/*
 725			 * This is a blank page, so there are no kernel
 726			 * mappings: caller must map the stack!
 727			 */
 728			*blank_pgdir = 1;
 729		}
 730	}
 731	/* Record which Guest toplevel this shadows. */
 732	cpu->lg->pgdirs[next].gpgdir = gpgdir;
 733	/* Release all the non-kernel mappings. */
 734	flush_user_mappings(cpu->lg, next);
 735
 736	/* This hasn't run on any CPU at all. */
 737	cpu->lg->pgdirs[next].last_host_cpu = -1;
 738
 739	return next;
 740}
 741
 742/*H:501
 743 * We do need the Switcher code mapped at all times, so we allocate that
 744 * part of the Guest page table here.  We map the Switcher code immediately,
 745 * but defer mapping of the guest register page and IDT/LDT etc page until
 746 * just before we run the guest in map_switcher_in_guest().
 747 *
 748 * We *could* do this setup in map_switcher_in_guest(), but at that point
  749 * we have interrupts disabled, and allocating pages like that is fraught: we
 750 * can't sleep if we need to free up some memory.
 751 */
 752static bool allocate_switcher_mapping(struct lg_cpu *cpu)
 753{
 754	int i;
 755
 756	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
 757		pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true,
 758				       CHECK_GPGD_MASK, _PAGE_TABLE);
 759		if (!pte)
 760			return false;
 761
 762		/*
 763		 * Map the switcher page if not already there.  It might
 764		 * already be there because we call allocate_switcher_mapping()
 765		 * in guest_set_pgd() just in case it did discard our Switcher
 766		 * mapping, but it probably didn't.
 767		 */
 768		if (i == 0 && !(pte_flags(*pte) & _PAGE_PRESENT)) {
 769			/* Get a reference to the Switcher page. */
 770			get_page(lg_switcher_pages[0]);
  771			/* Create a read-only, executable, kernel-style PTE */
 772			set_pte(pte,
 773				mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX));
 774		}
 775	}
 776	cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true;
 777	return true;
 778}
 779
 780/*H:470
 781 * Finally, a routine which throws away everything: all PGD entries in all
 782 * the shadow page tables, including the Guest's kernel mappings.  This is used
 783 * when we destroy the Guest.
 784 */
 785static void release_all_pagetables(struct lguest *lg)
 786{
 787	unsigned int i, j;
 788
 789	/* Every shadow pagetable this Guest has */
 790	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) {
 791		if (!lg->pgdirs[i].pgdir)
 792			continue;
 793
 794		/* Every PGD entry. */
 795		for (j = 0; j < PTRS_PER_PGD; j++)
 796			release_pgd(lg->pgdirs[i].pgdir + j);
 797		lg->pgdirs[i].switcher_mapped = false;
 798		lg->pgdirs[i].last_host_cpu = -1;
 799	}
 800}
 801
 802/*
 803 * We also throw away everything when a Guest tells us it's changed a kernel
 804 * mapping.  Since kernel mappings are in every page table, it's easiest to
 805 * throw them all away.  This traps the Guest in amber for a while as
 806 * everything faults back in, but it's rare.
 807 */
 808void guest_pagetable_clear_all(struct lg_cpu *cpu)
 809{
 810	release_all_pagetables(cpu->lg);
 811	/* We need the Guest kernel stack mapped again. */
 812	pin_stack_pages(cpu);
 813	/* And we need Switcher allocated. */
 814	if (!allocate_switcher_mapping(cpu))
 815		kill_guest(cpu, "Cannot populate switcher mapping");
 816}
 817
 818/*H:430
 819 * (iv) Switching page tables
 820 *
 821 * Now we've seen all the page table setting and manipulation, let's see
 822 * what happens when the Guest changes page tables (ie. changes the top-level
 823 * pgdir).  This occurs on almost every context switch.
 824 */
 825void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
 826{
 827	int newpgdir, repin = 0;
 828
 829	/*
 830	 * The very first time they call this, we're actually running without
 831	 * any page tables; we've been making it up.  Throw them away now.
 832	 */
 833	if (unlikely(cpu->linear_pages)) {
 834		release_all_pagetables(cpu->lg);
 835		cpu->linear_pages = false;
 836		/* Force allocation of a new pgdir. */
 837		newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
 838	} else {
 839		/* Look to see if we have this one already. */
 840		newpgdir = find_pgdir(cpu->lg, pgtable);
 841	}
 842
 843	/*
 844	 * If not, we allocate or mug an existing one: if it's a fresh one,
 845	 * repin gets set to 1.
 846	 */
 847	if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
 848		newpgdir = new_pgdir(cpu, pgtable, &repin);
 849	/* Change the current pgd index to the new one. */
 850	cpu->cpu_pgd = newpgdir;
 851	/*
 852	 * If it was completely blank, we map in the Guest kernel stack and
 853	 * the Switcher.
 854	 */
 855	if (repin)
 856		pin_stack_pages(cpu);
 857
 858	if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) {
 859		if (!allocate_switcher_mapping(cpu))
 860			kill_guest(cpu, "Cannot populate switcher mapping");
 861	}
 862}
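/*
 * How we get here: the Guest's paravirtualized write_cr3() never touches
 * cr3; it makes a hypercall instead.  Sketched (not quoted) from
 * arch/x86/lguest/boot.c:
 *
 *	static void lguest_write_cr3(unsigned long cr3)
 *	{
 *		lazy_hcall1(LHCALL_NEW_PGTABLE, cr3);
 *		...
 *	}
 *
 * The hypercall dispatcher then hands that physical address to us here.
 */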
 863/*:*/
 864
 865/*M:009
 866 * Since we throw away all mappings when a kernel mapping changes, our
 867 * performance sucks for guests using highmem.  In fact, a guest with
 868 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 869 * usually slower than a Guest with less memory.
 870 *
 871 * This, of course, cannot be fixed.  It would take some kind of... well, I
 872 * don't know, but the term "puissant code-fu" comes to mind.
 873:*/
 874
 875/*H:420
  876 * This is the routine which actually sets the page table entry for the
 877 * "idx"'th shadow page table.
 878 *
 879 * Normally, we can just throw out the old entry and replace it with 0: if they
 880 * use it demand_page() will put the new entry in.  We need to do this anyway:
 881 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 882 * is read from, and _PAGE_DIRTY when it's written to.
 883 *
 884 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 885 * these bits on PTEs immediately anyway.  This is done to save the CPU from
 886 * having to update them, but it helps us the same way: if they set
 887 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 888 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 889 */
 890static void __guest_set_pte(struct lg_cpu *cpu, int idx,
 891		       unsigned long vaddr, pte_t gpte)
 892{
 893	/* Look up the matching shadow page directory entry. */
 894	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
 895#ifdef CONFIG_X86_PAE
 896	pmd_t *spmd;
 897#endif
 898
 899	/* If the top level isn't present, there's no entry to update. */
 900	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
 901#ifdef CONFIG_X86_PAE
 902		spmd = spmd_addr(cpu, *spgd, vaddr);
 903		if (pmd_flags(*spmd) & _PAGE_PRESENT) {
 904#endif
 905			/* Otherwise, start by releasing the existing entry. */
 906			pte_t *spte = spte_addr(cpu, *spgd, vaddr);
 907			release_pte(*spte);
 908
 909			/*
 910			 * If they're setting this entry as dirty or accessed,
 911			 * we might as well put that entry they've given us in
 912			 * now.  This shaves 10% off a copy-on-write
 913			 * micro-benchmark.
 914			 */
  915			if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
 916				if (!check_gpte(cpu, gpte))
 917					return;
 918				set_pte(spte,
 919					gpte_to_spte(cpu, gpte,
 920						pte_flags(gpte) & _PAGE_DIRTY));
 921			} else {
 922				/*
 923				 * Otherwise kill it and we can demand_page()
 924				 * it in later.
 925				 */
 926				set_pte(spte, __pte(0));
 927			}
 928#ifdef CONFIG_X86_PAE
 929		}
 930#endif
 931	}
 932}
 933
 934/*H:410
 935 * Updating a PTE entry is a little trickier.
 936 *
 937 * We keep track of several different page tables (the Guest uses one for each
 938 * process, so it makes sense to cache at least a few).  Each of these have
 939 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 940 * all processes.  So when the page table above that address changes, we update
 941 * all the page tables, not just the current one.  This is rare.
 942 *
 943 * The benefit is that when we have to track a new page table, we can keep all
 944 * the kernel mappings.  This speeds up context switch immensely.
 945 */
 946void guest_set_pte(struct lg_cpu *cpu,
 947		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
 948{
 949	/* We don't let you remap the Switcher; we need it to get back! */
 950	if (vaddr >= switcher_addr) {
 951		kill_guest(cpu, "attempt to set pte into Switcher pages");
 952		return;
 953	}
 954
 955	/*
 956	 * Kernel mappings must be changed on all top levels.  Slow, but doesn't
 957	 * happen often.
 958	 */
 959	if (vaddr >= cpu->lg->kernel_address) {
 960		unsigned int i;
 961		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
 962			if (cpu->lg->pgdirs[i].pgdir)
 963				__guest_set_pte(cpu, i, vaddr, gpte);
 964	} else {
 965		/* Is this page table one we have a shadow for? */
 966		int pgdir = find_pgdir(cpu->lg, gpgdir);
 967		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
 968			/* If so, do the update. */
 969			__guest_set_pte(cpu, pgdir, vaddr, gpte);
 970	}
 971}
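/*
 * As with guest_new_pagetable(), this is reached via a hypercall
 * (LHCALL_SET_PTE): the Guest's set_pte paravirt hooks in boot.c tell us
 * which page table, which address and which new entry, and we bring the
 * shadow(s) into line here rather than trusting the Guest's copy.
 */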
 972
 973/*H:400
 974 * (iii) Setting up a page table entry when the Guest tells us one has changed.
 975 *
 976 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 977 * with the other side of page tables while we're here: what happens when the
 978 * Guest asks for a page table to be updated?
 979 *
 980 * We already saw that demand_page() will fill in the shadow page tables when
 981 * needed, so we can simply remove shadow page table entries whenever the Guest
 982 * tells us they've changed.  When the Guest tries to use the new entry it will
 983 * fault and demand_page() will fix it up.
 984 *
 985 * So with that in mind here's our code to update a (top-level) PGD entry:
 986 */
 987void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
 988{
 989	int pgdir;
 990
 991	if (idx > PTRS_PER_PGD) {
 992		kill_guest(&lg->cpus[0], "Attempt to set pgd %u/%u",
 993			   idx, PTRS_PER_PGD);
 994		return;
 995	}
 996
 997	/* If they're talking about a page table we have a shadow for... */
 998	pgdir = find_pgdir(lg, gpgdir);
 999	if (pgdir < ARRAY_SIZE(lg->pgdirs)) {
1000		/* ... throw it away. */
1001		release_pgd(lg->pgdirs[pgdir].pgdir + idx);
1002		/* That might have been the Switcher mapping, remap it. */
1003		if (!allocate_switcher_mapping(&lg->cpus[0])) {
1004			kill_guest(&lg->cpus[0],
1005				   "Cannot populate switcher mapping");
1006		}
1007		lg->pgdirs[pgdir].last_host_cpu = -1;
1008	}
1009}
1010
1011#ifdef CONFIG_X86_PAE
1012/* For setting a mid-level, we just throw everything away.  It's easy. */
1013void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
1014{
1015	guest_pagetable_clear_all(&lg->cpus[0]);
1016}
1017#endif
1018
1019/*H:500
1020 * (vii) Setting up the page tables initially.
1021 *
 1022 * When a Guest is first created, we initialize a shadow page table which
1023 * we will populate on future faults.  The Guest doesn't have any actual
1024 * pagetables yet, so we set linear_pages to tell demand_page() to fake it
1025 * for the moment.
1026 *
1027 * We do need the Switcher to be mapped at all times, so we allocate that
1028 * part of the Guest page table here.
1029 */
1030int init_guest_pagetable(struct lguest *lg)
1031{
1032	struct lg_cpu *cpu = &lg->cpus[0];
1033	int allocated = 0;
1034
1035	/* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */
1036	cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
1037	if (!allocated)
1038		return -ENOMEM;
1039
 1040	/* We start with a linear mapping until the Guest sets its own page tables. */
1041	cpu->linear_pages = true;
1042
1043	/* Allocate the page tables for the Switcher. */
1044	if (!allocate_switcher_mapping(cpu)) {
1045		release_all_pagetables(lg);
1046		return -ENOMEM;
1047	}
1048
1049	return 0;
1050}
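/*
 * Timing note: this runs from the Launcher's initial "create a Guest"
 * write to /dev/lguest, before the Guest executes a single instruction,
 * so by the time demand_page() is first called, cpu_pgd and linear_pages
 * are already set up.
 */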
1051
1052/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
1053void page_table_guest_data_init(struct lg_cpu *cpu)
1054{
1055	/*
1056	 * We tell the Guest that it can't use the virtual addresses
1057	 * used by the Switcher.  This trick is equivalent to 4GB -
1058	 * switcher_addr.
1059	 */
1060	u32 top = ~switcher_addr + 1;
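	/*
	 * (~x + 1 is two's-complement negation, so in 32 bits "top" is
	 * 2^32 - switcher_addr.  If switcher_addr were 0xFFC00000, for
	 * instance, top would be 0x00400000: the 4MB at the very top of
	 * the address space which the Switcher occupies.)
	 */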
1061
1062	/* We get the kernel address: above this is all kernel memory. */
1063	if (get_user(cpu->lg->kernel_address,
1064		     &cpu->lg->lguest_data->kernel_address)
1065		/*
1066		 * We tell the Guest that it can't use the top virtual
1067		 * addresses (used by the Switcher).
1068		 */
1069	    || put_user(top, &cpu->lg->lguest_data->reserve_mem)) {
1070		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
1071		return;
1072	}
1073
1074	/*
1075	 * In flush_user_mappings() we loop from 0 to
1076	 * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
1077	 * Switcher mappings, so check that now.
1078	 */
1079	if (cpu->lg->kernel_address >= switcher_addr)
1080		kill_guest(cpu, "bad kernel address %#lx",
1081				 cpu->lg->kernel_address);
1082}
1083
1084/* When a Guest dies, our cleanup is fairly simple. */
1085void free_guest_pagetable(struct lguest *lg)
1086{
1087	unsigned int i;
1088
1089	/* Throw away all page table pages. */
1090	release_all_pagetables(lg);
1091	/* Now free the top levels: free_page() can handle 0 just fine. */
1092	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
1093		free_page((long)lg->pgdirs[i].pgdir);
1094}
1095
1096/*H:481
1097 * This clears the Switcher mappings for cpu #i.
1098 */
1099static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i)
1100{
1101	unsigned long base = switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2;
1102	pte_t *pte;
1103
1104	/* Clear the mappings for both pages. */
1105	pte = find_spte(cpu, base, false, 0, 0);
1106	release_pte(*pte);
1107	set_pte(pte, __pte(0));
1108
1109	pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
1110	release_pte(*pte);
1111	set_pte(pte, __pte(0));
1112}
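/*
 * The address arithmetic above spells out the Switcher layout: page 0 of
 * the region is the Switcher code itself (shared by everyone), and each
 * host CPU then owns a two-page "struct lguest_pages" -- the Guest regs
 * page followed by the read-only IDT/GDT/TSS page -- hence
 * switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2.
 */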
1113
1114/*H:480
1115 * (vi) Mapping the Switcher when the Guest is about to run.
1116 *
1117 * The Switcher and the two pages for this CPU need to be visible in the Guest
1118 * (and not the pages for other CPUs).
1119 *
1120 * The pages for the pagetables have all been allocated before: we just need
1121 * to make sure the actual PTEs are up-to-date for the CPU we're about to run
1122 * on.
1123 */
1124void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
1125{
1126	unsigned long base;
1127	struct page *percpu_switcher_page, *regs_page;
1128	pte_t *pte;
1129	struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd];
1130
1131	/* Switcher page should always be mapped by now! */
1132	BUG_ON(!pgdir->switcher_mapped);
1133
1134	/* 
1135	 * Remember that we have two pages for each Host CPU, so we can run a
1136	 * Guest on each CPU without them interfering.  We need to make sure
1137	 * those pages are mapped correctly in the Guest, but since we usually
1138	 * run on the same CPU, we cache that, and only update the mappings
1139	 * when we move.
1140	 */
1141	if (pgdir->last_host_cpu == raw_smp_processor_id())
1142		return;
1143
1144	/* -1 means unknown so we remove everything. */
1145	if (pgdir->last_host_cpu == -1) {
1146		unsigned int i;
1147		for_each_possible_cpu(i)
1148			remove_switcher_percpu_map(cpu, i);
1149	} else {
1150		/* We know exactly what CPU mapping to remove. */
1151		remove_switcher_percpu_map(cpu, pgdir->last_host_cpu);
1152	}
1153
1154	/*
1155	 * When we're running the Guest, we want the Guest's "regs" page to
1156	 * appear where the first Switcher page for this CPU is.  This is an
1157	 * optimization: when the Switcher saves the Guest registers, it saves
1158	 * them into the first page of this CPU's "struct lguest_pages": if we
1159	 * make sure the Guest's register page is already mapped there, we
1160	 * don't have to copy them out again.
1161	 */
1162	/* Find the shadow PTE for this regs page. */
1163	base = switcher_addr + PAGE_SIZE
1164		+ raw_smp_processor_id() * sizeof(struct lguest_pages);
1165	pte = find_spte(cpu, base, false, 0, 0);
1166	regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT);
1167	get_page(regs_page);
1168	set_pte(pte, mk_pte(regs_page, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)));
1169
1170	/*
1171	 * We map the second page of the struct lguest_pages read-only in
1172	 * the Guest: the IDT, GDT and other things it's not supposed to
1173	 * change.
1174	 */
1175	pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
1176	percpu_switcher_page
1177		= lg_switcher_pages[1 + raw_smp_processor_id()*2 + 1];
1178	get_page(percpu_switcher_page);
1179	set_pte(pte, mk_pte(percpu_switcher_page,
1180			    __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));
1181
1182	pgdir->last_host_cpu = raw_smp_processor_id();
1183}
1184
1185/*H:490
1186 * We've made it through the page table code.  Perhaps our tired brains are
1187 * still processing the details, or perhaps we're simply glad it's over.
1188 *
1189 * If nothing else, note that all this complexity in juggling shadow page tables
1190 * in sync with the Guest's page tables is for one reason: for most Guests this
1191 * page table dance determines how bad performance will be.  This is why Xen
1192 * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
1193 * have implemented shadow page table support directly into hardware.
1194 *
1195 * There is just one file remaining in the Host.
1196 */
v4.6
   1/*P:700
   2 * The pagetable code, on the other hand, still shows the scars of
   3 * previous encounters.  It's functional, and as neat as it can be in the
   4 * circumstances, but be wary, for these things are subtle and break easily.
   5 * The Guest provides a virtual to physical mapping, but we can neither trust
   6 * it nor use it: we verify and convert it here then point the CPU to the
   7 * converted Guest pages when running the Guest.
   8:*/
   9
  10/* Copyright (C) Rusty Russell IBM Corporation 2013.
  11 * GPL v2 and any later version */
  12#include <linux/mm.h>
  13#include <linux/gfp.h>
  14#include <linux/types.h>
  15#include <linux/spinlock.h>
  16#include <linux/random.h>
  17#include <linux/percpu.h>
  18#include <asm/tlbflush.h>
  19#include <asm/uaccess.h>
  20#include "lg.h"
  21
  22/*M:008
  23 * We hold reference to pages, which prevents them from being swapped.
  24 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
  25 * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
  26 * could probably consider launching Guests as non-root.
  27:*/
  28
  29/*H:300
  30 * The Page Table Code
  31 *
  32 * We use two-level page tables for the Guest, or three-level with PAE.  If
  33 * you're not entirely comfortable with virtual addresses, physical addresses
  34 * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page
  35 * Table Handling" (with diagrams!).
  36 *
  37 * The Guest keeps page tables, but we maintain the actual ones here: these are
  38 * called "shadow" page tables.  Which is a very Guest-centric name: these are
  39 * the real page tables the CPU uses, although we keep them up to date to
  40 * reflect the Guest's.  (See what I mean about weird naming?  Since when do
  41 * shadows reflect anything?)
  42 *
  43 * Anyway, this is the most complicated part of the Host code.  There are seven
  44 * parts to this:
  45 *  (i) Looking up a page table entry when the Guest faults,
  46 *  (ii) Making sure the Guest stack is mapped,
  47 *  (iii) Setting up a page table entry when the Guest tells us one has changed,
  48 *  (iv) Switching page tables,
  49 *  (v) Flushing (throwing away) page tables,
  50 *  (vi) Mapping the Switcher when the Guest is about to run,
  51 *  (vii) Setting up the page tables initially.
  52:*/
  53
  54/*
  55 * The Switcher uses the complete top PTE page.  That's 1024 PTE entries (4MB)
  56 * or 512 PTE entries with PAE (2MB).
  57 */
  58#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
  59
  60/*
  61 * For PAE we need the PMD index as well. We use the last 2MB, so we
  62 * will need the last pmd entry of the last pmd page.
  63 */
  64#ifdef CONFIG_X86_PAE
  65#define CHECK_GPGD_MASK		_PAGE_PRESENT
  66#else
  67#define CHECK_GPGD_MASK		_PAGE_TABLE
  68#endif
  69
  70/*H:320
  71 * The page table code is curly enough to need helper functions to keep it
  72 * clear and clean.  The kernel itself provides many of them; one advantage
  73 * of insisting that the Guest and Host use the same CONFIG_X86_PAE setting.
  74 *
  75 * There are two functions which return pointers to the shadow (aka "real")
  76 * page tables.
  77 *
  78 * spgd_addr() takes the virtual address and returns a pointer to the top-level
  79 * page directory entry (PGD) for that address.  Since we keep track of several
  80 * page tables, the "i" argument tells us which one we're interested in (it's
  81 * usually the current one).
  82 */
  83static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
  84{
  85	unsigned int index = pgd_index(vaddr);
  86
   87	/* Return a pointer to the index'th pgd entry for the i'th page table. */
  88	return &cpu->lg->pgdirs[i].pgdir[index];
  89}
  90
  91#ifdef CONFIG_X86_PAE
  92/*
  93 * This routine then takes the PGD entry given above, which contains the
  94 * address of the PMD page.  It then returns a pointer to the PMD entry for the
  95 * given address.
  96 */
  97static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
  98{
  99	unsigned int index = pmd_index(vaddr);
 100	pmd_t *page;
 101
 102	/* You should never call this if the PGD entry wasn't valid */
 103	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
 104	page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
 105
 106	return &page[index];
 107}
 108#endif
 109
 110/*
 111 * This routine then takes the page directory entry returned above, which
 112 * contains the address of the page table entry (PTE) page.  It then returns a
 113 * pointer to the PTE entry for the given address.
 114 */
 115static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
 116{
 117#ifdef CONFIG_X86_PAE
 118	pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
 119	pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);
 120
 121	/* You should never call this if the PMD entry wasn't valid */
 122	BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
 123#else
 124	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
 125	/* You should never call this if the PGD entry wasn't valid */
 126	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
 127#endif
 128
 129	return &page[pte_index(vaddr)];
 130}
 131
 132/*
 133 * These functions are just like the above, except they access the Guest
 134 * page tables.  Hence they return a Guest address.
 135 */
 136static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
 137{
 138	unsigned int index = vaddr >> (PGDIR_SHIFT);
 139	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
 140}
 141
 142#ifdef CONFIG_X86_PAE
 143/* Follow the PGD to the PMD. */
 144static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
 145{
 146	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
 147	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
 148	return gpage + pmd_index(vaddr) * sizeof(pmd_t);
 149}
 150
 151/* Follow the PMD to the PTE. */
 152static unsigned long gpte_addr(struct lg_cpu *cpu,
 153			       pmd_t gpmd, unsigned long vaddr)
 154{
 155	unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
 156
 157	BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
 158	return gpage + pte_index(vaddr) * sizeof(pte_t);
 159}
 160#else
 161/* Follow the PGD to the PTE (no mid-level for !PAE). */
 162static unsigned long gpte_addr(struct lg_cpu *cpu,
 163				pgd_t gpgd, unsigned long vaddr)
 164{
 165	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
 166
 167	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
 168	return gpage + pte_index(vaddr) * sizeof(pte_t);
 169}
 170#endif
 171/*:*/
 172
 173/*M:007
 174 * get_pfn is slow: we could probably try to grab batches of pages here as
 175 * an optimization (ie. pre-faulting).
 176:*/
 177
 178/*H:350
 179 * This routine takes a page number given by the Guest and converts it to
 180 * an actual, physical page number.  It can fail for several reasons: the
 181 * virtual address might not be mapped by the Launcher, the write flag is set
 182 * and the page is read-only, or the write flag was set and the page was
 183 * shared so had to be copied, but we ran out of memory.
 184 *
 185 * This holds a reference to the page, so release_pte() is careful to put that
 186 * back.
 187 */
 188static unsigned long get_pfn(unsigned long virtpfn, int write)
 189{
 190	struct page *page;
 191
 192	/* gup me one page at this address please! */
 193	if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
 194		return page_to_pfn(page);
 195
 196	/* This value indicates failure. */
 197	return -1UL;
 198}
 199
 200/*H:340
 201 * Converting a Guest page table entry to a shadow (ie. real) page table
 202 * entry can be a little tricky.  The flags are (almost) the same, but the
 203 * Guest PTE contains a virtual page number: the CPU needs the real page
 204 * number.
 205 */
 206static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
 207{
 208	unsigned long pfn, base, flags;
 209
 210	/*
 211	 * The Guest sets the global flag, because it thinks that it is using
 212	 * PGE.  We only told it to use PGE so it would tell us whether it was
 213	 * flushing a kernel mapping or a userspace mapping.  We don't actually
 214	 * use the global bit, so throw it away.
 215	 */
 216	flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);
 217
 218	/* The Guest's pages are offset inside the Launcher. */
 219	base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;
 220
 221	/*
 222	 * We need a temporary "unsigned long" variable to hold the answer from
 223	 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
 224	 * fit in spte.pfn.  get_pfn() finds the real physical number of the
 225	 * page, given the virtual number.
 226	 */
 227	pfn = get_pfn(base + pte_pfn(gpte), write);
 228	if (pfn == -1UL) {
 229		kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
 230		/*
 231		 * When we destroy the Guest, we'll go through the shadow page
 232		 * tables and release_pte() them.  Make sure we don't think
 233		 * this one is valid!
 234		 */
 235		flags = 0;
 236	}
 237	/* Now we assemble our shadow PTE from the page number and flags. */
 238	return pfn_pte(pfn, __pgprot(flags));
 239}
 240
 241/*H:460 And to complete the chain, release_pte() looks like this: */
 242static void release_pte(pte_t pte)
 243{
 244	/*
 245	 * Remember that get_user_pages_fast() took a reference to the page, in
 246	 * get_pfn()?  We have to put it back now.
 247	 */
 248	if (pte_flags(pte) & _PAGE_PRESENT)
 249		put_page(pte_page(pte));
 250}
 251/*:*/
 252
 253static bool gpte_in_iomem(struct lg_cpu *cpu, pte_t gpte)
 254{
 255	/* We don't handle large pages. */
 256	if (pte_flags(gpte) & _PAGE_PSE)
 257		return false;
 258
 259	return (pte_pfn(gpte) >= cpu->lg->pfn_limit
 260		&& pte_pfn(gpte) < cpu->lg->device_limit);
 261}
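/*
 * This helper is new relative to the v3.15 code above: Guest frames from
 * pfn_limit up to device_limit are treated as device (MMIO) memory.  We
 * never shadow those; instead demand_page() reports the address back via
 * its *iomem argument so the Launcher can emulate the access itself (see
 * the H:330 comment below).
 */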
 262
 263static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
 264{
 265	if ((pte_flags(gpte) & _PAGE_PSE) ||
 266	    pte_pfn(gpte) >= cpu->lg->pfn_limit) {
 267		kill_guest(cpu, "bad page table entry");
 268		return false;
 269	}
 270	return true;
 271}
 272
 273static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
 274{
 275	if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
 276	    (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) {
 277		kill_guest(cpu, "bad page directory entry");
 278		return false;
 279	}
 280	return true;
 281}
 282
 283#ifdef CONFIG_X86_PAE
 284static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
 285{
 286	if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
 287	    (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) {
 288		kill_guest(cpu, "bad page middle directory entry");
 289		return false;
 290	}
 291	return true;
 292}
 293#endif
 294
 295/*H:331
 296 * This is the core routine to walk the shadow page tables and find the page
 297 * table entry for a specific address.
 298 *
 299 * If allocate is set, then we allocate any missing levels, setting the flags
 300 * on the new page directory and mid-level directories using the arguments
 301 * (which are copied from the Guest's page table entries).
 302 */
 303static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
 304			int pgd_flags, int pmd_flags)
 305{
 306	pgd_t *spgd;
 307	/* Mid level for PAE. */
 308#ifdef CONFIG_X86_PAE
 309	pmd_t *spmd;
 310#endif
 311
 312	/* Get top level entry. */
 313	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
 314	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
 315		/* No shadow entry: allocate a new shadow PTE page. */
 316		unsigned long ptepage;
 317
 318		/* If they didn't want us to allocate anything, stop. */
 319		if (!allocate)
 320			return NULL;
 321
 322		ptepage = get_zeroed_page(GFP_KERNEL);
 323		/*
 324		 * This is not really the Guest's fault, but killing it is
 325		 * simple for this corner case.
 326		 */
 327		if (!ptepage) {
 328			kill_guest(cpu, "out of memory allocating pte page");
 329			return NULL;
 330		}
 331		/*
 332		 * And we copy the flags to the shadow PGD entry.  The page
 333		 * number in the shadow PGD is the page we just allocated.
 334		 */
 335		set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags));
 336	}
 337
 338	/*
 339	 * Intel's Physical Address Extension actually uses three levels of
 340	 * page tables, so we need to look in the mid-level.
 341	 */
 342#ifdef CONFIG_X86_PAE
 343	/* Now look at the mid-level shadow entry. */
 344	spmd = spmd_addr(cpu, *spgd, vaddr);
 345
 346	if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
 347		/* No shadow entry: allocate a new shadow PTE page. */
 348		unsigned long ptepage;
 349
 350		/* If they didn't want us to allocate anything, stop. */
 351		if (!allocate)
 352			return NULL;
 353
 354		ptepage = get_zeroed_page(GFP_KERNEL);
 355
 356		/*
 357		 * This is not really the Guest's fault, but killing it is
 358		 * simple for this corner case.
 359		 */
 360		if (!ptepage) {
 361			kill_guest(cpu, "out of memory allocating pmd page");
 362			return NULL;
 363		}
 364
 365		/*
 366		 * And we copy the flags to the shadow PMD entry.  The page
 367		 * number in the shadow PMD is the page we just allocated.
 368		 */
 369		set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags));
 370	}
 371#endif
 372
 373	/* Get the pointer to the shadow PTE entry we're going to set. */
 374	return spte_addr(cpu, *spgd, vaddr);
 375}
 376
 377/*H:330
 378 * (i) Looking up a page table entry when the Guest faults.
 379 *
 380 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 381 * come here.  That's because we only set up the shadow page tables lazily as
 382 * they're needed, so we get page faults all the time and quietly fix them up
 383 * and return to the Guest without it knowing.
 384 *
 385 * If we fixed up the fault (ie. we mapped the address), this routine returns
 386 * true.  Otherwise, it was a real fault and we need to tell the Guest.
 387 *
 388 * There's a corner case: they're trying to access memory between
 389 * pfn_limit and device_limit, which is I/O memory.  In this case, we
  390 * return false and set @iomem to the physical address, so that the
 391 * Launcher can handle the instruction manually.
 392 */
 393bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode,
 394		 unsigned long *iomem)
 395{
 396	unsigned long gpte_ptr;
 397	pte_t gpte;
 398	pte_t *spte;
 399	pmd_t gpmd;
 400	pgd_t gpgd;
 401
 402	*iomem = 0;
 403
 404	/* We never demand page the Switcher, so trying is a mistake. */
 405	if (vaddr >= switcher_addr)
 406		return false;
 407
 408	/* First step: get the top-level Guest page table entry. */
 409	if (unlikely(cpu->linear_pages)) {
 410		/* Faking up a linear mapping. */
 411		gpgd = __pgd(CHECK_GPGD_MASK);
 412	} else {
 413		gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
 414		/* Toplevel not present?  We can't map it in. */
 415		if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
 416			return false;
 417
 418		/* 
 419		 * This kills the Guest if it has weird flags or tries to
 420		 * refer to a "physical" address outside the bounds.
 421		 */
 422		if (!check_gpgd(cpu, gpgd))
 423			return false;
 424	}
 425
 426	/* This "mid-level" entry is only used for non-linear, PAE mode. */
 427	gpmd = __pmd(_PAGE_TABLE);
 428
 429#ifdef CONFIG_X86_PAE
 430	if (likely(!cpu->linear_pages)) {
 431		gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
 432		/* Middle level not present?  We can't map it in. */
 433		if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
 434			return false;
 435
 436		/* 
 437		 * This kills the Guest if it has weird flags or tries to
 438		 * refer to a "physical" address outside the bounds.
 439		 */
 440		if (!check_gpmd(cpu, gpmd))
 441			return false;
 442	}
 443
 444	/*
 445	 * OK, now we look at the lower level in the Guest page table: keep its
 446	 * address, because we might update it later.
 447	 */
 448	gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
 449#else
 450	/*
 451	 * OK, now we look at the lower level in the Guest page table: keep its
 452	 * address, because we might update it later.
 453	 */
 454	gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
 455#endif
 456
 457	if (unlikely(cpu->linear_pages)) {
 458		/* Linear?  Make up a PTE which points to same page. */
 459		gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
 460	} else {
 461		/* Read the actual PTE value. */
 462		gpte = lgread(cpu, gpte_ptr, pte_t);
 463	}
 464
 465	/* If this page isn't in the Guest page tables, we can't page it in. */
 466	if (!(pte_flags(gpte) & _PAGE_PRESENT))
 467		return false;
 468
 469	/*
 470	 * Check they're not trying to write to a page the Guest wants
  471	 * read-only (errcode & 2 means it was a write).
 472	 */
 473	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
 474		return false;
 475
  476	/* User access to a kernel-only page? (errcode & 4 means userspace access) */
 477	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
 478		return false;
 479
 480	/* If they're accessing io memory, we expect a fault. */
 481	if (gpte_in_iomem(cpu, gpte)) {
 482		*iomem = (pte_pfn(gpte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
 483		return false;
 484	}
 485
 486	/*
 487	 * Check that the Guest PTE flags are OK, and the page number is below
 488	 * the pfn_limit (ie. not mapping the Launcher binary).
 489	 */
 490	if (!check_gpte(cpu, gpte))
 491		return false;
 492
 493	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
 494	gpte = pte_mkyoung(gpte);
 495	if (errcode & 2)
 496		gpte = pte_mkdirty(gpte);
 497
 498	/* Get the pointer to the shadow PTE entry we're going to set. */
 499	spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd));
 500	if (!spte)
 501		return false;
 502
 503	/*
 504	 * If there was a valid shadow PTE entry here before, we release it.
 505	 * This can happen with a write to a previously read-only entry.
 506	 */
 507	release_pte(*spte);
 508
 509	/*
 510	 * If this is a write, we insist that the Guest page is writable (the
 511	 * final arg to gpte_to_spte()).
 512	 */
 513	if (pte_dirty(gpte))
 514		*spte = gpte_to_spte(cpu, gpte, 1);
 515	else
 516		/*
 517		 * If this is a read, don't set the "writable" bit in the page
 518		 * table entry, even if the Guest says it's writable.  That way
 519		 * we will come back here when a write does actually occur, so
 520		 * we can update the Guest's _PAGE_DIRTY flag.
 521		 */
 522		set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
 523
 524	/*
 525	 * Finally, we write the Guest PTE entry back: we've set the
 526	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
 527	 */
 528	if (likely(!cpu->linear_pages))
 529		lgwrite(cpu, gpte_ptr, pte_t, gpte);
 530
 531	/*
 532	 * The fault is fixed, the page table is populated, the mapping
 533	 * manipulated, the result returned and the code complete.  A small
 534	 * delay and a trace of alliteration are the only indications the Guest
 535	 * has that a page fault occurred at all.
 536	 */
 537	return true;
 538}
 539
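/*
 * For a feel of how this gets used: the Host's page-fault path does
 * something of this shape (a sketch only; the fault-address variable and
 * the two helpers named here are illustrative, not the real ones):
 *
 *	unsigned long iomem;
 *
 *	if (!demand_page(cpu, fault_addr, errcode, &iomem)) {
 *		if (iomem)
 *			give_iomem_addr_to_launcher(cpu, iomem);  // hypothetical
 *		else
 *			reflect_fault_into_guest(cpu);            // hypothetical
 *	}
 */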
 540/*H:360
 541 * (ii) Making sure the Guest stack is mapped.
 542 *
 543 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
 544 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
 545 * we've seen that logic is quite long, and usually the stack pages are already
 546 * mapped, so it's overkill.
 547 *
 548 * This is a quick version which answers the question: is this virtual address
 549 * mapped by the shadow page tables, and is it writable?
 550 */
 551static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
 552{
 553	pte_t *spte;
 554	unsigned long flags;
 555
 556	/* You can't put your stack in the Switcher! */
 557	if (vaddr >= switcher_addr)
 558		return false;
 559
 560	/* If there's no shadow PTE, it's not writable. */
 561	spte = find_spte(cpu, vaddr, false, 0, 0);
 562	if (!spte)
 563		return false;
 564
 565	/*
 566	 * Check the flags on the pte entry itself: it must be present and
 567	 * writable.
 568	 */
 569	flags = pte_flags(*spte);
 570	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
 571}
 572
 573/*
 574 * So, when pin_stack_pages() asks us to pin a page, we check if it's already
 575 * in the page tables, and if not, we call demand_page() with error code 2
 576 * (meaning "write").
 577 */
 578void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
 579{
 580	unsigned long iomem;
 581
 582	if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2, &iomem))
 583		kill_guest(cpu, "bad stack page %#lx", vaddr);
 584}
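/*
 * (A note on that magic "2": in the x86 page-fault error code, bit 0 means
 * "the page was present", bit 1 means "it was a write" and bit 2 means "it
 * came from userspace".  Passing 2 therefore means "pretend the Guest
 * kernel wrote to this stack page", which is exactly the access we want
 * mapped.)
 */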
 585/*:*/
 586
 587#ifdef CONFIG_X86_PAE
 588static void release_pmd(pmd_t *spmd)
 589{
 590	/* If the entry's not present, there's nothing to release. */
 591	if (pmd_flags(*spmd) & _PAGE_PRESENT) {
 592		unsigned int i;
 593		pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
 594		/* For each entry in the page, we might need to release it. */
 595		for (i = 0; i < PTRS_PER_PTE; i++)
 596			release_pte(ptepage[i]);
 597		/* Now we can free the page of PTEs */
 598		free_page((long)ptepage);
 599		/* And zero out the PMD entry so we never release it twice. */
 600		set_pmd(spmd, __pmd(0));
 601	}
 602}
 603
 604static void release_pgd(pgd_t *spgd)
 605{
 606	/* If the entry's not present, there's nothing to release. */
 607	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
 608		unsigned int i;
 609		pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
 610
 611		for (i = 0; i < PTRS_PER_PMD; i++)
 612			release_pmd(&pmdpage[i]);
 613
 614		/* Now we can free the page of PMDs */
 615		free_page((long)pmdpage);
 616		/* And zero out the PGD entry so we never release it twice. */
 617		set_pgd(spgd, __pgd(0));
 618	}
 619}
 620
 621#else /* !CONFIG_X86_PAE */
 622/*H:450
 623 * If we chase down the release_pgd() code, the non-PAE version looks like
 624 * this.  The PAE version is almost identical, but instead of calling
 625 * release_pte it calls release_pmd(), which looks much like this.
 626 */
 627static void release_pgd(pgd_t *spgd)
 628{
 629	/* If the entry's not present, there's nothing to release. */
 630	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
 631		unsigned int i;
 632		/*
 633		 * Converting the pfn to find the actual PTE page is easy: turn
 634		 * the page number into a physical address, then convert to a
 635		 * virtual address (easy for kernel pages like this one).
 636		 */
 637		pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
 638		/* For each entry in the page, we might need to release it. */
 639		for (i = 0; i < PTRS_PER_PTE; i++)
 640			release_pte(ptepage[i]);
 641		/* Now we can free the page of PTEs */
 642		free_page((long)ptepage);
 643		/* And zero out the PGD entry so we never release it twice. */
 644		*spgd = __pgd(0);
 645	}
 646}
 647#endif
 648
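/*
 * A concrete example of the pfn juggling above (numbers invented): if the
 * shadow PGD entry holds page frame number 0x1234, the PTE page sits at
 * physical address 0x1234 << 12 = 0x1234000, and __va() turns that into
 * the Host-kernel virtual address we can actually index as ptepage[i].
 */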
 649/*H:445
 650 * We saw flush_user_mappings() twice: once from the flush_user_mappings()
 651 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
 652 * It simply releases every PTE page from 0 up to the Guest's kernel address.
 653 */
 654static void flush_user_mappings(struct lguest *lg, int idx)
 655{
 656	unsigned int i;
 657	/* Release every pgd entry up to the kernel's address. */
 658	for (i = 0; i < pgd_index(lg->kernel_address); i++)
 659		release_pgd(lg->pgdirs[idx].pgdir + i);
 660}
 661
 662/*H:440
 663 * (v) Flushing (throwing away) page tables,
 664 *
 665 * The Guest has a hypercall to throw away the page tables: it's used when a
 666 * large number of mappings have been changed.
 667 */
 668void guest_pagetable_flush_user(struct lg_cpu *cpu)
 669{
 670	/* Drop the userspace part of the current page table. */
 671	flush_user_mappings(cpu->lg, cpu->cpu_pgd);
 672}
 673/*:*/
 674
 675/* We walk down the guest page tables to get a guest-physical address */
 676bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr)
 677{
 678	pgd_t gpgd;
 679	pte_t gpte;
 680#ifdef CONFIG_X86_PAE
 681	pmd_t gpmd;
 682#endif
 683
 684	/* Still not set up?  Just map 1:1. */
 685	if (unlikely(cpu->linear_pages)) {
 686		*paddr = vaddr;
 687		return true;
 688	}
 689
 690	/* First step: get the top-level Guest page table entry. */
 691	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
 692	/* Toplevel not present?  We can't map it in. */
 693	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
 694		goto fail;
 695
 696#ifdef CONFIG_X86_PAE
 697	gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
 698	if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
 699		goto fail;
 700	gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
 701#else
 702	gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
 703#endif
 704	if (!(pte_flags(gpte) & _PAGE_PRESENT))
 705		goto fail;
 706
 707	*paddr = pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
 708	return true;
 709
 710fail:
 711	*paddr = -1UL;
 712	return false;
 713}
 714
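/*
 * To make that last calculation concrete (invented numbers): if the Guest
 * PTE holds pfn 0x1f and the Guest asked about vaddr 0xc0001234, the
 * in-page offset is 0x234, so *paddr = 0x1f * 0x1000 | 0x234 = 0x1f234.
 */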
 715/*
 716 * This is the version we normally use: kills the Guest if it uses a
 717 * bad address
 718 */
 719unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
 720{
 721	unsigned long paddr;
 722
 723	if (!__guest_pa(cpu, vaddr, &paddr))
 724		kill_guest(cpu, "Bad address %#lx", vaddr);
 725	return paddr;
 726}
 727
 728/*
 729 * We keep several page tables.  This is a simple routine to find the page
 730 * table (if any) corresponding to this top-level address the Guest has given
 731 * us.
 732 */
 733static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
 734{
 735	unsigned int i;
 736	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
 737		if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
 738			break;
 739	return i;
 740}
 741
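/*
 * Note the convention: "not found" is signalled by returning
 * ARRAY_SIZE(lg->pgdirs) rather than -1, so the callers below all test
 * against that:
 *
 *	pgdir = find_pgdir(lg, gpgdir);
 *	if (pgdir == ARRAY_SIZE(lg->pgdirs))
 *		... no shadow for this top level yet ...
 */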
 742/*H:435
 743 * And this is us, creating the new page directory.  If we really do
 744 * allocate a new one (and so the kernel parts are not there), we set
 745 * blank_pgdir.
 746 */
 747static unsigned int new_pgdir(struct lg_cpu *cpu,
 748			      unsigned long gpgdir,
 749			      int *blank_pgdir)
 750{
 751	unsigned int next;
 752
 753	/*
 754	 * We pick one entry at random to throw out.  Choosing the Least
 755	 * Recently Used might be better, but this is easy.
 756	 */
 757	next = prandom_u32() % ARRAY_SIZE(cpu->lg->pgdirs);
 758	/* If it's never been allocated at all before, try now. */
 759	if (!cpu->lg->pgdirs[next].pgdir) {
 760		cpu->lg->pgdirs[next].pgdir =
 761					(pgd_t *)get_zeroed_page(GFP_KERNEL);
 762		/* If the allocation fails, just keep using the one we have */
 763		if (!cpu->lg->pgdirs[next].pgdir)
 764			next = cpu->cpu_pgd;
 765		else {
 766			/*
 767			 * This is a blank page, so there are no kernel
 768			 * mappings: caller must map the stack!
 769			 */
 770			*blank_pgdir = 1;
 771		}
 772	}
 773	/* Record which Guest toplevel this shadows. */
 774	cpu->lg->pgdirs[next].gpgdir = gpgdir;
 775	/* Release all the non-kernel mappings. */
 776	flush_user_mappings(cpu->lg, next);
 777
 778	/* This hasn't run on any CPU at all. */
 779	cpu->lg->pgdirs[next].last_host_cpu = -1;
 780
 781	return next;
 782}
 783
 784/*H:501
 785 * We do need the Switcher code mapped at all times, so we allocate that
 786 * part of the Guest page table here.  We map the Switcher code immediately,
 787 * but defer mapping of the Guest register page and the IDT/GDT etc. page
 788 * until just before we run the Guest in map_switcher_in_guest().
 789 *
 790 * We *could* do this setup in map_switcher_in_guest(), but at that point
 791 * we have interrupts disabled, and allocating pages in that state is fraught: we
 792 * can't sleep if we need to free up some memory.
 793 */
 794static bool allocate_switcher_mapping(struct lg_cpu *cpu)
 795{
 796	int i;
 797
 798	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
 799		pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true,
 800				       CHECK_GPGD_MASK, _PAGE_TABLE);
 801		if (!pte)
 802			return false;
 803
 804		/*
 805		 * Map the switcher page if not already there.  It might
 806		 * already be there because we call allocate_switcher_mapping()
 807		 * in guest_set_pgd() just in case it did discard our Switcher
 808		 * mapping, but it probably didn't.
 809		 */
 810		if (i == 0 && !(pte_flags(*pte) & _PAGE_PRESENT)) {
 811			/* Get a reference to the Switcher page. */
 812			get_page(lg_switcher_pages[0]);
  813			/* Create a read-only, executable, kernel-style PTE */
 814			set_pte(pte,
 815				mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX));
 816		}
 817	}
 818	cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true;
 819	return true;
 820}
 821
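/*
 * A rough map of the Switcher region we've just made room for (pieced
 * together from the mapping code here and in map_switcher_in_guest(), so
 * treat it as a sketch):
 *
 *	switcher_addr + 0:		the Switcher code page (read-only,
 *					executable, mapped right here)
 *	switcher_addr + PAGE_SIZE:	CPU 0's "struct lguest_pages": two
 *					pages, the regs page then a read-only
 *					page, and so on for each Host CPU.
 *
 * Only the code page is filled in now; the per-CPU pairs are wired up just
 * before we run the Guest.
 */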
 822/*H:470
 823 * Finally, a routine which throws away everything: all PGD entries in all
 824 * the shadow page tables, including the Guest's kernel mappings.  This is used
 825 * when we destroy the Guest.
 826 */
 827static void release_all_pagetables(struct lguest *lg)
 828{
 829	unsigned int i, j;
 830
 831	/* Every shadow pagetable this Guest has */
 832	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) {
 833		if (!lg->pgdirs[i].pgdir)
 834			continue;
 835
 836		/* Every PGD entry. */
 837		for (j = 0; j < PTRS_PER_PGD; j++)
 838			release_pgd(lg->pgdirs[i].pgdir + j);
 839		lg->pgdirs[i].switcher_mapped = false;
 840		lg->pgdirs[i].last_host_cpu = -1;
 841	}
 842}
 843
 844/*
 845 * We also throw away everything when a Guest tells us it's changed a kernel
 846 * mapping.  Since kernel mappings are in every page table, it's easiest to
 847 * throw them all away.  This traps the Guest in amber for a while as
 848 * everything faults back in, but it's rare.
 849 */
 850void guest_pagetable_clear_all(struct lg_cpu *cpu)
 851{
 852	release_all_pagetables(cpu->lg);
 853	/* We need the Guest kernel stack mapped again. */
 854	pin_stack_pages(cpu);
 855	/* And we need Switcher allocated. */
 856	if (!allocate_switcher_mapping(cpu))
 857		kill_guest(cpu, "Cannot populate switcher mapping");
 858}
 859
 860/*H:430
 861 * (iv) Switching page tables
 862 *
 863 * Now we've seen all the page table setting and manipulation, let's see
 864 * what happens when the Guest changes page tables (ie. changes the top-level
 865 * pgdir).  This occurs on almost every context switch.
 866 */
 867void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
 868{
 869	int newpgdir, repin = 0;
 870
 871	/*
 872	 * The very first time they call this, we're actually running without
 873	 * any page tables; we've been making it up.  Throw them away now.
 874	 */
 875	if (unlikely(cpu->linear_pages)) {
 876		release_all_pagetables(cpu->lg);
 877		cpu->linear_pages = false;
 878		/* Force allocation of a new pgdir. */
 879		newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
 880	} else {
 881		/* Look to see if we have this one already. */
 882		newpgdir = find_pgdir(cpu->lg, pgtable);
 883	}
 884
 885	/*
 886	 * If not, we allocate or mug an existing one: if it's a fresh one,
 887	 * repin gets set to 1.
 888	 */
 889	if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
 890		newpgdir = new_pgdir(cpu, pgtable, &repin);
 891	/* Change the current pgd index to the new one. */
 892	cpu->cpu_pgd = newpgdir;
 893	/*
 894	 * If it was completely blank, we map in the Guest kernel stack and
 895	 * the Switcher.
 896	 */
 897	if (repin)
 898		pin_stack_pages(cpu);
 899
 900	if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) {
 901		if (!allocate_switcher_mapping(cpu))
 902			kill_guest(cpu, "Cannot populate switcher mapping");
 903	}
 904}
 905/*:*/
 906
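/*
 * If you're wondering who calls this: on the Guest side it's simply the
 * write_cr3 paravirt hook turned into a hypercall.  Roughly (the real code
 * lives in arch/x86/lguest/boot.c; the wrapper shown here is a simplified
 * paraphrase, not the verbatim source):
 *
 *	static void lguest_write_cr3(unsigned long cr3)
 *	{
 *		lazy_hcall1(LHCALL_NEW_PGTABLE, cr3);
 *	}
 */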
 907/*M:009
 908 * Since we throw away all mappings when a kernel mapping changes, our
  909 * performance sucks for Guests using highmem.  In fact, a Guest with
 910 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 911 * usually slower than a Guest with less memory.
 912 *
 913 * This, of course, cannot be fixed.  It would take some kind of... well, I
 914 * don't know, but the term "puissant code-fu" comes to mind.
 915:*/
 916
 917/*H:420
  918 * This is the routine which actually sets the page table entry for the
 919 * "idx"'th shadow page table.
 920 *
 921 * Normally, we can just throw out the old entry and replace it with 0: if they
  922 * use it, demand_page() will put the new entry in.  We need to do this anyway:
 923 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 924 * is read from, and _PAGE_DIRTY when it's written to.
 925 *
 926 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 927 * these bits on PTEs immediately anyway.  This is done to save the CPU from
 928 * having to update them, but it helps us the same way: if they set
 929 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 930 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 931 */
 932static void __guest_set_pte(struct lg_cpu *cpu, int idx,
 933		       unsigned long vaddr, pte_t gpte)
 934{
 935	/* Look up the matching shadow page directory entry. */
 936	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
 937#ifdef CONFIG_X86_PAE
 938	pmd_t *spmd;
 939#endif
 940
 941	/* If the top level isn't present, there's no entry to update. */
 942	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
 943#ifdef CONFIG_X86_PAE
 944		spmd = spmd_addr(cpu, *spgd, vaddr);
 945		if (pmd_flags(*spmd) & _PAGE_PRESENT) {
 946#endif
 947			/* Otherwise, start by releasing the existing entry. */
 948			pte_t *spte = spte_addr(cpu, *spgd, vaddr);
 949			release_pte(*spte);
 950
 951			/*
 952			 * If they're setting this entry as dirty or accessed,
 953			 * we might as well put that entry they've given us in
 954			 * now.  This shaves 10% off a copy-on-write
 955			 * micro-benchmark.
 956			 */
 957			if ((pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED))
 958			    && !gpte_in_iomem(cpu, gpte)) {
 959				if (!check_gpte(cpu, gpte))
 960					return;
 961				set_pte(spte,
 962					gpte_to_spte(cpu, gpte,
 963						pte_flags(gpte) & _PAGE_DIRTY));
 964			} else {
 965				/*
 966				 * Otherwise kill it and we can demand_page()
 967				 * it in later.
 968				 */
 969				set_pte(spte, __pte(0));
 970			}
 971#ifdef CONFIG_X86_PAE
 972		}
 973#endif
 974	}
 975}
 976
 977/*H:410
 978 * Updating a PTE entry is a little trickier.
 979 *
 980 * We keep track of several different page tables (the Guest uses one for each
  981 * process, so it makes sense to cache at least a few).  Each of these has
 982 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 983 * all processes.  So when the page table above that address changes, we update
 984 * all the page tables, not just the current one.  This is rare.
 985 *
 986 * The benefit is that when we have to track a new page table, we can keep all
  987 * the kernel mappings.  This speeds up context switches immensely.
 988 */
 989void guest_set_pte(struct lg_cpu *cpu,
 990		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
 991{
 992	/* We don't let you remap the Switcher; we need it to get back! */
 993	if (vaddr >= switcher_addr) {
 994		kill_guest(cpu, "attempt to set pte into Switcher pages");
 995		return;
 996	}
 997
 998	/*
 999	 * Kernel mappings must be changed on all top levels.  Slow, but doesn't
1000	 * happen often.
1001	 */
1002	if (vaddr >= cpu->lg->kernel_address) {
1003		unsigned int i;
1004		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
1005			if (cpu->lg->pgdirs[i].pgdir)
1006				__guest_set_pte(cpu, i, vaddr, gpte);
1007	} else {
1008		/* Is this page table one we have a shadow for? */
1009		int pgdir = find_pgdir(cpu->lg, gpgdir);
1010		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
1011			/* If so, do the update. */
1012			__guest_set_pte(cpu, pgdir, vaddr, gpte);
1013	}
1014}
1015
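/*
 * An example of that split (addresses invented): with kernel_address at
 * 0xc0000000, a PTE update for 0xc01a2000 is a kernel mapping and gets
 * pushed into every shadow pgdir we're caching, whereas one for 0x0804a000
 * only touches the single shadow (if we have one) for that process's
 * page table.
 */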
1016/*H:400
1017 * (iii) Setting up a page table entry when the Guest tells us one has changed.
1018 *
1019 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
1020 * with the other side of page tables while we're here: what happens when the
1021 * Guest asks for a page table to be updated?
1022 *
1023 * We already saw that demand_page() will fill in the shadow page tables when
1024 * needed, so we can simply remove shadow page table entries whenever the Guest
1025 * tells us they've changed.  When the Guest tries to use the new entry it will
1026 * fault and demand_page() will fix it up.
1027 *
1028 * So with that in mind here's our code to update a (top-level) PGD entry:
1029 */
1030void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
1031{
1032	int pgdir;
1033
 1034	if (idx >= PTRS_PER_PGD) {
1035		kill_guest(&lg->cpus[0], "Attempt to set pgd %u/%u",
1036			   idx, PTRS_PER_PGD);
1037		return;
1038	}
1039
1040	/* If they're talking about a page table we have a shadow for... */
1041	pgdir = find_pgdir(lg, gpgdir);
1042	if (pgdir < ARRAY_SIZE(lg->pgdirs)) {
1043		/* ... throw it away. */
1044		release_pgd(lg->pgdirs[pgdir].pgdir + idx);
1045		/* That might have been the Switcher mapping, remap it. */
1046		if (!allocate_switcher_mapping(&lg->cpus[0])) {
1047			kill_guest(&lg->cpus[0],
1048				   "Cannot populate switcher mapping");
1049		}
1050		lg->pgdirs[pgdir].last_host_cpu = -1;
1051	}
1052}
1053
1054#ifdef CONFIG_X86_PAE
 1055/* For setting a mid-level (PMD) entry, we just throw everything away.  It's easy. */
1056void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
1057{
1058	guest_pagetable_clear_all(&lg->cpus[0]);
1059}
1060#endif
1061
1062/*H:500
1063 * (vii) Setting up the page tables initially.
1064 *
 1065 * When a Guest is first created, we initialize a shadow page table which
1066 * we will populate on future faults.  The Guest doesn't have any actual
1067 * pagetables yet, so we set linear_pages to tell demand_page() to fake it
1068 * for the moment.
1069 *
1070 * We do need the Switcher to be mapped at all times, so we allocate that
1071 * part of the Guest page table here.
1072 */
1073int init_guest_pagetable(struct lguest *lg)
1074{
1075	struct lg_cpu *cpu = &lg->cpus[0];
1076	int allocated = 0;
1077
1078	/* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */
1079	cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
1080	if (!allocated)
1081		return -ENOMEM;
1082
 1083	/* We start with a linear mapping until the Guest sets up real page tables. */
1084	cpu->linear_pages = true;
1085
1086	/* Allocate the page tables for the Switcher. */
1087	if (!allocate_switcher_mapping(cpu)) {
1088		release_all_pagetables(lg);
1089		return -ENOMEM;
1090	}
1091
1092	return 0;
1093}
1094
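/*
 * To keep the whole lifecycle in view: init_guest_pagetable() runs once at
 * creation, page_table_guest_data_init() once the Guest makes its
 * LHCALL_LGUEST_INIT hypercall, demand_page() / guest_set_pte() and friends
 * do the work while it runs, and free_guest_pagetable() cleans up when the
 * Guest dies.
 */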
1095/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
1096void page_table_guest_data_init(struct lg_cpu *cpu)
1097{
1098	/*
1099	 * We tell the Guest that it can't use the virtual addresses
 1100	 * used by the Switcher.  The expression ~switcher_addr + 1 is the
 1101	 * two's-complement way of writing 4GB - switcher_addr.
1102	 */
1103	u32 top = ~switcher_addr + 1;
1104
1105	/* We get the kernel address: above this is all kernel memory. */
1106	if (get_user(cpu->lg->kernel_address,
1107		     &cpu->lg->lguest_data->kernel_address)
1108		/*
1109		 * We tell the Guest that it can't use the top virtual
1110		 * addresses (used by the Switcher).
1111		 */
1112	    || put_user(top, &cpu->lg->lguest_data->reserve_mem)) {
1113		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
1114		return;
1115	}
1116
1117	/*
1118	 * In flush_user_mappings() we loop from 0 to
1119	 * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
1120	 * Switcher mappings, so check that now.
1121	 */
1122	if (cpu->lg->kernel_address >= switcher_addr)
1123		kill_guest(cpu, "bad kernel address %#lx",
1124				 cpu->lg->kernel_address);
1125}
1126
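/*
 * A worked example of that "4GB - switcher_addr" trick (switcher_addr value
 * invented for illustration): with switcher_addr at 0xff000000,
 * ~0xff000000 + 1 == 0x01000000, ie. 16MB, which is exactly
 * 4GB - 0xff000000: the slice of address space the Guest must keep clear,
 * and the value it sees in reserve_mem.
 */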
1127/* When a Guest dies, our cleanup is fairly simple. */
1128void free_guest_pagetable(struct lguest *lg)
1129{
1130	unsigned int i;
1131
1132	/* Throw away all page table pages. */
1133	release_all_pagetables(lg);
1134	/* Now free the top levels: free_page() can handle 0 just fine. */
1135	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
1136		free_page((long)lg->pgdirs[i].pgdir);
1137}
1138
1139/*H:481
1140 * This clears the Switcher mappings for cpu #i.
1141 */
1142static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i)
1143{
1144	unsigned long base = switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2;
1145	pte_t *pte;
1146
1147	/* Clear the mappings for both pages. */
1148	pte = find_spte(cpu, base, false, 0, 0);
1149	release_pte(*pte);
1150	set_pte(pte, __pte(0));
1151
1152	pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
1153	release_pte(*pte);
1154	set_pte(pte, __pte(0));
1155}
1156
1157/*H:480
1158 * (vi) Mapping the Switcher when the Guest is about to run.
1159 *
1160 * The Switcher and the two pages for this CPU need to be visible in the Guest
1161 * (and not the pages for other CPUs).
1162 *
1163 * The pages for the pagetables have all been allocated before: we just need
1164 * to make sure the actual PTEs are up-to-date for the CPU we're about to run
1165 * on.
1166 */
1167void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
1168{
1169	unsigned long base;
1170	struct page *percpu_switcher_page, *regs_page;
1171	pte_t *pte;
1172	struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd];
1173
1174	/* Switcher page should always be mapped by now! */
1175	BUG_ON(!pgdir->switcher_mapped);
1176
1177	/* 
1178	 * Remember that we have two pages for each Host CPU, so we can run a
1179	 * Guest on each CPU without them interfering.  We need to make sure
1180	 * those pages are mapped correctly in the Guest, but since we usually
1181	 * run on the same CPU, we cache that, and only update the mappings
1182	 * when we move.
1183	 */
1184	if (pgdir->last_host_cpu == raw_smp_processor_id())
1185		return;
1186
1187	/* -1 means unknown so we remove everything. */
1188	if (pgdir->last_host_cpu == -1) {
1189		unsigned int i;
1190		for_each_possible_cpu(i)
1191			remove_switcher_percpu_map(cpu, i);
1192	} else {
1193		/* We know exactly what CPU mapping to remove. */
1194		remove_switcher_percpu_map(cpu, pgdir->last_host_cpu);
1195	}
1196
1197	/*
1198	 * When we're running the Guest, we want the Guest's "regs" page to
1199	 * appear where the first Switcher page for this CPU is.  This is an
1200	 * optimization: when the Switcher saves the Guest registers, it saves
1201	 * them into the first page of this CPU's "struct lguest_pages": if we
1202	 * make sure the Guest's register page is already mapped there, we
1203	 * don't have to copy them out again.
1204	 */
1205	/* Find the shadow PTE for this regs page. */
1206	base = switcher_addr + PAGE_SIZE
1207		+ raw_smp_processor_id() * sizeof(struct lguest_pages);
1208	pte = find_spte(cpu, base, false, 0, 0);
1209	regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT);
1210	get_page(regs_page);
1211	set_pte(pte, mk_pte(regs_page, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)));
1212
1213	/*
1214	 * We map the second page of the struct lguest_pages read-only in
1215	 * the Guest: the IDT, GDT and other things it's not supposed to
1216	 * change.
1217	 */
1218	pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
1219	percpu_switcher_page
1220		= lg_switcher_pages[1 + raw_smp_processor_id()*2 + 1];
1221	get_page(percpu_switcher_page);
1222	set_pte(pte, mk_pte(percpu_switcher_page,
1223			    __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));
1224
1225	pgdir->last_host_cpu = raw_smp_processor_id();
1226}
1227
1228/*H:490
1229 * We've made it through the page table code.  Perhaps our tired brains are
1230 * still processing the details, or perhaps we're simply glad it's over.
1231 *
 1232 * If nothing else, note that all this complexity in keeping the shadow page
 1233 * tables in sync with the Guest's page tables is for one reason: for most
 1234 * Guests this page table dance determines how bad performance will be.  This
 1235 * is why Xen uses exotic direct Guest pagetable manipulation, and why both
 1236 * Intel and AMD have implemented page table support directly in hardware.
1237 *
1238 * There is just one file remaining in the Host.
1239 */