   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * AMD Memory Encryption Support
   4 *
   5 * Copyright (C) 2019 SUSE
   6 *
   7 * Author: Joerg Roedel <jroedel@suse.de>
   8 */
   9
  10#define pr_fmt(fmt)	"SEV: " fmt
  11
  12#include <linux/sched/debug.h>	/* For show_regs() */
  13#include <linux/percpu-defs.h>
  14#include <linux/cc_platform.h>
  15#include <linux/printk.h>
  16#include <linux/mm_types.h>
  17#include <linux/set_memory.h>
  18#include <linux/memblock.h>
  19#include <linux/kernel.h>
  20#include <linux/mm.h>
  21#include <linux/cpumask.h>
  22#include <linux/efi.h>
  23#include <linux/platform_device.h>
  24#include <linux/io.h>
  25
  26#include <asm/cpu_entry_area.h>
  27#include <asm/stacktrace.h>
  28#include <asm/sev.h>
  29#include <asm/insn-eval.h>
  30#include <asm/fpu/xcr.h>
  31#include <asm/processor.h>
  32#include <asm/realmode.h>
  33#include <asm/setup.h>
  34#include <asm/traps.h>
  35#include <asm/svm.h>
  36#include <asm/smp.h>
  37#include <asm/cpu.h>
  38#include <asm/apic.h>
  39#include <asm/cpuid.h>
  40#include <asm/cmdline.h>
  41
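/*
 * Architectural reset value of DR7: all hardware breakpoints disabled, only
 * the always-one bit (bit 10) set.
 */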
  42#define DR7_RESET_VALUE        0x400
  43
   44/* AP INIT values as documented in the APM2 section "Processor Initialization State" */
  45#define AP_INIT_CS_LIMIT		0xffff
  46#define AP_INIT_DS_LIMIT		0xffff
  47#define AP_INIT_LDTR_LIMIT		0xffff
  48#define AP_INIT_GDTR_LIMIT		0xffff
  49#define AP_INIT_IDTR_LIMIT		0xffff
  50#define AP_INIT_TR_LIMIT		0xffff
  51#define AP_INIT_RFLAGS_DEFAULT		0x2
  52#define AP_INIT_DR6_DEFAULT		0xffff0ff0
  53#define AP_INIT_GPAT_DEFAULT		0x0007040600070406ULL
  54#define AP_INIT_XCR0_DEFAULT		0x1
  55#define AP_INIT_X87_FTW_DEFAULT		0x5555
  56#define AP_INIT_X87_FCW_DEFAULT		0x0040
  57#define AP_INIT_CR0_DEFAULT		0x60000010
  58#define AP_INIT_MXCSR_DEFAULT		0x1f80
  59
  60/* For early boot hypervisor communication in SEV-ES enabled guests */
  61static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
  62
  63/*
  64 * Needs to be in the .data section because we need it NULL before bss is
  65 * cleared
  66 */
  67static struct ghcb *boot_ghcb __section(".data");
  68
  69/* Bitmap of SEV features supported by the hypervisor */
  70static u64 sev_hv_features __ro_after_init;
  71
  72/* #VC handler runtime per-CPU data */
  73struct sev_es_runtime_data {
  74	struct ghcb ghcb_page;
  75
  76	/*
  77	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
  78	 * It is needed when an NMI happens while the #VC handler uses the real
  79	 * GHCB, and the NMI handler itself is causing another #VC exception. In
  80	 * that case the GHCB content of the first handler needs to be backed up
  81	 * and restored.
  82	 */
  83	struct ghcb backup_ghcb;
  84
  85	/*
  86	 * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
  87	 * There is no need for it to be atomic, because nothing is written to
  88	 * the GHCB between the read and the write of ghcb_active. So it is safe
  89	 * to use it when a nested #VC exception happens before the write.
  90	 *
  91	 * This is necessary for example in the #VC->NMI->#VC case when the NMI
  92	 * happens while the first #VC handler uses the GHCB. When the NMI code
  93	 * raises a second #VC handler it might overwrite the contents of the
  94	 * GHCB written by the first handler. To avoid this the content of the
  95	 * GHCB is saved and restored when the GHCB is detected to be in use
  96	 * already.
  97	 */
  98	bool ghcb_active;
  99	bool backup_ghcb_active;
 100
 101	/*
 102	 * Cached DR7 value - write it on DR7 writes and return it on reads.
 103	 * That value will never make it to the real hardware DR7 as debugging
 104	 * is currently unsupported in SEV-ES guests.
 105	 */
 106	unsigned long dr7;
 107};
 108
 109struct ghcb_state {
 110	struct ghcb *ghcb;
 111};
 112
 113static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
 114DEFINE_STATIC_KEY_FALSE(sev_es_enable_key);
 115
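/* Per-CPU VMSA page handed to the hypervisor by the SNP AP Creation path (see wakeup_cpu_via_vmgexit()) */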
 116static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
 117
 118struct sev_config {
 119	__u64 debug		: 1,
 120	      __reserved	: 63;
 121};
 122
 123static struct sev_config sev_cfg __read_mostly;
 124
 125static __always_inline bool on_vc_stack(struct pt_regs *regs)
 126{
 127	unsigned long sp = regs->sp;
 128
 129	/* User-mode RSP is not trusted */
 130	if (user_mode(regs))
 131		return false;
 132
 133	/* SYSCALL gap still has user-mode RSP */
 134	if (ip_within_syscall_gap(regs))
 135		return false;
 136
 137	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
 138}
 139
 140/*
 141 * This function handles the case when an NMI is raised in the #VC
 142 * exception handler entry code, before the #VC handler has switched off
 143 * its IST stack. In this case, the IST entry for #VC must be adjusted,
 144 * so that any nested #VC exception will not overwrite the stack
 145 * contents of the interrupted #VC handler.
 146 *
  147 * The IST entry is adjusted unconditionally so that it can also be
 148 * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
 149 * nested sev_es_ist_exit() call may adjust back the IST entry too
 150 * early.
 151 *
 152 * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
 153 * on the NMI IST stack, as they are only called from NMI handling code
 154 * right now.
 155 */
 156void noinstr __sev_es_ist_enter(struct pt_regs *regs)
 157{
 158	unsigned long old_ist, new_ist;
 159
 160	/* Read old IST entry */
 161	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
 162
 163	/*
 164	 * If NMI happened while on the #VC IST stack, set the new IST
 165	 * value below regs->sp, so that the interrupted stack frame is
 166	 * not overwritten by subsequent #VC exceptions.
 167	 */
 168	if (on_vc_stack(regs))
 169		new_ist = regs->sp;
 170
 171	/*
 172	 * Reserve additional 8 bytes and store old IST value so this
 173	 * adjustment can be unrolled in __sev_es_ist_exit().
 174	 */
 175	new_ist -= sizeof(old_ist);
 176	*(unsigned long *)new_ist = old_ist;
 177
 178	/* Set new IST entry */
 179	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
 180}
 181
 182void noinstr __sev_es_ist_exit(void)
 183{
 184	unsigned long ist;
 185
 186	/* Read IST entry */
 187	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
 188
 189	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
 190		return;
 191
 192	/* Read back old IST entry and write it to the TSS */
 193	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
 194}
 195
 196/*
 197 * Nothing shall interrupt this code path while holding the per-CPU
 198 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 199 *
 200 * Callers must disable local interrupts around it.
 201 */
 202static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
 203{
 204	struct sev_es_runtime_data *data;
 205	struct ghcb *ghcb;
 206
 207	WARN_ON(!irqs_disabled());
 208
 209	data = this_cpu_read(runtime_data);
 210	ghcb = &data->ghcb_page;
 211
 212	if (unlikely(data->ghcb_active)) {
 213		/* GHCB is already in use - save its contents */
 214
 215		if (unlikely(data->backup_ghcb_active)) {
 216			/*
 217			 * Backup-GHCB is also already in use. There is no way
 218			 * to continue here so just kill the machine. To make
 219			 * panic() work, mark GHCBs inactive so that messages
 220			 * can be printed out.
 221			 */
 222			data->ghcb_active        = false;
 223			data->backup_ghcb_active = false;
 224
 225			instrumentation_begin();
 226			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
 227			instrumentation_end();
 228		}
 229
 230		/* Mark backup_ghcb active before writing to it */
 231		data->backup_ghcb_active = true;
 232
 233		state->ghcb = &data->backup_ghcb;
 234
 235		/* Backup GHCB content */
 236		*state->ghcb = *ghcb;
 237	} else {
 238		state->ghcb = NULL;
 239		data->ghcb_active = true;
 240	}
 241
 242	return ghcb;
 243}
 244
 245static inline u64 sev_es_rd_ghcb_msr(void)
 246{
 247	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
 248}
 249
 250static __always_inline void sev_es_wr_ghcb_msr(u64 val)
 251{
 252	u32 low, high;
 253
 254	low  = (u32)(val);
 255	high = (u32)(val >> 32);
 256
 257	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
 258}
 259
 260static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
 261				unsigned char *buffer)
 262{
 263	return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
 264}
 265
 266static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
 267{
 268	char buffer[MAX_INSN_SIZE];
 269	int insn_bytes;
 270
 271	insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
 272	if (insn_bytes == 0) {
 273		/* Nothing could be copied */
 274		ctxt->fi.vector     = X86_TRAP_PF;
 275		ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
 276		ctxt->fi.cr2        = ctxt->regs->ip;
 277		return ES_EXCEPTION;
 278	} else if (insn_bytes == -EINVAL) {
 279		/* Effective RIP could not be calculated */
 280		ctxt->fi.vector     = X86_TRAP_GP;
 281		ctxt->fi.error_code = 0;
 282		ctxt->fi.cr2        = 0;
 283		return ES_EXCEPTION;
 284	}
 285
 286	if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
 287		return ES_DECODE_FAILED;
 288
 289	if (ctxt->insn.immediate.got)
 290		return ES_OK;
 291	else
 292		return ES_DECODE_FAILED;
 293}
 294
 295static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
 296{
 297	char buffer[MAX_INSN_SIZE];
 298	int res, ret;
 299
 300	res = vc_fetch_insn_kernel(ctxt, buffer);
 301	if (res) {
 302		ctxt->fi.vector     = X86_TRAP_PF;
 303		ctxt->fi.error_code = X86_PF_INSTR;
 304		ctxt->fi.cr2        = ctxt->regs->ip;
 305		return ES_EXCEPTION;
 306	}
 307
 308	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
 309	if (ret < 0)
 310		return ES_DECODE_FAILED;
 311	else
 312		return ES_OK;
 313}
 314
 315static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
 316{
 317	if (user_mode(ctxt->regs))
 318		return __vc_decode_user_insn(ctxt);
 319	else
 320		return __vc_decode_kern_insn(ctxt);
 321}
 322
 323static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
 324				   char *dst, char *buf, size_t size)
 325{
 326	unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
 327
 328	/*
 329	 * This function uses __put_user() independent of whether kernel or user
 330	 * memory is accessed. This works fine because __put_user() does no
 331	 * sanity checks of the pointer being accessed. All that it does is
 332	 * to report when the access failed.
 333	 *
 334	 * Also, this function runs in atomic context, so __put_user() is not
 335	 * allowed to sleep. The page-fault handler detects that it is running
 336	 * in atomic context and will not try to take mmap_sem and handle the
 337	 * fault, so additional pagefault_enable()/disable() calls are not
 338	 * needed.
 339	 *
 340	 * The access can't be done via copy_to_user() here because
 341	 * vc_write_mem() must not use string instructions to access unsafe
 342	 * memory. The reason is that MOVS is emulated by the #VC handler by
 343	 * splitting the move up into a read and a write and taking a nested #VC
  344	 * exception on whichever of them is the MMIO access. Using string
 345	 * instructions here would cause infinite nesting.
 346	 */
 347	switch (size) {
 348	case 1: {
 349		u8 d1;
 350		u8 __user *target = (u8 __user *)dst;
 351
 352		memcpy(&d1, buf, 1);
 353		if (__put_user(d1, target))
 354			goto fault;
 355		break;
 356	}
 357	case 2: {
 358		u16 d2;
 359		u16 __user *target = (u16 __user *)dst;
 360
 361		memcpy(&d2, buf, 2);
 362		if (__put_user(d2, target))
 363			goto fault;
 364		break;
 365	}
 366	case 4: {
 367		u32 d4;
 368		u32 __user *target = (u32 __user *)dst;
 369
 370		memcpy(&d4, buf, 4);
 371		if (__put_user(d4, target))
 372			goto fault;
 373		break;
 374	}
 375	case 8: {
 376		u64 d8;
 377		u64 __user *target = (u64 __user *)dst;
 378
 379		memcpy(&d8, buf, 8);
 380		if (__put_user(d8, target))
 381			goto fault;
 382		break;
 383	}
 384	default:
 385		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
 386		return ES_UNSUPPORTED;
 387	}
 388
 389	return ES_OK;
 390
 391fault:
 392	if (user_mode(ctxt->regs))
 393		error_code |= X86_PF_USER;
 394
 395	ctxt->fi.vector = X86_TRAP_PF;
 396	ctxt->fi.error_code = error_code;
 397	ctxt->fi.cr2 = (unsigned long)dst;
 398
 399	return ES_EXCEPTION;
 400}
 401
 402static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 403				  char *src, char *buf, size_t size)
 404{
 405	unsigned long error_code = X86_PF_PROT;
 406
 407	/*
 408	 * This function uses __get_user() independent of whether kernel or user
 409	 * memory is accessed. This works fine because __get_user() does no
 410	 * sanity checks of the pointer being accessed. All that it does is
 411	 * to report when the access failed.
 412	 *
 413	 * Also, this function runs in atomic context, so __get_user() is not
 414	 * allowed to sleep. The page-fault handler detects that it is running
 415	 * in atomic context and will not try to take mmap_sem and handle the
 416	 * fault, so additional pagefault_enable()/disable() calls are not
 417	 * needed.
 418	 *
 419	 * The access can't be done via copy_from_user() here because
 420	 * vc_read_mem() must not use string instructions to access unsafe
 421	 * memory. The reason is that MOVS is emulated by the #VC handler by
 422	 * splitting the move up into a read and a write and taking a nested #VC
  423	 * exception on whichever of them is the MMIO access. Using string
 424	 * instructions here would cause infinite nesting.
 425	 */
 426	switch (size) {
 427	case 1: {
 428		u8 d1;
 429		u8 __user *s = (u8 __user *)src;
 430
 431		if (__get_user(d1, s))
 432			goto fault;
 433		memcpy(buf, &d1, 1);
 434		break;
 435	}
 436	case 2: {
 437		u16 d2;
 438		u16 __user *s = (u16 __user *)src;
 439
 440		if (__get_user(d2, s))
 441			goto fault;
 442		memcpy(buf, &d2, 2);
 443		break;
 444	}
 445	case 4: {
 446		u32 d4;
 447		u32 __user *s = (u32 __user *)src;
 448
 449		if (__get_user(d4, s))
 450			goto fault;
 451		memcpy(buf, &d4, 4);
 452		break;
 453	}
 454	case 8: {
 455		u64 d8;
 456		u64 __user *s = (u64 __user *)src;
 457		if (__get_user(d8, s))
 458			goto fault;
 459		memcpy(buf, &d8, 8);
 460		break;
 461	}
 462	default:
 463		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
 464		return ES_UNSUPPORTED;
 465	}
 466
 467	return ES_OK;
 468
 469fault:
 470	if (user_mode(ctxt->regs))
 471		error_code |= X86_PF_USER;
 472
 473	ctxt->fi.vector = X86_TRAP_PF;
 474	ctxt->fi.error_code = error_code;
 475	ctxt->fi.cr2 = (unsigned long)src;
 476
 477	return ES_EXCEPTION;
 478}
 479
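/*
 * Translate @vaddr to a physical address by walking the current page table.
 * Used during MMIO emulation, where the faulting address must be reported to
 * the hypervisor as a guest physical address.
 */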
 480static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
 481					   unsigned long vaddr, phys_addr_t *paddr)
 482{
 483	unsigned long va = (unsigned long)vaddr;
 484	unsigned int level;
 485	phys_addr_t pa;
 486	pgd_t *pgd;
 487	pte_t *pte;
 488
 489	pgd = __va(read_cr3_pa());
 490	pgd = &pgd[pgd_index(va)];
 491	pte = lookup_address_in_pgd(pgd, va, &level);
 492	if (!pte) {
 493		ctxt->fi.vector     = X86_TRAP_PF;
 494		ctxt->fi.cr2        = vaddr;
 495		ctxt->fi.error_code = 0;
 496
 497		if (user_mode(ctxt->regs))
 498			ctxt->fi.error_code |= X86_PF_USER;
 499
 500		return ES_EXCEPTION;
 501	}
 502
 503	if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
 504		/* Emulated MMIO to/from encrypted memory not supported */
 505		return ES_UNSUPPORTED;
 506
 507	pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
 508	pa |= va & ~page_level_mask(level);
 509
 510	*paddr = pa;
 511
 512	return ES_OK;
 513}
 514
 515/* Include code shared with pre-decompression boot stage */
 516#include "sev-shared.c"
 517
 518static noinstr void __sev_put_ghcb(struct ghcb_state *state)
 519{
 520	struct sev_es_runtime_data *data;
 521	struct ghcb *ghcb;
 522
 523	WARN_ON(!irqs_disabled());
 524
 525	data = this_cpu_read(runtime_data);
 526	ghcb = &data->ghcb_page;
 527
 528	if (state->ghcb) {
 529		/* Restore GHCB from Backup */
 530		*ghcb = *state->ghcb;
 531		data->backup_ghcb_active = false;
 532		state->ghcb = NULL;
 533	} else {
 534		/*
 535		 * Invalidate the GHCB so a VMGEXIT instruction issued
 536		 * from userspace won't appear to be valid.
 537		 */
 538		vc_ghcb_invalidate(ghcb);
 539		data->ghcb_active = false;
 540	}
 541}
 542
 543void noinstr __sev_es_nmi_complete(void)
 544{
 545	struct ghcb_state state;
 546	struct ghcb *ghcb;
 547
 548	ghcb = __sev_get_ghcb(&state);
 549
 550	vc_ghcb_invalidate(ghcb);
 551	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
 552	ghcb_set_sw_exit_info_1(ghcb, 0);
 553	ghcb_set_sw_exit_info_2(ghcb, 0);
 554
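	/* __pa_nodebug() skips the debug-only physical-address checks, keeping this noinstr path instrumentation-free */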
 555	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
 556	VMGEXIT();
 557
 558	__sev_put_ghcb(&state);
 559}
 560
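/*
 * Return the physical address of the SNP secrets page advertised by the
 * Confidential Computing blob, or 0 if the blob or a sane secrets page is not
 * available.
 */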
 561static u64 __init get_secrets_page(void)
 562{
 563	u64 pa_data = boot_params.cc_blob_address;
 564	struct cc_blob_sev_info info;
 565	void *map;
 566
 567	/*
 568	 * The CC blob contains the address of the secrets page, check if the
 569	 * blob is present.
 570	 */
 571	if (!pa_data)
 572		return 0;
 573
 574	map = early_memremap(pa_data, sizeof(info));
 575	if (!map) {
 576		pr_err("Unable to locate SNP secrets page: failed to map the Confidential Computing blob.\n");
 577		return 0;
 578	}
 579	memcpy(&info, map, sizeof(info));
 580	early_memunmap(map, sizeof(info));
 581
 582	/* smoke-test the secrets page passed */
 583	if (!info.secrets_phys || info.secrets_len != PAGE_SIZE)
 584		return 0;
 585
 586	return info.secrets_phys;
 587}
 588
 589static u64 __init get_snp_jump_table_addr(void)
 590{
 591	struct snp_secrets_page_layout *layout;
 592	void __iomem *mem;
 593	u64 pa, addr;
 594
 595	pa = get_secrets_page();
 596	if (!pa)
 597		return 0;
 598
 599	mem = ioremap_encrypted(pa, PAGE_SIZE);
 600	if (!mem) {
 601		pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
 602		return 0;
 603	}
 604
 605	layout = (__force struct snp_secrets_page_layout *)mem;
 606
 607	addr = layout->os_area.ap_jump_table_pa;
 608	iounmap(mem);
 609
 610	return addr;
 611}
 612
 613static u64 __init get_jump_table_addr(void)
 614{
 615	struct ghcb_state state;
 616	unsigned long flags;
 617	struct ghcb *ghcb;
 618	u64 ret = 0;
 619
 620	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
 621		return get_snp_jump_table_addr();
 622
 623	local_irq_save(flags);
 624
 625	ghcb = __sev_get_ghcb(&state);
 626
 627	vc_ghcb_invalidate(ghcb);
 628	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
 629	ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
 630	ghcb_set_sw_exit_info_2(ghcb, 0);
 631
 632	sev_es_wr_ghcb_msr(__pa(ghcb));
 633	VMGEXIT();
 634
 635	if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
 636	    ghcb_sw_exit_info_2_is_valid(ghcb))
 637		ret = ghcb->save.sw_exit_info_2;
 638
 639	__sev_put_ghcb(&state);
 640
 641	local_irq_restore(flags);
 642
 643	return ret;
 644}
 645
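/*
 * Validate or rescind validation of a range of 4K pages with the PVALIDATE
 * instruction. A failure here is fatal, so the guest self-terminates.
 */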
 646static void pvalidate_pages(unsigned long vaddr, unsigned int npages, bool validate)
 647{
 648	unsigned long vaddr_end;
 649	int rc;
 650
 651	vaddr = vaddr & PAGE_MASK;
 652	vaddr_end = vaddr + (npages << PAGE_SHIFT);
 653
 654	while (vaddr < vaddr_end) {
 655		rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
 656		if (WARN(rc, "Failed to validate address 0x%lx ret %d", vaddr, rc))
 657			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
 658
 659		vaddr = vaddr + PAGE_SIZE;
 660	}
 661}
 662
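/*
 * Change the RMP state of a physical range one 4K page at a time using the
 * GHCB MSR protocol, which works before a full GHCB has been established.
 */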
 663static void __init early_set_pages_state(unsigned long paddr, unsigned int npages, enum psc_op op)
 664{
 665	unsigned long paddr_end;
  666	u64 val;
 667
 668	paddr = paddr & PAGE_MASK;
 669	paddr_end = paddr + (npages << PAGE_SHIFT);
 670
 671	while (paddr < paddr_end) {
 672		/*
 673		 * Use the MSR protocol because this function can be called before
 674		 * the GHCB is established.
 675		 */
 676		sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
 677		VMGEXIT();
 678
 679		val = sev_es_rd_ghcb_msr();
 680
 681		if (WARN(GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP,
 682			 "Wrong PSC response code: 0x%x\n",
 683			 (unsigned int)GHCB_RESP_CODE(val)))
 684			goto e_term;
 685
 686		if (WARN(GHCB_MSR_PSC_RESP_VAL(val),
 687			 "Failed to change page state to '%s' paddr 0x%lx error 0x%llx\n",
 688			 op == SNP_PAGE_STATE_PRIVATE ? "private" : "shared",
 689			 paddr, GHCB_MSR_PSC_RESP_VAL(val)))
 690			goto e_term;
 691
 692		paddr = paddr + PAGE_SIZE;
 693	}
 694
 695	return;
 696
 697e_term:
 698	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
 699}
 700
 701void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
 702					 unsigned int npages)
 703{
 704	/*
 705	 * This can be invoked in early boot while running identity mapped, so
 706	 * use an open coded check for SNP instead of using cc_platform_has().
 707	 * This eliminates worries about jump tables or checking boot_cpu_data
 708	 * in the cc_platform_has() function.
 709	 */
 710	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
 711		return;
 712
 713	 /*
 714	  * Ask the hypervisor to mark the memory pages as private in the RMP
 715	  * table.
 716	  */
 717	early_set_pages_state(paddr, npages, SNP_PAGE_STATE_PRIVATE);
 718
 719	/* Validate the memory pages after they've been added in the RMP table. */
 720	pvalidate_pages(vaddr, npages, true);
 721}
 722
 723void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
 724					unsigned int npages)
 725{
 726	/*
 727	 * This can be invoked in early boot while running identity mapped, so
 728	 * use an open coded check for SNP instead of using cc_platform_has().
 729	 * This eliminates worries about jump tables or checking boot_cpu_data
 730	 * in the cc_platform_has() function.
 731	 */
 732	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
 733		return;
 734
 735	/* Invalidate the memory pages before they are marked shared in the RMP table. */
 736	pvalidate_pages(vaddr, npages, false);
 737
 738	 /* Ask hypervisor to mark the memory pages shared in the RMP table. */
 739	early_set_pages_state(paddr, npages, SNP_PAGE_STATE_SHARED);
 740}
 741
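/*
 * Convert a physical range to private or shared early during boot, deriving
 * the virtual address from the kernel direct map.
 */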
 742void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
 743{
 744	unsigned long vaddr, npages;
 745
 746	vaddr = (unsigned long)__va(paddr);
 747	npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
 748
 749	if (op == SNP_PAGE_STATE_PRIVATE)
 750		early_snp_set_memory_private(vaddr, paddr, npages);
 751	else if (op == SNP_PAGE_STATE_SHARED)
 752		early_snp_set_memory_shared(vaddr, paddr, npages);
 753	else
 754		WARN(1, "invalid memory op %d\n", op);
 755}
 756
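/*
 * Issue a Page State Change request for the entries in @desc via the GHCB
 * shared buffer. Returns 0 on success, nonzero on failure.
 */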
 757static int vmgexit_psc(struct snp_psc_desc *desc)
 758{
 759	int cur_entry, end_entry, ret = 0;
 760	struct snp_psc_desc *data;
 761	struct ghcb_state state;
 762	struct es_em_ctxt ctxt;
 763	unsigned long flags;
 764	struct ghcb *ghcb;
 765
 766	/*
 767	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
 768	 * a per-CPU GHCB.
 769	 */
 770	local_irq_save(flags);
 771
 772	ghcb = __sev_get_ghcb(&state);
 773	if (!ghcb) {
 774		ret = 1;
 775		goto out_unlock;
 776	}
 777
 778	/* Copy the input desc into GHCB shared buffer */
 779	data = (struct snp_psc_desc *)ghcb->shared_buffer;
 780	memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));
 781
 782	/*
 783	 * As per the GHCB specification, the hypervisor can resume the guest
 784	 * before processing all the entries. Check whether all the entries
 785	 * are processed. If not, then keep retrying. Note, the hypervisor
 786	 * will update the data memory directly to indicate the status, so
 787	 * reference the data->hdr everywhere.
 788	 *
 789	 * The strategy here is to wait for the hypervisor to change the page
 790	 * state in the RMP table before guest accesses the memory pages. If the
 791	 * page state change was not successful, then later memory access will
 792	 * result in a crash.
 793	 */
 794	cur_entry = data->hdr.cur_entry;
 795	end_entry = data->hdr.end_entry;
 796
 797	while (data->hdr.cur_entry <= data->hdr.end_entry) {
 798		ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
 799
  800		/* This will advance the shared buffer that 'data' points to. */
 801		ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
 802
 803		/*
 804		 * Page State Change VMGEXIT can pass error code through
 805		 * exit_info_2.
 806		 */
 807		if (WARN(ret || ghcb->save.sw_exit_info_2,
 808			 "SNP: PSC failed ret=%d exit_info_2=%llx\n",
 809			 ret, ghcb->save.sw_exit_info_2)) {
 810			ret = 1;
 811			goto out;
 812		}
 813
 814		/* Verify that reserved bit is not set */
 815		if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
 816			ret = 1;
 817			goto out;
 818		}
 819
 820		/*
 821		 * Sanity check that entry processing is not going backwards.
  822		 * This will happen only if the hypervisor is tricking us.
 823		 */
 824		if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
  825			 "SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
 826			 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
 827			ret = 1;
 828			goto out;
 829		}
 830	}
 831
 832out:
 833	__sev_put_ghcb(&state);
 834
 835out_unlock:
 836	local_irq_restore(flags);
 837
 838	return ret;
 839}
 840
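/*
 * Fill a PSC descriptor with 4K entries covering [vaddr, vaddr_end) and issue
 * it; the guest terminates itself if the page-state change fails.
 */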
 841static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
 842			      unsigned long vaddr_end, int op)
 843{
 844	struct psc_hdr *hdr;
 845	struct psc_entry *e;
 846	unsigned long pfn;
 847	int i;
 848
 849	hdr = &data->hdr;
 850	e = data->entries;
 851
 852	memset(data, 0, sizeof(*data));
 853	i = 0;
 854
 855	while (vaddr < vaddr_end) {
 856		if (is_vmalloc_addr((void *)vaddr))
 857			pfn = vmalloc_to_pfn((void *)vaddr);
 858		else
 859			pfn = __pa(vaddr) >> PAGE_SHIFT;
 860
 861		e->gfn = pfn;
 862		e->operation = op;
 863		hdr->end_entry = i;
 864
 865		/*
 866		 * Current SNP implementation doesn't keep track of the RMP page
 867		 * size so use 4K for simplicity.
 868		 */
 869		e->pagesize = RMP_PG_SIZE_4K;
 870
 871		vaddr = vaddr + PAGE_SIZE;
 872		e++;
 873		i++;
 874	}
 875
 876	if (vmgexit_psc(data))
 877		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
 878}
 879
 880static void set_pages_state(unsigned long vaddr, unsigned int npages, int op)
 881{
 882	unsigned long vaddr_end, next_vaddr;
 883	struct snp_psc_desc *desc;
 884
 885	desc = kmalloc(sizeof(*desc), GFP_KERNEL_ACCOUNT);
 886	if (!desc)
 887		panic("SNP: failed to allocate memory for PSC descriptor\n");
 888
 889	vaddr = vaddr & PAGE_MASK;
 890	vaddr_end = vaddr + (npages << PAGE_SHIFT);
 891
 892	while (vaddr < vaddr_end) {
 893		/* Calculate the last vaddr that fits in one struct snp_psc_desc. */
 894		next_vaddr = min_t(unsigned long, vaddr_end,
 895				   (VMGEXIT_PSC_MAX_ENTRY * PAGE_SIZE) + vaddr);
 896
 897		__set_pages_state(desc, vaddr, next_vaddr, op);
 898
 899		vaddr = next_vaddr;
 900	}
 901
 902	kfree(desc);
 903}
 904
 905void snp_set_memory_shared(unsigned long vaddr, unsigned int npages)
 906{
 907	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
 908		return;
 909
 910	pvalidate_pages(vaddr, npages, false);
 911
 912	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
 913}
 914
 915void snp_set_memory_private(unsigned long vaddr, unsigned int npages)
 916{
 917	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
 918		return;
 919
 920	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
 921
 922	pvalidate_pages(vaddr, npages, true);
 923}
 924
 925static int snp_set_vmsa(void *va, bool vmsa)
 926{
 927	u64 attrs;
 928
 929	/*
 930	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
 931	 * using the RMPADJUST instruction. However, for the instruction to
 932	 * succeed it must target the permissions of a lesser privileged
 933	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
 934	 * instruction in the AMD64 APM Volume 3).
 935	 */
 936	attrs = 1;
 937	if (vmsa)
 938		attrs |= RMPADJUST_VMSA_PAGE_BIT;
 939
 940	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
 941}
 942
 943#define __ATTR_BASE		(SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
 944#define INIT_CS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
 945#define INIT_DS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)
 946
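/* System-segment types per the APM reset state: 2 = LDT, 3 = busy 16-bit TSS */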
 947#define INIT_LDTR_ATTRIBS	(SVM_SELECTOR_P_MASK | 2)
 948#define INIT_TR_ATTRIBS		(SVM_SELECTOR_P_MASK | 3)
 949
 950static void *snp_alloc_vmsa_page(void)
 951{
 952	struct page *p;
 953
 954	/*
 955	 * Allocate VMSA page to work around the SNP erratum where the CPU will
 956	 * incorrectly signal an RMP violation #PF if a large page (2MB or 1GB)
 957	 * collides with the RMP entry of VMSA page. The recommended workaround
 958	 * is to not use a large page.
 959	 *
 960	 * Allocate an 8k page which is also 8k-aligned.
 961	 */
 962	p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
 963	if (!p)
 964		return NULL;
 965
 966	split_page(p, 1);
 967
 968	/* Free the first 4k. This page may be 2M/1G aligned and cannot be used. */
 969	__free_page(p);
 970
 971	return page_address(p + 1);
 972}
 973
 974static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
 975{
 976	int err;
 977
 978	err = snp_set_vmsa(vmsa, false);
 979	if (err)
 980		pr_err("clear VMSA page failed (%u), leaking page\n", err);
 981	else
 982		free_page((unsigned long)vmsa);
 983}
 984
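/*
 * Start an AP by building a fully initialized VMSA for it and asking the
 * hypervisor to create the vCPU via the SNP AP Creation NAE event, since the
 * hypervisor cannot inject the usual INIT-SIPI sequence into an SNP guest.
 */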
 985static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip)
 986{
 987	struct sev_es_save_area *cur_vmsa, *vmsa;
 988	struct ghcb_state state;
 989	unsigned long flags;
 990	struct ghcb *ghcb;
 991	u8 sipi_vector;
 992	int cpu, ret;
 993	u64 cr4;
 994
 995	/*
 996	 * The hypervisor SNP feature support check has happened earlier, just check
 997	 * the AP_CREATION one here.
 998	 */
 999	if (!(sev_hv_features & GHCB_HV_FT_SNP_AP_CREATION))
1000		return -EOPNOTSUPP;
1001
1002	/*
1003	 * Verify the desired start IP against the known trampoline start IP
1004	 * to catch any future new trampolines that may be introduced that
1005	 * would require a new protected guest entry point.
1006	 */
1007	if (WARN_ONCE(start_ip != real_mode_header->trampoline_start,
1008		      "Unsupported SNP start_ip: %lx\n", start_ip))
1009		return -EINVAL;
1010
1011	/* Override start_ip with known protected guest start IP */
1012	start_ip = real_mode_header->sev_es_trampoline_start;
1013
1014	/* Find the logical CPU for the APIC ID */
1015	for_each_present_cpu(cpu) {
1016		if (arch_match_cpu_phys_id(cpu, apic_id))
1017			break;
1018	}
1019	if (cpu >= nr_cpu_ids)
1020		return -EINVAL;
1021
1022	cur_vmsa = per_cpu(sev_vmsa, cpu);
1023
1024	/*
1025	 * A new VMSA is created each time because there is no guarantee that
 1026	 * the current VMSA is the kernel's or that the vCPU is not running. If
 1027	 * an attempt was made to use the current VMSA with a running vCPU, a
1028	 * #VMEXIT of that vCPU would wipe out all of the settings being done
1029	 * here.
1030	 */
1031	vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page();
1032	if (!vmsa)
1033		return -ENOMEM;
1034
1035	/* CR4 should maintain the MCE value */
1036	cr4 = native_read_cr4() & X86_CR4_MCE;
1037
1038	/* Set the CS value based on the start_ip converted to a SIPI vector */
1039	sipi_vector		= (start_ip >> 12);
1040	vmsa->cs.base		= sipi_vector << 12;
1041	vmsa->cs.limit		= AP_INIT_CS_LIMIT;
1042	vmsa->cs.attrib		= INIT_CS_ATTRIBS;
1043	vmsa->cs.selector	= sipi_vector << 8;
1044
1045	/* Set the RIP value based on start_ip */
1046	vmsa->rip		= start_ip & 0xfff;
1047
1048	/* Set AP INIT defaults as documented in the APM */
1049	vmsa->ds.limit		= AP_INIT_DS_LIMIT;
1050	vmsa->ds.attrib		= INIT_DS_ATTRIBS;
1051	vmsa->es		= vmsa->ds;
1052	vmsa->fs		= vmsa->ds;
1053	vmsa->gs		= vmsa->ds;
1054	vmsa->ss		= vmsa->ds;
1055
1056	vmsa->gdtr.limit	= AP_INIT_GDTR_LIMIT;
1057	vmsa->ldtr.limit	= AP_INIT_LDTR_LIMIT;
1058	vmsa->ldtr.attrib	= INIT_LDTR_ATTRIBS;
1059	vmsa->idtr.limit	= AP_INIT_IDTR_LIMIT;
1060	vmsa->tr.limit		= AP_INIT_TR_LIMIT;
1061	vmsa->tr.attrib		= INIT_TR_ATTRIBS;
1062
1063	vmsa->cr4		= cr4;
1064	vmsa->cr0		= AP_INIT_CR0_DEFAULT;
1065	vmsa->dr7		= DR7_RESET_VALUE;
1066	vmsa->dr6		= AP_INIT_DR6_DEFAULT;
1067	vmsa->rflags		= AP_INIT_RFLAGS_DEFAULT;
1068	vmsa->g_pat		= AP_INIT_GPAT_DEFAULT;
1069	vmsa->xcr0		= AP_INIT_XCR0_DEFAULT;
1070	vmsa->mxcsr		= AP_INIT_MXCSR_DEFAULT;
1071	vmsa->x87_ftw		= AP_INIT_X87_FTW_DEFAULT;
1072	vmsa->x87_fcw		= AP_INIT_X87_FCW_DEFAULT;
1073
1074	/* SVME must be set. */
1075	vmsa->efer		= EFER_SVME;
1076
1077	/*
1078	 * Set the SNP-specific fields for this VMSA:
1079	 *   VMPL level
1080	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
1081	 */
1082	vmsa->vmpl		= 0;
1083	vmsa->sev_features	= sev_status >> 2;
1084
1085	/* Switch the page over to a VMSA page now that it is initialized */
1086	ret = snp_set_vmsa(vmsa, true);
1087	if (ret) {
1088		pr_err("set VMSA page failed (%u)\n", ret);
1089		free_page((unsigned long)vmsa);
1090
1091		return -EINVAL;
1092	}
1093
1094	/* Issue VMGEXIT AP Creation NAE event */
1095	local_irq_save(flags);
1096
1097	ghcb = __sev_get_ghcb(&state);
1098
1099	vc_ghcb_invalidate(ghcb);
1100	ghcb_set_rax(ghcb, vmsa->sev_features);
1101	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
1102	ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE);
1103	ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
1104
1105	sev_es_wr_ghcb_msr(__pa(ghcb));
1106	VMGEXIT();
1107
1108	if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
1109	    lower_32_bits(ghcb->save.sw_exit_info_1)) {
1110		pr_err("SNP AP Creation error\n");
1111		ret = -EINVAL;
1112	}
1113
1114	__sev_put_ghcb(&state);
1115
1116	local_irq_restore(flags);
1117
1118	/* Perform cleanup if there was an error */
1119	if (ret) {
1120		snp_cleanup_vmsa(vmsa);
1121		vmsa = NULL;
1122	}
1123
1124	/* Free up any previous VMSA page */
1125	if (cur_vmsa)
1126		snp_cleanup_vmsa(cur_vmsa);
1127
1128	/* Record the current VMSA page */
1129	per_cpu(sev_vmsa, cpu) = vmsa;
1130
1131	return ret;
1132}
1133
1134void snp_set_wakeup_secondary_cpu(void)
1135{
1136	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1137		return;
1138
1139	/*
1140	 * Always set this override if SNP is enabled. This makes it the
1141	 * required method to start APs under SNP. If the hypervisor does
1142	 * not support AP creation, then no APs will be started.
1143	 */
1144	apic->wakeup_secondary_cpu = wakeup_cpu_via_vmgexit;
1145}
1146
1147int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
1148{
1149	u16 startup_cs, startup_ip;
1150	phys_addr_t jump_table_pa;
1151	u64 jump_table_addr;
1152	u16 __iomem *jump_table;
1153
1154	jump_table_addr = get_jump_table_addr();
1155
1156	/* On UP guests there is no jump table so this is not a failure */
1157	if (!jump_table_addr)
1158		return 0;
1159
1160	/* Check if AP Jump Table is page-aligned */
1161	if (jump_table_addr & ~PAGE_MASK)
1162		return -EINVAL;
1163
1164	jump_table_pa = jump_table_addr & PAGE_MASK;
1165
1166	startup_cs = (u16)(rmh->trampoline_start >> 4);
1167	startup_ip = (u16)(rmh->sev_es_trampoline_start -
1168			   rmh->trampoline_start);
1169
1170	jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
1171	if (!jump_table)
1172		return -EIO;
1173
1174	writew(startup_ip, &jump_table[0]);
1175	writew(startup_cs, &jump_table[1]);
1176
1177	iounmap(jump_table);
1178
1179	return 0;
1180}
1181
1182/*
1183 * This is needed by the OVMF UEFI firmware which will use whatever it finds in
1184 * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
1185 * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
1186 */
1187int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
1188{
1189	struct sev_es_runtime_data *data;
1190	unsigned long address, pflags;
1191	int cpu;
1192	u64 pfn;
1193
1194	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1195		return 0;
1196
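	/* Map the GHCB data pages non-executable and writable; without _PAGE_ENC they stay shared (unencrypted) */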
1197	pflags = _PAGE_NX | _PAGE_RW;
1198
1199	for_each_possible_cpu(cpu) {
1200		data = per_cpu(runtime_data, cpu);
1201
1202		address = __pa(&data->ghcb_page);
1203		pfn = address >> PAGE_SHIFT;
1204
1205		if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
1206			return 1;
1207	}
1208
1209	return 0;
1210}
1211
1212static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
1213{
1214	struct pt_regs *regs = ctxt->regs;
1215	enum es_result ret;
1216	u64 exit_info_1;
1217
1218	/* Is it a WRMSR? */
1219	exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;
1220
1221	ghcb_set_rcx(ghcb, regs->cx);
1222	if (exit_info_1) {
1223		ghcb_set_rax(ghcb, regs->ax);
1224		ghcb_set_rdx(ghcb, regs->dx);
1225	}
1226
1227	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);
1228
1229	if ((ret == ES_OK) && (!exit_info_1)) {
1230		regs->ax = ghcb->save.rax;
1231		regs->dx = ghcb->save.rdx;
1232	}
1233
1234	return ret;
1235}
1236
1237static void snp_register_per_cpu_ghcb(void)
1238{
1239	struct sev_es_runtime_data *data;
1240	struct ghcb *ghcb;
1241
1242	data = this_cpu_read(runtime_data);
1243	ghcb = &data->ghcb_page;
1244
1245	snp_register_ghcb_early(__pa(ghcb));
1246}
1247
1248void setup_ghcb(void)
1249{
1250	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1251		return;
1252
1253	/* First make sure the hypervisor talks a supported protocol. */
1254	if (!sev_es_negotiate_protocol())
1255		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
1256
1257	/*
1258	 * Check whether the runtime #VC exception handler is active. It uses
1259	 * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
1260	 *
1261	 * If SNP is active, register the per-CPU GHCB page so that the runtime
1262	 * exception handler can use it.
1263	 */
1264	if (initial_vc_handler == (unsigned long)kernel_exc_vmm_communication) {
1265		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1266			snp_register_per_cpu_ghcb();
1267
1268		return;
1269	}
1270
1271	/*
1272	 * Clear the boot_ghcb. The first exception comes in before the bss
1273	 * section is cleared.
1274	 */
1275	memset(&boot_ghcb_page, 0, PAGE_SIZE);
1276
1277	/* Alright - Make the boot-ghcb public */
1278	boot_ghcb = &boot_ghcb_page;
1279
1280	/* SNP guest requires that GHCB GPA must be registered. */
1281	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1282		snp_register_ghcb_early(__pa(&boot_ghcb_page));
1283}
1284
1285#ifdef CONFIG_HOTPLUG_CPU
1286static void sev_es_ap_hlt_loop(void)
1287{
1288	struct ghcb_state state;
1289	struct ghcb *ghcb;
1290
1291	ghcb = __sev_get_ghcb(&state);
1292
1293	while (true) {
1294		vc_ghcb_invalidate(ghcb);
1295		ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
1296		ghcb_set_sw_exit_info_1(ghcb, 0);
1297		ghcb_set_sw_exit_info_2(ghcb, 0);
1298
1299		sev_es_wr_ghcb_msr(__pa(ghcb));
1300		VMGEXIT();
1301
1302		/* Wakeup signal? */
1303		if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
1304		    ghcb->save.sw_exit_info_2)
1305			break;
1306	}
1307
1308	__sev_put_ghcb(&state);
1309}
1310
1311/*
1312 * Play_dead handler when running under SEV-ES. This is needed because
 1313 * the hypervisor can't deliver a SIPI request to restart the AP.
1314 * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
1315 * hypervisor wakes it up again.
1316 */
1317static void sev_es_play_dead(void)
1318{
1319	play_dead_common();
1320
1321	/* IRQs now disabled */
1322
1323	sev_es_ap_hlt_loop();
1324
1325	/*
1326	 * If we get here, the VCPU was woken up again. Jump to CPU
1327	 * startup code to get it back online.
1328	 */
1329	start_cpu0();
1330}
1331#else  /* CONFIG_HOTPLUG_CPU */
1332#define sev_es_play_dead	native_play_dead
1333#endif /* CONFIG_HOTPLUG_CPU */
1334
1335#ifdef CONFIG_SMP
1336static void __init sev_es_setup_play_dead(void)
1337{
1338	smp_ops.play_dead = sev_es_play_dead;
1339}
1340#else
1341static inline void sev_es_setup_play_dead(void) { }
1342#endif
1343
1344static void __init alloc_runtime_data(int cpu)
1345{
1346	struct sev_es_runtime_data *data;
1347
1348	data = memblock_alloc(sizeof(*data), PAGE_SIZE);
1349	if (!data)
1350		panic("Can't allocate SEV-ES runtime data");
1351
1352	per_cpu(runtime_data, cpu) = data;
1353}
1354
1355static void __init init_ghcb(int cpu)
1356{
1357	struct sev_es_runtime_data *data;
1358	int err;
1359
1360	data = per_cpu(runtime_data, cpu);
1361
1362	err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
1363					 sizeof(data->ghcb_page));
1364	if (err)
1365		panic("Can't map GHCBs unencrypted");
1366
1367	memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));
1368
1369	data->ghcb_active = false;
1370	data->backup_ghcb_active = false;
1371}
1372
1373void __init sev_es_init_vc_handling(void)
1374{
1375	int cpu;
1376
1377	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);
1378
1379	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1380		return;
1381
1382	if (!sev_es_check_cpu_features())
1383		panic("SEV-ES CPU Features missing");
1384
1385	/*
1386	 * SNP is supported in v2 of the GHCB spec which mandates support for HV
1387	 * features.
1388	 */
1389	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
1390		sev_hv_features = get_hv_features();
1391
1392		if (!(sev_hv_features & GHCB_HV_FT_SNP))
1393			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
1394	}
1395
1396	/* Enable SEV-ES special handling */
1397	static_branch_enable(&sev_es_enable_key);
1398
1399	/* Initialize per-cpu GHCB pages */
1400	for_each_possible_cpu(cpu) {
1401		alloc_runtime_data(cpu);
1402		init_ghcb(cpu);
1403	}
1404
1405	sev_es_setup_play_dead();
1406
1407	/* Secondary CPUs use the runtime #VC handler */
1408	initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
1409}
1410
1411static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
1412{
1413	int trapnr = ctxt->fi.vector;
1414
1415	if (trapnr == X86_TRAP_PF)
1416		native_write_cr2(ctxt->fi.cr2);
1417
1418	ctxt->regs->orig_ax = ctxt->fi.error_code;
1419	do_early_exception(ctxt->regs, trapnr);
1420}
1421
1422static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
1423{
1424	long *reg_array;
1425	int offset;
1426
1427	reg_array = (long *)ctxt->regs;
1428	offset    = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);
1429
1430	if (offset < 0)
1431		return NULL;
1432
1433	offset /= sizeof(long);
1434
1435	return reg_array + offset;
1436}
1437static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
1438				 unsigned int bytes, bool read)
1439{
1440	u64 exit_code, exit_info_1, exit_info_2;
1441	unsigned long ghcb_pa = __pa(ghcb);
1442	enum es_result res;
1443	phys_addr_t paddr;
1444	void __user *ref;
1445
1446	ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
1447	if (ref == (void __user *)-1L)
1448		return ES_UNSUPPORTED;
1449
1450	exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
1451
1452	res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
1453	if (res != ES_OK) {
1454		if (res == ES_EXCEPTION && !read)
1455			ctxt->fi.error_code |= X86_PF_WRITE;
1456
1457		return res;
1458	}
1459
1460	exit_info_1 = paddr;
1461	/* Can never be greater than 8 */
1462	exit_info_2 = bytes;
1463
1464	ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
1465
1466	return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
1467}
1468
1469/*
1470 * The MOVS instruction has two memory operands, which raises the
1471 * problem that it is not known whether the access to the source or the
1472 * destination caused the #VC exception (and hence whether an MMIO read
1473 * or write operation needs to be emulated).
1474 *
1475 * Instead of playing games with walking page-tables and trying to guess
1476 * whether the source or destination is an MMIO range, split the move
1477 * into two operations, a read and a write with only one memory operand.
1478 * This will cause a nested #VC exception on the MMIO address which can
1479 * then be handled.
1480 *
1481 * This implementation has the benefit that it also supports MOVS where
1482 * source _and_ destination are MMIO regions.
1483 *
1484 * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
1485 * rare operation. If it turns out to be a performance problem the split
1486 * operations can be moved to memcpy_fromio() and memcpy_toio().
1487 */
1488static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
1489					  unsigned int bytes)
1490{
1491	unsigned long ds_base, es_base;
1492	unsigned char *src, *dst;
1493	unsigned char buffer[8];
1494	enum es_result ret;
1495	bool rep;
1496	int off;
1497
1498	ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
1499	es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
1500
1501	if (ds_base == -1L || es_base == -1L) {
1502		ctxt->fi.vector = X86_TRAP_GP;
1503		ctxt->fi.error_code = 0;
1504		return ES_EXCEPTION;
1505	}
1506
1507	src = ds_base + (unsigned char *)ctxt->regs->si;
1508	dst = es_base + (unsigned char *)ctxt->regs->di;
1509
1510	ret = vc_read_mem(ctxt, src, buffer, bytes);
1511	if (ret != ES_OK)
1512		return ret;
1513
1514	ret = vc_write_mem(ctxt, dst, buffer, bytes);
1515	if (ret != ES_OK)
1516		return ret;
1517
1518	if (ctxt->regs->flags & X86_EFLAGS_DF)
1519		off = -bytes;
1520	else
1521		off =  bytes;
1522
1523	ctxt->regs->si += off;
1524	ctxt->regs->di += off;
1525
1526	rep = insn_has_rep_prefix(&ctxt->insn);
1527	if (rep)
1528		ctxt->regs->cx -= 1;
1529
1530	if (!rep || ctxt->regs->cx == 0)
1531		return ES_OK;
1532	else
1533		return ES_RETRY;
1534}
1535
1536static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
1537{
1538	struct insn *insn = &ctxt->insn;
1539	enum insn_mmio_type mmio;
1540	unsigned int bytes = 0;
1541	enum es_result ret;
1542	u8 sign_byte;
1543	long *reg_data;
1544
1545	mmio = insn_decode_mmio(insn, &bytes);
1546	if (mmio == INSN_MMIO_DECODE_FAILED)
1547		return ES_DECODE_FAILED;
1548
1549	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
1550		reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
1551		if (!reg_data)
1552			return ES_DECODE_FAILED;
1553	}
1554
 1554
1555	switch (mmio) {
1556	case INSN_MMIO_WRITE:
1557		memcpy(ghcb->shared_buffer, reg_data, bytes);
1558		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
1559		break;
1560	case INSN_MMIO_WRITE_IMM:
1561		memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
1562		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
1563		break;
1564	case INSN_MMIO_READ:
1565		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
1566		if (ret)
1567			break;
1568
1569		/* Zero-extend for 32-bit operation */
1570		if (bytes == 4)
1571			*reg_data = 0;
1572
1573		memcpy(reg_data, ghcb->shared_buffer, bytes);
1574		break;
1575	case INSN_MMIO_READ_ZERO_EXTEND:
1576		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
1577		if (ret)
1578			break;
1579
1580		/* Zero extend based on operand size */
1581		memset(reg_data, 0, insn->opnd_bytes);
1582		memcpy(reg_data, ghcb->shared_buffer, bytes);
1583		break;
1584	case INSN_MMIO_READ_SIGN_EXTEND:
1585		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
1586		if (ret)
1587			break;
1588
1589		if (bytes == 1) {
1590			u8 *val = (u8 *)ghcb->shared_buffer;
1591
1592			sign_byte = (*val & 0x80) ? 0xff : 0x00;
1593		} else {
1594			u16 *val = (u16 *)ghcb->shared_buffer;
1595
1596			sign_byte = (*val & 0x8000) ? 0xff : 0x00;
1597		}
1598
1599		/* Sign extend based on operand size */
1600		memset(reg_data, sign_byte, insn->opnd_bytes);
1601		memcpy(reg_data, ghcb->shared_buffer, bytes);
1602		break;
1603	case INSN_MMIO_MOVS:
1604		ret = vc_handle_mmio_movs(ctxt, bytes);
1605		break;
1606	default:
1607		ret = ES_UNSUPPORTED;
1608		break;
1609	}
1610
1611	return ret;
1612}
1613
1614static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
1615					  struct es_em_ctxt *ctxt)
1616{
1617	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
1618	long val, *reg = vc_insn_get_rm(ctxt);
1619	enum es_result ret;
1620
1621	if (!reg)
1622		return ES_DECODE_FAILED;
1623
1624	val = *reg;
1625
1626	/* Upper 32 bits must be written as zeroes */
1627	if (val >> 32) {
1628		ctxt->fi.vector = X86_TRAP_GP;
1629		ctxt->fi.error_code = 0;
1630		return ES_EXCEPTION;
1631	}
1632
1633	/* Clear out other reserved bits and set bit 10 */
1634	val = (val & 0xffff23ffL) | BIT(10);
1635
1636	/* Early non-zero writes to DR7 are not supported */
1637	if (!data && (val & ~DR7_RESET_VALUE))
1638		return ES_UNSUPPORTED;
1639
1640	/* Using a value of 0 for ExitInfo1 means RAX holds the value */
1641	ghcb_set_rax(ghcb, val);
1642	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
1643	if (ret != ES_OK)
1644		return ret;
1645
1646	if (data)
1647		data->dr7 = val;
1648
1649	return ES_OK;
1650}
1651
1652static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
1653					 struct es_em_ctxt *ctxt)
1654{
1655	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
1656	long *reg = vc_insn_get_rm(ctxt);
1657
1658	if (!reg)
1659		return ES_DECODE_FAILED;
1660
1661	if (data)
1662		*reg = data->dr7;
1663	else
1664		*reg = DR7_RESET_VALUE;
1665
1666	return ES_OK;
1667}
1668
1669static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
1670				       struct es_em_ctxt *ctxt)
1671{
1672	return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
1673}
1674
1675static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
1676{
1677	enum es_result ret;
1678
1679	ghcb_set_rcx(ghcb, ctxt->regs->cx);
1680
1681	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
1682	if (ret != ES_OK)
1683		return ret;
1684
1685	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
1686		return ES_VMM_ERROR;
1687
1688	ctxt->regs->ax = ghcb->save.rax;
1689	ctxt->regs->dx = ghcb->save.rdx;
1690
1691	return ES_OK;
1692}
1693
1694static enum es_result vc_handle_monitor(struct ghcb *ghcb,
1695					struct es_em_ctxt *ctxt)
1696{
1697	/*
1698	 * Treat it as a NOP and do not leak a physical address to the
1699	 * hypervisor.
1700	 */
1701	return ES_OK;
1702}
1703
1704static enum es_result vc_handle_mwait(struct ghcb *ghcb,
1705				      struct es_em_ctxt *ctxt)
1706{
1707	/* Treat the same as MONITOR/MONITORX */
1708	return ES_OK;
1709}
1710
1711static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
1712					struct es_em_ctxt *ctxt)
1713{
1714	enum es_result ret;
1715
1716	ghcb_set_rax(ghcb, ctxt->regs->ax);
1717	ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);
1718
1719	if (x86_platform.hyper.sev_es_hcall_prepare)
1720		x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
1721
1722	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
1723	if (ret != ES_OK)
1724		return ret;
1725
1726	if (!ghcb_rax_is_valid(ghcb))
1727		return ES_VMM_ERROR;
1728
1729	ctxt->regs->ax = ghcb->save.rax;
1730
1731	/*
1732	 * Call sev_es_hcall_finish() after regs->ax is already set.
1733	 * This allows the hypervisor handler to overwrite it again if
1734	 * necessary.
1735	 */
1736	if (x86_platform.hyper.sev_es_hcall_finish &&
1737	    !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
1738		return ES_VMM_ERROR;
1739
1740	return ES_OK;
1741}
1742
1743static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
1744					struct es_em_ctxt *ctxt)
1745{
1746	/*
 1747	 * Calling exc_alignment_check() directly does not work, because it
1748	 * enables IRQs and the GHCB is active. Forward the exception and call
1749	 * it later from vc_forward_exception().
1750	 */
1751	ctxt->fi.vector = X86_TRAP_AC;
1752	ctxt->fi.error_code = 0;
1753	return ES_EXCEPTION;
1754}
1755
1756static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
1757					 struct ghcb *ghcb,
1758					 unsigned long exit_code)
1759{
1760	enum es_result result;
1761
1762	switch (exit_code) {
1763	case SVM_EXIT_READ_DR7:
1764		result = vc_handle_dr7_read(ghcb, ctxt);
1765		break;
1766	case SVM_EXIT_WRITE_DR7:
1767		result = vc_handle_dr7_write(ghcb, ctxt);
1768		break;
1769	case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
1770		result = vc_handle_trap_ac(ghcb, ctxt);
1771		break;
1772	case SVM_EXIT_RDTSC:
1773	case SVM_EXIT_RDTSCP:
1774		result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
1775		break;
1776	case SVM_EXIT_RDPMC:
1777		result = vc_handle_rdpmc(ghcb, ctxt);
1778		break;
1779	case SVM_EXIT_INVD:
1780		pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
1781		result = ES_UNSUPPORTED;
1782		break;
1783	case SVM_EXIT_CPUID:
1784		result = vc_handle_cpuid(ghcb, ctxt);
1785		break;
1786	case SVM_EXIT_IOIO:
1787		result = vc_handle_ioio(ghcb, ctxt);
1788		break;
1789	case SVM_EXIT_MSR:
1790		result = vc_handle_msr(ghcb, ctxt);
1791		break;
1792	case SVM_EXIT_VMMCALL:
1793		result = vc_handle_vmmcall(ghcb, ctxt);
1794		break;
1795	case SVM_EXIT_WBINVD:
1796		result = vc_handle_wbinvd(ghcb, ctxt);
1797		break;
1798	case SVM_EXIT_MONITOR:
1799		result = vc_handle_monitor(ghcb, ctxt);
1800		break;
1801	case SVM_EXIT_MWAIT:
1802		result = vc_handle_mwait(ghcb, ctxt);
1803		break;
1804	case SVM_EXIT_NPF:
1805		result = vc_handle_mmio(ghcb, ctxt);
1806		break;
1807	default:
1808		/*
1809		 * Unexpected #VC exception
1810		 */
1811		result = ES_UNSUPPORTED;
1812	}
1813
1814	return result;
1815}
1816
1817static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
1818{
1819	long error_code = ctxt->fi.error_code;
1820	int trapnr = ctxt->fi.vector;
1821
1822	ctxt->regs->orig_ax = ctxt->fi.error_code;
1823
1824	switch (trapnr) {
1825	case X86_TRAP_GP:
1826		exc_general_protection(ctxt->regs, error_code);
1827		break;
1828	case X86_TRAP_UD:
1829		exc_invalid_op(ctxt->regs);
1830		break;
1831	case X86_TRAP_PF:
1832		write_cr2(ctxt->fi.cr2);
1833		exc_page_fault(ctxt->regs, error_code);
1834		break;
1835	case X86_TRAP_AC:
1836		exc_alignment_check(ctxt->regs, error_code);
1837		break;
1838	default:
1839		pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
1840		BUG();
1841	}
1842}
1843
1844static __always_inline bool is_vc2_stack(unsigned long sp)
1845{
1846	return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
1847}
1848
1849static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
1850{
1851	unsigned long sp, prev_sp;
1852
1853	sp      = (unsigned long)regs;
1854	prev_sp = regs->sp;
1855
1856	/*
1857	 * If the code was already executing on the VC2 stack when the #VC
1858	 * happened, let it proceed to the normal handling routine. This way the
1859	 * code executing on the VC2 stack can cause #VC exceptions to get handled.
1860	 */
1861	return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
1862}
1863
1864static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
1865{
1866	struct ghcb_state state;
1867	struct es_em_ctxt ctxt;
1868	enum es_result result;
1869	struct ghcb *ghcb;
1870	bool ret = true;
1871
1872	ghcb = __sev_get_ghcb(&state);
1873
1874	vc_ghcb_invalidate(ghcb);
1875	result = vc_init_em_ctxt(&ctxt, regs, error_code);
1876
1877	if (result == ES_OK)
1878		result = vc_handle_exitcode(&ctxt, ghcb, error_code);
1879
1880	__sev_put_ghcb(&state);
1881
1882	/* Done - now check the result */
1883	switch (result) {
1884	case ES_OK:
1885		vc_finish_insn(&ctxt);
1886		break;
1887	case ES_UNSUPPORTED:
1888		pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
1889				   error_code, regs->ip);
1890		ret = false;
1891		break;
1892	case ES_VMM_ERROR:
1893		pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
1894				   error_code, regs->ip);
1895		ret = false;
1896		break;
1897	case ES_DECODE_FAILED:
1898		pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
1899				   error_code, regs->ip);
1900		ret = false;
1901		break;
1902	case ES_EXCEPTION:
1903		vc_forward_exception(&ctxt);
1904		break;
1905	case ES_RETRY:
1906		/* Nothing to do */
1907		break;
1908	default:
1909		pr_emerg("Unknown result in %s():%d\n", __func__, result);
1910		/*
1911		 * Emulating the instruction which caused the #VC exception
1912		 * failed - can't continue so print debug information
1913		 */
1914		BUG();
1915	}
1916
1917	return ret;
1918}
1919
1920static __always_inline bool vc_is_db(unsigned long error_code)
1921{
1922	return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
1923}
1924
1925/*
1926 * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
1927 * and will panic when an error happens.
1928 */
1929DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
1930{
1931	irqentry_state_t irq_state;
1932
1933	/*
1934	 * With the current implementation it is always possible to switch to a
1935	 * safe stack because #VC exceptions only happen at known places, like
1936	 * intercepted instructions or accesses to MMIO areas/IO ports. They can
1937	 * also happen with code instrumentation when the hypervisor intercepts
1938	 * #DB, but the critical paths are forbidden to be instrumented, so #DB
1939	 * exceptions currently also only happen in safe places.
1940	 *
1941	 * But keep this here in case the noinstr annotations are violated due
1942	 * to a bug elsewhere.
1943	 */
1944	if (unlikely(vc_from_invalid_context(regs))) {
1945		instrumentation_begin();
1946		panic("Can't handle #VC exception from unsupported context\n");
1947		instrumentation_end();
1948	}
1949
1950	/*
1951	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
1952	 */
1953	if (vc_is_db(error_code)) {
1954		exc_debug(regs);
1955		return;
1956	}
1957
1958	irq_state = irqentry_nmi_enter(regs);
1959
1960	instrumentation_begin();
1961
1962	if (!vc_raw_handle_exception(regs, error_code)) {
1963		/* Show some debug info */
1964		show_regs(regs);
1965
1966		/* Ask the hypervisor to terminate the guest */
1967		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
1968
1969		/* If that fails and we get here - just panic */
1970		panic("Returned from Terminate-Request to Hypervisor\n");
1971	}
1972
1973	instrumentation_end();
1974	irqentry_nmi_exit(regs, irq_state);
1975}
1976
1977/*
1978 * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
1979 * and will kill the current task with SIGBUS when an error happens.
1980 */
1981DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
1982{
1983	/*
1984	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
1985	 */
1986	if (vc_is_db(error_code)) {
1987		noist_exc_debug(regs);
1988		return;
1989	}
1990
1991	irqentry_enter_from_user_mode(regs);
1992	instrumentation_begin();
1993
1994	if (!vc_raw_handle_exception(regs, error_code)) {
1995		/*
1996		 * Do not kill the machine if user-space triggered the
1997		 * exception. Send SIGBUS instead and let user-space deal with
1998		 * it.
1999		 */
2000		force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
2001	}
2002
2003	instrumentation_end();
2004	irqentry_exit_to_user_mode(regs);
2005}
2006
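/*
 * Early #VC handler used while the kernel still runs on the statically
 * allocated boot GHCB, before the per-CPU GHCBs are set up.
 */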
2007bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
2008{
2009	unsigned long exit_code = regs->orig_ax;
2010	struct es_em_ctxt ctxt;
2011	enum es_result result;
2012
2013	vc_ghcb_invalidate(boot_ghcb);
2014
2015	result = vc_init_em_ctxt(&ctxt, regs, exit_code);
2016	if (result == ES_OK)
2017		result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);
2018
2019	/* Done - now check the result */
2020	switch (result) {
2021	case ES_OK:
2022		vc_finish_insn(&ctxt);
2023		break;
2024	case ES_UNSUPPORTED:
2025		early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
2026				exit_code, regs->ip);
2027		goto fail;
2028	case ES_VMM_ERROR:
2029		early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
2030				exit_code, regs->ip);
2031		goto fail;
2032	case ES_DECODE_FAILED:
2033		early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
2034				exit_code, regs->ip);
2035		goto fail;
2036	case ES_EXCEPTION:
2037		vc_early_forward_exception(&ctxt);
2038		break;
2039	case ES_RETRY:
2040		/* Nothing to do */
2041		break;
2042	default:
2043		BUG();
2044	}
2045
2046	return true;
2047
2048fail:
2049	show_regs(regs);
2050
2051	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
2052}
2053
2054/*
2055 * Initial set up of SNP relies on information provided by the
2056 * Confidential Computing blob, which can be passed to the kernel
2057 * in the following ways, depending on how it is booted:
2058 *
2059 * - when booted via the boot/decompress kernel:
2060 *   - via boot_params
2061 *
2062 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
2063 *   - via a setup_data entry, as defined by the Linux Boot Protocol
2064 *
2065 * Scan for the blob in that order.
2066 */
2067static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
2068{
2069	struct cc_blob_sev_info *cc_info;
2070
2071	/* Boot kernel would have passed the CC blob via boot_params. */
2072	if (bp->cc_blob_address) {
2073		cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
2074		goto found_cc_info;
2075	}
2076
2077	/*
2078	 * If the kernel was booted directly, without the use of the
2079	 * boot/decompression kernel, the CC blob may have been passed via
2080	 * setup_data instead.
2081	 */
2082	cc_info = find_cc_blob_setup_data(bp);
2083	if (!cc_info)
2084		return NULL;
2085
2086found_cc_info:
2087	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
2088		snp_abort();
2089
2090	return cc_info;
2091}
2092
2093bool __init snp_init(struct boot_params *bp)
2094{
2095	struct cc_blob_sev_info *cc_info;
2096
2097	if (!bp)
2098		return false;
2099
2100	cc_info = find_cc_blob(bp);
2101	if (!cc_info)
2102		return false;
2103
2104	setup_cpuid_table(cc_info);
2105
2106	/*
2107	 * The CC blob will be used later to access the secrets page. Cache
2108	 * it here like the boot kernel does.
2109	 */
2110	bp->cc_blob_address = (u32)(unsigned long)cc_info;
2111
2112	return true;
2113}
2114
2115void __init __noreturn snp_abort(void)
2116{
2117	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
2118}
2119
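/* Dump the SNP CPUID table entries when the guest is booted with sev=debug */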
2120static void dump_cpuid_table(void)
2121{
2122	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
2123	int i = 0;
2124
2125	pr_info("count=%d reserved=0x%x reserved2=0x%llx\n",
2126		cpuid_table->count, cpuid_table->__reserved1, cpuid_table->__reserved2);
2127
2128	for (i = 0; i < SNP_CPUID_COUNT_MAX; i++) {
2129		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
2130
2131		pr_info("index=%3d fn=0x%08x subfn=0x%08x: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x xcr0_in=0x%016llx xss_in=0x%016llx reserved=0x%016llx\n",
2132			i, fn->eax_in, fn->ecx_in, fn->eax, fn->ebx, fn->ecx,
2133			fn->edx, fn->xcr0_in, fn->xss_in, fn->__reserved);
2134	}
2135}
2136
2137/*
2138 * It is useful from an auditing/testing perspective to provide an easy way
2139 * for the guest owner to know that the CPUID table has been initialized as
2140 * expected, but that initialization happens too early in boot to print any
2141 * sort of indicator, and there's not really any other good place to do it,
2142 * so do it here.
2143 */
2144static int __init report_cpuid_table(void)
2145{
2146	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
2147
2148	if (!cpuid_table->count)
2149		return 0;
2150
2151	pr_info("Using SNP CPUID table, %d entries present.\n",
2152		cpuid_table->count);
2153
2154	if (sev_cfg.debug)
2155		dump_cpuid_table();
2156
2157	return 0;
2158}
2159arch_initcall(report_cpuid_table);
2160
2161static int __init init_sev_config(char *str)
2162{
2163	char *s;
2164
2165	while ((s = strsep(&str, ","))) {
2166		if (!strcmp(s, "debug")) {
2167			sev_cfg.debug = true;
2168			continue;
2169		}
2170
2171		pr_info("SEV command-line option '%s' was not recognized\n", s);
2172	}
2173
2174	return 1;
2175}
2176__setup("sev=", init_sev_config);
2177
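/*
 * Issue the given SNP guest request exit-code to the hypervisor via the
 * per-CPU GHCB. A non-zero sw_exit_info_2 is reported back through *fw_err.
 */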
2178int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err)
2179{
2180	struct ghcb_state state;
2181	struct es_em_ctxt ctxt;
2182	unsigned long flags;
2183	struct ghcb *ghcb;
2184	int ret;
2185
2186	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
2187		return -ENODEV;
2188
2189	if (!fw_err)
2190		return -EINVAL;
2191
2192	/*
2193	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
2194	 * a per-CPU GHCB.
2195	 */
2196	local_irq_save(flags);
2197
2198	ghcb = __sev_get_ghcb(&state);
2199	if (!ghcb) {
2200		ret = -EIO;
2201		goto e_restore_irq;
2202	}
2203
2204	vc_ghcb_invalidate(ghcb);
2205
2206	if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
2207		ghcb_set_rax(ghcb, input->data_gpa);
2208		ghcb_set_rbx(ghcb, input->data_npages);
2209	}
2210
2211	ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
2212	if (ret)
2213		goto e_put;
2214
2215	if (ghcb->save.sw_exit_info_2) {
2216		/* The number of expected pages is returned in RBX */
2217		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
2218		    ghcb->save.sw_exit_info_2 == SNP_GUEST_REQ_INVALID_LEN)
2219			input->data_npages = ghcb_get_rbx(ghcb);
2220
2221		*fw_err = ghcb->save.sw_exit_info_2;
2222
2223		ret = -EIO;
2224	}
2225
2226e_put:
2227	__sev_put_ghcb(&state);
2228e_restore_irq:
2229	local_irq_restore(flags);
2230
2231	return ret;
2232}
2233EXPORT_SYMBOL_GPL(snp_issue_guest_request);
2234
2235static struct platform_device sev_guest_device = {
2236	.name		= "sev-guest",
2237	.id		= -1,
2238};
2239
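/* Register the "sev-guest" platform device and pass it the secrets page GPA */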
2240static int __init snp_init_platform_device(void)
2241{
2242	struct sev_guest_platform_data data;
2243	u64 gpa;
2244
2245	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
2246		return -ENODEV;
2247
2248	gpa = get_secrets_page();
2249	if (!gpa)
2250		return -ENODEV;
2251
2252	data.secrets_gpa = gpa;
2253	if (platform_device_add_data(&sev_guest_device, &data, sizeof(data)))
2254		return -ENODEV;
2255
2256	if (platform_device_register(&sev_guest_device))
2257		return -ENODEV;
2258
2259	pr_info("SNP guest platform device initialized.\n");
2260	return 0;
2261}
2262device_initcall(snp_init_platform_device);
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * AMD Memory Encryption Support
   4 *
   5 * Copyright (C) 2019 SUSE
   6 *
   7 * Author: Joerg Roedel <jroedel@suse.de>
   8 */
   9
  10#define pr_fmt(fmt)	"SEV: " fmt
  11
  12#include <linux/sched/debug.h>	/* For show_regs() */
  13#include <linux/percpu-defs.h>
  14#include <linux/cc_platform.h>
  15#include <linux/printk.h>
  16#include <linux/mm_types.h>
  17#include <linux/set_memory.h>
  18#include <linux/memblock.h>
  19#include <linux/kernel.h>
  20#include <linux/mm.h>
  21#include <linux/cpumask.h>
  22#include <linux/efi.h>
  23#include <linux/platform_device.h>
  24#include <linux/io.h>
  25#include <linux/psp-sev.h>
  26#include <uapi/linux/sev-guest.h>
  27
  28#include <asm/cpu_entry_area.h>
  29#include <asm/stacktrace.h>
  30#include <asm/sev.h>
  31#include <asm/insn-eval.h>
  32#include <asm/fpu/xcr.h>
  33#include <asm/processor.h>
  34#include <asm/realmode.h>
  35#include <asm/setup.h>
  36#include <asm/traps.h>
  37#include <asm/svm.h>
  38#include <asm/smp.h>
  39#include <asm/cpu.h>
  40#include <asm/apic.h>
  41#include <asm/cpuid.h>
  42#include <asm/cmdline.h>
  43
  44#define DR7_RESET_VALUE        0x400
  45
  46/* AP INIT values as documented in the APM2  section "Processor Initialization State" */
  47#define AP_INIT_CS_LIMIT		0xffff
  48#define AP_INIT_DS_LIMIT		0xffff
  49#define AP_INIT_LDTR_LIMIT		0xffff
  50#define AP_INIT_GDTR_LIMIT		0xffff
  51#define AP_INIT_IDTR_LIMIT		0xffff
  52#define AP_INIT_TR_LIMIT		0xffff
  53#define AP_INIT_RFLAGS_DEFAULT		0x2
  54#define AP_INIT_DR6_DEFAULT		0xffff0ff0
  55#define AP_INIT_GPAT_DEFAULT		0x0007040600070406ULL
  56#define AP_INIT_XCR0_DEFAULT		0x1
  57#define AP_INIT_X87_FTW_DEFAULT		0x5555
  58#define AP_INIT_X87_FCW_DEFAULT		0x0040
  59#define AP_INIT_CR0_DEFAULT		0x60000010
  60#define AP_INIT_MXCSR_DEFAULT		0x1f80
  61
  62/* For early boot hypervisor communication in SEV-ES enabled guests */
  63static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
  64
  65/*
  66 * Needs to be in the .data section because we need it NULL before bss is
  67 * cleared
  68 */
  69static struct ghcb *boot_ghcb __section(".data");
  70
  71/* Bitmap of SEV features supported by the hypervisor */
  72static u64 sev_hv_features __ro_after_init;
  73
  74/* #VC handler runtime per-CPU data */
  75struct sev_es_runtime_data {
  76	struct ghcb ghcb_page;
  77
  78	/*
  79	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
  80	 * It is needed when an NMI happens while the #VC handler uses the real
  81	 * GHCB, and the NMI handler itself is causing another #VC exception. In
  82	 * that case the GHCB content of the first handler needs to be backed up
  83	 * and restored.
  84	 */
  85	struct ghcb backup_ghcb;
  86
  87	/*
  88	 * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
  89	 * There is no need for it to be atomic, because nothing is written to
  90	 * the GHCB between the read and the write of ghcb_active. So it is safe
  91	 * to use it when a nested #VC exception happens before the write.
  92	 *
  93	 * This is necessary for example in the #VC->NMI->#VC case when the NMI
  94	 * happens while the first #VC handler uses the GHCB. When the NMI code
  95	 * raises a second #VC handler it might overwrite the contents of the
  96	 * GHCB written by the first handler. To avoid this the content of the
  97	 * GHCB is saved and restored when the GHCB is detected to be in use
  98	 * already.
  99	 */
 100	bool ghcb_active;
 101	bool backup_ghcb_active;
 102
 103	/*
 104	 * Cached DR7 value - write it on DR7 writes and return it on reads.
 105	 * That value will never make it to the real hardware DR7 as debugging
 106	 * is currently unsupported in SEV-ES guests.
 107	 */
 108	unsigned long dr7;
 109};
 110
 111struct ghcb_state {
 112	struct ghcb *ghcb;
 113};
 114
 115static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
 116static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
 117
 118struct sev_config {
 119	__u64 debug		: 1,
 120
 121	      /*
 122	       * A flag used by __set_pages_state() that indicates when the
 123	       * per-CPU GHCB has been created and registered and thus can be
 124	       * used by the BSP instead of the early boot GHCB.
 125	       *
 126	       * For APs, the per-CPU GHCB is created before they are started
 127	       * and registered upon startup, so this flag can be used globally
 128	       * for the BSP and APs.
 129	       */
 130	      ghcbs_initialized	: 1,
 131
 132	      __reserved	: 62;
 133};
 134
 135static struct sev_config sev_cfg __read_mostly;
 136
 137static __always_inline bool on_vc_stack(struct pt_regs *regs)
 138{
 139	unsigned long sp = regs->sp;
 140
 141	/* User-mode RSP is not trusted */
 142	if (user_mode(regs))
 143		return false;
 144
 145	/* SYSCALL gap still has user-mode RSP */
 146	if (ip_within_syscall_gap(regs))
 147		return false;
 148
 149	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
 150}
 151
 152/*
 153 * This function handles the case when an NMI is raised in the #VC
 154 * exception handler entry code, before the #VC handler has switched off
 155 * its IST stack. In this case, the IST entry for #VC must be adjusted,
 156 * so that any nested #VC exception will not overwrite the stack
 157 * contents of the interrupted #VC handler.
 158 *
 159 * The IST entry is adjusted unconditionally so that it can also be
 160 * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
 161 * nested sev_es_ist_exit() call may adjust back the IST entry too
 162 * early.
 163 *
 164 * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
 165 * on the NMI IST stack, as they are only called from NMI handling code
 166 * right now.
 167 */
 168void noinstr __sev_es_ist_enter(struct pt_regs *regs)
 169{
 170	unsigned long old_ist, new_ist;
 171
 172	/* Read old IST entry */
 173	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
 174
 175	/*
 176	 * If NMI happened while on the #VC IST stack, set the new IST
 177	 * value below regs->sp, so that the interrupted stack frame is
 178	 * not overwritten by subsequent #VC exceptions.
 179	 */
 180	if (on_vc_stack(regs))
 181		new_ist = regs->sp;
 182
 183	/*
 184	 * Reserve additional 8 bytes and store old IST value so this
 185	 * adjustment can be unrolled in __sev_es_ist_exit().
 186	 */
 187	new_ist -= sizeof(old_ist);
 188	*(unsigned long *)new_ist = old_ist;
 189
 190	/* Set new IST entry */
 191	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
 192}
 193
 194void noinstr __sev_es_ist_exit(void)
 195{
 196	unsigned long ist;
 197
 198	/* Read IST entry */
 199	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
 200
 201	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
 202		return;
 203
 204	/* Read back old IST entry and write it to the TSS */
 205	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
 206}
 207
 208/*
 209 * Nothing shall interrupt this code path while holding the per-CPU
 210 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 211 *
 212 * Callers must disable local interrupts around it.
 213 */
 214static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
 215{
 216	struct sev_es_runtime_data *data;
 217	struct ghcb *ghcb;
 218
 219	WARN_ON(!irqs_disabled());
 220
 221	data = this_cpu_read(runtime_data);
 222	ghcb = &data->ghcb_page;
 223
 224	if (unlikely(data->ghcb_active)) {
 225		/* GHCB is already in use - save its contents */
 226
 227		if (unlikely(data->backup_ghcb_active)) {
 228			/*
 229			 * Backup-GHCB is also already in use. There is no way
 230			 * to continue here so just kill the machine. To make
 231			 * panic() work, mark GHCBs inactive so that messages
 232			 * can be printed out.
 233			 */
 234			data->ghcb_active        = false;
 235			data->backup_ghcb_active = false;
 236
 237			instrumentation_begin();
 238			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
 239			instrumentation_end();
 240		}
 241
 242		/* Mark backup_ghcb active before writing to it */
 243		data->backup_ghcb_active = true;
 244
 245		state->ghcb = &data->backup_ghcb;
 246
 247		/* Backup GHCB content */
 248		*state->ghcb = *ghcb;
 249	} else {
 250		state->ghcb = NULL;
 251		data->ghcb_active = true;
 252	}
 253
 254	return ghcb;
 255}
 256
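/* Raw read/write accessors for the GHCB MSR (MSR_AMD64_SEV_ES_GHCB) */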
 257static inline u64 sev_es_rd_ghcb_msr(void)
 258{
 259	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
 260}
 261
 262static __always_inline void sev_es_wr_ghcb_msr(u64 val)
 263{
 264	u32 low, high;
 265
 266	low  = (u32)(val);
 267	high = (u32)(val >> 32);
 268
 269	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
 270}
 271
 272static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
 273				unsigned char *buffer)
 274{
 275	return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
 276}
 277
 278static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
 279{
 280	char buffer[MAX_INSN_SIZE];
 281	int insn_bytes;
 282
 283	insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
 284	if (insn_bytes == 0) {
 285		/* Nothing could be copied */
 286		ctxt->fi.vector     = X86_TRAP_PF;
 287		ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
 288		ctxt->fi.cr2        = ctxt->regs->ip;
 289		return ES_EXCEPTION;
 290	} else if (insn_bytes == -EINVAL) {
 291		/* Effective RIP could not be calculated */
 292		ctxt->fi.vector     = X86_TRAP_GP;
 293		ctxt->fi.error_code = 0;
 294		ctxt->fi.cr2        = 0;
 295		return ES_EXCEPTION;
 296	}
 297
 298	if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
 299		return ES_DECODE_FAILED;
 300
 301	if (ctxt->insn.immediate.got)
 302		return ES_OK;
 303	else
 304		return ES_DECODE_FAILED;
 305}
 306
 307static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
 308{
 309	char buffer[MAX_INSN_SIZE];
 310	int res, ret;
 311
 312	res = vc_fetch_insn_kernel(ctxt, buffer);
 313	if (res) {
 314		ctxt->fi.vector     = X86_TRAP_PF;
 315		ctxt->fi.error_code = X86_PF_INSTR;
 316		ctxt->fi.cr2        = ctxt->regs->ip;
 317		return ES_EXCEPTION;
 318	}
 319
 320	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
 321	if (ret < 0)
 322		return ES_DECODE_FAILED;
 323	else
 324		return ES_OK;
 325}
 326
 327static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
 328{
 329	if (user_mode(ctxt->regs))
 330		return __vc_decode_user_insn(ctxt);
 331	else
 332		return __vc_decode_kern_insn(ctxt);
 333}
 334
 335static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
 336				   char *dst, char *buf, size_t size)
 337{
 338	unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
 339
 340	/*
 341	 * This function uses __put_user() independent of whether kernel or user
 342	 * memory is accessed. This works fine because __put_user() does no
 343	 * sanity checks of the pointer being accessed. All that it does is
 344	 * to report when the access failed.
 345	 *
 346	 * Also, this function runs in atomic context, so __put_user() is not
 347	 * allowed to sleep. The page-fault handler detects that it is running
 348	 * in atomic context and will not try to take mmap_sem and handle the
 349	 * fault, so additional pagefault_enable()/disable() calls are not
 350	 * needed.
 351	 *
 352	 * The access can't be done via copy_to_user() here because
 353	 * vc_write_mem() must not use string instructions to access unsafe
 354	 * memory. The reason is that MOVS is emulated by the #VC handler by
 355	 * splitting the move up into a read and a write and taking a nested #VC
 356	 * exception on whichever of them is the MMIO access. Using string
 357	 * instructions here would cause infinite nesting.
 358	 */
 359	switch (size) {
 360	case 1: {
 361		u8 d1;
 362		u8 __user *target = (u8 __user *)dst;
 363
 364		memcpy(&d1, buf, 1);
 365		if (__put_user(d1, target))
 366			goto fault;
 367		break;
 368	}
 369	case 2: {
 370		u16 d2;
 371		u16 __user *target = (u16 __user *)dst;
 372
 373		memcpy(&d2, buf, 2);
 374		if (__put_user(d2, target))
 375			goto fault;
 376		break;
 377	}
 378	case 4: {
 379		u32 d4;
 380		u32 __user *target = (u32 __user *)dst;
 381
 382		memcpy(&d4, buf, 4);
 383		if (__put_user(d4, target))
 384			goto fault;
 385		break;
 386	}
 387	case 8: {
 388		u64 d8;
 389		u64 __user *target = (u64 __user *)dst;
 390
 391		memcpy(&d8, buf, 8);
 392		if (__put_user(d8, target))
 393			goto fault;
 394		break;
 395	}
 396	default:
 397		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
 398		return ES_UNSUPPORTED;
 399	}
 400
 401	return ES_OK;
 402
 403fault:
 404	if (user_mode(ctxt->regs))
 405		error_code |= X86_PF_USER;
 406
 407	ctxt->fi.vector = X86_TRAP_PF;
 408	ctxt->fi.error_code = error_code;
 409	ctxt->fi.cr2 = (unsigned long)dst;
 410
 411	return ES_EXCEPTION;
 412}
 413
 414static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 415				  char *src, char *buf, size_t size)
 416{
 417	unsigned long error_code = X86_PF_PROT;
 418
 419	/*
 420	 * This function uses __get_user() independent of whether kernel or user
 421	 * memory is accessed. This works fine because __get_user() does no
 422	 * sanity checks of the pointer being accessed. All that it does is
 423	 * to report when the access failed.
 424	 *
 425	 * Also, this function runs in atomic context, so __get_user() is not
 426	 * allowed to sleep. The page-fault handler detects that it is running
 427	 * in atomic context and will not try to take mmap_sem and handle the
 428	 * fault, so additional pagefault_enable()/disable() calls are not
 429	 * needed.
 430	 *
 431	 * The access can't be done via copy_from_user() here because
 432	 * vc_read_mem() must not use string instructions to access unsafe
 433	 * memory. The reason is that MOVS is emulated by the #VC handler by
 434	 * splitting the move up into a read and a write and taking a nested #VC
 435	 * exception on whichever of them is the MMIO access. Using string
 436	 * instructions here would cause infinite nesting.
 437	 */
 438	switch (size) {
 439	case 1: {
 440		u8 d1;
 441		u8 __user *s = (u8 __user *)src;
 442
 443		if (__get_user(d1, s))
 444			goto fault;
 445		memcpy(buf, &d1, 1);
 446		break;
 447	}
 448	case 2: {
 449		u16 d2;
 450		u16 __user *s = (u16 __user *)src;
 451
 452		if (__get_user(d2, s))
 453			goto fault;
 454		memcpy(buf, &d2, 2);
 455		break;
 456	}
 457	case 4: {
 458		u32 d4;
 459		u32 __user *s = (u32 __user *)src;
 460
 461		if (__get_user(d4, s))
 462			goto fault;
 463		memcpy(buf, &d4, 4);
 464		break;
 465	}
 466	case 8: {
 467		u64 d8;
 468		u64 __user *s = (u64 __user *)src;
 469		if (__get_user(d8, s))
 470			goto fault;
 471		memcpy(buf, &d8, 8);
 472		break;
 473	}
 474	default:
 475		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
 476		return ES_UNSUPPORTED;
 477	}
 478
 479	return ES_OK;
 480
 481fault:
 482	if (user_mode(ctxt->regs))
 483		error_code |= X86_PF_USER;
 484
 485	ctxt->fi.vector = X86_TRAP_PF;
 486	ctxt->fi.error_code = error_code;
 487	ctxt->fi.cr2 = (unsigned long)src;
 488
 489	return ES_EXCEPTION;
 490}
 491
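/*
 * Translate a virtual address to a physical address by walking the page
 * tables. Used for emulated MMIO, which must not target encrypted memory.
 */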
 492static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
 493					   unsigned long vaddr, phys_addr_t *paddr)
 494{
 495	unsigned long va = (unsigned long)vaddr;
 496	unsigned int level;
 497	phys_addr_t pa;
 498	pgd_t *pgd;
 499	pte_t *pte;
 500
 501	pgd = __va(read_cr3_pa());
 502	pgd = &pgd[pgd_index(va)];
 503	pte = lookup_address_in_pgd(pgd, va, &level);
 504	if (!pte) {
 505		ctxt->fi.vector     = X86_TRAP_PF;
 506		ctxt->fi.cr2        = vaddr;
 507		ctxt->fi.error_code = 0;
 508
 509		if (user_mode(ctxt->regs))
 510			ctxt->fi.error_code |= X86_PF_USER;
 511
 512		return ES_EXCEPTION;
 513	}
 514
 515	if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
 516		/* Emulated MMIO to/from encrypted memory not supported */
 517		return ES_UNSUPPORTED;
 518
 519	pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
 520	pa |= va & ~page_level_mask(level);
 521
 522	*paddr = pa;
 523
 524	return ES_OK;
 525}
 526
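/*
 * Check that a user-mode port access is allowed by the task's I/O
 * permission bitmap; a #GP is injected if it is not.
 */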
 527static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
 528{
 529	BUG_ON(size > 4);
 530
 531	if (user_mode(ctxt->regs)) {
 532		struct thread_struct *t = &current->thread;
 533		struct io_bitmap *iobm = t->io_bitmap;
 534		size_t idx;
 535
 536		if (!iobm)
 537			goto fault;
 538
 539		for (idx = port; idx < port + size; ++idx) {
 540			if (test_bit(idx, iobm->bitmap))
 541				goto fault;
 542		}
 543	}
 544
 545	return ES_OK;
 546
 547fault:
 548	ctxt->fi.vector = X86_TRAP_GP;
 549	ctxt->fi.error_code = 0;
 550
 551	return ES_EXCEPTION;
 552}
 553
 554/* Include code shared with pre-decompression boot stage */
 555#include "sev-shared.c"
 556
 557static noinstr void __sev_put_ghcb(struct ghcb_state *state)
 558{
 559	struct sev_es_runtime_data *data;
 560	struct ghcb *ghcb;
 561
 562	WARN_ON(!irqs_disabled());
 563
 564	data = this_cpu_read(runtime_data);
 565	ghcb = &data->ghcb_page;
 566
 567	if (state->ghcb) {
 568		/* Restore GHCB from Backup */
 569		*ghcb = *state->ghcb;
 570		data->backup_ghcb_active = false;
 571		state->ghcb = NULL;
 572	} else {
 573		/*
 574		 * Invalidate the GHCB so a VMGEXIT instruction issued
 575		 * from userspace won't appear to be valid.
 576		 */
 577		vc_ghcb_invalidate(ghcb);
 578		data->ghcb_active = false;
 579	}
 580}
 581
 582void noinstr __sev_es_nmi_complete(void)
 583{
 584	struct ghcb_state state;
 585	struct ghcb *ghcb;
 586
 587	ghcb = __sev_get_ghcb(&state);
 588
 589	vc_ghcb_invalidate(ghcb);
 590	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
 591	ghcb_set_sw_exit_info_1(ghcb, 0);
 592	ghcb_set_sw_exit_info_2(ghcb, 0);
 593
 594	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
 595	VMGEXIT();
 596
 597	__sev_put_ghcb(&state);
 598}
 599
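/*
 * Read the Confidential Computing blob referenced by boot_params and return
 * the physical address of the SNP secrets page, or 0 if it is unavailable.
 */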
 600static u64 __init get_secrets_page(void)
 601{
 602	u64 pa_data = boot_params.cc_blob_address;
 603	struct cc_blob_sev_info info;
 604	void *map;
 605
 606	/*
607	 * The CC blob contains the address of the secrets page; check that the
608	 * blob is present first.
 609	 */
 610	if (!pa_data)
 611		return 0;
 612
 613	map = early_memremap(pa_data, sizeof(info));
 614	if (!map) {
 615		pr_err("Unable to locate SNP secrets page: failed to map the Confidential Computing blob.\n");
 616		return 0;
 617	}
 618	memcpy(&info, map, sizeof(info));
 619	early_memunmap(map, sizeof(info));
 620
 621	/* smoke-test the secrets page passed */
 622	if (!info.secrets_phys || info.secrets_len != PAGE_SIZE)
 623		return 0;
 624
 625	return info.secrets_phys;
 626}
 627
 628static u64 __init get_snp_jump_table_addr(void)
 629{
 630	struct snp_secrets_page_layout *layout;
 631	void __iomem *mem;
 632	u64 pa, addr;
 633
 634	pa = get_secrets_page();
 635	if (!pa)
 636		return 0;
 637
 638	mem = ioremap_encrypted(pa, PAGE_SIZE);
 639	if (!mem) {
 640		pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
 641		return 0;
 642	}
 643
 644	layout = (__force struct snp_secrets_page_layout *)mem;
 645
 646	addr = layout->os_area.ap_jump_table_pa;
 647	iounmap(mem);
 648
 649	return addr;
 650}
 651
 652static u64 __init get_jump_table_addr(void)
 653{
 654	struct ghcb_state state;
 655	unsigned long flags;
 656	struct ghcb *ghcb;
 657	u64 ret = 0;
 658
 659	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
 660		return get_snp_jump_table_addr();
 661
 662	local_irq_save(flags);
 663
 664	ghcb = __sev_get_ghcb(&state);
 665
 666	vc_ghcb_invalidate(ghcb);
 667	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
 668	ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
 669	ghcb_set_sw_exit_info_2(ghcb, 0);
 670
 671	sev_es_wr_ghcb_msr(__pa(ghcb));
 672	VMGEXIT();
 673
 674	if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
 675	    ghcb_sw_exit_info_2_is_valid(ghcb))
 676		ret = ghcb->save.sw_exit_info_2;
 677
 678	__sev_put_ghcb(&state);
 679
 680	local_irq_restore(flags);
 681
 682	return ret;
 683}
 684
 685static void early_set_pages_state(unsigned long vaddr, unsigned long paddr,
 686				  unsigned long npages, enum psc_op op)
 687{
 688	unsigned long paddr_end;
 689	u64 val;
 690	int ret;
 691
 692	vaddr = vaddr & PAGE_MASK;
 693
 694	paddr = paddr & PAGE_MASK;
 695	paddr_end = paddr + (npages << PAGE_SHIFT);
 696
 697	while (paddr < paddr_end) {
 698		if (op == SNP_PAGE_STATE_SHARED) {
 699			/* Page validation must be rescinded before changing to shared */
 700			ret = pvalidate(vaddr, RMP_PG_SIZE_4K, false);
 701			if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret))
 702				goto e_term;
 703		}
 704
 705		/*
 706		 * Use the MSR protocol because this function can be called before
 707		 * the GHCB is established.
 708		 */
 709		sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
 710		VMGEXIT();
 711
 712		val = sev_es_rd_ghcb_msr();
 713
 714		if (WARN(GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP,
 715			 "Wrong PSC response code: 0x%x\n",
 716			 (unsigned int)GHCB_RESP_CODE(val)))
 717			goto e_term;
 718
 719		if (WARN(GHCB_MSR_PSC_RESP_VAL(val),
 720			 "Failed to change page state to '%s' paddr 0x%lx error 0x%llx\n",
 721			 op == SNP_PAGE_STATE_PRIVATE ? "private" : "shared",
 722			 paddr, GHCB_MSR_PSC_RESP_VAL(val)))
 723			goto e_term;
 724
 725		if (op == SNP_PAGE_STATE_PRIVATE) {
 726			/* Page validation must be performed after changing to private */
 727			ret = pvalidate(vaddr, RMP_PG_SIZE_4K, true);
 728			if (WARN(ret, "Failed to validate address 0x%lx ret %d", paddr, ret))
 729				goto e_term;
 730		}
 731
 732		vaddr += PAGE_SIZE;
 733		paddr += PAGE_SIZE;
 734	}
 735
 736	return;
 737
 738e_term:
 739	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
 740}
 741
 742void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
 743					 unsigned long npages)
 744{
 745	/*
 746	 * This can be invoked in early boot while running identity mapped, so
 747	 * use an open coded check for SNP instead of using cc_platform_has().
 748	 * This eliminates worries about jump tables or checking boot_cpu_data
 749	 * in the cc_platform_has() function.
 750	 */
 751	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
 752		return;
 753
 754	 /*
 755	  * Ask the hypervisor to mark the memory pages as private in the RMP
 756	  * table.
 757	  */
 758	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_PRIVATE);
 759}
 760
 761void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
 762					unsigned long npages)
 763{
 764	/*
 765	 * This can be invoked in early boot while running identity mapped, so
 766	 * use an open coded check for SNP instead of using cc_platform_has().
 767	 * This eliminates worries about jump tables or checking boot_cpu_data
 768	 * in the cc_platform_has() function.
 769	 */
 770	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
 771		return;
 772
 773	 /* Ask hypervisor to mark the memory pages shared in the RMP table. */
 774	early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
 775}
 776
 777void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
 778{
 779	unsigned long vaddr, npages;
 780
 781	vaddr = (unsigned long)__va(paddr);
 782	npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
 783
 784	if (op == SNP_PAGE_STATE_PRIVATE)
 785		early_snp_set_memory_private(vaddr, paddr, npages);
 786	else if (op == SNP_PAGE_STATE_SHARED)
 787		early_snp_set_memory_shared(vaddr, paddr, npages);
 788	else
 789		WARN(1, "invalid memory op %d\n", op);
 790}
 791
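/*
 * Fill a Page State Change descriptor with as many entries as fit into a
 * single request, issue it to the hypervisor and pvalidate the pages as
 * required. Returns the virtual address up to which the range was processed.
 */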
 792static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
 793				       unsigned long vaddr_end, int op)
 794{
 795	struct ghcb_state state;
 796	bool use_large_entry;
 797	struct psc_hdr *hdr;
 798	struct psc_entry *e;
 799	unsigned long flags;
 800	unsigned long pfn;
 801	struct ghcb *ghcb;
 802	int i;
 803
 804	hdr = &data->hdr;
 805	e = data->entries;
 806
 807	memset(data, 0, sizeof(*data));
 808	i = 0;
 809
 810	while (vaddr < vaddr_end && i < ARRAY_SIZE(data->entries)) {
 811		hdr->end_entry = i;
 812
 813		if (is_vmalloc_addr((void *)vaddr)) {
 814			pfn = vmalloc_to_pfn((void *)vaddr);
 815			use_large_entry = false;
 816		} else {
 817			pfn = __pa(vaddr) >> PAGE_SHIFT;
 818			use_large_entry = true;
 819		}
 820
 821		e->gfn = pfn;
 822		e->operation = op;
 823
 824		if (use_large_entry && IS_ALIGNED(vaddr, PMD_SIZE) &&
 825		    (vaddr_end - vaddr) >= PMD_SIZE) {
 826			e->pagesize = RMP_PG_SIZE_2M;
 827			vaddr += PMD_SIZE;
 828		} else {
 829			e->pagesize = RMP_PG_SIZE_4K;
 830			vaddr += PAGE_SIZE;
 831		}
 832
 833		e++;
 834		i++;
 835	}
 836
 837	/* Page validation must be rescinded before changing to shared */
 838	if (op == SNP_PAGE_STATE_SHARED)
 839		pvalidate_pages(data);
 840
 841	local_irq_save(flags);
 842
 843	if (sev_cfg.ghcbs_initialized)
 844		ghcb = __sev_get_ghcb(&state);
 845	else
 846		ghcb = boot_ghcb;
 847
 848	/* Invoke the hypervisor to perform the page state changes */
 849	if (!ghcb || vmgexit_psc(ghcb, data))
 850		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
 851
 852	if (sev_cfg.ghcbs_initialized)
 853		__sev_put_ghcb(&state);
 854
 855	local_irq_restore(flags);
 856
 857	/* Page validation must be performed after changing to private */
 858	if (op == SNP_PAGE_STATE_PRIVATE)
 859		pvalidate_pages(data);
 860
 861	return vaddr;
 862}
 863
 864static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
 865{
 866	struct snp_psc_desc desc;
 867	unsigned long vaddr_end;
 868
 869	/* Use the MSR protocol when a GHCB is not available. */
 870	if (!boot_ghcb)
 871		return early_set_pages_state(vaddr, __pa(vaddr), npages, op);
 872
 873	vaddr = vaddr & PAGE_MASK;
 874	vaddr_end = vaddr + (npages << PAGE_SHIFT);
 875
 876	while (vaddr < vaddr_end)
 877		vaddr = __set_pages_state(&desc, vaddr, vaddr_end, op);
 878}
 879
 880void snp_set_memory_shared(unsigned long vaddr, unsigned long npages)
 881{
 882	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
 883		return;
 884
 885	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
 886}
 887
 888void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
 889{
 890	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
 891		return;
 892
 893	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
 894}
 895
 896void snp_accept_memory(phys_addr_t start, phys_addr_t end)
 897{
 898	unsigned long vaddr, npages;
 899
 900	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
 901		return;
 902
 903	vaddr = (unsigned long)__va(start);
 904	npages = (end - start) >> PAGE_SHIFT;
 905
 906	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
 907}
 908
 909static int snp_set_vmsa(void *va, bool vmsa)
 910{
 911	u64 attrs;
 912
 913	/*
 914	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
 915	 * using the RMPADJUST instruction. However, for the instruction to
 916	 * succeed it must target the permissions of a lesser privileged
 917	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
 918	 * instruction in the AMD64 APM Volume 3).
 919	 */
 920	attrs = 1;
 921	if (vmsa)
 922		attrs |= RMPADJUST_VMSA_PAGE_BIT;
 923
 924	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
 925}
 926
 927#define __ATTR_BASE		(SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
 928#define INIT_CS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
 929#define INIT_DS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)
 930
 931#define INIT_LDTR_ATTRIBS	(SVM_SELECTOR_P_MASK | 2)
 932#define INIT_TR_ATTRIBS		(SVM_SELECTOR_P_MASK | 3)
 933
 934static void *snp_alloc_vmsa_page(void)
 935{
 936	struct page *p;
 937
 938	/*
939	 * Allocate a VMSA page to work around the SNP erratum where the CPU will
940	 * incorrectly signal an RMP violation #PF if a large page (2MB or 1GB)
941	 * collides with the RMP entry of the VMSA page. The recommended workaround
 942	 * is to not use a large page.
 943	 *
 944	 * Allocate an 8k page which is also 8k-aligned.
 945	 */
 946	p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
 947	if (!p)
 948		return NULL;
 949
 950	split_page(p, 1);
 951
 952	/* Free the first 4k. This page may be 2M/1G aligned and cannot be used. */
 953	__free_page(p);
 954
 955	return page_address(p + 1);
 956}
 957
 958static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
 959{
 960	int err;
 961
 962	err = snp_set_vmsa(vmsa, false);
 963	if (err)
 964		pr_err("clear VMSA page failed (%u), leaking page\n", err);
 965	else
 966		free_page((unsigned long)vmsa);
 967}
 968
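/*
 * Create and register a fresh VMSA for the target AP and ask the hypervisor
 * to start the vCPU from it via the AP Creation VMGEXIT.
 */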
 969static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip)
 970{
 971	struct sev_es_save_area *cur_vmsa, *vmsa;
 972	struct ghcb_state state;
 973	unsigned long flags;
 974	struct ghcb *ghcb;
 975	u8 sipi_vector;
 976	int cpu, ret;
 977	u64 cr4;
 978
 979	/*
 980	 * The hypervisor SNP feature support check has happened earlier, just check
 981	 * the AP_CREATION one here.
 982	 */
 983	if (!(sev_hv_features & GHCB_HV_FT_SNP_AP_CREATION))
 984		return -EOPNOTSUPP;
 985
 986	/*
 987	 * Verify the desired start IP against the known trampoline start IP
 988	 * to catch any future new trampolines that may be introduced that
 989	 * would require a new protected guest entry point.
 990	 */
 991	if (WARN_ONCE(start_ip != real_mode_header->trampoline_start,
 992		      "Unsupported SNP start_ip: %lx\n", start_ip))
 993		return -EINVAL;
 994
 995	/* Override start_ip with known protected guest start IP */
 996	start_ip = real_mode_header->sev_es_trampoline_start;
 997
 998	/* Find the logical CPU for the APIC ID */
 999	for_each_present_cpu(cpu) {
1000		if (arch_match_cpu_phys_id(cpu, apic_id))
1001			break;
1002	}
1003	if (cpu >= nr_cpu_ids)
1004		return -EINVAL;
1005
1006	cur_vmsa = per_cpu(sev_vmsa, cpu);
1007
1008	/*
1009	 * A new VMSA is created each time because there is no guarantee that
1010	 * the current VMSA is the kernel's or that the vCPU is not running. If
1011	 * an attempt were made to use the current VMSA with a running vCPU, a
1012	 * #VMEXIT of that vCPU would wipe out all of the settings being done
1013	 * here.
1014	 */
1015	vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page();
1016	if (!vmsa)
1017		return -ENOMEM;
1018
1019	/* CR4 should maintain the MCE value */
1020	cr4 = native_read_cr4() & X86_CR4_MCE;
1021
1022	/* Set the CS value based on the start_ip converted to a SIPI vector */
1023	sipi_vector		= (start_ip >> 12);
1024	vmsa->cs.base		= sipi_vector << 12;
1025	vmsa->cs.limit		= AP_INIT_CS_LIMIT;
1026	vmsa->cs.attrib		= INIT_CS_ATTRIBS;
1027	vmsa->cs.selector	= sipi_vector << 8;
1028
1029	/* Set the RIP value based on start_ip */
1030	vmsa->rip		= start_ip & 0xfff;
1031
1032	/* Set AP INIT defaults as documented in the APM */
1033	vmsa->ds.limit		= AP_INIT_DS_LIMIT;
1034	vmsa->ds.attrib		= INIT_DS_ATTRIBS;
1035	vmsa->es		= vmsa->ds;
1036	vmsa->fs		= vmsa->ds;
1037	vmsa->gs		= vmsa->ds;
1038	vmsa->ss		= vmsa->ds;
1039
1040	vmsa->gdtr.limit	= AP_INIT_GDTR_LIMIT;
1041	vmsa->ldtr.limit	= AP_INIT_LDTR_LIMIT;
1042	vmsa->ldtr.attrib	= INIT_LDTR_ATTRIBS;
1043	vmsa->idtr.limit	= AP_INIT_IDTR_LIMIT;
1044	vmsa->tr.limit		= AP_INIT_TR_LIMIT;
1045	vmsa->tr.attrib		= INIT_TR_ATTRIBS;
1046
1047	vmsa->cr4		= cr4;
1048	vmsa->cr0		= AP_INIT_CR0_DEFAULT;
1049	vmsa->dr7		= DR7_RESET_VALUE;
1050	vmsa->dr6		= AP_INIT_DR6_DEFAULT;
1051	vmsa->rflags		= AP_INIT_RFLAGS_DEFAULT;
1052	vmsa->g_pat		= AP_INIT_GPAT_DEFAULT;
1053	vmsa->xcr0		= AP_INIT_XCR0_DEFAULT;
1054	vmsa->mxcsr		= AP_INIT_MXCSR_DEFAULT;
1055	vmsa->x87_ftw		= AP_INIT_X87_FTW_DEFAULT;
1056	vmsa->x87_fcw		= AP_INIT_X87_FCW_DEFAULT;
1057
1058	/* SVME must be set. */
1059	vmsa->efer		= EFER_SVME;
1060
1061	/*
1062	 * Set the SNP-specific fields for this VMSA:
1063	 *   VMPL level
1064	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
1065	 */
1066	vmsa->vmpl		= 0;
1067	vmsa->sev_features	= sev_status >> 2;
1068
1069	/* Switch the page over to a VMSA page now that it is initialized */
1070	ret = snp_set_vmsa(vmsa, true);
1071	if (ret) {
1072		pr_err("set VMSA page failed (%u)\n", ret);
1073		free_page((unsigned long)vmsa);
1074
1075		return -EINVAL;
1076	}
1077
1078	/* Issue VMGEXIT AP Creation NAE event */
1079	local_irq_save(flags);
1080
1081	ghcb = __sev_get_ghcb(&state);
1082
1083	vc_ghcb_invalidate(ghcb);
1084	ghcb_set_rax(ghcb, vmsa->sev_features);
1085	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
1086	ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE);
1087	ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
1088
1089	sev_es_wr_ghcb_msr(__pa(ghcb));
1090	VMGEXIT();
1091
1092	if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
1093	    lower_32_bits(ghcb->save.sw_exit_info_1)) {
1094		pr_err("SNP AP Creation error\n");
1095		ret = -EINVAL;
1096	}
1097
1098	__sev_put_ghcb(&state);
1099
1100	local_irq_restore(flags);
1101
1102	/* Perform cleanup if there was an error */
1103	if (ret) {
1104		snp_cleanup_vmsa(vmsa);
1105		vmsa = NULL;
1106	}
1107
1108	/* Free up any previous VMSA page */
1109	if (cur_vmsa)
1110		snp_cleanup_vmsa(cur_vmsa);
1111
1112	/* Record the current VMSA page */
1113	per_cpu(sev_vmsa, cpu) = vmsa;
1114
1115	return ret;
1116}
1117
1118void __init snp_set_wakeup_secondary_cpu(void)
1119{
1120	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1121		return;
1122
1123	/*
1124	 * Always set this override if SNP is enabled. This makes it the
1125	 * required method to start APs under SNP. If the hypervisor does
1126	 * not support AP creation, then no APs will be started.
1127	 */
1128	apic_update_callback(wakeup_secondary_cpu, wakeup_cpu_via_vmgexit);
1129}
1130
1131int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
1132{
1133	u16 startup_cs, startup_ip;
1134	phys_addr_t jump_table_pa;
1135	u64 jump_table_addr;
1136	u16 __iomem *jump_table;
1137
1138	jump_table_addr = get_jump_table_addr();
1139
1140	/* On UP guests there is no jump table so this is not a failure */
1141	if (!jump_table_addr)
1142		return 0;
1143
1144	/* Check if AP Jump Table is page-aligned */
1145	if (jump_table_addr & ~PAGE_MASK)
1146		return -EINVAL;
1147
1148	jump_table_pa = jump_table_addr & PAGE_MASK;
1149
1150	startup_cs = (u16)(rmh->trampoline_start >> 4);
1151	startup_ip = (u16)(rmh->sev_es_trampoline_start -
1152			   rmh->trampoline_start);
1153
1154	jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
1155	if (!jump_table)
1156		return -EIO;
1157
1158	writew(startup_ip, &jump_table[0]);
1159	writew(startup_cs, &jump_table[1]);
1160
1161	iounmap(jump_table);
1162
1163	return 0;
1164}
1165
1166/*
1167 * This is needed by the OVMF UEFI firmware which will use whatever it finds in
1168 * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
1169 * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
1170 */
1171int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
1172{
1173	struct sev_es_runtime_data *data;
1174	unsigned long address, pflags;
1175	int cpu;
1176	u64 pfn;
1177
1178	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1179		return 0;
1180
1181	pflags = _PAGE_NX | _PAGE_RW;
1182
1183	for_each_possible_cpu(cpu) {
1184		data = per_cpu(runtime_data, cpu);
1185
1186		address = __pa(&data->ghcb_page);
1187		pfn = address >> PAGE_SHIFT;
1188
1189		if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
1190			return 1;
1191	}
1192
1193	return 0;
1194}
1195
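/*
 * Handle intercepted RDMSR/WRMSR instructions by forwarding them to the
 * hypervisor via a GHCB call with the SVM_EXIT_MSR exit-code.
 */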
1196static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
1197{
1198	struct pt_regs *regs = ctxt->regs;
1199	enum es_result ret;
1200	u64 exit_info_1;
1201
1202	/* Is it a WRMSR? */
1203	exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;
1204
1205	ghcb_set_rcx(ghcb, regs->cx);
1206	if (exit_info_1) {
1207		ghcb_set_rax(ghcb, regs->ax);
1208		ghcb_set_rdx(ghcb, regs->dx);
1209	}
1210
1211	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);
1212
1213	if ((ret == ES_OK) && (!exit_info_1)) {
1214		regs->ax = ghcb->save.rax;
1215		regs->dx = ghcb->save.rdx;
1216	}
1217
1218	return ret;
1219}
1220
1221static void snp_register_per_cpu_ghcb(void)
1222{
1223	struct sev_es_runtime_data *data;
1224	struct ghcb *ghcb;
1225
1226	data = this_cpu_read(runtime_data);
1227	ghcb = &data->ghcb_page;
1228
1229	snp_register_ghcb_early(__pa(ghcb));
1230}
1231
1232void setup_ghcb(void)
1233{
1234	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1235		return;
1236
1237	/*
1238	 * Check whether the runtime #VC exception handler is active. It uses
1239	 * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
1240	 *
1241	 * If SNP is active, register the per-CPU GHCB page so that the runtime
1242	 * exception handler can use it.
1243	 */
1244	if (initial_vc_handler == (unsigned long)kernel_exc_vmm_communication) {
1245		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1246			snp_register_per_cpu_ghcb();
1247
1248		sev_cfg.ghcbs_initialized = true;
1249
1250		return;
1251	}
1252
1253	/*
1254	 * Make sure the hypervisor talks a supported protocol.
1255	 * This gets called only in the BSP boot phase.
1256	 */
1257	if (!sev_es_negotiate_protocol())
1258		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
1259
1260	/*
1261	 * Clear the boot_ghcb. The first exception comes in before the bss
1262	 * section is cleared.
1263	 */
1264	memset(&boot_ghcb_page, 0, PAGE_SIZE);
1265
1266	/* Alright - Make the boot-ghcb public */
1267	boot_ghcb = &boot_ghcb_page;
1268
1269	/* An SNP guest requires that the GHCB GPA be registered. */
1270	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1271		snp_register_ghcb_early(__pa(&boot_ghcb_page));
1272}
1273
1274#ifdef CONFIG_HOTPLUG_CPU
1275static void sev_es_ap_hlt_loop(void)
1276{
1277	struct ghcb_state state;
1278	struct ghcb *ghcb;
1279
1280	ghcb = __sev_get_ghcb(&state);
1281
1282	while (true) {
1283		vc_ghcb_invalidate(ghcb);
1284		ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
1285		ghcb_set_sw_exit_info_1(ghcb, 0);
1286		ghcb_set_sw_exit_info_2(ghcb, 0);
1287
1288		sev_es_wr_ghcb_msr(__pa(ghcb));
1289		VMGEXIT();
1290
1291		/* Wakeup signal? */
1292		if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
1293		    ghcb->save.sw_exit_info_2)
1294			break;
1295	}
1296
1297	__sev_put_ghcb(&state);
1298}
1299
1300/*
1301 * Play_dead handler when running under SEV-ES. This is needed because
1302 * the hypervisor can't deliver a SIPI request to restart the AP.
1303 * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
1304 * hypervisor wakes it up again.
1305 */
1306static void sev_es_play_dead(void)
1307{
1308	play_dead_common();
1309
1310	/* IRQs now disabled */
1311
1312	sev_es_ap_hlt_loop();
1313
1314	/*
1315	 * If we get here, the VCPU was woken up again. Jump to CPU
1316	 * startup code to get it back online.
1317	 */
1318	soft_restart_cpu();
1319}
1320#else  /* CONFIG_HOTPLUG_CPU */
1321#define sev_es_play_dead	native_play_dead
1322#endif /* CONFIG_HOTPLUG_CPU */
1323
1324#ifdef CONFIG_SMP
1325static void __init sev_es_setup_play_dead(void)
1326{
1327	smp_ops.play_dead = sev_es_play_dead;
1328}
1329#else
1330static inline void sev_es_setup_play_dead(void) { }
1331#endif
1332
1333static void __init alloc_runtime_data(int cpu)
1334{
1335	struct sev_es_runtime_data *data;
1336
1337	data = memblock_alloc(sizeof(*data), PAGE_SIZE);
1338	if (!data)
1339		panic("Can't allocate SEV-ES runtime data");
1340
1341	per_cpu(runtime_data, cpu) = data;
1342}
1343
1344static void __init init_ghcb(int cpu)
1345{
1346	struct sev_es_runtime_data *data;
1347	int err;
1348
1349	data = per_cpu(runtime_data, cpu);
1350
1351	err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
1352					 sizeof(data->ghcb_page));
1353	if (err)
1354		panic("Can't map GHCBs unencrypted");
1355
1356	memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));
1357
1358	data->ghcb_active = false;
1359	data->backup_ghcb_active = false;
1360}
1361
1362void __init sev_es_init_vc_handling(void)
1363{
1364	int cpu;
1365
1366	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);
1367
1368	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1369		return;
1370
1371	if (!sev_es_check_cpu_features())
1372		panic("SEV-ES CPU Features missing");
1373
1374	/*
1375	 * SNP is supported in v2 of the GHCB spec which mandates support for HV
1376	 * features.
1377	 */
1378	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
1379		sev_hv_features = get_hv_features();
1380
1381		if (!(sev_hv_features & GHCB_HV_FT_SNP))
1382			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
1383	}
1384
1385	/* Initialize per-cpu GHCB pages */
1386	for_each_possible_cpu(cpu) {
1387		alloc_runtime_data(cpu);
1388		init_ghcb(cpu);
1389	}
1390
1391	sev_es_setup_play_dead();
1392
1393	/* Secondary CPUs use the runtime #VC handler */
1394	initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
1395}
1396
1397static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
1398{
1399	int trapnr = ctxt->fi.vector;
1400
1401	if (trapnr == X86_TRAP_PF)
1402		native_write_cr2(ctxt->fi.cr2);
1403
1404	ctxt->regs->orig_ax = ctxt->fi.error_code;
1405	do_early_exception(ctxt->regs, trapnr);
1406}
1407
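/*
 * Return a pointer to the saved register selected by the ModRM.rm field of
 * the decoded instruction, or NULL if it cannot be determined.
 */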
1408static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
1409{
1410	long *reg_array;
1411	int offset;
1412
1413	reg_array = (long *)ctxt->regs;
1414	offset    = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);
1415
1416	if (offset < 0)
1417		return NULL;
1418
1419	offset /= sizeof(long);
1420
1421	return reg_array + offset;
1422}
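
/*
 * Perform a single emulated MMIO read or write of @bytes via the GHCB
 * shared buffer, after translating the target address to a physical address.
 */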
1423static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
1424				 unsigned int bytes, bool read)
1425{
1426	u64 exit_code, exit_info_1, exit_info_2;
1427	unsigned long ghcb_pa = __pa(ghcb);
1428	enum es_result res;
1429	phys_addr_t paddr;
1430	void __user *ref;
1431
1432	ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
1433	if (ref == (void __user *)-1L)
1434		return ES_UNSUPPORTED;
1435
1436	exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
1437
1438	res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
1439	if (res != ES_OK) {
1440		if (res == ES_EXCEPTION && !read)
1441			ctxt->fi.error_code |= X86_PF_WRITE;
1442
1443		return res;
1444	}
1445
1446	exit_info_1 = paddr;
1447	/* Can never be greater than 8 */
1448	exit_info_2 = bytes;
1449
1450	ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
1451
1452	return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
1453}
1454
1455/*
1456 * The MOVS instruction has two memory operands, which raises the
1457 * problem that it is not known whether the access to the source or the
1458 * destination caused the #VC exception (and hence whether an MMIO read
1459 * or write operation needs to be emulated).
1460 *
1461 * Instead of playing games with walking page-tables and trying to guess
1462 * whether the source or destination is an MMIO range, split the move
1463 * into two operations, a read and a write with only one memory operand.
1464 * This will cause a nested #VC exception on the MMIO address which can
1465 * then be handled.
1466 *
1467 * This implementation has the benefit that it also supports MOVS where
1468 * source _and_ destination are MMIO regions.
1469 *
1470 * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
1471 * rare operation. If it turns out to be a performance problem the split
1472 * operations can be moved to memcpy_fromio() and memcpy_toio().
1473 */
1474static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
1475					  unsigned int bytes)
1476{
1477	unsigned long ds_base, es_base;
1478	unsigned char *src, *dst;
1479	unsigned char buffer[8];
1480	enum es_result ret;
1481	bool rep;
1482	int off;
1483
1484	ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
1485	es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
1486
1487	if (ds_base == -1L || es_base == -1L) {
1488		ctxt->fi.vector = X86_TRAP_GP;
1489		ctxt->fi.error_code = 0;
1490		return ES_EXCEPTION;
1491	}
1492
1493	src = ds_base + (unsigned char *)ctxt->regs->si;
1494	dst = es_base + (unsigned char *)ctxt->regs->di;
1495
1496	ret = vc_read_mem(ctxt, src, buffer, bytes);
1497	if (ret != ES_OK)
1498		return ret;
1499
1500	ret = vc_write_mem(ctxt, dst, buffer, bytes);
1501	if (ret != ES_OK)
1502		return ret;
1503
1504	if (ctxt->regs->flags & X86_EFLAGS_DF)
1505		off = -bytes;
1506	else
1507		off =  bytes;
1508
1509	ctxt->regs->si += off;
1510	ctxt->regs->di += off;
1511
1512	rep = insn_has_rep_prefix(&ctxt->insn);
1513	if (rep)
1514		ctxt->regs->cx -= 1;
1515
1516	if (!rep || ctxt->regs->cx == 0)
1517		return ES_OK;
1518	else
1519		return ES_RETRY;
1520}
1521
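/*
 * Decode and emulate the MMIO instruction that raised the #VC exception,
 * moving data between the register operand and the GHCB shared buffer.
 */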
1522static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
1523{
1524	struct insn *insn = &ctxt->insn;
1525	enum insn_mmio_type mmio;
1526	unsigned int bytes = 0;
1527	enum es_result ret;
1528	u8 sign_byte;
1529	long *reg_data;
1530
1531	mmio = insn_decode_mmio(insn, &bytes);
1532	if (mmio == INSN_MMIO_DECODE_FAILED)
1533		return ES_DECODE_FAILED;
1534
1535	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
1536		reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
1537		if (!reg_data)
1538			return ES_DECODE_FAILED;
1539	}
1540
1541	if (user_mode(ctxt->regs))
1542		return ES_UNSUPPORTED;
1543
1544	switch (mmio) {
1545	case INSN_MMIO_WRITE:
1546		memcpy(ghcb->shared_buffer, reg_data, bytes);
1547		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
1548		break;
1549	case INSN_MMIO_WRITE_IMM:
1550		memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
1551		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
1552		break;
1553	case INSN_MMIO_READ:
1554		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
1555		if (ret)
1556			break;
1557
1558		/* Zero-extend for 32-bit operation */
1559		if (bytes == 4)
1560			*reg_data = 0;
1561
1562		memcpy(reg_data, ghcb->shared_buffer, bytes);
1563		break;
1564	case INSN_MMIO_READ_ZERO_EXTEND:
1565		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
1566		if (ret)
1567			break;
1568
1569		/* Zero extend based on operand size */
1570		memset(reg_data, 0, insn->opnd_bytes);
1571		memcpy(reg_data, ghcb->shared_buffer, bytes);
1572		break;
1573	case INSN_MMIO_READ_SIGN_EXTEND:
1574		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
1575		if (ret)
1576			break;
1577
1578		if (bytes == 1) {
1579			u8 *val = (u8 *)ghcb->shared_buffer;
1580
1581			sign_byte = (*val & 0x80) ? 0xff : 0x00;
1582		} else {
1583			u16 *val = (u16 *)ghcb->shared_buffer;
1584
1585			sign_byte = (*val & 0x8000) ? 0xff : 0x00;
1586		}
1587
1588		/* Sign extend based on operand size */
1589		memset(reg_data, sign_byte, insn->opnd_bytes);
1590		memcpy(reg_data, ghcb->shared_buffer, bytes);
1591		break;
1592	case INSN_MMIO_MOVS:
1593		ret = vc_handle_mmio_movs(ctxt, bytes);
1594		break;
1595	default:
1596		ret = ES_UNSUPPORTED;
1597		break;
1598	}
1599
1600	return ret;
1601}
1602
1603static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
1604					  struct es_em_ctxt *ctxt)
1605{
1606	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
1607	long val, *reg = vc_insn_get_rm(ctxt);
1608	enum es_result ret;
1609
1610	if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
1611		return ES_VMM_ERROR;
1612
1613	if (!reg)
1614		return ES_DECODE_FAILED;
1615
1616	val = *reg;
1617
1618	/* Upper 32 bits must be written as zeroes */
1619	if (val >> 32) {
1620		ctxt->fi.vector = X86_TRAP_GP;
1621		ctxt->fi.error_code = 0;
1622		return ES_EXCEPTION;
1623	}
1624
1625	/* Clear out other reserved bits and set bit 10 */
1626	val = (val & 0xffff23ffL) | BIT(10);
1627
1628	/* Early non-zero writes to DR7 are not supported */
1629	if (!data && (val & ~DR7_RESET_VALUE))
1630		return ES_UNSUPPORTED;
1631
1632	/* Using a value of 0 for ExitInfo1 means RAX holds the value */
1633	ghcb_set_rax(ghcb, val);
1634	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
1635	if (ret != ES_OK)
1636		return ret;
1637
1638	if (data)
1639		data->dr7 = val;
1640
1641	return ES_OK;
1642}
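
/*
 * Editor's note -- illustrative sketch, not part of this file: the DR7
 * sanitization performed above as a stand-alone helper.  Returns false
 * for values that must raise #GP (non-zero upper 32 bits); otherwise it
 * stores the value with the reserved bits masked off and the must-be-one
 * bit 10 set.  sanitize_dr7() is a hypothetical name.
 */
#include <stdbool.h>
#include <stdint.h>

static bool sanitize_dr7(uint64_t val, uint64_t *out)
{
	if (val >> 32)		/* upper half must be written as zero */
		return false;

	/* Clear reserved bits, force bit 10 which reads as 1 */
	*out = (val & 0xffff23ffULL) | (1ULL << 10);
	return true;
}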
1643
1644static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
1645					 struct es_em_ctxt *ctxt)
1646{
1647	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
1648	long *reg = vc_insn_get_rm(ctxt);
1649
1650	if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
1651		return ES_VMM_ERROR;
1652
1653	if (!reg)
1654		return ES_DECODE_FAILED;
1655
1656	if (data)
1657		*reg = data->dr7;
1658	else
1659		*reg = DR7_RESET_VALUE;
1660
1661	return ES_OK;
1662}
1663
1664static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
1665				       struct es_em_ctxt *ctxt)
1666{
1667	return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
1668}
1669
1670static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
1671{
1672	enum es_result ret;
1673
1674	ghcb_set_rcx(ghcb, ctxt->regs->cx);
1675
1676	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
1677	if (ret != ES_OK)
1678		return ret;
1679
1680	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
1681		return ES_VMM_ERROR;
1682
1683	ctxt->regs->ax = ghcb->save.rax;
1684	ctxt->regs->dx = ghcb->save.rdx;
1685
1686	return ES_OK;
1687}
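
/*
 * Editor's note -- illustrative sketch, not part of this file: why the
 * ghcb_*_is_valid() checks above matter.  The GHCB carries a valid
 * bitmap alongside the register save area, and the guest must only
 * consume fields the hypervisor has explicitly marked valid.  The toy
 * structure and helpers below are simplified, hypothetical stand-ins for
 * the real GHCB accessors.
 */
#include <stdbool.h>
#include <stdint.h>

struct toy_ghcb {
	uint64_t rax, rdx;
	uint64_t valid_bitmap;	/* one bit per field, set by the writer */
};

enum toy_field { TOY_RAX = 0, TOY_RDX = 1 };

static bool toy_field_is_valid(const struct toy_ghcb *g, enum toy_field f)
{
	return g->valid_bitmap & (1ULL << f);
}

/* A reader only trusts rax/rdx if both valid bits are set. */
static bool toy_read_rax_rdx(const struct toy_ghcb *g, uint64_t *ax, uint64_t *dx)
{
	if (!(toy_field_is_valid(g, TOY_RAX) && toy_field_is_valid(g, TOY_RDX)))
		return false;

	*ax = g->rax;
	*dx = g->rdx;
	return true;
}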
1688
1689static enum es_result vc_handle_monitor(struct ghcb *ghcb,
1690					struct es_em_ctxt *ctxt)
1691{
1692	/*
1693	 * Treat it as a NOP and do not leak a physical address to the
1694	 * hypervisor.
1695	 */
1696	return ES_OK;
1697}
1698
1699static enum es_result vc_handle_mwait(struct ghcb *ghcb,
1700				      struct es_em_ctxt *ctxt)
1701{
1702	/* Treat the same as MONITOR/MONITORX */
1703	return ES_OK;
1704}
1705
1706static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
1707					struct es_em_ctxt *ctxt)
1708{
1709	enum es_result ret;
1710
1711	ghcb_set_rax(ghcb, ctxt->regs->ax);
1712	ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);
1713
1714	if (x86_platform.hyper.sev_es_hcall_prepare)
1715		x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
1716
1717	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
1718	if (ret != ES_OK)
1719		return ret;
1720
1721	if (!ghcb_rax_is_valid(ghcb))
1722		return ES_VMM_ERROR;
1723
1724	ctxt->regs->ax = ghcb->save.rax;
1725
1726	/*
1727	 * Call sev_es_hcall_finish() after regs->ax is already set.
1728	 * This allows the hypervisor handler to overwrite it again if
1729	 * necessary.
1730	 */
1731	if (x86_platform.hyper.sev_es_hcall_finish &&
1732	    !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
1733		return ES_VMM_ERROR;
1734
1735	return ES_OK;
1736}
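
/*
 * Editor's note -- illustrative sketch, not part of this file: what a
 * hypervisor-specific hook pair wired into
 * x86_platform.hyper.sev_es_hcall_prepare/finish might look like.  The
 * function names are hypothetical; only GHCB accessors already used in
 * this file are assumed.  A prepare hook copies any extra registers the
 * hypercall ABI needs into the GHCB, and the finish hook validates and
 * copies results back, returning false to signal a VMM error.
 */
static void example_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
{
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
}

static bool example_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	if (!ghcb_rax_is_valid(ghcb))
		return false;

	regs->ax = ghcb->save.rax;
	return true;
}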
1737
1738static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
1739					struct es_em_ctxt *ctxt)
1740{
1741	/*
1742	 * Calling exc_alignment_check() directly does not work, because it
1743	 * enables IRQs and the GHCB is active. Forward the exception and call
1744	 * it later from vc_forward_exception().
1745	 */
1746	ctxt->fi.vector = X86_TRAP_AC;
1747	ctxt->fi.error_code = 0;
1748	return ES_EXCEPTION;
1749}
1750
1751static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
1752					 struct ghcb *ghcb,
1753					 unsigned long exit_code)
1754{
1755	enum es_result result;
1756
1757	switch (exit_code) {
1758	case SVM_EXIT_READ_DR7:
1759		result = vc_handle_dr7_read(ghcb, ctxt);
1760		break;
1761	case SVM_EXIT_WRITE_DR7:
1762		result = vc_handle_dr7_write(ghcb, ctxt);
1763		break;
1764	case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
1765		result = vc_handle_trap_ac(ghcb, ctxt);
1766		break;
1767	case SVM_EXIT_RDTSC:
1768	case SVM_EXIT_RDTSCP:
1769		result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
1770		break;
1771	case SVM_EXIT_RDPMC:
1772		result = vc_handle_rdpmc(ghcb, ctxt);
1773		break;
1774	case SVM_EXIT_INVD:
1775		pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
1776		result = ES_UNSUPPORTED;
1777		break;
1778	case SVM_EXIT_CPUID:
1779		result = vc_handle_cpuid(ghcb, ctxt);
1780		break;
1781	case SVM_EXIT_IOIO:
1782		result = vc_handle_ioio(ghcb, ctxt);
1783		break;
1784	case SVM_EXIT_MSR:
1785		result = vc_handle_msr(ghcb, ctxt);
1786		break;
1787	case SVM_EXIT_VMMCALL:
1788		result = vc_handle_vmmcall(ghcb, ctxt);
1789		break;
1790	case SVM_EXIT_WBINVD:
1791		result = vc_handle_wbinvd(ghcb, ctxt);
1792		break;
1793	case SVM_EXIT_MONITOR:
1794		result = vc_handle_monitor(ghcb, ctxt);
1795		break;
1796	case SVM_EXIT_MWAIT:
1797		result = vc_handle_mwait(ghcb, ctxt);
1798		break;
1799	case SVM_EXIT_NPF:
1800		result = vc_handle_mmio(ghcb, ctxt);
1801		break;
1802	default:
1803		/*
1804		 * Unexpected #VC exception
1805		 */
1806		result = ES_UNSUPPORTED;
1807	}
1808
1809	return result;
1810}
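
/*
 * Editor's note -- illustrative sketch, not part of this file: the #VC
 * error code is simply the SVM exit code of the intercepted event, so
 * intercepted exceptions show up as SVM_EXIT_EXCP_BASE plus the x86
 * vector number.  That is how the X86_TRAP_AC case above and the
 * X86_TRAP_DB check further down are matched.  is_intercepted_exception()
 * is a hypothetical helper name.
 */
static bool is_intercepted_exception(unsigned long exit_code, int trapnr)
{
	return exit_code == SVM_EXIT_EXCP_BASE + trapnr;
}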
1811
1812static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
1813{
1814	long error_code = ctxt->fi.error_code;
1815	int trapnr = ctxt->fi.vector;
1816
1817	ctxt->regs->orig_ax = ctxt->fi.error_code;
1818
1819	switch (trapnr) {
1820	case X86_TRAP_GP:
1821		exc_general_protection(ctxt->regs, error_code);
1822		break;
1823	case X86_TRAP_UD:
1824		exc_invalid_op(ctxt->regs);
1825		break;
1826	case X86_TRAP_PF:
1827		write_cr2(ctxt->fi.cr2);
1828		exc_page_fault(ctxt->regs, error_code);
1829		break;
1830	case X86_TRAP_AC:
1831		exc_alignment_check(ctxt->regs, error_code);
1832		break;
1833	default:
1834		pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
1835		BUG();
1836	}
1837}
1838
1839static __always_inline bool is_vc2_stack(unsigned long sp)
1840{
1841	return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
1842}
1843
1844static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
1845{
1846	unsigned long sp, prev_sp;
1847
1848	sp      = (unsigned long)regs;
1849	prev_sp = regs->sp;
1850
1851	/*
1852	 * If the code was already executing on the VC2 stack when the #VC
1853	 * happened, let it proceed to the normal handling routine. This way the
1854	 * code executing on the VC2 stack can cause #VC exceptions to get handled.
1855	 */
1856	return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
1857}
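
/*
 * Editor's note -- illustrative sketch, not part of this file: the stack
 * check above in plain form.  A #VC comes "from an invalid context" when
 * the CPU is now running on the VC2 IST stack (current sp inside
 * [bottom, top)) but the interrupted code was not, i.e. the exception
 * recursed out of the #VC entry path itself.  The helper below is the
 * generic range check with hypothetical parameter names.
 */
static inline bool sp_on_stack(unsigned long sp,
			       unsigned long bottom, unsigned long top)
{
	return sp >= bottom && sp < top;
}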
1858
1859static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
1860{
1861	struct ghcb_state state;
1862	struct es_em_ctxt ctxt;
1863	enum es_result result;
1864	struct ghcb *ghcb;
1865	bool ret = true;
1866
1867	ghcb = __sev_get_ghcb(&state);
1868
1869	vc_ghcb_invalidate(ghcb);
1870	result = vc_init_em_ctxt(&ctxt, regs, error_code);
1871
1872	if (result == ES_OK)
1873		result = vc_handle_exitcode(&ctxt, ghcb, error_code);
1874
1875	__sev_put_ghcb(&state);
1876
1877	/* Done - now check the result */
1878	switch (result) {
1879	case ES_OK:
1880		vc_finish_insn(&ctxt);
1881		break;
1882	case ES_UNSUPPORTED:
1883		pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
1884				   error_code, regs->ip);
1885		ret = false;
1886		break;
1887	case ES_VMM_ERROR:
1888		pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
1889				   error_code, regs->ip);
1890		ret = false;
1891		break;
1892	case ES_DECODE_FAILED:
1893		pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
1894				   error_code, regs->ip);
1895		ret = false;
1896		break;
1897	case ES_EXCEPTION:
1898		vc_forward_exception(&ctxt);
1899		break;
1900	case ES_RETRY:
1901		/* Nothing to do */
1902		break;
1903	default:
1904		pr_emerg("Unknown result in %s():%d\n", __func__, result);
1905		/*
1906		 * Emulating the instruction which caused the #VC exception
1907		 * failed - can't continue so print debug information
1908		 */
1909		BUG();
1910	}
1911
1912	return ret;
1913}
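
/*
 * Editor's note -- illustrative sketch, not part of this file: why
 * ES_RETRY needs no action above.  RIP is only advanced past the
 * intercepted instruction in the ES_OK path (vc_finish_insn()), so
 * leaving the registers untouched makes the CPU re-execute the same
 * instruction and raise #VC again, e.g. once per element of a REP MOVS.
 * The toy loop below shows the resulting control flow with hypothetical
 * names.
 */
static void toy_emulation_loop(enum es_result (*emulate_one)(void))
{
	/* Keep re-running the faulting instruction until it completes. */
	while (emulate_one() == ES_RETRY)
		;
}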
1914
1915static __always_inline bool vc_is_db(unsigned long error_code)
1916{
1917	return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
1918}
1919
1920/*
1921 * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
1922 * and will panic when an error happens.
1923 */
1924DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
1925{
1926	irqentry_state_t irq_state;
1927
1928	/*
1929	 * With the current implementation it is always possible to switch to a
1930	 * safe stack because #VC exceptions only happen at known places, like
1931	 * intercepted instructions or accesses to MMIO areas/IO ports. They can
1932	 * also happen with code instrumentation when the hypervisor intercepts
1933	 * #DB, but the critical paths are forbidden to be instrumented, so #DB
1934	 * exceptions currently also only happen in safe places.
1935	 *
1936	 * But keep this here in case the noinstr annotations are violated due
1937	 * to a bug elsewhere.
1938	 */
1939	if (unlikely(vc_from_invalid_context(regs))) {
1940		instrumentation_begin();
1941		panic("Can't handle #VC exception from unsupported context\n");
1942		instrumentation_end();
1943	}
1944
1945	/*
1946	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
1947	 */
1948	if (vc_is_db(error_code)) {
1949		exc_debug(regs);
1950		return;
1951	}
1952
1953	irq_state = irqentry_nmi_enter(regs);
1954
1955	instrumentation_begin();
1956
1957	if (!vc_raw_handle_exception(regs, error_code)) {
1958		/* Show some debug info */
1959		show_regs(regs);
1960
1961		/* Ask hypervisor to sev_es_terminate */
1962		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
1963
1964		/* If that fails and we get here - just panic */
1965		panic("Returned from Terminate-Request to Hypervisor\n");
1966	}
1967
1968	instrumentation_end();
1969	irqentry_nmi_exit(regs, irq_state);
1970}
1971
1972/*
1973 * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
1974 * and will kill the current task with SIGBUS when an error happens.
1975 */
1976DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
1977{
1978	/*
1979	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
1980	 */
1981	if (vc_is_db(error_code)) {
1982		noist_exc_debug(regs);
1983		return;
1984	}
1985
1986	irqentry_enter_from_user_mode(regs);
1987	instrumentation_begin();
1988
1989	if (!vc_raw_handle_exception(regs, error_code)) {
1990		/*
1991		 * Do not kill the machine if user-space triggered the
1992		 * exception. Send SIGBUS instead and let user-space deal with
1993		 * it.
1994		 */
1995		force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
1996	}
1997
1998	instrumentation_end();
1999	irqentry_exit_to_user_mode(regs);
2000}
2001
2002bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
2003{
2004	unsigned long exit_code = regs->orig_ax;
2005	struct es_em_ctxt ctxt;
2006	enum es_result result;
2007
2008	vc_ghcb_invalidate(boot_ghcb);
2009
2010	result = vc_init_em_ctxt(&ctxt, regs, exit_code);
2011	if (result == ES_OK)
2012		result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);
2013
2014	/* Done - now check the result */
2015	switch (result) {
2016	case ES_OK:
2017		vc_finish_insn(&ctxt);
2018		break;
2019	case ES_UNSUPPORTED:
2020		early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
2021				exit_code, regs->ip);
2022		goto fail;
2023	case ES_VMM_ERROR:
2024		early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
2025				exit_code, regs->ip);
2026		goto fail;
2027	case ES_DECODE_FAILED:
2028		early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
2029				exit_code, regs->ip);
2030		goto fail;
2031	case ES_EXCEPTION:
2032		vc_early_forward_exception(&ctxt);
2033		break;
2034	case ES_RETRY:
2035		/* Nothing to do */
2036		break;
2037	default:
2038		BUG();
2039	}
2040
2041	return true;
2042
2043fail:
2044	show_regs(regs);
2045
2046	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
2047}
2048
2049/*
2050 * Initial set up of SNP relies on information provided by the
2051 * Confidential Computing blob, which can be passed to the kernel
2052 * in the following ways, depending on how it is booted:
2053 *
2054 * - when booted via the boot/decompress kernel:
2055 *   - via boot_params
2056 *
2057 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
2058 *   - via a setup_data entry, as defined by the Linux Boot Protocol
2059 *
2060 * Scan for the blob in that order.
2061 */
2062static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
2063{
2064	struct cc_blob_sev_info *cc_info;
2065
2066	/* Boot kernel would have passed the CC blob via boot_params. */
2067	if (bp->cc_blob_address) {
2068		cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
2069		goto found_cc_info;
2070	}
2071
2072	/*
2073	 * If the kernel was booted directly, without the use of the
2074	 * boot/decompression kernel, the CC blob may have been passed via
2075	 * setup_data instead.
2076	 */
2077	cc_info = find_cc_blob_setup_data(bp);
2078	if (!cc_info)
2079		return NULL;
2080
2081found_cc_info:
2082	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
2083		snp_abort();
2084
2085	return cc_info;
2086}
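
/*
 * Editor's note -- illustrative sketch, not part of this file: the
 * setup_data path in simplified form.  boot_params.hdr.setup_data is the
 * physical address of a singly linked list of struct setup_data entries;
 * an entry of type SETUP_CC_BLOB carries the address of the CC blob.
 * The example_cc_setup_data layout is an assumption, and this sketch
 * deliberately ignores the physical-to-virtual mapping a real early-boot
 * walk has to perform.
 */
struct example_cc_setup_data {
	struct setup_data header;
	u32 cc_blob_address;
};

static struct cc_blob_sev_info *example_find_cc_blob_setup_data(struct boot_params *bp)
{
	struct example_cc_setup_data *sd;
	struct setup_data *hdr;
	u64 pa_data = bp->hdr.setup_data;

	while (pa_data) {
		hdr = (struct setup_data *)pa_data;	/* assumes identity mapping */
		if (hdr->type == SETUP_CC_BLOB) {
			sd = (struct example_cc_setup_data *)hdr;
			return (struct cc_blob_sev_info *)(unsigned long)sd->cc_blob_address;
		}
		pa_data = hdr->next;
	}

	return NULL;
}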
2087
2088bool __init snp_init(struct boot_params *bp)
2089{
2090	struct cc_blob_sev_info *cc_info;
2091
2092	if (!bp)
2093		return false;
2094
2095	cc_info = find_cc_blob(bp);
2096	if (!cc_info)
2097		return false;
2098
2099	setup_cpuid_table(cc_info);
2100
2101	/*
2102	 * The CC blob will be used later to access the secrets page. Cache
2103	 * it here like the boot kernel does.
2104	 */
2105	bp->cc_blob_address = (u32)(unsigned long)cc_info;
2106
2107	return true;
2108}
2109
2110void __init __noreturn snp_abort(void)
2111{
2112	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
2113}
2114
2115static void dump_cpuid_table(void)
2116{
2117	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
2118	int i = 0;
2119
2120	pr_info("count=%d reserved=0x%x reserved2=0x%llx\n",
2121		cpuid_table->count, cpuid_table->__reserved1, cpuid_table->__reserved2);
2122
2123	for (i = 0; i < SNP_CPUID_COUNT_MAX; i++) {
2124		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
2125
2126		pr_info("index=%3d fn=0x%08x subfn=0x%08x: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x xcr0_in=0x%016llx xss_in=0x%016llx reserved=0x%016llx\n",
2127			i, fn->eax_in, fn->ecx_in, fn->eax, fn->ebx, fn->ecx,
2128			fn->edx, fn->xcr0_in, fn->xss_in, fn->__reserved);
2129	}
2130}
2131
2132/*
2133 * It is useful from an auditing/testing perspective to provide an easy way
2134 * for the guest owner to know that the CPUID table has been initialized as
2135 * expected, but that initialization happens too early in boot to print any
2136 * sort of indicator, and there's not really any other good place to do it,
2137 * so do it here.
2138 */
2139static int __init report_cpuid_table(void)
2140{
2141	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
2142
2143	if (!cpuid_table->count)
2144		return 0;
2145
2146	pr_info("Using SNP CPUID table, %d entries present.\n",
2147		cpuid_table->count);
2148
2149	if (sev_cfg.debug)
2150		dump_cpuid_table();
2151
2152	return 0;
2153}
2154arch_initcall(report_cpuid_table);
2155
2156static int __init init_sev_config(char *str)
2157{
2158	char *s;
2159
2160	while ((s = strsep(&str, ","))) {
2161		if (!strcmp(s, "debug")) {
2162			sev_cfg.debug = true;
2163			continue;
2164		}
2165
2166		pr_info("SEV command-line option '%s' was not recognized\n", s);
2167	}
2168
2169	return 1;
2170}
2171__setup("sev=", init_sev_config);
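
/*
 * Editor's note -- illustrative sketch, not part of this file: how the
 * comma-separated "sev=" option string is consumed above.  strsep()
 * destructively splits the buffer in place and returns one token per
 * call, so "sev=debug,foo" yields "debug" and then "foo".  Stand-alone
 * user-space demo with a local copy of the option string.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "debug,foo";
	char *str = buf, *s;

	while ((s = strsep(&str, ",")))
		printf("token: '%s'\n", s);	/* prints "debug", then "foo" */

	return 0;
}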
2172
2173int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
2174{
2175	struct ghcb_state state;
2176	struct es_em_ctxt ctxt;
2177	unsigned long flags;
2178	struct ghcb *ghcb;
2179	int ret;
2180
2181	rio->exitinfo2 = SEV_RET_NO_FW_CALL;
2182
2183	/*
2184	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
2185	 * a per-CPU GHCB.
2186	 */
2187	local_irq_save(flags);
2188
2189	ghcb = __sev_get_ghcb(&state);
2190	if (!ghcb) {
2191		ret = -EIO;
2192		goto e_restore_irq;
2193	}
2194
2195	vc_ghcb_invalidate(ghcb);
2196
2197	if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
2198		ghcb_set_rax(ghcb, input->data_gpa);
2199		ghcb_set_rbx(ghcb, input->data_npages);
2200	}
2201
2202	ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
2203	if (ret)
2204		goto e_put;
2205
2206	rio->exitinfo2 = ghcb->save.sw_exit_info_2;
2207	switch (rio->exitinfo2) {
2208	case 0:
2209		break;
2210
2211	case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_BUSY):
2212		ret = -EAGAIN;
2213		break;
2214
2215	case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN):
2216		/* The number of expected pages is returned in RBX */
2217		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
2218			input->data_npages = ghcb_get_rbx(ghcb);
2219			ret = -ENOSPC;
2220			break;
2221		}
2222		fallthrough;
2223	default:
2224		ret = -EIO;
2225		break;
2226	}
2227
2228e_put:
2229	__sev_put_ghcb(&state);
2230e_restore_irq:
2231	local_irq_restore(flags);
2232
2233	return ret;
2234}
2235EXPORT_SYMBOL_GPL(snp_issue_guest_request);
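
/*
 * Editor's note -- illustrative sketch, not part of this file: how a
 * caller of snp_issue_guest_request() might react to the error codes
 * mapped above.  -EAGAIN means the VMM was busy and the request can
 * simply be retried.  issue_with_retry() is a hypothetical helper.
 */
static int issue_with_retry(u64 exit_code, struct snp_req_data *input,
			    struct snp_guest_request_ioctl *rio)
{
	int tries, ret;

	for (tries = 0; tries < 3; tries++) {
		ret = snp_issue_guest_request(exit_code, input, rio);
		if (ret != -EAGAIN)
			break;		/* only retry the busy case */
	}

	/*
	 * A -ENOSPC result from an extended request means
	 * input->data_npages was updated to the number of pages the VMM
	 * needs; a real caller would grow the buffer behind
	 * input->data_gpa and call again (not shown in this sketch).
	 */
	return ret;
}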
2236
2237static struct platform_device sev_guest_device = {
2238	.name		= "sev-guest",
2239	.id		= -1,
2240};
2241
2242static int __init snp_init_platform_device(void)
2243{
2244	struct sev_guest_platform_data data;
2245	u64 gpa;
2246
2247	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
2248		return -ENODEV;
2249
2250	gpa = get_secrets_page();
2251	if (!gpa)
2252		return -ENODEV;
2253
2254	data.secrets_gpa = gpa;
2255	if (platform_device_add_data(&sev_guest_device, &data, sizeof(data)))
2256		return -ENODEV;
2257
2258	if (platform_device_register(&sev_guest_device))
2259		return -ENODEV;
2260
2261	pr_info("SNP guest platform device initialized.\n");
2262	return 0;
2263}
2264device_initcall(snp_init_platform_device);
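
/*
 * Editor's note -- illustrative sketch, not part of this file: how a
 * driver binding to the "sev-guest" platform device registered above
 * could retrieve the secrets-page GPA passed via platform data.  The
 * probe function and driver structure are hypothetical; only the
 * "sev-guest" name and struct sev_guest_platform_data come from the
 * code above.
 */
static int example_sev_guest_probe(struct platform_device *pdev)
{
	struct sev_guest_platform_data *data = dev_get_platdata(&pdev->dev);

	if (!data)
		return -ENODEV;

	dev_info(&pdev->dev, "secrets page GPA: 0x%llx\n", data->secrets_gpa);
	return 0;
}

static struct platform_driver example_sev_guest_driver = {
	.probe	= example_sev_guest_probe,
	.driver	= {
		.name = "sev-guest",
	},
};
module_platform_driver(example_sev_guest_driver);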