   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * AMD Memory Encryption Support
   4 *
   5 * Copyright (C) 2019 SUSE
   6 *
   7 * Author: Joerg Roedel <jroedel@suse.de>
   8 */
   9
  10#define pr_fmt(fmt)	"SEV: " fmt
  11
  12#include <linux/sched/debug.h>	/* For show_regs() */
  13#include <linux/percpu-defs.h>
  14#include <linux/cc_platform.h>
  15#include <linux/printk.h>
  16#include <linux/mm_types.h>
  17#include <linux/set_memory.h>
  18#include <linux/memblock.h>
  19#include <linux/kernel.h>
  20#include <linux/mm.h>
  21#include <linux/cpumask.h>
  22#include <linux/efi.h>
  23#include <linux/platform_device.h>
  24#include <linux/io.h>
  25
  26#include <asm/cpu_entry_area.h>
  27#include <asm/stacktrace.h>
  28#include <asm/sev.h>
  29#include <asm/insn-eval.h>
  30#include <asm/fpu/xcr.h>
  31#include <asm/processor.h>
  32#include <asm/realmode.h>
  33#include <asm/setup.h>
  34#include <asm/traps.h>
  35#include <asm/svm.h>
  36#include <asm/smp.h>
  37#include <asm/cpu.h>
  38#include <asm/apic.h>
  39#include <asm/cpuid.h>
  40#include <asm/cmdline.h>
  41
  42#define DR7_RESET_VALUE        0x400
  43
  44/* AP INIT values as documented in the APM2 section "Processor Initialization State" */
  45#define AP_INIT_CS_LIMIT		0xffff
  46#define AP_INIT_DS_LIMIT		0xffff
  47#define AP_INIT_LDTR_LIMIT		0xffff
  48#define AP_INIT_GDTR_LIMIT		0xffff
  49#define AP_INIT_IDTR_LIMIT		0xffff
  50#define AP_INIT_TR_LIMIT		0xffff
  51#define AP_INIT_RFLAGS_DEFAULT		0x2
  52#define AP_INIT_DR6_DEFAULT		0xffff0ff0
  53#define AP_INIT_GPAT_DEFAULT		0x0007040600070406ULL
  54#define AP_INIT_XCR0_DEFAULT		0x1
  55#define AP_INIT_X87_FTW_DEFAULT		0x5555
  56#define AP_INIT_X87_FCW_DEFAULT		0x0040
  57#define AP_INIT_CR0_DEFAULT		0x60000010
  58#define AP_INIT_MXCSR_DEFAULT		0x1f80
  59
  60/* For early boot hypervisor communication in SEV-ES enabled guests */
  61static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
  62
  63/*
  64 * Needs to be in the .data section because we need it NULL before bss is
  65 * cleared
  66 */
  67static struct ghcb *boot_ghcb __section(".data");
  68
  69/* Bitmap of SEV features supported by the hypervisor */
  70static u64 sev_hv_features __ro_after_init;
  71
  72/* #VC handler runtime per-CPU data */
  73struct sev_es_runtime_data {
  74	struct ghcb ghcb_page;
  75
  76	/*
  77	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
  78	 * It is needed when an NMI happens while the #VC handler uses the real
  79	 * GHCB, and the NMI handler itself is causing another #VC exception. In
  80	 * that case the GHCB content of the first handler needs to be backed up
  81	 * and restored.
  82	 */
  83	struct ghcb backup_ghcb;
  84
  85	/*
  86	 * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
  87	 * There is no need for it to be atomic, because nothing is written to
  88	 * the GHCB between the read and the write of ghcb_active. So it is safe
  89	 * to use it when a nested #VC exception happens before the write.
  90	 *
  91	 * This is necessary for example in the #VC->NMI->#VC case when the NMI
  92	 * happens while the first #VC handler uses the GHCB. When the NMI code
  93	 * raises a second #VC handler it might overwrite the contents of the
  94	 * GHCB written by the first handler. To avoid this the content of the
  95	 * GHCB is saved and restored when the GHCB is detected to be in use
  96	 * already.
  97	 */
  98	bool ghcb_active;
  99	bool backup_ghcb_active;
 100
 101	/*
 102	 * Cached DR7 value - write it on DR7 writes and return it on reads.
 103	 * That value will never make it to the real hardware DR7 as debugging
 104	 * is currently unsupported in SEV-ES guests.
 105	 */
 106	unsigned long dr7;
 107};
 108
 109struct ghcb_state {
 110	struct ghcb *ghcb;
 111};
 112
 113static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
 114DEFINE_STATIC_KEY_FALSE(sev_es_enable_key);
 115
 116static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
 117
 118struct sev_config {
 119	__u64 debug		: 1,
 120	      __reserved	: 63;
 121};
 122
 123static struct sev_config sev_cfg __read_mostly;
 124
 125static __always_inline bool on_vc_stack(struct pt_regs *regs)
 126{
 127	unsigned long sp = regs->sp;
 128
 129	/* User-mode RSP is not trusted */
 130	if (user_mode(regs))
 131		return false;
 132
 133	/* SYSCALL gap still has user-mode RSP */
 134	if (ip_within_syscall_gap(regs))
 135		return false;
 136
 137	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
 138}
 139
 140/*
 141 * This function handles the case when an NMI is raised in the #VC
 142 * exception handler entry code, before the #VC handler has switched off
 143 * its IST stack. In this case, the IST entry for #VC must be adjusted,
 144 * so that any nested #VC exception will not overwrite the stack
 145 * contents of the interrupted #VC handler.
 146 *
 147 * The IST entry is adjusted unconditionally so that it can also be
 148 * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
 149 * nested sev_es_ist_exit() call may adjust back the IST entry too
 150 * early.
 151 *
 152 * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
 153 * on the NMI IST stack, as they are only called from NMI handling code
 154 * right now.
 155 */
 156void noinstr __sev_es_ist_enter(struct pt_regs *regs)
 157{
 158	unsigned long old_ist, new_ist;
 159
 160	/* Read old IST entry */
 161	new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
 162
 163	/*
 164	 * If NMI happened while on the #VC IST stack, set the new IST
 165	 * value below regs->sp, so that the interrupted stack frame is
 166	 * not overwritten by subsequent #VC exceptions.
 167	 */
 168	if (on_vc_stack(regs))
 169		new_ist = regs->sp;
 170
 171	/*
 172	 * Reserve additional 8 bytes and store old IST value so this
 173	 * adjustment can be unrolled in __sev_es_ist_exit().
 174	 */
 175	new_ist -= sizeof(old_ist);
 176	*(unsigned long *)new_ist = old_ist;
 177
 178	/* Set new IST entry */
 179	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
 180}
 181
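/*
 * Undo the IST adjustment made by __sev_es_ist_enter(): read back the old
 * IST value that was stored just below the adjusted entry and restore it
 * in the TSS.
 */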
 182void noinstr __sev_es_ist_exit(void)
 183{
 184	unsigned long ist;
 185
 186	/* Read IST entry */
 187	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
 188
 189	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
 190		return;
 191
 192	/* Read back old IST entry and write it to the TSS */
 193	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
 194}
 195
 196/*
 197 * Nothing shall interrupt this code path while holding the per-CPU
 198 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 199 *
 200 * Callers must disable local interrupts around it.
 201 */
 202static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
 203{
 204	struct sev_es_runtime_data *data;
 205	struct ghcb *ghcb;
 206
 207	WARN_ON(!irqs_disabled());
 208
 209	data = this_cpu_read(runtime_data);
 210	ghcb = &data->ghcb_page;
 211
 212	if (unlikely(data->ghcb_active)) {
 213		/* GHCB is already in use - save its contents */
 214
 215		if (unlikely(data->backup_ghcb_active)) {
 216			/*
 217			 * Backup-GHCB is also already in use. There is no way
 218			 * to continue here so just kill the machine. To make
 219			 * panic() work, mark GHCBs inactive so that messages
 220			 * can be printed out.
 221			 */
 222			data->ghcb_active        = false;
 223			data->backup_ghcb_active = false;
 224
 225			instrumentation_begin();
 226			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
 227			instrumentation_end();
 228		}
 229
 230		/* Mark backup_ghcb active before writing to it */
 231		data->backup_ghcb_active = true;
 232
 233		state->ghcb = &data->backup_ghcb;
 234
 235		/* Backup GHCB content */
 236		*state->ghcb = *ghcb;
 237	} else {
 238		state->ghcb = NULL;
 239		data->ghcb_active = true;
 240	}
 241
 242	return ghcb;
 243}
 244
 245static inline u64 sev_es_rd_ghcb_msr(void)
 246{
 247	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
 248}
 249
 250static __always_inline void sev_es_wr_ghcb_msr(u64 val)
 251{
 252	u32 low, high;
 253
 254	low  = (u32)(val);
 255	high = (u32)(val >> 32);
 256
 257	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
 258}
 259
 260static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
 261				unsigned char *buffer)
 262{
 263	return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
 264}
 265
 266static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
 267{
 268	char buffer[MAX_INSN_SIZE];
 269	int insn_bytes;
 270
 271	insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
 272	if (insn_bytes == 0) {
 273		/* Nothing could be copied */
 274		ctxt->fi.vector     = X86_TRAP_PF;
 275		ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
 276		ctxt->fi.cr2        = ctxt->regs->ip;
 277		return ES_EXCEPTION;
 278	} else if (insn_bytes == -EINVAL) {
 279		/* Effective RIP could not be calculated */
 280		ctxt->fi.vector     = X86_TRAP_GP;
 281		ctxt->fi.error_code = 0;
 282		ctxt->fi.cr2        = 0;
 283		return ES_EXCEPTION;
 284	}
 285
 286	if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
 287		return ES_DECODE_FAILED;
 288
 289	if (ctxt->insn.immediate.got)
 290		return ES_OK;
 291	else
 292		return ES_DECODE_FAILED;
 293}
 294
 295static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
 296{
 297	char buffer[MAX_INSN_SIZE];
 298	int res, ret;
 299
 300	res = vc_fetch_insn_kernel(ctxt, buffer);
 301	if (res) {
 302		ctxt->fi.vector     = X86_TRAP_PF;
 303		ctxt->fi.error_code = X86_PF_INSTR;
 304		ctxt->fi.cr2        = ctxt->regs->ip;
 305		return ES_EXCEPTION;
 306	}
 307
 308	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
 309	if (ret < 0)
 310		return ES_DECODE_FAILED;
 311	else
 312		return ES_OK;
 313}
 314
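/*
 * Fetch and decode the instruction that raised the #VC exception, from
 * user or kernel memory depending on the mode the exception came from.
 */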
 315static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
 316{
 317	if (user_mode(ctxt->regs))
 318		return __vc_decode_user_insn(ctxt);
 319	else
 320		return __vc_decode_kern_insn(ctxt);
 321}
 322
 323static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
 324				   char *dst, char *buf, size_t size)
 325{
 326	unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
 327
 328	/*
 329	 * This function uses __put_user() independent of whether kernel or user
 330	 * memory is accessed. This works fine because __put_user() does no
 331	 * sanity checks of the pointer being accessed. All that it does is
 332	 * to report when the access failed.
 333	 *
 334	 * Also, this function runs in atomic context, so __put_user() is not
 335	 * allowed to sleep. The page-fault handler detects that it is running
 336	 * in atomic context and will not try to take mmap_sem and handle the
 337	 * fault, so additional pagefault_enable()/disable() calls are not
 338	 * needed.
 339	 *
 340	 * The access can't be done via copy_to_user() here because
 341	 * vc_write_mem() must not use string instructions to access unsafe
 342	 * memory. The reason is that MOVS is emulated by the #VC handler by
 343	 * splitting the move up into a read and a write and taking a nested #VC
 344 * exception on whichever of them is the MMIO access. Using string
 345	 * instructions here would cause infinite nesting.
 346	 */
 347	switch (size) {
 348	case 1: {
 349		u8 d1;
 350		u8 __user *target = (u8 __user *)dst;
 351
 352		memcpy(&d1, buf, 1);
 353		if (__put_user(d1, target))
 354			goto fault;
 355		break;
 356	}
 357	case 2: {
 358		u16 d2;
 359		u16 __user *target = (u16 __user *)dst;
 360
 361		memcpy(&d2, buf, 2);
 362		if (__put_user(d2, target))
 363			goto fault;
 364		break;
 365	}
 366	case 4: {
 367		u32 d4;
 368		u32 __user *target = (u32 __user *)dst;
 369
 370		memcpy(&d4, buf, 4);
 371		if (__put_user(d4, target))
 372			goto fault;
 373		break;
 374	}
 375	case 8: {
 376		u64 d8;
 377		u64 __user *target = (u64 __user *)dst;
 378
 379		memcpy(&d8, buf, 8);
 380		if (__put_user(d8, target))
 381			goto fault;
 382		break;
 383	}
 384	default:
 385		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
 386		return ES_UNSUPPORTED;
 387	}
 388
 389	return ES_OK;
 390
 391fault:
 392	if (user_mode(ctxt->regs))
 393		error_code |= X86_PF_USER;
 394
 395	ctxt->fi.vector = X86_TRAP_PF;
 396	ctxt->fi.error_code = error_code;
 397	ctxt->fi.cr2 = (unsigned long)dst;
 398
 399	return ES_EXCEPTION;
 400}
 401
 402static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 403				  char *src, char *buf, size_t size)
 404{
 405	unsigned long error_code = X86_PF_PROT;
 406
 407	/*
 408	 * This function uses __get_user() independent of whether kernel or user
 409	 * memory is accessed. This works fine because __get_user() does no
 410	 * sanity checks of the pointer being accessed. All that it does is
 411	 * to report when the access failed.
 412	 *
 413	 * Also, this function runs in atomic context, so __get_user() is not
 414	 * allowed to sleep. The page-fault handler detects that it is running
 415	 * in atomic context and will not try to take mmap_sem and handle the
 416	 * fault, so additional pagefault_enable()/disable() calls are not
 417	 * needed.
 418	 *
 419	 * The access can't be done via copy_from_user() here because
 420	 * vc_read_mem() must not use string instructions to access unsafe
 421	 * memory. The reason is that MOVS is emulated by the #VC handler by
 422	 * splitting the move up into a read and a write and taking a nested #VC
 423 * exception on whichever of them is the MMIO access. Using string
 424	 * instructions here would cause infinite nesting.
 425	 */
 426	switch (size) {
 427	case 1: {
 428		u8 d1;
 429		u8 __user *s = (u8 __user *)src;
 430
 431		if (__get_user(d1, s))
 432			goto fault;
 433		memcpy(buf, &d1, 1);
 434		break;
 435	}
 436	case 2: {
 437		u16 d2;
 438		u16 __user *s = (u16 __user *)src;
 439
 440		if (__get_user(d2, s))
 441			goto fault;
 442		memcpy(buf, &d2, 2);
 443		break;
 444	}
 445	case 4: {
 446		u32 d4;
 447		u32 __user *s = (u32 __user *)src;
 448
 449		if (__get_user(d4, s))
 450			goto fault;
 451		memcpy(buf, &d4, 4);
 452		break;
 453	}
 454	case 8: {
 455		u64 d8;
 456		u64 __user *s = (u64 __user *)src;
 457		if (__get_user(d8, s))
 458			goto fault;
 459		memcpy(buf, &d8, 8);
 460		break;
 461	}
 462	default:
 463		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
 464		return ES_UNSUPPORTED;
 465	}
 466
 467	return ES_OK;
 468
 469fault:
 470	if (user_mode(ctxt->regs))
 471		error_code |= X86_PF_USER;
 472
 473	ctxt->fi.vector = X86_TRAP_PF;
 474	ctxt->fi.error_code = error_code;
 475	ctxt->fi.cr2 = (unsigned long)src;
 476
 477	return ES_EXCEPTION;
 478}
 479
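/*
 * Translate a virtual address to a physical address for MMIO emulation by
 * walking the page-tables manually. Unmapped addresses are reported as a
 * page-fault to the caller; encrypted (C-bit) mappings are refused.
 */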
 480static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
 481					   unsigned long vaddr, phys_addr_t *paddr)
 482{
 483	unsigned long va = (unsigned long)vaddr;
 484	unsigned int level;
 485	phys_addr_t pa;
 486	pgd_t *pgd;
 487	pte_t *pte;
 488
 489	pgd = __va(read_cr3_pa());
 490	pgd = &pgd[pgd_index(va)];
 491	pte = lookup_address_in_pgd(pgd, va, &level);
 492	if (!pte) {
 493		ctxt->fi.vector     = X86_TRAP_PF;
 494		ctxt->fi.cr2        = vaddr;
 495		ctxt->fi.error_code = 0;
 496
 497		if (user_mode(ctxt->regs))
 498			ctxt->fi.error_code |= X86_PF_USER;
 499
 500		return ES_EXCEPTION;
 501	}
 502
 503	if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
 504		/* Emulated MMIO to/from encrypted memory not supported */
 505		return ES_UNSUPPORTED;
 506
 507	pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
 508	pa |= va & ~page_level_mask(level);
 509
 510	*paddr = pa;
 511
 512	return ES_OK;
 513}
 514
 515/* Include code shared with pre-decompression boot stage */
 516#include "sev-shared.c"
 517
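/*
 * Release the per-CPU GHCB acquired by __sev_get_ghcb(). If a backup was
 * made because of nesting, restore it; otherwise invalidate the GHCB and
 * mark it as free again.
 */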
 518static noinstr void __sev_put_ghcb(struct ghcb_state *state)
 519{
 520	struct sev_es_runtime_data *data;
 521	struct ghcb *ghcb;
 522
 523	WARN_ON(!irqs_disabled());
 524
 525	data = this_cpu_read(runtime_data);
 526	ghcb = &data->ghcb_page;
 527
 528	if (state->ghcb) {
 529		/* Restore GHCB from Backup */
 530		*ghcb = *state->ghcb;
 531		data->backup_ghcb_active = false;
 532		state->ghcb = NULL;
 533	} else {
 534		/*
 535		 * Invalidate the GHCB so a VMGEXIT instruction issued
 536		 * from userspace won't appear to be valid.
 537		 */
 538		vc_ghcb_invalidate(ghcb);
 539		data->ghcb_active = false;
 540	}
 541}
 542
 543void noinstr __sev_es_nmi_complete(void)
 544{
 545	struct ghcb_state state;
 546	struct ghcb *ghcb;
 547
 548	ghcb = __sev_get_ghcb(&state);
 549
 550	vc_ghcb_invalidate(ghcb);
 551	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
 552	ghcb_set_sw_exit_info_1(ghcb, 0);
 553	ghcb_set_sw_exit_info_2(ghcb, 0);
 554
 555	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
 556	VMGEXIT();
 557
 558	__sev_put_ghcb(&state);
 559}
 560
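/*
 * Return the physical address of the SNP secrets page as described by the
 * Confidential Computing blob passed in boot_params, or 0 if the blob is
 * missing or the secrets page fails the sanity checks.
 */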
 561static u64 __init get_secrets_page(void)
 562{
 563	u64 pa_data = boot_params.cc_blob_address;
 564	struct cc_blob_sev_info info;
 565	void *map;
 566
 567	/*
 568	 * The CC blob contains the address of the secrets page; check if the
 569	 * blob is present.
 570	 */
 571	if (!pa_data)
 572		return 0;
 573
 574	map = early_memremap(pa_data, sizeof(info));
 575	if (!map) {
 576		pr_err("Unable to locate SNP secrets page: failed to map the Confidential Computing blob.\n");
 577		return 0;
 578	}
 579	memcpy(&info, map, sizeof(info));
 580	early_memunmap(map, sizeof(info));
 581
 582	/* smoke-test the secrets page passed */
 583	if (!info.secrets_phys || info.secrets_len != PAGE_SIZE)
 584		return 0;
 585
 586	return info.secrets_phys;
 587}
 588
 589static u64 __init get_snp_jump_table_addr(void)
 590{
 591	struct snp_secrets_page_layout *layout;
 592	void __iomem *mem;
 593	u64 pa, addr;
 594
 595	pa = get_secrets_page();
 596	if (!pa)
 597		return 0;
 598
 599	mem = ioremap_encrypted(pa, PAGE_SIZE);
 600	if (!mem) {
 601		pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
 602		return 0;
 603	}
 604
 605	layout = (__force struct snp_secrets_page_layout *)mem;
 606
 607	addr = layout->os_area.ap_jump_table_pa;
 608	iounmap(mem);
 609
 610	return addr;
 611}
 612
 613static u64 __init get_jump_table_addr(void)
 614{
 615	struct ghcb_state state;
 616	unsigned long flags;
 617	struct ghcb *ghcb;
 618	u64 ret = 0;
 619
 620	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
 621		return get_snp_jump_table_addr();
 622
 623	local_irq_save(flags);
 624
 625	ghcb = __sev_get_ghcb(&state);
 626
 627	vc_ghcb_invalidate(ghcb);
 628	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
 629	ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
 630	ghcb_set_sw_exit_info_2(ghcb, 0);
 631
 632	sev_es_wr_ghcb_msr(__pa(ghcb));
 633	VMGEXIT();
 634
 635	if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
 636	    ghcb_sw_exit_info_2_is_valid(ghcb))
 637		ret = ghcb->save.sw_exit_info_2;
 638
 639	__sev_put_ghcb(&state);
 640
 641	local_irq_restore(flags);
 642
 643	return ret;
 644}
 645
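/*
 * Validate or invalidate (depending on @validate) each 4K page in the
 * range with PVALIDATE. A failure is fatal and terminates the guest.
 */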
 646static void pvalidate_pages(unsigned long vaddr, unsigned int npages, bool validate)
 647{
 648	unsigned long vaddr_end;
 649	int rc;
 650
 651	vaddr = vaddr & PAGE_MASK;
 652	vaddr_end = vaddr + (npages << PAGE_SHIFT);
 653
 654	while (vaddr < vaddr_end) {
 655		rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
 656		if (WARN(rc, "Failed to validate address 0x%lx ret %d", vaddr, rc))
 657			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
 658
 659		vaddr = vaddr + PAGE_SIZE;
 660	}
 661}
 662
 663static void __init early_set_pages_state(unsigned long paddr, unsigned int npages, enum psc_op op)
 664{
 665	unsigned long paddr_end;
 666	u64 val;
 667
 668	paddr = paddr & PAGE_MASK;
 669	paddr_end = paddr + (npages << PAGE_SHIFT);
 670
 671	while (paddr < paddr_end) {
 672		/*
 673		 * Use the MSR protocol because this function can be called before
 674		 * the GHCB is established.
 675		 */
 676		sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
 677		VMGEXIT();
 678
 679		val = sev_es_rd_ghcb_msr();
 680
 681		if (WARN(GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP,
 682			 "Wrong PSC response code: 0x%x\n",
 683			 (unsigned int)GHCB_RESP_CODE(val)))
 684			goto e_term;
 685
 686		if (WARN(GHCB_MSR_PSC_RESP_VAL(val),
 687			 "Failed to change page state to '%s' paddr 0x%lx error 0x%llx\n",
 688			 op == SNP_PAGE_STATE_PRIVATE ? "private" : "shared",
 689			 paddr, GHCB_MSR_PSC_RESP_VAL(val)))
 690			goto e_term;
 691
 692		paddr = paddr + PAGE_SIZE;
 693	}
 694
 695	return;
 696
 697e_term:
 698	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
 699}
 700
 701void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
 702					 unsigned int npages)
 703{
 704	/*
 705	 * This can be invoked in early boot while running identity mapped, so
 706	 * use an open coded check for SNP instead of using cc_platform_has().
 707	 * This eliminates worries about jump tables or checking boot_cpu_data
 708	 * in the cc_platform_has() function.
 709	 */
 710	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
 711		return;
 712
 713	 /*
 714	  * Ask the hypervisor to mark the memory pages as private in the RMP
 715	  * table.
 716	  */
 717	early_set_pages_state(paddr, npages, SNP_PAGE_STATE_PRIVATE);
 718
 719	/* Validate the memory pages after they've been added in the RMP table. */
 720	pvalidate_pages(vaddr, npages, true);
 721}
 722
 723void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
 724					unsigned int npages)
 725{
 726	/*
 727	 * This can be invoked in early boot while running identity mapped, so
 728	 * use an open coded check for SNP instead of using cc_platform_has().
 729	 * This eliminates worries about jump tables or checking boot_cpu_data
 730	 * in the cc_platform_has() function.
 731	 */
 732	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
 733		return;
 734
 735	/* Invalidate the memory pages before they are marked shared in the RMP table. */
 736	pvalidate_pages(vaddr, npages, false);
 737
 738	 /* Ask hypervisor to mark the memory pages shared in the RMP table. */
 739	early_set_pages_state(paddr, npages, SNP_PAGE_STATE_SHARED);
 740}
 741
 742void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
 743{
 744	unsigned long vaddr, npages;
 745
 746	vaddr = (unsigned long)__va(paddr);
 747	npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
 748
 749	if (op == SNP_PAGE_STATE_PRIVATE)
 750		early_snp_set_memory_private(vaddr, paddr, npages);
 751	else if (op == SNP_PAGE_STATE_SHARED)
 752		early_snp_set_memory_shared(vaddr, paddr, npages);
 753	else
 754		WARN(1, "invalid memory op %d\n", op);
 755}
 756
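/*
 * Issue a Page State Change request through the GHCB shared buffer and
 * keep retrying until the hypervisor has processed every entry in the
 * descriptor. Returns 0 on success and a non-zero value on failure.
 */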
 757static int vmgexit_psc(struct snp_psc_desc *desc)
 758{
 759	int cur_entry, end_entry, ret = 0;
 760	struct snp_psc_desc *data;
 761	struct ghcb_state state;
 762	struct es_em_ctxt ctxt;
 763	unsigned long flags;
 764	struct ghcb *ghcb;
 765
 766	/*
 767	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
 768	 * a per-CPU GHCB.
 769	 */
 770	local_irq_save(flags);
 771
 772	ghcb = __sev_get_ghcb(&state);
 773	if (!ghcb) {
 774		ret = 1;
 775		goto out_unlock;
 776	}
 777
 778	/* Copy the input desc into GHCB shared buffer */
 779	data = (struct snp_psc_desc *)ghcb->shared_buffer;
 780	memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));
 781
 782	/*
 783	 * As per the GHCB specification, the hypervisor can resume the guest
 784	 * before processing all the entries. Check whether all the entries
 785	 * are processed. If not, then keep retrying. Note, the hypervisor
 786	 * will update the data memory directly to indicate the status, so
 787	 * reference the data->hdr everywhere.
 788	 *
 789	 * The strategy here is to wait for the hypervisor to change the page
 790	 * state in the RMP table before the guest accesses the memory pages. If the
 791	 * page state change was not successful, then later memory access will
 792	 * result in a crash.
 793	 */
 794	cur_entry = data->hdr.cur_entry;
 795	end_entry = data->hdr.end_entry;
 796
 797	while (data->hdr.cur_entry <= data->hdr.end_entry) {
 798		ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
 799
 800		/* This call advances cur_entry in the descriptor that data points to. */
 801		ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
 802
 803		/*
 804		 * Page State Change VMGEXIT can pass error code through
 805		 * exit_info_2.
 806		 */
 807		if (WARN(ret || ghcb->save.sw_exit_info_2,
 808			 "SNP: PSC failed ret=%d exit_info_2=%llx\n",
 809			 ret, ghcb->save.sw_exit_info_2)) {
 810			ret = 1;
 811			goto out;
 812		}
 813
 814		/* Verify that reserved bit is not set */
 815		if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
 816			ret = 1;
 817			goto out;
 818		}
 819
 820		/*
 821		 * Sanity check that entry processing is not going backwards.
 822		 * This will happen only if the hypervisor is tricking us.
 823		 */
 824		if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
 825"SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
 826			 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
 827			ret = 1;
 828			goto out;
 829		}
 830	}
 831
 832out:
 833	__sev_put_ghcb(&state);
 834
 835out_unlock:
 836	local_irq_restore(flags);
 837
 838	return ret;
 839}
 840
 841static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
 842			      unsigned long vaddr_end, int op)
 843{
 844	struct psc_hdr *hdr;
 845	struct psc_entry *e;
 846	unsigned long pfn;
 847	int i;
 848
 849	hdr = &data->hdr;
 850	e = data->entries;
 851
 852	memset(data, 0, sizeof(*data));
 853	i = 0;
 854
 855	while (vaddr < vaddr_end) {
 856		if (is_vmalloc_addr((void *)vaddr))
 857			pfn = vmalloc_to_pfn((void *)vaddr);
 858		else
 859			pfn = __pa(vaddr) >> PAGE_SHIFT;
 860
 861		e->gfn = pfn;
 862		e->operation = op;
 863		hdr->end_entry = i;
 864
 865		/*
 866		 * Current SNP implementation doesn't keep track of the RMP page
 867		 * size so use 4K for simplicity.
 868		 */
 869		e->pagesize = RMP_PG_SIZE_4K;
 870
 871		vaddr = vaddr + PAGE_SIZE;
 872		e++;
 873		i++;
 874	}
 875
 876	if (vmgexit_psc(data))
 877		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
 878}
 879
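/*
 * Change the RMP page state of @npages pages starting at @vaddr, splitting
 * the work into chunks of at most VMGEXIT_PSC_MAX_ENTRY pages per PSC
 * descriptor.
 */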
 880static void set_pages_state(unsigned long vaddr, unsigned int npages, int op)
 881{
 882	unsigned long vaddr_end, next_vaddr;
 883	struct snp_psc_desc *desc;
 884
 885	desc = kmalloc(sizeof(*desc), GFP_KERNEL_ACCOUNT);
 886	if (!desc)
 887		panic("SNP: failed to allocate memory for PSC descriptor\n");
 888
 889	vaddr = vaddr & PAGE_MASK;
 890	vaddr_end = vaddr + (npages << PAGE_SHIFT);
 891
 892	while (vaddr < vaddr_end) {
 893		/* Calculate the last vaddr that fits in one struct snp_psc_desc. */
 894		next_vaddr = min_t(unsigned long, vaddr_end,
 895				   (VMGEXIT_PSC_MAX_ENTRY * PAGE_SIZE) + vaddr);
 896
 897		__set_pages_state(desc, vaddr, next_vaddr, op);
 898
 899		vaddr = next_vaddr;
 900	}
 901
 902	kfree(desc);
 903}
 904
 905void snp_set_memory_shared(unsigned long vaddr, unsigned int npages)
 906{
 907	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
 908		return;
 909
 910	pvalidate_pages(vaddr, npages, false);
 911
 912	set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
 913}
 914
 915void snp_set_memory_private(unsigned long vaddr, unsigned int npages)
 916{
 917	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
 918		return;
 919
 920	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
 921
 922	pvalidate_pages(vaddr, npages, true);
 923}
 924
 925static int snp_set_vmsa(void *va, bool vmsa)
 926{
 927	u64 attrs;
 928
 929	/*
 930	 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
 931	 * using the RMPADJUST instruction. However, for the instruction to
 932	 * succeed it must target the permissions of a lesser privileged
 933	 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
 934	 * instruction in the AMD64 APM Volume 3).
 935	 */
 936	attrs = 1;
 937	if (vmsa)
 938		attrs |= RMPADJUST_VMSA_PAGE_BIT;
 939
 940	return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
 941}
 942
 943#define __ATTR_BASE		(SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
 944#define INIT_CS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
 945#define INIT_DS_ATTRIBS		(__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)
 946
 947#define INIT_LDTR_ATTRIBS	(SVM_SELECTOR_P_MASK | 2)
 948#define INIT_TR_ATTRIBS		(SVM_SELECTOR_P_MASK | 3)
 949
 950static void *snp_alloc_vmsa_page(void)
 951{
 952	struct page *p;
 953
 954	/*
 955	 * Allocate VMSA page to work around the SNP erratum where the CPU will
 956	 * incorrectly signal an RMP violation #PF if a large page (2MB or 1GB)
 957	 * collides with the RMP entry of the VMSA page. The recommended workaround
 958	 * is to not use a large page.
 959	 *
 960	 * Allocate an 8k page which is also 8k-aligned.
 961	 */
 962	p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
 963	if (!p)
 964		return NULL;
 965
 966	split_page(p, 1);
 967
 968	/* Free the first 4k. This page may be 2M/1G aligned and cannot be used. */
 969	__free_page(p);
 970
 971	return page_address(p + 1);
 972}
 973
 974static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
 975{
 976	int err;
 977
 978	err = snp_set_vmsa(vmsa, false);
 979	if (err)
 980		pr_err("clear VMSA page failed (%u), leaking page\n", err);
 981	else
 982		free_page((unsigned long)vmsa);
 983}
 984
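/*
 * Bring up an AP under SEV-SNP: build a fresh VMSA with the AP INIT state,
 * turn the page into a VMSA page with RMPADJUST and ask the hypervisor to
 * create the vCPU via the AP Creation NAE event.
 */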
 985static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip)
 986{
 987	struct sev_es_save_area *cur_vmsa, *vmsa;
 988	struct ghcb_state state;
 989	unsigned long flags;
 990	struct ghcb *ghcb;
 991	u8 sipi_vector;
 992	int cpu, ret;
 993	u64 cr4;
 994
 995	/*
 996	 * The hypervisor SNP feature support check has happened earlier; just check
 997	 * the AP_CREATION one here.
 998	 */
 999	if (!(sev_hv_features & GHCB_HV_FT_SNP_AP_CREATION))
1000		return -EOPNOTSUPP;
1001
1002	/*
1003	 * Verify the desired start IP against the known trampoline start IP
1004	 * to catch any future new trampolines that may be introduced that
1005	 * would require a new protected guest entry point.
1006	 */
1007	if (WARN_ONCE(start_ip != real_mode_header->trampoline_start,
1008		      "Unsupported SNP start_ip: %lx\n", start_ip))
1009		return -EINVAL;
1010
1011	/* Override start_ip with known protected guest start IP */
1012	start_ip = real_mode_header->sev_es_trampoline_start;
1013
1014	/* Find the logical CPU for the APIC ID */
1015	for_each_present_cpu(cpu) {
1016		if (arch_match_cpu_phys_id(cpu, apic_id))
1017			break;
1018	}
1019	if (cpu >= nr_cpu_ids)
1020		return -EINVAL;
1021
1022	cur_vmsa = per_cpu(sev_vmsa, cpu);
1023
1024	/*
1025	 * A new VMSA is created each time because there is no guarantee that
1026	 * the current VMSA is the kernel's or that the vCPU is not running. If
1027	 * an attempt were made to use the current VMSA with a running vCPU, a
1028	 * #VMEXIT of that vCPU would wipe out all of the settings being done
1029	 * here.
1030	 */
1031	vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page();
1032	if (!vmsa)
1033		return -ENOMEM;
1034
1035	/* CR4 should maintain the MCE value */
1036	cr4 = native_read_cr4() & X86_CR4_MCE;
1037
1038	/* Set the CS value based on the start_ip converted to a SIPI vector */
1039	sipi_vector		= (start_ip >> 12);
1040	vmsa->cs.base		= sipi_vector << 12;
1041	vmsa->cs.limit		= AP_INIT_CS_LIMIT;
1042	vmsa->cs.attrib		= INIT_CS_ATTRIBS;
1043	vmsa->cs.selector	= sipi_vector << 8;
1044
1045	/* Set the RIP value based on start_ip */
1046	vmsa->rip		= start_ip & 0xfff;
1047
1048	/* Set AP INIT defaults as documented in the APM */
1049	vmsa->ds.limit		= AP_INIT_DS_LIMIT;
1050	vmsa->ds.attrib		= INIT_DS_ATTRIBS;
1051	vmsa->es		= vmsa->ds;
1052	vmsa->fs		= vmsa->ds;
1053	vmsa->gs		= vmsa->ds;
1054	vmsa->ss		= vmsa->ds;
1055
1056	vmsa->gdtr.limit	= AP_INIT_GDTR_LIMIT;
1057	vmsa->ldtr.limit	= AP_INIT_LDTR_LIMIT;
1058	vmsa->ldtr.attrib	= INIT_LDTR_ATTRIBS;
1059	vmsa->idtr.limit	= AP_INIT_IDTR_LIMIT;
1060	vmsa->tr.limit		= AP_INIT_TR_LIMIT;
1061	vmsa->tr.attrib		= INIT_TR_ATTRIBS;
1062
1063	vmsa->cr4		= cr4;
1064	vmsa->cr0		= AP_INIT_CR0_DEFAULT;
1065	vmsa->dr7		= DR7_RESET_VALUE;
1066	vmsa->dr6		= AP_INIT_DR6_DEFAULT;
1067	vmsa->rflags		= AP_INIT_RFLAGS_DEFAULT;
1068	vmsa->g_pat		= AP_INIT_GPAT_DEFAULT;
1069	vmsa->xcr0		= AP_INIT_XCR0_DEFAULT;
1070	vmsa->mxcsr		= AP_INIT_MXCSR_DEFAULT;
1071	vmsa->x87_ftw		= AP_INIT_X87_FTW_DEFAULT;
1072	vmsa->x87_fcw		= AP_INIT_X87_FCW_DEFAULT;
1073
1074	/* SVME must be set. */
1075	vmsa->efer		= EFER_SVME;
1076
1077	/*
1078	 * Set the SNP-specific fields for this VMSA:
1079	 *   VMPL level
1080	 *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
1081	 */
1082	vmsa->vmpl		= 0;
1083	vmsa->sev_features	= sev_status >> 2;
1084
1085	/* Switch the page over to a VMSA page now that it is initialized */
1086	ret = snp_set_vmsa(vmsa, true);
1087	if (ret) {
1088		pr_err("set VMSA page failed (%u)\n", ret);
1089		free_page((unsigned long)vmsa);
1090
1091		return -EINVAL;
1092	}
1093
1094	/* Issue VMGEXIT AP Creation NAE event */
1095	local_irq_save(flags);
1096
1097	ghcb = __sev_get_ghcb(&state);
1098
1099	vc_ghcb_invalidate(ghcb);
1100	ghcb_set_rax(ghcb, vmsa->sev_features);
1101	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
1102	ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE);
1103	ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
1104
1105	sev_es_wr_ghcb_msr(__pa(ghcb));
1106	VMGEXIT();
1107
1108	if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
1109	    lower_32_bits(ghcb->save.sw_exit_info_1)) {
1110		pr_err("SNP AP Creation error\n");
1111		ret = -EINVAL;
1112	}
1113
1114	__sev_put_ghcb(&state);
1115
1116	local_irq_restore(flags);
1117
1118	/* Perform cleanup if there was an error */
1119	if (ret) {
1120		snp_cleanup_vmsa(vmsa);
1121		vmsa = NULL;
1122	}
1123
1124	/* Free up any previous VMSA page */
1125	if (cur_vmsa)
1126		snp_cleanup_vmsa(cur_vmsa);
1127
1128	/* Record the current VMSA page */
1129	per_cpu(sev_vmsa, cpu) = vmsa;
1130
1131	return ret;
1132}
1133
1134void snp_set_wakeup_secondary_cpu(void)
1135{
1136	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1137		return;
1138
1139	/*
1140	 * Always set this override if SNP is enabled. This makes it the
1141	 * required method to start APs under SNP. If the hypervisor does
1142	 * not support AP creation, then no APs will be started.
1143	 */
1144	apic->wakeup_secondary_cpu = wakeup_cpu_via_vmgexit;
1145}
1146
1147int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
1148{
1149	u16 startup_cs, startup_ip;
1150	phys_addr_t jump_table_pa;
1151	u64 jump_table_addr;
1152	u16 __iomem *jump_table;
1153
1154	jump_table_addr = get_jump_table_addr();
1155
1156	/* On UP guests there is no jump table so this is not a failure */
1157	if (!jump_table_addr)
1158		return 0;
1159
1160	/* Check if AP Jump Table is page-aligned */
1161	if (jump_table_addr & ~PAGE_MASK)
1162		return -EINVAL;
1163
1164	jump_table_pa = jump_table_addr & PAGE_MASK;
1165
1166	startup_cs = (u16)(rmh->trampoline_start >> 4);
1167	startup_ip = (u16)(rmh->sev_es_trampoline_start -
1168			   rmh->trampoline_start);
1169
1170	jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
1171	if (!jump_table)
1172		return -EIO;
1173
1174	writew(startup_ip, &jump_table[0]);
1175	writew(startup_cs, &jump_table[1]);
1176
1177	iounmap(jump_table);
1178
1179	return 0;
1180}
1181
1182/*
1183 * This is needed by the OVMF UEFI firmware which will use whatever it finds in
1184 * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
1185 * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
1186 */
1187int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
1188{
1189	struct sev_es_runtime_data *data;
1190	unsigned long address, pflags;
1191	int cpu;
1192	u64 pfn;
1193
1194	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1195		return 0;
1196
1197	pflags = _PAGE_NX | _PAGE_RW;
1198
1199	for_each_possible_cpu(cpu) {
1200		data = per_cpu(runtime_data, cpu);
1201
1202		address = __pa(&data->ghcb_page);
1203		pfn = address >> PAGE_SHIFT;
1204
1205		if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
1206			return 1;
1207	}
1208
1209	return 0;
1210}
1211
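/*
 * Emulate RDMSR/WRMSR. ECX selects the MSR; for WRMSR (second opcode byte
 * 0x30) EDX:EAX supply the value, for RDMSR the result is copied back from
 * the GHCB into the guest registers.
 */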
1212static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
1213{
1214	struct pt_regs *regs = ctxt->regs;
1215	enum es_result ret;
1216	u64 exit_info_1;
1217
1218	/* Is it a WRMSR? */
1219	exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;
1220
1221	ghcb_set_rcx(ghcb, regs->cx);
1222	if (exit_info_1) {
1223		ghcb_set_rax(ghcb, regs->ax);
1224		ghcb_set_rdx(ghcb, regs->dx);
1225	}
1226
1227	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);
1228
1229	if ((ret == ES_OK) && (!exit_info_1)) {
1230		regs->ax = ghcb->save.rax;
1231		regs->dx = ghcb->save.rdx;
1232	}
1233
1234	return ret;
1235}
1236
1237static void snp_register_per_cpu_ghcb(void)
1238{
1239	struct sev_es_runtime_data *data;
1240	struct ghcb *ghcb;
1241
1242	data = this_cpu_read(runtime_data);
1243	ghcb = &data->ghcb_page;
1244
1245	snp_register_ghcb_early(__pa(ghcb));
1246}
1247
1248void setup_ghcb(void)
1249{
1250	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1251		return;
1252
1253	/* First make sure the hypervisor talks a supported protocol. */
1254	if (!sev_es_negotiate_protocol())
1255		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
1256
1257	/*
1258	 * Check whether the runtime #VC exception handler is active. It uses
1259	 * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
1260	 *
1261	 * If SNP is active, register the per-CPU GHCB page so that the runtime
1262	 * exception handler can use it.
1263	 */
1264	if (initial_vc_handler == (unsigned long)kernel_exc_vmm_communication) {
1265		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1266			snp_register_per_cpu_ghcb();
1267
1268		return;
1269	}
1270
1271	/*
1272	 * Clear the boot_ghcb. The first exception comes in before the bss
1273	 * section is cleared.
1274	 */
1275	memset(&boot_ghcb_page, 0, PAGE_SIZE);
1276
1277	/* Alright - Make the boot-ghcb public */
1278	boot_ghcb = &boot_ghcb_page;
1279
1280	/* An SNP guest requires that the GHCB GPA be registered. */
1281	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1282		snp_register_ghcb_early(__pa(&boot_ghcb_page));
1283}
1284
1285#ifdef CONFIG_HOTPLUG_CPU
1286static void sev_es_ap_hlt_loop(void)
1287{
1288	struct ghcb_state state;
1289	struct ghcb *ghcb;
1290
1291	ghcb = __sev_get_ghcb(&state);
1292
1293	while (true) {
1294		vc_ghcb_invalidate(ghcb);
1295		ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
1296		ghcb_set_sw_exit_info_1(ghcb, 0);
1297		ghcb_set_sw_exit_info_2(ghcb, 0);
1298
1299		sev_es_wr_ghcb_msr(__pa(ghcb));
1300		VMGEXIT();
1301
1302		/* Wakeup signal? */
1303		if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
1304		    ghcb->save.sw_exit_info_2)
1305			break;
1306	}
1307
1308	__sev_put_ghcb(&state);
1309}
1310
1311/*
1312 * Play_dead handler when running under SEV-ES. This is needed because
1313 * the hypervisor can't deliver an SIPI request to restart the AP.
1314 * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
1315 * hypervisor wakes it up again.
1316 */
1317static void sev_es_play_dead(void)
1318{
1319	play_dead_common();
1320
1321	/* IRQs now disabled */
1322
1323	sev_es_ap_hlt_loop();
1324
1325	/*
1326	 * If we get here, the VCPU was woken up again. Jump to CPU
1327	 * startup code to get it back online.
1328	 */
1329	start_cpu0();
1330}
1331#else  /* CONFIG_HOTPLUG_CPU */
1332#define sev_es_play_dead	native_play_dead
1333#endif /* CONFIG_HOTPLUG_CPU */
1334
1335#ifdef CONFIG_SMP
1336static void __init sev_es_setup_play_dead(void)
1337{
1338	smp_ops.play_dead = sev_es_play_dead;
1339}
1340#else
1341static inline void sev_es_setup_play_dead(void) { }
1342#endif
1343
1344static void __init alloc_runtime_data(int cpu)
1345{
1346	struct sev_es_runtime_data *data;
1347
1348	data = memblock_alloc(sizeof(*data), PAGE_SIZE);
1349	if (!data)
1350		panic("Can't allocate SEV-ES runtime data");
1351
1352	per_cpu(runtime_data, cpu) = data;
1353}
1354
1355static void __init init_ghcb(int cpu)
1356{
1357	struct sev_es_runtime_data *data;
1358	int err;
1359
1360	data = per_cpu(runtime_data, cpu);
1361
1362	err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
1363					 sizeof(data->ghcb_page));
1364	if (err)
1365		panic("Can't map GHCBs unencrypted");
1366
1367	memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));
1368
1369	data->ghcb_active = false;
1370	data->backup_ghcb_active = false;
1371}
1372
1373void __init sev_es_init_vc_handling(void)
1374{
1375	int cpu;
1376
1377	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);
1378
1379	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1380		return;
1381
1382	if (!sev_es_check_cpu_features())
1383		panic("SEV-ES CPU Features missing");
1384
1385	/*
1386	 * SNP is supported in v2 of the GHCB spec which mandates support for HV
1387	 * features.
1388	 */
1389	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
1390		sev_hv_features = get_hv_features();
1391
1392		if (!(sev_hv_features & GHCB_HV_FT_SNP))
1393			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
1394	}
1395
1396	/* Enable SEV-ES special handling */
1397	static_branch_enable(&sev_es_enable_key);
1398
1399	/* Initialize per-cpu GHCB pages */
1400	for_each_possible_cpu(cpu) {
1401		alloc_runtime_data(cpu);
1402		init_ghcb(cpu);
1403	}
1404
1405	sev_es_setup_play_dead();
1406
1407	/* Secondary CPUs use the runtime #VC handler */
1408	initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
1409}
1410
1411static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
1412{
1413	int trapnr = ctxt->fi.vector;
1414
1415	if (trapnr == X86_TRAP_PF)
1416		native_write_cr2(ctxt->fi.cr2);
1417
1418	ctxt->regs->orig_ax = ctxt->fi.error_code;
1419	do_early_exception(ctxt->regs, trapnr);
1420}
1421
1422static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
1423{
1424	long *reg_array;
1425	int offset;
1426
1427	reg_array = (long *)ctxt->regs;
1428	offset    = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);
1429
1430	if (offset < 0)
1431		return NULL;
1432
1433	offset /= sizeof(long);
1434
1435	return reg_array + offset;
1436}
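
/*
 * Perform a single MMIO read or write of @bytes bytes via VMGEXIT, using
 * the GHCB shared buffer as the bounce buffer for the data.
 */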
1437static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
1438				 unsigned int bytes, bool read)
1439{
1440	u64 exit_code, exit_info_1, exit_info_2;
1441	unsigned long ghcb_pa = __pa(ghcb);
1442	enum es_result res;
1443	phys_addr_t paddr;
1444	void __user *ref;
1445
1446	ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
1447	if (ref == (void __user *)-1L)
1448		return ES_UNSUPPORTED;
1449
1450	exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
1451
1452	res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
1453	if (res != ES_OK) {
1454		if (res == ES_EXCEPTION && !read)
1455			ctxt->fi.error_code |= X86_PF_WRITE;
1456
1457		return res;
1458	}
1459
1460	exit_info_1 = paddr;
1461	/* Can never be greater than 8 */
1462	exit_info_2 = bytes;
1463
1464	ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
1465
1466	return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
1467}
1468
1469/*
1470 * The MOVS instruction has two memory operands, which raises the
1471 * problem that it is not known whether the access to the source or the
1472 * destination caused the #VC exception (and hence whether an MMIO read
1473 * or write operation needs to be emulated).
1474 *
1475 * Instead of playing games with walking page-tables and trying to guess
1476 * whether the source or destination is an MMIO range, split the move
1477 * into two operations, a read and a write with only one memory operand.
1478 * This will cause a nested #VC exception on the MMIO address which can
1479 * then be handled.
1480 *
1481 * This implementation has the benefit that it also supports MOVS where
1482 * source _and_ destination are MMIO regions.
1483 *
1484 * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
1485 * rare operation. If it turns out to be a performance problem the split
1486 * operations can be moved to memcpy_fromio() and memcpy_toio().
1487 */
1488static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
1489					  unsigned int bytes)
1490{
1491	unsigned long ds_base, es_base;
1492	unsigned char *src, *dst;
1493	unsigned char buffer[8];
1494	enum es_result ret;
1495	bool rep;
1496	int off;
1497
1498	ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
1499	es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
1500
1501	if (ds_base == -1L || es_base == -1L) {
1502		ctxt->fi.vector = X86_TRAP_GP;
1503		ctxt->fi.error_code = 0;
1504		return ES_EXCEPTION;
1505	}
1506
1507	src = ds_base + (unsigned char *)ctxt->regs->si;
1508	dst = es_base + (unsigned char *)ctxt->regs->di;
1509
1510	ret = vc_read_mem(ctxt, src, buffer, bytes);
1511	if (ret != ES_OK)
1512		return ret;
1513
1514	ret = vc_write_mem(ctxt, dst, buffer, bytes);
1515	if (ret != ES_OK)
1516		return ret;
1517
1518	if (ctxt->regs->flags & X86_EFLAGS_DF)
1519		off = -bytes;
1520	else
1521		off =  bytes;
1522
1523	ctxt->regs->si += off;
1524	ctxt->regs->di += off;
1525
1526	rep = insn_has_rep_prefix(&ctxt->insn);
1527	if (rep)
1528		ctxt->regs->cx -= 1;
1529
1530	if (!rep || ctxt->regs->cx == 0)
1531		return ES_OK;
1532	else
1533		return ES_RETRY;
1534}
1535
1536static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
1537{
1538	struct insn *insn = &ctxt->insn;
1539	enum insn_mmio_type mmio;
1540	unsigned int bytes = 0;
1541	enum es_result ret;
1542	u8 sign_byte;
1543	long *reg_data;
1544
1545	mmio = insn_decode_mmio(insn, &bytes);
1546	if (mmio == INSN_MMIO_DECODE_FAILED)
1547		return ES_DECODE_FAILED;
1548
1549	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
1550		reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
1551		if (!reg_data)
1552			return ES_DECODE_FAILED;
1553	}
1554
1555	switch (mmio) {
1556	case INSN_MMIO_WRITE:
1557		memcpy(ghcb->shared_buffer, reg_data, bytes);
1558		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
1559		break;
1560	case INSN_MMIO_WRITE_IMM:
1561		memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
1562		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
1563		break;
1564	case INSN_MMIO_READ:
1565		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
1566		if (ret)
1567			break;
1568
1569		/* Zero-extend for 32-bit operation */
1570		if (bytes == 4)
1571			*reg_data = 0;
1572
1573		memcpy(reg_data, ghcb->shared_buffer, bytes);
1574		break;
1575	case INSN_MMIO_READ_ZERO_EXTEND:
1576		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
1577		if (ret)
1578			break;
1579
1580		/* Zero extend based on operand size */
1581		memset(reg_data, 0, insn->opnd_bytes);
1582		memcpy(reg_data, ghcb->shared_buffer, bytes);
1583		break;
1584	case INSN_MMIO_READ_SIGN_EXTEND:
1585		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
1586		if (ret)
1587			break;
1588
1589		if (bytes == 1) {
1590			u8 *val = (u8 *)ghcb->shared_buffer;
1591
1592			sign_byte = (*val & 0x80) ? 0xff : 0x00;
1593		} else {
1594			u16 *val = (u16 *)ghcb->shared_buffer;
1595
1596			sign_byte = (*val & 0x8000) ? 0xff : 0x00;
1597		}
1598
1599		/* Sign extend based on operand size */
1600		memset(reg_data, sign_byte, insn->opnd_bytes);
1601		memcpy(reg_data, ghcb->shared_buffer, bytes);
1602		break;
1603	case INSN_MMIO_MOVS:
1604		ret = vc_handle_mmio_movs(ctxt, bytes);
1605		break;
1606	default:
1607		ret = ES_UNSUPPORTED;
1608		break;
1609	}
1610
1611	return ret;
1612}
1613
1614static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
1615					  struct es_em_ctxt *ctxt)
1616{
1617	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
1618	long val, *reg = vc_insn_get_rm(ctxt);
1619	enum es_result ret;
1620
1621	if (!reg)
1622		return ES_DECODE_FAILED;
1623
1624	val = *reg;
1625
1626	/* Upper 32 bits must be written as zeroes */
1627	if (val >> 32) {
1628		ctxt->fi.vector = X86_TRAP_GP;
1629		ctxt->fi.error_code = 0;
1630		return ES_EXCEPTION;
1631	}
1632
1633	/* Clear out other reserved bits and set bit 10 */
1634	val = (val & 0xffff23ffL) | BIT(10);
1635
1636	/* Early non-zero writes to DR7 are not supported */
1637	if (!data && (val & ~DR7_RESET_VALUE))
1638		return ES_UNSUPPORTED;
1639
1640	/* Using a value of 0 for ExitInfo1 means RAX holds the value */
1641	ghcb_set_rax(ghcb, val);
1642	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
1643	if (ret != ES_OK)
1644		return ret;
1645
1646	if (data)
1647		data->dr7 = val;
1648
1649	return ES_OK;
1650}
1651
1652static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
1653					 struct es_em_ctxt *ctxt)
1654{
1655	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
1656	long *reg = vc_insn_get_rm(ctxt);
1657
1658	if (!reg)
1659		return ES_DECODE_FAILED;
1660
1661	if (data)
1662		*reg = data->dr7;
1663	else
1664		*reg = DR7_RESET_VALUE;
1665
1666	return ES_OK;
1667}
1668
1669static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
1670				       struct es_em_ctxt *ctxt)
1671{
1672	return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
1673}
1674
1675static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
1676{
1677	enum es_result ret;
1678
1679	ghcb_set_rcx(ghcb, ctxt->regs->cx);
1680
1681	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
1682	if (ret != ES_OK)
1683		return ret;
1684
1685	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
1686		return ES_VMM_ERROR;
1687
1688	ctxt->regs->ax = ghcb->save.rax;
1689	ctxt->regs->dx = ghcb->save.rdx;
1690
1691	return ES_OK;
1692}
1693
1694static enum es_result vc_handle_monitor(struct ghcb *ghcb,
1695					struct es_em_ctxt *ctxt)
1696{
1697	/*
1698	 * Treat it as a NOP and do not leak a physical address to the
1699	 * hypervisor.
1700	 */
1701	return ES_OK;
1702}
1703
1704static enum es_result vc_handle_mwait(struct ghcb *ghcb,
1705				      struct es_em_ctxt *ctxt)
1706{
1707	/* Treat the same as MONITOR/MONITORX */
1708	return ES_OK;
1709}
1710
1711static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
1712					struct es_em_ctxt *ctxt)
1713{
1714	enum es_result ret;
1715
1716	ghcb_set_rax(ghcb, ctxt->regs->ax);
1717	ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);
1718
1719	if (x86_platform.hyper.sev_es_hcall_prepare)
1720		x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
1721
1722	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
1723	if (ret != ES_OK)
1724		return ret;
1725
1726	if (!ghcb_rax_is_valid(ghcb))
1727		return ES_VMM_ERROR;
1728
1729	ctxt->regs->ax = ghcb->save.rax;
1730
1731	/*
1732	 * Call sev_es_hcall_finish() after regs->ax is already set.
1733	 * This allows the hypervisor handler to overwrite it again if
1734	 * necessary.
1735	 */
1736	if (x86_platform.hyper.sev_es_hcall_finish &&
1737	    !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
1738		return ES_VMM_ERROR;
1739
1740	return ES_OK;
1741}
1742
1743static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
1744					struct es_em_ctxt *ctxt)
1745{
1746	/*
1747	 * Calling exc_alignment_check() directly does not work, because it
1748	 * enables IRQs and the GHCB is active. Forward the exception and call
1749	 * it later from vc_forward_exception().
1750	 */
1751	ctxt->fi.vector = X86_TRAP_AC;
1752	ctxt->fi.error_code = 0;
1753	return ES_EXCEPTION;
1754}
1755
1756static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
1757					 struct ghcb *ghcb,
1758					 unsigned long exit_code)
1759{
1760	enum es_result result;
1761
1762	switch (exit_code) {
1763	case SVM_EXIT_READ_DR7:
1764		result = vc_handle_dr7_read(ghcb, ctxt);
1765		break;
1766	case SVM_EXIT_WRITE_DR7:
1767		result = vc_handle_dr7_write(ghcb, ctxt);
1768		break;
1769	case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
1770		result = vc_handle_trap_ac(ghcb, ctxt);
1771		break;
1772	case SVM_EXIT_RDTSC:
1773	case SVM_EXIT_RDTSCP:
1774		result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
1775		break;
1776	case SVM_EXIT_RDPMC:
1777		result = vc_handle_rdpmc(ghcb, ctxt);
1778		break;
1779	case SVM_EXIT_INVD:
1780		pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
1781		result = ES_UNSUPPORTED;
1782		break;
1783	case SVM_EXIT_CPUID:
1784		result = vc_handle_cpuid(ghcb, ctxt);
1785		break;
1786	case SVM_EXIT_IOIO:
1787		result = vc_handle_ioio(ghcb, ctxt);
1788		break;
1789	case SVM_EXIT_MSR:
1790		result = vc_handle_msr(ghcb, ctxt);
1791		break;
1792	case SVM_EXIT_VMMCALL:
1793		result = vc_handle_vmmcall(ghcb, ctxt);
1794		break;
1795	case SVM_EXIT_WBINVD:
1796		result = vc_handle_wbinvd(ghcb, ctxt);
1797		break;
1798	case SVM_EXIT_MONITOR:
1799		result = vc_handle_monitor(ghcb, ctxt);
1800		break;
1801	case SVM_EXIT_MWAIT:
1802		result = vc_handle_mwait(ghcb, ctxt);
1803		break;
1804	case SVM_EXIT_NPF:
1805		result = vc_handle_mmio(ghcb, ctxt);
1806		break;
1807	default:
1808		/*
1809		 * Unexpected #VC exception
1810		 */
1811		result = ES_UNSUPPORTED;
1812	}
1813
1814	return result;
1815}
1816
1817static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
1818{
1819	long error_code = ctxt->fi.error_code;
1820	int trapnr = ctxt->fi.vector;
1821
1822	ctxt->regs->orig_ax = ctxt->fi.error_code;
1823
1824	switch (trapnr) {
1825	case X86_TRAP_GP:
1826		exc_general_protection(ctxt->regs, error_code);
1827		break;
1828	case X86_TRAP_UD:
1829		exc_invalid_op(ctxt->regs);
1830		break;
1831	case X86_TRAP_PF:
1832		write_cr2(ctxt->fi.cr2);
1833		exc_page_fault(ctxt->regs, error_code);
1834		break;
1835	case X86_TRAP_AC:
1836		exc_alignment_check(ctxt->regs, error_code);
1837		break;
1838	default:
1839		pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
1840		BUG();
1841	}
1842}
1843
1844static __always_inline bool is_vc2_stack(unsigned long sp)
1845{
1846	return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
1847}
1848
1849static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
1850{
1851	unsigned long sp, prev_sp;
1852
1853	sp      = (unsigned long)regs;
1854	prev_sp = regs->sp;
1855
1856	/*
1857	 * If the code was already executing on the VC2 stack when the #VC
1858	 * happened, let it proceed to the normal handling routine. This way the
1859	 * code executing on the VC2 stack can cause #VC exceptions to get handled.
1860	 */
1861	return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
1862}
1863
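/*
 * Common #VC handling: acquire the per-CPU GHCB, decode and emulate the
 * intercepted operation, then act on the result. Returns false if the
 * exception could not be handled and the caller has to terminate.
 */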
1864static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
1865{
1866	struct ghcb_state state;
1867	struct es_em_ctxt ctxt;
1868	enum es_result result;
1869	struct ghcb *ghcb;
1870	bool ret = true;
1871
1872	ghcb = __sev_get_ghcb(&state);
1873
1874	vc_ghcb_invalidate(ghcb);
1875	result = vc_init_em_ctxt(&ctxt, regs, error_code);
1876
1877	if (result == ES_OK)
1878		result = vc_handle_exitcode(&ctxt, ghcb, error_code);
1879
1880	__sev_put_ghcb(&state);
1881
1882	/* Done - now check the result */
1883	switch (result) {
1884	case ES_OK:
1885		vc_finish_insn(&ctxt);
1886		break;
1887	case ES_UNSUPPORTED:
1888		pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
1889				   error_code, regs->ip);
1890		ret = false;
1891		break;
1892	case ES_VMM_ERROR:
1893		pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
1894				   error_code, regs->ip);
1895		ret = false;
1896		break;
1897	case ES_DECODE_FAILED:
1898		pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
1899				   error_code, regs->ip);
1900		ret = false;
1901		break;
1902	case ES_EXCEPTION:
1903		vc_forward_exception(&ctxt);
1904		break;
1905	case ES_RETRY:
1906		/* Nothing to do */
1907		break;
1908	default:
1909		pr_emerg("Unknown result in %s():%d\n", __func__, result);
1910		/*
1911		 * Emulating the instruction which caused the #VC exception
1912		 * failed - can't continue so print debug information
1913		 */
1914		BUG();
1915	}
1916
1917	return ret;
1918}
1919
1920static __always_inline bool vc_is_db(unsigned long error_code)
1921{
1922	return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
1923}
1924
1925/*
1926 * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
1927 * and will panic when an error happens.
1928 */
1929DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
1930{
1931	irqentry_state_t irq_state;
1932
1933	/*
1934	 * With the current implementation it is always possible to switch to a
1935	 * safe stack because #VC exceptions only happen at known places, like
1936	 * intercepted instructions or accesses to MMIO areas/IO ports. They can
1937	 * also happen with code instrumentation when the hypervisor intercepts
1938	 * #DB, but the critical paths are forbidden to be instrumented, so #DB
1939	 * exceptions currently also only happen in safe places.
1940	 *
1941	 * But keep this check here in case the noinstr annotations are
1942	 * violated due to a bug elsewhere.
1943	 */
1944	if (unlikely(vc_from_invalid_context(regs))) {
1945		instrumentation_begin();
1946		panic("Can't handle #VC exception from unsupported context\n");
1947		instrumentation_end();
1948	}
1949
1950	/*
1951	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
1952	 */
1953	if (vc_is_db(error_code)) {
1954		exc_debug(regs);
1955		return;
1956	}
1957
1958	irq_state = irqentry_nmi_enter(regs);
1959
1960	instrumentation_begin();
1961
1962	if (!vc_raw_handle_exception(regs, error_code)) {
1963		/* Show some debug info */
1964		show_regs(regs);
1965
1966		/* Ask the hypervisor to terminate the guest */
1967		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
1968
1969		/* If that fails and we get here - just panic */
1970		panic("Returned from Terminate-Request to Hypervisor\n");
1971	}
1972
1973	instrumentation_end();
1974	irqentry_nmi_exit(regs, irq_state);
1975}
1976
1977/*
1978 * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
1979 * and will kill the current task with SIGBUS when an error happens.
1980 */
1981DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
1982{
1983	/*
1984	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
1985	 */
1986	if (vc_is_db(error_code)) {
1987		noist_exc_debug(regs);
1988		return;
1989	}
1990
1991	irqentry_enter_from_user_mode(regs);
1992	instrumentation_begin();
1993
1994	if (!vc_raw_handle_exception(regs, error_code)) {
1995		/*
1996		 * Do not kill the machine if user-space triggered the
1997		 * exception. Send SIGBUS instead and let user-space deal with
1998		 * it.
1999		 */
2000		force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
2001	}
2002
2003	instrumentation_end();
2004	irqentry_exit_to_user_mode(regs);
2005}
2006
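/*
 * Early-boot #VC handler which uses the early boot GHCB (boot_ghcb) before
 * the per-CPU GHCBs are set up. On unrecoverable failures it dumps the
 * register state and asks the hypervisor to terminate the guest.
 */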
2007bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
2008{
2009	unsigned long exit_code = regs->orig_ax;
2010	struct es_em_ctxt ctxt;
2011	enum es_result result;
2012
2013	vc_ghcb_invalidate(boot_ghcb);
2014
2015	result = vc_init_em_ctxt(&ctxt, regs, exit_code);
2016	if (result == ES_OK)
2017		result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);
2018
2019	/* Done - now check the result */
2020	switch (result) {
2021	case ES_OK:
2022		vc_finish_insn(&ctxt);
2023		break;
2024	case ES_UNSUPPORTED:
2025		early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
2026				exit_code, regs->ip);
2027		goto fail;
2028	case ES_VMM_ERROR:
2029		early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
2030				exit_code, regs->ip);
2031		goto fail;
2032	case ES_DECODE_FAILED:
2033		early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
2034				exit_code, regs->ip);
2035		goto fail;
2036	case ES_EXCEPTION:
2037		vc_early_forward_exception(&ctxt);
2038		break;
2039	case ES_RETRY:
2040		/* Nothing to do */
2041		break;
2042	default:
2043		BUG();
2044	}
2045
2046	return true;
2047
2048fail:
2049	show_regs(regs);
2050
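	/*
	 * sev_es_terminate() requests termination from the hypervisor and
	 * does not return, so no return value is provided on this path.
	 */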
2051	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
2052}
2053
2054/*
2055 * Initial setup of SNP relies on information provided by the
2056 * Confidential Computing blob, which can be passed to the kernel
2057 * in the following ways, depending on how it is booted:
2058 *
2059 * - when booted via the boot/decompress kernel:
2060 *   - via boot_params
2061 *
2062 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
2063 *   - via a setup_data entry, as defined by the Linux Boot Protocol
2064 *
2065 * Scan for the blob in that order.
2066 */
2067static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
2068{
2069	struct cc_blob_sev_info *cc_info;
2070
2071	/* The boot kernel would have passed the CC blob via boot_params. */
2072	if (bp->cc_blob_address) {
2073		cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
2074		goto found_cc_info;
2075	}
2076
2077	/*
2078	 * If the kernel was booted directly, without the use of the
2079	 * boot/decompression kernel, the CC blob may have been passed via
2080	 * setup_data instead.
2081	 */
2082	cc_info = find_cc_blob_setup_data(bp);
2083	if (!cc_info)
2084		return NULL;
2085
2086found_cc_info:
2087	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
2088		snp_abort();
2089
2090	return cc_info;
2091}
2092
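/*
 * Locate the Confidential Computing blob, use it to set up the SNP CPUID
 * table, and cache its address in boot_params for later consumers (e.g. the
 * secrets page lookup). Returns false when no CC blob is found, i.e. when
 * not running as an SEV-SNP guest.
 */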
2093bool __init snp_init(struct boot_params *bp)
2094{
2095	struct cc_blob_sev_info *cc_info;
2096
2097	if (!bp)
2098		return false;
2099
2100	cc_info = find_cc_blob(bp);
2101	if (!cc_info)
2102		return false;
2103
2104	setup_cpuid_table(cc_info);
2105
2106	/*
2107	 * The CC blob will be used later to access the secrets page. Cache
2108	 * it here like the boot kernel does.
2109	 */
2110	bp->cc_blob_address = (u32)(unsigned long)cc_info;
2111
2112	return true;
2113}
2114
2115void __init __noreturn snp_abort(void)
2116{
2117	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
2118}
2119
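/* Dump every SNP CPUID table slot; only called when 'sev=debug' is set. */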
2120static void dump_cpuid_table(void)
2121{
2122	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
2123	int i = 0;
2124
2125	pr_info("count=%d reserved=0x%x reserved2=0x%llx\n",
2126		cpuid_table->count, cpuid_table->__reserved1, cpuid_table->__reserved2);
2127
2128	for (i = 0; i < SNP_CPUID_COUNT_MAX; i++) {
2129		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
2130
2131		pr_info("index=%3d fn=0x%08x subfn=0x%08x: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x xcr0_in=0x%016llx xss_in=0x%016llx reserved=0x%016llx\n",
2132			i, fn->eax_in, fn->ecx_in, fn->eax, fn->ebx, fn->ecx,
2133			fn->edx, fn->xcr0_in, fn->xss_in, fn->__reserved);
2134	}
2135}
2136
2137/*
2138 * It is useful from an auditing/testing perspective to provide an easy way
2139 * for the guest owner to know that the CPUID table has been initialized as
2140 * expected. That initialization happens too early in boot to print any
2141 * sort of indicator, and there is no better place to report it, so do it
2142 * from this initcall instead.
2143 */
2144static int __init report_cpuid_table(void)
2145{
2146	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
2147
2148	if (!cpuid_table->count)
2149		return 0;
2150
2151	pr_info("Using SNP CPUID table, %d entries present.\n",
2152		cpuid_table->count);
2153
2154	if (sev_cfg.debug)
2155		dump_cpuid_table();
2156
2157	return 0;
2158}
2159arch_initcall(report_cpuid_table);
2160
2161static int __init init_sev_config(char *str)
2162{
2163	char *s;
2164
2165	while ((s = strsep(&str, ","))) {
2166		if (!strcmp(s, "debug")) {
2167			sev_cfg.debug = true;
2168			continue;
2169		}
2170
2171		pr_info("SEV command-line option '%s' was not recognized\n", s);
2172	}
2173
2174	return 1;
2175}
2176__setup("sev=", init_sev_config);
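/*
 * Illustrative usage: booting with "sev=debug" on the kernel command line
 * sets sev_cfg.debug, which makes report_cpuid_table() above dump the full
 * SNP CPUID table. Unrecognized options are only logged and then ignored.
 */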
2177
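/*
 * Issue an SNP guest request to the hypervisor over the GHCB protocol.
 * @exit_code selects SVM_VMGEXIT_GUEST_REQUEST or SVM_VMGEXIT_EXT_GUEST_REQUEST,
 * @input carries the GPAs of the request and response pages (plus the
 * additional data buffer and its size in pages for extended requests), and
 * *@fw_err receives the firmware error code from sw_exit_info_2 when the
 * hypervisor reports a failure. For extended requests that fail with
 * SNP_GUEST_REQ_INVALID_LEN, the number of pages the hypervisor expects is
 * written back to @input->data_npages.
 */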
2178int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err)
2179{
2180	struct ghcb_state state;
2181	struct es_em_ctxt ctxt;
2182	unsigned long flags;
2183	struct ghcb *ghcb;
2184	int ret;
2185
2186	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
2187		return -ENODEV;
2188
2189	if (!fw_err)
2190		return -EINVAL;
2191
2192	/*
2193	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
2194	 * a per-CPU GHCB.
2195	 */
2196	local_irq_save(flags);
2197
2198	ghcb = __sev_get_ghcb(&state);
2199	if (!ghcb) {
2200		ret = -EIO;
2201		goto e_restore_irq;
2202	}
2203
2204	vc_ghcb_invalidate(ghcb);
2205
2206	if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
2207		ghcb_set_rax(ghcb, input->data_gpa);
2208		ghcb_set_rbx(ghcb, input->data_npages);
2209	}
2210
2211	ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
2212	if (ret)
2213		goto e_put;
2214
2215	if (ghcb->save.sw_exit_info_2) {
2216		/* The number of expected pages is returned in RBX */
2217		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
2218		    ghcb->save.sw_exit_info_2 == SNP_GUEST_REQ_INVALID_LEN)
2219			input->data_npages = ghcb_get_rbx(ghcb);
2220
2221		*fw_err = ghcb->save.sw_exit_info_2;
2222
2223		ret = -EIO;
2224	}
2225
2226e_put:
2227	__sev_put_ghcb(&state);
2228e_restore_irq:
2229	local_irq_restore(flags);
2230
2231	return ret;
2232}
2233EXPORT_SYMBOL_GPL(snp_issue_guest_request);
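/*
 * Minimal illustrative sketch (not part of the original file): how a guest
 * driver might issue a plain SNP guest request, assuming it has already
 * placed an encrypted request message in @req and reserved @resp for the
 * response. The function name and buffer parameters are hypothetical.
 */
static int __maybe_unused example_snp_guest_request(void *req, void *resp)
{
	struct snp_req_data input = {
		.req_gpa  = __pa(req),
		.resp_gpa = __pa(resp),
	};
	unsigned long fw_err = 0;
	int rc;

	rc = snp_issue_guest_request(SVM_VMGEXIT_GUEST_REQUEST, &input, &fw_err);
	if (rc)
		pr_err("SNP guest request failed: rc=%d fw_err=0x%lx\n", rc, fw_err);

	return rc;
}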
2234
2235static struct platform_device sev_guest_device = {
2236	.name		= "sev-guest",
2237	.id		= -1,
2238};
2239
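/*
 * Register the "sev-guest" platform device, passing the GPA of the SNP
 * secrets page as platform data so that the corresponding guest driver can
 * bind to it.
 */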
2240static int __init snp_init_platform_device(void)
2241{
2242	struct sev_guest_platform_data data;
2243	u64 gpa;
2244
2245	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
2246		return -ENODEV;
2247
2248	gpa = get_secrets_page();
2249	if (!gpa)
2250		return -ENODEV;
2251
2252	data.secrets_gpa = gpa;
2253	if (platform_device_add_data(&sev_guest_device, &data, sizeof(data)))
2254		return -ENODEV;
2255
2256	if (platform_device_register(&sev_guest_device))
2257		return -ENODEV;
2258
2259	pr_info("SNP guest platform device initialized.\n");
2260	return 0;
2261}
2262device_initcall(snp_init_platform_device);