1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * AMD SVM support
6 *
7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9 *
10 * Authors:
11 * Yaniv Kamay <yaniv@qumranet.com>
12 * Avi Kivity <avi@qumranet.com>
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/kvm_types.h>
18#include <linux/kvm_host.h>
19#include <linux/kernel.h>
20
21#include <asm/msr-index.h>
22#include <asm/debugreg.h>
23
24#include "kvm_emulate.h"
25#include "trace.h"
26#include "mmu.h"
27#include "x86.h"
28#include "smm.h"
29#include "cpuid.h"
30#include "lapic.h"
31#include "svm.h"
32#include "hyperv.h"
33
34#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
35
36static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
37 struct x86_exception *fault)
38{
39 struct vcpu_svm *svm = to_svm(vcpu);
40 struct vmcb *vmcb = svm->vmcb;
41
42 if (vmcb->control.exit_code != SVM_EXIT_NPF) {
43 /*
44 * TODO: track the cause of the nested page fault, and
45 * correctly fill in the high bits of exit_info_1.
46 */
47 vmcb->control.exit_code = SVM_EXIT_NPF;
48 vmcb->control.exit_code_hi = 0;
49 vmcb->control.exit_info_1 = (1ULL << 32);
50 vmcb->control.exit_info_2 = fault->address;
51 }
52
53 vmcb->control.exit_info_1 &= ~0xffffffffULL;
54 vmcb->control.exit_info_1 |= fault->error_code;
55
56 nested_svm_vmexit(svm);
57}
58
59static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
60{
61 struct vcpu_svm *svm = to_svm(vcpu);
62 u64 cr3 = svm->nested.ctl.nested_cr3;
63 u64 pdpte;
64 int ret;
65
66 /*
67 * Note, nCR3 is "assumed" to be 32-byte aligned, i.e. the CPU ignores
68 * nCR3[4:0] when loading PDPTEs from memory.
69 */
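	/*
	 * Illustrative example (hypothetical nCR3 value): with nCR3 = 0x12345678,
	 * gpa_to_gfn() selects the page at 0x12345000 and nCR3 bits 11:5 give
	 * offset 0x660, so the PDPTE at @index is read from GPA
	 * 0x12345660 + index * 8.
	 */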
70 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
71 (cr3 & GENMASK(11, 5)) + index * 8, 8);
72 if (ret)
73 return 0;
74 return pdpte;
75}
76
77static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
78{
79 struct vcpu_svm *svm = to_svm(vcpu);
80
81 return svm->nested.ctl.nested_cr3;
82}
83
84static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
85{
86 struct vcpu_svm *svm = to_svm(vcpu);
87
88 WARN_ON(mmu_is_nested(vcpu));
89
90 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
91
92 /*
93 * The NPT format depends on L1's CR4 and EFER, which are in vmcb01. Note,
94 * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
95 * vCPU state. CR0.WP is explicitly ignored, while CR0.PG is required.
96 */
97 kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
98 svm->vmcb01.ptr->save.efer,
99 svm->nested.ctl.nested_cr3);
100 vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
101 vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
102 vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
103 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
104}
105
106static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
107{
108 vcpu->arch.mmu = &vcpu->arch.root_mmu;
109 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
110}
111
112static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
113{
114 if (!guest_can_use(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
115 return true;
116
117 if (!nested_npt_enabled(svm))
118 return true;
119
120 if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
121 return true;
122
123 return false;
124}
125
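/*
 * Recompute vmcb02's intercept vectors while L2 is active: start from L0's
 * intercepts in vmcb01, clear the ones that are irrelevant while running L2
 * (CR8 writes and, when vmcb12 uses V_INTR_MASKING and L1's saved RFLAGS.IF
 * is clear, virtual interrupt windows; VMMCALL unless the Hyper-V L2 TLB
 * flush feature needs it), OR in L1's intercepts from the cached vmcb12
 * controls, drop the SMI intercept if SMI interception is disabled for L0,
 * and force VMLOAD/VMSAVE intercepts whenever virtual VMLOAD/VMSAVE cannot
 * be used.
 */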
126void recalc_intercepts(struct vcpu_svm *svm)
127{
128 struct vmcb_control_area *c, *h;
129 struct vmcb_ctrl_area_cached *g;
130 unsigned int i;
131
132 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
133
134 if (!is_guest_mode(&svm->vcpu))
135 return;
136
137 c = &svm->vmcb->control;
138 h = &svm->vmcb01.ptr->control;
139 g = &svm->nested.ctl;
140
141 for (i = 0; i < MAX_INTERCEPT; i++)
142 c->intercepts[i] = h->intercepts[i];
143
144 if (g->int_ctl & V_INTR_MASKING_MASK) {
145 /*
146 * If L2 is active and V_INTR_MASKING is enabled in vmcb12,
147 * disable intercept of CR8 writes as L2's CR8 does not affect
148 * any interrupt KVM may want to inject.
149 *
150 * Similarly, disable intercept of virtual interrupts (used to
151 * detect interrupt windows) if the saved RFLAGS.IF is '0', as
152 * the effective RFLAGS.IF for L1 interrupts will never be set
153 * while L2 is running (L2's RFLAGS.IF doesn't affect L1 IRQs).
154 */
155 vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
156 if (!(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF))
157 vmcb_clr_intercept(c, INTERCEPT_VINTR);
158 }
159
160 /*
161 * We want to see VMMCALLs from a nested guest only when Hyper-V L2 TLB
162 * flush feature is enabled.
163 */
164 if (!nested_svm_l2_tlb_flush_enabled(&svm->vcpu))
165 vmcb_clr_intercept(c, INTERCEPT_VMMCALL);
166
167 for (i = 0; i < MAX_INTERCEPT; i++)
168 c->intercepts[i] |= g->intercepts[i];
169
170 /* If SMI is not intercepted, ignore guest SMI intercept as well */
171 if (!intercept_smi)
172 vmcb_clr_intercept(c, INTERCEPT_SMI);
173
174 if (nested_vmcb_needs_vls_intercept(svm)) {
175 /*
176 * If the virtual VMLOAD/VMSAVE is not enabled for the L2,
177 * we must intercept these instructions to correctly
178 * emulate them in case L1 doesn't intercept them.
179 */
180 vmcb_set_intercept(c, INTERCEPT_VMLOAD);
181 vmcb_set_intercept(c, INTERCEPT_VMSAVE);
182 } else {
183 WARN_ON(!(c->virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK));
184 }
185}
186
187/*
188 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The function
189 * is optimized in that it only merges the parts where the KVM MSR permission bitmap
190 * may contain zero bits.
191 */
192static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
193{
194 int i;
195
196 /*
197 * MSR bitmap update can be skipped when:
198 * - MSR bitmap for L1 hasn't changed.
199 * - Nested hypervisor (L1) is attempting to launch the same L2 as
200 * before.
201 * - Nested hypervisor (L1) is using Hyper-V emulation interface and
202 * tells KVM (L0) there were no changes in MSR bitmap for L2.
203 */
204#ifdef CONFIG_KVM_HYPERV
205 if (!svm->nested.force_msr_bitmap_recalc) {
206 struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
207
208 if (kvm_hv_hypercall_enabled(&svm->vcpu) &&
209 hve->hv_enlightenments_control.msr_bitmap &&
210 (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
211 goto set_msrpm_base_pa;
212 }
213#endif
214
215 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
216 return true;
217
218 for (i = 0; i < MSRPM_OFFSETS; i++) {
219 u32 value, p;
220 u64 offset;
221
222 if (msrpm_offsets[i] == 0xffffffff)
223 break;
224
225 p = msrpm_offsets[i];
226
227 /* x2APIC MSRs are always intercepted for the nested guest */
228 if (is_x2apic_msrpm_offset(p))
229 continue;
230
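		/*
		 * Each msrpm_offsets[] entry is the index of a 32-bit word
		 * within the 8 KiB MSR permission map (two bits per MSR, read
		 * intercept first), so multiplying by 4 converts it to a byte
		 * offset into L1's bitmap.
		 */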
231 offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
232
233 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
234 return false;
235
236 svm->nested.msrpm[p] = svm->msrpm[p] | value;
237 }
238
239 svm->nested.force_msr_bitmap_recalc = false;
240
241#ifdef CONFIG_KVM_HYPERV
242set_msrpm_base_pa:
243#endif
244 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
245
246 return true;
247}
248
249/*
250 * Bits 11:0 of bitmap address are ignored by hardware
251 */
252static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
253{
254 u64 addr = PAGE_ALIGN(pa);
255
256 return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
257 kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
258}
259
260static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
261 struct vmcb_ctrl_area_cached *control)
262{
263 if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
264 return false;
265
266 if (CC(control->asid == 0))
267 return false;
268
269 if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
270 return false;
271
272 if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
273 MSRPM_SIZE)))
274 return false;
275 if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
276 IOPM_SIZE)))
277 return false;
278
279 if (CC((control->int_ctl & V_NMI_ENABLE_MASK) &&
280 !vmcb12_is_intercept(control, INTERCEPT_NMI))) {
281 return false;
282 }
283
284 return true;
285}
286
287/* Common checks that apply to both L1 and L2 state. */
288static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
289 struct vmcb_save_area_cached *save)
290{
291 if (CC(!(save->efer & EFER_SVME)))
292 return false;
293
294 if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
295 CC(save->cr0 & ~0xffffffffULL))
296 return false;
297
298 if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
299 return false;
300
301 /*
302 * These checks are also performed by KVM_SET_SREGS,
303 * except that EFER.LMA is not checked by SVM against
304 * CR0.PG && EFER.LME.
305 */
306 if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
307 if (CC(!(save->cr4 & X86_CR4_PAE)) ||
308 CC(!(save->cr0 & X86_CR0_PE)) ||
309 CC(!kvm_vcpu_is_legal_cr3(vcpu, save->cr3)))
310 return false;
311 }
312
313 /* Note, SVM doesn't have any additional restrictions on CR4. */
314 if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)))
315 return false;
316
317 if (CC(!kvm_valid_efer(vcpu, save->efer)))
318 return false;
319
320 return true;
321}
322
323static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
324{
325 struct vcpu_svm *svm = to_svm(vcpu);
326 struct vmcb_save_area_cached *save = &svm->nested.save;
327
328 return __nested_vmcb_check_save(vcpu, save);
329}
330
331static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
332{
333 struct vcpu_svm *svm = to_svm(vcpu);
334 struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;
335
336 return __nested_vmcb_check_controls(vcpu, ctl);
337}
338
339static
340void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
341 struct vmcb_ctrl_area_cached *to,
342 struct vmcb_control_area *from)
343{
344 unsigned int i;
345
346 for (i = 0; i < MAX_INTERCEPT; i++)
347 to->intercepts[i] = from->intercepts[i];
348
349 to->iopm_base_pa = from->iopm_base_pa;
350 to->msrpm_base_pa = from->msrpm_base_pa;
351 to->tsc_offset = from->tsc_offset;
352 to->tlb_ctl = from->tlb_ctl;
353 to->int_ctl = from->int_ctl;
354 to->int_vector = from->int_vector;
355 to->int_state = from->int_state;
356 to->exit_code = from->exit_code;
357 to->exit_code_hi = from->exit_code_hi;
358 to->exit_info_1 = from->exit_info_1;
359 to->exit_info_2 = from->exit_info_2;
360 to->exit_int_info = from->exit_int_info;
361 to->exit_int_info_err = from->exit_int_info_err;
362 to->nested_ctl = from->nested_ctl;
363 to->event_inj = from->event_inj;
364 to->event_inj_err = from->event_inj_err;
365 to->next_rip = from->next_rip;
366 to->nested_cr3 = from->nested_cr3;
367 to->virt_ext = from->virt_ext;
368 to->pause_filter_count = from->pause_filter_count;
369 to->pause_filter_thresh = from->pause_filter_thresh;
370
371 /* Copy asid here because nested_vmcb_check_controls will check it. */
372 to->asid = from->asid;
373 to->msrpm_base_pa &= ~0x0fffULL;
374 to->iopm_base_pa &= ~0x0fffULL;
375
376#ifdef CONFIG_KVM_HYPERV
377 /* Hyper-V extensions (Enlightened VMCB) */
378 if (kvm_hv_hypercall_enabled(vcpu)) {
379 to->clean = from->clean;
380 memcpy(&to->hv_enlightenments, &from->hv_enlightenments,
381 sizeof(to->hv_enlightenments));
382 }
383#endif
384}
385
386void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
387 struct vmcb_control_area *control)
388{
389 __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
390}
391
392static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
393 struct vmcb_save_area *from)
394{
395 /*
396 * Copy only fields that are validated, as we need them
397 * to avoid TOCTOU (time-of-check/time-of-use) races.
398 */
399 to->efer = from->efer;
400 to->cr0 = from->cr0;
401 to->cr3 = from->cr3;
402 to->cr4 = from->cr4;
403
404 to->dr6 = from->dr6;
405 to->dr7 = from->dr7;
406}
407
408void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
409 struct vmcb_save_area *save)
410{
411 __nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
412}
413
414/*
415 * Synchronize fields that are written by the processor, so that
416 * they can be copied back into the vmcb12.
417 */
418void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
419{
420 u32 mask;
421 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
422 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
423
424 /* Only a few fields of int_ctl are written by the processor. */
425 mask = V_IRQ_MASK | V_TPR_MASK;
426 /*
427 * Don't sync vmcb02 V_IRQ back to vmcb12 if KVM (L0) is intercepting
428 * virtual interrupts in order to request an interrupt window, as KVM
429 * has usurped vmcb02's int_ctl. If an interrupt window opens before
430 * the next VM-Exit, svm_clear_vintr() will restore vmcb12's int_ctl.
431 * If no window opens, V_IRQ will be correctly preserved in vmcb12's
432 * int_ctl (because it was never recognized while L2 was running).
433 */
434 if (svm_is_intercept(svm, INTERCEPT_VINTR) &&
435 !test_bit(INTERCEPT_VINTR, (unsigned long *)svm->nested.ctl.intercepts))
436 mask &= ~V_IRQ_MASK;
437
438 if (nested_vgif_enabled(svm))
439 mask |= V_GIF_MASK;
440
441 if (nested_vnmi_enabled(svm))
442 mask |= V_NMI_BLOCKING_MASK | V_NMI_PENDING_MASK;
443
444 svm->nested.ctl.int_ctl &= ~mask;
445 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
446}
447
448/*
449 * Transfer any event that L0 or L1 wanted to inject into L2 to
450 * EXIT_INT_INFO.
451 */
452static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
453 struct vmcb *vmcb12)
454{
455 struct kvm_vcpu *vcpu = &svm->vcpu;
456 u32 exit_int_info = 0;
457 unsigned int nr;
458
459 if (vcpu->arch.exception.injected) {
460 nr = vcpu->arch.exception.vector;
461 exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
462
463 if (vcpu->arch.exception.has_error_code) {
464 exit_int_info |= SVM_EVTINJ_VALID_ERR;
465 vmcb12->control.exit_int_info_err =
466 vcpu->arch.exception.error_code;
467 }
468
469 } else if (vcpu->arch.nmi_injected) {
470 exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
471
472 } else if (vcpu->arch.interrupt.injected) {
473 nr = vcpu->arch.interrupt.nr;
474 exit_int_info = nr | SVM_EVTINJ_VALID;
475
476 if (vcpu->arch.interrupt.soft)
477 exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
478 else
479 exit_int_info |= SVM_EVTINJ_TYPE_INTR;
480 }
481
482 vmcb12->control.exit_int_info = exit_int_info;
483}
484
485static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
486{
487 /* Handle pending Hyper-V TLB flush requests */
488 kvm_hv_nested_transtion_tlb_flush(vcpu, npt_enabled);
489
490 /*
491 * TODO: optimize unconditional TLB flush/MMU sync. A partial list of
492 * things to fix before this can be conditional:
493 *
494 * - Flush TLBs for both L1 and L2 remote TLB flush
495 * - Honor L1's request to flush an ASID on nested VMRUN
496 * - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
497 * - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
498 * - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
499 *
500 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
501 * NPT guest-physical mappings on VMRUN.
502 */
503 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
504 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
505}
506
507/*
508 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
509 * if we are emulating VM-Entry into a guest with NPT enabled.
510 */
511static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
512 bool nested_npt, bool reload_pdptrs)
513{
514 if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3)))
515 return -EINVAL;
516
517 if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
518 CC(!load_pdptrs(vcpu, cr3)))
519 return -EINVAL;
520
521 vcpu->arch.cr3 = cr3;
522
523 /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
524 kvm_init_mmu(vcpu);
525
526 if (!nested_npt)
527 kvm_mmu_new_pgd(vcpu, cr3);
528
529 return 0;
530}
531
532void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
533{
534 if (!svm->nested.vmcb02.ptr)
535 return;
536
537 /* FIXME: merge g_pat from vmcb01 and vmcb12. */
538 svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
539}
540
541static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
542{
543 bool new_vmcb12 = false;
544 struct vmcb *vmcb01 = svm->vmcb01.ptr;
545 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
546 struct kvm_vcpu *vcpu = &svm->vcpu;
547
548 nested_vmcb02_compute_g_pat(svm);
549
550 /* Load the nested guest state */
551 if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
552 new_vmcb12 = true;
553 svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
554 svm->nested.force_msr_bitmap_recalc = true;
555 }
556
557 if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
558 vmcb02->save.es = vmcb12->save.es;
559 vmcb02->save.cs = vmcb12->save.cs;
560 vmcb02->save.ss = vmcb12->save.ss;
561 vmcb02->save.ds = vmcb12->save.ds;
562 vmcb02->save.cpl = vmcb12->save.cpl;
563 vmcb_mark_dirty(vmcb02, VMCB_SEG);
564 }
565
566 if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
567 vmcb02->save.gdtr = vmcb12->save.gdtr;
568 vmcb02->save.idtr = vmcb12->save.idtr;
569 vmcb_mark_dirty(vmcb02, VMCB_DT);
570 }
571
572 kvm_set_rflags(vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
573
574 svm_set_efer(vcpu, svm->nested.save.efer);
575
576 svm_set_cr0(vcpu, svm->nested.save.cr0);
577 svm_set_cr4(vcpu, svm->nested.save.cr4);
578
579 svm->vcpu.arch.cr2 = vmcb12->save.cr2;
580
581 kvm_rax_write(vcpu, vmcb12->save.rax);
582 kvm_rsp_write(vcpu, vmcb12->save.rsp);
583 kvm_rip_write(vcpu, vmcb12->save.rip);
584
585 /* In case we don't even reach vcpu_run, the fields are not updated */
586 vmcb02->save.rax = vmcb12->save.rax;
587 vmcb02->save.rsp = vmcb12->save.rsp;
588 vmcb02->save.rip = vmcb12->save.rip;
589
590 /* These bits will be set properly on the first execution when new_vmcb12 is true */
591 if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
592 vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
593 svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
594 vmcb_mark_dirty(vmcb02, VMCB_DR);
595 }
596
597 if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
598 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
599 /*
600 * Reserved bits of DEBUGCTL are ignored. Be consistent with
601 * svm_set_msr's definition of reserved bits.
602 */
603 svm_copy_lbrs(vmcb02, vmcb12);
604 vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
605 svm_update_lbrv(&svm->vcpu);
606
607 } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
608 svm_copy_lbrs(vmcb02, vmcb01);
609 }
610}
611
612static inline bool is_evtinj_soft(u32 evtinj)
613{
614 u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
615 u8 vector = evtinj & SVM_EVTINJ_VEC_MASK;
616
617 if (!(evtinj & SVM_EVTINJ_VALID))
618 return false;
619
620 if (type == SVM_EVTINJ_TYPE_SOFT)
621 return true;
622
623 return type == SVM_EVTINJ_TYPE_EXEPT && kvm_exception_is_soft(vector);
624}
625
626static bool is_evtinj_nmi(u32 evtinj)
627{
628 u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
629
630 if (!(evtinj & SVM_EVTINJ_VALID))
631 return false;
632
633 return type == SVM_EVTINJ_TYPE_NMI;
634}
635
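/*
 * Build vmcb02's control area for the upcoming L2 run: merge the relevant
 * int_ctl bits from vmcb01 and the cached vmcb12 controls, program nested
 * paging, the combined TSC offset/ratio, event injection, next_rip and pause
 * filtering, then merge the intercept vectors via recalc_intercepts().
 */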
636static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
637 unsigned long vmcb12_rip,
638 unsigned long vmcb12_csbase)
639{
640 u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
641 u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
642
643 struct kvm_vcpu *vcpu = &svm->vcpu;
644 struct vmcb *vmcb01 = svm->vmcb01.ptr;
645 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
646 u32 pause_count12;
647 u32 pause_thresh12;
648
649 nested_svm_transition_tlb_flush(vcpu);
650
651 /* Enter Guest-Mode */
652 enter_guest_mode(vcpu);
653
654 /*
655 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
656 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
657 */
658
659 if (guest_can_use(vcpu, X86_FEATURE_VGIF) &&
660 (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
661 int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
662 else
663 int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
664
665 if (vnmi) {
666 if (vmcb01->control.int_ctl & V_NMI_PENDING_MASK) {
667 svm->vcpu.arch.nmi_pending++;
668 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
669 }
670 if (nested_vnmi_enabled(svm))
671 int_ctl_vmcb12_bits |= (V_NMI_PENDING_MASK |
672 V_NMI_ENABLE_MASK |
673 V_NMI_BLOCKING_MASK);
674 }
675
676 /* Copied from vmcb01. msrpm_base can be overwritten later. */
677 vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
678 vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
679 vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
680
681 /* Done at vmrun: asid. */
682
683 /* Also overwritten later if necessary. */
684 vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
685
686 /* nested_cr3. */
687 if (nested_npt_enabled(svm))
688 nested_svm_init_mmu_context(vcpu);
689
690 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
691 vcpu->arch.l1_tsc_offset,
692 svm->nested.ctl.tsc_offset,
693 svm->tsc_ratio_msr);
694
695 vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;
696
697 if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
698 svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
699 nested_svm_update_tsc_ratio_msr(vcpu);
700
701 vmcb02->control.int_ctl =
702 (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
703 (vmcb01->control.int_ctl & int_ctl_vmcb01_bits);
704
705 vmcb02->control.int_vector = svm->nested.ctl.int_vector;
706 vmcb02->control.int_state = svm->nested.ctl.int_state;
707 vmcb02->control.event_inj = svm->nested.ctl.event_inj;
708 vmcb02->control.event_inj_err = svm->nested.ctl.event_inj_err;
709
710 /*
711 * next_rip is consumed on VMRUN as the return address pushed on the
712 * stack for injected soft exceptions/interrupts. If nrips is exposed
713 * to L1, take it verbatim from vmcb12. If nrips is supported in
714 * hardware but not exposed to L1, stuff the actual L2 RIP to emulate
715 * what a nrips=0 CPU would do (L1 is responsible for advancing RIP
716 * prior to injecting the event).
717 */
718 if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
719 vmcb02->control.next_rip = svm->nested.ctl.next_rip;
720 else if (boot_cpu_has(X86_FEATURE_NRIPS))
721 vmcb02->control.next_rip = vmcb12_rip;
722
723 svm->nmi_l1_to_l2 = is_evtinj_nmi(vmcb02->control.event_inj);
724 if (is_evtinj_soft(vmcb02->control.event_inj)) {
725 svm->soft_int_injected = true;
726 svm->soft_int_csbase = vmcb12_csbase;
727 svm->soft_int_old_rip = vmcb12_rip;
728 if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
729 svm->soft_int_next_rip = svm->nested.ctl.next_rip;
730 else
731 svm->soft_int_next_rip = vmcb12_rip;
732 }
733
734 vmcb02->control.virt_ext = vmcb01->control.virt_ext &
735 LBR_CTL_ENABLE_MASK;
736 if (guest_can_use(vcpu, X86_FEATURE_LBRV))
737 vmcb02->control.virt_ext |=
738 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
739
740 if (!nested_vmcb_needs_vls_intercept(svm))
741 vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
742
743 if (guest_can_use(vcpu, X86_FEATURE_PAUSEFILTER))
744 pause_count12 = svm->nested.ctl.pause_filter_count;
745 else
746 pause_count12 = 0;
747 if (guest_can_use(vcpu, X86_FEATURE_PFTHRESHOLD))
748 pause_thresh12 = svm->nested.ctl.pause_filter_thresh;
749 else
750 pause_thresh12 = 0;
751 if (kvm_pause_in_guest(svm->vcpu.kvm)) {
752 /* use guest values since host doesn't intercept PAUSE */
753 vmcb02->control.pause_filter_count = pause_count12;
754 vmcb02->control.pause_filter_thresh = pause_thresh12;
755
756 } else {
757 /* start from host values otherwise */
758 vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
759 vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;
760
761 /* ... but ensure filtering is disabled if so requested. */
762 if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
763 if (!pause_count12)
764 vmcb02->control.pause_filter_count = 0;
765 if (!pause_thresh12)
766 vmcb02->control.pause_filter_thresh = 0;
767 }
768 }
769
770 /*
771 * Merge guest and host intercepts - must be called with vcpu in
772 * guest-mode to take effect.
773 */
774 recalc_intercepts(svm);
775}
776
777static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
778{
779 /*
780 * Some VMCB state is shared between L1 and L2 and thus has to be
781 * moved at the time of nested vmrun and vmexit.
782 *
783 * VMLOAD/VMSAVE state would also belong in this category, but KVM
784 * always performs VMLOAD and VMSAVE from the VMCB01.
785 */
786 to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
787}
788
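/*
 * Enter guest (L2) mode using the vmcb12 controls/save state that the caller
 * has already copied into the nested cache: switch to vmcb02, prepare its
 * control and save areas, load L2's CR3 and set GIF. @from_vmrun is false
 * when guest mode is re-entered without emulating an actual VMRUN, in which
 * case PDPTR reloads are skipped and loading the remaining nested state pages
 * is deferred via KVM_REQ_GET_NESTED_STATE_PAGES.
 */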
789int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
790 struct vmcb *vmcb12, bool from_vmrun)
791{
792 struct vcpu_svm *svm = to_svm(vcpu);
793 int ret;
794
795 trace_kvm_nested_vmenter(svm->vmcb->save.rip,
796 vmcb12_gpa,
797 vmcb12->save.rip,
798 vmcb12->control.int_ctl,
799 vmcb12->control.event_inj,
800 vmcb12->control.nested_ctl,
801 vmcb12->control.nested_cr3,
802 vmcb12->save.cr3,
803 KVM_ISA_SVM);
804
805 trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
806 vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
807 vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
808 vmcb12->control.intercepts[INTERCEPT_WORD3],
809 vmcb12->control.intercepts[INTERCEPT_WORD4],
810 vmcb12->control.intercepts[INTERCEPT_WORD5]);
811
812
813 svm->nested.vmcb12_gpa = vmcb12_gpa;
814
815 WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);
816
817 nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
818
819 svm_switch_vmcb(svm, &svm->nested.vmcb02);
820 nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base);
821 nested_vmcb02_prepare_save(svm, vmcb12);
822
823 ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
824 nested_npt_enabled(svm), from_vmrun);
825 if (ret)
826 return ret;
827
828 if (!from_vmrun)
829 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
830
831 svm_set_gif(svm, true);
832
833 if (kvm_vcpu_apicv_active(vcpu))
834 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
835
836 nested_svm_hv_update_vm_vp_ids(vcpu);
837
838 return 0;
839}
840
841int nested_svm_vmrun(struct kvm_vcpu *vcpu)
842{
843 struct vcpu_svm *svm = to_svm(vcpu);
844 int ret;
845 struct vmcb *vmcb12;
846 struct kvm_host_map map;
847 u64 vmcb12_gpa;
848 struct vmcb *vmcb01 = svm->vmcb01.ptr;
849
850 if (!svm->nested.hsave_msr) {
851 kvm_inject_gp(vcpu, 0);
852 return 1;
853 }
854
855 if (is_smm(vcpu)) {
856 kvm_queue_exception(vcpu, UD_VECTOR);
857 return 1;
858 }
859
860 /* This fails when VP assist page is enabled but the supplied GPA is bogus */
861 ret = kvm_hv_verify_vp_assist(vcpu);
862 if (ret) {
863 kvm_inject_gp(vcpu, 0);
864 return ret;
865 }
866
867 vmcb12_gpa = svm->vmcb->save.rax;
868 ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
869 if (ret == -EINVAL) {
870 kvm_inject_gp(vcpu, 0);
871 return 1;
872 } else if (ret) {
873 return kvm_skip_emulated_instruction(vcpu);
874 }
875
876 ret = kvm_skip_emulated_instruction(vcpu);
877
878 vmcb12 = map.hva;
879
880 if (WARN_ON_ONCE(!svm->nested.initialized))
881 return -EINVAL;
882
883 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
884 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
885
886 if (!nested_vmcb_check_save(vcpu) ||
887 !nested_vmcb_check_controls(vcpu)) {
888 vmcb12->control.exit_code = SVM_EXIT_ERR;
889 vmcb12->control.exit_code_hi = 0;
890 vmcb12->control.exit_info_1 = 0;
891 vmcb12->control.exit_info_2 = 0;
892 goto out;
893 }
894
895 /*
896 * Since vmcb01 is not in use, we can use it to store some of the L1
897 * state.
898 */
899 vmcb01->save.efer = vcpu->arch.efer;
900 vmcb01->save.cr0 = kvm_read_cr0(vcpu);
901 vmcb01->save.cr4 = vcpu->arch.cr4;
902 vmcb01->save.rflags = kvm_get_rflags(vcpu);
903 vmcb01->save.rip = kvm_rip_read(vcpu);
904
905 if (!npt_enabled)
906 vmcb01->save.cr3 = kvm_read_cr3(vcpu);
907
908 svm->nested.nested_run_pending = 1;
909
910 if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
911 goto out_exit_err;
912
913 if (nested_svm_vmrun_msrpm(svm))
914 goto out;
915
916out_exit_err:
917 svm->nested.nested_run_pending = 0;
918 svm->nmi_l1_to_l2 = false;
919 svm->soft_int_injected = false;
920
921 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
922 svm->vmcb->control.exit_code_hi = 0;
923 svm->vmcb->control.exit_info_1 = 0;
924 svm->vmcb->control.exit_info_2 = 0;
925
926 nested_svm_vmexit(svm);
927
928out:
929 kvm_vcpu_unmap(vcpu, &map);
930
931 return ret;
932}
933
934/* Copy state save area fields which are handled by VMRUN */
935void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
936 struct vmcb_save_area *from_save)
937{
938 to_save->es = from_save->es;
939 to_save->cs = from_save->cs;
940 to_save->ss = from_save->ss;
941 to_save->ds = from_save->ds;
942 to_save->gdtr = from_save->gdtr;
943 to_save->idtr = from_save->idtr;
944 to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
945 to_save->efer = from_save->efer;
946 to_save->cr0 = from_save->cr0;
947 to_save->cr3 = from_save->cr3;
948 to_save->cr4 = from_save->cr4;
949 to_save->rax = from_save->rax;
950 to_save->rsp = from_save->rsp;
951 to_save->rip = from_save->rip;
952 to_save->cpl = 0;
953}
954
955void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
956{
957 to_vmcb->save.fs = from_vmcb->save.fs;
958 to_vmcb->save.gs = from_vmcb->save.gs;
959 to_vmcb->save.tr = from_vmcb->save.tr;
960 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
961 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
962 to_vmcb->save.star = from_vmcb->save.star;
963 to_vmcb->save.lstar = from_vmcb->save.lstar;
964 to_vmcb->save.cstar = from_vmcb->save.cstar;
965 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
966 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
967 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
968 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
969}
970
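/*
 * Emulate #VMEXIT from L2 to L1: propagate L2's current state and the exit
 * information into vmcb12, switch back to vmcb01, restore L1's register and
 * control state (including TSC offset/scaling and CR3), and flush/sync the
 * TLB and MMU for the L2 -> L1 transition.
 */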
971int nested_svm_vmexit(struct vcpu_svm *svm)
972{
973 struct kvm_vcpu *vcpu = &svm->vcpu;
974 struct vmcb *vmcb01 = svm->vmcb01.ptr;
975 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
976 struct vmcb *vmcb12;
977 struct kvm_host_map map;
978 int rc;
979
980 rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
981 if (rc) {
982 if (rc == -EINVAL)
983 kvm_inject_gp(vcpu, 0);
984 return 1;
985 }
986
987 vmcb12 = map.hva;
988
989 /* Exit Guest-Mode */
990 leave_guest_mode(vcpu);
991 svm->nested.vmcb12_gpa = 0;
992 WARN_ON_ONCE(svm->nested.nested_run_pending);
993
994 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
995
996 /* in case we halted in L2 */
997 svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
998
999 /* Give the current vmcb to the guest */
1000
1001 vmcb12->save.es = vmcb02->save.es;
1002 vmcb12->save.cs = vmcb02->save.cs;
1003 vmcb12->save.ss = vmcb02->save.ss;
1004 vmcb12->save.ds = vmcb02->save.ds;
1005 vmcb12->save.gdtr = vmcb02->save.gdtr;
1006 vmcb12->save.idtr = vmcb02->save.idtr;
1007 vmcb12->save.efer = svm->vcpu.arch.efer;
1008 vmcb12->save.cr0 = kvm_read_cr0(vcpu);
1009 vmcb12->save.cr3 = kvm_read_cr3(vcpu);
1010 vmcb12->save.cr2 = vmcb02->save.cr2;
1011 vmcb12->save.cr4 = svm->vcpu.arch.cr4;
1012 vmcb12->save.rflags = kvm_get_rflags(vcpu);
1013 vmcb12->save.rip = kvm_rip_read(vcpu);
1014 vmcb12->save.rsp = kvm_rsp_read(vcpu);
1015 vmcb12->save.rax = kvm_rax_read(vcpu);
1016 vmcb12->save.dr7 = vmcb02->save.dr7;
1017 vmcb12->save.dr6 = svm->vcpu.arch.dr6;
1018 vmcb12->save.cpl = vmcb02->save.cpl;
1019
1020 vmcb12->control.int_state = vmcb02->control.int_state;
1021 vmcb12->control.exit_code = vmcb02->control.exit_code;
1022 vmcb12->control.exit_code_hi = vmcb02->control.exit_code_hi;
1023 vmcb12->control.exit_info_1 = vmcb02->control.exit_info_1;
1024 vmcb12->control.exit_info_2 = vmcb02->control.exit_info_2;
1025
1026 if (vmcb12->control.exit_code != SVM_EXIT_ERR)
1027 nested_save_pending_event_to_vmcb12(svm, vmcb12);
1028
1029 if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
1030 vmcb12->control.next_rip = vmcb02->control.next_rip;
1031
1032 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
1033 vmcb12->control.event_inj = svm->nested.ctl.event_inj;
1034 vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
1035
1036 if (!kvm_pause_in_guest(vcpu->kvm)) {
1037 vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
1038 vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
1039
1040 }
1041
1042 nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
1043
1044 svm_switch_vmcb(svm, &svm->vmcb01);
1045
1046 /*
1047 * Rules for synchronizing int_ctl bits from vmcb02 to vmcb01:
1048 *
1049 * V_IRQ, V_IRQ_VECTOR, V_INTR_PRIO_MASK, V_IGN_TPR: If L1 doesn't
1050 * intercept interrupts, then KVM will use vmcb02's V_IRQ (and related
1051 * flags) to detect interrupt windows for L1 IRQs (even if L1 uses
1052 * virtual interrupt masking). Raise KVM_REQ_EVENT to ensure that
1053 * KVM re-requests an interrupt window if necessary, which implicitly
1054 * copies these bits from vmcb02 to vmcb01.
1055 *
1056 * V_TPR: If L1 doesn't use virtual interrupt masking, then L1's vTPR
1057 * is stored in vmcb02, but its value doesn't need to be copied from/to
1058 * vmcb01 because it is copied from/to the virtual APIC's TPR register
1059 * on each VM entry/exit.
1060 *
1061 * V_GIF: If nested vGIF is not used, KVM uses vmcb02's V_GIF for L1's
1062 * V_GIF. However, GIF is architecturally clear on each VM exit, thus
1063 * there is no need to copy V_GIF from vmcb02 to vmcb01.
1064 */
1065 if (!nested_exit_on_intr(svm))
1066 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
1067
1068 if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
1069 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
1070 svm_copy_lbrs(vmcb12, vmcb02);
1071 svm_update_lbrv(vcpu);
1072 } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
1073 svm_copy_lbrs(vmcb01, vmcb02);
1074 svm_update_lbrv(vcpu);
1075 }
1076
1077 if (vnmi) {
1078 if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)
1079 vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK;
1080 else
1081 vmcb01->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
1082
1083 if (vcpu->arch.nmi_pending) {
1084 vcpu->arch.nmi_pending--;
1085 vmcb01->control.int_ctl |= V_NMI_PENDING_MASK;
1086 } else {
1087 vmcb01->control.int_ctl &= ~V_NMI_PENDING_MASK;
1088 }
1089 }
1090
1091 /*
1092 * On vmexit the GIF is set to false and
1093 * no event can be injected in L1.
1094 */
1095 svm_set_gif(svm, false);
1096 vmcb01->control.exit_int_info = 0;
1097
1098 svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
1099 if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
1100 vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
1101 vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
1102 }
1103
1104 if (kvm_caps.has_tsc_control &&
1105 vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
1106 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
1107 svm_write_tsc_multiplier(vcpu);
1108 }
1109
1110 svm->nested.ctl.nested_cr3 = 0;
1111
1112 /*
1113 * Restore processor state that had been saved in vmcb01
1114 */
1115 kvm_set_rflags(vcpu, vmcb01->save.rflags);
1116 svm_set_efer(vcpu, vmcb01->save.efer);
1117 svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
1118 svm_set_cr4(vcpu, vmcb01->save.cr4);
1119 kvm_rax_write(vcpu, vmcb01->save.rax);
1120 kvm_rsp_write(vcpu, vmcb01->save.rsp);
1121 kvm_rip_write(vcpu, vmcb01->save.rip);
1122
1123 svm->vcpu.arch.dr7 = DR7_FIXED_1;
1124 kvm_update_dr7(&svm->vcpu);
1125
1126 trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
1127 vmcb12->control.exit_info_1,
1128 vmcb12->control.exit_info_2,
1129 vmcb12->control.exit_int_info,
1130 vmcb12->control.exit_int_info_err,
1131 KVM_ISA_SVM);
1132
1133 kvm_vcpu_unmap(vcpu, &map);
1134
1135 nested_svm_transition_tlb_flush(vcpu);
1136
1137 nested_svm_uninit_mmu_context(vcpu);
1138
1139 rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
1140 if (rc)
1141 return 1;
1142
1143 /*
1144 * Drop what we picked up for L2 via svm_complete_interrupts() so it
1145 * doesn't end up in L1.
1146 */
1147 svm->vcpu.arch.nmi_injected = false;
1148 kvm_clear_exception_queue(vcpu);
1149 kvm_clear_interrupt_queue(vcpu);
1150
1151 /*
1152 * If we are here following the completion of a VMRUN that
1153 * is being single-stepped, queue the pending #DB intercept
1154 * right now so that it can be accounted for before we execute
1155 * L1's next instruction.
1156 */
1157 if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
1158 kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
1159
1160 /*
1161 * Un-inhibit the AVIC right away, so that other vCPUs can start
1162 * to benefit from it right away.
1163 */
1164 if (kvm_apicv_activated(vcpu->kvm))
1165 __kvm_vcpu_update_apicv(vcpu);
1166
1167 return 0;
1168}
1169
1170static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
1171{
1172 struct vcpu_svm *svm = to_svm(vcpu);
1173
1174 if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN))
1175 return;
1176
1177 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
1178 nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
1179}
1180
1181int svm_allocate_nested(struct vcpu_svm *svm)
1182{
1183 struct page *vmcb02_page;
1184
1185 if (svm->nested.initialized)
1186 return 0;
1187
1188 vmcb02_page = snp_safe_alloc_page();
1189 if (!vmcb02_page)
1190 return -ENOMEM;
1191 svm->nested.vmcb02.ptr = page_address(vmcb02_page);
1192 svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
1193
1194 svm->nested.msrpm = svm_vcpu_alloc_msrpm();
1195 if (!svm->nested.msrpm)
1196 goto err_free_vmcb02;
1197 svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
1198
1199 svm->nested.initialized = true;
1200 return 0;
1201
1202err_free_vmcb02:
1203 __free_page(vmcb02_page);
1204 return -ENOMEM;
1205}
1206
1207void svm_free_nested(struct vcpu_svm *svm)
1208{
1209 if (!svm->nested.initialized)
1210 return;
1211
1212 if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr))
1213 svm_switch_vmcb(svm, &svm->vmcb01);
1214
1215 svm_vcpu_free_msrpm(svm->nested.msrpm);
1216 svm->nested.msrpm = NULL;
1217
1218 __free_page(virt_to_page(svm->nested.vmcb02.ptr));
1219 svm->nested.vmcb02.ptr = NULL;
1220
1221 /*
1222 * When last_vmcb12_gpa matches the current vmcb12 gpa,
1223 * some vmcb12 fields are not loaded if they are marked clean
1224 * in the vmcb12, since in this case they are up to date already.
1225 *
1226 * When the vmcb02 is freed, this optimization becomes invalid.
1227 */
1228 svm->nested.last_vmcb12_gpa = INVALID_GPA;
1229
1230 svm->nested.initialized = false;
1231}
1232
1233void svm_leave_nested(struct kvm_vcpu *vcpu)
1234{
1235 struct vcpu_svm *svm = to_svm(vcpu);
1236
1237 if (is_guest_mode(vcpu)) {
1238 svm->nested.nested_run_pending = 0;
1239 svm->nested.vmcb12_gpa = INVALID_GPA;
1240
1241 leave_guest_mode(vcpu);
1242
1243 svm_switch_vmcb(svm, &svm->vmcb01);
1244
1245 nested_svm_uninit_mmu_context(vcpu);
1246 vmcb_mark_all_dirty(svm->vmcb);
1247
1248 if (kvm_apicv_activated(vcpu->kvm))
1249 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
1250 }
1251
1252 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1253}
1254
1255static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1256{
1257 u32 offset, msr, value;
1258 int write, mask;
1259
1260 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
1261 return NESTED_EXIT_HOST;
1262
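	/*
	 * Each 32-bit word of the MSR permission map covers 16 MSRs, two bits
	 * per MSR with the read-intercept bit first. E.g. for MSR_EFER
	 * (0xc0000080), msr & 0xf == 0, so bit 0 of its word is the read
	 * intercept and bit 1 the write intercept.
	 */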
1263 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1264 offset = svm_msrpm_offset(msr);
1265 write = svm->vmcb->control.exit_info_1 & 1;
1266 mask = 1 << ((2 * (msr & 0xf)) + write);
1267
1268 if (offset == MSR_INVALID)
1269 return NESTED_EXIT_DONE;
1270
1271 /* The offset is in 32-bit units but is needed here in 8-bit (byte) units */
1272 offset *= 4;
1273
1274 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
1275 return NESTED_EXIT_DONE;
1276
1277 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1278}
1279
1280static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
1281{
1282 unsigned port, size, iopm_len;
1283 u16 val, mask;
1284 u8 start_bit;
1285 u64 gpa;
1286
1287 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
1288 return NESTED_EXIT_HOST;
1289
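	/*
	 * The IOPM has one bit per I/O port. A worked example: a 2-byte access
	 * to port 0x3ff reads the u16 at iopm_base + 0x7f; start_bit = 7, so
	 * iopm_len = 2 and mask = 0x180, i.e. the intercept bits for ports
	 * 0x3ff and 0x400 (a wide access is intercepted if L1 intercepts any
	 * byte it touches).
	 */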
1290 port = svm->vmcb->control.exit_info_1 >> 16;
1291 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
1292 SVM_IOIO_SIZE_SHIFT;
1293 gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
1294 start_bit = port % 8;
1295 iopm_len = (start_bit + size > 8) ? 2 : 1;
1296 mask = (0xf >> (4 - size)) << start_bit;
1297 val = 0;
1298
1299 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
1300 return NESTED_EXIT_DONE;
1301
1302 return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1303}
1304
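/*
 * Decide who owns the current #VMEXIT: returns NESTED_EXIT_DONE if the exit
 * should be reflected to L1 (i.e. vmcb12 intercepts it), or NESTED_EXIT_HOST
 * if L0 handles it itself.
 */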
1305static int nested_svm_intercept(struct vcpu_svm *svm)
1306{
1307 u32 exit_code = svm->vmcb->control.exit_code;
1308 int vmexit = NESTED_EXIT_HOST;
1309
1310 switch (exit_code) {
1311 case SVM_EXIT_MSR:
1312 vmexit = nested_svm_exit_handled_msr(svm);
1313 break;
1314 case SVM_EXIT_IOIO:
1315 vmexit = nested_svm_intercept_ioio(svm);
1316 break;
1317 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
1318 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1319 vmexit = NESTED_EXIT_DONE;
1320 break;
1321 }
1322 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
1323 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1324 vmexit = NESTED_EXIT_DONE;
1325 break;
1326 }
1327 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1328 /*
1329 * Host-intercepted exceptions have been checked already in
1330 * nested_svm_exit_special. There is nothing to do here,
1331 * the vmexit is injected by svm_check_nested_events.
1332 */
1333 vmexit = NESTED_EXIT_DONE;
1334 break;
1335 }
1336 case SVM_EXIT_ERR: {
1337 vmexit = NESTED_EXIT_DONE;
1338 break;
1339 }
1340 default: {
1341 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1342 vmexit = NESTED_EXIT_DONE;
1343 }
1344 }
1345
1346 return vmexit;
1347}
1348
1349int nested_svm_exit_handled(struct vcpu_svm *svm)
1350{
1351 int vmexit;
1352
1353 vmexit = nested_svm_intercept(svm);
1354
1355 if (vmexit == NESTED_EXIT_DONE)
1356 nested_svm_vmexit(svm);
1357
1358 return vmexit;
1359}
1360
1361int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
1362{
1363 if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
1364 kvm_queue_exception(vcpu, UD_VECTOR);
1365 return 1;
1366 }
1367
1368 if (to_svm(vcpu)->vmcb->save.cpl) {
1369 kvm_inject_gp(vcpu, 0);
1370 return 1;
1371 }
1372
1373 return 0;
1374}
1375
1376static bool nested_svm_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
1377 u32 error_code)
1378{
1379 struct vcpu_svm *svm = to_svm(vcpu);
1380
1381 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector));
1382}
1383
1384static void nested_svm_inject_exception_vmexit(struct kvm_vcpu *vcpu)
1385{
1386 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
1387 struct vcpu_svm *svm = to_svm(vcpu);
1388 struct vmcb *vmcb = svm->vmcb;
1389
1390 vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + ex->vector;
1391 vmcb->control.exit_code_hi = 0;
1392
1393 if (ex->has_error_code)
1394 vmcb->control.exit_info_1 = ex->error_code;
1395
1396 /*
1397 * EXITINFO2 is undefined for all exception intercepts other
1398 * than #PF.
1399 */
1400 if (ex->vector == PF_VECTOR) {
1401 if (ex->has_payload)
1402 vmcb->control.exit_info_2 = ex->payload;
1403 else
1404 vmcb->control.exit_info_2 = vcpu->arch.cr2;
1405 } else if (ex->vector == DB_VECTOR) {
1406 /* See kvm_check_and_inject_events(). */
1407 kvm_deliver_exception_payload(vcpu, ex);
1408
1409 if (vcpu->arch.dr7 & DR7_GD) {
1410 vcpu->arch.dr7 &= ~DR7_GD;
1411 kvm_update_dr7(vcpu);
1412 }
1413 } else {
1414 WARN_ON(ex->has_payload);
1415 }
1416
1417 nested_svm_vmexit(svm);
1418}
1419
1420static inline bool nested_exit_on_init(struct vcpu_svm *svm)
1421{
1422 return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
1423}
1424
1425static int svm_check_nested_events(struct kvm_vcpu *vcpu)
1426{
1427 struct kvm_lapic *apic = vcpu->arch.apic;
1428 struct vcpu_svm *svm = to_svm(vcpu);
1429 /*
1430 * Only a pending nested run blocks a pending exception. If there is a
1431 * previously injected event, the pending exception occurred while said
1432 * event was being delivered and thus needs to be handled.
1433 */
1434 bool block_nested_exceptions = svm->nested.nested_run_pending;
1435 /*
1436 * New events (not exceptions) are only recognized at instruction
1437 * boundaries. If an event needs reinjection, then KVM is handling a
1438 * VM-Exit that occurred _during_ instruction execution; new events are
1439 * blocked until the instruction completes.
1440 */
1441 bool block_nested_events = block_nested_exceptions ||
1442 kvm_event_needs_reinjection(vcpu);
1443
1444 if (lapic_in_kernel(vcpu) &&
1445 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
1446 if (block_nested_events)
1447 return -EBUSY;
1448 if (!nested_exit_on_init(svm))
1449 return 0;
1450 nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
1451 return 0;
1452 }
1453
1454 if (vcpu->arch.exception_vmexit.pending) {
1455 if (block_nested_exceptions)
1456 return -EBUSY;
1457 nested_svm_inject_exception_vmexit(vcpu);
1458 return 0;
1459 }
1460
1461 if (vcpu->arch.exception.pending) {
1462 if (block_nested_exceptions)
1463 return -EBUSY;
1464 return 0;
1465 }
1466
1467#ifdef CONFIG_KVM_SMM
1468 if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
1469 if (block_nested_events)
1470 return -EBUSY;
1471 if (!nested_exit_on_smi(svm))
1472 return 0;
1473 nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
1474 return 0;
1475 }
1476#endif
1477
1478 if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
1479 if (block_nested_events)
1480 return -EBUSY;
1481 if (!nested_exit_on_nmi(svm))
1482 return 0;
1483 nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
1484 return 0;
1485 }
1486
1487 if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
1488 if (block_nested_events)
1489 return -EBUSY;
1490 if (!nested_exit_on_intr(svm))
1491 return 0;
1492 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
1493 nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
1494 return 0;
1495 }
1496
1497 return 0;
1498}
1499
1500int nested_svm_exit_special(struct vcpu_svm *svm)
1501{
1502 u32 exit_code = svm->vmcb->control.exit_code;
1503 struct kvm_vcpu *vcpu = &svm->vcpu;
1504
1505 switch (exit_code) {
1506 case SVM_EXIT_INTR:
1507 case SVM_EXIT_NMI:
1508 case SVM_EXIT_NPF:
1509 return NESTED_EXIT_HOST;
1510 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1511 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
1512
1513 if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
1514 excp_bits)
1515 return NESTED_EXIT_HOST;
1516 else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
1517 svm->vcpu.arch.apf.host_apf_flags)
1518 /* Trap async PF even if not shadowing */
1519 return NESTED_EXIT_HOST;
1520 break;
1521 }
1522 case SVM_EXIT_VMMCALL:
1523 /* Hyper-V L2 TLB flush hypercall is handled by L0 */
1524 if (guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
1525 nested_svm_l2_tlb_flush_enabled(vcpu) &&
1526 kvm_hv_is_tlb_flush_hcall(vcpu))
1527 return NESTED_EXIT_HOST;
1528 break;
1529 default:
1530 break;
1531 }
1532
1533 return NESTED_EXIT_CONTINUE;
1534}
1535
1536void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
1537{
1538 struct vcpu_svm *svm = to_svm(vcpu);
1539
1540 vcpu->arch.tsc_scaling_ratio =
1541 kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
1542 svm->tsc_ratio_msr);
1543 svm_write_tsc_multiplier(vcpu);
1544}
1545
1546/* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
1547static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
1548 struct vmcb_ctrl_area_cached *from)
1549{
1550 unsigned int i;
1551
1552 memset(dst, 0, sizeof(struct vmcb_control_area));
1553
1554 for (i = 0; i < MAX_INTERCEPT; i++)
1555 dst->intercepts[i] = from->intercepts[i];
1556
1557 dst->iopm_base_pa = from->iopm_base_pa;
1558 dst->msrpm_base_pa = from->msrpm_base_pa;
1559 dst->tsc_offset = from->tsc_offset;
1560 dst->asid = from->asid;
1561 dst->tlb_ctl = from->tlb_ctl;
1562 dst->int_ctl = from->int_ctl;
1563 dst->int_vector = from->int_vector;
1564 dst->int_state = from->int_state;
1565 dst->exit_code = from->exit_code;
1566 dst->exit_code_hi = from->exit_code_hi;
1567 dst->exit_info_1 = from->exit_info_1;
1568 dst->exit_info_2 = from->exit_info_2;
1569 dst->exit_int_info = from->exit_int_info;
1570 dst->exit_int_info_err = from->exit_int_info_err;
1571 dst->nested_ctl = from->nested_ctl;
1572 dst->event_inj = from->event_inj;
1573 dst->event_inj_err = from->event_inj_err;
1574 dst->next_rip = from->next_rip;
1575 dst->nested_cr3 = from->nested_cr3;
1576 dst->virt_ext = from->virt_ext;
1577 dst->pause_filter_count = from->pause_filter_count;
1578 dst->pause_filter_thresh = from->pause_filter_thresh;
1579 /* 'clean' and 'hv_enlightenments' are not changed by KVM */
1580}
1581
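/*
 * KVM_GET_NESTED_STATE blob layout for SVM: a struct kvm_nested_state header
 * (with hdr.svm.vmcb_pa set to the vmcb12 GPA when L2 is active), followed,
 * if L2 is active, by KVM_STATE_NESTED_SVM_VMCB_SIZE bytes at data.svm[0]
 * holding the cached vmcb12 control area and L1's save area from vmcb01.
 */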
1582static int svm_get_nested_state(struct kvm_vcpu *vcpu,
1583 struct kvm_nested_state __user *user_kvm_nested_state,
1584 u32 user_data_size)
1585{
1586 struct vcpu_svm *svm;
1587 struct vmcb_control_area *ctl;
1588 unsigned long r;
1589 struct kvm_nested_state kvm_state = {
1590 .flags = 0,
1591 .format = KVM_STATE_NESTED_FORMAT_SVM,
1592 .size = sizeof(kvm_state),
1593 };
1594 struct vmcb __user *user_vmcb = (struct vmcb __user *)
1595 &user_kvm_nested_state->data.svm[0];
1596
1597 if (!vcpu)
1598 return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
1599
1600 svm = to_svm(vcpu);
1601
1602 if (user_data_size < kvm_state.size)
1603 goto out;
1604
1605 /* First fill in the header and copy it out. */
1606 if (is_guest_mode(vcpu)) {
1607 kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
1608 kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
1609 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
1610
1611 if (svm->nested.nested_run_pending)
1612 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
1613 }
1614
1615 if (gif_set(svm))
1616 kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
1617
1618 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
1619 return -EFAULT;
1620
1621 if (!is_guest_mode(vcpu))
1622 goto out;
1623
1624 /*
1625 * Copy over the full size of the VMCB rather than just the size
1626 * of the structs.
1627 */
1628 if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
1629 return -EFAULT;
1630
1631 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
1632 if (!ctl)
1633 return -ENOMEM;
1634
1635 nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
1636 r = copy_to_user(&user_vmcb->control, ctl,
1637 sizeof(user_vmcb->control));
1638 kfree(ctl);
1639 if (r)
1640 return -EFAULT;
1641
1642 if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1643 sizeof(user_vmcb->save)))
1644 return -EFAULT;
1645out:
1646 return kvm_state.size;
1647}
1648
1649static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1650 struct kvm_nested_state __user *user_kvm_nested_state,
1651 struct kvm_nested_state *kvm_state)
1652{
1653 struct vcpu_svm *svm = to_svm(vcpu);
1654 struct vmcb __user *user_vmcb = (struct vmcb __user *)
1655 &user_kvm_nested_state->data.svm[0];
1656 struct vmcb_control_area *ctl;
1657 struct vmcb_save_area *save;
1658 struct vmcb_save_area_cached save_cached;
1659 struct vmcb_ctrl_area_cached ctl_cached;
1660 unsigned long cr0;
1661 int ret;
1662
1663 BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
1664 KVM_STATE_NESTED_SVM_VMCB_SIZE);
1665
1666 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1667 return -EINVAL;
1668
1669 if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1670 KVM_STATE_NESTED_RUN_PENDING |
1671 KVM_STATE_NESTED_GIF_SET))
1672 return -EINVAL;
1673
1674 /*
1675 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
1676 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
1677 */
1678 if (!(vcpu->arch.efer & EFER_SVME)) {
1679 /* GIF=1 and no guest mode are required if SVME=0. */
1680 if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1681 return -EINVAL;
1682 }
1683
1684 /* SMM temporarily disables SVM, so we cannot be in guest mode. */
1685 if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1686 return -EINVAL;
1687
1688 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1689 svm_leave_nested(vcpu);
1690 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1691 return 0;
1692 }
1693
1694 if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1695 return -EINVAL;
1696 if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1697 return -EINVAL;
1698
1699 ret = -ENOMEM;
1700 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
1701 save = kzalloc(sizeof(*save), GFP_KERNEL);
1702 if (!ctl || !save)
1703 goto out_free;
1704
1705 ret = -EFAULT;
1706 if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
1707 goto out_free;
1708 if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
1709 goto out_free;
1710
1711 ret = -EINVAL;
1712 __nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
1713 if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
1714 goto out_free;
1715
1716 /*
1717 * Processor state contains L2 state. Check that it is
1718 * valid for guest mode (see nested_vmcb_check_save).
1719 */
1720 cr0 = kvm_read_cr0(vcpu);
1721 if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
1722 goto out_free;
1723
1724 /*
1725 * Validate host state saved from before VMRUN (see
1726 * nested_svm_check_permissions).
1727 */
1728 __nested_copy_vmcb_save_to_cache(&save_cached, save);
1729 if (!(save->cr0 & X86_CR0_PG) ||
1730 !(save->cr0 & X86_CR0_PE) ||
1731 (save->rflags & X86_EFLAGS_VM) ||
1732 !__nested_vmcb_check_save(vcpu, &save_cached))
1733 goto out_free;
1734
1735
1736 /*
1737 * All checks done, we can enter guest mode. Userspace provides
1738 * vmcb12.control, which will be combined with L1's controls and stored into
1739 * vmcb02, and the L1 save state, which we store in vmcb01.
1740 * L2 registers, if needed, are moved from the current VMCB to vmcb02.
1741 */
1742
1743 if (is_guest_mode(vcpu))
1744 svm_leave_nested(vcpu);
1745 else
1746 svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
1747
1748 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1749
1750 svm->nested.nested_run_pending =
1751 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
1752
1753 svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
1754
1755 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
1756 nested_copy_vmcb_control_to_cache(svm, ctl);
1757
1758 svm_switch_vmcb(svm, &svm->nested.vmcb02);
1759 nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);
1760
1761 /*
1762 * While the nested guest CR3 is already checked and set by
1763 * KVM_SET_SREGS, it was set before the nested state was loaded,
1764 * so the MMU might not be initialized correctly.
1765 * Set it again to fix this.
1766 */
1767
1768 ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
1769 nested_npt_enabled(svm), false);
1770 if (WARN_ON_ONCE(ret))
1771 goto out_free;
1772
1773 svm->nested.force_msr_bitmap_recalc = true;
1774
1775 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1776 ret = 0;
1777out_free:
1778 kfree(save);
1779 kfree(ctl);
1780
1781 return ret;
1782}
1783
1784static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
1785{
1786 struct vcpu_svm *svm = to_svm(vcpu);
1787
1788 if (WARN_ON(!is_guest_mode(vcpu)))
1789 return true;
1790
1791 if (!vcpu->arch.pdptrs_from_userspace &&
1792 !nested_npt_enabled(svm) && is_pae_paging(vcpu))
1793 /*
1794 * Reload the guest's PDPTRs since after a migration
1795 * the guest CR3 might be restored prior to setting the nested
1796 * state, which can lead to loading the wrong PDPTRs.
1797 */
1798 if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
1799 return false;
1800
1801 if (!nested_svm_vmrun_msrpm(svm)) {
1802 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1803 vcpu->run->internal.suberror =
1804 KVM_INTERNAL_ERROR_EMULATION;
1805 vcpu->run->internal.ndata = 0;
1806 return false;
1807 }
1808
1809 if (kvm_hv_verify_vp_assist(vcpu))
1810 return false;
1811
1812 return true;
1813}
1814
1815struct kvm_x86_nested_ops svm_nested_ops = {
1816 .leave_nested = svm_leave_nested,
1817 .is_exception_vmexit = nested_svm_is_exception_vmexit,
1818 .check_events = svm_check_nested_events,
1819 .triple_fault = nested_svm_triple_fault,
1820 .get_nested_state_pages = svm_get_nested_state_pages,
1821 .get_state = svm_get_nested_state,
1822 .set_state = svm_set_nested_state,
1823 .hv_inject_synthetic_vmexit_post_tlb_flush = svm_hv_inject_synthetic_vmexit_post_tlb_flush,
1824};
135 c->intercept_dr |= g->intercept_dr;
136 c->intercept_exceptions |= g->intercept_exceptions;
137 c->intercept |= g->intercept;
138}
139
140static void copy_vmcb_control_area(struct vmcb_control_area *dst,
141 struct vmcb_control_area *from)
142{
143 dst->intercept_cr = from->intercept_cr;
144 dst->intercept_dr = from->intercept_dr;
145 dst->intercept_exceptions = from->intercept_exceptions;
146 dst->intercept = from->intercept;
147 dst->iopm_base_pa = from->iopm_base_pa;
148 dst->msrpm_base_pa = from->msrpm_base_pa;
149 dst->tsc_offset = from->tsc_offset;
150 /* asid not copied, it is handled manually for svm->vmcb. */
151 dst->tlb_ctl = from->tlb_ctl;
152 dst->int_ctl = from->int_ctl;
153 dst->int_vector = from->int_vector;
154 dst->int_state = from->int_state;
155 dst->exit_code = from->exit_code;
156 dst->exit_code_hi = from->exit_code_hi;
157 dst->exit_info_1 = from->exit_info_1;
158 dst->exit_info_2 = from->exit_info_2;
159 dst->exit_int_info = from->exit_int_info;
160 dst->exit_int_info_err = from->exit_int_info_err;
161 dst->nested_ctl = from->nested_ctl;
162 dst->event_inj = from->event_inj;
163 dst->event_inj_err = from->event_inj_err;
164 dst->nested_cr3 = from->nested_cr3;
165 dst->virt_ext = from->virt_ext;
166 dst->pause_filter_count = from->pause_filter_count;
167 dst->pause_filter_thresh = from->pause_filter_thresh;
168}
169
170static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
171{
172 /*
173 * This function merges the msr permission bitmaps of kvm and the
174 * nested vmcb. It is optimized in that it only merges the parts where
175 * the kvm msr permission bitmap may contain zero bits
176 */
177 int i;
178
179 if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
180 return true;
181
182 for (i = 0; i < MSRPM_OFFSETS; i++) {
183 u32 value, p;
184 u64 offset;
185
186 if (msrpm_offsets[i] == 0xffffffff)
187 break;
188
189 p = msrpm_offsets[i];
190 offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
191
192 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
193 return false;
194
195 svm->nested.msrpm[p] = svm->msrpm[p] | value;
196 }
197
198 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
199
200 return true;
201}
202
203static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
204{
205 if ((control->intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
206 return false;
207
208 if (control->asid == 0)
209 return false;
210
211 if ((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
212 !npt_enabled)
213 return false;
214
215 return true;
216}
217
218static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
219{
220 bool nested_vmcb_lma;
221 if ((vmcb->save.efer & EFER_SVME) == 0)
222 return false;
223
224 if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
225 (vmcb->save.cr0 & X86_CR0_NW))
226 return false;
227
228 if (!kvm_dr6_valid(vmcb->save.dr6) || !kvm_dr7_valid(vmcb->save.dr7))
229 return false;
230
231 nested_vmcb_lma =
232 (vmcb->save.efer & EFER_LME) &&
233 (vmcb->save.cr0 & X86_CR0_PG);
234
235 if (!nested_vmcb_lma) {
236 if (vmcb->save.cr4 & X86_CR4_PAE) {
237 if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
238 return false;
239 } else {
240 if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
241 return false;
242 }
243 } else {
244 if (!(vmcb->save.cr4 & X86_CR4_PAE) ||
245 !(vmcb->save.cr0 & X86_CR0_PE) ||
246 (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
247 return false;
248 }
249 if (kvm_valid_cr4(&svm->vcpu, vmcb->save.cr4))
250 return false;
251
252 return nested_vmcb_check_controls(&vmcb->control);
253}
254
255static void load_nested_vmcb_control(struct vcpu_svm *svm,
256 struct vmcb_control_area *control)
257{
258 copy_vmcb_control_area(&svm->nested.ctl, control);
259
260 /* Copy it here because nested_svm_check_controls will check it. */
261 svm->nested.ctl.asid = control->asid;
262 svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
263 svm->nested.ctl.iopm_base_pa &= ~0x0fffULL;
264}
265
266/*
267 * Synchronize fields that are written by the processor, so that
268 * they can be copied back into the nested_vmcb.
269 */
270void sync_nested_vmcb_control(struct vcpu_svm *svm)
271{
272 u32 mask;
273 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
274 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
275
276 /* Only a few fields of int_ctl are written by the processor. */
277 mask = V_IRQ_MASK | V_TPR_MASK;
278 if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
279 svm_is_intercept(svm, INTERCEPT_VINTR)) {
280 /*
281 * In order to request an interrupt window, L0 is usurping
282 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
283 * even if it was clear in L1's VMCB. Restoring it would be
284 * wrong. However, in this case V_IRQ will remain true until
285 * interrupt_window_interception calls svm_clear_vintr and
286 * restores int_ctl. We can just leave it aside.
287 */
288 mask &= ~V_IRQ_MASK;
289 }
290 svm->nested.ctl.int_ctl &= ~mask;
291 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
292}
293
294/*
295 * Transfer any event that L0 or L1 wanted to inject into L2 to
296 * EXIT_INT_INFO.
297 */
298static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
299 struct vmcb *nested_vmcb)
300{
301 struct kvm_vcpu *vcpu = &svm->vcpu;
302 u32 exit_int_info = 0;
303 unsigned int nr;
304
305 if (vcpu->arch.exception.injected) {
306 nr = vcpu->arch.exception.nr;
307 exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
308
309 if (vcpu->arch.exception.has_error_code) {
310 exit_int_info |= SVM_EVTINJ_VALID_ERR;
311 nested_vmcb->control.exit_int_info_err =
312 vcpu->arch.exception.error_code;
313 }
314
315 } else if (vcpu->arch.nmi_injected) {
316 exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
317
318 } else if (vcpu->arch.interrupt.injected) {
319 nr = vcpu->arch.interrupt.nr;
320 exit_int_info = nr | SVM_EVTINJ_VALID;
321
322 if (vcpu->arch.interrupt.soft)
323 exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
324 else
325 exit_int_info |= SVM_EVTINJ_TYPE_INTR;
326 }
327
328 nested_vmcb->control.exit_int_info = exit_int_info;
329}
330
331static inline bool nested_npt_enabled(struct vcpu_svm *svm)
332{
333 return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
334}
335
336/*
337 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
338 * if we are emulating VM-Entry into a guest with NPT enabled.
339 */
340static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
341 bool nested_npt)
342{
343 if (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63))
344 return -EINVAL;
345
346 if (!nested_npt && is_pae_paging(vcpu) &&
347 (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
348 if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
349 return -EINVAL;
350 }
351
352 /*
353 * TODO: optimize unconditional TLB flush/MMU sync here and in
354 * kvm_init_shadow_npt_mmu().
355 */
356 if (!nested_npt)
357 kvm_mmu_new_pgd(vcpu, cr3, false, false);
358
359 vcpu->arch.cr3 = cr3;
360 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
361
362 kvm_init_mmu(vcpu, false);
363
364 return 0;
365}
366
367static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
368{
369 /* Load the nested guest state */
370 svm->vmcb->save.es = nested_vmcb->save.es;
371 svm->vmcb->save.cs = nested_vmcb->save.cs;
372 svm->vmcb->save.ss = nested_vmcb->save.ss;
373 svm->vmcb->save.ds = nested_vmcb->save.ds;
374 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
375 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
376 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
377 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
378 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
379 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
380 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
381 kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
382 kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
383 kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);
384
385 /* In case we don't even reach vcpu_run, the fields are not updated */
386 svm->vmcb->save.rax = nested_vmcb->save.rax;
387 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
388 svm->vmcb->save.rip = nested_vmcb->save.rip;
389 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
390 svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
391 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
392}
393
394static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
395{
396 const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
397
398 if (nested_npt_enabled(svm))
399 nested_svm_init_mmu_context(&svm->vcpu);
400
401 svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
402 svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
403
404 svm->vmcb->control.int_ctl =
405 (svm->nested.ctl.int_ctl & ~mask) |
406 (svm->nested.hsave->control.int_ctl & mask);
407
408 svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
409 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
410 svm->vmcb->control.int_state = svm->nested.ctl.int_state;
411 svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
412 svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;
413
414 svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count;
415 svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;
416
417 /* Enter Guest-Mode */
418 enter_guest_mode(&svm->vcpu);
419
420 /*
421 * Merge guest and host intercepts - must be called with vcpu in
422 * guest-mode to take affect here
423 */
424 recalc_intercepts(svm);
425
426 vmcb_mark_all_dirty(svm->vmcb);
427}
428
429int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
430 struct vmcb *nested_vmcb)
431{
432 int ret;
433
434 svm->nested.vmcb = vmcb_gpa;
435 load_nested_vmcb_control(svm, &nested_vmcb->control);
436 nested_prepare_vmcb_save(svm, nested_vmcb);
437 nested_prepare_vmcb_control(svm);
438
439 ret = nested_svm_load_cr3(&svm->vcpu, nested_vmcb->save.cr3,
440 nested_npt_enabled(svm));
441 if (ret)
442 return ret;
443
444 svm_set_gif(svm, true);
445
446 return 0;
447}
448
449int nested_svm_vmrun(struct vcpu_svm *svm)
450{
451 int ret;
452 struct vmcb *nested_vmcb;
453 struct vmcb *hsave = svm->nested.hsave;
454 struct vmcb *vmcb = svm->vmcb;
455 struct kvm_host_map map;
456 u64 vmcb_gpa;
457
458 if (is_smm(&svm->vcpu)) {
459 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
460 return 1;
461 }
462
463 vmcb_gpa = svm->vmcb->save.rax;
464 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
465 if (ret == -EINVAL) {
466 kvm_inject_gp(&svm->vcpu, 0);
467 return 1;
468 } else if (ret) {
469 return kvm_skip_emulated_instruction(&svm->vcpu);
470 }
471
472 ret = kvm_skip_emulated_instruction(&svm->vcpu);
473
474 nested_vmcb = map.hva;
475
476 if (!nested_vmcb_checks(svm, nested_vmcb)) {
477 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
478 nested_vmcb->control.exit_code_hi = 0;
479 nested_vmcb->control.exit_info_1 = 0;
480 nested_vmcb->control.exit_info_2 = 0;
481 goto out;
482 }
483
484 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
485 nested_vmcb->save.rip,
486 nested_vmcb->control.int_ctl,
487 nested_vmcb->control.event_inj,
488 nested_vmcb->control.nested_ctl);
489
490 trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
491 nested_vmcb->control.intercept_cr >> 16,
492 nested_vmcb->control.intercept_exceptions,
493 nested_vmcb->control.intercept);
494
495 /* Clear internal status */
496 kvm_clear_exception_queue(&svm->vcpu);
497 kvm_clear_interrupt_queue(&svm->vcpu);
498
499 /*
500 * Save the old vmcb, so we don't need to pick what we save, but can
501 * restore everything when a VMEXIT occurs
502 */
503 hsave->save.es = vmcb->save.es;
504 hsave->save.cs = vmcb->save.cs;
505 hsave->save.ss = vmcb->save.ss;
506 hsave->save.ds = vmcb->save.ds;
507 hsave->save.gdtr = vmcb->save.gdtr;
508 hsave->save.idtr = vmcb->save.idtr;
509 hsave->save.efer = svm->vcpu.arch.efer;
510 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
511 hsave->save.cr4 = svm->vcpu.arch.cr4;
512 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
513 hsave->save.rip = kvm_rip_read(&svm->vcpu);
514 hsave->save.rsp = vmcb->save.rsp;
515 hsave->save.rax = vmcb->save.rax;
516 if (npt_enabled)
517 hsave->save.cr3 = vmcb->save.cr3;
518 else
519 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
520
521 copy_vmcb_control_area(&hsave->control, &vmcb->control);
522
523 svm->nested.nested_run_pending = 1;
524
525 if (enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb))
526 goto out_exit_err;
527
528 if (nested_svm_vmrun_msrpm(svm))
529 goto out;
530
531out_exit_err:
532 svm->nested.nested_run_pending = 0;
533
534 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
535 svm->vmcb->control.exit_code_hi = 0;
536 svm->vmcb->control.exit_info_1 = 0;
537 svm->vmcb->control.exit_info_2 = 0;
538
539 nested_svm_vmexit(svm);
540
541out:
542 kvm_vcpu_unmap(&svm->vcpu, &map, true);
543
544 return ret;
545}
546
547void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
548{
549 to_vmcb->save.fs = from_vmcb->save.fs;
550 to_vmcb->save.gs = from_vmcb->save.gs;
551 to_vmcb->save.tr = from_vmcb->save.tr;
552 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
553 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
554 to_vmcb->save.star = from_vmcb->save.star;
555 to_vmcb->save.lstar = from_vmcb->save.lstar;
556 to_vmcb->save.cstar = from_vmcb->save.cstar;
557 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
558 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
559 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
560 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
561}
562
563int nested_svm_vmexit(struct vcpu_svm *svm)
564{
565 int rc;
566 struct vmcb *nested_vmcb;
567 struct vmcb *hsave = svm->nested.hsave;
568 struct vmcb *vmcb = svm->vmcb;
569 struct kvm_host_map map;
570
571 rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
572 if (rc) {
573 if (rc == -EINVAL)
574 kvm_inject_gp(&svm->vcpu, 0);
575 return 1;
576 }
577
578 nested_vmcb = map.hva;
579
580 /* Exit Guest-Mode */
581 leave_guest_mode(&svm->vcpu);
582 svm->nested.vmcb = 0;
583 WARN_ON_ONCE(svm->nested.nested_run_pending);
584
585 /* in case we halted in L2 */
586 svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
587
588 /* Give the current vmcb to the guest */
589
590 nested_vmcb->save.es = vmcb->save.es;
591 nested_vmcb->save.cs = vmcb->save.cs;
592 nested_vmcb->save.ss = vmcb->save.ss;
593 nested_vmcb->save.ds = vmcb->save.ds;
594 nested_vmcb->save.gdtr = vmcb->save.gdtr;
595 nested_vmcb->save.idtr = vmcb->save.idtr;
596 nested_vmcb->save.efer = svm->vcpu.arch.efer;
597 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
598 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
599 nested_vmcb->save.cr2 = vmcb->save.cr2;
600 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
601 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
602 nested_vmcb->save.rip = kvm_rip_read(&svm->vcpu);
603 nested_vmcb->save.rsp = kvm_rsp_read(&svm->vcpu);
604 nested_vmcb->save.rax = kvm_rax_read(&svm->vcpu);
605 nested_vmcb->save.dr7 = vmcb->save.dr7;
606 nested_vmcb->save.dr6 = svm->vcpu.arch.dr6;
607 nested_vmcb->save.cpl = vmcb->save.cpl;
608
609 nested_vmcb->control.int_state = vmcb->control.int_state;
610 nested_vmcb->control.exit_code = vmcb->control.exit_code;
611 nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
612 nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
613 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
614
615 if (nested_vmcb->control.exit_code != SVM_EXIT_ERR)
616 nested_vmcb_save_pending_event(svm, nested_vmcb);
617
618 if (svm->nrips_enabled)
619 nested_vmcb->control.next_rip = vmcb->control.next_rip;
620
621 nested_vmcb->control.int_ctl = svm->nested.ctl.int_ctl;
622 nested_vmcb->control.tlb_ctl = svm->nested.ctl.tlb_ctl;
623 nested_vmcb->control.event_inj = svm->nested.ctl.event_inj;
624 nested_vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;
625
626 nested_vmcb->control.pause_filter_count =
627 svm->vmcb->control.pause_filter_count;
628 nested_vmcb->control.pause_filter_thresh =
629 svm->vmcb->control.pause_filter_thresh;
630
631 /* Restore the original control entries */
632 copy_vmcb_control_area(&vmcb->control, &hsave->control);
633
634 /* On vmexit the GIF is set to false */
635 svm_set_gif(svm, false);
636
637 svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
638 svm->vcpu.arch.l1_tsc_offset;
639
640 svm->nested.ctl.nested_cr3 = 0;
641
642 /* Restore selected save entries */
643 svm->vmcb->save.es = hsave->save.es;
644 svm->vmcb->save.cs = hsave->save.cs;
645 svm->vmcb->save.ss = hsave->save.ss;
646 svm->vmcb->save.ds = hsave->save.ds;
647 svm->vmcb->save.gdtr = hsave->save.gdtr;
648 svm->vmcb->save.idtr = hsave->save.idtr;
649 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
650 svm_set_efer(&svm->vcpu, hsave->save.efer);
651 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
652 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
653 kvm_rax_write(&svm->vcpu, hsave->save.rax);
654 kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
655 kvm_rip_write(&svm->vcpu, hsave->save.rip);
656 svm->vmcb->save.dr7 = 0;
657 svm->vmcb->save.cpl = 0;
658 svm->vmcb->control.exit_int_info = 0;
659
660 vmcb_mark_all_dirty(svm->vmcb);
661
662 trace_kvm_nested_vmexit_inject(nested_vmcb->control.exit_code,
663 nested_vmcb->control.exit_info_1,
664 nested_vmcb->control.exit_info_2,
665 nested_vmcb->control.exit_int_info,
666 nested_vmcb->control.exit_int_info_err,
667 KVM_ISA_SVM);
668
669 kvm_vcpu_unmap(&svm->vcpu, &map, true);
670
671 nested_svm_uninit_mmu_context(&svm->vcpu);
672
673 rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false);
674 if (rc)
675 return 1;
676
677 if (npt_enabled)
678 svm->vmcb->save.cr3 = hsave->save.cr3;
679
680 /*
681 * Drop what we picked up for L2 via svm_complete_interrupts() so it
682 * doesn't end up in L1.
683 */
684 svm->vcpu.arch.nmi_injected = false;
685 kvm_clear_exception_queue(&svm->vcpu);
686 kvm_clear_interrupt_queue(&svm->vcpu);
687
688 return 0;
689}
690
691/*
692 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
693 */
694void svm_leave_nested(struct vcpu_svm *svm)
695{
696 if (is_guest_mode(&svm->vcpu)) {
697 struct vmcb *hsave = svm->nested.hsave;
698 struct vmcb *vmcb = svm->vmcb;
699
700 svm->nested.nested_run_pending = 0;
701 leave_guest_mode(&svm->vcpu);
702 copy_vmcb_control_area(&vmcb->control, &hsave->control);
703 nested_svm_uninit_mmu_context(&svm->vcpu);
704 }
705}
706
707static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
708{
709 u32 offset, msr, value;
710 int write, mask;
711
712 if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
713 return NESTED_EXIT_HOST;
714
715 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
716 offset = svm_msrpm_offset(msr);
717 write = svm->vmcb->control.exit_info_1 & 1;
718 mask = 1 << ((2 * (msr & 0xf)) + write);
719
720 if (offset == MSR_INVALID)
721 return NESTED_EXIT_DONE;
722
723 /* Offset is in 32 bit units but need in 8 bit units */
724 offset *= 4;
725
726 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
727 return NESTED_EXIT_DONE;
728
729 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
730}
731
732static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
733{
734 unsigned port, size, iopm_len;
735 u16 val, mask;
736 u8 start_bit;
737 u64 gpa;
738
739 if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
740 return NESTED_EXIT_HOST;
741
742 port = svm->vmcb->control.exit_info_1 >> 16;
743 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
744 SVM_IOIO_SIZE_SHIFT;
745 gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
746 start_bit = port % 8;
747 iopm_len = (start_bit + size > 8) ? 2 : 1;
748 mask = (0xf >> (4 - size)) << start_bit;
749 val = 0;
750
751 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
752 return NESTED_EXIT_DONE;
753
754 return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
755}
756
757static int nested_svm_intercept(struct vcpu_svm *svm)
758{
759 u32 exit_code = svm->vmcb->control.exit_code;
760 int vmexit = NESTED_EXIT_HOST;
761
762 switch (exit_code) {
763 case SVM_EXIT_MSR:
764 vmexit = nested_svm_exit_handled_msr(svm);
765 break;
766 case SVM_EXIT_IOIO:
767 vmexit = nested_svm_intercept_ioio(svm);
768 break;
769 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
770 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
771 if (svm->nested.ctl.intercept_cr & bit)
772 vmexit = NESTED_EXIT_DONE;
773 break;
774 }
775 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
776 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
777 if (svm->nested.ctl.intercept_dr & bit)
778 vmexit = NESTED_EXIT_DONE;
779 break;
780 }
781 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
782 /*
783 * Host-intercepted exceptions have been checked already in
784 * nested_svm_exit_special. There is nothing to do here,
785 * the vmexit is injected by svm_check_nested_events.
786 */
787 vmexit = NESTED_EXIT_DONE;
788 break;
789 }
790 case SVM_EXIT_ERR: {
791 vmexit = NESTED_EXIT_DONE;
792 break;
793 }
794 default: {
795 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
796 if (svm->nested.ctl.intercept & exit_bits)
797 vmexit = NESTED_EXIT_DONE;
798 }
799 }
800
801 return vmexit;
802}
803
804int nested_svm_exit_handled(struct vcpu_svm *svm)
805{
806 int vmexit;
807
808 vmexit = nested_svm_intercept(svm);
809
810 if (vmexit == NESTED_EXIT_DONE)
811 nested_svm_vmexit(svm);
812
813 return vmexit;
814}
815
816int nested_svm_check_permissions(struct vcpu_svm *svm)
817{
818 if (!(svm->vcpu.arch.efer & EFER_SVME) ||
819 !is_paging(&svm->vcpu)) {
820 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
821 return 1;
822 }
823
824 if (svm->vmcb->save.cpl) {
825 kvm_inject_gp(&svm->vcpu, 0);
826 return 1;
827 }
828
829 return 0;
830}
831
832static bool nested_exit_on_exception(struct vcpu_svm *svm)
833{
834 unsigned int nr = svm->vcpu.arch.exception.nr;
835
836 return (svm->nested.ctl.intercept_exceptions & (1 << nr));
837}
838
839static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
840{
841 unsigned int nr = svm->vcpu.arch.exception.nr;
842
843 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
844 svm->vmcb->control.exit_code_hi = 0;
845
846 if (svm->vcpu.arch.exception.has_error_code)
847 svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;
848
849 /*
850 * EXITINFO2 is undefined for all exception intercepts other
851 * than #PF.
852 */
853 if (nr == PF_VECTOR) {
854 if (svm->vcpu.arch.exception.nested_apf)
855 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
856 else if (svm->vcpu.arch.exception.has_payload)
857 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
858 else
859 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
860 } else if (nr == DB_VECTOR) {
861 /* See inject_pending_event. */
862 kvm_deliver_exception_payload(&svm->vcpu);
863 if (svm->vcpu.arch.dr7 & DR7_GD) {
864 svm->vcpu.arch.dr7 &= ~DR7_GD;
865 kvm_update_dr7(&svm->vcpu);
866 }
867 } else
868 WARN_ON(svm->vcpu.arch.exception.has_payload);
869
870 nested_svm_vmexit(svm);
871}
872
873static void nested_svm_smi(struct vcpu_svm *svm)
874{
875 svm->vmcb->control.exit_code = SVM_EXIT_SMI;
876 svm->vmcb->control.exit_info_1 = 0;
877 svm->vmcb->control.exit_info_2 = 0;
878
879 nested_svm_vmexit(svm);
880}
881
882static void nested_svm_nmi(struct vcpu_svm *svm)
883{
884 svm->vmcb->control.exit_code = SVM_EXIT_NMI;
885 svm->vmcb->control.exit_info_1 = 0;
886 svm->vmcb->control.exit_info_2 = 0;
887
888 nested_svm_vmexit(svm);
889}
890
891static void nested_svm_intr(struct vcpu_svm *svm)
892{
893 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
894
895 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
896 svm->vmcb->control.exit_info_1 = 0;
897 svm->vmcb->control.exit_info_2 = 0;
898
899 nested_svm_vmexit(svm);
900}
901
902static inline bool nested_exit_on_init(struct vcpu_svm *svm)
903{
904 return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_INIT));
905}
906
907static void nested_svm_init(struct vcpu_svm *svm)
908{
909 svm->vmcb->control.exit_code = SVM_EXIT_INIT;
910 svm->vmcb->control.exit_info_1 = 0;
911 svm->vmcb->control.exit_info_2 = 0;
912
913 nested_svm_vmexit(svm);
914}
915
916
917static int svm_check_nested_events(struct kvm_vcpu *vcpu)
918{
919 struct vcpu_svm *svm = to_svm(vcpu);
920 bool block_nested_events =
921 kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
922 struct kvm_lapic *apic = vcpu->arch.apic;
923
924 if (lapic_in_kernel(vcpu) &&
925 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
926 if (block_nested_events)
927 return -EBUSY;
928 if (!nested_exit_on_init(svm))
929 return 0;
930 nested_svm_init(svm);
931 return 0;
932 }
933
934 if (vcpu->arch.exception.pending) {
935 if (block_nested_events)
936 return -EBUSY;
937 if (!nested_exit_on_exception(svm))
938 return 0;
939 nested_svm_inject_exception_vmexit(svm);
940 return 0;
941 }
942
943 if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
944 if (block_nested_events)
945 return -EBUSY;
946 if (!nested_exit_on_smi(svm))
947 return 0;
948 nested_svm_smi(svm);
949 return 0;
950 }
951
952 if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
953 if (block_nested_events)
954 return -EBUSY;
955 if (!nested_exit_on_nmi(svm))
956 return 0;
957 nested_svm_nmi(svm);
958 return 0;
959 }
960
961 if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
962 if (block_nested_events)
963 return -EBUSY;
964 if (!nested_exit_on_intr(svm))
965 return 0;
966 nested_svm_intr(svm);
967 return 0;
968 }
969
970 return 0;
971}
972
973int nested_svm_exit_special(struct vcpu_svm *svm)
974{
975 u32 exit_code = svm->vmcb->control.exit_code;
976
977 switch (exit_code) {
978 case SVM_EXIT_INTR:
979 case SVM_EXIT_NMI:
980 case SVM_EXIT_NPF:
981 return NESTED_EXIT_HOST;
982 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
983 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
984
985 if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
986 return NESTED_EXIT_HOST;
987 else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
988 svm->vcpu.arch.apf.host_apf_flags)
989 /* Trap async PF even if not shadowing */
990 return NESTED_EXIT_HOST;
991 break;
992 }
993 default:
994 break;
995 }
996
997 return NESTED_EXIT_CONTINUE;
998}
999
1000static int svm_get_nested_state(struct kvm_vcpu *vcpu,
1001 struct kvm_nested_state __user *user_kvm_nested_state,
1002 u32 user_data_size)
1003{
1004 struct vcpu_svm *svm;
1005 struct kvm_nested_state kvm_state = {
1006 .flags = 0,
1007 .format = KVM_STATE_NESTED_FORMAT_SVM,
1008 .size = sizeof(kvm_state),
1009 };
1010 struct vmcb __user *user_vmcb = (struct vmcb __user *)
1011 &user_kvm_nested_state->data.svm[0];
1012
1013 if (!vcpu)
1014 return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
1015
1016 svm = to_svm(vcpu);
1017
1018 if (user_data_size < kvm_state.size)
1019 goto out;
1020
1021 /* First fill in the header and copy it out. */
1022 if (is_guest_mode(vcpu)) {
1023 kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb;
1024 kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
1025 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
1026
1027 if (svm->nested.nested_run_pending)
1028 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
1029 }
1030
1031 if (gif_set(svm))
1032 kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
1033
1034 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
1035 return -EFAULT;
1036
1037 if (!is_guest_mode(vcpu))
1038 goto out;
1039
1040 /*
1041 * Copy over the full size of the VMCB rather than just the size
1042 * of the structs.
1043 */
1044 if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
1045 return -EFAULT;
1046 if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
1047 sizeof(user_vmcb->control)))
1048 return -EFAULT;
1049 if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save,
1050 sizeof(user_vmcb->save)))
1051 return -EFAULT;
1052
1053out:
1054 return kvm_state.size;
1055}
1056
1057static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1058 struct kvm_nested_state __user *user_kvm_nested_state,
1059 struct kvm_nested_state *kvm_state)
1060{
1061 struct vcpu_svm *svm = to_svm(vcpu);
1062 struct vmcb *hsave = svm->nested.hsave;
1063 struct vmcb __user *user_vmcb = (struct vmcb __user *)
1064 &user_kvm_nested_state->data.svm[0];
1065 struct vmcb_control_area ctl;
1066 struct vmcb_save_area save;
1067 u32 cr0;
1068
1069 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1070 return -EINVAL;
1071
1072 if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1073 KVM_STATE_NESTED_RUN_PENDING |
1074 KVM_STATE_NESTED_GIF_SET))
1075 return -EINVAL;
1076
1077 /*
1078 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
1079 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
1080 */
1081 if (!(vcpu->arch.efer & EFER_SVME)) {
1082 /* GIF=1 and no guest mode are required if SVME=0. */
1083 if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1084 return -EINVAL;
1085 }
1086
1087 /* SMM temporarily disables SVM, so we cannot be in guest mode. */
1088 if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1089 return -EINVAL;
1090
1091 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1092 svm_leave_nested(svm);
1093 goto out_set_gif;
1094 }
1095
1096 if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1097 return -EINVAL;
1098 if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1099 return -EINVAL;
1100 if (copy_from_user(&ctl, &user_vmcb->control, sizeof(ctl)))
1101 return -EFAULT;
1102 if (copy_from_user(&save, &user_vmcb->save, sizeof(save)))
1103 return -EFAULT;
1104
1105 if (!nested_vmcb_check_controls(&ctl))
1106 return -EINVAL;
1107
1108 /*
1109 * Processor state contains L2 state. Check that it is
1110 * valid for guest mode (see nested_vmcb_checks).
1111 */
1112 cr0 = kvm_read_cr0(vcpu);
1113 if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
1114 return -EINVAL;
1115
1116 /*
1117 * Validate host state saved from before VMRUN (see
1118 * nested_svm_check_permissions).
1119 * TODO: validate reserved bits for all saved state.
1120 */
1121 if (!(save.cr0 & X86_CR0_PG))
1122 return -EINVAL;
1123
1124 /*
1125 * All checks done, we can enter guest mode. L1 control fields
1126 * come from the nested save state. Guest state is already
1127 * in the registers, the save area of the nested state instead
1128 * contains saved L1 state.
1129 */
1130 copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
1131 hsave->save = save;
1132
1133 svm->nested.vmcb = kvm_state->hdr.svm.vmcb_pa;
1134 load_nested_vmcb_control(svm, &ctl);
1135 nested_prepare_vmcb_control(svm);
1136
1137 if (!nested_svm_vmrun_msrpm(svm))
1138 return -EINVAL;
1139
1140out_set_gif:
1141 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1142 return 0;
1143}
1144
1145struct kvm_x86_nested_ops svm_nested_ops = {
1146 .check_events = svm_check_nested_events,
1147 .get_state = svm_get_nested_state,
1148 .set_state = svm_set_nested_state,
1149};