1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * AMD SVM support
6 *
7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9 *
10 * Authors:
11 * Yaniv Kamay <yaniv@qumranet.com>
12 * Avi Kivity <avi@qumranet.com>
13 */
14
15#define pr_fmt(fmt) "SVM: " fmt
16
17#include <linux/kvm_types.h>
18#include <linux/kvm_host.h>
19#include <linux/kernel.h>
20
21#include <asm/msr-index.h>
22#include <asm/debugreg.h>
23
24#include "kvm_emulate.h"
25#include "trace.h"
26#include "mmu.h"
27#include "x86.h"
28#include "smm.h"
29#include "cpuid.h"
30#include "lapic.h"
31#include "svm.h"
32#include "hyperv.h"
33
34#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
35
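/*
 * Inject a nested page fault (#NPF) VM-exit into L1; used as the
 * ->inject_page_fault() callback for the nested NPT MMU.
 */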
36static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
37 struct x86_exception *fault)
38{
39 struct vcpu_svm *svm = to_svm(vcpu);
40 struct vmcb *vmcb = svm->vmcb;
41
42 if (vmcb->control.exit_code != SVM_EXIT_NPF) {
43 /*
44 * TODO: track the cause of the nested page fault, and
45 * correctly fill in the high bits of exit_info_1.
46 */
47 vmcb->control.exit_code = SVM_EXIT_NPF;
48 vmcb->control.exit_code_hi = 0;
49 vmcb->control.exit_info_1 = (1ULL << 32);
50 vmcb->control.exit_info_2 = fault->address;
51 }
52
53 vmcb->control.exit_info_1 &= ~0xffffffffULL;
54 vmcb->control.exit_info_1 |= fault->error_code;
55
56 nested_svm_vmexit(svm);
57}
58
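/*
 * Read the PDPTE at @index from the table addressed by vmcb12's nested_cr3;
 * used as the ->get_pdptr() callback for the nested NPT MMU.
 */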
59static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
60{
61 struct vcpu_svm *svm = to_svm(vcpu);
62 u64 cr3 = svm->nested.ctl.nested_cr3;
63 u64 pdpte;
64 int ret;
65
66 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
67 offset_in_page(cr3) + index * 8, 8);
68 if (ret)
69 return 0;
70 return pdpte;
71}
72
73static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
74{
75 struct vcpu_svm *svm = to_svm(vcpu);
76
77 return svm->nested.ctl.nested_cr3;
78}
79
80static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
81{
82 struct vcpu_svm *svm = to_svm(vcpu);
83
84 WARN_ON(mmu_is_nested(vcpu));
85
86 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
87
88 /*
 89 * The NPT format depends on L1's CR4 and EFER, which are in vmcb01. Note,
90 * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
91 * vCPU state. CR0.WP is explicitly ignored, while CR0.PG is required.
92 */
93 kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
94 svm->vmcb01.ptr->save.efer,
95 svm->nested.ctl.nested_cr3);
96 vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
97 vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
98 vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
99 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
100}
101
102static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
103{
104 vcpu->arch.mmu = &vcpu->arch.root_mmu;
105 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
106}
107
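/*
 * VMLOAD/VMSAVE must be intercepted while running L2 unless virtual
 * VMLOAD/VMSAVE is enabled for the vCPU, L2 runs with NPT, and L1 enabled
 * the feature in vmcb12's virt_ext.
 */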
108static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
109{
110 if (!svm->v_vmload_vmsave_enabled)
111 return true;
112
113 if (!nested_npt_enabled(svm))
114 return true;
115
116 if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
117 return true;
118
119 return false;
120}
121
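/*
 * Recompute vmcb02's intercept bitmaps by merging L1's intercepts (vmcb01)
 * with the guest intercepts cached from vmcb12, plus KVM's own adjustments.
 */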
122void recalc_intercepts(struct vcpu_svm *svm)
123{
124 struct vmcb_control_area *c, *h;
125 struct vmcb_ctrl_area_cached *g;
126 unsigned int i;
127
128 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
129
130 if (!is_guest_mode(&svm->vcpu))
131 return;
132
133 c = &svm->vmcb->control;
134 h = &svm->vmcb01.ptr->control;
135 g = &svm->nested.ctl;
136
137 for (i = 0; i < MAX_INTERCEPT; i++)
138 c->intercepts[i] = h->intercepts[i];
139
140 if (g->int_ctl & V_INTR_MASKING_MASK) {
141 /*
 142 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF and CR8
 143 * do not affect any interrupt we may want to inject;
144 * therefore, writes to CR8 are irrelevant to L0, as are
145 * interrupt window vmexits.
146 */
147 vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
148 vmcb_clr_intercept(c, INTERCEPT_VINTR);
149 }
150
151 /*
 152 * We want to see VMMCALLs from a nested guest only when the Hyper-V L2
 153 * TLB flush feature is enabled.
154 */
155 if (!nested_svm_l2_tlb_flush_enabled(&svm->vcpu))
156 vmcb_clr_intercept(c, INTERCEPT_VMMCALL);
157
158 for (i = 0; i < MAX_INTERCEPT; i++)
159 c->intercepts[i] |= g->intercepts[i];
160
161 /* If SMI is not intercepted, ignore guest SMI intercept as well */
162 if (!intercept_smi)
163 vmcb_clr_intercept(c, INTERCEPT_SMI);
164
165 if (nested_vmcb_needs_vls_intercept(svm)) {
166 /*
167 * If the virtual VMLOAD/VMSAVE is not enabled for the L2,
168 * we must intercept these instructions to correctly
169 * emulate them in case L1 doesn't intercept them.
170 */
171 vmcb_set_intercept(c, INTERCEPT_VMLOAD);
172 vmcb_set_intercept(c, INTERCEPT_VMSAVE);
173 } else {
174 WARN_ON(!(c->virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK));
175 }
176}
177
178/*
179 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The function
 180 * is optimized in that it only merges the parts where the KVM MSR permission
 181 * bitmap may contain zero bits.
182 */
183static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
184{
185 struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
186 int i;
187
188 /*
189 * MSR bitmap update can be skipped when:
190 * - MSR bitmap for L1 hasn't changed.
191 * - Nested hypervisor (L1) is attempting to launch the same L2 as
192 * before.
193 * - Nested hypervisor (L1) is using Hyper-V emulation interface and
194 * tells KVM (L0) there were no changes in MSR bitmap for L2.
195 */
196 if (!svm->nested.force_msr_bitmap_recalc &&
197 kvm_hv_hypercall_enabled(&svm->vcpu) &&
198 hve->hv_enlightenments_control.msr_bitmap &&
199 (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
200 goto set_msrpm_base_pa;
201
202 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
203 return true;
204
205 for (i = 0; i < MSRPM_OFFSETS; i++) {
206 u32 value, p;
207 u64 offset;
208
209 if (msrpm_offsets[i] == 0xffffffff)
210 break;
211
212 p = msrpm_offsets[i];
213
 214 /* x2apic MSRs are always intercepted for the nested guest */
215 if (is_x2apic_msrpm_offset(p))
216 continue;
217
218 offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
219
220 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
221 return false;
222
223 svm->nested.msrpm[p] = svm->msrpm[p] | value;
224 }
225
226 svm->nested.force_msr_bitmap_recalc = false;
227
228set_msrpm_base_pa:
229 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
230
231 return true;
232}
233
234/*
235 * Bits 11:0 of bitmap address are ignored by hardware
236 */
237static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
238{
239 u64 addr = PAGE_ALIGN(pa);
240
241 return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
242 kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
243}
244
245static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
246{
247 /* Nested FLUSHBYASID is not supported yet. */
248 switch(tlb_ctl) {
249 case TLB_CONTROL_DO_NOTHING:
250 case TLB_CONTROL_FLUSH_ALL_ASID:
251 return true;
252 default:
253 return false;
254 }
255}
256
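/*
 * Consistency checks on the (cached) vmcb12 control area, performed on nested
 * VMRUN and when nested state is set from userspace.
 */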
257static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
258 struct vmcb_ctrl_area_cached *control)
259{
260 if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
261 return false;
262
263 if (CC(control->asid == 0))
264 return false;
265
266 if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
267 return false;
268
269 if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
270 MSRPM_SIZE)))
271 return false;
272 if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
273 IOPM_SIZE)))
274 return false;
275
276 if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl)))
277 return false;
278
279 return true;
280}
281
282/* Common checks that apply to both L1 and L2 state. */
283static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
284 struct vmcb_save_area_cached *save)
285{
286 if (CC(!(save->efer & EFER_SVME)))
287 return false;
288
289 if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
290 CC(save->cr0 & ~0xffffffffULL))
291 return false;
292
293 if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
294 return false;
295
296 /*
297 * These checks are also performed by KVM_SET_SREGS,
298 * except that EFER.LMA is not checked by SVM against
299 * CR0.PG && EFER.LME.
300 */
301 if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
302 if (CC(!(save->cr4 & X86_CR4_PAE)) ||
303 CC(!(save->cr0 & X86_CR0_PE)) ||
304 CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
305 return false;
306 }
307
308 /* Note, SVM doesn't have any additional restrictions on CR4. */
309 if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)))
310 return false;
311
312 if (CC(!kvm_valid_efer(vcpu, save->efer)))
313 return false;
314
315 return true;
316}
317
318static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
319{
320 struct vcpu_svm *svm = to_svm(vcpu);
321 struct vmcb_save_area_cached *save = &svm->nested.save;
322
323 return __nested_vmcb_check_save(vcpu, save);
324}
325
326static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
327{
328 struct vcpu_svm *svm = to_svm(vcpu);
329 struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;
330
331 return __nested_vmcb_check_controls(vcpu, ctl);
332}
333
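/*
 * Snapshot the vmcb12 control fields consumed by KVM into the per-vCPU cache;
 * subsequent consistency checks and vmcb02 construction operate on the cached
 * copy.
 */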
334static
335void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
336 struct vmcb_ctrl_area_cached *to,
337 struct vmcb_control_area *from)
338{
339 unsigned int i;
340
341 for (i = 0; i < MAX_INTERCEPT; i++)
342 to->intercepts[i] = from->intercepts[i];
343
344 to->iopm_base_pa = from->iopm_base_pa;
345 to->msrpm_base_pa = from->msrpm_base_pa;
346 to->tsc_offset = from->tsc_offset;
347 to->tlb_ctl = from->tlb_ctl;
348 to->int_ctl = from->int_ctl;
349 to->int_vector = from->int_vector;
350 to->int_state = from->int_state;
351 to->exit_code = from->exit_code;
352 to->exit_code_hi = from->exit_code_hi;
353 to->exit_info_1 = from->exit_info_1;
354 to->exit_info_2 = from->exit_info_2;
355 to->exit_int_info = from->exit_int_info;
356 to->exit_int_info_err = from->exit_int_info_err;
357 to->nested_ctl = from->nested_ctl;
358 to->event_inj = from->event_inj;
359 to->event_inj_err = from->event_inj_err;
360 to->next_rip = from->next_rip;
361 to->nested_cr3 = from->nested_cr3;
362 to->virt_ext = from->virt_ext;
363 to->pause_filter_count = from->pause_filter_count;
364 to->pause_filter_thresh = from->pause_filter_thresh;
365
366 /* Copy asid here because nested_vmcb_check_controls will check it. */
367 to->asid = from->asid;
368 to->msrpm_base_pa &= ~0x0fffULL;
369 to->iopm_base_pa &= ~0x0fffULL;
370
371 /* Hyper-V extensions (Enlightened VMCB) */
372 if (kvm_hv_hypercall_enabled(vcpu)) {
373 to->clean = from->clean;
374 memcpy(&to->hv_enlightenments, &from->hv_enlightenments,
375 sizeof(to->hv_enlightenments));
376 }
377}
378
379void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
380 struct vmcb_control_area *control)
381{
382 __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
383}
384
385static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
386 struct vmcb_save_area *from)
387{
388 /*
389 * Copy only fields that are validated, as we need them
 390 * to avoid TOCTOU (time-of-check to time-of-use) races.
391 */
392 to->efer = from->efer;
393 to->cr0 = from->cr0;
394 to->cr3 = from->cr3;
395 to->cr4 = from->cr4;
396
397 to->dr6 = from->dr6;
398 to->dr7 = from->dr7;
399}
400
401void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
402 struct vmcb_save_area *save)
403{
404 __nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
405}
406
407/*
408 * Synchronize fields that are written by the processor, so that
409 * they can be copied back into the vmcb12.
410 */
411void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
412{
413 u32 mask;
414 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
415 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
416
417 /* Only a few fields of int_ctl are written by the processor. */
418 mask = V_IRQ_MASK | V_TPR_MASK;
419 if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
420 svm_is_intercept(svm, INTERCEPT_VINTR)) {
421 /*
422 * In order to request an interrupt window, L0 is usurping
423 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
424 * even if it was clear in L1's VMCB. Restoring it would be
425 * wrong. However, in this case V_IRQ will remain true until
426 * interrupt_window_interception calls svm_clear_vintr and
427 * restores int_ctl. We can just leave it aside.
428 */
429 mask &= ~V_IRQ_MASK;
430 }
431
432 if (nested_vgif_enabled(svm))
433 mask |= V_GIF_MASK;
434
435 svm->nested.ctl.int_ctl &= ~mask;
436 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
437}
438
439/*
440 * Transfer any event that L0 or L1 wanted to inject into L2 to
441 * EXIT_INT_INFO.
442 */
443static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
444 struct vmcb *vmcb12)
445{
446 struct kvm_vcpu *vcpu = &svm->vcpu;
447 u32 exit_int_info = 0;
448 unsigned int nr;
449
450 if (vcpu->arch.exception.injected) {
451 nr = vcpu->arch.exception.vector;
452 exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
453
454 if (vcpu->arch.exception.has_error_code) {
455 exit_int_info |= SVM_EVTINJ_VALID_ERR;
456 vmcb12->control.exit_int_info_err =
457 vcpu->arch.exception.error_code;
458 }
459
460 } else if (vcpu->arch.nmi_injected) {
461 exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
462
463 } else if (vcpu->arch.interrupt.injected) {
464 nr = vcpu->arch.interrupt.nr;
465 exit_int_info = nr | SVM_EVTINJ_VALID;
466
467 if (vcpu->arch.interrupt.soft)
468 exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
469 else
470 exit_int_info |= SVM_EVTINJ_TYPE_INTR;
471 }
472
473 vmcb12->control.exit_int_info = exit_int_info;
474}
475
476static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
477{
478 /*
479 * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
480 * L2's VP_ID upon request from the guest. Make sure we check for
481 * pending entries in the right FIFO upon L1/L2 transition as these
482 * requests are put by other vCPUs asynchronously.
483 */
484 if (to_hv_vcpu(vcpu) && npt_enabled)
485 kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);
486
487 /*
488 * TODO: optimize unconditional TLB flush/MMU sync. A partial list of
489 * things to fix before this can be conditional:
490 *
491 * - Flush TLBs for both L1 and L2 remote TLB flush
492 * - Honor L1's request to flush an ASID on nested VMRUN
493 * - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
494 * - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
495 * - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
496 *
497 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
498 * NPT guest-physical mappings on VMRUN.
499 */
500 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
501 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
502}
503
504/*
505 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
506 * if we are emulating VM-Entry into a guest with NPT enabled.
507 */
508static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
509 bool nested_npt, bool reload_pdptrs)
510{
511 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
512 return -EINVAL;
513
514 if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
515 CC(!load_pdptrs(vcpu, cr3)))
516 return -EINVAL;
517
518 vcpu->arch.cr3 = cr3;
519
520 /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
521 kvm_init_mmu(vcpu);
522
523 if (!nested_npt)
524 kvm_mmu_new_pgd(vcpu, cr3);
525
526 return 0;
527}
528
529void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
530{
531 if (!svm->nested.vmcb02.ptr)
532 return;
533
534 /* FIXME: merge g_pat from vmcb01 and vmcb12. */
535 svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
536}
537
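/*
 * Load L2 guest state from vmcb12 (and the cached save area) into vmcb02 and
 * the vCPU; segment, descriptor-table and debug state is skipped when vmcb12
 * marks it clean and the same vmcb12 is being re-entered.
 */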
538static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
539{
540 bool new_vmcb12 = false;
541 struct vmcb *vmcb01 = svm->vmcb01.ptr;
542 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
543
544 nested_vmcb02_compute_g_pat(svm);
545
546 /* Load the nested guest state */
547 if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
548 new_vmcb12 = true;
549 svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
550 svm->nested.force_msr_bitmap_recalc = true;
551 }
552
553 if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
554 vmcb02->save.es = vmcb12->save.es;
555 vmcb02->save.cs = vmcb12->save.cs;
556 vmcb02->save.ss = vmcb12->save.ss;
557 vmcb02->save.ds = vmcb12->save.ds;
558 vmcb02->save.cpl = vmcb12->save.cpl;
559 vmcb_mark_dirty(vmcb02, VMCB_SEG);
560 }
561
562 if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
563 vmcb02->save.gdtr = vmcb12->save.gdtr;
564 vmcb02->save.idtr = vmcb12->save.idtr;
565 vmcb_mark_dirty(vmcb02, VMCB_DT);
566 }
567
568 kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
569
570 svm_set_efer(&svm->vcpu, svm->nested.save.efer);
571
572 svm_set_cr0(&svm->vcpu, svm->nested.save.cr0);
573 svm_set_cr4(&svm->vcpu, svm->nested.save.cr4);
574
575 svm->vcpu.arch.cr2 = vmcb12->save.cr2;
576
577 kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
578 kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
579 kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
580
581 /* In case we don't even reach vcpu_run, the fields are not updated */
582 vmcb02->save.rax = vmcb12->save.rax;
583 vmcb02->save.rsp = vmcb12->save.rsp;
584 vmcb02->save.rip = vmcb12->save.rip;
585
 586 /* These bits will be set properly on the first execution when new_vmcb12 is true */
587 if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
588 vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
589 svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
590 vmcb_mark_dirty(vmcb02, VMCB_DR);
591 }
592
593 if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
594 /*
595 * Reserved bits of DEBUGCTL are ignored. Be consistent with
596 * svm_set_msr's definition of reserved bits.
597 */
598 svm_copy_lbrs(vmcb02, vmcb12);
599 vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
600 svm_update_lbrv(&svm->vcpu);
601
602 } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
603 svm_copy_lbrs(vmcb02, vmcb01);
604 }
605}
606
607static inline bool is_evtinj_soft(u32 evtinj)
608{
609 u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
610 u8 vector = evtinj & SVM_EVTINJ_VEC_MASK;
611
612 if (!(evtinj & SVM_EVTINJ_VALID))
613 return false;
614
615 if (type == SVM_EVTINJ_TYPE_SOFT)
616 return true;
617
618 return type == SVM_EVTINJ_TYPE_EXEPT && kvm_exception_is_soft(vector);
619}
620
621static bool is_evtinj_nmi(u32 evtinj)
622{
623 u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
624
625 if (!(evtinj & SVM_EVTINJ_VALID))
626 return false;
627
628 return type == SVM_EVTINJ_TYPE_NMI;
629}
630
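/*
 * Build vmcb02's control area from L1's controls (vmcb01) and the cached
 * vmcb12 controls, then enter guest mode and recalculate intercepts.
 */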
631static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
632 unsigned long vmcb12_rip,
633 unsigned long vmcb12_csbase)
634{
635 u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
636 u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
637
638 struct kvm_vcpu *vcpu = &svm->vcpu;
639 struct vmcb *vmcb01 = svm->vmcb01.ptr;
640 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
641 u32 pause_count12;
642 u32 pause_thresh12;
643
644 /*
645 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
646 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
647 */
648
649 if (svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
650 int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
651 else
652 int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
653
654 /* Copied from vmcb01. msrpm_base can be overwritten later. */
655 vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
656 vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
657 vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
658
659 /* Done at vmrun: asid. */
660
661 /* Also overwritten later if necessary. */
662 vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
663
664 /* nested_cr3. */
665 if (nested_npt_enabled(svm))
666 nested_svm_init_mmu_context(vcpu);
667
668 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
669 vcpu->arch.l1_tsc_offset,
670 svm->nested.ctl.tsc_offset,
671 svm->tsc_ratio_msr);
672
673 vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;
674
675 if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
676 WARN_ON(!svm->tsc_scaling_enabled);
677 nested_svm_update_tsc_ratio_msr(vcpu);
678 }
679
680 vmcb02->control.int_ctl =
681 (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
682 (vmcb01->control.int_ctl & int_ctl_vmcb01_bits);
683
684 vmcb02->control.int_vector = svm->nested.ctl.int_vector;
685 vmcb02->control.int_state = svm->nested.ctl.int_state;
686 vmcb02->control.event_inj = svm->nested.ctl.event_inj;
687 vmcb02->control.event_inj_err = svm->nested.ctl.event_inj_err;
688
689 /*
690 * next_rip is consumed on VMRUN as the return address pushed on the
691 * stack for injected soft exceptions/interrupts. If nrips is exposed
692 * to L1, take it verbatim from vmcb12. If nrips is supported in
693 * hardware but not exposed to L1, stuff the actual L2 RIP to emulate
694 * what a nrips=0 CPU would do (L1 is responsible for advancing RIP
695 * prior to injecting the event).
696 */
697 if (svm->nrips_enabled)
698 vmcb02->control.next_rip = svm->nested.ctl.next_rip;
699 else if (boot_cpu_has(X86_FEATURE_NRIPS))
700 vmcb02->control.next_rip = vmcb12_rip;
701
702 svm->nmi_l1_to_l2 = is_evtinj_nmi(vmcb02->control.event_inj);
703 if (is_evtinj_soft(vmcb02->control.event_inj)) {
704 svm->soft_int_injected = true;
705 svm->soft_int_csbase = vmcb12_csbase;
706 svm->soft_int_old_rip = vmcb12_rip;
707 if (svm->nrips_enabled)
708 svm->soft_int_next_rip = svm->nested.ctl.next_rip;
709 else
710 svm->soft_int_next_rip = vmcb12_rip;
711 }
712
713 vmcb02->control.virt_ext = vmcb01->control.virt_ext &
714 LBR_CTL_ENABLE_MASK;
715 if (svm->lbrv_enabled)
716 vmcb02->control.virt_ext |=
717 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
718
719 if (!nested_vmcb_needs_vls_intercept(svm))
720 vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
721
722 pause_count12 = svm->pause_filter_enabled ? svm->nested.ctl.pause_filter_count : 0;
723 pause_thresh12 = svm->pause_threshold_enabled ? svm->nested.ctl.pause_filter_thresh : 0;
724 if (kvm_pause_in_guest(svm->vcpu.kvm)) {
725 /* use guest values since host doesn't intercept PAUSE */
726 vmcb02->control.pause_filter_count = pause_count12;
727 vmcb02->control.pause_filter_thresh = pause_thresh12;
728
729 } else {
730 /* start from host values otherwise */
731 vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
732 vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;
733
734 /* ... but ensure filtering is disabled if so requested. */
735 if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
736 if (!pause_count12)
737 vmcb02->control.pause_filter_count = 0;
738 if (!pause_thresh12)
739 vmcb02->control.pause_filter_thresh = 0;
740 }
741 }
742
743 nested_svm_transition_tlb_flush(vcpu);
744
745 /* Enter Guest-Mode */
746 enter_guest_mode(vcpu);
747
748 /*
749 * Merge guest and host intercepts - must be called with vcpu in
750 * guest-mode to take effect.
751 */
752 recalc_intercepts(svm);
753}
754
755static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
756{
757 /*
758 * Some VMCB state is shared between L1 and L2 and thus has to be
759 * moved at the time of nested vmrun and vmexit.
760 *
761 * VMLOAD/VMSAVE state would also belong in this category, but KVM
762 * always performs VMLOAD and VMSAVE from the VMCB01.
763 */
764 to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
765}
766
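/*
 * Emulate VMRUN: switch to vmcb02, prepare its control and save state from
 * vmcb12, and load L2's CR3. Also used when restoring nested state; in that
 * case @from_vmrun is false and state kept in guest memory is loaded later
 * via KVM_REQ_GET_NESTED_STATE_PAGES.
 */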
767int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
768 struct vmcb *vmcb12, bool from_vmrun)
769{
770 struct vcpu_svm *svm = to_svm(vcpu);
771 int ret;
772
773 trace_kvm_nested_vmenter(svm->vmcb->save.rip,
774 vmcb12_gpa,
775 vmcb12->save.rip,
776 vmcb12->control.int_ctl,
777 vmcb12->control.event_inj,
778 vmcb12->control.nested_ctl,
779 vmcb12->control.nested_cr3,
780 vmcb12->save.cr3,
781 KVM_ISA_SVM);
782
783 trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
784 vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
785 vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
786 vmcb12->control.intercepts[INTERCEPT_WORD3],
787 vmcb12->control.intercepts[INTERCEPT_WORD4],
788 vmcb12->control.intercepts[INTERCEPT_WORD5]);
789
790
791 svm->nested.vmcb12_gpa = vmcb12_gpa;
792
793 WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);
794
795 nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
796
797 svm_switch_vmcb(svm, &svm->nested.vmcb02);
798 nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base);
799 nested_vmcb02_prepare_save(svm, vmcb12);
800
801 ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
802 nested_npt_enabled(svm), from_vmrun);
803 if (ret)
804 return ret;
805
806 if (!from_vmrun)
807 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
808
809 svm_set_gif(svm, true);
810
811 if (kvm_vcpu_apicv_active(vcpu))
812 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
813
814 nested_svm_hv_update_vm_vp_ids(vcpu);
815
816 return 0;
817}
818
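/*
 * Handle the VMRUN intercept: map and validate vmcb12, stash L1 state in
 * vmcb01 and enter guest mode.
 */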
819int nested_svm_vmrun(struct kvm_vcpu *vcpu)
820{
821 struct vcpu_svm *svm = to_svm(vcpu);
822 int ret;
823 struct vmcb *vmcb12;
824 struct kvm_host_map map;
825 u64 vmcb12_gpa;
826 struct vmcb *vmcb01 = svm->vmcb01.ptr;
827
828 if (!svm->nested.hsave_msr) {
829 kvm_inject_gp(vcpu, 0);
830 return 1;
831 }
832
833 if (is_smm(vcpu)) {
834 kvm_queue_exception(vcpu, UD_VECTOR);
835 return 1;
836 }
837
838 /* This fails when VP assist page is enabled but the supplied GPA is bogus */
839 ret = kvm_hv_verify_vp_assist(vcpu);
840 if (ret) {
841 kvm_inject_gp(vcpu, 0);
842 return ret;
843 }
844
845 vmcb12_gpa = svm->vmcb->save.rax;
846 ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
847 if (ret == -EINVAL) {
848 kvm_inject_gp(vcpu, 0);
849 return 1;
850 } else if (ret) {
851 return kvm_skip_emulated_instruction(vcpu);
852 }
853
854 ret = kvm_skip_emulated_instruction(vcpu);
855
856 vmcb12 = map.hva;
857
858 if (WARN_ON_ONCE(!svm->nested.initialized))
859 return -EINVAL;
860
861 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
862 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
863
864 if (!nested_vmcb_check_save(vcpu) ||
865 !nested_vmcb_check_controls(vcpu)) {
866 vmcb12->control.exit_code = SVM_EXIT_ERR;
867 vmcb12->control.exit_code_hi = 0;
868 vmcb12->control.exit_info_1 = 0;
869 vmcb12->control.exit_info_2 = 0;
870 goto out;
871 }
872
873 /*
874 * Since vmcb01 is not in use, we can use it to store some of the L1
875 * state.
876 */
877 vmcb01->save.efer = vcpu->arch.efer;
878 vmcb01->save.cr0 = kvm_read_cr0(vcpu);
879 vmcb01->save.cr4 = vcpu->arch.cr4;
880 vmcb01->save.rflags = kvm_get_rflags(vcpu);
881 vmcb01->save.rip = kvm_rip_read(vcpu);
882
883 if (!npt_enabled)
884 vmcb01->save.cr3 = kvm_read_cr3(vcpu);
885
886 svm->nested.nested_run_pending = 1;
887
888 if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
889 goto out_exit_err;
890
891 if (nested_svm_vmrun_msrpm(svm))
892 goto out;
893
894out_exit_err:
895 svm->nested.nested_run_pending = 0;
896 svm->nmi_l1_to_l2 = false;
897 svm->soft_int_injected = false;
898
899 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
900 svm->vmcb->control.exit_code_hi = 0;
901 svm->vmcb->control.exit_info_1 = 0;
902 svm->vmcb->control.exit_info_2 = 0;
903
904 nested_svm_vmexit(svm);
905
906out:
907 kvm_vcpu_unmap(vcpu, &map, true);
908
909 return ret;
910}
911
912/* Copy state save area fields which are handled by VMRUN */
913void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
914 struct vmcb_save_area *from_save)
915{
916 to_save->es = from_save->es;
917 to_save->cs = from_save->cs;
918 to_save->ss = from_save->ss;
919 to_save->ds = from_save->ds;
920 to_save->gdtr = from_save->gdtr;
921 to_save->idtr = from_save->idtr;
922 to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
923 to_save->efer = from_save->efer;
924 to_save->cr0 = from_save->cr0;
925 to_save->cr3 = from_save->cr3;
926 to_save->cr4 = from_save->cr4;
927 to_save->rax = from_save->rax;
928 to_save->rsp = from_save->rsp;
929 to_save->rip = from_save->rip;
930 to_save->cpl = 0;
931}
932
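/* Copy the state save area fields which are handled by VMLOAD/VMSAVE. */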
933void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
934{
935 to_vmcb->save.fs = from_vmcb->save.fs;
936 to_vmcb->save.gs = from_vmcb->save.gs;
937 to_vmcb->save.tr = from_vmcb->save.tr;
938 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
939 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
940 to_vmcb->save.star = from_vmcb->save.star;
941 to_vmcb->save.lstar = from_vmcb->save.lstar;
942 to_vmcb->save.cstar = from_vmcb->save.cstar;
943 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
944 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
945 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
946 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
947}
948
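/*
 * Emulate #VMEXIT: store the L2 state and exit information into vmcb12,
 * switch back to vmcb01 and restore L1's state.
 */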
949int nested_svm_vmexit(struct vcpu_svm *svm)
950{
951 struct kvm_vcpu *vcpu = &svm->vcpu;
952 struct vmcb *vmcb01 = svm->vmcb01.ptr;
953 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
954 struct vmcb *vmcb12;
955 struct kvm_host_map map;
956 int rc;
957
958 rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
959 if (rc) {
960 if (rc == -EINVAL)
961 kvm_inject_gp(vcpu, 0);
962 return 1;
963 }
964
965 vmcb12 = map.hva;
966
967 /* Exit Guest-Mode */
968 leave_guest_mode(vcpu);
969 svm->nested.vmcb12_gpa = 0;
970 WARN_ON_ONCE(svm->nested.nested_run_pending);
971
972 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
973
974 /* in case we halted in L2 */
975 svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
976
977 /* Give the current vmcb to the guest */
978
979 vmcb12->save.es = vmcb02->save.es;
980 vmcb12->save.cs = vmcb02->save.cs;
981 vmcb12->save.ss = vmcb02->save.ss;
982 vmcb12->save.ds = vmcb02->save.ds;
983 vmcb12->save.gdtr = vmcb02->save.gdtr;
984 vmcb12->save.idtr = vmcb02->save.idtr;
985 vmcb12->save.efer = svm->vcpu.arch.efer;
986 vmcb12->save.cr0 = kvm_read_cr0(vcpu);
987 vmcb12->save.cr3 = kvm_read_cr3(vcpu);
988 vmcb12->save.cr2 = vmcb02->save.cr2;
989 vmcb12->save.cr4 = svm->vcpu.arch.cr4;
990 vmcb12->save.rflags = kvm_get_rflags(vcpu);
991 vmcb12->save.rip = kvm_rip_read(vcpu);
992 vmcb12->save.rsp = kvm_rsp_read(vcpu);
993 vmcb12->save.rax = kvm_rax_read(vcpu);
994 vmcb12->save.dr7 = vmcb02->save.dr7;
995 vmcb12->save.dr6 = svm->vcpu.arch.dr6;
996 vmcb12->save.cpl = vmcb02->save.cpl;
997
998 vmcb12->control.int_state = vmcb02->control.int_state;
999 vmcb12->control.exit_code = vmcb02->control.exit_code;
1000 vmcb12->control.exit_code_hi = vmcb02->control.exit_code_hi;
1001 vmcb12->control.exit_info_1 = vmcb02->control.exit_info_1;
1002 vmcb12->control.exit_info_2 = vmcb02->control.exit_info_2;
1003
1004 if (vmcb12->control.exit_code != SVM_EXIT_ERR)
1005 nested_save_pending_event_to_vmcb12(svm, vmcb12);
1006
1007 if (svm->nrips_enabled)
1008 vmcb12->control.next_rip = vmcb02->control.next_rip;
1009
1010 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
1011 vmcb12->control.tlb_ctl = svm->nested.ctl.tlb_ctl;
1012 vmcb12->control.event_inj = svm->nested.ctl.event_inj;
1013 vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
1014
1015 if (!kvm_pause_in_guest(vcpu->kvm)) {
1016 vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
1017 vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
1018
1019 }
1020
1021 nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
1022
1023 svm_switch_vmcb(svm, &svm->vmcb01);
1024
1025 if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
1026 svm_copy_lbrs(vmcb12, vmcb02);
1027 svm_update_lbrv(vcpu);
1028 } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
1029 svm_copy_lbrs(vmcb01, vmcb02);
1030 svm_update_lbrv(vcpu);
1031 }
1032
1033 /*
1034 * On vmexit the GIF is set to false and
1035 * no event can be injected in L1.
1036 */
1037 svm_set_gif(svm, false);
1038 vmcb01->control.exit_int_info = 0;
1039
1040 svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
1041 if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
1042 vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
1043 vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
1044 }
1045
1046 if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
1047 WARN_ON(!svm->tsc_scaling_enabled);
1048 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
1049 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
1050 }
1051
1052 svm->nested.ctl.nested_cr3 = 0;
1053
1054 /*
1055 * Restore processor state that had been saved in vmcb01
1056 */
1057 kvm_set_rflags(vcpu, vmcb01->save.rflags);
1058 svm_set_efer(vcpu, vmcb01->save.efer);
1059 svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
1060 svm_set_cr4(vcpu, vmcb01->save.cr4);
1061 kvm_rax_write(vcpu, vmcb01->save.rax);
1062 kvm_rsp_write(vcpu, vmcb01->save.rsp);
1063 kvm_rip_write(vcpu, vmcb01->save.rip);
1064
1065 svm->vcpu.arch.dr7 = DR7_FIXED_1;
1066 kvm_update_dr7(&svm->vcpu);
1067
1068 trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
1069 vmcb12->control.exit_info_1,
1070 vmcb12->control.exit_info_2,
1071 vmcb12->control.exit_int_info,
1072 vmcb12->control.exit_int_info_err,
1073 KVM_ISA_SVM);
1074
1075 kvm_vcpu_unmap(vcpu, &map, true);
1076
1077 nested_svm_transition_tlb_flush(vcpu);
1078
1079 nested_svm_uninit_mmu_context(vcpu);
1080
1081 rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
1082 if (rc)
1083 return 1;
1084
1085 /*
1086 * Drop what we picked up for L2 via svm_complete_interrupts() so it
1087 * doesn't end up in L1.
1088 */
1089 svm->vcpu.arch.nmi_injected = false;
1090 kvm_clear_exception_queue(vcpu);
1091 kvm_clear_interrupt_queue(vcpu);
1092
1093 /*
1094 * If we are here following the completion of a VMRUN that
1095 * is being single-stepped, queue the pending #DB intercept
 1096 * right now so that it can be accounted for before we execute
1097 * L1's next instruction.
1098 */
1099 if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
1100 kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
1101
1102 /*
 1103 * Un-inhibit the AVIC right away, so that other vCPUs can start
 1104 * to benefit from it as soon as possible.
1105 */
1106 if (kvm_apicv_activated(vcpu->kvm))
1107 kvm_vcpu_update_apicv(vcpu);
1108
1109 return 0;
1110}
1111
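/*
 * Reflect a triple fault in L2 as a SHUTDOWN VM-exit to L1, provided L1
 * intercepts SHUTDOWN.
 */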
1112static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
1113{
1114 struct vcpu_svm *svm = to_svm(vcpu);
1115
1116 if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN))
1117 return;
1118
1119 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
1120 nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
1121}
1122
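/*
 * Allocate the vmcb02 page and the merged MSR permission bitmap that are
 * used while running L2.
 */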
1123int svm_allocate_nested(struct vcpu_svm *svm)
1124{
1125 struct page *vmcb02_page;
1126
1127 if (svm->nested.initialized)
1128 return 0;
1129
1130 vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1131 if (!vmcb02_page)
1132 return -ENOMEM;
1133 svm->nested.vmcb02.ptr = page_address(vmcb02_page);
1134 svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
1135
1136 svm->nested.msrpm = svm_vcpu_alloc_msrpm();
1137 if (!svm->nested.msrpm)
1138 goto err_free_vmcb02;
1139 svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
1140
1141 svm->nested.initialized = true;
1142 return 0;
1143
1144err_free_vmcb02:
1145 __free_page(vmcb02_page);
1146 return -ENOMEM;
1147}
1148
1149void svm_free_nested(struct vcpu_svm *svm)
1150{
1151 if (!svm->nested.initialized)
1152 return;
1153
1154 if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr))
1155 svm_switch_vmcb(svm, &svm->vmcb01);
1156
1157 svm_vcpu_free_msrpm(svm->nested.msrpm);
1158 svm->nested.msrpm = NULL;
1159
1160 __free_page(virt_to_page(svm->nested.vmcb02.ptr));
1161 svm->nested.vmcb02.ptr = NULL;
1162
1163 /*
1164 * When last_vmcb12_gpa matches the current vmcb12 gpa,
1165 * some vmcb12 fields are not loaded if they are marked clean
1166 * in the vmcb12, since in this case they are up to date already.
1167 *
1168 * When the vmcb02 is freed, this optimization becomes invalid.
1169 */
1170 svm->nested.last_vmcb12_gpa = INVALID_GPA;
1171
1172 svm->nested.initialized = false;
1173}
1174
1175void svm_leave_nested(struct kvm_vcpu *vcpu)
1176{
1177 struct vcpu_svm *svm = to_svm(vcpu);
1178
1179 if (is_guest_mode(vcpu)) {
1180 svm->nested.nested_run_pending = 0;
1181 svm->nested.vmcb12_gpa = INVALID_GPA;
1182
1183 leave_guest_mode(vcpu);
1184
1185 svm_switch_vmcb(svm, &svm->vmcb01);
1186
1187 nested_svm_uninit_mmu_context(vcpu);
1188 vmcb_mark_all_dirty(svm->vmcb);
1189 }
1190
1191 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1192}
1193
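/*
 * Consult L1's MSR permission bitmap to decide whether an MSR access
 * intercepted while running L2 must be reflected to L1.
 */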
1194static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1195{
1196 u32 offset, msr, value;
1197 int write, mask;
1198
1199 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
1200 return NESTED_EXIT_HOST;
1201
1202 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1203 offset = svm_msrpm_offset(msr);
1204 write = svm->vmcb->control.exit_info_1 & 1;
1205 mask = 1 << ((2 * (msr & 0xf)) + write);
1206
1207 if (offset == MSR_INVALID)
1208 return NESTED_EXIT_DONE;
1209
 1210 /* Offset is in 32 bit units but we need it in 8 bit units */
1211 offset *= 4;
1212
1213 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
1214 return NESTED_EXIT_DONE;
1215
1216 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1217}
1218
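/*
 * Consult L1's I/O permission bitmap to decide whether an IN/OUT intercepted
 * while running L2 must be reflected to L1.
 */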
1219static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
1220{
1221 unsigned port, size, iopm_len;
1222 u16 val, mask;
1223 u8 start_bit;
1224 u64 gpa;
1225
1226 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
1227 return NESTED_EXIT_HOST;
1228
1229 port = svm->vmcb->control.exit_info_1 >> 16;
1230 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
1231 SVM_IOIO_SIZE_SHIFT;
1232 gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
1233 start_bit = port % 8;
1234 iopm_len = (start_bit + size > 8) ? 2 : 1;
1235 mask = (0xf >> (4 - size)) << start_bit;
1236 val = 0;
1237
1238 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
1239 return NESTED_EXIT_DONE;
1240
1241 return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1242}
1243
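/*
 * Determine whether an exit taken while running L2 is handled by KVM
 * (NESTED_EXIT_HOST) or must be reflected to L1 (NESTED_EXIT_DONE).
 */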
1244static int nested_svm_intercept(struct vcpu_svm *svm)
1245{
1246 u32 exit_code = svm->vmcb->control.exit_code;
1247 int vmexit = NESTED_EXIT_HOST;
1248
1249 switch (exit_code) {
1250 case SVM_EXIT_MSR:
1251 vmexit = nested_svm_exit_handled_msr(svm);
1252 break;
1253 case SVM_EXIT_IOIO:
1254 vmexit = nested_svm_intercept_ioio(svm);
1255 break;
1256 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
1257 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1258 vmexit = NESTED_EXIT_DONE;
1259 break;
1260 }
1261 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
1262 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1263 vmexit = NESTED_EXIT_DONE;
1264 break;
1265 }
1266 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1267 /*
1268 * Host-intercepted exceptions have been checked already in
1269 * nested_svm_exit_special. There is nothing to do here,
1270 * the vmexit is injected by svm_check_nested_events.
1271 */
1272 vmexit = NESTED_EXIT_DONE;
1273 break;
1274 }
1275 case SVM_EXIT_ERR: {
1276 vmexit = NESTED_EXIT_DONE;
1277 break;
1278 }
1279 default: {
1280 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1281 vmexit = NESTED_EXIT_DONE;
1282 }
1283 }
1284
1285 return vmexit;
1286}
1287
1288int nested_svm_exit_handled(struct vcpu_svm *svm)
1289{
1290 int vmexit;
1291
1292 vmexit = nested_svm_intercept(svm);
1293
1294 if (vmexit == NESTED_EXIT_DONE)
1295 nested_svm_vmexit(svm);
1296
1297 return vmexit;
1298}
1299
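/*
 * Common checks for SVM instructions executed by the guest: EFER.SVME and
 * paging must be enabled and CPL must be 0, otherwise #UD or #GP is injected.
 */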
1300int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
1301{
1302 if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
1303 kvm_queue_exception(vcpu, UD_VECTOR);
1304 return 1;
1305 }
1306
1307 if (to_svm(vcpu)->vmcb->save.cpl) {
1308 kvm_inject_gp(vcpu, 0);
1309 return 1;
1310 }
1311
1312 return 0;
1313}
1314
1315static bool nested_svm_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
1316 u32 error_code)
1317{
1318 struct vcpu_svm *svm = to_svm(vcpu);
1319
1320 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector));
1321}
1322
1323static void nested_svm_inject_exception_vmexit(struct kvm_vcpu *vcpu)
1324{
1325 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
1326 struct vcpu_svm *svm = to_svm(vcpu);
1327 struct vmcb *vmcb = svm->vmcb;
1328
1329 vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + ex->vector;
1330 vmcb->control.exit_code_hi = 0;
1331
1332 if (ex->has_error_code)
1333 vmcb->control.exit_info_1 = ex->error_code;
1334
1335 /*
1336 * EXITINFO2 is undefined for all exception intercepts other
1337 * than #PF.
1338 */
1339 if (ex->vector == PF_VECTOR) {
1340 if (ex->has_payload)
1341 vmcb->control.exit_info_2 = ex->payload;
1342 else
1343 vmcb->control.exit_info_2 = vcpu->arch.cr2;
1344 } else if (ex->vector == DB_VECTOR) {
1345 /* See kvm_check_and_inject_events(). */
1346 kvm_deliver_exception_payload(vcpu, ex);
1347
1348 if (vcpu->arch.dr7 & DR7_GD) {
1349 vcpu->arch.dr7 &= ~DR7_GD;
1350 kvm_update_dr7(vcpu);
1351 }
1352 } else {
1353 WARN_ON(ex->has_payload);
1354 }
1355
1356 nested_svm_vmexit(svm);
1357}
1358
1359static inline bool nested_exit_on_init(struct vcpu_svm *svm)
1360{
1361 return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
1362}
1363
1364static int svm_check_nested_events(struct kvm_vcpu *vcpu)
1365{
1366 struct kvm_lapic *apic = vcpu->arch.apic;
1367 struct vcpu_svm *svm = to_svm(vcpu);
1368 /*
1369 * Only a pending nested run blocks a pending exception. If there is a
1370 * previously injected event, the pending exception occurred while said
1371 * event was being delivered and thus needs to be handled.
1372 */
1373 bool block_nested_exceptions = svm->nested.nested_run_pending;
1374 /*
1375 * New events (not exceptions) are only recognized at instruction
1376 * boundaries. If an event needs reinjection, then KVM is handling a
1377 * VM-Exit that occurred _during_ instruction execution; new events are
1378 * blocked until the instruction completes.
1379 */
1380 bool block_nested_events = block_nested_exceptions ||
1381 kvm_event_needs_reinjection(vcpu);
1382
1383 if (lapic_in_kernel(vcpu) &&
1384 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
1385 if (block_nested_events)
1386 return -EBUSY;
1387 if (!nested_exit_on_init(svm))
1388 return 0;
1389 nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
1390 return 0;
1391 }
1392
1393 if (vcpu->arch.exception_vmexit.pending) {
1394 if (block_nested_exceptions)
1395 return -EBUSY;
1396 nested_svm_inject_exception_vmexit(vcpu);
1397 return 0;
1398 }
1399
1400 if (vcpu->arch.exception.pending) {
1401 if (block_nested_exceptions)
1402 return -EBUSY;
1403 return 0;
1404 }
1405
1406#ifdef CONFIG_KVM_SMM
1407 if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
1408 if (block_nested_events)
1409 return -EBUSY;
1410 if (!nested_exit_on_smi(svm))
1411 return 0;
1412 nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
1413 return 0;
1414 }
1415#endif
1416
1417 if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
1418 if (block_nested_events)
1419 return -EBUSY;
1420 if (!nested_exit_on_nmi(svm))
1421 return 0;
1422 nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
1423 return 0;
1424 }
1425
1426 if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
1427 if (block_nested_events)
1428 return -EBUSY;
1429 if (!nested_exit_on_intr(svm))
1430 return 0;
1431 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
1432 nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
1433 return 0;
1434 }
1435
1436 return 0;
1437}
1438
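/*
 * Exits that KVM (L0) must inspect before considering L1's intercepts;
 * returning NESTED_EXIT_HOST keeps the exit in L0.
 */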
1439int nested_svm_exit_special(struct vcpu_svm *svm)
1440{
1441 u32 exit_code = svm->vmcb->control.exit_code;
1442 struct kvm_vcpu *vcpu = &svm->vcpu;
1443
1444 switch (exit_code) {
1445 case SVM_EXIT_INTR:
1446 case SVM_EXIT_NMI:
1447 case SVM_EXIT_NPF:
1448 return NESTED_EXIT_HOST;
1449 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1450 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
1451
1452 if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
1453 excp_bits)
1454 return NESTED_EXIT_HOST;
1455 else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
1456 svm->vcpu.arch.apf.host_apf_flags)
1457 /* Trap async PF even if not shadowing */
1458 return NESTED_EXIT_HOST;
1459 break;
1460 }
1461 case SVM_EXIT_VMMCALL:
1462 /* Hyper-V L2 TLB flush hypercall is handled by L0 */
1463 if (guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
1464 nested_svm_l2_tlb_flush_enabled(vcpu) &&
1465 kvm_hv_is_tlb_flush_hcall(vcpu))
1466 return NESTED_EXIT_HOST;
1467 break;
1468 default:
1469 break;
1470 }
1471
1472 return NESTED_EXIT_CONTINUE;
1473}
1474
1475void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
1476{
1477 struct vcpu_svm *svm = to_svm(vcpu);
1478
1479 vcpu->arch.tsc_scaling_ratio =
1480 kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
1481 svm->tsc_ratio_msr);
1482 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
1483}
1484
1485/* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
1486static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
1487 struct vmcb_ctrl_area_cached *from)
1488{
1489 unsigned int i;
1490
1491 memset(dst, 0, sizeof(struct vmcb_control_area));
1492
1493 for (i = 0; i < MAX_INTERCEPT; i++)
1494 dst->intercepts[i] = from->intercepts[i];
1495
1496 dst->iopm_base_pa = from->iopm_base_pa;
1497 dst->msrpm_base_pa = from->msrpm_base_pa;
1498 dst->tsc_offset = from->tsc_offset;
1499 dst->asid = from->asid;
1500 dst->tlb_ctl = from->tlb_ctl;
1501 dst->int_ctl = from->int_ctl;
1502 dst->int_vector = from->int_vector;
1503 dst->int_state = from->int_state;
1504 dst->exit_code = from->exit_code;
1505 dst->exit_code_hi = from->exit_code_hi;
1506 dst->exit_info_1 = from->exit_info_1;
1507 dst->exit_info_2 = from->exit_info_2;
1508 dst->exit_int_info = from->exit_int_info;
1509 dst->exit_int_info_err = from->exit_int_info_err;
1510 dst->nested_ctl = from->nested_ctl;
1511 dst->event_inj = from->event_inj;
1512 dst->event_inj_err = from->event_inj_err;
1513 dst->next_rip = from->next_rip;
1514 dst->nested_cr3 = from->nested_cr3;
1515 dst->virt_ext = from->virt_ext;
1516 dst->pause_filter_count = from->pause_filter_count;
1517 dst->pause_filter_thresh = from->pause_filter_thresh;
1518 /* 'clean' and 'hv_enlightenments' are not changed by KVM */
1519}
1520
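/*
 * KVM_GET_NESTED_STATE: fill in the nested state header and, if the vCPU is
 * in guest mode, the cached vmcb12 control area and L1's save area.
 */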
1521static int svm_get_nested_state(struct kvm_vcpu *vcpu,
1522 struct kvm_nested_state __user *user_kvm_nested_state,
1523 u32 user_data_size)
1524{
1525 struct vcpu_svm *svm;
1526 struct vmcb_control_area *ctl;
1527 unsigned long r;
1528 struct kvm_nested_state kvm_state = {
1529 .flags = 0,
1530 .format = KVM_STATE_NESTED_FORMAT_SVM,
1531 .size = sizeof(kvm_state),
1532 };
1533 struct vmcb __user *user_vmcb = (struct vmcb __user *)
1534 &user_kvm_nested_state->data.svm[0];
1535
1536 if (!vcpu)
1537 return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
1538
1539 svm = to_svm(vcpu);
1540
1541 if (user_data_size < kvm_state.size)
1542 goto out;
1543
1544 /* First fill in the header and copy it out. */
1545 if (is_guest_mode(vcpu)) {
1546 kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
1547 kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
1548 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
1549
1550 if (svm->nested.nested_run_pending)
1551 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
1552 }
1553
1554 if (gif_set(svm))
1555 kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
1556
1557 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
1558 return -EFAULT;
1559
1560 if (!is_guest_mode(vcpu))
1561 goto out;
1562
1563 /*
1564 * Copy over the full size of the VMCB rather than just the size
1565 * of the structs.
1566 */
1567 if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
1568 return -EFAULT;
1569
1570 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
1571 if (!ctl)
1572 return -ENOMEM;
1573
1574 nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
1575 r = copy_to_user(&user_vmcb->control, ctl,
1576 sizeof(user_vmcb->control));
1577 kfree(ctl);
1578 if (r)
1579 return -EFAULT;
1580
1581 if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1582 sizeof(user_vmcb->save)))
1583 return -EFAULT;
1584out:
1585 return kvm_state.size;
1586}
1587
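/*
 * KVM_SET_NESTED_STATE: validate nested state supplied by userspace (e.g.
 * after migration) and, if requested, re-enter guest mode.
 */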
1588static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1589 struct kvm_nested_state __user *user_kvm_nested_state,
1590 struct kvm_nested_state *kvm_state)
1591{
1592 struct vcpu_svm *svm = to_svm(vcpu);
1593 struct vmcb __user *user_vmcb = (struct vmcb __user *)
1594 &user_kvm_nested_state->data.svm[0];
1595 struct vmcb_control_area *ctl;
1596 struct vmcb_save_area *save;
1597 struct vmcb_save_area_cached save_cached;
1598 struct vmcb_ctrl_area_cached ctl_cached;
1599 unsigned long cr0;
1600 int ret;
1601
1602 BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
1603 KVM_STATE_NESTED_SVM_VMCB_SIZE);
1604
1605 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1606 return -EINVAL;
1607
1608 if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1609 KVM_STATE_NESTED_RUN_PENDING |
1610 KVM_STATE_NESTED_GIF_SET))
1611 return -EINVAL;
1612
1613 /*
1614 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
1615 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
1616 */
1617 if (!(vcpu->arch.efer & EFER_SVME)) {
1618 /* GIF=1 and no guest mode are required if SVME=0. */
1619 if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1620 return -EINVAL;
1621 }
1622
1623 /* SMM temporarily disables SVM, so we cannot be in guest mode. */
1624 if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1625 return -EINVAL;
1626
1627 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1628 svm_leave_nested(vcpu);
1629 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1630 return 0;
1631 }
1632
1633 if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1634 return -EINVAL;
1635 if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1636 return -EINVAL;
1637
1638 ret = -ENOMEM;
1639 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
1640 save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
1641 if (!ctl || !save)
1642 goto out_free;
1643
1644 ret = -EFAULT;
1645 if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
1646 goto out_free;
1647 if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
1648 goto out_free;
1649
1650 ret = -EINVAL;
1651 __nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
1652 if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
1653 goto out_free;
1654
1655 /*
1656 * Processor state contains L2 state. Check that it is
1657 * valid for guest mode (see nested_vmcb_check_save).
1658 */
1659 cr0 = kvm_read_cr0(vcpu);
1660 if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
1661 goto out_free;
1662
1663 /*
1664 * Validate host state saved from before VMRUN (see
1665 * nested_svm_check_permissions).
1666 */
1667 __nested_copy_vmcb_save_to_cache(&save_cached, save);
1668 if (!(save->cr0 & X86_CR0_PG) ||
1669 !(save->cr0 & X86_CR0_PE) ||
1670 (save->rflags & X86_EFLAGS_VM) ||
1671 !__nested_vmcb_check_save(vcpu, &save_cached))
1672 goto out_free;
1673
1674
1675 /*
1676 * All checks done, we can enter guest mode. Userspace provides
1677 * vmcb12.control, which will be combined with L1 and stored into
1678 * vmcb02, and the L1 save state which we store in vmcb01.
1679 * L2 registers if needed are moved from the current VMCB to VMCB02.
1680 */
1681
1682 if (is_guest_mode(vcpu))
1683 svm_leave_nested(vcpu);
1684 else
1685 svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
1686
1687 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1688
1689 svm->nested.nested_run_pending =
1690 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
1691
1692 svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
1693
1694 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
1695 nested_copy_vmcb_control_to_cache(svm, ctl);
1696
1697 svm_switch_vmcb(svm, &svm->nested.vmcb02);
1698 nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);
1699
1700 /*
1701 * While the nested guest CR3 is already checked and set by
 1702 * KVM_SET_SREGS, it was set before the nested state was loaded,
 1703 * thus the MMU might not be initialized correctly.
1704 * Set it again to fix this.
1705 */
1706
1707 ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
1708 nested_npt_enabled(svm), false);
1709 if (WARN_ON_ONCE(ret))
1710 goto out_free;
1711
1712 svm->nested.force_msr_bitmap_recalc = true;
1713
1714 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1715 ret = 0;
1716out_free:
1717 kfree(save);
1718 kfree(ctl);
1719
1720 return ret;
1721}
1722
1723static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
1724{
1725 struct vcpu_svm *svm = to_svm(vcpu);
1726
1727 if (WARN_ON(!is_guest_mode(vcpu)))
1728 return true;
1729
1730 if (!vcpu->arch.pdptrs_from_userspace &&
1731 !nested_npt_enabled(svm) && is_pae_paging(vcpu))
1732 /*
1733 * Reload the guest's PDPTRs since after a migration
1734 * the guest CR3 might be restored prior to setting the nested
 1735 * state, which can lead to the wrong PDPTRs being loaded.
1736 */
1737 if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
1738 return false;
1739
1740 if (!nested_svm_vmrun_msrpm(svm)) {
1741 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1742 vcpu->run->internal.suberror =
1743 KVM_INTERNAL_ERROR_EMULATION;
1744 vcpu->run->internal.ndata = 0;
1745 return false;
1746 }
1747
1748 if (kvm_hv_verify_vp_assist(vcpu))
1749 return false;
1750
1751 return true;
1752}
1753
1754struct kvm_x86_nested_ops svm_nested_ops = {
1755 .leave_nested = svm_leave_nested,
1756 .is_exception_vmexit = nested_svm_is_exception_vmexit,
1757 .check_events = svm_check_nested_events,
1758 .triple_fault = nested_svm_triple_fault,
1759 .get_nested_state_pages = svm_get_nested_state_pages,
1760 .get_state = svm_get_nested_state,
1761 .set_state = svm_set_nested_state,
1762 .hv_inject_synthetic_vmexit_post_tlb_flush = svm_hv_inject_synthetic_vmexit_post_tlb_flush,
1763};
201 * nested vmcb. It is optimized in that it only merges the parts where
202 * the kvm msr permission bitmap may contain zero bits
203 */
204 int i;
205
206 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
207 return true;
208
209 for (i = 0; i < MSRPM_OFFSETS; i++) {
210 u32 value, p;
211 u64 offset;
212
213 if (msrpm_offsets[i] == 0xffffffff)
214 break;
215
216 p = msrpm_offsets[i];
217 offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
218
219 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
220 return false;
221
222 svm->nested.msrpm[p] = svm->msrpm[p] | value;
223 }
224
225 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
226
227 return true;
228}
229
230/*
231 * Bits 11:0 of bitmap address are ignored by hardware
232 */
233static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
234{
235 u64 addr = PAGE_ALIGN(pa);
236
237 return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
238 kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
239}
240
241static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
242 struct vmcb_control_area *control)
243{
244 if (CC(!vmcb_is_intercept(control, INTERCEPT_VMRUN)))
245 return false;
246
247 if (CC(control->asid == 0))
248 return false;
249
250 if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
251 return false;
252
253 if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
254 MSRPM_SIZE)))
255 return false;
256 if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
257 IOPM_SIZE)))
258 return false;
259
260 return true;
261}
262
263static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu,
264 struct vmcb_save_area *save)
265{
266 /*
267 * These checks are also performed by KVM_SET_SREGS,
268 * except that EFER.LMA is not checked by SVM against
269 * CR0.PG && EFER.LME.
270 */
271 if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
272 if (CC(!(save->cr4 & X86_CR4_PAE)) ||
273 CC(!(save->cr0 & X86_CR0_PE)) ||
274 CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
275 return false;
276 }
277
278 if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
279 return false;
280
281 return true;
282}
283
284/* Common checks that apply to both L1 and L2 state. */
285static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
286 struct vmcb_save_area *save)
287{
288 /*
289 * FIXME: these should be done after copying the fields,
290 * to avoid TOC/TOU races. For these save area checks
291 * the possible damage is limited since kvm_set_cr0 and
292 * kvm_set_cr4 handle failure; EFER_SVME is an exception
293 * so it is force-set later in nested_prepare_vmcb_save.
294 */
295 if (CC(!(save->efer & EFER_SVME)))
296 return false;
297
298 if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
299 CC(save->cr0 & ~0xffffffffULL))
300 return false;
301
302 if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
303 return false;
304
305 if (!nested_vmcb_check_cr3_cr4(vcpu, save))
306 return false;
307
308 if (CC(!kvm_valid_efer(vcpu, save->efer)))
309 return false;
310
311 return true;
312}
313
314void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
315 struct vmcb_control_area *control)
316{
317 copy_vmcb_control_area(&svm->nested.ctl, control);
318
319 /* Copy it here because nested_svm_check_controls will check it. */
320 svm->nested.ctl.asid = control->asid;
321 svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
322 svm->nested.ctl.iopm_base_pa &= ~0x0fffULL;
323}
324
325/*
326 * Synchronize fields that are written by the processor, so that
327 * they can be copied back into the vmcb12.
328 */
329void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
330{
331 u32 mask;
332 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
333 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
334
335 /* Only a few fields of int_ctl are written by the processor. */
336 mask = V_IRQ_MASK | V_TPR_MASK;
337 if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
338 svm_is_intercept(svm, INTERCEPT_VINTR)) {
339 /*
340 * In order to request an interrupt window, L0 is usurping
341 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
342 * even if it was clear in L1's VMCB. Restoring it would be
343 * wrong. However, in this case V_IRQ will remain true until
344 * interrupt_window_interception calls svm_clear_vintr and
345 * restores int_ctl. We can just leave it aside.
346 */
347 mask &= ~V_IRQ_MASK;
348 }
349 svm->nested.ctl.int_ctl &= ~mask;
350 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
351}
352
353/*
354 * Transfer any event that L0 or L1 wanted to inject into L2 to
355 * EXIT_INT_INFO.
356 */
357static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
358 struct vmcb *vmcb12)
359{
360 struct kvm_vcpu *vcpu = &svm->vcpu;
361 u32 exit_int_info = 0;
362 unsigned int nr;
363
364 if (vcpu->arch.exception.injected) {
365 nr = vcpu->arch.exception.nr;
366 exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
367
368 if (vcpu->arch.exception.has_error_code) {
369 exit_int_info |= SVM_EVTINJ_VALID_ERR;
370 vmcb12->control.exit_int_info_err =
371 vcpu->arch.exception.error_code;
372 }
373
374 } else if (vcpu->arch.nmi_injected) {
375 exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
376
377 } else if (vcpu->arch.interrupt.injected) {
378 nr = vcpu->arch.interrupt.nr;
379 exit_int_info = nr | SVM_EVTINJ_VALID;
380
381 if (vcpu->arch.interrupt.soft)
382 exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
383 else
384 exit_int_info |= SVM_EVTINJ_TYPE_INTR;
385 }
386
387 vmcb12->control.exit_int_info = exit_int_info;
388}
389
390static inline bool nested_npt_enabled(struct vcpu_svm *svm)
391{
392 return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
393}
394
395static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
396{
397 /*
398 * TODO: optimize unconditional TLB flush/MMU sync. A partial list of
399 * things to fix before this can be conditional:
400 *
401 * - Flush TLBs for both L1 and L2 remote TLB flush
402 * - Honor L1's request to flush an ASID on nested VMRUN
403 * - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
404 * - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
405 * - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
406 *
407 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
408 * NPT guest-physical mappings on VMRUN.
409 */
410 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
411 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
412}
413
414/*
415 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
416 * if we are emulating VM-Entry into a guest with NPT enabled.
417 */
418static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
419 bool nested_npt, bool reload_pdptrs)
420{
421 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
422 return -EINVAL;
423
424 if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
425 CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
426 return -EINVAL;
427
428 if (!nested_npt)
429 kvm_mmu_new_pgd(vcpu, cr3);
430
431 vcpu->arch.cr3 = cr3;
432 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
433
434 /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
435 kvm_init_mmu(vcpu);
436
437 return 0;
438}
439
440void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
441{
442 if (!svm->nested.vmcb02.ptr)
443 return;
444
445 /* FIXME: merge g_pat from vmcb01 and vmcb12. */
446 svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
447}
448
449static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
450{
451 bool new_vmcb12 = false;
452
453 nested_vmcb02_compute_g_pat(svm);
454
455 /* Load the nested guest state */
456 if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
457 new_vmcb12 = true;
458 svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
459 }
460
461 if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
462 svm->vmcb->save.es = vmcb12->save.es;
463 svm->vmcb->save.cs = vmcb12->save.cs;
464 svm->vmcb->save.ss = vmcb12->save.ss;
465 svm->vmcb->save.ds = vmcb12->save.ds;
466 svm->vmcb->save.cpl = vmcb12->save.cpl;
467 vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
468 }
469
470 if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
471 svm->vmcb->save.gdtr = vmcb12->save.gdtr;
472 svm->vmcb->save.idtr = vmcb12->save.idtr;
473 vmcb_mark_dirty(svm->vmcb, VMCB_DT);
474 }
475
476 kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
477
478 /*
479 * Force-set EFER_SVME even though it is checked earlier on the
480 * VMCB12, because the guest can flip the bit between the check
481 * and now. Clearing EFER_SVME would call svm_free_nested.
482 */
483 svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
484
485 svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
486 svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
487
488 svm->vcpu.arch.cr2 = vmcb12->save.cr2;
489
490 kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
491 kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
492 kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
493
494 /* In case we don't even reach vcpu_run, the fields are not updated */
495 svm->vmcb->save.rax = vmcb12->save.rax;
496 svm->vmcb->save.rsp = vmcb12->save.rsp;
497 svm->vmcb->save.rip = vmcb12->save.rip;
498
499 /* These bits will be set properly on the first execution when new_vmc12 is true */
500 if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
501 svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
502 svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
503 vmcb_mark_dirty(svm->vmcb, VMCB_DR);
504 }
505}
506
507static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
508{
509 const u32 int_ctl_vmcb01_bits =
510 V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;
511
512 const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
513
514 struct kvm_vcpu *vcpu = &svm->vcpu;
515
516 /*
517 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
518 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
519 */
520
521 /*
522 * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
523 * avic_physical_id.
524 */
525 WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));
526
527 /* Copied from vmcb01. msrpm_base can be overwritten later. */
528 svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
529 svm->vmcb->control.iopm_base_pa = svm->vmcb01.ptr->control.iopm_base_pa;
530 svm->vmcb->control.msrpm_base_pa = svm->vmcb01.ptr->control.msrpm_base_pa;
531
532 /* Done at vmrun: asid. */
533
534 /* Also overwritten later if necessary. */
535 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
536
537 /* nested_cr3. */
538 if (nested_npt_enabled(svm))
539 nested_svm_init_mmu_context(vcpu);
540
541 svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset =
542 vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
543
544 svm->vmcb->control.int_ctl =
545 (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
546 (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits);
547
548 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
549 svm->vmcb->control.int_state = svm->nested.ctl.int_state;
550 svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
551 svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;
552
553 svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count;
554 svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;
555
556 nested_svm_transition_tlb_flush(vcpu);
557
558 /* Enter Guest-Mode */
559 enter_guest_mode(vcpu);
560
561 /*
562 * Merge guest and host intercepts - must be called with vcpu in
563 * guest-mode to take effect.
564 */
565 recalc_intercepts(svm);
566}
567
568static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
569{
570 /*
571 * Some VMCB state is shared between L1 and L2 and thus has to be
572 * moved at the time of nested vmrun and vmexit.
573 *
574 * VMLOAD/VMSAVE state would also belong in this category, but KVM
575 * always performs VMLOAD and VMSAVE from the VMCB01.
576 */
577 to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
578}
579
580int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
581 struct vmcb *vmcb12)
582{
583 struct vcpu_svm *svm = to_svm(vcpu);
584 int ret;
585
586 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
587 vmcb12->save.rip,
588 vmcb12->control.int_ctl,
589 vmcb12->control.event_inj,
590 vmcb12->control.nested_ctl);
591
592 trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
593 vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
594 vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
595 vmcb12->control.intercepts[INTERCEPT_WORD3],
596 vmcb12->control.intercepts[INTERCEPT_WORD4],
597 vmcb12->control.intercepts[INTERCEPT_WORD5]);
598
599
600 svm->nested.vmcb12_gpa = vmcb12_gpa;
601
602 WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);
603
604 nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
605
606 svm_switch_vmcb(svm, &svm->nested.vmcb02);
607 nested_vmcb02_prepare_control(svm);
608 nested_vmcb02_prepare_save(svm, vmcb12);
609
610 ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
611 nested_npt_enabled(svm), true);
612 if (ret)
613 return ret;
614
615 if (!npt_enabled)
616 vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
617
618 svm_set_gif(svm, true);
619
620 return 0;
621}
622
623int nested_svm_vmrun(struct kvm_vcpu *vcpu)
624{
625 struct vcpu_svm *svm = to_svm(vcpu);
626 int ret;
627 struct vmcb *vmcb12;
628 struct kvm_host_map map;
629 u64 vmcb12_gpa;
630
631 if (!svm->nested.hsave_msr) {
632 kvm_inject_gp(vcpu, 0);
633 return 1;
634 }
635
636 if (is_smm(vcpu)) {
637 kvm_queue_exception(vcpu, UD_VECTOR);
638 return 1;
639 }
640
641 vmcb12_gpa = svm->vmcb->save.rax;
642 ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
643 if (ret == -EINVAL) {
644 kvm_inject_gp(vcpu, 0);
645 return 1;
646 } else if (ret) {
647 return kvm_skip_emulated_instruction(vcpu);
648 }
649
650 ret = kvm_skip_emulated_instruction(vcpu);
651
652 vmcb12 = map.hva;
653
654 if (WARN_ON_ONCE(!svm->nested.initialized))
655 return -EINVAL;
656
657 nested_load_control_from_vmcb12(svm, &vmcb12->control);
658
659 if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save) ||
660 !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) {
661 vmcb12->control.exit_code = SVM_EXIT_ERR;
662 vmcb12->control.exit_code_hi = 0;
663 vmcb12->control.exit_info_1 = 0;
664 vmcb12->control.exit_info_2 = 0;
665 goto out;
666 }
667
668
669 /* Clear internal status */
670 kvm_clear_exception_queue(vcpu);
671 kvm_clear_interrupt_queue(vcpu);
672
673 /*
674 * Since vmcb01 is not in use, we can use it to store some of the L1
675 * state.
676 */
677 svm->vmcb01.ptr->save.efer = vcpu->arch.efer;
678 svm->vmcb01.ptr->save.cr0 = kvm_read_cr0(vcpu);
679 svm->vmcb01.ptr->save.cr4 = vcpu->arch.cr4;
680 svm->vmcb01.ptr->save.rflags = kvm_get_rflags(vcpu);
681 svm->vmcb01.ptr->save.rip = kvm_rip_read(vcpu);
682
683 if (!npt_enabled)
684 svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(vcpu);
685
686 svm->nested.nested_run_pending = 1;
687
688 if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
689 goto out_exit_err;
690
691 if (nested_svm_vmrun_msrpm(svm))
692 goto out;
693
694out_exit_err:
695 svm->nested.nested_run_pending = 0;
696
697 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
698 svm->vmcb->control.exit_code_hi = 0;
699 svm->vmcb->control.exit_info_1 = 0;
700 svm->vmcb->control.exit_info_2 = 0;
701
702 nested_svm_vmexit(svm);
703
704out:
705 kvm_vcpu_unmap(vcpu, &map, true);
706
707 return ret;
708}
709
710/* Copy state save area fields which are handled by VMRUN */
711void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
712 struct vmcb_save_area *from_save)
713{
714 to_save->es = from_save->es;
715 to_save->cs = from_save->cs;
716 to_save->ss = from_save->ss;
717 to_save->ds = from_save->ds;
718 to_save->gdtr = from_save->gdtr;
719 to_save->idtr = from_save->idtr;
720 to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
721 to_save->efer = from_save->efer;
722 to_save->cr0 = from_save->cr0;
723 to_save->cr3 = from_save->cr3;
724 to_save->cr4 = from_save->cr4;
725 to_save->rax = from_save->rax;
726 to_save->rsp = from_save->rsp;
727 to_save->rip = from_save->rip;
728 to_save->cpl = 0;
729}
730
731void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
732{
733 to_vmcb->save.fs = from_vmcb->save.fs;
734 to_vmcb->save.gs = from_vmcb->save.gs;
735 to_vmcb->save.tr = from_vmcb->save.tr;
736 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
737 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
738 to_vmcb->save.star = from_vmcb->save.star;
739 to_vmcb->save.lstar = from_vmcb->save.lstar;
740 to_vmcb->save.cstar = from_vmcb->save.cstar;
741 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
742 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
743 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
744 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
745}
746
747int nested_svm_vmexit(struct vcpu_svm *svm)
748{
749 struct kvm_vcpu *vcpu = &svm->vcpu;
750 struct vmcb *vmcb12;
751 struct vmcb *vmcb = svm->vmcb;
752 struct kvm_host_map map;
753 int rc;
754
755 /* Triple faults in L2 should never escape. */
756 WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
757
758 rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
759 if (rc) {
760 if (rc == -EINVAL)
761 kvm_inject_gp(vcpu, 0);
762 return 1;
763 }
764
765 vmcb12 = map.hva;
766
767 /* Exit Guest-Mode */
768 leave_guest_mode(vcpu);
769 svm->nested.vmcb12_gpa = 0;
770 WARN_ON_ONCE(svm->nested.nested_run_pending);
771
772 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
773
774 /* in case we halted in L2 */
775 svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
776
777 /* Give the current vmcb to the guest */
778
779 vmcb12->save.es = vmcb->save.es;
780 vmcb12->save.cs = vmcb->save.cs;
781 vmcb12->save.ss = vmcb->save.ss;
782 vmcb12->save.ds = vmcb->save.ds;
783 vmcb12->save.gdtr = vmcb->save.gdtr;
784 vmcb12->save.idtr = vmcb->save.idtr;
785 vmcb12->save.efer = svm->vcpu.arch.efer;
786 vmcb12->save.cr0 = kvm_read_cr0(vcpu);
787 vmcb12->save.cr3 = kvm_read_cr3(vcpu);
788 vmcb12->save.cr2 = vmcb->save.cr2;
789 vmcb12->save.cr4 = svm->vcpu.arch.cr4;
790 vmcb12->save.rflags = kvm_get_rflags(vcpu);
791 vmcb12->save.rip = kvm_rip_read(vcpu);
792 vmcb12->save.rsp = kvm_rsp_read(vcpu);
793 vmcb12->save.rax = kvm_rax_read(vcpu);
794 vmcb12->save.dr7 = vmcb->save.dr7;
795 vmcb12->save.dr6 = svm->vcpu.arch.dr6;
796 vmcb12->save.cpl = vmcb->save.cpl;
797
798 vmcb12->control.int_state = vmcb->control.int_state;
799 vmcb12->control.exit_code = vmcb->control.exit_code;
800 vmcb12->control.exit_code_hi = vmcb->control.exit_code_hi;
801 vmcb12->control.exit_info_1 = vmcb->control.exit_info_1;
802 vmcb12->control.exit_info_2 = vmcb->control.exit_info_2;
803
804 if (vmcb12->control.exit_code != SVM_EXIT_ERR)
805 nested_save_pending_event_to_vmcb12(svm, vmcb12);
806
807 if (svm->nrips_enabled)
808 vmcb12->control.next_rip = vmcb->control.next_rip;
809
810 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
811 vmcb12->control.tlb_ctl = svm->nested.ctl.tlb_ctl;
812 vmcb12->control.event_inj = svm->nested.ctl.event_inj;
813 vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
814
815 vmcb12->control.pause_filter_count =
816 svm->vmcb->control.pause_filter_count;
817 vmcb12->control.pause_filter_thresh =
818 svm->vmcb->control.pause_filter_thresh;
819
820 nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
821
822 svm_switch_vmcb(svm, &svm->vmcb01);
823
824 /*
825 * On vmexit the GIF is set to false and
826 * no event can be injected in L1.
827 */
828 svm_set_gif(svm, false);
829 svm->vmcb->control.exit_int_info = 0;
830
831 svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
832 if (svm->vmcb->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
833 svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
834 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
835 }
836
837 svm->nested.ctl.nested_cr3 = 0;
838
839 /*
840 * Restore processor state that had been saved in vmcb01
841 */
842 kvm_set_rflags(vcpu, svm->vmcb->save.rflags);
843 svm_set_efer(vcpu, svm->vmcb->save.efer);
844 svm_set_cr0(vcpu, svm->vmcb->save.cr0 | X86_CR0_PE);
845 svm_set_cr4(vcpu, svm->vmcb->save.cr4);
846 kvm_rax_write(vcpu, svm->vmcb->save.rax);
847 kvm_rsp_write(vcpu, svm->vmcb->save.rsp);
848 kvm_rip_write(vcpu, svm->vmcb->save.rip);
849
850 svm->vcpu.arch.dr7 = DR7_FIXED_1;
851 kvm_update_dr7(&svm->vcpu);
852
853 trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
854 vmcb12->control.exit_info_1,
855 vmcb12->control.exit_info_2,
856 vmcb12->control.exit_int_info,
857 vmcb12->control.exit_int_info_err,
858 KVM_ISA_SVM);
859
860 kvm_vcpu_unmap(vcpu, &map, true);
861
862 nested_svm_transition_tlb_flush(vcpu);
863
864 nested_svm_uninit_mmu_context(vcpu);
865
866 rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false, true);
867 if (rc)
868 return 1;
869
870 /*
871 * Drop what we picked up for L2 via svm_complete_interrupts() so it
872 * doesn't end up in L1.
873 */
874 svm->vcpu.arch.nmi_injected = false;
875 kvm_clear_exception_queue(vcpu);
876 kvm_clear_interrupt_queue(vcpu);
877
878 /*
879 * If we are here following the completion of a VMRUN that
880 * is being single-stepped, queue the pending #DB intercept
881 * right now so that it an be accounted for before we execute
882 * L1's next instruction.
883 */
884 if (unlikely(svm->vmcb->save.rflags & X86_EFLAGS_TF))
885 kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
886
887 return 0;
888}
889
890static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
891{
892 nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
893}
894
895int svm_allocate_nested(struct vcpu_svm *svm)
896{
897 struct page *vmcb02_page;
898
899 if (svm->nested.initialized)
900 return 0;
901
902 vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
903 if (!vmcb02_page)
904 return -ENOMEM;
905 svm->nested.vmcb02.ptr = page_address(vmcb02_page);
906 svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
907
908 svm->nested.msrpm = svm_vcpu_alloc_msrpm();
909 if (!svm->nested.msrpm)
910 goto err_free_vmcb02;
911 svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
912
913 svm->nested.initialized = true;
914 return 0;
915
916err_free_vmcb02:
917 __free_page(vmcb02_page);
918 return -ENOMEM;
919}
920
921void svm_free_nested(struct vcpu_svm *svm)
922{
923 if (!svm->nested.initialized)
924 return;
925
926 svm_vcpu_free_msrpm(svm->nested.msrpm);
927 svm->nested.msrpm = NULL;
928
929 __free_page(virt_to_page(svm->nested.vmcb02.ptr));
930 svm->nested.vmcb02.ptr = NULL;
931
932 /*
933 * When last_vmcb12_gpa matches the current vmcb12 gpa,
934 * some vmcb12 fields are not loaded if they are marked clean
935 * in the vmcb12, since in this case they are up to date already.
936 *
937 * When the vmcb02 is freed, this optimization becomes invalid.
938 */
939 svm->nested.last_vmcb12_gpa = INVALID_GPA;
940
941 svm->nested.initialized = false;
942}
943
944/*
945 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
946 */
947void svm_leave_nested(struct vcpu_svm *svm)
948{
949 struct kvm_vcpu *vcpu = &svm->vcpu;
950
951 if (is_guest_mode(vcpu)) {
952 svm->nested.nested_run_pending = 0;
953 svm->nested.vmcb12_gpa = INVALID_GPA;
954
955 leave_guest_mode(vcpu);
956
957 svm_switch_vmcb(svm, &svm->vmcb01);
958
959 nested_svm_uninit_mmu_context(vcpu);
960 vmcb_mark_all_dirty(svm->vmcb);
961 }
962
963 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
964}
965
966static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
967{
968 u32 offset, msr, value;
969 int write, mask;
970
971 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
972 return NESTED_EXIT_HOST;
973
974 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
975 offset = svm_msrpm_offset(msr);
976 write = svm->vmcb->control.exit_info_1 & 1;
977 mask = 1 << ((2 * (msr & 0xf)) + write);
978
979 if (offset == MSR_INVALID)
980 return NESTED_EXIT_DONE;
981
982 /* Offset is in 32 bit units but need in 8 bit units */
983 offset *= 4;
984
985 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
986 return NESTED_EXIT_DONE;
987
988 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
989}
990
991static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
992{
993 unsigned port, size, iopm_len;
994 u16 val, mask;
995 u8 start_bit;
996 u64 gpa;
997
998 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
999 return NESTED_EXIT_HOST;
1000
1001 port = svm->vmcb->control.exit_info_1 >> 16;
1002 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
1003 SVM_IOIO_SIZE_SHIFT;
1004 gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
1005 start_bit = port % 8;
1006 iopm_len = (start_bit + size > 8) ? 2 : 1;
1007 mask = (0xf >> (4 - size)) << start_bit;
1008 val = 0;
1009
1010 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
1011 return NESTED_EXIT_DONE;
1012
1013 return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1014}
1015
1016static int nested_svm_intercept(struct vcpu_svm *svm)
1017{
1018 u32 exit_code = svm->vmcb->control.exit_code;
1019 int vmexit = NESTED_EXIT_HOST;
1020
1021 switch (exit_code) {
1022 case SVM_EXIT_MSR:
1023 vmexit = nested_svm_exit_handled_msr(svm);
1024 break;
1025 case SVM_EXIT_IOIO:
1026 vmexit = nested_svm_intercept_ioio(svm);
1027 break;
1028 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
1029 if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
1030 vmexit = NESTED_EXIT_DONE;
1031 break;
1032 }
1033 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
1034 if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
1035 vmexit = NESTED_EXIT_DONE;
1036 break;
1037 }
1038 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1039 /*
1040 * Host-intercepted exceptions have been checked already in
1041 * nested_svm_exit_special. There is nothing to do here,
1042 * the vmexit is injected by svm_check_nested_events.
1043 */
1044 vmexit = NESTED_EXIT_DONE;
1045 break;
1046 }
1047 case SVM_EXIT_ERR: {
1048 vmexit = NESTED_EXIT_DONE;
1049 break;
1050 }
1051 default: {
1052 if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
1053 vmexit = NESTED_EXIT_DONE;
1054 }
1055 }
1056
1057 return vmexit;
1058}
1059
1060int nested_svm_exit_handled(struct vcpu_svm *svm)
1061{
1062 int vmexit;
1063
1064 vmexit = nested_svm_intercept(svm);
1065
1066 if (vmexit == NESTED_EXIT_DONE)
1067 nested_svm_vmexit(svm);
1068
1069 return vmexit;
1070}
1071
1072int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
1073{
1074 if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
1075 kvm_queue_exception(vcpu, UD_VECTOR);
1076 return 1;
1077 }
1078
1079 if (to_svm(vcpu)->vmcb->save.cpl) {
1080 kvm_inject_gp(vcpu, 0);
1081 return 1;
1082 }
1083
1084 return 0;
1085}
1086
1087static bool nested_exit_on_exception(struct vcpu_svm *svm)
1088{
1089 unsigned int nr = svm->vcpu.arch.exception.nr;
1090
1091 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
1092}
1093
1094static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
1095{
1096 unsigned int nr = svm->vcpu.arch.exception.nr;
1097
1098 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
1099 svm->vmcb->control.exit_code_hi = 0;
1100
1101 if (svm->vcpu.arch.exception.has_error_code)
1102 svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;
1103
1104 /*
1105 * EXITINFO2 is undefined for all exception intercepts other
1106 * than #PF.
1107 */
1108 if (nr == PF_VECTOR) {
1109 if (svm->vcpu.arch.exception.nested_apf)
1110 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
1111 else if (svm->vcpu.arch.exception.has_payload)
1112 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
1113 else
1114 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
1115 } else if (nr == DB_VECTOR) {
1116 /* See inject_pending_event. */
1117 kvm_deliver_exception_payload(&svm->vcpu);
1118 if (svm->vcpu.arch.dr7 & DR7_GD) {
1119 svm->vcpu.arch.dr7 &= ~DR7_GD;
1120 kvm_update_dr7(&svm->vcpu);
1121 }
1122 } else
1123 WARN_ON(svm->vcpu.arch.exception.has_payload);
1124
1125 nested_svm_vmexit(svm);
1126}
1127
1128static inline bool nested_exit_on_init(struct vcpu_svm *svm)
1129{
1130 return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
1131}
1132
1133static int svm_check_nested_events(struct kvm_vcpu *vcpu)
1134{
1135 struct vcpu_svm *svm = to_svm(vcpu);
1136 bool block_nested_events =
1137 kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
1138 struct kvm_lapic *apic = vcpu->arch.apic;
1139
1140 if (lapic_in_kernel(vcpu) &&
1141 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
1142 if (block_nested_events)
1143 return -EBUSY;
1144 if (!nested_exit_on_init(svm))
1145 return 0;
1146 nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
1147 return 0;
1148 }
1149
1150 if (vcpu->arch.exception.pending) {
1151 /*
1152 * Only a pending nested run can block a pending exception.
1153 * Otherwise an injected NMI/interrupt should either be
1154 * lost or delivered to the nested hypervisor in the EXITINTINFO
1155 * vmcb field, while delivering the pending exception.
1156 */
1157 if (svm->nested.nested_run_pending)
1158 return -EBUSY;
1159 if (!nested_exit_on_exception(svm))
1160 return 0;
1161 nested_svm_inject_exception_vmexit(svm);
1162 return 0;
1163 }
1164
1165 if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
1166 if (block_nested_events)
1167 return -EBUSY;
1168 if (!nested_exit_on_smi(svm))
1169 return 0;
1170 nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
1171 return 0;
1172 }
1173
1174 if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
1175 if (block_nested_events)
1176 return -EBUSY;
1177 if (!nested_exit_on_nmi(svm))
1178 return 0;
1179 nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
1180 return 0;
1181 }
1182
1183 if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
1184 if (block_nested_events)
1185 return -EBUSY;
1186 if (!nested_exit_on_intr(svm))
1187 return 0;
1188 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
1189 nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
1190 return 0;
1191 }
1192
1193 return 0;
1194}
1195
1196int nested_svm_exit_special(struct vcpu_svm *svm)
1197{
1198 u32 exit_code = svm->vmcb->control.exit_code;
1199
1200 switch (exit_code) {
1201 case SVM_EXIT_INTR:
1202 case SVM_EXIT_NMI:
1203 case SVM_EXIT_NPF:
1204 return NESTED_EXIT_HOST;
1205 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1206 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
1207
1208 if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
1209 excp_bits)
1210 return NESTED_EXIT_HOST;
1211 else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
1212 svm->vcpu.arch.apf.host_apf_flags)
1213 /* Trap async PF even if not shadowing */
1214 return NESTED_EXIT_HOST;
1215 break;
1216 }
1217 default:
1218 break;
1219 }
1220
1221 return NESTED_EXIT_CONTINUE;
1222}
1223
1224static int svm_get_nested_state(struct kvm_vcpu *vcpu,
1225 struct kvm_nested_state __user *user_kvm_nested_state,
1226 u32 user_data_size)
1227{
1228 struct vcpu_svm *svm;
1229 struct kvm_nested_state kvm_state = {
1230 .flags = 0,
1231 .format = KVM_STATE_NESTED_FORMAT_SVM,
1232 .size = sizeof(kvm_state),
1233 };
1234 struct vmcb __user *user_vmcb = (struct vmcb __user *)
1235 &user_kvm_nested_state->data.svm[0];
1236
1237 if (!vcpu)
1238 return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
1239
1240 svm = to_svm(vcpu);
1241
1242 if (user_data_size < kvm_state.size)
1243 goto out;
1244
1245 /* First fill in the header and copy it out. */
1246 if (is_guest_mode(vcpu)) {
1247 kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
1248 kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
1249 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
1250
1251 if (svm->nested.nested_run_pending)
1252 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
1253 }
1254
1255 if (gif_set(svm))
1256 kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
1257
1258 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
1259 return -EFAULT;
1260
1261 if (!is_guest_mode(vcpu))
1262 goto out;
1263
1264 /*
1265 * Copy over the full size of the VMCB rather than just the size
1266 * of the structs.
1267 */
1268 if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
1269 return -EFAULT;
1270 if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
1271 sizeof(user_vmcb->control)))
1272 return -EFAULT;
1273 if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1274 sizeof(user_vmcb->save)))
1275 return -EFAULT;
1276out:
1277 return kvm_state.size;
1278}
1279
1280static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1281 struct kvm_nested_state __user *user_kvm_nested_state,
1282 struct kvm_nested_state *kvm_state)
1283{
1284 struct vcpu_svm *svm = to_svm(vcpu);
1285 struct vmcb __user *user_vmcb = (struct vmcb __user *)
1286 &user_kvm_nested_state->data.svm[0];
1287 struct vmcb_control_area *ctl;
1288 struct vmcb_save_area *save;
1289 unsigned long cr0;
1290 int ret;
1291
1292 BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
1293 KVM_STATE_NESTED_SVM_VMCB_SIZE);
1294
1295 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1296 return -EINVAL;
1297
1298 if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1299 KVM_STATE_NESTED_RUN_PENDING |
1300 KVM_STATE_NESTED_GIF_SET))
1301 return -EINVAL;
1302
1303 /*
1304 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
1305 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
1306 */
1307 if (!(vcpu->arch.efer & EFER_SVME)) {
1308 /* GIF=1 and no guest mode are required if SVME=0. */
1309 if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1310 return -EINVAL;
1311 }
1312
1313 /* SMM temporarily disables SVM, so we cannot be in guest mode. */
1314 if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1315 return -EINVAL;
1316
1317 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1318 svm_leave_nested(svm);
1319 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1320 return 0;
1321 }
1322
1323 if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1324 return -EINVAL;
1325 if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1326 return -EINVAL;
1327
1328 ret = -ENOMEM;
1329 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
1330 save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
1331 if (!ctl || !save)
1332 goto out_free;
1333
1334 ret = -EFAULT;
1335 if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
1336 goto out_free;
1337 if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
1338 goto out_free;
1339
1340 ret = -EINVAL;
1341 if (!nested_vmcb_check_controls(vcpu, ctl))
1342 goto out_free;
1343
1344 /*
1345 * Processor state contains L2 state. Check that it is
1346 * valid for guest mode (see nested_vmcb_check_save).
1347 */
1348 cr0 = kvm_read_cr0(vcpu);
1349 if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
1350 goto out_free;
1351
1352 /*
1353 * Validate host state saved from before VMRUN (see
1354 * nested_svm_check_permissions).
1355 */
1356 if (!(save->cr0 & X86_CR0_PG) ||
1357 !(save->cr0 & X86_CR0_PE) ||
1358 (save->rflags & X86_EFLAGS_VM) ||
1359 !nested_vmcb_valid_sregs(vcpu, save))
1360 goto out_free;
1361
1362 /*
1363 * While the nested guest CR3 is already checked and set by
1364 * KVM_SET_SREGS, it was set when nested state was yet loaded,
1365 * thus MMU might not be initialized correctly.
1366 * Set it again to fix this.
1367 */
1368
1369 ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
1370 nested_npt_enabled(svm), false);
1371 if (WARN_ON_ONCE(ret))
1372 goto out_free;
1373
1374
1375 /*
1376 * All checks done, we can enter guest mode. Userspace provides
1377 * vmcb12.control, which will be combined with L1 and stored into
1378 * vmcb02, and the L1 save state which we store in vmcb01.
1379 * L2 registers if needed are moved from the current VMCB to VMCB02.
1380 */
1381
1382 if (is_guest_mode(vcpu))
1383 svm_leave_nested(svm);
1384 else
1385 svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
1386
1387 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1388
1389 svm->nested.nested_run_pending =
1390 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
1391
1392 svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
1393
1394 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
1395 nested_load_control_from_vmcb12(svm, ctl);
1396
1397 svm_switch_vmcb(svm, &svm->nested.vmcb02);
1398 nested_vmcb02_prepare_control(svm);
1399 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1400 ret = 0;
1401out_free:
1402 kfree(save);
1403 kfree(ctl);
1404
1405 return ret;
1406}
1407
1408static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
1409{
1410 struct vcpu_svm *svm = to_svm(vcpu);
1411
1412 if (WARN_ON(!is_guest_mode(vcpu)))
1413 return true;
1414
1415 if (!vcpu->arch.pdptrs_from_userspace &&
1416 !nested_npt_enabled(svm) && is_pae_paging(vcpu))
1417 /*
1418 * Reload the guest's PDPTRs since after a migration
1419 * the guest CR3 might be restored prior to setting the nested
1420 * state which can lead to a load of wrong PDPTRs.
1421 */
1422 if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
1423 return false;
1424
1425 if (!nested_svm_vmrun_msrpm(svm)) {
1426 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1427 vcpu->run->internal.suberror =
1428 KVM_INTERNAL_ERROR_EMULATION;
1429 vcpu->run->internal.ndata = 0;
1430 return false;
1431 }
1432
1433 return true;
1434}
1435
1436struct kvm_x86_nested_ops svm_nested_ops = {
1437 .check_events = svm_check_nested_events,
1438 .triple_fault = nested_svm_triple_fault,
1439 .get_nested_state_pages = svm_get_nested_state_pages,
1440 .get_state = svm_get_nested_state,
1441 .set_state = svm_set_nested_state,
1442};