1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * This header defines architecture specific interfaces, x86 version
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2. See
7 * the COPYING file in the top-level directory.
8 *
9 */
10
11#ifndef _ASM_X86_KVM_HOST_H
12#define _ASM_X86_KVM_HOST_H
13
14#include <linux/types.h>
15#include <linux/mm.h>
16#include <linux/mmu_notifier.h>
17#include <linux/tracepoint.h>
18#include <linux/cpumask.h>
19#include <linux/irq_work.h>
20
21#include <linux/kvm.h>
22#include <linux/kvm_para.h>
23#include <linux/kvm_types.h>
24#include <linux/perf_event.h>
25#include <linux/pvclock_gtod.h>
26#include <linux/clocksource.h>
27#include <linux/irqbypass.h>
28#include <linux/hyperv.h>
29
30#include <asm/pvclock-abi.h>
31#include <asm/desc.h>
32#include <asm/mtrr.h>
33#include <asm/msr-index.h>
34#include <asm/asm.h>
35#include <asm/kvm_page_track.h>
36
37#define KVM_MAX_VCPUS 255
38#define KVM_SOFT_MAX_VCPUS 160
39#define KVM_USER_MEM_SLOTS 509
40/* memory slots that are not exposed to userspace */
41#define KVM_PRIVATE_MEM_SLOTS 3
42#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
43
44#define KVM_PIO_PAGE_OFFSET 1
45#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
46#define KVM_HALT_POLL_NS_DEFAULT 400000
47
48#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
49
50/* x86-specific vcpu->requests bit members */
51#define KVM_REQ_MIGRATE_TIMER 8
52#define KVM_REQ_REPORT_TPR_ACCESS 9
53#define KVM_REQ_TRIPLE_FAULT 10
54#define KVM_REQ_MMU_SYNC 11
55#define KVM_REQ_CLOCK_UPDATE 12
56#define KVM_REQ_DEACTIVATE_FPU 13
57#define KVM_REQ_EVENT 14
58#define KVM_REQ_APF_HALT 15
59#define KVM_REQ_STEAL_UPDATE 16
60#define KVM_REQ_NMI 17
61#define KVM_REQ_PMU 18
62#define KVM_REQ_PMI 19
63#define KVM_REQ_SMI 20
64#define KVM_REQ_MASTERCLOCK_UPDATE 21
65#define KVM_REQ_MCLOCK_INPROGRESS 22
66#define KVM_REQ_SCAN_IOAPIC 23
67#define KVM_REQ_GLOBAL_CLOCK_UPDATE 24
68#define KVM_REQ_APIC_PAGE_RELOAD 25
69#define KVM_REQ_HV_CRASH 26
70#define KVM_REQ_IOAPIC_EOI_EXIT 27
71#define KVM_REQ_HV_RESET 28
72#define KVM_REQ_HV_EXIT 29
73#define KVM_REQ_HV_STIMER 30
74
75#define CR0_RESERVED_BITS \
76 (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
77 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
78 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
79
80#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
81#define CR3_PCID_INVD BIT_64(63)
82#define CR4_RESERVED_BITS \
83 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
84 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
85 | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
86 | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
87 | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP \
88 | X86_CR4_PKE))
89
90#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
91
92
93
94#define INVALID_PAGE (~(hpa_t)0)
95#define VALID_PAGE(x) ((x) != INVALID_PAGE)
96
97#define UNMAPPED_GVA (~(gpa_t)0)
98
99/* KVM Hugepage definitions for x86 */
100#define KVM_NR_PAGE_SIZES 3
101#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
102#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
103#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
104#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
105#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
106
107static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
108{
109 /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
110 return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
111 (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
112}
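/*
 * Worked example (illustrative, assuming the usual PAGE_SHIFT of 12): the
 * hugepage macros above evaluate to
 *
 *	KVM_HPAGE_SHIFT(1) = 12  ->  4 KiB,   KVM_PAGES_PER_HPAGE(1) = 1
 *	KVM_HPAGE_SHIFT(2) = 21  ->  2 MiB,   KVM_PAGES_PER_HPAGE(2) = 512
 *	KVM_HPAGE_SHIFT(3) = 30  ->  1 GiB,   KVM_PAGES_PER_HPAGE(3) = 262144
 *
 * and gfn_to_index() returns the slot-relative index of @gfn's huge page,
 * e.g. gfn_to_index(0x12345, 0x12000, 2) == (0x12345 >> 9) - (0x12000 >> 9) == 1.
 */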
113
114#define KVM_PERMILLE_MMU_PAGES 20
115#define KVM_MIN_ALLOC_MMU_PAGES 64
116#define KVM_MMU_HASH_SHIFT 10
117#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
118#define KVM_MIN_FREE_MMU_PAGES 5
119#define KVM_REFILL_PAGES 25
120#define KVM_MAX_CPUID_ENTRIES 80
121#define KVM_NR_FIXED_MTRR_REGION 88
122#define KVM_NR_VAR_MTRR 8
123
124#define ASYNC_PF_PER_VCPU 64
125
126enum kvm_reg {
127 VCPU_REGS_RAX = 0,
128 VCPU_REGS_RCX = 1,
129 VCPU_REGS_RDX = 2,
130 VCPU_REGS_RBX = 3,
131 VCPU_REGS_RSP = 4,
132 VCPU_REGS_RBP = 5,
133 VCPU_REGS_RSI = 6,
134 VCPU_REGS_RDI = 7,
135#ifdef CONFIG_X86_64
136 VCPU_REGS_R8 = 8,
137 VCPU_REGS_R9 = 9,
138 VCPU_REGS_R10 = 10,
139 VCPU_REGS_R11 = 11,
140 VCPU_REGS_R12 = 12,
141 VCPU_REGS_R13 = 13,
142 VCPU_REGS_R14 = 14,
143 VCPU_REGS_R15 = 15,
144#endif
145 VCPU_REGS_RIP,
146 NR_VCPU_REGS
147};
148
149enum kvm_reg_ex {
150 VCPU_EXREG_PDPTR = NR_VCPU_REGS,
151 VCPU_EXREG_CR3,
152 VCPU_EXREG_RFLAGS,
153 VCPU_EXREG_SEGMENTS,
154};
155
156enum {
157 VCPU_SREG_ES,
158 VCPU_SREG_CS,
159 VCPU_SREG_SS,
160 VCPU_SREG_DS,
161 VCPU_SREG_FS,
162 VCPU_SREG_GS,
163 VCPU_SREG_TR,
164 VCPU_SREG_LDTR,
165};
166
167#include <asm/kvm_emulate.h>
168
169#define KVM_NR_MEM_OBJS 40
170
171#define KVM_NR_DB_REGS 4
172
173#define DR6_BD (1 << 13)
174#define DR6_BS (1 << 14)
175#define DR6_RTM (1 << 16)
176#define DR6_FIXED_1 0xfffe0ff0
177#define DR6_INIT 0xffff0ff0
178#define DR6_VOLATILE 0x0001e00f
179
180#define DR7_BP_EN_MASK 0x000000ff
181#define DR7_GE (1 << 9)
182#define DR7_GD (1 << 13)
183#define DR7_FIXED_1 0x00000400
184#define DR7_VOLATILE 0xffff2bff
185
186#define PFERR_PRESENT_BIT 0
187#define PFERR_WRITE_BIT 1
188#define PFERR_USER_BIT 2
189#define PFERR_RSVD_BIT 3
190#define PFERR_FETCH_BIT 4
191#define PFERR_PK_BIT 5
192
193#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
194#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
195#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
196#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
197#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
198#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
199
200/* apic attention bits */
201#define KVM_APIC_CHECK_VAPIC 0
202/*
203 * The following bit is set with PV-EOI, unset on EOI.
204 * We detect guest changes to PV-EOI by comparing
205 * this bit with the PV-EOI flag in guest memory.
206 * See the implementation in apic_update_pv_eoi.
207 */
208#define KVM_APIC_PV_EOI_PENDING 1
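/*
 * Illustrative use of the attention bits above (a sketch, not the in-tree
 * code; see apic_update_pv_eoi() in lapic.c for the real logic): they are
 * plain bit numbers meant for the generic bitops on
 * vcpu->arch.apic_attention, roughly
 *
 *	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
 *	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
 *		... compare against the PV-EOI flag in guest memory ...
 */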
209
210struct kvm_kernel_irq_routing_entry;
211
212/*
213 * We don't want allocation failures within the mmu code, so we preallocate
214 * enough memory for a single page fault in a cache.
215 */
216struct kvm_mmu_memory_cache {
217 int nobjs;
218 void *objects[KVM_NR_MEM_OBJS];
219};
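/*
 * Minimal sketch of how such a cache is consumed (the helper name below is
 * illustrative; the real allocation helpers live in mmu.c). The cache is
 * topped up before the MMU takes its lock, so a consumer never allocates
 * and never expects the cache to be empty:
 */
static inline void *kvm_mmu_memory_cache_pop_sketch(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs ? mc->objects[--mc->nobjs] : NULL;
}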
220
221/*
222 * The pages used as guest page tables by the soft MMU are tracked via
223 * kvm_memory_slot.arch.gfn_track, which is 16 bits wide, so the role bits
224 * used by an indirect shadow page cannot exceed 15 bits.
225 *
226 * Currently 14 bits are used: @level, @cr4_pae, @quadrant, @access,
227 * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
228 */
229union kvm_mmu_page_role {
230 unsigned word;
231 struct {
232 unsigned level:4;
233 unsigned cr4_pae:1;
234 unsigned quadrant:2;
235 unsigned direct:1;
236 unsigned access:3;
237 unsigned invalid:1;
238 unsigned nxe:1;
239 unsigned cr0_wp:1;
240 unsigned smep_andnot_wp:1;
241 unsigned smap_andnot_wp:1;
242 unsigned :8;
243
244 /*
245 * This is left at the top of the word so that
246 * kvm_memslots_for_spte_role can extract it with a
247 * simple shift. While there is room, give it a whole
248 * byte so it is also faster to load it from memory.
249 */
250 unsigned smm:8;
251 };
252};
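/*
 * Illustration of the layout above (assumes the usual x86 low-to-high
 * bit-field allocation): the 16 role bits plus the 8 unused bits leave @smm
 * in the top byte, so kvm_memslots_for_spte_role() can recover it with a
 * single shift of the packed word. Sketch only, not used by KVM itself:
 */
static inline unsigned kvm_mmu_role_smm_byte_sketch(union kvm_mmu_page_role role)
{
	return role.word >> 24;		/* equals role.smm */
}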
253
254struct kvm_rmap_head {
255 unsigned long val;
256};
257
258struct kvm_mmu_page {
259 struct list_head link;
260 struct hlist_node hash_link;
261
262 /*
263 * The following two entries are used to key the shadow page in the
264 * hash table.
265 */
266 gfn_t gfn;
267 union kvm_mmu_page_role role;
268
269 u64 *spt;
270 /* hold the gfn of each spte inside spt */
271 gfn_t *gfns;
272 bool unsync;
273 int root_count; /* Currently serving as active root */
274 unsigned int unsync_children;
275 struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
276
277 /* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */
278 unsigned long mmu_valid_gen;
279
280 DECLARE_BITMAP(unsync_child_bitmap, 512);
281
282#ifdef CONFIG_X86_32
283 /*
284 * Used out of the mmu-lock to avoid reading spte values while an
285 * update is in progress; see the comments in __get_spte_lockless().
286 */
287 int clear_spte_count;
288#endif
289
290 /* Number of writes since the last time traversal visited this page. */
291 atomic_t write_flooding_count;
292};
293
294struct kvm_pio_request {
295 unsigned long count;
296 int in;
297 int port;
298 int size;
299};
300
301struct rsvd_bits_validate {
302 u64 rsvd_bits_mask[2][4];
303 u64 bad_mt_xwr;
304};
305
306/*
307 * x86 supports 3 paging modes (4-level 64-bit, 3-level PAE, and 2-level
308 * 32-bit). The kvm_mmu structure abstracts the details of the current MMU
309 * mode.
310 */
311struct kvm_mmu {
312 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
313 unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
314 u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
315 int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
316 bool prefault);
317 void (*inject_page_fault)(struct kvm_vcpu *vcpu,
318 struct x86_exception *fault);
319 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
320 struct x86_exception *exception);
321 gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
322 struct x86_exception *exception);
323 int (*sync_page)(struct kvm_vcpu *vcpu,
324 struct kvm_mmu_page *sp);
325 void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
326 void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
327 u64 *spte, const void *pte);
328 hpa_t root_hpa;
329 int root_level;
330 int shadow_root_level;
331 union kvm_mmu_page_role base_role;
332 bool direct_map;
333
334 /*
335 * Bitmap; bit set = permission fault
336 * Byte index: page fault error code [4:1]
337 * Bit index: pte permissions in ACC_* format
338 */
339 u8 permissions[16];
340
341 /*
342 * The pkru_mask indicates if protection key checks are needed. It
343 * consists of 16 domains indexed by page fault error code bits [4:1],
344 * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
345 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
346 */
347 u32 pkru_mask;
348
349 u64 *pae_root;
350 u64 *lm_root;
351
352 /*
353 * Check for zero bits in shadow page table entries; these bits include
354 * not only hardware-reserved bits but also bits that the SPTE never
355 * uses.
356 */
357 struct rsvd_bits_validate shadow_zero_check;
358
359 struct rsvd_bits_validate guest_rsvd_check;
360
361 /* Can have large pages at levels 2..last_nonleaf_level-1. */
362 u8 last_nonleaf_level;
363
364 bool nx;
365
366 u64 pdptrs[4]; /* pae */
367};
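/*
 * Simplified sketch (assumed helper name; the real check is
 * permission_fault() in mmu.h) of how the permission and pkru tables above
 * are meant to be consulted. @pfec is the page fault error code, @pte_access
 * is the ACC_* permission mask of the translation, @pkey is the guest
 * protection key and @pkru the guest's PKRU value:
 */
static inline bool kvm_mmu_perm_fault_sketch(struct kvm_mmu *mmu, unsigned pfec,
					     unsigned pte_access, unsigned pkey,
					     u32 pkru)
{
	unsigned index = (pfec >> 1) & 0xf;	/* error code bits [4:1] */
	bool fault = (mmu->permissions[index] >> pte_access) & 1;

	/* AND the domain's two bits with the AD/WD bits of this key's PKRU field */
	if ((mmu->pkru_mask >> (index * 2)) & ((pkru >> (pkey * 2)) & 3))
		fault = true;

	return fault;
}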
368
369enum pmc_type {
370 KVM_PMC_GP = 0,
371 KVM_PMC_FIXED,
372};
373
374struct kvm_pmc {
375 enum pmc_type type;
376 u8 idx;
377 u64 counter;
378 u64 eventsel;
379 struct perf_event *perf_event;
380 struct kvm_vcpu *vcpu;
381};
382
383struct kvm_pmu {
384 unsigned nr_arch_gp_counters;
385 unsigned nr_arch_fixed_counters;
386 unsigned available_event_types;
387 u64 fixed_ctr_ctrl;
388 u64 global_ctrl;
389 u64 global_status;
390 u64 global_ovf_ctrl;
391 u64 counter_bitmask[2];
392 u64 global_ctrl_mask;
393 u64 reserved_bits;
394 u8 version;
395 struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
396 struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
397 struct irq_work irq_work;
398 u64 reprogram_pmi;
399};
400
401struct kvm_pmu_ops;
402
403enum {
404 KVM_DEBUGREG_BP_ENABLED = 1,
405 KVM_DEBUGREG_WONT_EXIT = 2,
406 KVM_DEBUGREG_RELOAD = 4,
407};
408
409struct kvm_mtrr_range {
410 u64 base;
411 u64 mask;
412 struct list_head node;
413};
414
415struct kvm_mtrr {
416 struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
417 mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
418 u64 deftype;
419
420 struct list_head head;
421};
422
423/* Hyper-V SynIC timer */
424struct kvm_vcpu_hv_stimer {
425 struct hrtimer timer;
426 int index;
427 u64 config;
428 u64 count;
429 u64 exp_time;
430 struct hv_message msg;
431 bool msg_pending;
432};
433
434/* Hyper-V synthetic interrupt controller (SynIC)*/
435struct kvm_vcpu_hv_synic {
436 u64 version;
437 u64 control;
438 u64 msg_page;
439 u64 evt_page;
440 atomic64_t sint[HV_SYNIC_SINT_COUNT];
441 atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
442 DECLARE_BITMAP(auto_eoi_bitmap, 256);
443 DECLARE_BITMAP(vec_bitmap, 256);
444 bool active;
445};
446
447/* Hyper-V per vcpu emulation context */
448struct kvm_vcpu_hv {
449 u64 hv_vapic;
450 s64 runtime_offset;
451 struct kvm_vcpu_hv_synic synic;
452 struct kvm_hyperv_exit exit;
453 struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
454 DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
455};
456
457struct kvm_vcpu_arch {
458 /*
459 * rip and regs accesses must go through
460 * kvm_{register,rip}_{read,write} functions.
461 */
462 unsigned long regs[NR_VCPU_REGS];
463 u32 regs_avail;
464 u32 regs_dirty;
465
466 unsigned long cr0;
467 unsigned long cr0_guest_owned_bits;
468 unsigned long cr2;
469 unsigned long cr3;
470 unsigned long cr4;
471 unsigned long cr4_guest_owned_bits;
472 unsigned long cr8;
473 u32 hflags;
474 u64 efer;
475 u64 apic_base;
476 struct kvm_lapic *apic; /* kernel irqchip context */
477 bool apicv_active;
478 DECLARE_BITMAP(ioapic_handled_vectors, 256);
479 unsigned long apic_attention;
480 int32_t apic_arb_prio;
481 int mp_state;
482 u64 ia32_misc_enable_msr;
483 u64 smbase;
484 bool tpr_access_reporting;
485 u64 ia32_xss;
486
487 /*
488 * Paging state of the vcpu
489 *
490 * If the vcpu runs in guest mode with two-level paging, this still saves
491 * the paging mode of the L1 guest. This context is always used to
492 * handle faults.
493 */
494 struct kvm_mmu mmu;
495
496 /*
497 * Paging state of an L2 guest (used for nested npt)
498 *
499 * This context will save all necessary information to walk page tables
500 * of an L2 guest. This context is only initialized for page table
501 * walking and not for faulting since we never handle L2 page faults on
502 * the host.
503 */
504 struct kvm_mmu nested_mmu;
505
506 /*
507 * Pointer to the mmu context currently used for
508 * gva_to_gpa translations.
509 */
510 struct kvm_mmu *walk_mmu;
511
512 struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
513 struct kvm_mmu_memory_cache mmu_page_cache;
514 struct kvm_mmu_memory_cache mmu_page_header_cache;
515
516 struct fpu guest_fpu;
517 u64 xcr0;
518 u64 guest_supported_xcr0;
519 u32 guest_xstate_size;
520
521 struct kvm_pio_request pio;
522 void *pio_data;
523
524 u8 event_exit_inst_len;
525
526 struct kvm_queued_exception {
527 bool pending;
528 bool has_error_code;
529 bool reinject;
530 u8 nr;
531 u32 error_code;
532 } exception;
533
534 struct kvm_queued_interrupt {
535 bool pending;
536 bool soft;
537 u8 nr;
538 } interrupt;
539
540 int halt_request; /* real mode on Intel only */
541
542 int cpuid_nent;
543 struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
544
545 int maxphyaddr;
546
547 /* emulate context */
548
549 struct x86_emulate_ctxt emulate_ctxt;
550 bool emulate_regs_need_sync_to_vcpu;
551 bool emulate_regs_need_sync_from_vcpu;
552 int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
553
554 gpa_t time;
555 struct pvclock_vcpu_time_info hv_clock;
556 unsigned int hw_tsc_khz;
557 struct gfn_to_hva_cache pv_time;
558 bool pv_time_enabled;
559 /* set guest stopped flag in pvclock flags field */
560 bool pvclock_set_guest_stopped_request;
561
562 struct {
563 u64 msr_val;
564 u64 last_steal;
565 u64 accum_steal;
566 struct gfn_to_hva_cache stime;
567 struct kvm_steal_time steal;
568 } st;
569
570 u64 last_guest_tsc;
571 u64 last_host_tsc;
572 u64 tsc_offset_adjustment;
573 u64 this_tsc_nsec;
574 u64 this_tsc_write;
575 u64 this_tsc_generation;
576 bool tsc_catchup;
577 bool tsc_always_catchup;
578 s8 virtual_tsc_shift;
579 u32 virtual_tsc_mult;
580 u32 virtual_tsc_khz;
581 s64 ia32_tsc_adjust_msr;
582 u64 tsc_scaling_ratio;
583
584 atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
585 unsigned nmi_pending; /* NMI queued after currently running handler */
586 bool nmi_injected; /* Trying to inject an NMI this entry */
587 bool smi_pending; /* SMI queued after currently running handler */
588
589 struct kvm_mtrr mtrr_state;
590 u64 pat;
591
592 unsigned switch_db_regs;
593 unsigned long db[KVM_NR_DB_REGS];
594 unsigned long dr6;
595 unsigned long dr7;
596 unsigned long eff_db[KVM_NR_DB_REGS];
597 unsigned long guest_debug_dr7;
598
599 u64 mcg_cap;
600 u64 mcg_status;
601 u64 mcg_ctl;
602 u64 *mce_banks;
603
604 /* Cache MMIO info */
605 u64 mmio_gva;
606 unsigned access;
607 gfn_t mmio_gfn;
608 u64 mmio_gen;
609
610 struct kvm_pmu pmu;
611
612 /* used for guest single stepping over the given code position */
613 unsigned long singlestep_rip;
614
615 struct kvm_vcpu_hv hyperv;
616
617 cpumask_var_t wbinvd_dirty_mask;
618
619 unsigned long last_retry_eip;
620 unsigned long last_retry_addr;
621
622 struct {
623 bool halted;
624 gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
625 struct gfn_to_hva_cache data;
626 u64 msr_val;
627 u32 id;
628 bool send_user_only;
629 } apf;
630
631 /* OSVW MSRs (AMD only) */
632 struct {
633 u64 length;
634 u64 status;
635 } osvw;
636
637 struct {
638 u64 msr_val;
639 struct gfn_to_hva_cache data;
640 } pv_eoi;
641
642 /*
643 * Indicates whether the access faulted on the guest's own page tables;
644 * this is set when fixing a page fault and is used to detect an
645 * unhandleable instruction.
646 */
647 bool write_fault_to_shadow_pgtable;
648
649 /* set at EPT violation at this point */
650 unsigned long exit_qualification;
651
652 /* pv related host specific info */
653 struct {
654 bool pv_unhalted;
655 } pv;
656
657 int pending_ioapic_eoi;
658 int pending_external_vector;
659};
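/*
 * As noted at the top of kvm_vcpu_arch, arch.regs[] is not accessed directly;
 * the accessors in kvm_cache_regs.h use regs_avail/regs_dirty to lazily sync
 * registers with the hardware VMCS/VMCB. Illustrative use only:
 *
 *	unsigned long rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
 *	kvm_register_write(vcpu, VCPU_REGS_RAX, rax + 1);
 *	kvm_rip_write(vcpu, kvm_rip_read(vcpu) + insn_len);
 */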
660
661struct kvm_lpage_info {
662 int disallow_lpage;
663};
664
665struct kvm_arch_memory_slot {
666 struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
667 struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
668 unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
669};
670
671/*
672 * The mode is the number of bits allocated in the LDR for the logical
673 * processor ID. It happens that these are all powers of two, which makes
674 * it very easy to detect cases where the APICs are configured for
675 * multiple modes; in that case, we cannot use the map and hence cannot
676 * use kvm_irq_delivery_to_apic_fast either.
677 */
678#define KVM_APIC_MODE_XAPIC_CLUSTER 4
679#define KVM_APIC_MODE_XAPIC_FLAT 8
680#define KVM_APIC_MODE_X2APIC 16
681
682struct kvm_apic_map {
683 struct rcu_head rcu;
684 u8 mode;
685 struct kvm_lapic *phys_map[256];
686 /* first index is cluster id second is cpu id in a cluster */
687 struct kvm_lapic *logical_map[16][16];
688};
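/*
 * Illustrative decode of a logical destination against the map above (a
 * sketch, not the in-tree helper): @mode is the number of LDR bits used for
 * the in-cluster ID, so the low @mode bits of the LDR form a per-cluster
 * destination bitmask and the remaining bits select the cluster:
 *
 *	mask    = ldr & ((1 << map->mode) - 1);
 *	cluster = ldr >> map->mode;
 *	each set bit i in mask refers to map->logical_map[cluster][i]
 *
 * e.g. in xAPIC cluster mode (mode == 4) an LDR of 0x28 addresses CPU 3 of
 * cluster 2.
 */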
689
690/* Hyper-V emulation context */
691struct kvm_hv {
692 u64 hv_guest_os_id;
693 u64 hv_hypercall;
694 u64 hv_tsc_page;
695
696 /* Hyper-v based guest crash (NT kernel bugcheck) parameters */
697 u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
698 u64 hv_crash_ctl;
699};
700
701struct kvm_arch {
702 unsigned int n_used_mmu_pages;
703 unsigned int n_requested_mmu_pages;
704 unsigned int n_max_mmu_pages;
705 unsigned int indirect_shadow_pages;
706 unsigned long mmu_valid_gen;
707 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
708 /*
709 * Hash table of struct kvm_mmu_page.
710 */
711 struct list_head active_mmu_pages;
712 struct list_head zapped_obsolete_pages;
713 struct kvm_page_track_notifier_node mmu_sp_tracker;
714 struct kvm_page_track_notifier_head track_notifier_head;
715
716 struct list_head assigned_dev_head;
717 struct iommu_domain *iommu_domain;
718 bool iommu_noncoherent;
719#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
720 atomic_t noncoherent_dma_count;
721#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
722 atomic_t assigned_device_count;
723 struct kvm_pic *vpic;
724 struct kvm_ioapic *vioapic;
725 struct kvm_pit *vpit;
726 atomic_t vapics_in_nmi_mode;
727 struct mutex apic_map_lock;
728 struct kvm_apic_map *apic_map;
729
730 unsigned int tss_addr;
731 bool apic_access_page_done;
732
733 gpa_t wall_clock;
734
735 bool ept_identity_pagetable_done;
736 gpa_t ept_identity_map_addr;
737
738 unsigned long irq_sources_bitmap;
739 s64 kvmclock_offset;
740 raw_spinlock_t tsc_write_lock;
741 u64 last_tsc_nsec;
742 u64 last_tsc_write;
743 u32 last_tsc_khz;
744 u64 cur_tsc_nsec;
745 u64 cur_tsc_write;
746 u64 cur_tsc_offset;
747 u64 cur_tsc_generation;
748 int nr_vcpus_matched_tsc;
749
750 spinlock_t pvclock_gtod_sync_lock;
751 bool use_master_clock;
752 u64 master_kernel_ns;
753 cycle_t master_cycle_now;
754 struct delayed_work kvmclock_update_work;
755 struct delayed_work kvmclock_sync_work;
756
757 struct kvm_xen_hvm_config xen_hvm_config;
758
759 /* reads protected by irq_srcu, writes by irq_lock */
760 struct hlist_head mask_notifier_list;
761
762 struct kvm_hv hyperv;
763
764 #ifdef CONFIG_KVM_MMU_AUDIT
765 int audit_point;
766 #endif
767
768 bool boot_vcpu_runs_old_kvmclock;
769 u32 bsp_vcpu_id;
770
771 u64 disabled_quirks;
772
773 bool irqchip_split;
774 u8 nr_reserved_ioapic_pins;
775
776 bool disabled_lapic_found;
777};
778
779struct kvm_vm_stat {
780 u32 mmu_shadow_zapped;
781 u32 mmu_pte_write;
782 u32 mmu_pte_updated;
783 u32 mmu_pde_zapped;
784 u32 mmu_flooded;
785 u32 mmu_recycled;
786 u32 mmu_cache_miss;
787 u32 mmu_unsync;
788 u32 remote_tlb_flush;
789 u32 lpages;
790};
791
792struct kvm_vcpu_stat {
793 u32 pf_fixed;
794 u32 pf_guest;
795 u32 tlb_flush;
796 u32 invlpg;
797
798 u32 exits;
799 u32 io_exits;
800 u32 mmio_exits;
801 u32 signal_exits;
802 u32 irq_window_exits;
803 u32 nmi_window_exits;
804 u32 halt_exits;
805 u32 halt_successful_poll;
806 u32 halt_attempted_poll;
807 u32 halt_wakeup;
808 u32 request_irq_exits;
809 u32 irq_exits;
810 u32 host_state_reload;
811 u32 efer_reload;
812 u32 fpu_reload;
813 u32 insn_emulation;
814 u32 insn_emulation_fail;
815 u32 hypercalls;
816 u32 irq_injections;
817 u32 nmi_injections;
818};
819
820struct x86_instruction_info;
821
822struct msr_data {
823 bool host_initiated;
824 u32 index;
825 u64 data;
826};
827
828struct kvm_lapic_irq {
829 u32 vector;
830 u16 delivery_mode;
831 u16 dest_mode;
832 bool level;
833 u16 trig_mode;
834 u32 shorthand;
835 u32 dest_id;
836 bool msi_redir_hint;
837};
838
839struct kvm_x86_ops {
840 int (*cpu_has_kvm_support)(void); /* __init */
841 int (*disabled_by_bios)(void); /* __init */
842 int (*hardware_enable)(void);
843 void (*hardware_disable)(void);
844 void (*check_processor_compatibility)(void *rtn);
845 int (*hardware_setup)(void); /* __init */
846 void (*hardware_unsetup)(void); /* __exit */
847 bool (*cpu_has_accelerated_tpr)(void);
848 bool (*cpu_has_high_real_mode_segbase)(void);
849 void (*cpuid_update)(struct kvm_vcpu *vcpu);
850
851 /* Create, but do not attach this VCPU */
852 struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
853 void (*vcpu_free)(struct kvm_vcpu *vcpu);
854 void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);
855
856 void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
857 void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
858 void (*vcpu_put)(struct kvm_vcpu *vcpu);
859
860 void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
861 int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
862 int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
863 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
864 void (*get_segment)(struct kvm_vcpu *vcpu,
865 struct kvm_segment *var, int seg);
866 int (*get_cpl)(struct kvm_vcpu *vcpu);
867 void (*set_segment)(struct kvm_vcpu *vcpu,
868 struct kvm_segment *var, int seg);
869 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
870 void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
871 void (*decache_cr3)(struct kvm_vcpu *vcpu);
872 void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
873 void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
874 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
875 int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
876 void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
877 void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
878 void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
879 void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
880 void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
881 u64 (*get_dr6)(struct kvm_vcpu *vcpu);
882 void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
883 void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
884 void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
885 void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
886 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
887 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
888 u32 (*get_pkru)(struct kvm_vcpu *vcpu);
889 void (*fpu_activate)(struct kvm_vcpu *vcpu);
890 void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
891
892 void (*tlb_flush)(struct kvm_vcpu *vcpu);
893
894 void (*run)(struct kvm_vcpu *vcpu);
895 int (*handle_exit)(struct kvm_vcpu *vcpu);
896 void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
897 void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
898 u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
899 void (*patch_hypercall)(struct kvm_vcpu *vcpu,
900 unsigned char *hypercall_addr);
901 void (*set_irq)(struct kvm_vcpu *vcpu);
902 void (*set_nmi)(struct kvm_vcpu *vcpu);
903 void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
904 bool has_error_code, u32 error_code,
905 bool reinject);
906 void (*cancel_injection)(struct kvm_vcpu *vcpu);
907 int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
908 int (*nmi_allowed)(struct kvm_vcpu *vcpu);
909 bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
910 void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
911 void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
912 void (*enable_irq_window)(struct kvm_vcpu *vcpu);
913 void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
914 bool (*get_enable_apicv)(void);
915 void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
916 void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
917 void (*hwapic_isr_update)(struct kvm *kvm, int isr);
918 void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
919 void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
920 void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
921 void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
922 void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
923 int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
924 int (*get_tdp_level)(void);
925 u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
926 int (*get_lpage_level)(void);
927 bool (*rdtscp_supported)(void);
928 bool (*invpcid_supported)(void);
929 void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);
930
931 void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
932
933 void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
934
935 bool (*has_wbinvd_exit)(void);
936
937 u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
938 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
939
940 u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
941
942 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
943
944 int (*check_intercept)(struct kvm_vcpu *vcpu,
945 struct x86_instruction_info *info,
946 enum x86_intercept_stage stage);
947 void (*handle_external_intr)(struct kvm_vcpu *vcpu);
948 bool (*mpx_supported)(void);
949 bool (*xsaves_supported)(void);
950
951 int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
952
953 void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
954
955 /*
956 * Arch-specific dirty logging hooks. These hooks are only supposed to
957 * be valid if the specific arch has hardware-accelerated dirty logging
958 * mechanism. Currently only for PML on VMX.
959 *
960 * - slot_enable_log_dirty:
961 * called when enabling log dirty mode for the slot.
962 * - slot_disable_log_dirty:
963 * called when disabling log dirty mode for the slot.
964 * also called when slot is created with log dirty disabled.
965 * - flush_log_dirty:
966 * called before reporting dirty_bitmap to userspace.
967 * - enable_log_dirty_pt_masked:
968 * called when reenabling log dirty for the GFNs in the mask after
969 * corresponding bits are cleared in slot->dirty_bitmap.
970 */
971 void (*slot_enable_log_dirty)(struct kvm *kvm,
972 struct kvm_memory_slot *slot);
973 void (*slot_disable_log_dirty)(struct kvm *kvm,
974 struct kvm_memory_slot *slot);
975 void (*flush_log_dirty)(struct kvm *kvm);
976 void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
977 struct kvm_memory_slot *slot,
978 gfn_t offset, unsigned long mask);
979 /* pmu operations of sub-arch */
980 const struct kvm_pmu_ops *pmu_ops;
981
982 /*
983 * Architecture specific hooks for vCPU blocking due to
984 * HLT instruction.
985 * Returns for .pre_block():
986 * - 0 means continue to block the vCPU.
987 * - 1 means we cannot block the vCPU since some event
988 * happens during this period, such as, 'ON' bit in
989 * posted-interrupts descriptor is set.
990 */
991 int (*pre_block)(struct kvm_vcpu *vcpu);
992 void (*post_block)(struct kvm_vcpu *vcpu);
993 int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
994 uint32_t guest_irq, bool set);
995};
996
997struct kvm_arch_async_pf {
998 u32 token;
999 gfn_t gfn;
1000 unsigned long cr3;
1001 bool direct_map;
1002};
1003
1004extern struct kvm_x86_ops *kvm_x86_ops;
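/*
 * A minimal sketch of how a vendor module (kvm_intel or kvm_amd) provides
 * this table. All "example_*" names are placeholders; only the pattern of
 * filling in kvm_x86_ops and handing it to kvm_init() reflects the real
 * modules (vmx.c / svm.c):
 */
#if 0	/* illustration only */
static struct kvm_x86_ops example_x86_ops = {
	.cpu_has_kvm_support	= example_cpu_has_kvm_support,
	.disabled_by_bios	= example_disabled_by_bios,
	.hardware_enable	= example_hardware_enable,
	.hardware_disable	= example_hardware_disable,
	.vcpu_create		= example_vcpu_create,
	.run			= example_vcpu_run,
	.handle_exit		= example_handle_exit,
	/* ... and so on for every callback the core relies on ... */
};

static int __init example_init(void)
{
	return kvm_init(&example_x86_ops, sizeof(struct example_vcpu),
			__alignof__(struct example_vcpu), THIS_MODULE);
}
#endif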
1005
1006int kvm_mmu_module_init(void);
1007void kvm_mmu_module_exit(void);
1008
1009void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
1010int kvm_mmu_create(struct kvm_vcpu *vcpu);
1011void kvm_mmu_setup(struct kvm_vcpu *vcpu);
1012void kvm_mmu_init_vm(struct kvm *kvm);
1013void kvm_mmu_uninit_vm(struct kvm *kvm);
1014void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
1015 u64 dirty_mask, u64 nx_mask, u64 x_mask);
1016
1017void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
1018void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
1019 struct kvm_memory_slot *memslot);
1020void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
1021 const struct kvm_memory_slot *memslot);
1022void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
1023 struct kvm_memory_slot *memslot);
1024void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
1025 struct kvm_memory_slot *memslot);
1026void kvm_mmu_slot_set_dirty(struct kvm *kvm,
1027 struct kvm_memory_slot *memslot);
1028void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1029 struct kvm_memory_slot *slot,
1030 gfn_t gfn_offset, unsigned long mask);
1031void kvm_mmu_zap_all(struct kvm *kvm);
1032void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
1033unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
1034void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
1035
1036int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
1037
1038int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1039 const void *val, int bytes);
1040
1041struct kvm_irq_mask_notifier {
1042 void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
1043 int irq;
1044 struct hlist_node link;
1045};
1046
1047void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
1048 struct kvm_irq_mask_notifier *kimn);
1049void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
1050 struct kvm_irq_mask_notifier *kimn);
1051void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
1052 bool mask);
1053
1054extern bool tdp_enabled;
1055
1056u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
1057
1058/* control of guest tsc rate supported? */
1059extern bool kvm_has_tsc_control;
1060/* maximum supported tsc_khz for guests */
1061extern u32 kvm_max_guest_tsc_khz;
1062/* number of bits of the fractional part of the TSC scaling ratio */
1063extern u8 kvm_tsc_scaling_ratio_frac_bits;
1064/* maximum allowed value of TSC scaling ratio */
1065extern u64 kvm_max_tsc_scaling_ratio;
1066
1067enum emulation_result {
1068 EMULATE_DONE, /* no further processing */
1069 EMULATE_USER_EXIT, /* kvm_run ready for userspace exit */
1070 EMULATE_FAIL, /* can't emulate this instruction */
1071};
1072
1073#define EMULTYPE_NO_DECODE (1 << 0)
1074#define EMULTYPE_TRAP_UD (1 << 1)
1075#define EMULTYPE_SKIP (1 << 2)
1076#define EMULTYPE_RETRY (1 << 3)
1077#define EMULTYPE_NO_REEXECUTE (1 << 4)
1078int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
1079 int emulation_type, void *insn, int insn_len);
1080
1081static inline int emulate_instruction(struct kvm_vcpu *vcpu,
1082 int emulation_type)
1083{
1084 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
1085}
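/*
 * Typical caller pattern (illustrative; exit handlers in vmx.c/svm.c follow
 * this shape): the emulation_result values above tell the caller whether to
 * keep running the guest, bounce to userspace, or give up.
 *
 *	switch (emulate_instruction(vcpu, EMULTYPE_TRAP_UD)) {
 *	case EMULATE_DONE:	return 1;	keep running the guest
 *	case EMULATE_USER_EXIT:	return 0;	kvm_run is ready for userspace
 *	case EMULATE_FAIL:	inject #UD or report the failure
 *	default:		break;
 *	}
 */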
1086
1087void kvm_enable_efer_bits(u64);
1088bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
1089int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
1090int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
1091
1092struct x86_emulate_ctxt;
1093
1094int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
1095void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
1096int kvm_emulate_halt(struct kvm_vcpu *vcpu);
1097int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
1098int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
1099
1100void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
1101int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
1102void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
1103
1104int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
1105 int reason, bool has_error_code, u32 error_code);
1106
1107int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
1108int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
1109int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
1110int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
1111int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
1112int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
1113unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
1114void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
1115void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
1116int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
1117
1118int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
1119int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
1120
1121unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
1122void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
1123bool kvm_rdpmc(struct kvm_vcpu *vcpu);
1124
1125void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
1126void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
1127void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
1128void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
1129void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
1130int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
1131 gfn_t gfn, void *data, int offset, int len,
1132 u32 access);
1133bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
1134bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
1135
1136static inline int __kvm_irq_line_state(unsigned long *irq_state,
1137 int irq_source_id, int level)
1138{
1139 /* Logical OR for level trig interrupt */
1140 if (level)
1141 __set_bit(irq_source_id, irq_state);
1142 else
1143 __clear_bit(irq_source_id, irq_state);
1144
1145 return !!(*irq_state);
1146}
1147
1148int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
1149void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
1150
1151void kvm_inject_nmi(struct kvm_vcpu *vcpu);
1152
1153int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
1154int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
1155void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
1156int kvm_mmu_load(struct kvm_vcpu *vcpu);
1157void kvm_mmu_unload(struct kvm_vcpu *vcpu);
1158void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
1159gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
1160 struct x86_exception *exception);
1161gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
1162 struct x86_exception *exception);
1163gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
1164 struct x86_exception *exception);
1165gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
1166 struct x86_exception *exception);
1167gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
1168 struct x86_exception *exception);
1169
1170void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
1171
1172int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
1173
1174int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
1175 void *insn, int insn_len);
1176void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
1177void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
1178
1179void kvm_enable_tdp(void);
1180void kvm_disable_tdp(void);
1181
1182static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
1183 struct x86_exception *exception)
1184{
1185 return gpa;
1186}
1187
1188static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
1189{
1190 struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
1191
1192 return (struct kvm_mmu_page *)page_private(page);
1193}
1194
1195static inline u16 kvm_read_ldt(void)
1196{
1197 u16 ldt;
1198 asm("sldt %0" : "=g"(ldt));
1199 return ldt;
1200}
1201
1202static inline void kvm_load_ldt(u16 sel)
1203{
1204 asm("lldt %0" : : "rm"(sel));
1205}
1206
1207#ifdef CONFIG_X86_64
1208static inline unsigned long read_msr(unsigned long msr)
1209{
1210 u64 value;
1211
1212 rdmsrl(msr, value);
1213 return value;
1214}
1215#endif
1216
1217static inline u32 get_rdx_init_val(void)
1218{
1219 return 0x600; /* P6 family */
1220}
1221
1222static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
1223{
1224 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
1225}
1226
1227static inline u64 get_canonical(u64 la)
1228{
1229 return ((int64_t)la << 16) >> 16;
1230}
1231
1232static inline bool is_noncanonical_address(u64 la)
1233{
1234#ifdef CONFIG_X86_64
1235 return get_canonical(la) != la;
1236#else
1237 return false;
1238#endif
1239}
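/*
 * Worked example for the helpers above (they hard-code the 48-bit virtual
 * address width, i.e. bits 63:47 must all equal bit 47):
 *
 *	get_canonical(0x0000800000000000) == 0xffff800000000000  -> non-canonical
 *	get_canonical(0xffff800000000000) == 0xffff800000000000  -> canonical
 *	get_canonical(0x00007fffffffffff) == 0x00007fffffffffff  -> canonical
 */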
1240
1241#define TSS_IOPB_BASE_OFFSET 0x66
1242#define TSS_BASE_SIZE 0x68
1243#define TSS_IOPB_SIZE (65536 / 8)
1244#define TSS_REDIRECTION_SIZE (256 / 8)
1245#define RMODE_TSS_SIZE \
1246 (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
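/*
 * For reference: with the values above RMODE_TSS_SIZE evaluates to
 * 0x68 + 32 + 8192 + 1 = 8329 bytes, i.e. the real-mode TSS (including the
 * I/O bitmap and its terminating 0xff byte) spans three pages.
 */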
1247
1248enum {
1249 TASK_SWITCH_CALL = 0,
1250 TASK_SWITCH_IRET = 1,
1251 TASK_SWITCH_JMP = 2,
1252 TASK_SWITCH_GATE = 3,
1253};
1254
1255#define HF_GIF_MASK (1 << 0)
1256#define HF_HIF_MASK (1 << 1)
1257#define HF_VINTR_MASK (1 << 2)
1258#define HF_NMI_MASK (1 << 3)
1259#define HF_IRET_MASK (1 << 4)
1260#define HF_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */
1261#define HF_SMM_MASK (1 << 6)
1262#define HF_SMM_INSIDE_NMI_MASK (1 << 7)
1263
1264#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
1265#define KVM_ADDRESS_SPACE_NUM 2
1266
1267#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
1268#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
1269
1270/*
1271 * Hardware virtualization extension instructions may fault if a
1272 * reboot turns off virtualization while processes are running.
1273 * Trap the fault and ignore the instruction if that happens.
1274 */
1275asmlinkage void kvm_spurious_fault(void);
1276
1277#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
1278 "666: " insn "\n\t" \
1279 "668: \n\t" \
1280 ".pushsection .fixup, \"ax\" \n" \
1281 "667: \n\t" \
1282 cleanup_insn "\n\t" \
1283 "cmpb $0, kvm_rebooting \n\t" \
1284 "jne 668b \n\t" \
1285 __ASM_SIZE(push) " $666b \n\t" \
1286 "call kvm_spurious_fault \n\t" \
1287 ".popsection \n\t" \
1288 _ASM_EXTABLE(666b, 667b)
1289
1290#define __kvm_handle_fault_on_reboot(insn) \
1291 ____kvm_handle_fault_on_reboot(insn, "")
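/*
 * Illustrative use (roughly how the VMX code wraps its instructions):
 *
 *	asm volatile(__kvm_handle_fault_on_reboot("vmxoff") : : : "cc");
 *
 * If the wrapped instruction faults, the exception table sends execution to
 * the fixup at 667: after the optional cleanup_insn, kvm_rebooting is
 * checked; when a reboot is in progress the instruction is simply skipped
 * (jump back to 668), otherwise kvm_spurious_fault() is called with the
 * faulting address on the stack.
 */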
1292
1293#define KVM_ARCH_WANT_MMU_NOTIFIER
1294int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
1295int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
1296int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
1297int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
1298void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
1299int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
1300int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
1301int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
1302int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
1303void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
1304void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
1305void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
1306 unsigned long address);
1307
1308void kvm_define_shared_msr(unsigned index, u32 msr);
1309int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
1310
1311u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
1312u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
1313
1314unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
1315bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
1316
1317void kvm_make_mclock_inprogress_request(struct kvm *kvm);
1318void kvm_make_scan_ioapic_request(struct kvm *kvm);
1319
1320void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1321 struct kvm_async_pf *work);
1322void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1323 struct kvm_async_pf *work);
1324void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1325 struct kvm_async_pf *work);
1326bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
1327extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1328
1329void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
1330
1331int kvm_is_in_guest(void);
1332
1333int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
1334int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
1335bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
1336bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
1337
1338bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
1339 struct kvm_vcpu **dest_vcpu);
1340
1341void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
1342 struct kvm_lapic_irq *irq);
1343
1344static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
1345static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
1346
1347#endif /* _ASM_X86_KVM_HOST_H */
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This header defines architecture specific interfaces, x86 version
6 */
7
8#ifndef _ASM_X86_KVM_HOST_H
9#define _ASM_X86_KVM_HOST_H
10
11#include <linux/types.h>
12#include <linux/mm.h>
13#include <linux/mmu_notifier.h>
14#include <linux/tracepoint.h>
15#include <linux/cpumask.h>
16#include <linux/irq_work.h>
17#include <linux/irq.h>
18#include <linux/workqueue.h>
19
20#include <linux/kvm.h>
21#include <linux/kvm_para.h>
22#include <linux/kvm_types.h>
23#include <linux/perf_event.h>
24#include <linux/pvclock_gtod.h>
25#include <linux/clocksource.h>
26#include <linux/irqbypass.h>
27#include <linux/hyperv.h>
28#include <linux/kfifo.h>
29
30#include <asm/apic.h>
31#include <asm/pvclock-abi.h>
32#include <asm/desc.h>
33#include <asm/mtrr.h>
34#include <asm/msr-index.h>
35#include <asm/asm.h>
36#include <asm/kvm_page_track.h>
37#include <asm/kvm_vcpu_regs.h>
38#include <asm/hyperv-tlfs.h>
39
40#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
41
42/*
43 * CONFIG_KVM_MAX_NR_VCPUS is defined iff CONFIG_KVM!=n, provide a dummy max if
44 * KVM is disabled (arbitrarily use the default from CONFIG_KVM_MAX_NR_VCPUS).
45 */
46#ifdef CONFIG_KVM_MAX_NR_VCPUS
47#define KVM_MAX_VCPUS CONFIG_KVM_MAX_NR_VCPUS
48#else
49#define KVM_MAX_VCPUS 1024
50#endif
51
52/*
53 * In x86, the VCPU ID corresponds to the APIC ID, and APIC IDs
54 * might be larger than the actual number of VCPUs because the
55 * APIC ID encodes CPU topology information.
56 *
57 * In the worst case, we'll need less than one extra bit for the
58 * Core ID, and less than one extra bit for the Package (Die) ID,
59 * so ratio of 4 should be enough.
60 */
61#define KVM_VCPU_ID_RATIO 4
62#define KVM_MAX_VCPU_IDS (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO)
63
64/* memory slots that are not exposed to userspace */
65#define KVM_INTERNAL_MEM_SLOTS 3
66
67#define KVM_HALT_POLL_NS_DEFAULT 200000
68
69#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
70
71#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
72 KVM_DIRTY_LOG_INITIALLY_SET)
73
74#define KVM_BUS_LOCK_DETECTION_VALID_MODE (KVM_BUS_LOCK_DETECTION_OFF | \
75 KVM_BUS_LOCK_DETECTION_EXIT)
76
77#define KVM_X86_NOTIFY_VMEXIT_VALID_BITS (KVM_X86_NOTIFY_VMEXIT_ENABLED | \
78 KVM_X86_NOTIFY_VMEXIT_USER)
79
80/* x86-specific vcpu->requests bit members */
81#define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0)
82#define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1)
83#define KVM_REQ_TRIPLE_FAULT KVM_ARCH_REQ(2)
84#define KVM_REQ_MMU_SYNC KVM_ARCH_REQ(3)
85#define KVM_REQ_CLOCK_UPDATE KVM_ARCH_REQ(4)
86#define KVM_REQ_LOAD_MMU_PGD KVM_ARCH_REQ(5)
87#define KVM_REQ_EVENT KVM_ARCH_REQ(6)
88#define KVM_REQ_APF_HALT KVM_ARCH_REQ(7)
89#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(8)
90#define KVM_REQ_NMI KVM_ARCH_REQ(9)
91#define KVM_REQ_PMU KVM_ARCH_REQ(10)
92#define KVM_REQ_PMI KVM_ARCH_REQ(11)
93#ifdef CONFIG_KVM_SMM
94#define KVM_REQ_SMI KVM_ARCH_REQ(12)
95#endif
96#define KVM_REQ_MASTERCLOCK_UPDATE KVM_ARCH_REQ(13)
97#define KVM_REQ_MCLOCK_INPROGRESS \
98 KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
99#define KVM_REQ_SCAN_IOAPIC \
100 KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
101#define KVM_REQ_GLOBAL_CLOCK_UPDATE KVM_ARCH_REQ(16)
102#define KVM_REQ_APIC_PAGE_RELOAD \
103 KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
104#define KVM_REQ_HV_CRASH KVM_ARCH_REQ(18)
105#define KVM_REQ_IOAPIC_EOI_EXIT KVM_ARCH_REQ(19)
106#define KVM_REQ_HV_RESET KVM_ARCH_REQ(20)
107#define KVM_REQ_HV_EXIT KVM_ARCH_REQ(21)
108#define KVM_REQ_HV_STIMER KVM_ARCH_REQ(22)
109#define KVM_REQ_LOAD_EOI_EXITMAP KVM_ARCH_REQ(23)
110#define KVM_REQ_GET_NESTED_STATE_PAGES KVM_ARCH_REQ(24)
111#define KVM_REQ_APICV_UPDATE \
112 KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
113#define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
114#define KVM_REQ_TLB_FLUSH_GUEST \
115 KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
116#define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
117#define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
118#define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
119 KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
120#define KVM_REQ_MMU_FREE_OBSOLETE_ROOTS \
121 KVM_ARCH_REQ_FLAGS(31, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
122#define KVM_REQ_HV_TLB_FLUSH \
123 KVM_ARCH_REQ_FLAGS(32, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
124
125#define CR0_RESERVED_BITS \
126 (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
127 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
128 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
129
130#define CR4_RESERVED_BITS \
131 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
132 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
133 | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
134 | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
135 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
136 | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP \
137 | X86_CR4_LAM_SUP))
138
139#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
140
141
142
143#define INVALID_PAGE (~(hpa_t)0)
144#define VALID_PAGE(x) ((x) != INVALID_PAGE)
145
146/* KVM Hugepage definitions for x86 */
147#define KVM_MAX_HUGEPAGE_LEVEL PG_LEVEL_1G
148#define KVM_NR_PAGE_SIZES (KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
149#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
150#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
151#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
152#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
153#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
154
155#define KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO 50
156#define KVM_MIN_ALLOC_MMU_PAGES 64UL
157#define KVM_MMU_HASH_SHIFT 12
158#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
159#define KVM_MIN_FREE_MMU_PAGES 5
160#define KVM_REFILL_PAGES 25
161#define KVM_MAX_CPUID_ENTRIES 256
162#define KVM_NR_FIXED_MTRR_REGION 88
163#define KVM_NR_VAR_MTRR 8
164
165#define ASYNC_PF_PER_VCPU 64
166
167enum kvm_reg {
168 VCPU_REGS_RAX = __VCPU_REGS_RAX,
169 VCPU_REGS_RCX = __VCPU_REGS_RCX,
170 VCPU_REGS_RDX = __VCPU_REGS_RDX,
171 VCPU_REGS_RBX = __VCPU_REGS_RBX,
172 VCPU_REGS_RSP = __VCPU_REGS_RSP,
173 VCPU_REGS_RBP = __VCPU_REGS_RBP,
174 VCPU_REGS_RSI = __VCPU_REGS_RSI,
175 VCPU_REGS_RDI = __VCPU_REGS_RDI,
176#ifdef CONFIG_X86_64
177 VCPU_REGS_R8 = __VCPU_REGS_R8,
178 VCPU_REGS_R9 = __VCPU_REGS_R9,
179 VCPU_REGS_R10 = __VCPU_REGS_R10,
180 VCPU_REGS_R11 = __VCPU_REGS_R11,
181 VCPU_REGS_R12 = __VCPU_REGS_R12,
182 VCPU_REGS_R13 = __VCPU_REGS_R13,
183 VCPU_REGS_R14 = __VCPU_REGS_R14,
184 VCPU_REGS_R15 = __VCPU_REGS_R15,
185#endif
186 VCPU_REGS_RIP,
187 NR_VCPU_REGS,
188
189 VCPU_EXREG_PDPTR = NR_VCPU_REGS,
190 VCPU_EXREG_CR0,
191 VCPU_EXREG_CR3,
192 VCPU_EXREG_CR4,
193 VCPU_EXREG_RFLAGS,
194 VCPU_EXREG_SEGMENTS,
195 VCPU_EXREG_EXIT_INFO_1,
196 VCPU_EXREG_EXIT_INFO_2,
197};
198
199enum {
200 VCPU_SREG_ES,
201 VCPU_SREG_CS,
202 VCPU_SREG_SS,
203 VCPU_SREG_DS,
204 VCPU_SREG_FS,
205 VCPU_SREG_GS,
206 VCPU_SREG_TR,
207 VCPU_SREG_LDTR,
208};
209
210enum exit_fastpath_completion {
211 EXIT_FASTPATH_NONE,
212 EXIT_FASTPATH_REENTER_GUEST,
213 EXIT_FASTPATH_EXIT_HANDLED,
214};
215typedef enum exit_fastpath_completion fastpath_t;
216
217struct x86_emulate_ctxt;
218struct x86_exception;
219union kvm_smram;
220enum x86_intercept;
221enum x86_intercept_stage;
222
223#define KVM_NR_DB_REGS 4
224
225#define DR6_BUS_LOCK (1 << 11)
226#define DR6_BD (1 << 13)
227#define DR6_BS (1 << 14)
228#define DR6_BT (1 << 15)
229#define DR6_RTM (1 << 16)
230/*
231 * DR6_ACTIVE_LOW combines fixed-1 and active-low bits.
232 * We can regard all the bits in DR6_FIXED_1 as active_low bits;
233 * they will never be 0 for now, but when they are defined
234 * in the future it will require no code change.
235 *
236 * DR6_ACTIVE_LOW is also used as the init/reset value for DR6.
237 */
238#define DR6_ACTIVE_LOW 0xffff0ff0
239#define DR6_VOLATILE 0x0001e80f
240#define DR6_FIXED_1 (DR6_ACTIVE_LOW & ~DR6_VOLATILE)
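/*
 * For reference: with the values above DR6_FIXED_1 evaluates to 0xfffe07f0,
 * i.e. the active-low/always-one bits with the guest-volatile ones masked
 * out.
 */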
241
242#define DR7_BP_EN_MASK 0x000000ff
243#define DR7_GE (1 << 9)
244#define DR7_GD (1 << 13)
245#define DR7_FIXED_1 0x00000400
246#define DR7_VOLATILE 0xffff2bff
247
248#define KVM_GUESTDBG_VALID_MASK \
249 (KVM_GUESTDBG_ENABLE | \
250 KVM_GUESTDBG_SINGLESTEP | \
251 KVM_GUESTDBG_USE_HW_BP | \
252 KVM_GUESTDBG_USE_SW_BP | \
253 KVM_GUESTDBG_INJECT_BP | \
254 KVM_GUESTDBG_INJECT_DB | \
255 KVM_GUESTDBG_BLOCKIRQ)
256
257
258#define PFERR_PRESENT_BIT 0
259#define PFERR_WRITE_BIT 1
260#define PFERR_USER_BIT 2
261#define PFERR_RSVD_BIT 3
262#define PFERR_FETCH_BIT 4
263#define PFERR_PK_BIT 5
264#define PFERR_SGX_BIT 15
265#define PFERR_GUEST_FINAL_BIT 32
266#define PFERR_GUEST_PAGE_BIT 33
267#define PFERR_IMPLICIT_ACCESS_BIT 48
268
269#define PFERR_PRESENT_MASK BIT(PFERR_PRESENT_BIT)
270#define PFERR_WRITE_MASK BIT(PFERR_WRITE_BIT)
271#define PFERR_USER_MASK BIT(PFERR_USER_BIT)
272#define PFERR_RSVD_MASK BIT(PFERR_RSVD_BIT)
273#define PFERR_FETCH_MASK BIT(PFERR_FETCH_BIT)
274#define PFERR_PK_MASK BIT(PFERR_PK_BIT)
275#define PFERR_SGX_MASK BIT(PFERR_SGX_BIT)
276#define PFERR_GUEST_FINAL_MASK BIT_ULL(PFERR_GUEST_FINAL_BIT)
277#define PFERR_GUEST_PAGE_MASK BIT_ULL(PFERR_GUEST_PAGE_BIT)
278#define PFERR_IMPLICIT_ACCESS BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)
279
280#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \
281 PFERR_WRITE_MASK | \
282 PFERR_PRESENT_MASK)
283
284/* apic attention bits */
285#define KVM_APIC_CHECK_VAPIC 0
286/*
287 * The following bit is set with PV-EOI, unset on EOI.
288 * We detect guest changes to PV-EOI by comparing
289 * this bit with the PV-EOI flag in guest memory.
290 * See the implementation in apic_update_pv_eoi.
291 */
292#define KVM_APIC_PV_EOI_PENDING 1
293
294struct kvm_kernel_irq_routing_entry;
295
296/*
297 * kvm_mmu_page_role tracks the properties of a shadow page (where shadow page
298 * also includes TDP pages) to determine whether or not a page can be used in
299 * the given MMU context. This is a subset of the overall kvm_cpu_role to
300 * minimize the size of kvm_memory_slot.arch.gfn_write_track, i.e. allows
301 * allocating 2 bytes per gfn instead of 4 bytes per gfn.
302 *
303 * Upper-level shadow pages having gptes are tracked for write-protection via
304 * gfn_write_track. As above, gfn_write_track is a 16 bit counter, so KVM must
305 * not create more than 2^16-1 upper-level shadow pages at a single gfn,
306 * otherwise gfn_write_track will overflow and explosions will ensue.
307 *
308 * A unique shadow page (SP) for a gfn is created if and only if an existing SP
309 * cannot be reused. The ability to reuse a SP is tracked by its role, which
310 * incorporates various mode bits and properties of the SP. Roughly speaking,
311 * the number of unique SPs that can theoretically be created is 2^n, where n
312 * is the number of bits that are used to compute the role.
313 *
314 * But, even though there are 19 bits in the mask below, not all combinations
315 * of modes and flags are possible:
316 *
317 * - invalid shadow pages are not accounted, so the bits are effectively 18
318 *
319 * - quadrant will only be used if has_4_byte_gpte=1 (non-PAE paging);
320 * execonly and ad_disabled are only used for nested EPT which has
321 * has_4_byte_gpte=0. Therefore, 2 bits are always unused.
322 *
323 * - the 4 bits of level are effectively limited to the values 2/3/4/5,
324 * as 4k SPs are not tracked (allowed to go unsync). In addition non-PAE
325 * paging has exactly one upper level, making level completely redundant
326 * when has_4_byte_gpte=1.
327 *
328 * - on top of this, smep_andnot_wp and smap_andnot_wp are only set if
329 * cr0_wp=0, therefore these three bits only give rise to 5 possibilities.
330 *
331 * Therefore, the maximum number of possible upper-level shadow pages for a
332 * single gfn is a bit less than 2^13.
333 */
334union kvm_mmu_page_role {
335 u32 word;
336 struct {
337 unsigned level:4;
338 unsigned has_4_byte_gpte:1;
339 unsigned quadrant:2;
340 unsigned direct:1;
341 unsigned access:3;
342 unsigned invalid:1;
343 unsigned efer_nx:1;
344 unsigned cr0_wp:1;
345 unsigned smep_andnot_wp:1;
346 unsigned smap_andnot_wp:1;
347 unsigned ad_disabled:1;
348 unsigned guest_mode:1;
349 unsigned passthrough:1;
350 unsigned :5;
351
352 /*
353 * This is left at the top of the word so that
354 * kvm_memslots_for_spte_role can extract it with a
355 * simple shift. While there is room, give it a whole
356 * byte so it is also faster to load it from memory.
357 */
358 unsigned smm:8;
359 };
360};
361
362/*
363 * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
364 * relevant to the current MMU configuration. When loading CR0, CR4, or EFER,
365 * including on nested transitions, if nothing in the full role changes then
366 * MMU re-configuration can be skipped. @valid bit is set on first usage so we
367 * don't treat all-zero structure as valid data.
368 *
369 * The properties that are tracked in the extended role but not the page role
370 * are for things that either (a) do not affect the validity of the shadow page
371 * or (b) are indirectly reflected in the shadow page's role. For example,
372 * CR4.PKE only affects permission checks for software walks of the guest page
373 * tables (because KVM doesn't support Protection Keys with shadow paging), and
374 * CR0.PG, CR4.PAE, and CR4.PSE are indirectly reflected in role.level.
375 *
376 * Note, SMEP and SMAP are not redundant with sm*p_andnot_wp in the page role.
377 * If CR0.WP=1, KVM can reuse shadow pages for the guest regardless of SMEP and
378 * SMAP, but the MMU's permission checks for software walks need to be SMEP and
379 * SMAP aware regardless of CR0.WP.
380 */
381union kvm_mmu_extended_role {
382 u32 word;
383 struct {
384 unsigned int valid:1;
385 unsigned int execonly:1;
386 unsigned int cr4_pse:1;
387 unsigned int cr4_pke:1;
388 unsigned int cr4_smap:1;
389 unsigned int cr4_smep:1;
390 unsigned int cr4_la57:1;
391 unsigned int efer_lma:1;
392 };
393};
394
395union kvm_cpu_role {
396 u64 as_u64;
397 struct {
398 union kvm_mmu_page_role base;
399 union kvm_mmu_extended_role ext;
400 };
401};
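
/*
 * Illustrative sketch (an assumption, not the actual KVM code): the point of
 * packing everything into a single 64-bit value is that one full-role
 * comparison decides whether MMU re-configuration can be skipped when
 * CR0/CR4/EFER change:
 *
 *	union kvm_cpu_role new_role = { 0 };	// computed from CR0/CR4/EFER
 *
 *	if (new_role.as_u64 == mmu->cpu_role.as_u64)
 *		return;				// nothing changed, keep the MMU
 *	mmu->cpu_role = new_role;
 *	// ... re-derive paging callbacks, reserved bits, etc.
 */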
402
403struct kvm_rmap_head {
404 unsigned long val;
405};
406
407struct kvm_pio_request {
408 unsigned long linear_rip;
409 unsigned long count;
410 int in;
411 int port;
412 int size;
413};
414
415#define PT64_ROOT_MAX_LEVEL 5
416
417struct rsvd_bits_validate {
418 u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
419 u64 bad_mt_xwr;
420};
421
422struct kvm_mmu_root_info {
423 gpa_t pgd;
424 hpa_t hpa;
425};
426
427#define KVM_MMU_ROOT_INFO_INVALID \
428 ((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE })
429
430#define KVM_MMU_NUM_PREV_ROOTS 3
431
432#define KVM_MMU_ROOT_CURRENT BIT(0)
433#define KVM_MMU_ROOT_PREVIOUS(i) BIT(1+i)
434#define KVM_MMU_ROOTS_ALL (BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1)
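
/*
 * Illustrative sketch: building a roots_to_free mask, e.g. for
 * kvm_mmu_free_roots().  With KVM_MMU_NUM_PREV_ROOTS == 3, bit 0 selects the
 * current root and bits 1..3 select the cached previous roots:
 *
 *	ulong roots_to_free = KVM_MMU_ROOT_CURRENT | KVM_MMU_ROOT_PREVIOUS(0);
 *	// roots_to_free == 0b0011, KVM_MMU_ROOTS_ALL == 0b1111
 */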
435
436#define KVM_HAVE_MMU_RWLOCK
437
438struct kvm_mmu_page;
439struct kvm_page_fault;
440
441/*
442 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
443 * and 2-level 32-bit). The kvm_mmu structure abstracts the details of the
444 * current mmu mode.
445 */
446struct kvm_mmu {
447 unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu);
448 u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
449 int (*page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
450 void (*inject_page_fault)(struct kvm_vcpu *vcpu,
451 struct x86_exception *fault);
452 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
453 gpa_t gva_or_gpa, u64 access,
454 struct x86_exception *exception);
455 int (*sync_spte)(struct kvm_vcpu *vcpu,
456 struct kvm_mmu_page *sp, int i);
457 struct kvm_mmu_root_info root;
458 union kvm_cpu_role cpu_role;
459 union kvm_mmu_page_role root_role;
460
461 /*
462 * The pkru_mask indicates if protection key checks are needed. It
463 * consists of 16 domains indexed by page fault error code bits [4:1],
464 * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
465 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
466 */
467 u32 pkru_mask;
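
	/*
	 * Illustrative sketch (an assumption, not the exact KVM lookup; the
	 * PFEC.RSVD/ACC_USER_MASK substitution is omitted for brevity): the
	 * 2-bit domain selected by the fault is ANDed with the AD/WD bits of
	 * the page's protection key:
	 *
	 *	u32 index  = (pfec >> 1) & 0xf;		// PFEC bits [4:1]
	 *	u32 domain = (mmu->pkru_mask >> (index * 2)) & 3;
	 *	u32 ad_wd  = (pkru >> (pkey * 2)) & 3;	// AD/WD for this pkey
	 *	bool pkey_fault = domain & ad_wd;
	 */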
468
469 struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];
470
471 /*
472 * Bitmap; bit set = permission fault
473 * Byte index: page fault error code [4:1]
474 * Bit index: pte permissions in ACC_* format
475 */
476 u8 permissions[16];
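
	/*
	 * Illustrative sketch (assumption): checking the bitmap above, using
	 * the byte/bit indexing described in the preceding comment:
	 *
	 *	int byte = (pfec >> 1) & 0xf;	// error code bits [4:1]
	 *	bool permission_fault = (mmu->permissions[byte] >> pte_access) & 1;
	 */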
477
478 u64 *pae_root;
479 u64 *pml4_root;
480 u64 *pml5_root;
481
482 /*
483 * Check for zero bits in shadow page table entries; these
484 * bits include not only hardware-reserved bits but also
485 * bits that SPTEs never use.
486 */
487 struct rsvd_bits_validate shadow_zero_check;
488
489 struct rsvd_bits_validate guest_rsvd_check;
490
491 u64 pdptrs[4]; /* pae */
492};
493
494enum pmc_type {
495 KVM_PMC_GP = 0,
496 KVM_PMC_FIXED,
497};
498
499struct kvm_pmc {
500 enum pmc_type type;
501 u8 idx;
502 bool is_paused;
503 bool intr;
504 /*
505 * Base value of the PMC counter, relative to the *consumed* count in
506 * the associated perf_event. This value includes counter updates from
507 * the perf_event and emulated_count since the last time the counter
508 * was reprogrammed, but it is *not* the current value as seen by the
509 * guest or userspace.
510 *
511 * The count is relative to the associated perf_event so that KVM
512 * doesn't need to reprogram the perf_event every time the guest writes
513 * to the counter.
514 */
515 u64 counter;
516 /*
517 * PMC events triggered by KVM emulation that haven't been fully
518 * processed, i.e. haven't undergone overflow detection.
519 */
520 u64 emulated_counter;
521 u64 eventsel;
522 struct perf_event *perf_event;
523 struct kvm_vcpu *vcpu;
524 /*
525 * Used only when creating or reusing the perf_event:
526 * the eventsel value for general purpose counters,
527 * or the ctrl value for fixed counters.
528 */
529 u64 current_config;
530};
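
/*
 * Illustrative sketch (an assumption, not the exact KVM helper): per the
 * comments above, the guest-visible counter value is the stored base plus any
 * emulated events plus whatever the perf_event has counted since the last
 * reprogram, truncated to the counter's width:
 *
 *	u64 en, run, val;
 *
 *	val = pmc->counter + pmc->emulated_counter;
 *	if (pmc->perf_event && !pmc->is_paused)
 *		val += perf_event_read_value(pmc->perf_event, &en, &run);
 *	val &= counter_width_mask;	// stand-in for the PMU's counter bitmask
 */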
531
532/* More counters may conflict with other existing Architectural MSRs */
533#define KVM_INTEL_PMC_MAX_GENERIC 8
534#define MSR_ARCH_PERFMON_PERFCTR_MAX (MSR_ARCH_PERFMON_PERFCTR0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
535#define MSR_ARCH_PERFMON_EVENTSEL_MAX (MSR_ARCH_PERFMON_EVENTSEL0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
536#define KVM_PMC_MAX_FIXED 3
537#define MSR_ARCH_PERFMON_FIXED_CTR_MAX (MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_PMC_MAX_FIXED - 1)
538#define KVM_AMD_PMC_MAX_GENERIC 6
539struct kvm_pmu {
540 u8 version;
541 unsigned nr_arch_gp_counters;
542 unsigned nr_arch_fixed_counters;
543 unsigned available_event_types;
544 u64 fixed_ctr_ctrl;
545 u64 fixed_ctr_ctrl_mask;
546 u64 global_ctrl;
547 u64 global_status;
548 u64 counter_bitmask[2];
549 u64 global_ctrl_mask;
550 u64 global_status_mask;
551 u64 reserved_bits;
552 u64 raw_event_mask;
553 struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
554 struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
555
556 /*
557 * Overlay the bitmap with a 64-bit atomic so that all bits can be
558 * set in a single access, e.g. to reprogram all counters when the PMU
559 * filter changes.
560 */
561 union {
562 DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
563 atomic64_t __reprogram_pmi;
564 };
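
	/*
	 * Illustrative sketch: marking every counter for reprogramming with a
	 * single atomic store, which is the reason for the overlay above:
	 *
	 *	atomic64_set(&pmu->__reprogram_pmi, -1ull);
	 */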
565 DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
566 DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);
567
568 u64 ds_area;
569 u64 pebs_enable;
570 u64 pebs_enable_mask;
571 u64 pebs_data_cfg;
572 u64 pebs_data_cfg_mask;
573
574 /*
575 * If a guest counter is cross-mapped to a host counter with a
576 * different index, its PEBS capability will be temporarily disabled.
577 *
578 * The user must make sure that this mask is updated after
579 * disabling interrupts and before calling perf_guest_get_msrs().
580 */
581 u64 host_cross_mapped_mask;
582
583 /*
584 * Gate for releasing perf_events that are not marked in
585 * pmc_in_use, at most once per vCPU time slice.
586 */
587 bool need_cleanup;
588
589 /*
590 * The total number of programmed perf_events; used to avoid a
591 * redundant check before cleanup if the guest doesn't use the vPMU at all.
592 */
593 u8 event_count;
594};
595
596struct kvm_pmu_ops;
597
598enum {
599 KVM_DEBUGREG_BP_ENABLED = 1,
600 KVM_DEBUGREG_WONT_EXIT = 2,
601};
602
603struct kvm_mtrr_range {
604 u64 base;
605 u64 mask;
606 struct list_head node;
607};
608
609struct kvm_mtrr {
610 struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
611 mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
612 u64 deftype;
613
614 struct list_head head;
615};
616
617/* Hyper-V SynIC timer */
618struct kvm_vcpu_hv_stimer {
619 struct hrtimer timer;
620 int index;
621 union hv_stimer_config config;
622 u64 count;
623 u64 exp_time;
624 struct hv_message msg;
625 bool msg_pending;
626};
627
628/* Hyper-V synthetic interrupt controller (SynIC) */
629struct kvm_vcpu_hv_synic {
630 u64 version;
631 u64 control;
632 u64 msg_page;
633 u64 evt_page;
634 atomic64_t sint[HV_SYNIC_SINT_COUNT];
635 atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
636 DECLARE_BITMAP(auto_eoi_bitmap, 256);
637 DECLARE_BITMAP(vec_bitmap, 256);
638 bool active;
639 bool dont_zero_synic_pages;
640};
641
642/* The maximum number of entries on the TLB flush fifo. */
643#define KVM_HV_TLB_FLUSH_FIFO_SIZE (16)
644/*
645 * Note: the following 'magic' entry is made up by KVM to avoid putting
646 * anything besides GVA on the TLB flush fifo. It is theoretically possible
647 * to observe a request to flush 4095 PFNs starting from 0xfffffffffffff000
648 * which will look identical. KVM's action to 'flush everything' instead of
649 * flushing these particular addresses is, however, fully legitimate as
650 * flushing more than requested is always OK.
651 */
652#define KVM_HV_TLB_FLUSHALL_ENTRY ((u64)-1)
653
654enum hv_tlb_flush_fifos {
655 HV_L1_TLB_FLUSH_FIFO,
656 HV_L2_TLB_FLUSH_FIFO,
657 HV_NR_TLB_FLUSH_FIFOS,
658};
659
660struct kvm_vcpu_hv_tlb_flush_fifo {
661 spinlock_t write_lock;
662 DECLARE_KFIFO(entries, u64, KVM_HV_TLB_FLUSH_FIFO_SIZE);
663};
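
/*
 * Illustrative sketch (an assumption, not the exact KVM code): queueing the
 * 'flush everything' magic entry defined above; if the fifo is full the
 * caller can simply fall back to a full TLB flush anyway.
 *
 *	u64 entry = KVM_HV_TLB_FLUSHALL_ENTRY;
 *
 *	kfifo_in_spinlocked(&fifo->entries, &entry, 1, &fifo->write_lock);
 */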
664
665/* Hyper-V per vcpu emulation context */
666struct kvm_vcpu_hv {
667 struct kvm_vcpu *vcpu;
668 u32 vp_index;
669 u64 hv_vapic;
670 s64 runtime_offset;
671 struct kvm_vcpu_hv_synic synic;
672 struct kvm_hyperv_exit exit;
673 struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
674 DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
675 bool enforce_cpuid;
676 struct {
677 u32 features_eax; /* HYPERV_CPUID_FEATURES.EAX */
678 u32 features_ebx; /* HYPERV_CPUID_FEATURES.EBX */
679 u32 features_edx; /* HYPERV_CPUID_FEATURES.EDX */
680 u32 enlightenments_eax; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */
681 u32 enlightenments_ebx; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EBX */
682 u32 syndbg_cap_eax; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
683 u32 nested_eax; /* HYPERV_CPUID_NESTED_FEATURES.EAX */
684 u32 nested_ebx; /* HYPERV_CPUID_NESTED_FEATURES.EBX */
685 } cpuid_cache;
686
687 struct kvm_vcpu_hv_tlb_flush_fifo tlb_flush_fifo[HV_NR_TLB_FLUSH_FIFOS];
688
689 /* Preallocated buffer for handling hypercalls passing sparse vCPU set */
690 u64 sparse_banks[HV_MAX_SPARSE_VCPU_BANKS];
691
692 struct hv_vp_assist_page vp_assist_page;
693
694 struct {
695 u64 pa_page_gpa;
696 u64 vm_id;
697 u32 vp_id;
698 } nested;
699};
700
701struct kvm_hypervisor_cpuid {
702 u32 base;
703 u32 limit;
704};
705
706#ifdef CONFIG_KVM_XEN
707/* Xen HVM per vcpu emulation context */
708struct kvm_vcpu_xen {
709 u64 hypercall_rip;
710 u32 current_runstate;
711 u8 upcall_vector;
712 struct gfn_to_pfn_cache vcpu_info_cache;
713 struct gfn_to_pfn_cache vcpu_time_info_cache;
714 struct gfn_to_pfn_cache runstate_cache;
715 struct gfn_to_pfn_cache runstate2_cache;
716 u64 last_steal;
717 u64 runstate_entry_time;
718 u64 runstate_times[4];
719 unsigned long evtchn_pending_sel;
720 u32 vcpu_id; /* The Xen / ACPI vCPU ID */
721 u32 timer_virq;
722 u64 timer_expires; /* In guest epoch */
723 atomic_t timer_pending;
724 struct hrtimer timer;
725 int poll_evtchn;
726 struct timer_list poll_timer;
727 struct kvm_hypervisor_cpuid cpuid;
728};
729#endif
730
731struct kvm_queued_exception {
732 bool pending;
733 bool injected;
734 bool has_error_code;
735 u8 vector;
736 u32 error_code;
737 unsigned long payload;
738 bool has_payload;
739};
740
741struct kvm_vcpu_arch {
742 /*
743 * rip and regs accesses must go through
744 * kvm_{register,rip}_{read,write} functions.
745 */
746 unsigned long regs[NR_VCPU_REGS];
747 u32 regs_avail;
748 u32 regs_dirty;
749
750 unsigned long cr0;
751 unsigned long cr0_guest_owned_bits;
752 unsigned long cr2;
753 unsigned long cr3;
754 unsigned long cr4;
755 unsigned long cr4_guest_owned_bits;
756 unsigned long cr4_guest_rsvd_bits;
757 unsigned long cr8;
758 u32 host_pkru;
759 u32 pkru;
760 u32 hflags;
761 u64 efer;
762 u64 apic_base;
763 struct kvm_lapic *apic; /* kernel irqchip context */
764 bool load_eoi_exitmap_pending;
765 DECLARE_BITMAP(ioapic_handled_vectors, 256);
766 unsigned long apic_attention;
767 int32_t apic_arb_prio;
768 int mp_state;
769 u64 ia32_misc_enable_msr;
770 u64 smbase;
771 u64 smi_count;
772 bool at_instruction_boundary;
773 bool tpr_access_reporting;
774 bool xfd_no_write_intercept;
775 u64 ia32_xss;
776 u64 microcode_version;
777 u64 arch_capabilities;
778 u64 perf_capabilities;
779
780 /*
781 * Paging state of the vcpu
782 *
783 * If the vcpu runs in guest mode with two-level paging, this still saves
784 * the paging mode of the L1 guest. This context is always used to
785 * handle faults.
786 */
787 struct kvm_mmu *mmu;
788
789 /* Non-nested MMU for L1 */
790 struct kvm_mmu root_mmu;
791
792 /* L1 MMU when running nested */
793 struct kvm_mmu guest_mmu;
794
795 /*
796 * Paging state of an L2 guest (used for nested NPT)
797 *
798 * This context saves all of the information necessary to walk the page
799 * tables of an L2 guest. It is only initialized for page table
800 * walking and not for faulting, since we never handle L2 page faults on
801 * the host.
802 */
803 struct kvm_mmu nested_mmu;
804
805 /*
806 * Pointer to the mmu context currently used for
807 * gva_to_gpa translations.
808 */
809 struct kvm_mmu *walk_mmu;
810
811 struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
812 struct kvm_mmu_memory_cache mmu_shadow_page_cache;
813 struct kvm_mmu_memory_cache mmu_shadowed_info_cache;
814 struct kvm_mmu_memory_cache mmu_page_header_cache;
815
816 /*
817 * QEMU userspace and the guest each have their own FPU state.
818 * In vcpu_run, we switch between the user and guest FPU contexts.
819 * While running a VCPU, the VCPU thread will have the guest FPU
820 * context.
821 *
822 * Note that while the PKRU state lives inside the fpu registers,
823 * it is switched out separately at VMENTER and VMEXIT time. The
824 * "guest_fpu" state here contains the guest FPU context, with the
825 * host PKRU bits.
826 */
827 struct fpu_guest guest_fpu;
828
829 u64 xcr0;
830 u64 guest_supported_xcr0;
831
832 struct kvm_pio_request pio;
833 void *pio_data;
834 void *sev_pio_data;
835 unsigned sev_pio_count;
836
837 u8 event_exit_inst_len;
838
839 bool exception_from_userspace;
840
841 /* Exceptions to be injected to the guest. */
842 struct kvm_queued_exception exception;
843 /* Exception VM-Exits to be synthesized to L1. */
844 struct kvm_queued_exception exception_vmexit;
845
846 struct kvm_queued_interrupt {
847 bool injected;
848 bool soft;
849 u8 nr;
850 } interrupt;
851
852 int halt_request; /* real mode on Intel only */
853
854 int cpuid_nent;
855 struct kvm_cpuid_entry2 *cpuid_entries;
856 struct kvm_hypervisor_cpuid kvm_cpuid;
857
858 /*
859 * FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly
860 * when "struct kvm_vcpu_arch" is no longer defined in an
861 * arch/x86/include/asm header. The max is mostly arbitrary, i.e.
862 * can be increased as necessary.
863 */
864#define KVM_MAX_NR_GOVERNED_FEATURES BITS_PER_LONG
865
866 /*
867 * Track whether or not the guest is allowed to use features that are
868 * governed by KVM, where "governed" means KVM needs to manage state
869 * and/or explicitly enable the feature in hardware. Typically, but
870 * not always, governed features can be used by the guest if and only
871 * if both KVM and userspace want to expose the feature to the guest.
872 */
873 struct {
874 DECLARE_BITMAP(enabled, KVM_MAX_NR_GOVERNED_FEATURES);
875 } governed_features;
876
877 u64 reserved_gpa_bits;
878 int maxphyaddr;
879
880 /* emulate context */
881
882 struct x86_emulate_ctxt *emulate_ctxt;
883 bool emulate_regs_need_sync_to_vcpu;
884 bool emulate_regs_need_sync_from_vcpu;
885 int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
886
887 gpa_t time;
888 struct pvclock_vcpu_time_info hv_clock;
889 unsigned int hw_tsc_khz;
890 struct gfn_to_pfn_cache pv_time;
891 /* set guest stopped flag in pvclock flags field */
892 bool pvclock_set_guest_stopped_request;
893
894 struct {
895 u8 preempted;
896 u64 msr_val;
897 u64 last_steal;
898 struct gfn_to_hva_cache cache;
899 } st;
900
901 u64 l1_tsc_offset;
902 u64 tsc_offset; /* current tsc offset */
903 u64 last_guest_tsc;
904 u64 last_host_tsc;
905 u64 tsc_offset_adjustment;
906 u64 this_tsc_nsec;
907 u64 this_tsc_write;
908 u64 this_tsc_generation;
909 bool tsc_catchup;
910 bool tsc_always_catchup;
911 s8 virtual_tsc_shift;
912 u32 virtual_tsc_mult;
913 u32 virtual_tsc_khz;
914 s64 ia32_tsc_adjust_msr;
915 u64 msr_ia32_power_ctl;
916 u64 l1_tsc_scaling_ratio;
917 u64 tsc_scaling_ratio; /* current scaling ratio */
918
919 atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
920 /* Number of NMIs pending injection, not including hardware vNMIs. */
921 unsigned int nmi_pending;
922 bool nmi_injected; /* Trying to inject an NMI this entry */
923 bool smi_pending; /* SMI queued after currently running handler */
924 u8 handling_intr_from_guest;
925
926 struct kvm_mtrr mtrr_state;
927 u64 pat;
928
929 unsigned switch_db_regs;
930 unsigned long db[KVM_NR_DB_REGS];
931 unsigned long dr6;
932 unsigned long dr7;
933 unsigned long eff_db[KVM_NR_DB_REGS];
934 unsigned long guest_debug_dr7;
935 u64 msr_platform_info;
936 u64 msr_misc_features_enables;
937
938 u64 mcg_cap;
939 u64 mcg_status;
940 u64 mcg_ctl;
941 u64 mcg_ext_ctl;
942 u64 *mce_banks;
943 u64 *mci_ctl2_banks;
944
945 /* Cache MMIO info */
946 u64 mmio_gva;
947 unsigned mmio_access;
948 gfn_t mmio_gfn;
949 u64 mmio_gen;
950
951 struct kvm_pmu pmu;
952
953 /* used for guest single stepping over the given code position */
954 unsigned long singlestep_rip;
955
956#ifdef CONFIG_KVM_HYPERV
957 bool hyperv_enabled;
958 struct kvm_vcpu_hv *hyperv;
959#endif
960#ifdef CONFIG_KVM_XEN
961 struct kvm_vcpu_xen xen;
962#endif
963 cpumask_var_t wbinvd_dirty_mask;
964
965 unsigned long last_retry_eip;
966 unsigned long last_retry_addr;
967
968 struct {
969 bool halted;
970 gfn_t gfns[ASYNC_PF_PER_VCPU];
971 struct gfn_to_hva_cache data;
972 u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
973 u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
974 u16 vec;
975 u32 id;
976 bool send_user_only;
977 u32 host_apf_flags;
978 bool delivery_as_pf_vmexit;
979 bool pageready_pending;
980 } apf;
981
982 /* OSVW MSRs (AMD only) */
983 struct {
984 u64 length;
985 u64 status;
986 } osvw;
987
988 struct {
989 u64 msr_val;
990 struct gfn_to_hva_cache data;
991 } pv_eoi;
992
993 u64 msr_kvm_poll_control;
994
995 /* exit qualification captured at the EPT violation */
996 unsigned long exit_qualification;
997
998 /* pv related host specific info */
999 struct {
1000 bool pv_unhalted;
1001 } pv;
1002
1003 int pending_ioapic_eoi;
1004 int pending_external_vector;
1005
1006 /* vCPU was preempted while running in kernel mode (CPL = 0) */
1007 bool preempted_in_kernel;
1008
1009 /* Flush the L1 Data cache for L1TF mitigation on VMENTER */
1010 bool l1tf_flush_l1d;
1011
1012 /* Host CPU on which VM-entry was most recently attempted */
1013 int last_vmentry_cpu;
1014
1015 /* AMD MSRC001_0015 Hardware Configuration */
1016 u64 msr_hwcr;
1017
1018 /* pv related cpuid info */
1019 struct {
1020 /*
1021 * value of the eax register in the KVM_CPUID_FEATURES CPUID
1022 * leaf.
1023 */
1024 u32 features;
1025
1026 /*
1027 * indicates whether pv emulation should be disabled if features
1028 * are not present in the guest's cpuid
1029 */
1030 bool enforce;
1031 } pv_cpuid;
1032
1033 /* Protected Guests */
1034 bool guest_state_protected;
1035
1036 /*
1037 * Set when the PDPTEs were loaded directly by userspace without
1038 * reading guest memory.
1039 */
1040 bool pdptrs_from_userspace;
1041
1042#if IS_ENABLED(CONFIG_HYPERV)
1043 hpa_t hv_root_tdp;
1044#endif
1045};
1046
1047struct kvm_lpage_info {
1048 int disallow_lpage;
1049};
1050
1051struct kvm_arch_memory_slot {
1052 struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
1053 struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
1054 unsigned short *gfn_write_track;
1055};
1056
1057/*
1058 * Track the mode of the optimized logical map, as the rules for decoding the
1059 * destination vary per mode. Enabling the optimized logical map requires all
1060 * software-enabled local APICs to be in the same mode, each addressable APIC to
1061 * be mapped to only one MDA, and each MDA to map to at most one APIC.
1062 */
1063enum kvm_apic_logical_mode {
1064 /* All local APICs are software disabled. */
1065 KVM_APIC_MODE_SW_DISABLED,
1066 /* All software enabled local APICs in xAPIC cluster addressing mode. */
1067 KVM_APIC_MODE_XAPIC_CLUSTER,
1068 /* All software enabled local APICs in xAPIC flat addressing mode. */
1069 KVM_APIC_MODE_XAPIC_FLAT,
1070 /* All software enabled local APICs in x2APIC mode. */
1071 KVM_APIC_MODE_X2APIC,
1072 /*
1073 * Optimized map disabled, e.g. not all local APICs in the same logical
1074 * mode, same logical ID assigned to multiple APICs, etc.
1075 */
1076 KVM_APIC_MODE_MAP_DISABLED,
1077};
1078
1079struct kvm_apic_map {
1080 struct rcu_head rcu;
1081 enum kvm_apic_logical_mode logical_mode;
1082 u32 max_apic_id;
1083 union {
1084 struct kvm_lapic *xapic_flat_map[8];
1085 struct kvm_lapic *xapic_cluster_map[16][4];
1086 };
1087 struct kvm_lapic *phys_map[];
1088};
1089
1090/* Hyper-V synthetic debugger (SynDbg) */
1091struct kvm_hv_syndbg {
1092 struct {
1093 u64 control;
1094 u64 status;
1095 u64 send_page;
1096 u64 recv_page;
1097 u64 pending_page;
1098 } control;
1099 u64 options;
1100};
1101
1102/* Current state of Hyper-V TSC page clocksource */
1103enum hv_tsc_page_status {
1104 /* TSC page was not set up or disabled */
1105 HV_TSC_PAGE_UNSET = 0,
1106 /* TSC page MSR was written by the guest, update pending */
1107 HV_TSC_PAGE_GUEST_CHANGED,
1108 /* TSC page update was triggered from the host side */
1109 HV_TSC_PAGE_HOST_CHANGED,
1110 /* TSC page was properly set up and is currently active */
1111 HV_TSC_PAGE_SET,
1112 /* TSC page was set up with an inaccessible GPA */
1113 HV_TSC_PAGE_BROKEN,
1114};
1115
1116#ifdef CONFIG_KVM_HYPERV
1117/* Hyper-V emulation context */
1118struct kvm_hv {
1119 struct mutex hv_lock;
1120 u64 hv_guest_os_id;
1121 u64 hv_hypercall;
1122 u64 hv_tsc_page;
1123 enum hv_tsc_page_status hv_tsc_page_status;
1124
1125 /* Hyper-V based guest crash (NT kernel bugcheck) parameters */
1126 u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
1127 u64 hv_crash_ctl;
1128
1129 struct ms_hyperv_tsc_page tsc_ref;
1130
1131 struct idr conn_to_evt;
1132
1133 u64 hv_reenlightenment_control;
1134 u64 hv_tsc_emulation_control;
1135 u64 hv_tsc_emulation_status;
1136 u64 hv_invtsc_control;
1137
1138 /* How many vCPUs have VP index != vCPU index */
1139 atomic_t num_mismatched_vp_indexes;
1140
1141 /*
1142 * How many SynICs use the 'AutoEOI' feature
1143 * (protected by arch.apicv_update_lock)
1144 */
1145 unsigned int synic_auto_eoi_used;
1146
1147 struct kvm_hv_syndbg hv_syndbg;
1148
1149 bool xsaves_xsavec_checked;
1150};
1151#endif
1152
1153struct msr_bitmap_range {
1154 u32 flags;
1155 u32 nmsrs;
1156 u32 base;
1157 unsigned long *bitmap;
1158};
1159
1160#ifdef CONFIG_KVM_XEN
1161/* Xen emulation context */
1162struct kvm_xen {
1163 struct mutex xen_lock;
1164 u32 xen_version;
1165 bool long_mode;
1166 bool runstate_update_flag;
1167 u8 upcall_vector;
1168 struct gfn_to_pfn_cache shinfo_cache;
1169 struct idr evtchn_ports;
1170 unsigned long poll_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
1171};
1172#endif
1173
1174enum kvm_irqchip_mode {
1175 KVM_IRQCHIP_NONE,
1176 KVM_IRQCHIP_KERNEL, /* created with KVM_CREATE_IRQCHIP */
1177 KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */
1178};
1179
1180struct kvm_x86_msr_filter {
1181 u8 count;
1182 bool default_allow:1;
1183 struct msr_bitmap_range ranges[16];
1184};
1185
1186struct kvm_x86_pmu_event_filter {
1187 __u32 action;
1188 __u32 nevents;
1189 __u32 fixed_counter_bitmap;
1190 __u32 flags;
1191 __u32 nr_includes;
1192 __u32 nr_excludes;
1193 __u64 *includes;
1194 __u64 *excludes;
1195 __u64 events[];
1196};
1197
1198enum kvm_apicv_inhibit {
1199
1200 /********************************************************************/
1201 /* INHIBITs that are relevant to both Intel's APICv and AMD's AVIC. */
1202 /********************************************************************/
1203
1204 /*
1205 * APIC acceleration is disabled by a module parameter
1206 * and/or not supported in hardware.
1207 */
1208 APICV_INHIBIT_REASON_DISABLE,
1209
1210 /*
1211 * APIC acceleration is inhibited because the AutoEOI feature is
1212 * being used by a Hyper-V guest.
1213 */
1214 APICV_INHIBIT_REASON_HYPERV,
1215
1216 /*
1217 * APIC acceleration is inhibited because userspace has not yet
1218 * enabled the kernel/split irqchip.
1219 */
1220 APICV_INHIBIT_REASON_ABSENT,
1221
1222 /* APIC acceleration is inhibited because KVM_GUESTDBG_BLOCKIRQ
1223 * (an out-of-band debug measure that blocks all interrupts on this vCPU)
1224 * was enabled, to avoid AVIC/APICv bypassing it.
1225 */
1226 APICV_INHIBIT_REASON_BLOCKIRQ,
1227
1228 /*
1229 * APICv is disabled because not all vCPUs have a 1:1 mapping between
1230 * APIC ID and vCPU, _and_ KVM is not applying its x2APIC hotplug hack.
1231 */
1232 APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED,
1233
1234 /*
1235 * For simplicity, APIC acceleration is inhibited the
1236 * first time either the APIC ID or the APIC base is changed by the guest
1237 * from their reset values.
1238 */
1239 APICV_INHIBIT_REASON_APIC_ID_MODIFIED,
1240 APICV_INHIBIT_REASON_APIC_BASE_MODIFIED,
1241
1242 /******************************************************/
1243 /* INHIBITs that are relevant only to the AMD's AVIC. */
1244 /******************************************************/
1245
1246 /*
1247 * AVIC is inhibited on a vCPU because it runs a nested guest.
1248 *
1249 * This is needed because unlike APICv, the peers of this vCPU
1250 * cannot use the doorbell mechanism to signal interrupts via AVIC when
1251 * a vCPU runs nested.
1252 */
1253 APICV_INHIBIT_REASON_NESTED,
1254
1255 /*
1256 * On SVM, waiting for the IRQ window is implemented with a pending vIRQ,
1257 * which cannot be injected while AVIC is enabled; thus AVIC
1258 * is inhibited while KVM waits for the IRQ window.
1259 */
1260 APICV_INHIBIT_REASON_IRQWIN,
1261
1262 /*
1263 * PIT (i8254) 're-inject' mode relies on the EOI intercept,
1264 * which AVIC doesn't support for edge-triggered interrupts.
1265 */
1266 APICV_INHIBIT_REASON_PIT_REINJ,
1267
1268 /*
1269 * AVIC is disabled because SEV doesn't support it.
1270 */
1271 APICV_INHIBIT_REASON_SEV,
1272
1273 /*
1274 * AVIC is disabled because not all vCPUs with a valid LDR have a 1:1
1275 * mapping between logical ID and vCPU.
1276 */
1277 APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED,
1278};
1279
1280struct kvm_arch {
1281 unsigned long vm_type;
1282 unsigned long n_used_mmu_pages;
1283 unsigned long n_requested_mmu_pages;
1284 unsigned long n_max_mmu_pages;
1285 unsigned int indirect_shadow_pages;
1286 u8 mmu_valid_gen;
1287 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
1288 struct list_head active_mmu_pages;
1289 struct list_head zapped_obsolete_pages;
1290 /*
1291 * A list of kvm_mmu_page structs that, if zapped, could possibly be
1292 * replaced by an NX huge page. A shadow page is on this list if its
1293 * existence disallows an NX huge page (nx_huge_page_disallowed is set)
1294 * and there are no other conditions that prevent a huge page, e.g.
1295 * the backing host page is huge, dirty logging is not enabled for its
1296 * memslot, etc... Note, zapping shadow pages on this list doesn't
1297 * guarantee an NX huge page will be created in its stead, e.g. if the
1298 * guest attempts to execute from the region then KVM obviously can't
1299 * create an NX huge page (without hanging the guest).
1300 */
1301 struct list_head possible_nx_huge_pages;
1302#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
1303 struct kvm_page_track_notifier_head track_notifier_head;
1304#endif
1305 /*
1306 * Protects marking pages unsync during page faults, as TDP MMU page
1307 * faults only take mmu_lock for read. For simplicity, the unsync
1308 * pages lock is always taken when marking pages unsync regardless of
1309 * whether mmu_lock is held for read or write.
1310 */
1311 spinlock_t mmu_unsync_pages_lock;
1312
1313 struct iommu_domain *iommu_domain;
1314 bool iommu_noncoherent;
1315#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
1316 atomic_t noncoherent_dma_count;
1317#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
1318 atomic_t assigned_device_count;
1319 struct kvm_pic *vpic;
1320 struct kvm_ioapic *vioapic;
1321 struct kvm_pit *vpit;
1322 atomic_t vapics_in_nmi_mode;
1323 struct mutex apic_map_lock;
1324 struct kvm_apic_map __rcu *apic_map;
1325 atomic_t apic_map_dirty;
1326
1327 bool apic_access_memslot_enabled;
1328 bool apic_access_memslot_inhibited;
1329
1330 /* Protects apicv_inhibit_reasons */
1331 struct rw_semaphore apicv_update_lock;
1332 unsigned long apicv_inhibit_reasons;
1333
1334 gpa_t wall_clock;
1335
1336 bool mwait_in_guest;
1337 bool hlt_in_guest;
1338 bool pause_in_guest;
1339 bool cstate_in_guest;
1340
1341 unsigned long irq_sources_bitmap;
1342 s64 kvmclock_offset;
1343
1344 /*
1345 * This also protects nr_vcpus_matched_tsc which is read from a
1346 * preemption-disabled region, so it must be a raw spinlock.
1347 */
1348 raw_spinlock_t tsc_write_lock;
1349 u64 last_tsc_nsec;
1350 u64 last_tsc_write;
1351 u32 last_tsc_khz;
1352 u64 last_tsc_offset;
1353 u64 cur_tsc_nsec;
1354 u64 cur_tsc_write;
1355 u64 cur_tsc_offset;
1356 u64 cur_tsc_generation;
1357 int nr_vcpus_matched_tsc;
1358
1359 u32 default_tsc_khz;
1360 bool user_set_tsc;
1361
1362 seqcount_raw_spinlock_t pvclock_sc;
1363 bool use_master_clock;
1364 u64 master_kernel_ns;
1365 u64 master_cycle_now;
1366 struct delayed_work kvmclock_update_work;
1367 struct delayed_work kvmclock_sync_work;
1368
1369 struct kvm_xen_hvm_config xen_hvm_config;
1370
1371 /* reads protected by irq_srcu, writes by irq_lock */
1372 struct hlist_head mask_notifier_list;
1373
1374#ifdef CONFIG_KVM_HYPERV
1375 struct kvm_hv hyperv;
1376#endif
1377
1378#ifdef CONFIG_KVM_XEN
1379 struct kvm_xen xen;
1380#endif
1381
1382 bool backwards_tsc_observed;
1383 bool boot_vcpu_runs_old_kvmclock;
1384 u32 bsp_vcpu_id;
1385
1386 u64 disabled_quirks;
1387
1388 enum kvm_irqchip_mode irqchip_mode;
1389 u8 nr_reserved_ioapic_pins;
1390
1391 bool disabled_lapic_found;
1392
1393 bool x2apic_format;
1394 bool x2apic_broadcast_quirk_disabled;
1395
1396 bool guest_can_read_msr_platform_info;
1397 bool exception_payload_enabled;
1398
1399 bool triple_fault_event;
1400
1401 bool bus_lock_detection_enabled;
1402 bool enable_pmu;
1403
1404 u32 notify_window;
1405 u32 notify_vmexit_flags;
1406 /*
1407 * If exit_on_emulation_error is set, and the in-kernel instruction
1408 * emulator fails to emulate an instruction, allow userspace
1409 * the opportunity to look at it.
1410 */
1411 bool exit_on_emulation_error;
1412
1413 /* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
1414 u32 user_space_msr_mask;
1415 struct kvm_x86_msr_filter __rcu *msr_filter;
1416
1417 u32 hypercall_exit_enabled;
1418
1419 /* Guest can access the SGX PROVISIONKEY. */
1420 bool sgx_provisioning_allowed;
1421
1422 struct kvm_x86_pmu_event_filter __rcu *pmu_event_filter;
1423 struct task_struct *nx_huge_page_recovery_thread;
1424
1425#ifdef CONFIG_X86_64
1426 /* The number of TDP MMU pages across all roots. */
1427 atomic64_t tdp_mmu_pages;
1428
1429 /*
1430 * List of struct kvm_mmu_pages being used as roots.
1431 * All struct kvm_mmu_pages in the list should have
1432 * tdp_mmu_page set.
1433 *
1434 * For reads, this list is protected by:
1435 * the MMU lock in read mode + RCU or
1436 * the MMU lock in write mode
1437 *
1438 * For writes, this list is protected by tdp_mmu_pages_lock; see
1439 * below for the details.
1440 *
1441 * Roots will remain in the list until their tdp_mmu_root_count
1442 * drops to zero, at which point the thread that decremented the
1443 * count to zero should remove the root from the list and clean
1444 * it up, freeing the root after an RCU grace period.
1445 */
1446 struct list_head tdp_mmu_roots;
1447
1448 /*
1449 * Protects accesses to the following fields when the MMU lock
1450 * is held in read mode:
1451 * - tdp_mmu_roots (above)
1452 * - the link field of kvm_mmu_page structs used by the TDP MMU
1453 * - possible_nx_huge_pages;
1454 * - the possible_nx_huge_page_link field of kvm_mmu_page structs used
1455 * by the TDP MMU
1456 * Because the lock is only taken within the MMU lock, strictly
1457 * speaking it is redundant to acquire this lock when the thread
1458 * holds the MMU lock in write mode. However it often simplifies
1459 * the code to do so.
1460 */
1461 spinlock_t tdp_mmu_pages_lock;
1462#endif /* CONFIG_X86_64 */
1463
1464 /*
1465 * If set, at least one shadow root has been allocated. This flag
1466 * is used as one input when determining whether certain memslot
1467 * related allocations are necessary.
1468 */
1469 bool shadow_root_allocated;
1470
1471#if IS_ENABLED(CONFIG_HYPERV)
1472 hpa_t hv_root_tdp;
1473 spinlock_t hv_root_tdp_lock;
1474 struct hv_partition_assist_pg *hv_pa_pg;
1475#endif
1476 /*
1477 * VM-scope maximum vCPU ID. Used to determine the size of structures
1478 * that increase along with the maximum vCPU ID, in which case, using
1479 * the global KVM_MAX_VCPU_IDS may lead to significant memory waste.
1480 */
1481 u32 max_vcpu_ids;
1482
1483 bool disable_nx_huge_pages;
1484
1485 /*
1486 * Memory caches used to allocate shadow pages when performing eager
1487 * page splitting. No need for a shadowed_info_cache since eager page
1488 * splitting only allocates direct shadow pages.
1489 *
1490 * Protected by kvm->slots_lock.
1491 */
1492 struct kvm_mmu_memory_cache split_shadow_page_cache;
1493 struct kvm_mmu_memory_cache split_page_header_cache;
1494
1495 /*
1496 * Memory cache used to allocate pte_list_desc structs while splitting
1497 * huge pages. In the worst case, to split one huge page, 512
1498 * pte_list_desc structs are needed to add each lower level leaf sptep
1499 * to the rmap plus 1 to extend the parent_ptes rmap of the lower level
1500 * page table.
1501 *
1502 * Protected by kvm->slots_lock.
1503 */
1504#define SPLIT_DESC_CACHE_MIN_NR_OBJECTS (SPTE_ENT_PER_PAGE + 1)
1505 struct kvm_mmu_memory_cache split_desc_cache;
1506};
1507
1508struct kvm_vm_stat {
1509 struct kvm_vm_stat_generic generic;
1510 u64 mmu_shadow_zapped;
1511 u64 mmu_pte_write;
1512 u64 mmu_pde_zapped;
1513 u64 mmu_flooded;
1514 u64 mmu_recycled;
1515 u64 mmu_cache_miss;
1516 u64 mmu_unsync;
1517 union {
1518 struct {
1519 atomic64_t pages_4k;
1520 atomic64_t pages_2m;
1521 atomic64_t pages_1g;
1522 };
1523 atomic64_t pages[KVM_NR_PAGE_SIZES];
1524 };
1525 u64 nx_lpage_splits;
1526 u64 max_mmu_page_hash_collisions;
1527 u64 max_mmu_rmap_size;
1528};
1529
1530struct kvm_vcpu_stat {
1531 struct kvm_vcpu_stat_generic generic;
1532 u64 pf_taken;
1533 u64 pf_fixed;
1534 u64 pf_emulate;
1535 u64 pf_spurious;
1536 u64 pf_fast;
1537 u64 pf_mmio_spte_created;
1538 u64 pf_guest;
1539 u64 tlb_flush;
1540 u64 invlpg;
1541
1542 u64 exits;
1543 u64 io_exits;
1544 u64 mmio_exits;
1545 u64 signal_exits;
1546 u64 irq_window_exits;
1547 u64 nmi_window_exits;
1548 u64 l1d_flush;
1549 u64 halt_exits;
1550 u64 request_irq_exits;
1551 u64 irq_exits;
1552 u64 host_state_reload;
1553 u64 fpu_reload;
1554 u64 insn_emulation;
1555 u64 insn_emulation_fail;
1556 u64 hypercalls;
1557 u64 irq_injections;
1558 u64 nmi_injections;
1559 u64 req_event;
1560 u64 nested_run;
1561 u64 directed_yield_attempted;
1562 u64 directed_yield_successful;
1563 u64 preemption_reported;
1564 u64 preemption_other;
1565 u64 guest_mode;
1566 u64 notify_window_exits;
1567};
1568
1569struct x86_instruction_info;
1570
1571struct msr_data {
1572 bool host_initiated;
1573 u32 index;
1574 u64 data;
1575};
1576
1577struct kvm_lapic_irq {
1578 u32 vector;
1579 u16 delivery_mode;
1580 u16 dest_mode;
1581 bool level;
1582 u16 trig_mode;
1583 u32 shorthand;
1584 u32 dest_id;
1585 bool msi_redir_hint;
1586};
1587
1588static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
1589{
1590 return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
1591}
1592
1593struct kvm_x86_ops {
1594 const char *name;
1595
1596 int (*check_processor_compatibility)(void);
1597
1598 int (*hardware_enable)(void);
1599 void (*hardware_disable)(void);
1600 void (*hardware_unsetup)(void);
1601 bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
1602 void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
1603
1604 unsigned int vm_size;
1605 int (*vm_init)(struct kvm *kvm);
1606 void (*vm_destroy)(struct kvm *kvm);
1607
1608 /* Create, but do not attach this VCPU */
1609 int (*vcpu_precreate)(struct kvm *kvm);
1610 int (*vcpu_create)(struct kvm_vcpu *vcpu);
1611 void (*vcpu_free)(struct kvm_vcpu *vcpu);
1612 void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);
1613
1614 void (*prepare_switch_to_guest)(struct kvm_vcpu *vcpu);
1615 void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
1616 void (*vcpu_put)(struct kvm_vcpu *vcpu);
1617
1618 void (*update_exception_bitmap)(struct kvm_vcpu *vcpu);
1619 int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
1620 int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
1621 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
1622 void (*get_segment)(struct kvm_vcpu *vcpu,
1623 struct kvm_segment *var, int seg);
1624 int (*get_cpl)(struct kvm_vcpu *vcpu);
1625 void (*set_segment)(struct kvm_vcpu *vcpu,
1626 struct kvm_segment *var, int seg);
1627 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
1628 bool (*is_valid_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
1629 void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
1630 void (*post_set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
1631 bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
1632 void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
1633 int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
1634 void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1635 void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1636 void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1637 void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1638 void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
1639 void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
1640 void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
1641 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
1642 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
1643 bool (*get_if_flag)(struct kvm_vcpu *vcpu);
1644
1645 void (*flush_tlb_all)(struct kvm_vcpu *vcpu);
1646 void (*flush_tlb_current)(struct kvm_vcpu *vcpu);
1647#if IS_ENABLED(CONFIG_HYPERV)
1648 int (*flush_remote_tlbs)(struct kvm *kvm);
1649 int (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn,
1650 gfn_t nr_pages);
1651#endif
1652
1653 /*
1654 * Flush any TLB entries associated with the given GVA.
1655 * Does not need to flush GPA->HPA mappings.
1656 * Can potentially get non-canonical addresses through INVLPGs, which
1657 * the implementation may choose to ignore if appropriate.
1658 */
1659 void (*flush_tlb_gva)(struct kvm_vcpu *vcpu, gva_t addr);
1660
1661 /*
1662 * Flush any TLB entries created by the guest. Like tlb_flush_gva(),
1663 * does not need to flush GPA->HPA mappings.
1664 */
1665 void (*flush_tlb_guest)(struct kvm_vcpu *vcpu);
1666
1667 int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
1668 enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu);
1669 int (*handle_exit)(struct kvm_vcpu *vcpu,
1670 enum exit_fastpath_completion exit_fastpath);
1671 int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
1672 void (*update_emulated_instruction)(struct kvm_vcpu *vcpu);
1673 void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
1674 u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
1675 void (*patch_hypercall)(struct kvm_vcpu *vcpu,
1676 unsigned char *hypercall_addr);
1677 void (*inject_irq)(struct kvm_vcpu *vcpu, bool reinjected);
1678 void (*inject_nmi)(struct kvm_vcpu *vcpu);
1679 void (*inject_exception)(struct kvm_vcpu *vcpu);
1680 void (*cancel_injection)(struct kvm_vcpu *vcpu);
1681 int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
1682 int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
1683 bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
1684 void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
1685 /* Whether or not a virtual NMI is pending in hardware. */
1686 bool (*is_vnmi_pending)(struct kvm_vcpu *vcpu);
1687 /*
1688 * Attempt to pend a virtual NMI in hardware. Returns %true on success
1689 * to allow using static_call_ret0 as the fallback.
1690 */
1691 bool (*set_vnmi_pending)(struct kvm_vcpu *vcpu);
1692 void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
1693 void (*enable_irq_window)(struct kvm_vcpu *vcpu);
1694 void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
1695 bool (*check_apicv_inhibit_reasons)(enum kvm_apicv_inhibit reason);
1696 const unsigned long required_apicv_inhibits;
1697 bool allow_apicv_in_x2apic_without_x2apic_virtualization;
1698 void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
1699 void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
1700 void (*hwapic_isr_update)(int isr);
1701 bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
1702 void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
1703 void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
1704 void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
1705 void (*deliver_interrupt)(struct kvm_lapic *apic, int delivery_mode,
1706 int trig_mode, int vector);
1707 int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
1708 int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
1709 int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
1710 u8 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
1711
1712 void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
1713 int root_level);
1714
1715 bool (*has_wbinvd_exit)(void);
1716
1717 u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
1718 u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
1719 void (*write_tsc_offset)(struct kvm_vcpu *vcpu);
1720 void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu);
1721
1722 /*
1723 * Retrieve somewhat arbitrary exit information. Intended to
1724 * be used only from within tracepoints or error paths.
1725 */
1726 void (*get_exit_info)(struct kvm_vcpu *vcpu, u32 *reason,
1727 u64 *info1, u64 *info2,
1728 u32 *exit_int_info, u32 *exit_int_info_err_code);
1729
1730 int (*check_intercept)(struct kvm_vcpu *vcpu,
1731 struct x86_instruction_info *info,
1732 enum x86_intercept_stage stage,
1733 struct x86_exception *exception);
1734 void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
1735
1736 void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
1737
1738 void (*sched_in)(struct kvm_vcpu *vcpu, int cpu);
1739
1740 /*
1741 * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer. A zero
1742 * value indicates CPU dirty logging is unsupported or disabled.
1743 */
1744 int cpu_dirty_log_size;
1745 void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);
1746
1747 const struct kvm_x86_nested_ops *nested_ops;
1748
1749 void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
1750 void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);
1751
1752 int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq,
1753 uint32_t guest_irq, bool set);
1754 void (*pi_start_assignment)(struct kvm *kvm);
1755 void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
1756 void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
1757 bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
1758
1759 int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
1760 bool *expired);
1761 void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
1762
1763 void (*setup_mce)(struct kvm_vcpu *vcpu);
1764
1765#ifdef CONFIG_KVM_SMM
1766 int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
1767 int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram);
1768 int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
1769 void (*enable_smi_window)(struct kvm_vcpu *vcpu);
1770#endif
1771
1772 int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp);
1773 int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp);
1774 int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp);
1775 int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
1776 int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
1777 void (*guest_memory_reclaimed)(struct kvm *kvm);
1778
1779 int (*get_msr_feature)(struct kvm_msr_entry *entry);
1780
1781 int (*check_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
1782 void *insn, int insn_len);
1783
1784 bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
1785 int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu);
1786
1787 void (*migrate_timers)(struct kvm_vcpu *vcpu);
1788 void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
1789 int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
1790
1791 void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
1792
1793 /*
1794 * Returns vCPU-specific APICv inhibit reasons.
1795 */
1796 unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
1797
1798 gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);
1799};
1800
1801struct kvm_x86_nested_ops {
1802 void (*leave_nested)(struct kvm_vcpu *vcpu);
1803 bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector,
1804 u32 error_code);
1805 int (*check_events)(struct kvm_vcpu *vcpu);
1806 bool (*has_events)(struct kvm_vcpu *vcpu);
1807 void (*triple_fault)(struct kvm_vcpu *vcpu);
1808 int (*get_state)(struct kvm_vcpu *vcpu,
1809 struct kvm_nested_state __user *user_kvm_nested_state,
1810 unsigned user_data_size);
1811 int (*set_state)(struct kvm_vcpu *vcpu,
1812 struct kvm_nested_state __user *user_kvm_nested_state,
1813 struct kvm_nested_state *kvm_state);
1814 bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu);
1815 int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
1816
1817 int (*enable_evmcs)(struct kvm_vcpu *vcpu,
1818 uint16_t *vmcs_version);
1819 uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu);
1820 void (*hv_inject_synthetic_vmexit_post_tlb_flush)(struct kvm_vcpu *vcpu);
1821};
1822
1823struct kvm_x86_init_ops {
1824 int (*hardware_setup)(void);
1825 unsigned int (*handle_intel_pt_intr)(void);
1826
1827 struct kvm_x86_ops *runtime_ops;
1828 struct kvm_pmu_ops *pmu_ops;
1829};
1830
1831struct kvm_arch_async_pf {
1832 u32 token;
1833 gfn_t gfn;
1834 unsigned long cr3;
1835 bool direct_map;
1836};
1837
1838extern u32 __read_mostly kvm_nr_uret_msrs;
1839extern u64 __read_mostly host_efer;
1840extern bool __read_mostly allow_smaller_maxphyaddr;
1841extern bool __read_mostly enable_apicv;
1842extern struct kvm_x86_ops kvm_x86_ops;
1843
1844#define KVM_X86_OP(func) \
1845 DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
1846#define KVM_X86_OP_OPTIONAL KVM_X86_OP
1847#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
1848#include <asm/kvm-x86-ops.h>
1849
1850int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops);
1851void kvm_x86_vendor_exit(void);
1852
1853#define __KVM_HAVE_ARCH_VM_ALLOC
1854static inline struct kvm *kvm_arch_alloc_vm(void)
1855{
1856 return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1857}
1858
1859#define __KVM_HAVE_ARCH_VM_FREE
1860void kvm_arch_free_vm(struct kvm *kvm);
1861
1862#if IS_ENABLED(CONFIG_HYPERV)
1863#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
1864static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
1865{
1866 if (kvm_x86_ops.flush_remote_tlbs &&
1867 !static_call(kvm_x86_flush_remote_tlbs)(kvm))
1868 return 0;
1869 else
1870 return -ENOTSUPP;
1871}
1872
1873#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
1874static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn,
1875 u64 nr_pages)
1876{
1877 if (!kvm_x86_ops.flush_remote_tlbs_range)
1878 return -EOPNOTSUPP;
1879
1880 return static_call(kvm_x86_flush_remote_tlbs_range)(kvm, gfn, nr_pages);
1881}
1882#endif /* CONFIG_HYPERV */
1883
1884#define kvm_arch_pmi_in_guest(vcpu) \
1885 ((vcpu) && (vcpu)->arch.handling_intr_from_guest)
1886
1887void __init kvm_mmu_x86_module_init(void);
1888int kvm_mmu_vendor_module_init(void);
1889void kvm_mmu_vendor_module_exit(void);
1890
1891void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
1892int kvm_mmu_create(struct kvm_vcpu *vcpu);
1893void kvm_mmu_init_vm(struct kvm *kvm);
1894void kvm_mmu_uninit_vm(struct kvm *kvm);
1895
1896void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
1897 struct kvm_memory_slot *slot);
1898
1899void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
1900void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
1901void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
1902 const struct kvm_memory_slot *memslot,
1903 int start_level);
1904void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
1905 const struct kvm_memory_slot *memslot,
1906 int target_level);
1907void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
1908 const struct kvm_memory_slot *memslot,
1909 u64 start, u64 end,
1910 int target_level);
1911void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
1912 const struct kvm_memory_slot *memslot);
1913void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
1914 const struct kvm_memory_slot *memslot);
1915void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
1916void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
1917
1918int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
1919
1920int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1921 const void *val, int bytes);
1922
1923struct kvm_irq_mask_notifier {
1924 void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
1925 int irq;
1926 struct hlist_node link;
1927};
1928
1929void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
1930 struct kvm_irq_mask_notifier *kimn);
1931void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
1932 struct kvm_irq_mask_notifier *kimn);
1933void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
1934 bool mask);
1935
1936extern bool tdp_enabled;
1937
1938u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
1939
1940/*
1941 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
1942 * userspace I/O) to indicate that the emulation context
1943 * should be reused as is, i.e. skip initialization of
1944 * emulation context, instruction fetch and decode.
1945 *
1946 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
1947 * Indicates that only select instructions (tagged with
1948 * EmulateOnUD) should be emulated (to minimize the emulator
1949 * attack surface). See also EMULTYPE_TRAP_UD_FORCED.
1950 *
1951 * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
1952 * decode the instruction length. For use *only* by
1953 * kvm_x86_ops.skip_emulated_instruction() implementations if
1954 * EMULTYPE_COMPLETE_USER_EXIT is not set.
1955 *
1956 * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
1957 * retry native execution under certain conditions.
1958 * Can only be set in conjunction with EMULTYPE_PF.
1959 *
1960 * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
1961 * triggered by KVM's magic "force emulation" prefix,
1962 * which is opt in via module param (off by default).
1963 * Bypasses EmulateOnUD restriction despite emulating
1964 * due to an intercepted #UD (see EMULTYPE_TRAP_UD).
1965 * Used to test the full emulator from userspace.
1966 *
1967 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
1968 * backdoor emulation, which is opt in via module param.
1969 * VMware backdoor emulation handles select instructions
1970 * and reinjects the #GP for all other cases.
1971 *
1972 * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
1973 * case the CR2/GPA value passed on the stack is valid.
1974 *
1975 * EMULTYPE_COMPLETE_USER_EXIT - Set when the emulator should update interruptibility
1976 * state and inject single-step #DBs after skipping
1977 * an instruction (after completing userspace I/O).
1978 *
1979 * EMULTYPE_WRITE_PF_TO_SP - Set when emulating an intercepted page fault that
1980 * is attempting to write a gfn that contains one or
1981 * more of the PTEs used to translate the write itself,
1982 * and the owning page table is being shadowed by KVM.
1983 * If emulation of the faulting instruction fails and
1984 * this flag is set, KVM will exit to userspace instead
1985 * of retrying emulation as KVM cannot make forward
1986 * progress.
1987 *
1988 * If emulation fails for a write to guest page tables,
1989 * KVM unprotects (zaps) the shadow page for the target
1990 * gfn and resumes the guest to retry the non-emulatable
1991 * instruction (on hardware). Unprotecting the gfn
1992 * doesn't allow forward progress for a self-changing
1993 * access because doing so also zaps the translation for
1994 * the gfn, i.e. retrying the instruction will hit a
1995 * !PRESENT fault, which results in a new shadow page
1996 * and sends KVM back to square one.
1997 */
1998#define EMULTYPE_NO_DECODE (1 << 0)
1999#define EMULTYPE_TRAP_UD (1 << 1)
2000#define EMULTYPE_SKIP (1 << 2)
2001#define EMULTYPE_ALLOW_RETRY_PF (1 << 3)
2002#define EMULTYPE_TRAP_UD_FORCED (1 << 4)
2003#define EMULTYPE_VMWARE_GP (1 << 5)
2004#define EMULTYPE_PF (1 << 6)
2005#define EMULTYPE_COMPLETE_USER_EXIT (1 << 7)
2006#define EMULTYPE_WRITE_PF_TO_SP (1 << 8)
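
/*
 * Illustrative sketch (an assumption about a typical caller): emulating an
 * instruction after an intercepted #PF on an MMIO access, allowing the guest
 * to be resumed and retried natively when possible:
 *
 *	int r = kvm_emulate_instruction(vcpu, EMULTYPE_PF | EMULTYPE_ALLOW_RETRY_PF);
 */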
2007
2008int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
2009int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
2010 void *insn, int insn_len);
2011void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu,
2012 u64 *data, u8 ndata);
2013void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu);
2014
2015void kvm_enable_efer_bits(u64);
2016bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
2017int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
2018int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
2019int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
2020int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
2021int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
2022int kvm_emulate_as_nop(struct kvm_vcpu *vcpu);
2023int kvm_emulate_invd(struct kvm_vcpu *vcpu);
2024int kvm_emulate_mwait(struct kvm_vcpu *vcpu);
2025int kvm_handle_invalid_op(struct kvm_vcpu *vcpu);
2026int kvm_emulate_monitor(struct kvm_vcpu *vcpu);
2027
2028int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
2029int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
2030int kvm_emulate_halt(struct kvm_vcpu *vcpu);
2031int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu);
2032int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu);
2033int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
2034
2035void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
2036void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
2037int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
2038void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
2039
2040int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
2041 int reason, bool has_error_code, u32 error_code);
2042
2043void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0);
2044void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4);
2045int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
2046int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
2047int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
2048int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
2049int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
2050void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
2051unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
2052void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
2053int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu);
2054
2055int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
2056int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
2057
2058unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
2059void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
2060int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu);
2061
2062void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
2063void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
2064void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload);
2065void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
2066void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
2067void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
2068void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
2069 struct x86_exception *fault);
2070bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
2071bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
2072
2073static inline int __kvm_irq_line_state(unsigned long *irq_state,
2074 int irq_source_id, int level)
2075{
2076 /* Logical OR for level trig interrupt */
2077 if (level)
2078 __set_bit(irq_source_id, irq_state);
2079 else
2080 __clear_bit(irq_source_id, irq_state);
2081
2082 return !!(*irq_state);
2083}
2084
2085int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
2086void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
2087
2088void kvm_inject_nmi(struct kvm_vcpu *vcpu);
2089int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);
2090
2091void kvm_update_dr7(struct kvm_vcpu *vcpu);
2092
2093int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
2094void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
2095 ulong roots_to_free);
2096void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

bool kvm_apicv_activated(struct kvm *kvm);
bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu);
void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				      enum kvm_apicv_inhibit reason, bool set);
void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				    enum kvm_apicv_inhibit reason, bool set);

static inline void kvm_set_apicv_inhibit(struct kvm *kvm,
					 enum kvm_apicv_inhibit reason)
{
	kvm_set_or_clear_apicv_inhibit(kvm, reason, true);
}

static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
					   enum kvm_apicv_inhibit reason)
{
	kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
}
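/*
 * Usage sketch (illustrative only): code that is temporarily incompatible
 * with APIC virtualization can toggle an inhibit reason around the critical
 * region, assuming a suitable enum kvm_apicv_inhibit value such as
 * APICV_INHIBIT_REASON_PIT_REINJ is defined elsewhere:
 *
 *	kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
 *	... run with APICv inhibited ...
 *	kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
 */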

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			     u64 addr, unsigned long roots);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);

void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
		       int tdp_max_root_level, int tdp_huge_page_level);

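/*
 * With CONFIG_KVM_PRIVATE_MEM, any VM type other than the default x86 VM is
 * treated as potentially being backed by guest-private memory.
 */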
#ifdef CONFIG_KVM_PRIVATE_MEM
#define kvm_arch_has_private_mem(kvm) ((kvm)->arch.vm_type != KVM_X86_DEFAULT_VM)
#else
#define kvm_arch_has_private_mem(kvm) false
#endif

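/* Read and write the CPU's LDT selector via the SLDT/LLDT instructions. */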
static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

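/*
 * Layout of the TSS KVM builds for real-mode guests: the 0x68-byte base TSS
 * is followed by the interrupt redirection bitmap, the I/O permission bitmap
 * for all 64K ports, and one trailing byte (the I/O bitmap is expected to be
 * followed by a byte with all bits set).
 */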
#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE \
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

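/*
 * hflags bits.  When SMM emulation is enabled, KVM maintains two memslot
 * address spaces (regular and SMM) and selects between them based on whether
 * the vCPU is currently in SMM (HF_SMM_MASK).
 */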
#define HF_GUEST_MASK (1 << 0) /* VCPU is in guest-mode */

#ifdef CONFIG_KVM_SMM
#define HF_SMM_MASK (1 << 1)
#define HF_SMM_INSIDE_NMI_MASK (1 << 2)

# define KVM_MAX_NR_ADDRESS_SPACES 2
/* SMM is currently unsupported for guests with private memory. */
# define kvm_arch_nr_memslot_as_ids(kvm) (kvm_arch_has_private_mem(kvm) ? 1 : 2)
# define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
# define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
#else
# define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, 0)
#endif

int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_cpu_has_extint(struct kvm_vcpu *v);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit);

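/*
 * User-return MSR tracking: kvm_add_user_return_msr() registers an MSR in
 * the user-return list and returns its slot index (negative on failure),
 * kvm_find_user_return_msr() looks that index up again, and
 * kvm_set_user_return_msr() updates the value for a slot, writing only the
 * bits selected by the mask.
 */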
int kvm_add_user_return_msr(u32 msr);
int kvm_find_user_return_msr(u32 msr);
int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);

static inline bool kvm_is_supported_user_return_msr(u32 msr)
{
	return kvm_find_user_return_msr(msr) >= 0;
}

u64 kvm_scale_tsc(u64 tsc, u64 ratio);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier);
u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_make_scan_ioapic_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
				       unsigned long *vcpu_bitmap);

bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu);
bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);

void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
				     u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq);

static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
{
	/* We can only post Fixed and LowPrio IRQs */
	return (irq->delivery_mode == APIC_DM_FIXED ||
		irq->delivery_mode == APIC_DM_LOWEST);
}

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	static_call_cond(kvm_x86_vcpu_blocking)(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	static_call_cond(kvm_x86_vcpu_unblocking)(vcpu);
}

static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
	return default_cpu_present_to_apicid(mps_cpu);
#else
	WARN_ON_ONCE(1);
	return BAD_APICID;
#endif
}

int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);

#define KVM_CLOCK_VALID_FLAGS \
	(KVM_CLOCK_TSC_STABLE | KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC)

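/*
 * Quirks that userspace is allowed to disable, e.g. via the
 * KVM_CAP_DISABLE_QUIRKS2 capability.
 */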
#define KVM_X86_VALID_QUIRKS			\
	(KVM_X86_QUIRK_LINT0_REENABLED |	\
	 KVM_X86_QUIRK_CD_NW_CLEARED |		\
	 KVM_X86_QUIRK_LAPIC_MMIO_HOLE |	\
	 KVM_X86_QUIRK_OUT_7E_INC_RIP |		\
	 KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT |	\
	 KVM_X86_QUIRK_FIX_HYPERCALL_INSN |	\
	 KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS)

/*
 * KVM previously used a u32 field in kvm_run to indicate the hypercall was
 * initiated from long mode.  KVM now sets bit 0 to indicate long mode, but the
 * remaining 31 lower bits must be 0 to preserve ABI.
 */
#define KVM_EXIT_HYPERCALL_MBZ	GENMASK_ULL(31, 1)

#endif /* _ASM_X86_KVM_HOST_H */