/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */

#ifndef __ARCH_X86_KVM_HYPERV_H__
#define __ARCH_X86_KVM_HYPERV_H__

#include <linux/kvm_host.h>
#include "x86.h"

/* "Hv#1" signature */
#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648

/*
 * The #defines related to the synthetic debugger are required by KDNet, but
 * they are not documented in the Hyper-V TLFS because the synthetic debugger
 * functionality has been deprecated and is subject to removal in future
 * versions of Windows.
 */
#define HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS	0x40000080
#define HYPERV_CPUID_SYNDBG_INTERFACE			0x40000081
#define HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES	0x40000082

/*
 * Hyper-V synthetic debugger platform capabilities
 * These are HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX bits.
 */
#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING	BIT(1)

/* Hyper-V Synthetic debug options MSR */
#define HV_X64_MSR_SYNDBG_CONTROL		0x400000F1
#define HV_X64_MSR_SYNDBG_STATUS		0x400000F2
#define HV_X64_MSR_SYNDBG_SEND_BUFFER		0x400000F3
#define HV_X64_MSR_SYNDBG_RECV_BUFFER		0x400000F4
#define HV_X64_MSR_SYNDBG_PENDING_BUFFER	0x400000F5
#define HV_X64_MSR_SYNDBG_OPTIONS		0x400000FF

/* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */
#define HV_X64_SYNDBG_OPTION_USE_HCALLS		BIT(2)

static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm)
{
	return &kvm->arch.hyperv;
}

static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv;
}

static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return &hv_vcpu->synic;
}

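/* Map a SynIC back to the vCPU that embeds it. */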
static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
{
	struct kvm_vcpu_hv *hv_vcpu = container_of(synic, struct kvm_vcpu_hv, synic);

	return hv_vcpu->vcpu;
}

static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
{
	return &vcpu->kvm->arch.hyperv.hv_syndbg;
}

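/*
 * The per-vCPU Hyper-V context is allocated lazily; until it exists, the
 * VP index is simply the vCPU's index.
 */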
static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);

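/*
 * Hypercalls are usable only if userspace enabled Hyper-V emulation for
 * the vCPU and the guest has identified itself by writing
 * HV_X64_MSR_GUEST_OS_ID.
 */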
static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
}

int kvm_hv_hypercall(struct kvm_vcpu *vcpu);

void kvm_hv_irq_routing_update(struct kvm *kvm);
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu);

static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu,
						      int timer_index)
{
	return &to_hv_vcpu(vcpu)->stimer[timer_index];
}

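/*
 * stimer->index is the timer's position in the stimer[] array, so
 * stepping the pointer back by 'index' yields &stimer[0], from which
 * container_of() recovers the enclosing kvm_vcpu_hv.
 */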
static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu_hv *hv_vcpu;

	hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv,
			       stimer[0]);
	return hv_vcpu->vcpu;
}

static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu)
		return false;

	return !bitmap_empty(hv_vcpu->stimer_pending_bitmap,
			     HV_SYNIC_STIMER_COUNT);
}

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock);
void kvm_hv_request_tsc_page_update(struct kvm *kvm);

void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled);
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries);

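/*
 * Each vCPU carries two TLB flush FIFOs: one for requests targeting L1
 * and one for requests targeting the currently running L2 guest.
 */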
static inline struct kvm_vcpu_hv_tlb_flush_fifo *kvm_hv_get_tlb_flush_fifo(struct kvm_vcpu *vcpu,
									    bool is_guest_mode)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i = is_guest_mode ? HV_L2_TLB_FLUSH_FIFO :
				HV_L1_TLB_FLUSH_FIFO;

	return &hv_vcpu->tlb_flush_fifo[i];
}

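/*
 * Consume a pending KVM_REQ_HV_TLB_FLUSH and discard the queued entries
 * without processing them, e.g. because a full TLB flush has made them
 * redundant.
 */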
static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;

	if (!to_hv_vcpu(vcpu) || !kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
		return;

	tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));

	kfifo_reset_out(&tlb_flush_fifo->entries);
}

static inline bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu &&
	       (hv_vcpu->cpuid_cache.nested_eax & HV_X64_NESTED_DIRECT_FLUSH);
}

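/*
 * The hypercall code arrives in RCX for 64-bit hypercalls and in RAX
 * otherwise; only the low 16 bits hold the call code itself.
 */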
static inline bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u16 code;

	if (!hv_vcpu)
		return false;

	code = is_64_bit_hypercall(vcpu) ? kvm_rcx_read(vcpu) :
					   kvm_rax_read(vcpu);

	return (code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
		code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX);
}

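/*
 * Refresh the cached VP assist page, if one is enabled; returns 0 when
 * there is nothing to refresh.
 */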
static inline int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
{
	if (!to_hv_vcpu(vcpu))
		return 0;

	if (!kvm_hv_assist_page_enabled(vcpu))
		return 0;

	return kvm_hv_get_assist_page(vcpu);
}

int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);

#endif