1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
4 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
5 *
6 * KVM Xen emulation
7 */
8
9#ifndef __ARCH_X86_KVM_XEN_H__
10#define __ARCH_X86_KVM_XEN_H__
11
12#ifdef CONFIG_KVM_XEN
13#include <linux/jump_label_ratelimit.h>
14
15extern struct static_key_false_deferred kvm_xen_enabled;
16
17int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
18int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
19int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
20int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
21int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
22int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
23int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
24void kvm_xen_destroy_vm(struct kvm *kvm);
25
26static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
27{
28 return static_branch_unlikely(&kvm_xen_enabled.key) &&
29 kvm->arch.xen_hvm_config.msr;
30}
31
32static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
33{
34 return static_branch_unlikely(&kvm_xen_enabled.key) &&
35 (kvm->arch.xen_hvm_config.flags &
36 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
37}
38
39static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
40{
41 if (static_branch_unlikely(&kvm_xen_enabled.key) &&
42 vcpu->arch.xen.vcpu_info_set && vcpu->kvm->arch.xen.upcall_vector)
43 return __kvm_xen_has_interrupt(vcpu);
44
45 return 0;
46}
47#else
/*
 * Stub for !CONFIG_KVM_XEN: always returns 1, i.e. the MSR write was
 * not handled here.  NOTE(review): presumably the caller treats nonzero
 * as "reject/unhandled" — confirm against the MSR write path in x86.c.
 */
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	return 1;
}
52
/* Stub for !CONFIG_KVM_XEN: no Xen state exists, nothing to tear down. */
static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}
56
/* Stub for !CONFIG_KVM_XEN: the Xen hypercall MSR can never be enabled. */
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}
61
/* Stub for !CONFIG_KVM_XEN: Xen hypercall interception is never enabled. */
static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}
66
/* Stub for !CONFIG_KVM_XEN: no Xen event channel upcall can be pending. */
static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}
71#endif
72
73int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
74
75#include <asm/pvclock-abi.h>
76#include <asm/xen/interface.h>
77#include <xen/interface/vcpu.h>
78
79void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);
80
/*
 * Report the vCPU as RUNSTATE_running in the guest-visible Xen
 * runstate area (thin wrapper around kvm_xen_update_runstate_guest()).
 */
static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
{
	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
}
85
86static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
87{
88 /*
89 * If the vCPU wasn't preempted but took a normal exit for
90 * some reason (hypercalls, I/O, etc.), that is accounted as
91 * still RUNSTATE_running, as the VMM is still operating on
92 * behalf of the vCPU. Only if the VMM does actually block
93 * does it need to enter RUNSTATE_blocked.
94 */
95 if (vcpu->preempted)
96 kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
97}
98
99/* 32-bit compatibility definitions, also used natively in 32-bit build */
/* 32-bit layout of Xen's arch_vcpu_info — ABI, do not reorder fields. */
struct compat_arch_vcpu_info {
	unsigned int cr2;	/* guest CR2 at time of upcall */
	unsigned int pad[5];
};
104
/* 32-bit layout of Xen's vcpu_info — ABI, do not reorder fields. */
struct compat_vcpu_info {
	uint8_t evtchn_upcall_pending;	/* event channel upcall pending flag */
	uint8_t evtchn_upcall_mask;	/* upcall delivery masked when nonzero */
	uint16_t pad;
	uint32_t evtchn_pending_sel;	/* selector into shared evtchn_pending[] */
	struct compat_arch_vcpu_info arch;
	struct pvclock_vcpu_time_info time;	/* per-vCPU pvclock time info */
}; /* 64 bytes (x86) */
113
/* 32-bit layout of Xen's arch_shared_info — ABI, do not reorder fields. */
struct compat_arch_shared_info {
	unsigned int max_pfn;			/* highest guest pfn + 1 */
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;			/* high 32 bits of wallclock seconds */
};
123
/* 32-bit layout of Xen's shared_info page — ABI, do not reorder fields. */
struct compat_shared_info {
	struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
	uint32_t evtchn_pending[32];	/* event channel pending bitmap */
	uint32_t evtchn_mask[32];	/* event channel mask bitmap */
	struct pvclock_wall_clock wc;	/* wallclock time at boot */
	struct compat_arch_shared_info arch;
};
131
/*
 * 32-bit layout of Xen's vcpu_runstate_info.  Packed because the 32-bit
 * ABI places the uint64_t members at 4-byte alignment, unlike the
 * natural 8-byte alignment a 64-bit build would use.
 */
struct compat_vcpu_runstate_info {
	int state;			/* current RUNSTATE_* value */
	uint64_t state_entry_time;	/* time (ns) current state was entered */
	uint64_t time[4];		/* ns spent in each RUNSTATE_* */
} __attribute__((packed));
137
138#endif /* __ARCH_X86_KVM_XEN_H__ */