/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/hwcap.h>
#include <asm/kvm_aia.h>
#include <asm/ptrace.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>

#define KVM_MAX_VCPUS			1024

#define KVM_HALT_POLL_NS_DEFAULT	500000

#define KVM_VCPU_MAX_FEATURES		0

#define KVM_IRQCHIP_NUM_PINS		1024

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET		KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP		KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I \
	KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_GVMA_VMID_ALL	KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL \
	KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE \
	KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(6)
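
/*
 * Illustrative sketch (not part of this header): the requests above use
 * the generic KVM request API. A producer raises a request and kicks the
 * target VCPU, e.g.:
 *
 *	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * and the VCPU run loop acts as the consumer via
 * kvm_check_request(KVM_REQ_VCPU_RESET, vcpu).
 */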

enum kvm_riscv_hfence_type {
	KVM_RISCV_HFENCE_UNKNOWN = 0,
	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
	KVM_RISCV_HFENCE_VVMA_GVA,
};

struct kvm_riscv_hfence {
	enum kvm_riscv_hfence_type type;
	unsigned long asid;
	unsigned long order;
	gpa_t addr;
	gpa_t size;
};

#define KVM_RISCV_VCPU_MAX_HFENCE	64
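
/*
 * Each queued entry describes one deferred fence: "type" selects the
 * fence variant, (addr, size) bound the guest address range, "order"
 * gives the log2 granularity of that range, and "asid" applies only to
 * the *_ASID_* variants.
 */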

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 ecall_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 csr_exit_user;
	u64 csr_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held,
	 * whereas reads happen without any lock held.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};
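
/*
 * A minimal reader sketch (assuming lockless readers pair with the
 * locked writer above via READ_ONCE()), e.g. checking for a stale VMID:
 *
 *	if (READ_ONCE(vmid->vmid_version) != current_version)
 *		request a fresh VMID before entering the guest
 *
 * where current_version stands for a global generation counter.
 */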

struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Guest Timer */
	struct kvm_guest_timer timer;

	/* AIA Guest/VM context */
	struct kvm_aia aia;
};

struct kvm_cpu_trap {
	unsigned long sepc;
	unsigned long scause;
	unsigned long stval;
	unsigned long htval;
	unsigned long htinst;
};
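
/*
 * Trap details captured at guest exit: sepc/scause/stval are the usual
 * S-mode trap CSRs, while htval and htinst come from the RISC-V
 * hypervisor extension and carry the faulting guest-physical address
 * and the (transformed) trapping instruction for guest faults.
 */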

struct kvm_cpu_context {
	unsigned long zero;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long sepc;
	unsigned long sstatus;
	unsigned long hstatus;
	union __riscv_fp_state fp;
	struct __riscv_v_ext_state vector;
};
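
/*
 * The GPR fields above are kept in register-file order. The low-level
 * world switch (__kvm_riscv_switch_to()) is expected to save/restore
 * these slots by fixed offset, so the layout is effectively an ABI
 * between C and assembly; do not reorder fields casually.
 */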

struct kvm_vcpu_csr {
	unsigned long vsstatus;
	unsigned long vsie;
	unsigned long vstvec;
	unsigned long vsscratch;
	unsigned long vsepc;
	unsigned long vscause;
	unsigned long vstval;
	unsigned long hvip;
	unsigned long vsatp;
	unsigned long scounteren;
	unsigned long senvcfg;
};

struct kvm_vcpu_config {
	u64 henvcfg;
	u64 hstateen0;
};

struct kvm_vcpu_smstateen_csr {
	unsigned long sstateen0;
};

struct kvm_vcpu_arch {
	/* VCPU ran at least once */
	bool ran_atleast_once;

	/* Last host CPU on which Guest VCPU exited */
	int last_exit_cpu;

	/* ISA feature bits (similar to MISA) */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

	/* Vendor, Arch, and Implementation details */
	unsigned long mvendorid;
	unsigned long marchid;
	unsigned long mimpid;

	/* Host CSRs saved/restored across guest entry and exit */
	unsigned long host_sscratch;
	unsigned long host_stvec;
	unsigned long host_scounteren;
	unsigned long host_senvcfg;
	unsigned long host_sstateen0;

	/* CPU context of Host */
	struct kvm_cpu_context host_context;

	/* CPU context of Guest VCPU */
	struct kvm_cpu_context guest_context;

	/* CPU CSR context of Guest VCPU */
	struct kvm_vcpu_csr guest_csr;

	/* CPU Smstateen CSR context of Guest VCPU */
	struct kvm_vcpu_smstateen_csr smstateen_csr;

	/* CPU context upon Guest VCPU reset */
	struct kvm_cpu_context guest_reset_context;

	/* CPU CSR context upon Guest VCPU reset */
	struct kvm_vcpu_csr guest_reset_csr;

	/*
	 * VCPU interrupts
	 *
	 * We have a lockless approach for tracking pending VCPU interrupts
	 * implemented using atomic bitops. The irqs_pending bitmap represents
	 * pending interrupts, whereas irqs_pending_mask represents the bits
	 * changed in irqs_pending. The approach is modeled on the
	 * multiple-producer, single-consumer problem, where the consumer is
	 * the VCPU itself.
	 */
#define KVM_RISCV_VCPU_NR_IRQS	64
	DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
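
	/*
	 * A producer-side sketch (assuming the generic kvm_vcpu_kick() API):
	 *
	 *	set_bit(irq, vcpu->arch.irqs_pending);
	 *	smp_mb__before_atomic();
	 *	set_bit(irq, vcpu->arch.irqs_pending_mask);
	 *	kvm_vcpu_kick(vcpu);
	 *
	 * The VCPU then folds the pending bits into the guest's hvip CSR
	 * from its run loop (see kvm_riscv_vcpu_flush_interrupts()).
	 */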

	/* VCPU Timer */
	struct kvm_vcpu_timer timer;

	/* HFENCE request queue */
	spinlock_t hfence_lock;
	unsigned long hfence_head;
	unsigned long hfence_tail;
	struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];
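
	/*
	 * The queue above is a ring buffer: hfence_head/hfence_tail index
	 * hfence_queue[] and are protected by hfence_lock. Producers are
	 * the kvm_riscv_hfence_*() helpers declared below; the consumer is
	 * the VCPU itself in kvm_riscv_hfence_process().
	 */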

	/* MMIO instruction details */
	struct kvm_mmio_decode mmio_decode;

	/* CSR instruction details */
	struct kvm_csr_decode csr_decode;

	/* SBI context */
	struct kvm_vcpu_sbi_context sbi_context;

	/* AIA VCPU context */
	struct kvm_vcpu_aia aia_context;

	/* Cache pages needed to program page tables with spinlock held */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* VCPU power-off state */
	bool power_off;

	/* Don't run the VCPU (blocked) */
	bool pause;

	/* Performance monitoring context */
	struct kvm_pmu pmu_context;

	/* 'static' configurations which are set only once */
	struct kvm_vcpu_config cfg;

	/* SBI steal-time accounting */
	struct {
		gpa_t shmem;
		u64 last_steal;
	} sta;
};
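
/*
 * Steal-time accounting: sta.shmem is the guest-physical address of the
 * shared-memory area that the guest registers through the SBI STA
 * extension, and sta.last_steal snapshots the accumulated steal counter.
 * KVM_REQ_STEAL_UPDATE asks the VCPU to refresh the area via
 * kvm_riscv_vcpu_record_steal_time().
 */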

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER	12

void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask);
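
/*
 * Usage sketch (illustrative): hmask is a bitmap of VCPU ids relative to
 * hbase, in the style of the SBI rfence calls, and hbase == -1UL targets
 * all VCPUs. After unmapping a 4 KiB guest page, the G-stage TLBs of
 * every VCPU could be flushed with:
 *
 *	kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, gpa, PAGE_SIZE,
 *				       PAGE_SHIFT);
 *
 * where PAGE_SHIFT matches KVM_RISCV_GSTAGE_TLB_MIN_ORDER above.
 */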

int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
			     phys_addr_t hpa, unsigned long size,
			     bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
			      unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void __init kvm_riscv_gstage_mode_detect(void);
unsigned long __init kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

void __init kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
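
/*
 * VMID lifecycle sketch (a summary of the intended flow, not new API):
 * kvm_riscv_gstage_vmid_update() runs on each VCPU entry; when
 * kvm_riscv_gstage_vmid_ver_changed() reports a new VMID generation, a
 * fresh VMID is assigned and KVM_REQ_UPDATE_HGATP makes the VCPUs
 * reprogram the hgatp CSR via kvm_riscv_gstage_update_hgatp().
 */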

int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);

void __kvm_riscv_unpriv_trap(void);

unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
				    u64 __user *uindices);
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg);
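
/*
 * Userspace-side sketch (illustrative, not kernel code): the get/set
 * helpers above back the generic ONE_REG interface, so a VMM reads a
 * register roughly as follows, where "id" is a fully encoded
 * KVM_REG_RISCV_* identifier from <uapi/asm/kvm.h>:
 *
 *	unsigned long val;
 *	struct kvm_one_reg reg = {
 *		.id   = id,
 *		.addr = (unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */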

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);

void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu);

#endif /* __RISCV_KVM_HOST_H__ */