/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

#include <asm/asm.h>
#include <asm/inst.h>
#include <asm/mipsregs.h>

#include <kvm/iodev.h>

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S)						\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)						\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
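
/*
 * For illustration: each register id packs the CP0 register number and
 * select field as (8 * reg + sel), so e.g. KVM_REG_MIPS_CP0_STATUS below
 * is KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * 12 + 0).
 */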

#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_CONTEXTCONFIG	MIPS_CP0_32(4, 1)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG	MIPS_CP0_64(4, 3)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_SEGCTL0	MIPS_CP0_64(5, 2)
#define KVM_REG_MIPS_CP0_SEGCTL1	MIPS_CP0_64(5, 3)
#define KVM_REG_MIPS_CP0_SEGCTL2	MIPS_CP0_64(5, 4)
#define KVM_REG_MIPS_CP0_PWBASE		MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD	MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE		MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL		MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_BADINSTR	MIPS_CP0_32(8, 1)
#define KVM_REG_MIPS_CP0_BADINSTRP	MIPS_CP0_32(8, 2)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_INTCTL		MIPS_CP0_32(12, 1)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG6	MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_MAARI		MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_DIAG		MIPS_CP0_32(22, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)


#define KVM_MAX_VCPUS		16

#define KVM_HALT_POLL_NS_DEFAULT 500000

extern unsigned long GUESTID_MASK;
extern unsigned long GUESTID_FIRST_VERSION;
extern unsigned long GUESTID_VERSION_MASK;

#define KVM_INVALID_ADDR		0xdeadbeef

/*
 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 * PAGE_OFFSET.
 */

#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)

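/*
 * IS_ERR_VALUE() accepts any address in the error range at the top of the
 * address space, so it catches both KVM_HVA_ERR_BAD (-1UL) and
 * KVM_HVA_ERR_RO_BAD (-2UL) defined above.
 */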
static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 wait_exits;
	u64 cache_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 cop_unusable_exits;
	u64 tlbmod_exits;
	u64 tlbmiss_ld_exits;
	u64 tlbmiss_st_exits;
	u64 addrerr_st_exits;
	u64 addrerr_ld_exits;
	u64 syscall_exits;
	u64 resvd_inst_exits;
	u64 break_inst_exits;
	u64 trap_inst_exits;
	u64 msa_fpe_exits;
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
	u64 vz_gpsi_exits;
	u64 vz_gsfc_exits;
	u64 vz_hc_exits;
	u64 vz_grr_exits;
	u64 vz_gva_exits;
	u64 vz_ghfc_exits;
	u64 vz_gpa_exits;
	u64 vz_resvd_exits;
#ifdef CONFIG_CPU_LOONGSON64
	u64 vz_cpucfg_exits;
#endif
};

struct kvm_arch_memory_slot {
};

#ifdef CONFIG_CPU_LOONGSON64
struct ipi_state {
	uint32_t status;
	uint32_t en;
	uint32_t set;
	uint32_t clear;
	uint64_t buf[4];
};

struct loongson_kvm_ipi;

struct ipi_io_device {
	int node_id;
	struct loongson_kvm_ipi *ipi;
	struct kvm_io_device device;
};

struct loongson_kvm_ipi {
	spinlock_t lock;
	struct kvm *kvm;
	struct ipi_state ipistate[16];
	struct ipi_io_device dev_ipi[4];
};
#endif

struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
	/* Mask of CPUs needing GPA ASID flush */
	cpumask_t asid_flush_mask;
#ifdef CONFIG_CPU_LOONGSON64
	struct loongson_kvm_ipi ipi;
#endif
};

#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};

/*
 * Coprocessor 0 register names
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_DIAG		22
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31

#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

#define MIPS_CP0_GUESTCTL2	10
#define MIPS_CP0_GUESTCTL2_SEL	5
#define MIPS_CP0_GTOFFSET	12
#define MIPS_CP0_GTOFFSET_SEL	7

/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_HYPERCALL,	/* HYPCALL instruction */
};

#if defined(CONFIG_64BIT)
#define VPN2_MASK		GENMASK(cpu_vmbits - 1, 13)
#else
#define VPN2_MASK		0xffffe000
#endif
#define KVM_ENTRYHI_ASID	cpu_asid_mask(&boot_cpu_data)
#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))
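
/*
 * For illustration: a guest TLB entry (x) matches a virtual address/ASID (y)
 * when both TLB_HI_VPN2_HIT(x, y) and TLB_HI_ASID_HIT(x, y) hold; the even
 * or odd page of the matching pair is then selected with TLB_LO_IDX() and
 * checked with TLB_IS_VALID()/TLB_IS_DIRTY().
 */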

struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};

#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2

struct kvm_vcpu_arch {
	void *guest_ebase;
	int (*vcpu_run)(struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_entryhi;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_guestctl0;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Resume PC after MMIO completion */
	unsigned long io_pc;
	/* GPR used as IO source/target */
	u32 io_gpr;

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vcpu's vzguestid is different on each host cpu in an smp system */
	u32 vzguestid[NR_CPUS];

	/* wired guest TLB entries */
	struct kvm_mips_tlb *wired_tlb;
	unsigned int wired_tlb_limit;
	unsigned int wired_tlb_used;

	/* emulated guest MAAR registers */
	unsigned long maar[6];

	/* Last CPU the VCPU state was loaded on */
	int last_sched_cpu;
	/* Last CPU the VCPU actually executed guest code on */
	int last_exec_cpu;

	/* WAIT executed */
	int wait;

	u8 fpu_enabled;
	u8 msa_enabled;
};

static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __stringify(LONG_LL) " %0, %1		\n"
		"	or	%0, %2				\n"
		"	" __stringify(LONG_SC) " %0, %1		\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __stringify(LONG_LL) " %0, %1		\n"
		"	and	%0, %2				\n"
		"	" __stringify(LONG_SC) " %0, %1		\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __stringify(LONG_LL) " %0, %1		\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	" __stringify(LONG_SC) " %0, %1		\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));
}
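
/*
 * Note: each helper above is a load-linked/store-conditional retry loop: the
 * SC writes 1 to temp on success, or 0 if the link was broken by an
 * intervening write or exception, in which case the modification is retried.
 */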

/* Guest register types, used in accessor build below */
#define __KVMT32	u32
#define __KVMTl		unsigned long

/*
 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
 * These operate on the saved guest C0 state in RAM.
 */

/* Generate saved context simple accessors */
#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
{									\
	return cop0->reg[(_reg)][(sel)];				\
}									\
static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] = val;					\
}
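
/*
 * For illustration: __BUILD_KVM_RW_SAVED(status, 32, MIPS_CP0_STATUS, 0)
 * expands to kvm_read_sw_gc0_status() returning u32 and
 * kvm_write_sw_gc0_status() taking a u32, both operating on
 * cop0->reg[MIPS_CP0_STATUS][0].
 */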

/* Generate saved context bitwise modifiers */
#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] |= val;				\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] &= ~val;				\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	unsigned long _mask = mask;					\
	cop0->reg[(_reg)][(sel)] &= ~_mask;				\
	cop0->reg[(_reg)][(sel)] |= val & _mask;			\
}

/* Generate saved context atomic bitwise modifiers */
#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
					val);				\
}

/*
 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
 * These operate on the VZ guest C0 context in hardware.
 */

/* Generate VZ guest context simple accessors */
#define __BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
{									\
	return read_gc0_##name();					\
}									\
static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	write_gc0_##name(val);						\
}

/* Generate VZ guest context bitwise modifiers */
#define __BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	set_gc0_##name(val);						\
}									\
static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	clear_gc0_##name(val);						\
}									\
static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	change_gc0_##name(mask, val);					\
}

/* Generate VZ guest context save/restore to/from saved context */
#define __BUILD_KVM_SAVE_VZ(name, _reg, sel)				\
static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0)	\
{									\
	write_gc0_##name(cop0->reg[(_reg)][(sel)]);			\
}									\
static inline void kvm_save_gc0_##name(struct mips_coproc *cop0)	\
{									\
	cop0->reg[(_reg)][(sel)] = read_gc0_##name();			\
}
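
/*
 * For illustration: the generated kvm_save_gc0_*()/kvm_restore_gc0_*() pairs
 * copy a register between the hardware guest context and the mips_coproc
 * image in RAM, e.g. when vcpu state is saved from or loaded onto a host CPU.
 */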

/*
 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
 * These wrap a set of operations to provide them with a different name.
 */

/* Generate simple accessor wrapper */
#define __BUILD_KVM_RW_WRAP(name1, name2, type)				\
static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0)	\
{									\
	return kvm_read_##name2(cop0);					\
}									\
static inline void kvm_write_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_write_##name2(cop0, val);					\
}

/* Generate bitwise modifier wrapper */
#define __BUILD_KVM_SET_WRAP(name1, name2, type)			\
static inline void kvm_set_##name1(struct mips_coproc *cop0,		\
				   __KVMT##type val)			\
{									\
	kvm_set_##name2(cop0, val);					\
}									\
static inline void kvm_clear_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_clear_##name2(cop0, val);					\
}									\
static inline void kvm_change_##name1(struct mips_coproc *cop0,		\
				      __KVMT##type mask,		\
				      __KVMT##type val)			\
{									\
	kvm_change_##name2(cop0, mask, val);				\
}

/*
 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
 * These generate accessors operating on the saved context in RAM, and wrap
 * them with the common guest C0 accessors (for use by common emulation code).
 */

#define __BUILD_KVM_RW_SW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_SET_SW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel)			\
	__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

/*
 * VZ (hardware assisted virtualisation)
 * These macros use the active guest state in VZ mode (hardware registers).
 */

/*
 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
 * These generate accessors operating on the VZ guest context in hardware, and
 * wrap them with the common guest C0 accessors (for use by common emulation
 * code).
 *
 * Accessors operating on the saved context in RAM are also generated to allow
 * convenient explicit saving and restoring of the state.
 */

#define __BUILD_KVM_RW_HW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type)	\
	__BUILD_KVM_SAVE_VZ(name, _reg, sel)

#define __BUILD_KVM_SET_HW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)

/*
 * We can't do atomic modifications of COP0 state if hardware can modify it.
 * Races must be handled explicitly.
 */
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_SET_HW

/*
 * Define accessors for CP0 registers that are accessible to the guest. These
 * are primarily used by common emulation code, which may need to access the
 * registers differently depending on the implementation.
 *
 *    fns_hw/sw                 name      type    reg num        select
 */
__BUILD_KVM_RW_HW(index,          32, MIPS_CP0_TLB_INDEX,    0)
__BUILD_KVM_RW_HW(entrylo0,       l,  MIPS_CP0_TLB_LO0,      0)
__BUILD_KVM_RW_HW(entrylo1,       l,  MIPS_CP0_TLB_LO1,      0)
__BUILD_KVM_RW_HW(context,        l,  MIPS_CP0_TLB_CONTEXT,  0)
__BUILD_KVM_RW_HW(contextconfig,  32, MIPS_CP0_TLB_CONTEXT,  1)
__BUILD_KVM_RW_HW(userlocal,      l,  MIPS_CP0_TLB_CONTEXT,  2)
__BUILD_KVM_RW_HW(xcontextconfig, l,  MIPS_CP0_TLB_CONTEXT,  3)
__BUILD_KVM_RW_HW(pagemask,       l,  MIPS_CP0_TLB_PG_MASK,  0)
__BUILD_KVM_RW_HW(pagegrain,      32, MIPS_CP0_TLB_PG_MASK,  1)
__BUILD_KVM_RW_HW(segctl0,        l,  MIPS_CP0_TLB_PG_MASK,  2)
__BUILD_KVM_RW_HW(segctl1,        l,  MIPS_CP0_TLB_PG_MASK,  3)
__BUILD_KVM_RW_HW(segctl2,        l,  MIPS_CP0_TLB_PG_MASK,  4)
__BUILD_KVM_RW_HW(pwbase,         l,  MIPS_CP0_TLB_PG_MASK,  5)
__BUILD_KVM_RW_HW(pwfield,        l,  MIPS_CP0_TLB_PG_MASK,  6)
__BUILD_KVM_RW_HW(pwsize,         l,  MIPS_CP0_TLB_PG_MASK,  7)
__BUILD_KVM_RW_HW(wired,          32, MIPS_CP0_TLB_WIRED,    0)
__BUILD_KVM_RW_HW(pwctl,          32, MIPS_CP0_TLB_WIRED,    6)
__BUILD_KVM_RW_HW(hwrena,         32, MIPS_CP0_HWRENA,       0)
__BUILD_KVM_RW_HW(badvaddr,       l,  MIPS_CP0_BAD_VADDR,    0)
__BUILD_KVM_RW_HW(badinstr,       32, MIPS_CP0_BAD_VADDR,    1)
__BUILD_KVM_RW_HW(badinstrp,      32, MIPS_CP0_BAD_VADDR,    2)
__BUILD_KVM_RW_SW(count,          32, MIPS_CP0_COUNT,        0)
__BUILD_KVM_RW_HW(entryhi,        l,  MIPS_CP0_TLB_HI,       0)
__BUILD_KVM_RW_HW(compare,        32, MIPS_CP0_COMPARE,      0)
__BUILD_KVM_RW_HW(status,         32, MIPS_CP0_STATUS,       0)
__BUILD_KVM_RW_HW(intctl,         32, MIPS_CP0_STATUS,       1)
__BUILD_KVM_RW_HW(cause,          32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_RW_HW(epc,            l,  MIPS_CP0_EXC_PC,       0)
__BUILD_KVM_RW_SW(prid,           32, MIPS_CP0_PRID,         0)
__BUILD_KVM_RW_HW(ebase,          l,  MIPS_CP0_PRID,         1)
__BUILD_KVM_RW_HW(config,         32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_RW_HW(config1,        32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_RW_HW(config2,        32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_RW_HW(config3,        32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_RW_HW(config4,        32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_RW_HW(config5,        32, MIPS_CP0_CONFIG,       5)
__BUILD_KVM_RW_HW(config6,        32, MIPS_CP0_CONFIG,       6)
__BUILD_KVM_RW_HW(config7,        32, MIPS_CP0_CONFIG,       7)
__BUILD_KVM_RW_SW(maari,          l,  MIPS_CP0_LLADDR,       2)
__BUILD_KVM_RW_HW(xcontext,       l,  MIPS_CP0_TLB_XCONTEXT, 0)
__BUILD_KVM_RW_HW(errorepc,       l,  MIPS_CP0_ERROR_PC,     0)
__BUILD_KVM_RW_HW(kscratch1,      l,  MIPS_CP0_DESAVE,       2)
__BUILD_KVM_RW_HW(kscratch2,      l,  MIPS_CP0_DESAVE,       3)
__BUILD_KVM_RW_HW(kscratch3,      l,  MIPS_CP0_DESAVE,       4)
__BUILD_KVM_RW_HW(kscratch4,      l,  MIPS_CP0_DESAVE,       5)
__BUILD_KVM_RW_HW(kscratch5,      l,  MIPS_CP0_DESAVE,       6)
__BUILD_KVM_RW_HW(kscratch6,      l,  MIPS_CP0_DESAVE,       7)

/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status,        32, MIPS_CP0_STATUS,       0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause,      32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_SET_HW(ebase,         l,  MIPS_CP0_PRID,         1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config,     32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_SET_SAVED(config1,    32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_SET_SAVED(config2,    32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_SET_SAVED(config3,    32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_SET_SAVED(config4,    32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_SET_SAVED(config5,    32, MIPS_CP0_CONFIG,       5)
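
/*
 * For illustration: after the expansions above, common emulation code can use
 * the wrapped names, e.g. kvm_read_c0_guest_status(cop0), which on VZ reads
 * the live guest Status via read_gc0_status(), or
 * kvm_set_c0_guest_cause(cop0, CAUSEF_TI) via the modifiers built above.
 */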

/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
		vcpu->fpu_enabled;
}

static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}

struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_extension)(struct kvm *kvm, long ext);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*prepare_flush_shadow)(struct kvm *kvm);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);

/* TLB handling */
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu, bool write_fault);

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa);
void kvm_vz_local_flush_roottlb_all_guests(void);
void kvm_vz_local_flush_guesttlb_all(void);
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
#ifdef CONFIG_CPU_LOONGSON64
void kvm_loongson_clear_guest_vtlb(void);
void kvm_loongson_clear_guest_ftlb(void);
#endif

/* MMU handling */

bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

#define KVM_ARCH_WANT_MMU_NOTIFIER

/* Emulation */
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);

/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
{
	unsigned long badvaddr = vcpu->host_cp0_badvaddr;
	unsigned long epc = msk_isa16_mode(vcpu->pc);
	u32 cause = vcpu->host_cp0_cause;

	if (epc == badvaddr)
		return true;

	/*
	 * Branches may be 32-bit or 16-bit instructions.
	 * This isn't exact, but we don't really support MIPS16 or microMIPS
	 * yet in KVM anyway.
	 */
	if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
		return true;

	return false;
}

extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);

u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

/* fairly internal functions requiring some care to use */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift);

void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause,
					    struct kvm_vcpu *vcpu);

/* COP0 */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);

/* Hypercalls (hypcall.c) */

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_mips_interrupt *irq);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
					 struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
int kvm_arch_flush_remote_tlb(struct kvm *kvm);

#endif /* __MIPS_KVM_HOST_H__ */
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10#ifndef __MIPS_KVM_HOST_H__
11#define __MIPS_KVM_HOST_H__
12
13#include <linux/cpumask.h>
14#include <linux/mutex.h>
15#include <linux/hrtimer.h>
16#include <linux/interrupt.h>
17#include <linux/types.h>
18#include <linux/kvm.h>
19#include <linux/kvm_types.h>
20#include <linux/threads.h>
21#include <linux/spinlock.h>
22
23#include <asm/inst.h>
24#include <asm/mipsregs.h>
25
26#include <kvm/iodev.h>
27
28/* MIPS KVM register ids */
29#define MIPS_CP0_32(_R, _S) \
30 (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
31
32#define MIPS_CP0_64(_R, _S) \
33 (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
34
35#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
36#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
37#define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0)
38#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
39#define KVM_REG_MIPS_CP0_CONTEXTCONFIG MIPS_CP0_32(4, 1)
40#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
41#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG MIPS_CP0_64(4, 3)
42#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
43#define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
44#define KVM_REG_MIPS_CP0_SEGCTL0 MIPS_CP0_64(5, 2)
45#define KVM_REG_MIPS_CP0_SEGCTL1 MIPS_CP0_64(5, 3)
46#define KVM_REG_MIPS_CP0_SEGCTL2 MIPS_CP0_64(5, 4)
47#define KVM_REG_MIPS_CP0_PWBASE MIPS_CP0_64(5, 5)
48#define KVM_REG_MIPS_CP0_PWFIELD MIPS_CP0_64(5, 6)
49#define KVM_REG_MIPS_CP0_PWSIZE MIPS_CP0_64(5, 7)
50#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
51#define KVM_REG_MIPS_CP0_PWCTL MIPS_CP0_32(6, 6)
52#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
53#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
54#define KVM_REG_MIPS_CP0_BADINSTR MIPS_CP0_32(8, 1)
55#define KVM_REG_MIPS_CP0_BADINSTRP MIPS_CP0_32(8, 2)
56#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
57#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
58#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
59#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
60#define KVM_REG_MIPS_CP0_INTCTL MIPS_CP0_32(12, 1)
61#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
62#define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
63#define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0)
64#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
65#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
66#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
67#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
68#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
69#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
70#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
71#define KVM_REG_MIPS_CP0_CONFIG6 MIPS_CP0_32(16, 6)
72#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
73#define KVM_REG_MIPS_CP0_MAARI MIPS_CP0_64(17, 2)
74#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
75#define KVM_REG_MIPS_CP0_DIAG MIPS_CP0_32(22, 0)
76#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
77#define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
78#define KVM_REG_MIPS_CP0_KSCRATCH2 MIPS_CP0_64(31, 3)
79#define KVM_REG_MIPS_CP0_KSCRATCH3 MIPS_CP0_64(31, 4)
80#define KVM_REG_MIPS_CP0_KSCRATCH4 MIPS_CP0_64(31, 5)
81#define KVM_REG_MIPS_CP0_KSCRATCH5 MIPS_CP0_64(31, 6)
82#define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7)
83
84
85#define KVM_MAX_VCPUS 16
86#define KVM_USER_MEM_SLOTS 16
87/* memory slots that does not exposed to userspace */
88#define KVM_PRIVATE_MEM_SLOTS 0
89
90#define KVM_HALT_POLL_NS_DEFAULT 500000
91
92#ifdef CONFIG_KVM_MIPS_VZ
93extern unsigned long GUESTID_MASK;
94extern unsigned long GUESTID_FIRST_VERSION;
95extern unsigned long GUESTID_VERSION_MASK;
96#endif
97
98
99/*
100 * Special address that contains the comm page, used for reducing # of traps
101 * This needs to be within 32Kb of 0x0 (so the zero register can be used), but
102 * preferably not at 0x0 so that most kernel NULL pointer dereferences can be
103 * caught.
104 */
105#define KVM_GUEST_COMMPAGE_ADDR ((PAGE_SIZE > 0x8000) ? 0 : \
106 (0x8000 - PAGE_SIZE))
107
108#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
109 ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
110
111#define KVM_GUEST_KUSEG 0x00000000UL
112#define KVM_GUEST_KSEG0 0x40000000UL
113#define KVM_GUEST_KSEG1 0x40000000UL
114#define KVM_GUEST_KSEG23 0x60000000UL
115#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000)
116#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
117
118#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
119#define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
120#define KVM_GUEST_CKSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
121
122/*
123 * Map an address to a certain kernel segment
124 */
125#define KVM_GUEST_KSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
126#define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
127#define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
128
129#define KVM_INVALID_PAGE 0xdeadbeef
130#define KVM_INVALID_ADDR 0xdeadbeef
131
132/*
133 * EVA has overlapping user & kernel address spaces, so user VAs may be >
134 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
135 * PAGE_OFFSET.
136 */
137
138#define KVM_HVA_ERR_BAD (-1UL)
139#define KVM_HVA_ERR_RO_BAD (-2UL)
140
141static inline bool kvm_is_error_hva(unsigned long addr)
142{
143 return IS_ERR_VALUE(addr);
144}
145
146struct kvm_vm_stat {
147 ulong remote_tlb_flush;
148};
149
150struct kvm_vcpu_stat {
151 u64 wait_exits;
152 u64 cache_exits;
153 u64 signal_exits;
154 u64 int_exits;
155 u64 cop_unusable_exits;
156 u64 tlbmod_exits;
157 u64 tlbmiss_ld_exits;
158 u64 tlbmiss_st_exits;
159 u64 addrerr_st_exits;
160 u64 addrerr_ld_exits;
161 u64 syscall_exits;
162 u64 resvd_inst_exits;
163 u64 break_inst_exits;
164 u64 trap_inst_exits;
165 u64 msa_fpe_exits;
166 u64 fpe_exits;
167 u64 msa_disabled_exits;
168 u64 flush_dcache_exits;
169#ifdef CONFIG_KVM_MIPS_VZ
170 u64 vz_gpsi_exits;
171 u64 vz_gsfc_exits;
172 u64 vz_hc_exits;
173 u64 vz_grr_exits;
174 u64 vz_gva_exits;
175 u64 vz_ghfc_exits;
176 u64 vz_gpa_exits;
177 u64 vz_resvd_exits;
178#ifdef CONFIG_CPU_LOONGSON64
179 u64 vz_cpucfg_exits;
180#endif
181#endif
182 u64 halt_successful_poll;
183 u64 halt_attempted_poll;
184 u64 halt_poll_success_ns;
185 u64 halt_poll_fail_ns;
186 u64 halt_poll_invalid;
187 u64 halt_wakeup;
188};
189
190struct kvm_arch_memory_slot {
191};
192
193#ifdef CONFIG_CPU_LOONGSON64
194struct ipi_state {
195 uint32_t status;
196 uint32_t en;
197 uint32_t set;
198 uint32_t clear;
199 uint64_t buf[4];
200};
201
202struct loongson_kvm_ipi;
203
204struct ipi_io_device {
205 int node_id;
206 struct loongson_kvm_ipi *ipi;
207 struct kvm_io_device device;
208};
209
210struct loongson_kvm_ipi {
211 spinlock_t lock;
212 struct kvm *kvm;
213 struct ipi_state ipistate[16];
214 struct ipi_io_device dev_ipi[4];
215};
216#endif
217
218struct kvm_arch {
219 /* Guest physical mm */
220 struct mm_struct gpa_mm;
221 /* Mask of CPUs needing GPA ASID flush */
222 cpumask_t asid_flush_mask;
223#ifdef CONFIG_CPU_LOONGSON64
224 struct loongson_kvm_ipi ipi;
225#endif
226};
227
228#define N_MIPS_COPROC_REGS 32
229#define N_MIPS_COPROC_SEL 8
230
231struct mips_coproc {
232 unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
233#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
234 unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
235#endif
236};
237
238/*
239 * Coprocessor 0 register names
240 */
241#define MIPS_CP0_TLB_INDEX 0
242#define MIPS_CP0_TLB_RANDOM 1
243#define MIPS_CP0_TLB_LOW 2
244#define MIPS_CP0_TLB_LO0 2
245#define MIPS_CP0_TLB_LO1 3
246#define MIPS_CP0_TLB_CONTEXT 4
247#define MIPS_CP0_TLB_PG_MASK 5
248#define MIPS_CP0_TLB_WIRED 6
249#define MIPS_CP0_HWRENA 7
250#define MIPS_CP0_BAD_VADDR 8
251#define MIPS_CP0_COUNT 9
252#define MIPS_CP0_TLB_HI 10
253#define MIPS_CP0_COMPARE 11
254#define MIPS_CP0_STATUS 12
255#define MIPS_CP0_CAUSE 13
256#define MIPS_CP0_EXC_PC 14
257#define MIPS_CP0_PRID 15
258#define MIPS_CP0_CONFIG 16
259#define MIPS_CP0_LLADDR 17
260#define MIPS_CP0_WATCH_LO 18
261#define MIPS_CP0_WATCH_HI 19
262#define MIPS_CP0_TLB_XCONTEXT 20
263#define MIPS_CP0_DIAG 22
264#define MIPS_CP0_ECC 26
265#define MIPS_CP0_CACHE_ERR 27
266#define MIPS_CP0_TAG_LO 28
267#define MIPS_CP0_TAG_HI 29
268#define MIPS_CP0_ERROR_PC 30
269#define MIPS_CP0_DEBUG 23
270#define MIPS_CP0_DEPC 24
271#define MIPS_CP0_PERFCNT 25
272#define MIPS_CP0_ERRCTL 26
273#define MIPS_CP0_DATA_LO 28
274#define MIPS_CP0_DATA_HI 29
275#define MIPS_CP0_DESAVE 31
276
277#define MIPS_CP0_CONFIG_SEL 0
278#define MIPS_CP0_CONFIG1_SEL 1
279#define MIPS_CP0_CONFIG2_SEL 2
280#define MIPS_CP0_CONFIG3_SEL 3
281#define MIPS_CP0_CONFIG4_SEL 4
282#define MIPS_CP0_CONFIG5_SEL 5
283
284#define MIPS_CP0_GUESTCTL2 10
285#define MIPS_CP0_GUESTCTL2_SEL 5
286#define MIPS_CP0_GTOFFSET 12
287#define MIPS_CP0_GTOFFSET_SEL 7
288
289/* Resume Flags */
290#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
291#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
292
293#define RESUME_GUEST 0
294#define RESUME_GUEST_DR RESUME_FLAG_DR
295#define RESUME_HOST RESUME_FLAG_HOST
296
297enum emulation_result {
298 EMULATE_DONE, /* no further processing */
299 EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
300 EMULATE_FAIL, /* can't emulate this instruction */
301 EMULATE_WAIT, /* WAIT instruction */
302 EMULATE_PRIV_FAIL,
303 EMULATE_EXCEPT, /* A guest exception has been generated */
304 EMULATE_HYPERCALL, /* HYPCALL instruction */
305};
306
307#define mips3_paddr_to_tlbpfn(x) \
308 (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
309#define mips3_tlbpfn_to_paddr(x) \
310 ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
311
312#define MIPS3_PG_SHIFT 6
313#define MIPS3_PG_FRAME 0x3fffffc0
314
315#if defined(CONFIG_64BIT)
316#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13)
317#else
318#define VPN2_MASK 0xffffe000
319#endif
320#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data)
321#define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
322#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
323#define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)
324#define TLB_LO_IDX(x, va) (((va) >> PAGE_SHIFT) & 1)
325#define TLB_IS_VALID(x, va) ((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
326#define TLB_IS_DIRTY(x, va) ((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
327#define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \
328 ((y) & VPN2_MASK & ~(x).tlb_mask))
329#define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \
330 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))
331
332struct kvm_mips_tlb {
333 long tlb_mask;
334 long tlb_hi;
335 long tlb_lo[2];
336};
337
338#define KVM_MIPS_AUX_FPU 0x1
339#define KVM_MIPS_AUX_MSA 0x2
340
341#define KVM_MIPS_GUEST_TLB_SIZE 64
342struct kvm_vcpu_arch {
343 void *guest_ebase;
344 int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
345
346 /* Host registers preserved across guest mode execution */
347 unsigned long host_stack;
348 unsigned long host_gp;
349 unsigned long host_pgd;
350 unsigned long host_entryhi;
351
352 /* Host CP0 registers used when handling exits from guest */
353 unsigned long host_cp0_badvaddr;
354 unsigned long host_cp0_epc;
355 u32 host_cp0_cause;
356 u32 host_cp0_guestctl0;
357 u32 host_cp0_badinstr;
358 u32 host_cp0_badinstrp;
359
360 /* GPRS */
361 unsigned long gprs[32];
362 unsigned long hi;
363 unsigned long lo;
364 unsigned long pc;
365
366 /* FPU State */
367 struct mips_fpu_struct fpu;
368 /* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
369 unsigned int aux_inuse;
370
371 /* COP0 State */
372 struct mips_coproc *cop0;
373
374 /* Host KSEG0 address of the EI/DI offset */
375 void *kseg0_commpage;
376
377 /* Resume PC after MMIO completion */
378 unsigned long io_pc;
379 /* GPR used as IO source/target */
380 u32 io_gpr;
381
382 struct hrtimer comparecount_timer;
383 /* Count timer control KVM register */
384 u32 count_ctl;
385 /* Count bias from the raw time */
386 u32 count_bias;
387 /* Frequency of timer in Hz */
388 u32 count_hz;
389 /* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
390 s64 count_dyn_bias;
391 /* Resume time */
392 ktime_t count_resume;
393 /* Period of timer tick in ns */
394 u64 count_period;
395
396 /* Bitmask of exceptions that are pending */
397 unsigned long pending_exceptions;
398
399 /* Bitmask of pending exceptions to be cleared */
400 unsigned long pending_exceptions_clr;
401
402 /* S/W Based TLB for guest */
403 struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
404
405 /* Guest kernel/user [partial] mm */
406 struct mm_struct guest_kernel_mm, guest_user_mm;
407
408 /* Guest ASID of last user mode execution */
409 unsigned int last_user_gasid;
410
411 /* Cache some mmu pages needed inside spinlock regions */
412 struct kvm_mmu_memory_cache mmu_page_cache;
413
414#ifdef CONFIG_KVM_MIPS_VZ
415 /* vcpu's vzguestid is different on each host cpu in an smp system */
416 u32 vzguestid[NR_CPUS];
417
418 /* wired guest TLB entries */
419 struct kvm_mips_tlb *wired_tlb;
420 unsigned int wired_tlb_limit;
421 unsigned int wired_tlb_used;
422
423 /* emulated guest MAAR registers */
424 unsigned long maar[6];
425#endif
426
427 /* Last CPU the VCPU state was loaded on */
428 int last_sched_cpu;
429 /* Last CPU the VCPU actually executed guest code on */
430 int last_exec_cpu;
431
432 /* WAIT executed */
433 int wait;
434
435 u8 fpu_enabled;
436 u8 msa_enabled;
437};
438
439static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
440 unsigned long val)
441{
442 unsigned long temp;
443 do {
444 __asm__ __volatile__(
445 " .set push \n"
446 " .set "MIPS_ISA_ARCH_LEVEL" \n"
447 " " __LL "%0, %1 \n"
448 " or %0, %2 \n"
449 " " __SC "%0, %1 \n"
450 " .set pop \n"
451 : "=&r" (temp), "+m" (*reg)
452 : "r" (val));
453 } while (unlikely(!temp));
454}
455
456static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
457 unsigned long val)
458{
459 unsigned long temp;
460 do {
461 __asm__ __volatile__(
462 " .set push \n"
463 " .set "MIPS_ISA_ARCH_LEVEL" \n"
464 " " __LL "%0, %1 \n"
465 " and %0, %2 \n"
466 " " __SC "%0, %1 \n"
467 " .set pop \n"
468 : "=&r" (temp), "+m" (*reg)
469 : "r" (~val));
470 } while (unlikely(!temp));
471}
472
473static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
474 unsigned long change,
475 unsigned long val)
476{
477 unsigned long temp;
478 do {
479 __asm__ __volatile__(
480 " .set push \n"
481 " .set "MIPS_ISA_ARCH_LEVEL" \n"
482 " " __LL "%0, %1 \n"
483 " and %0, %2 \n"
484 " or %0, %3 \n"
485 " " __SC "%0, %1 \n"
486 " .set pop \n"
487 : "=&r" (temp), "+m" (*reg)
488 : "r" (~change), "r" (val & change));
489 } while (unlikely(!temp));
490}
491
492/* Guest register types, used in accessor build below */
493#define __KVMT32 u32
494#define __KVMTl unsigned long
495
496/*
497 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
498 * These operate on the saved guest C0 state in RAM.
499 */
500
501/* Generate saved context simple accessors */
502#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
503static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
504{ \
505 return cop0->reg[(_reg)][(sel)]; \
506} \
507static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0, \
508 __KVMT##type val) \
509{ \
510 cop0->reg[(_reg)][(sel)] = val; \
511}
512
513/* Generate saved context bitwise modifiers */
514#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
515static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
516 __KVMT##type val) \
517{ \
518 cop0->reg[(_reg)][(sel)] |= val; \
519} \
520static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
521 __KVMT##type val) \
522{ \
523 cop0->reg[(_reg)][(sel)] &= ~val; \
524} \
525static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
526 __KVMT##type mask, \
527 __KVMT##type val) \
528{ \
529 unsigned long _mask = mask; \
530 cop0->reg[(_reg)][(sel)] &= ~_mask; \
531 cop0->reg[(_reg)][(sel)] |= val & _mask; \
532}
533
534/* Generate saved context atomic bitwise modifiers */
535#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
536static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
537 __KVMT##type val) \
538{ \
539 _kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
540} \
541static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
542 __KVMT##type val) \
543{ \
544 _kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
545} \
546static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
547 __KVMT##type mask, \
548 __KVMT##type val) \
549{ \
550 _kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
551 val); \
552}
553
554/*
555 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
556 * These operate on the VZ guest C0 context in hardware.
557 */
558
559/* Generate VZ guest context simple accessors */
560#define __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
561static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
562{ \
563 return read_gc0_##name(); \
564} \
565static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0, \
566 __KVMT##type val) \
567{ \
568 write_gc0_##name(val); \
569}
570
571/* Generate VZ guest context bitwise modifiers */
572#define __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
573static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0, \
574 __KVMT##type val) \
575{ \
576 set_gc0_##name(val); \
577} \
578static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0, \
579 __KVMT##type val) \
580{ \
581 clear_gc0_##name(val); \
582} \
583static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0, \
584 __KVMT##type mask, \
585 __KVMT##type val) \
586{ \
587 change_gc0_##name(mask, val); \
588}
589
590/* Generate VZ guest context save/restore to/from saved context */
591#define __BUILD_KVM_SAVE_VZ(name, _reg, sel) \
592static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0) \
593{ \
594 write_gc0_##name(cop0->reg[(_reg)][(sel)]); \
595} \
596static inline void kvm_save_gc0_##name(struct mips_coproc *cop0) \
597{ \
598 cop0->reg[(_reg)][(sel)] = read_gc0_##name(); \
599}
600
601/*
602 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
603 * These wrap a set of operations to provide them with a different name.
604 */
605
606/* Generate simple accessor wrapper */
607#define __BUILD_KVM_RW_WRAP(name1, name2, type) \
608static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0) \
609{ \
610 return kvm_read_##name2(cop0); \
611} \
612static inline void kvm_write_##name1(struct mips_coproc *cop0, \
613 __KVMT##type val) \
614{ \
615 kvm_write_##name2(cop0, val); \
616}
617
618/* Generate bitwise modifier wrapper */
619#define __BUILD_KVM_SET_WRAP(name1, name2, type) \
620static inline void kvm_set_##name1(struct mips_coproc *cop0, \
621 __KVMT##type val) \
622{ \
623 kvm_set_##name2(cop0, val); \
624} \
625static inline void kvm_clear_##name1(struct mips_coproc *cop0, \
626 __KVMT##type val) \
627{ \
628 kvm_clear_##name2(cop0, val); \
629} \
630static inline void kvm_change_##name1(struct mips_coproc *cop0, \
631 __KVMT##type mask, \
632 __KVMT##type val) \
633{ \
634 kvm_change_##name2(cop0, mask, val); \
635}
636
637/*
638 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
639 * These generate accessors operating on the saved context in RAM, and wrap them
640 * with the common guest C0 accessors (for use by common emulation code).
641 */
642
643#define __BUILD_KVM_RW_SW(name, type, _reg, sel) \
644 __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
645 __BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)
646
647#define __BUILD_KVM_SET_SW(name, type, _reg, sel) \
648 __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
649 __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
650
651#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel) \
652 __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
653 __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
654
655#ifndef CONFIG_KVM_MIPS_VZ
656
657/*
658 * T&E (trap & emulate software based virtualisation)
659 * We generate the common accessors operating exclusively on the saved context
660 * in RAM.
661 */
662
663#define __BUILD_KVM_RW_HW __BUILD_KVM_RW_SW
664#define __BUILD_KVM_SET_HW __BUILD_KVM_SET_SW
665#define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_ATOMIC_SW
666
667#else
668
669/*
670 * VZ (hardware assisted virtualisation)
671 * These macros use the active guest state in VZ mode (hardware registers),
672 */
673
674/*
675 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
676 * These generate accessors operating on the VZ guest context in hardware, and
677 * wrap them with the common guest C0 accessors (for use by common emulation
678 * code).
679 *
680 * Accessors operating on the saved context in RAM are also generated to allow
681 * convenient explicit saving and restoring of the state.
682 */
683
684#define __BUILD_KVM_RW_HW(name, type, _reg, sel) \
685 __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
686 __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
687 __BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type) \
688 __BUILD_KVM_SAVE_VZ(name, _reg, sel)
689
690#define __BUILD_KVM_SET_HW(name, type, _reg, sel) \
691 __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
692 __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
693 __BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)
694
695/*
696 * We can't do atomic modifications of COP0 state if hardware can modify it.
697 * Races must be handled explicitly.
698 */
699#define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_SET_HW
700
701#endif
702
703/*
704 * Define accessors for CP0 registers that are accessible to the guest. These
705 * are primarily used by common emulation code, which may need to access the
706 * registers differently depending on the implementation.
707 *
708 * fns_hw/sw name type reg num select
709 */
710__BUILD_KVM_RW_HW(index, 32, MIPS_CP0_TLB_INDEX, 0)
711__BUILD_KVM_RW_HW(entrylo0, l, MIPS_CP0_TLB_LO0, 0)
712__BUILD_KVM_RW_HW(entrylo1, l, MIPS_CP0_TLB_LO1, 0)
713__BUILD_KVM_RW_HW(context, l, MIPS_CP0_TLB_CONTEXT, 0)
714__BUILD_KVM_RW_HW(contextconfig, 32, MIPS_CP0_TLB_CONTEXT, 1)
715__BUILD_KVM_RW_HW(userlocal, l, MIPS_CP0_TLB_CONTEXT, 2)
716__BUILD_KVM_RW_HW(xcontextconfig, l, MIPS_CP0_TLB_CONTEXT, 3)
717__BUILD_KVM_RW_HW(pagemask, l, MIPS_CP0_TLB_PG_MASK, 0)
718__BUILD_KVM_RW_HW(pagegrain, 32, MIPS_CP0_TLB_PG_MASK, 1)
719__BUILD_KVM_RW_HW(segctl0, l, MIPS_CP0_TLB_PG_MASK, 2)
720__BUILD_KVM_RW_HW(segctl1, l, MIPS_CP0_TLB_PG_MASK, 3)
721__BUILD_KVM_RW_HW(segctl2, l, MIPS_CP0_TLB_PG_MASK, 4)
722__BUILD_KVM_RW_HW(pwbase, l, MIPS_CP0_TLB_PG_MASK, 5)
723__BUILD_KVM_RW_HW(pwfield, l, MIPS_CP0_TLB_PG_MASK, 6)
724__BUILD_KVM_RW_HW(pwsize, l, MIPS_CP0_TLB_PG_MASK, 7)
725__BUILD_KVM_RW_HW(wired, 32, MIPS_CP0_TLB_WIRED, 0)
726__BUILD_KVM_RW_HW(pwctl, 32, MIPS_CP0_TLB_WIRED, 6)
727__BUILD_KVM_RW_HW(hwrena, 32, MIPS_CP0_HWRENA, 0)
728__BUILD_KVM_RW_HW(badvaddr, l, MIPS_CP0_BAD_VADDR, 0)
729__BUILD_KVM_RW_HW(badinstr, 32, MIPS_CP0_BAD_VADDR, 1)
730__BUILD_KVM_RW_HW(badinstrp, 32, MIPS_CP0_BAD_VADDR, 2)
731__BUILD_KVM_RW_SW(count, 32, MIPS_CP0_COUNT, 0)
732__BUILD_KVM_RW_HW(entryhi, l, MIPS_CP0_TLB_HI, 0)
733__BUILD_KVM_RW_HW(compare, 32, MIPS_CP0_COMPARE, 0)
734__BUILD_KVM_RW_HW(status, 32, MIPS_CP0_STATUS, 0)
735__BUILD_KVM_RW_HW(intctl, 32, MIPS_CP0_STATUS, 1)
736__BUILD_KVM_RW_HW(cause, 32, MIPS_CP0_CAUSE, 0)
737__BUILD_KVM_RW_HW(epc, l, MIPS_CP0_EXC_PC, 0)
738__BUILD_KVM_RW_SW(prid, 32, MIPS_CP0_PRID, 0)
739__BUILD_KVM_RW_HW(ebase, l, MIPS_CP0_PRID, 1)
740__BUILD_KVM_RW_HW(config, 32, MIPS_CP0_CONFIG, 0)
741__BUILD_KVM_RW_HW(config1, 32, MIPS_CP0_CONFIG, 1)
742__BUILD_KVM_RW_HW(config2, 32, MIPS_CP0_CONFIG, 2)
743__BUILD_KVM_RW_HW(config3, 32, MIPS_CP0_CONFIG, 3)
744__BUILD_KVM_RW_HW(config4, 32, MIPS_CP0_CONFIG, 4)
745__BUILD_KVM_RW_HW(config5, 32, MIPS_CP0_CONFIG, 5)
746__BUILD_KVM_RW_HW(config6, 32, MIPS_CP0_CONFIG, 6)
747__BUILD_KVM_RW_HW(config7, 32, MIPS_CP0_CONFIG, 7)
748__BUILD_KVM_RW_SW(maari, l, MIPS_CP0_LLADDR, 2)
749__BUILD_KVM_RW_HW(xcontext, l, MIPS_CP0_TLB_XCONTEXT, 0)
750__BUILD_KVM_RW_HW(errorepc, l, MIPS_CP0_ERROR_PC, 0)
751__BUILD_KVM_RW_HW(kscratch1, l, MIPS_CP0_DESAVE, 2)
752__BUILD_KVM_RW_HW(kscratch2, l, MIPS_CP0_DESAVE, 3)
753__BUILD_KVM_RW_HW(kscratch3, l, MIPS_CP0_DESAVE, 4)
754__BUILD_KVM_RW_HW(kscratch4, l, MIPS_CP0_DESAVE, 5)
755__BUILD_KVM_RW_HW(kscratch5, l, MIPS_CP0_DESAVE, 6)
756__BUILD_KVM_RW_HW(kscratch6, l, MIPS_CP0_DESAVE, 7)
757
758/* Bitwise operations (on HW state) */
759__BUILD_KVM_SET_HW(status, 32, MIPS_CP0_STATUS, 0)
760/* Cause can be modified asynchronously from hardirq hrtimer callback */
761__BUILD_KVM_ATOMIC_HW(cause, 32, MIPS_CP0_CAUSE, 0)
762__BUILD_KVM_SET_HW(ebase, l, MIPS_CP0_PRID, 1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config, 32, MIPS_CP0_CONFIG, 0)
__BUILD_KVM_SET_SAVED(config1, 32, MIPS_CP0_CONFIG, 1)
__BUILD_KVM_SET_SAVED(config2, 32, MIPS_CP0_CONFIG, 2)
__BUILD_KVM_SET_SAVED(config3, 32, MIPS_CP0_CONFIG, 3)
__BUILD_KVM_SET_SAVED(config4, 32, MIPS_CP0_CONFIG, 4)
__BUILD_KVM_SET_SAVED(config5, 32, MIPS_CP0_CONFIG, 5)

/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
		vcpu->fpu_enabled;
}

static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}
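
/*
 * Usage sketch (illustrative): exception handlers gate lazy FPU context
 * restore on these helpers, roughly as below (kvm_own_fpu() is declared
 * later in this header):
 *
 *	if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
 *	    !(read_c0_status() & ST0_CU1))
 *		kvm_own_fpu(vcpu);
 */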

struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_extension)(struct kvm *kvm, long ext);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*flush_shadow_all)(struct kvm *kvm);
	/*
	 * Must take care of flushing any cached GPA PTEs (e.g. guest entries in
	 * VZ root TLB, or T&E GVA page tables and corresponding root TLB
	 * mappings).
	 */
	void (*flush_shadow_memslot)(struct kvm *kvm,
				     const struct kvm_memory_slot *slot);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
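
/*
 * Registration sketch (hypothetical "foo" implementation; the real ones are
 * e.g. trap & emulate or VZ): an implementation fills in a callbacks struct
 * and installs it from kvm_mips_emulation_init(), roughly:
 *
 *	static struct kvm_mips_callbacks kvm_foo_callbacks = {
 *		.handle_cop_unusable	= kvm_foo_handle_cop_unusable,
 *		.vcpu_run		= kvm_foo_vcpu_run,
 *		...
 *	};
 *
 *	int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
 *	{
 *		*install_callbacks = &kvm_foo_callbacks;
 *		return 0;
 *	}
 */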

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);

/* TLB handling */
u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);

u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);

u32 kvm_get_commpage_asid(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu, bool write_fault);
#endif
extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
					   struct kvm_vcpu *vcpu,
					   bool write_fault);

extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
					      struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
						struct kvm_mips_tlb *tlb,
						unsigned long gva,
						bool write_fault);

extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
						     u32 *opc,
						     struct kvm_vcpu *vcpu,
						     bool write_fault);

extern void kvm_mips_dump_host_tlbs(void);
extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
				 bool user, bool kernel);

extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
				     unsigned long entryhi);

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa);
void kvm_vz_local_flush_roottlb_all_guests(void);
void kvm_vz_local_flush_guesttlb_all(void);
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
#ifdef CONFIG_CPU_LOONGSON64
void kvm_loongson_clear_guest_vtlb(void);
void kvm_loongson_clear_guest_ftlb(void);
#endif
#endif

void kvm_mips_suspend_mm(int cpu);
void kvm_mips_resume_mm(int cpu);

/* MMU handling */

/**
 * enum kvm_mips_flush - Types of MMU flushes.
 * @KMF_USER:	Flush guest user virtual memory mappings.
 *		Guest USeg only.
 * @KMF_KERN:	Flush guest kernel virtual memory mappings.
 *		Guest USeg and KSeg2/3.
 * @KMF_GPA:	Flush guest physical memory mappings.
 *		Also includes KSeg0 if KMF_KERN is set.
 */
enum kvm_mips_flush {
	KMF_USER	= 0x0,
	KMF_KERN	= 0x1,
	KMF_GPA		= 0x2,
};
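
/*
 * Example (sketch): to flush all guest kernel mappings, including KSeg0 via
 * the GPA mappings, the flags combine as in:
 *
 *	kvm_mips_flush_gva_pt(pgd, KMF_GPA | KMF_KERN);
 */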
void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
				  bool user);
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu);

enum kvm_mips_fault_result {
	KVM_MIPS_MAPPED = 0,
	KVM_MIPS_GVA,
	KVM_MIPS_GPA,
	KVM_MIPS_TLB,
	KVM_MIPS_TLBINV,
	KVM_MIPS_TLBMOD,
};
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
						   unsigned long gva,
						   bool write);
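
/*
 * Dispatch sketch (illustrative, not a definitive call site): callers
 * typically switch on the result, e.g.:
 *
 *	switch (kvm_trap_emul_gva_fault(vcpu, gva, write)) {
 *	case KVM_MIPS_MAPPED:
 *		break;		mapping present, retry the access
 *	case KVM_MIPS_GVA:
 *	case KVM_MIPS_GPA:
 *		...fault in the missing mapping...
 *	default:
 *		...reflect a TLB exception to the guest...
 *	}
 */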

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

/* Emulation */
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);

/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to an ifetch fault.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
{
	unsigned long badvaddr = vcpu->host_cp0_badvaddr;
	unsigned long epc = msk_isa16_mode(vcpu->pc);
	u32 cause = vcpu->host_cp0_cause;

	if (epc == badvaddr)
		return true;

	/*
	 * Branches may be 32-bit or 16-bit instructions.
	 * This isn't exact, but we don't really support MIPS16 or microMIPS yet
	 * in KVM anyway.
	 */
	if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
		return true;

	return false;
}

extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
						   u32 *opc,
						   struct kvm_vcpu *vcpu);

long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
						      u32 *opc,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
							  u32 *opc,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
							 u32 *opc,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
							  u32 *opc,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
							 u32 *opc,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
						     u32 *opc,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
						      u32 *opc,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_handle_ri(u32 cause,
						u32 *opc,
						struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
						     u32 *opc,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
						     u32 *opc,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
						       u32 *opc,
						       struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
							  u32 *opc,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
						      u32 *opc,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
							  u32 *opc,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);

u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

/* fairly internal functions requiring some care to use */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift);
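
/*
 * Pattern sketch (illustrative): freeze the soft timer so Count can't move
 * or expire while related state is updated, then resume from the frozen
 * time:
 *
 *	u32 count;
 *	ktime_t frozen = kvm_mips_freeze_hrtimer(vcpu, &count);
 *	...update Count/Compare state consistently...
 *	kvm_mips_restore_hrtimer(vcpu, frozen, count, 0);
 */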

#ifdef CONFIG_KVM_MIPS_VZ
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);
#else
static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {}
static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {}
#endif

enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
					       struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
					     u32 *opc,
					     u32 cause,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
					   u32 *opc,
					   u32 cause,
					   struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause,
					    struct kvm_vcpu *vcpu);

/* COP0 */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);

unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);

/* Hypercalls (hypcall.c) */

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);

/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(union mips_instruction inst,
				      u32 *opc, struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
				   struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_mips_interrupt *irq);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
					 struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

#endif /* __MIPS_KVM_HOST_H__ */