/* Linux v6.2 */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

#include <asm/asm.h>
#include <asm/inst.h>
#include <asm/mipsregs.h>

#include <kvm/iodev.h>

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

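/*
 * Worked example of the packing above: for Status (register 12, select 0),
 * KVM_REG_MIPS_CP0_STATUS == MIPS_CP0_32(12, 0)
 * == KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * 12 + 0),
 * i.e. the (register, select) pair occupies the low bits of the register id.
 */
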
#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_CONTEXTCONFIG	MIPS_CP0_32(4, 1)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG	MIPS_CP0_64(4, 3)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_SEGCTL0	MIPS_CP0_64(5, 2)
#define KVM_REG_MIPS_CP0_SEGCTL1	MIPS_CP0_64(5, 3)
#define KVM_REG_MIPS_CP0_SEGCTL2	MIPS_CP0_64(5, 4)
#define KVM_REG_MIPS_CP0_PWBASE		MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD	MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE		MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL		MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_BADINSTR	MIPS_CP0_32(8, 1)
#define KVM_REG_MIPS_CP0_BADINSTRP	MIPS_CP0_32(8, 2)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_INTCTL		MIPS_CP0_32(12, 1)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG6	MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_MAARI		MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_DIAG		MIPS_CP0_32(22, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)


#define KVM_MAX_VCPUS		16

#define KVM_HALT_POLL_NS_DEFAULT 500000

extern unsigned long GUESTID_MASK;
extern unsigned long GUESTID_FIRST_VERSION;
extern unsigned long GUESTID_VERSION_MASK;

#define KVM_INVALID_ADDR		0xdeadbeef

/*
 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 * PAGE_OFFSET.
 */

#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}
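
/*
 * Both error HVAs fall in the kernel's IS_ERR_VALUE() window at the top of
 * the address space, so kvm_is_error_hva(KVM_HVA_ERR_BAD) and
 * kvm_is_error_hva(KVM_HVA_ERR_RO_BAD) both return true.
 */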

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 wait_exits;
	u64 cache_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 cop_unusable_exits;
	u64 tlbmod_exits;
	u64 tlbmiss_ld_exits;
	u64 tlbmiss_st_exits;
	u64 addrerr_st_exits;
	u64 addrerr_ld_exits;
	u64 syscall_exits;
	u64 resvd_inst_exits;
	u64 break_inst_exits;
	u64 trap_inst_exits;
	u64 msa_fpe_exits;
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
	u64 vz_gpsi_exits;
	u64 vz_gsfc_exits;
	u64 vz_hc_exits;
	u64 vz_grr_exits;
	u64 vz_gva_exits;
	u64 vz_ghfc_exits;
	u64 vz_gpa_exits;
	u64 vz_resvd_exits;
#ifdef CONFIG_CPU_LOONGSON64
	u64 vz_cpucfg_exits;
#endif
};

struct kvm_arch_memory_slot {
};

#ifdef CONFIG_CPU_LOONGSON64
struct ipi_state {
	uint32_t status;
	uint32_t en;
	uint32_t set;
	uint32_t clear;
	uint64_t buf[4];
};

struct loongson_kvm_ipi;

struct ipi_io_device {
	int node_id;
	struct loongson_kvm_ipi *ipi;
	struct kvm_io_device device;
};

struct loongson_kvm_ipi {
	spinlock_t lock;
	struct kvm *kvm;
	struct ipi_state ipistate[16];
	struct ipi_io_device dev_ipi[4];
};
#endif

struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
	/* Mask of CPUs needing GPA ASID flush */
	cpumask_t asid_flush_mask;
#ifdef CONFIG_CPU_LOONGSON64
	struct loongson_kvm_ipi ipi;
#endif
};

#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};
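
/*
 * The saved context is indexed by (register number, select) using the
 * MIPS_CP0_* names defined below; e.g. the saved guest Status value lives in
 * cop0->reg[MIPS_CP0_STATUS][0], though the generated kvm_read_sw_gc0_*()
 * accessors further down are the intended way to reach it.
 */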

/*
 * Coprocessor 0 register names
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_DIAG		22
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31

#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

#define MIPS_CP0_GUESTCTL2	10
#define MIPS_CP0_GUESTCTL2_SEL	5
#define MIPS_CP0_GTOFFSET	12
#define MIPS_CP0_GTOFFSET_SEL	7

/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_HYPERCALL,	/* HYPCALL instruction */
};

#if defined(CONFIG_64BIT)
#define VPN2_MASK		GENMASK(cpu_vmbits - 1, 13)
#else
#define VPN2_MASK		0xffffe000
#endif
#define KVM_ENTRYHI_ASID	cpu_asid_mask(&boot_cpu_data)
#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))

struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};
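
/*
 * A hedged usage sketch of the match macros above: a guest TLB entry "tlb"
 * matches an EntryHi-style VA/ASID value "entryhi" when both the VPN2 and
 * the ASID (or the global bit) compare:
 *
 *	if (TLB_HI_VPN2_HIT(tlb, entryhi) && TLB_HI_ASID_HIT(tlb, entryhi))
 *		... entry matches ...
 */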

#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2

struct kvm_vcpu_arch {
	void *guest_ebase;
	int (*vcpu_run)(struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_entryhi;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_guestctl0;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Resume PC after MMIO completion */
	unsigned long io_pc;
	/* GPR used as IO source/target */
	u32 io_gpr;

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vcpu's vzguestid is different on each host cpu in an smp system */
	u32 vzguestid[NR_CPUS];

	/* wired guest TLB entries */
	struct kvm_mips_tlb *wired_tlb;
	unsigned int wired_tlb_limit;
	unsigned int wired_tlb_used;

	/* emulated guest MAAR registers */
	unsigned long maar[6];

	/* Last CPU the VCPU state was loaded on */
	int last_sched_cpu;
	/* Last CPU the VCPU actually executed guest code on */
	int last_exec_cpu;

	/* WAIT executed */
	int wait;

	u8 fpu_enabled;
	u8 msa_enabled;
};

static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	"__stringify(LONG_LL)	" %0, %1	\n"
		"	or	%0, %2				\n"
		"	"__stringify(LONG_SC)	" %0, %1	\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	"__stringify(LONG_LL)	" %0, %1	\n"
		"	and	%0, %2				\n"
		"	"__stringify(LONG_SC)	" %0, %1	\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	"__stringify(LONG_LL)	" %0, %1	\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	"__stringify(LONG_SC)	" %0, %1	\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));
}
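
/*
 * The three helpers above are LL/SC read-modify-write loops: the store-
 * conditional leaves temp zero if the sequence was interrupted, and the
 * do/while retries until it succeeds. A hedged usage sketch (CAUSEF_TI is
 * the Cause.TI timer-interrupt bit from <asm/mipsregs.h>):
 *
 *	_kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], CAUSEF_TI);
 */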

/* Guest register types, used in accessor build below */
#define __KVMT32	u32
#define __KVMTl	unsigned long

/*
 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
 * These operate on the saved guest C0 state in RAM.
 */

/* Generate saved context simple accessors */
#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
{									\
	return cop0->reg[(_reg)][(sel)];				\
}									\
static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] = val;					\
}
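
/*
 * For instance, __BUILD_KVM_RW_SAVED(index, 32, MIPS_CP0_TLB_INDEX, 0)
 * (pulled in by the __BUILD_KVM_RW_HW(index, ...) instantiation below)
 * expands to:
 *
 *	static inline u32 kvm_read_sw_gc0_index(struct mips_coproc *cop0)
 *	{ return cop0->reg[MIPS_CP0_TLB_INDEX][0]; }
 *	static inline void kvm_write_sw_gc0_index(struct mips_coproc *cop0,
 *						  u32 val)
 *	{ cop0->reg[MIPS_CP0_TLB_INDEX][0] = val; }
 */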

/* Generate saved context bitwise modifiers */
#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] |= val;				\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] &= ~val;				\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	unsigned long _mask = mask;					\
	cop0->reg[(_reg)][(sel)] &= ~_mask;				\
	cop0->reg[(_reg)][(sel)] |= val & _mask;			\
}

/* Generate saved context atomic bitwise modifiers */
#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
					val);				\
}

/*
 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
 * These operate on the VZ guest C0 context in hardware.
 */

/* Generate VZ guest context simple accessors */
#define __BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
{									\
	return read_gc0_##name();					\
}									\
static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	write_gc0_##name(val);						\
}

/* Generate VZ guest context bitwise modifiers */
#define __BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	set_gc0_##name(val);						\
}									\
static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	clear_gc0_##name(val);						\
}									\
static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	change_gc0_##name(mask, val);					\
}

/* Generate VZ guest context save/restore to/from saved context */
#define __BUILD_KVM_SAVE_VZ(name, _reg, sel)			\
static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0)	\
{									\
	write_gc0_##name(cop0->reg[(_reg)][(sel)]);			\
}									\
static inline void kvm_save_gc0_##name(struct mips_coproc *cop0)	\
{									\
	cop0->reg[(_reg)][(sel)] = read_gc0_##name();			\
}
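
/*
 * E.g. the __BUILD_KVM_RW_HW(entryhi, ...) instantiation below pulls this in
 * to generate kvm_save_gc0_entryhi()/kvm_restore_gc0_entryhi(), which copy
 * between the hardware guest EntryHi and reg[MIPS_CP0_TLB_HI][0] of the
 * saved context.
 */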

/*
 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
 * These wrap a set of operations to provide them with a different name.
 */

/* Generate simple accessor wrapper */
#define __BUILD_KVM_RW_WRAP(name1, name2, type)				\
static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0)	\
{									\
	return kvm_read_##name2(cop0);					\
}									\
static inline void kvm_write_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_write_##name2(cop0, val);					\
}

/* Generate bitwise modifier wrapper */
#define __BUILD_KVM_SET_WRAP(name1, name2, type)			\
static inline void kvm_set_##name1(struct mips_coproc *cop0,		\
				   __KVMT##type val)			\
{									\
	kvm_set_##name2(cop0, val);					\
}									\
static inline void kvm_clear_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_clear_##name2(cop0, val);					\
}									\
static inline void kvm_change_##name1(struct mips_coproc *cop0,		\
				      __KVMT##type mask,		\
				      __KVMT##type val)			\
{									\
	kvm_change_##name2(cop0, mask, val);				\
}

/*
 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
 * These generate accessors operating on the saved context in RAM, and wrap them
 * with the common guest C0 accessors (for use by common emulation code).
 */

#define __BUILD_KVM_RW_SW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_SET_SW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel)			\
	__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
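
/*
 * So for a register kept only in RAM, such as Count (built with
 * __BUILD_KVM_RW_SW(count, ...) below), common code calls
 * kvm_read_c0_guest_count(cop0), a thin wrapper around
 * kvm_read_sw_gc0_count(cop0).
 */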

/*
 * VZ (hardware assisted virtualisation)
 * These macros use the active guest state in VZ mode (hardware registers).
 */

/*
 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
 * These generate accessors operating on the VZ guest context in hardware, and
 * wrap them with the common guest C0 accessors (for use by common emulation
 * code).
 *
 * Accessors operating on the saved context in RAM are also generated to allow
 * convenient explicit saving and restoring of the state.
 */

#define __BUILD_KVM_RW_HW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type)	\
	__BUILD_KVM_SAVE_VZ(name, _reg, sel)

#define __BUILD_KVM_SET_HW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)

/*
 * We can't do atomic modifications of COP0 state if hardware can modify it.
 * Races must be handled explicitly.
 */
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_SET_HW

/*
 * Define accessors for CP0 registers that are accessible to the guest. These
 * are primarily used by common emulation code, which may need to access the
 * registers differently depending on the implementation.
 *
 *    fns_hw/sw    name     type    reg num         select
 */
__BUILD_KVM_RW_HW(index,          32, MIPS_CP0_TLB_INDEX,    0)
__BUILD_KVM_RW_HW(entrylo0,       l,  MIPS_CP0_TLB_LO0,      0)
__BUILD_KVM_RW_HW(entrylo1,       l,  MIPS_CP0_TLB_LO1,      0)
__BUILD_KVM_RW_HW(context,        l,  MIPS_CP0_TLB_CONTEXT,  0)
__BUILD_KVM_RW_HW(contextconfig,  32, MIPS_CP0_TLB_CONTEXT,  1)
__BUILD_KVM_RW_HW(userlocal,      l,  MIPS_CP0_TLB_CONTEXT,  2)
__BUILD_KVM_RW_HW(xcontextconfig, l,  MIPS_CP0_TLB_CONTEXT,  3)
__BUILD_KVM_RW_HW(pagemask,       l,  MIPS_CP0_TLB_PG_MASK,  0)
__BUILD_KVM_RW_HW(pagegrain,      32, MIPS_CP0_TLB_PG_MASK,  1)
__BUILD_KVM_RW_HW(segctl0,        l,  MIPS_CP0_TLB_PG_MASK,  2)
__BUILD_KVM_RW_HW(segctl1,        l,  MIPS_CP0_TLB_PG_MASK,  3)
__BUILD_KVM_RW_HW(segctl2,        l,  MIPS_CP0_TLB_PG_MASK,  4)
__BUILD_KVM_RW_HW(pwbase,         l,  MIPS_CP0_TLB_PG_MASK,  5)
__BUILD_KVM_RW_HW(pwfield,        l,  MIPS_CP0_TLB_PG_MASK,  6)
__BUILD_KVM_RW_HW(pwsize,         l,  MIPS_CP0_TLB_PG_MASK,  7)
__BUILD_KVM_RW_HW(wired,          32, MIPS_CP0_TLB_WIRED,    0)
__BUILD_KVM_RW_HW(pwctl,          32, MIPS_CP0_TLB_WIRED,    6)
__BUILD_KVM_RW_HW(hwrena,         32, MIPS_CP0_HWRENA,       0)
__BUILD_KVM_RW_HW(badvaddr,       l,  MIPS_CP0_BAD_VADDR,    0)
__BUILD_KVM_RW_HW(badinstr,       32, MIPS_CP0_BAD_VADDR,    1)
__BUILD_KVM_RW_HW(badinstrp,      32, MIPS_CP0_BAD_VADDR,    2)
__BUILD_KVM_RW_SW(count,          32, MIPS_CP0_COUNT,        0)
__BUILD_KVM_RW_HW(entryhi,        l,  MIPS_CP0_TLB_HI,       0)
__BUILD_KVM_RW_HW(compare,        32, MIPS_CP0_COMPARE,      0)
__BUILD_KVM_RW_HW(status,         32, MIPS_CP0_STATUS,       0)
__BUILD_KVM_RW_HW(intctl,         32, MIPS_CP0_STATUS,       1)
__BUILD_KVM_RW_HW(cause,          32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_RW_HW(epc,            l,  MIPS_CP0_EXC_PC,       0)
__BUILD_KVM_RW_SW(prid,           32, MIPS_CP0_PRID,         0)
__BUILD_KVM_RW_HW(ebase,          l,  MIPS_CP0_PRID,         1)
__BUILD_KVM_RW_HW(config,         32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_RW_HW(config1,        32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_RW_HW(config2,        32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_RW_HW(config3,        32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_RW_HW(config4,        32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_RW_HW(config5,        32, MIPS_CP0_CONFIG,       5)
__BUILD_KVM_RW_HW(config6,        32, MIPS_CP0_CONFIG,       6)
__BUILD_KVM_RW_HW(config7,        32, MIPS_CP0_CONFIG,       7)
__BUILD_KVM_RW_SW(maari,          l,  MIPS_CP0_LLADDR,       2)
__BUILD_KVM_RW_HW(xcontext,       l,  MIPS_CP0_TLB_XCONTEXT, 0)
__BUILD_KVM_RW_HW(errorepc,       l,  MIPS_CP0_ERROR_PC,     0)
__BUILD_KVM_RW_HW(kscratch1,      l,  MIPS_CP0_DESAVE,       2)
__BUILD_KVM_RW_HW(kscratch2,      l,  MIPS_CP0_DESAVE,       3)
__BUILD_KVM_RW_HW(kscratch3,      l,  MIPS_CP0_DESAVE,       4)
__BUILD_KVM_RW_HW(kscratch4,      l,  MIPS_CP0_DESAVE,       5)
__BUILD_KVM_RW_HW(kscratch5,      l,  MIPS_CP0_DESAVE,       6)
__BUILD_KVM_RW_HW(kscratch6,      l,  MIPS_CP0_DESAVE,       7)

/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status,        32, MIPS_CP0_STATUS,       0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause,      32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_SET_HW(ebase,         l,  MIPS_CP0_PRID,         1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config,     32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_SET_SAVED(config1,    32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_SET_SAVED(config2,    32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_SET_SAVED(config3,    32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_SET_SAVED(config4,    32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_SET_SAVED(config5,    32, MIPS_CP0_CONFIG,       5)
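
/*
 * A hedged usage sketch of the generated common accessors (ST0_CU1 is the
 * Status.CU1 coprocessor-enable bit from <asm/mipsregs.h>); for Status these
 * resolve to the VZ hardware accessors built above:
 *
 *	u32 status = kvm_read_c0_guest_status(cop0);
 *	kvm_set_c0_guest_status(cop0, ST0_CU1);
 */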

/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
		vcpu->fpu_enabled;
}
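
/*
 * The __builtin_constant_p() test above (and in the MSA variant below) lets
 * configurations where raw_cpu_has_fpu is compile-time false drop the guest
 * FPU paths entirely, while everything else checks at run time.
 */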

static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}

struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_extension)(struct kvm *kvm, long ext);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*prepare_flush_shadow)(struct kvm *kvm);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
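
/*
 * Assumed registration sketch (the table name and handler are hypothetical):
 * an implementation fills a kvm_mips_callbacks table and hands it back
 * through kvm_mips_emulation_init():
 *
 *	static struct kvm_mips_callbacks my_callbacks = {
 *		.handle_cop_unusable = my_handle_cop_unusable,
 *		// ... remaining handlers ...
 *	};
 *
 *	int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
 *	{
 *		*install_callbacks = &my_callbacks;
 *		return 0;
 *	}
 */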

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);

/* TLB handling */
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu, bool write_fault);

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa);
void kvm_vz_local_flush_roottlb_all_guests(void);
void kvm_vz_local_flush_guesttlb_all(void);
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
#ifdef CONFIG_CPU_LOONGSON64
void kvm_loongson_clear_guest_vtlb(void);
void kvm_loongson_clear_guest_ftlb(void);
#endif

/* MMU handling */

bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

#define KVM_ARCH_WANT_MMU_NOTIFIER

/* Emulation */
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);

/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
{
	unsigned long badvaddr = vcpu->host_cp0_badvaddr;
	unsigned long epc = msk_isa16_mode(vcpu->pc);
	u32 cause = vcpu->host_cp0_cause;

	if (epc == badvaddr)
		return true;

	/*
	 * Branches may be 32-bit or 16-bit instructions.
	 * This isn't exact, but we don't really support MIPS16 or microMIPS yet
	 * in KVM anyway.
	 */
	if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
		return true;

	return false;
}

extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);

u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

/* fairly internal functions requiring some care to use */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift);
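
/*
 * Assumed pairing for the two functions above (a sketch, not from this
 * header): freeze the hrtimer, adjust the timer state, then restore with the
 * frozen time and count:
 *
 *	u32 count;
 *	ktime_t before = kvm_mips_freeze_hrtimer(vcpu, &count);
 *	// ... modify timer state ...
 *	kvm_mips_restore_hrtimer(vcpu, before, count, 0);
 */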

void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause,
					    struct kvm_vcpu *vcpu);

/* COP0 */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);

/* Hypercalls (hypcall.c) */

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
					 struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
int kvm_arch_flush_remote_tlb(struct kvm *kvm);

#endif /* __MIPS_KVM_HOST_H__ */
v6.13.7
  1/*
  2* This file is subject to the terms and conditions of the GNU General Public
  3* License.  See the file "COPYING" in the main directory of this archive
  4* for more details.
  5*
  6* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
  7* Authors: Sanjay Lal <sanjayl@kymasys.com>
  8*/
  9
 10#ifndef __MIPS_KVM_HOST_H__
 11#define __MIPS_KVM_HOST_H__
 12
 13#include <linux/cpumask.h>
 14#include <linux/mutex.h>
 15#include <linux/hrtimer.h>
 16#include <linux/interrupt.h>
 17#include <linux/types.h>
 18#include <linux/kvm.h>
 19#include <linux/kvm_types.h>
 20#include <linux/threads.h>
 21#include <linux/spinlock.h>
 22
 23#include <asm/asm.h>
 24#include <asm/inst.h>
 25#include <asm/mipsregs.h>
 26
 27#include <kvm/iodev.h>
 28
 29/* MIPS KVM register ids */
 30#define MIPS_CP0_32(_R, _S)					\
 31	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
 32
 33#define MIPS_CP0_64(_R, _S)					\
 34	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
 35
 36#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
 37#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
 38#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
 39#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
 40#define KVM_REG_MIPS_CP0_CONTEXTCONFIG	MIPS_CP0_32(4, 1)
 41#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
 42#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG	MIPS_CP0_64(4, 3)
 43#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
 44#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
 45#define KVM_REG_MIPS_CP0_SEGCTL0	MIPS_CP0_64(5, 2)
 46#define KVM_REG_MIPS_CP0_SEGCTL1	MIPS_CP0_64(5, 3)
 47#define KVM_REG_MIPS_CP0_SEGCTL2	MIPS_CP0_64(5, 4)
 48#define KVM_REG_MIPS_CP0_PWBASE		MIPS_CP0_64(5, 5)
 49#define KVM_REG_MIPS_CP0_PWFIELD	MIPS_CP0_64(5, 6)
 50#define KVM_REG_MIPS_CP0_PWSIZE		MIPS_CP0_64(5, 7)
 51#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
 52#define KVM_REG_MIPS_CP0_PWCTL		MIPS_CP0_32(6, 6)
 53#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
 54#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
 55#define KVM_REG_MIPS_CP0_BADINSTR	MIPS_CP0_32(8, 1)
 56#define KVM_REG_MIPS_CP0_BADINSTRP	MIPS_CP0_32(8, 2)
 57#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
 58#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
 59#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
 60#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
 61#define KVM_REG_MIPS_CP0_INTCTL		MIPS_CP0_32(12, 1)
 62#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
 63#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
 64#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
 65#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
 66#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
 67#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
 68#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
 69#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
 70#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
 71#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
 72#define KVM_REG_MIPS_CP0_CONFIG6	MIPS_CP0_32(16, 6)
 73#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
 74#define KVM_REG_MIPS_CP0_MAARI		MIPS_CP0_64(17, 2)
 75#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
 76#define KVM_REG_MIPS_CP0_DIAG		MIPS_CP0_32(22, 0)
 77#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
 78#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
 79#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
 80#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
 81#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
 82#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
 83#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)
 84
 85
 86#define KVM_MAX_VCPUS		16
 87
 88#define KVM_HALT_POLL_NS_DEFAULT 500000
 89
 90extern unsigned long GUESTID_MASK;
 91extern unsigned long GUESTID_FIRST_VERSION;
 92extern unsigned long GUESTID_VERSION_MASK;
 93
 94#define KVM_INVALID_ADDR		0xdeadbeef
 95
 96/*
 97 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 98 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 99 * PAGE_OFFSET.
100 */
101
102#define KVM_HVA_ERR_BAD			(-1UL)
103#define KVM_HVA_ERR_RO_BAD		(-2UL)
104
105static inline bool kvm_is_error_hva(unsigned long addr)
106{
107	return IS_ERR_VALUE(addr);
108}
109
110struct kvm_vm_stat {
111	struct kvm_vm_stat_generic generic;
112};
113
114struct kvm_vcpu_stat {
115	struct kvm_vcpu_stat_generic generic;
116	u64 wait_exits;
117	u64 cache_exits;
118	u64 signal_exits;
119	u64 int_exits;
120	u64 cop_unusable_exits;
121	u64 tlbmod_exits;
122	u64 tlbmiss_ld_exits;
123	u64 tlbmiss_st_exits;
124	u64 addrerr_st_exits;
125	u64 addrerr_ld_exits;
126	u64 syscall_exits;
127	u64 resvd_inst_exits;
128	u64 break_inst_exits;
129	u64 trap_inst_exits;
130	u64 msa_fpe_exits;
131	u64 fpe_exits;
132	u64 msa_disabled_exits;
133	u64 flush_dcache_exits;
134	u64 vz_gpsi_exits;
135	u64 vz_gsfc_exits;
136	u64 vz_hc_exits;
137	u64 vz_grr_exits;
138	u64 vz_gva_exits;
139	u64 vz_ghfc_exits;
140	u64 vz_gpa_exits;
141	u64 vz_resvd_exits;
142#ifdef CONFIG_CPU_LOONGSON64
143	u64 vz_cpucfg_exits;
144#endif
145};
146
147struct kvm_arch_memory_slot {
148};
149
150#ifdef CONFIG_CPU_LOONGSON64
151struct ipi_state {
152	uint32_t status;
153	uint32_t en;
154	uint32_t set;
155	uint32_t clear;
156	uint64_t buf[4];
157};
158
159struct loongson_kvm_ipi;
160
161struct ipi_io_device {
162	int node_id;
163	struct loongson_kvm_ipi *ipi;
164	struct kvm_io_device device;
165};
166
167struct loongson_kvm_ipi {
168	spinlock_t lock;
169	struct kvm *kvm;
170	struct ipi_state ipistate[16];
171	struct ipi_io_device dev_ipi[4];
172};
173#endif
174
175struct kvm_arch {
176	/* Guest physical mm */
177	struct mm_struct gpa_mm;
178	/* Mask of CPUs needing GPA ASID flush */
179	cpumask_t asid_flush_mask;
180#ifdef CONFIG_CPU_LOONGSON64
181	struct loongson_kvm_ipi ipi;
182#endif
183};
184
185#define N_MIPS_COPROC_REGS	32
186#define N_MIPS_COPROC_SEL	8
187
188struct mips_coproc {
189	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
190#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
191	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
192#endif
193};
194
195/*
196 * Coprocessor 0 register names
197 */
198#define MIPS_CP0_TLB_INDEX	0
199#define MIPS_CP0_TLB_RANDOM	1
200#define MIPS_CP0_TLB_LOW	2
201#define MIPS_CP0_TLB_LO0	2
202#define MIPS_CP0_TLB_LO1	3
203#define MIPS_CP0_TLB_CONTEXT	4
204#define MIPS_CP0_TLB_PG_MASK	5
205#define MIPS_CP0_TLB_WIRED	6
206#define MIPS_CP0_HWRENA		7
207#define MIPS_CP0_BAD_VADDR	8
208#define MIPS_CP0_COUNT		9
209#define MIPS_CP0_TLB_HI		10
210#define MIPS_CP0_COMPARE	11
211#define MIPS_CP0_STATUS		12
212#define MIPS_CP0_CAUSE		13
213#define MIPS_CP0_EXC_PC		14
214#define MIPS_CP0_PRID		15
215#define MIPS_CP0_CONFIG		16
216#define MIPS_CP0_LLADDR		17
217#define MIPS_CP0_WATCH_LO	18
218#define MIPS_CP0_WATCH_HI	19
219#define MIPS_CP0_TLB_XCONTEXT	20
220#define MIPS_CP0_DIAG		22
221#define MIPS_CP0_ECC		26
222#define MIPS_CP0_CACHE_ERR	27
223#define MIPS_CP0_TAG_LO		28
224#define MIPS_CP0_TAG_HI		29
225#define MIPS_CP0_ERROR_PC	30
226#define MIPS_CP0_DEBUG		23
227#define MIPS_CP0_DEPC		24
228#define MIPS_CP0_PERFCNT	25
229#define MIPS_CP0_ERRCTL		26
230#define MIPS_CP0_DATA_LO	28
231#define MIPS_CP0_DATA_HI	29
232#define MIPS_CP0_DESAVE		31
233
234#define MIPS_CP0_CONFIG_SEL	0
235#define MIPS_CP0_CONFIG1_SEL	1
236#define MIPS_CP0_CONFIG2_SEL	2
237#define MIPS_CP0_CONFIG3_SEL	3
238#define MIPS_CP0_CONFIG4_SEL	4
239#define MIPS_CP0_CONFIG5_SEL	5
240
241#define MIPS_CP0_GUESTCTL2	10
242#define MIPS_CP0_GUESTCTL2_SEL	5
243#define MIPS_CP0_GTOFFSET	12
244#define MIPS_CP0_GTOFFSET_SEL	7
245
246/* Resume Flags */
247#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
248#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */
249
250#define RESUME_GUEST		0
251#define RESUME_GUEST_DR		RESUME_FLAG_DR
252#define RESUME_HOST		RESUME_FLAG_HOST
253
254enum emulation_result {
255	EMULATE_DONE,		/* no further processing */
256	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
257	EMULATE_FAIL,		/* can't emulate this instruction */
258	EMULATE_WAIT,		/* WAIT instruction */
259	EMULATE_PRIV_FAIL,
260	EMULATE_EXCEPT,		/* A guest exception has been generated */
261	EMULATE_HYPERCALL,	/* HYPCALL instruction */
262};
263
264#if defined(CONFIG_64BIT)
265#define VPN2_MASK		GENMASK(cpu_vmbits - 1, 13)
266#else
267#define VPN2_MASK		0xffffe000
268#endif
269#define KVM_ENTRYHI_ASID	cpu_asid_mask(&boot_cpu_data)
270#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
271#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
272#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
273#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
274#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
275#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
276#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
277				 ((y) & VPN2_MASK & ~(x).tlb_mask))
278#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
279				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))
280
281struct kvm_mips_tlb {
282	long tlb_mask;
283	long tlb_hi;
284	long tlb_lo[2];
285};
286
287#define KVM_MIPS_AUX_FPU	0x1
288#define KVM_MIPS_AUX_MSA	0x2
289
290struct kvm_vcpu_arch {
291	void *guest_ebase;
292	int (*vcpu_run)(struct kvm_vcpu *vcpu);
293
294	/* Host registers preserved across guest mode execution */
295	unsigned long host_stack;
296	unsigned long host_gp;
297	unsigned long host_pgd;
298	unsigned long host_entryhi;
299
300	/* Host CP0 registers used when handling exits from guest */
301	unsigned long host_cp0_badvaddr;
302	unsigned long host_cp0_epc;
303	u32 host_cp0_cause;
304	u32 host_cp0_guestctl0;
305	u32 host_cp0_badinstr;
306	u32 host_cp0_badinstrp;
307
308	/* GPRS */
309	unsigned long gprs[32];
310	unsigned long hi;
311	unsigned long lo;
312	unsigned long pc;
313
314	/* FPU State */
315	struct mips_fpu_struct fpu;
316	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
317	unsigned int aux_inuse;
318
319	/* COP0 State */
320	struct mips_coproc cop0;
321
322	/* Resume PC after MMIO completion */
323	unsigned long io_pc;
324	/* GPR used as IO source/target */
325	u32 io_gpr;
326
327	struct hrtimer comparecount_timer;
328	/* Count timer control KVM register */
329	u32 count_ctl;
330	/* Count bias from the raw time */
331	u32 count_bias;
332	/* Frequency of timer in Hz */
333	u32 count_hz;
334	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
335	s64 count_dyn_bias;
336	/* Resume time */
337	ktime_t count_resume;
338	/* Period of timer tick in ns */
339	u64 count_period;
340
341	/* Bitmask of exceptions that are pending */
342	unsigned long pending_exceptions;
343
344	/* Bitmask of pending exceptions to be cleared */
345	unsigned long pending_exceptions_clr;
346
347	/* Cache some mmu pages needed inside spinlock regions */
348	struct kvm_mmu_memory_cache mmu_page_cache;
349
350	/* vcpu's vzguestid is different on each host cpu in an smp system */
351	u32 vzguestid[NR_CPUS];
352
353	/* wired guest TLB entries */
354	struct kvm_mips_tlb *wired_tlb;
355	unsigned int wired_tlb_limit;
356	unsigned int wired_tlb_used;
357
358	/* emulated guest MAAR registers */
359	unsigned long maar[6];
360
361	/* Last CPU the VCPU state was loaded on */
362	int last_sched_cpu;
363	/* Last CPU the VCPU actually executed guest code on */
364	int last_exec_cpu;
365
366	/* WAIT executed */
367	int wait;
368
369	u8 fpu_enabled;
370	u8 msa_enabled;
371};
372
373static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
374						unsigned long val)
375{
376	unsigned long temp;
377	do {
378		__asm__ __volatile__(
379		"	.set	push				\n"
380		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
381		"	"__stringify(LONG_LL)	" %0, %1	\n"
382		"	or	%0, %2				\n"
383		"	"__stringify(LONG_SC)	" %0, %1	\n"
384		"	.set	pop				\n"
385		: "=&r" (temp), "+m" (*reg)
386		: "r" (val));
387	} while (unlikely(!temp));
388}
389
390static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
391						  unsigned long val)
392{
393	unsigned long temp;
394	do {
395		__asm__ __volatile__(
396		"	.set	push				\n"
397		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
398		"	"__stringify(LONG_LL)	" %0, %1	\n"
399		"	and	%0, %2				\n"
400		"	"__stringify(LONG_SC)	" %0, %1	\n"
401		"	.set	pop				\n"
402		: "=&r" (temp), "+m" (*reg)
403		: "r" (~val));
404	} while (unlikely(!temp));
405}
406
407static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
408						   unsigned long change,
409						   unsigned long val)
410{
411	unsigned long temp;
412	do {
413		__asm__ __volatile__(
414		"	.set	push				\n"
415		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
416		"	"__stringify(LONG_LL)	" %0, %1	\n"
417		"	and	%0, %2				\n"
418		"	or	%0, %3				\n"
419		"	"__stringify(LONG_SC)	" %0, %1	\n"
420		"	.set	pop				\n"
421		: "=&r" (temp), "+m" (*reg)
422		: "r" (~change), "r" (val & change));
423	} while (unlikely(!temp));
424}
425
426/* Guest register types, used in accessor build below */
427#define __KVMT32	u32
428#define __KVMTl	unsigned long
429
430/*
431 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
432 * These operate on the saved guest C0 state in RAM.
433 */
434
435/* Generate saved context simple accessors */
436#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
437static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
438{									\
439	return cop0->reg[(_reg)][(sel)];				\
440}									\
441static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0,	\
442					   __KVMT##type val)		\
443{									\
444	cop0->reg[(_reg)][(sel)] = val;					\
445}
446
447/* Generate saved context bitwise modifiers */
448#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
449static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
450					 __KVMT##type val)		\
451{									\
452	cop0->reg[(_reg)][(sel)] |= val;				\
453}									\
454static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
455					   __KVMT##type val)		\
456{									\
457	cop0->reg[(_reg)][(sel)] &= ~val;				\
458}									\
459static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
460					    __KVMT##type mask,		\
461					    __KVMT##type val)		\
462{									\
463	unsigned long _mask = mask;					\
464	cop0->reg[(_reg)][(sel)] &= ~_mask;				\
465	cop0->reg[(_reg)][(sel)] |= val & _mask;			\
466}
467
468/* Generate saved context atomic bitwise modifiers */
469#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
470static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
471					 __KVMT##type val)		\
472{									\
473	_kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
474}									\
475static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
476					   __KVMT##type val)		\
477{									\
478	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
479}									\
480static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
481					    __KVMT##type mask,		\
482					    __KVMT##type val)		\
483{									\
484	_kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
485					val);				\
486}
487
488/*
489 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
490 * These operate on the VZ guest C0 context in hardware.
491 */
492
493/* Generate VZ guest context simple accessors */
494#define __BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
495static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
496{									\
497	return read_gc0_##name();					\
498}									\
499static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0,	\
500					   __KVMT##type val)		\
501{									\
502	write_gc0_##name(val);						\
503}
504
505/* Generate VZ guest context bitwise modifiers */
506#define __BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
507static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0,	\
508					 __KVMT##type val)		\
509{									\
510	set_gc0_##name(val);						\
511}									\
512static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0,	\
513					   __KVMT##type val)		\
514{									\
515	clear_gc0_##name(val);						\
516}									\
517static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0,	\
518					    __KVMT##type mask,		\
519					    __KVMT##type val)		\
520{									\
521	change_gc0_##name(mask, val);					\
522}
523
524/* Generate VZ guest context save/restore to/from saved context */
525#define __BUILD_KVM_SAVE_VZ(name, _reg, sel)			\
526static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0)	\
527{									\
528	write_gc0_##name(cop0->reg[(_reg)][(sel)]);			\
529}									\
530static inline void kvm_save_gc0_##name(struct mips_coproc *cop0)	\
531{									\
532	cop0->reg[(_reg)][(sel)] = read_gc0_##name();			\
533}
534
535/*
536 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
537 * These wrap a set of operations to provide them with a different name.
538 */
539
540/* Generate simple accessor wrapper */
541#define __BUILD_KVM_RW_WRAP(name1, name2, type)				\
542static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0)	\
543{									\
544	return kvm_read_##name2(cop0);					\
545}									\
546static inline void kvm_write_##name1(struct mips_coproc *cop0,		\
547				     __KVMT##type val)			\
548{									\
549	kvm_write_##name2(cop0, val);					\
550}
551
552/* Generate bitwise modifier wrapper */
553#define __BUILD_KVM_SET_WRAP(name1, name2, type)			\
554static inline void kvm_set_##name1(struct mips_coproc *cop0,		\
555				   __KVMT##type val)			\
556{									\
557	kvm_set_##name2(cop0, val);					\
558}									\
559static inline void kvm_clear_##name1(struct mips_coproc *cop0,		\
560				     __KVMT##type val)			\
561{									\
562	kvm_clear_##name2(cop0, val);					\
563}									\
564static inline void kvm_change_##name1(struct mips_coproc *cop0,		\
565				      __KVMT##type mask,		\
566				      __KVMT##type val)			\
567{									\
568	kvm_change_##name2(cop0, mask, val);				\
569}
570
571/*
572 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
573 * These generate accessors operating on the saved context in RAM, and wrap them
574 * with the common guest C0 accessors (for use by common emulation code).
575 */
576
577#define __BUILD_KVM_RW_SW(name, type, _reg, sel)			\
578	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
579	__BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)
580
581#define __BUILD_KVM_SET_SW(name, type, _reg, sel)			\
582	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
583	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
584
585#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel)			\
586	__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
587	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
588
589/*
590 * VZ (hardware assisted virtualisation)
591 * These macros use the active guest state in VZ mode (hardware registers),
592 */
593
594/*
595 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
596 * These generate accessors operating on the VZ guest context in hardware, and
597 * wrap them with the common guest C0 accessors (for use by common emulation
598 * code).
599 *
600 * Accessors operating on the saved context in RAM are also generated to allow
601 * convenient explicit saving and restoring of the state.
602 */
603
604#define __BUILD_KVM_RW_HW(name, type, _reg, sel)			\
605	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
606	__BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
607	__BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type)	\
608	__BUILD_KVM_SAVE_VZ(name, _reg, sel)
609
610#define __BUILD_KVM_SET_HW(name, type, _reg, sel)			\
611	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
612	__BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
613	__BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)
614
615/*
616 * We can't do atomic modifications of COP0 state if hardware can modify it.
617 * Races must be handled explicitly.
618 */
619#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_SET_HW
620
621/*
622 * Define accessors for CP0 registers that are accessible to the guest. These
623 * are primarily used by common emulation code, which may need to access the
624 * registers differently depending on the implementation.
625 *
626 *    fns_hw/sw    name     type    reg num         select
627 */
628__BUILD_KVM_RW_HW(index,          32, MIPS_CP0_TLB_INDEX,    0)
629__BUILD_KVM_RW_HW(entrylo0,       l,  MIPS_CP0_TLB_LO0,      0)
630__BUILD_KVM_RW_HW(entrylo1,       l,  MIPS_CP0_TLB_LO1,      0)
631__BUILD_KVM_RW_HW(context,        l,  MIPS_CP0_TLB_CONTEXT,  0)
632__BUILD_KVM_RW_HW(contextconfig,  32, MIPS_CP0_TLB_CONTEXT,  1)
633__BUILD_KVM_RW_HW(userlocal,      l,  MIPS_CP0_TLB_CONTEXT,  2)
634__BUILD_KVM_RW_HW(xcontextconfig, l,  MIPS_CP0_TLB_CONTEXT,  3)
635__BUILD_KVM_RW_HW(pagemask,       l,  MIPS_CP0_TLB_PG_MASK,  0)
636__BUILD_KVM_RW_HW(pagegrain,      32, MIPS_CP0_TLB_PG_MASK,  1)
637__BUILD_KVM_RW_HW(segctl0,        l,  MIPS_CP0_TLB_PG_MASK,  2)
638__BUILD_KVM_RW_HW(segctl1,        l,  MIPS_CP0_TLB_PG_MASK,  3)
639__BUILD_KVM_RW_HW(segctl2,        l,  MIPS_CP0_TLB_PG_MASK,  4)
640__BUILD_KVM_RW_HW(pwbase,         l,  MIPS_CP0_TLB_PG_MASK,  5)
641__BUILD_KVM_RW_HW(pwfield,        l,  MIPS_CP0_TLB_PG_MASK,  6)
642__BUILD_KVM_RW_HW(pwsize,         l,  MIPS_CP0_TLB_PG_MASK,  7)
643__BUILD_KVM_RW_HW(wired,          32, MIPS_CP0_TLB_WIRED,    0)
644__BUILD_KVM_RW_HW(pwctl,          32, MIPS_CP0_TLB_WIRED,    6)
645__BUILD_KVM_RW_HW(hwrena,         32, MIPS_CP0_HWRENA,       0)
646__BUILD_KVM_RW_HW(badvaddr,       l,  MIPS_CP0_BAD_VADDR,    0)
647__BUILD_KVM_RW_HW(badinstr,       32, MIPS_CP0_BAD_VADDR,    1)
648__BUILD_KVM_RW_HW(badinstrp,      32, MIPS_CP0_BAD_VADDR,    2)
649__BUILD_KVM_RW_SW(count,          32, MIPS_CP0_COUNT,        0)
650__BUILD_KVM_RW_HW(entryhi,        l,  MIPS_CP0_TLB_HI,       0)
651__BUILD_KVM_RW_HW(compare,        32, MIPS_CP0_COMPARE,      0)
652__BUILD_KVM_RW_HW(status,         32, MIPS_CP0_STATUS,       0)
653__BUILD_KVM_RW_HW(intctl,         32, MIPS_CP0_STATUS,       1)
654__BUILD_KVM_RW_HW(cause,          32, MIPS_CP0_CAUSE,        0)
655__BUILD_KVM_RW_HW(epc,            l,  MIPS_CP0_EXC_PC,       0)
656__BUILD_KVM_RW_SW(prid,           32, MIPS_CP0_PRID,         0)
657__BUILD_KVM_RW_HW(ebase,          l,  MIPS_CP0_PRID,         1)
658__BUILD_KVM_RW_HW(config,         32, MIPS_CP0_CONFIG,       0)
659__BUILD_KVM_RW_HW(config1,        32, MIPS_CP0_CONFIG,       1)
660__BUILD_KVM_RW_HW(config2,        32, MIPS_CP0_CONFIG,       2)
661__BUILD_KVM_RW_HW(config3,        32, MIPS_CP0_CONFIG,       3)
662__BUILD_KVM_RW_HW(config4,        32, MIPS_CP0_CONFIG,       4)
663__BUILD_KVM_RW_HW(config5,        32, MIPS_CP0_CONFIG,       5)
664__BUILD_KVM_RW_HW(config6,        32, MIPS_CP0_CONFIG,       6)
665__BUILD_KVM_RW_HW(config7,        32, MIPS_CP0_CONFIG,       7)
666__BUILD_KVM_RW_SW(maari,          l,  MIPS_CP0_LLADDR,       2)
667__BUILD_KVM_RW_HW(xcontext,       l,  MIPS_CP0_TLB_XCONTEXT, 0)
668__BUILD_KVM_RW_HW(errorepc,       l,  MIPS_CP0_ERROR_PC,     0)
669__BUILD_KVM_RW_HW(kscratch1,      l,  MIPS_CP0_DESAVE,       2)
670__BUILD_KVM_RW_HW(kscratch2,      l,  MIPS_CP0_DESAVE,       3)
671__BUILD_KVM_RW_HW(kscratch3,      l,  MIPS_CP0_DESAVE,       4)
672__BUILD_KVM_RW_HW(kscratch4,      l,  MIPS_CP0_DESAVE,       5)
673__BUILD_KVM_RW_HW(kscratch5,      l,  MIPS_CP0_DESAVE,       6)
674__BUILD_KVM_RW_HW(kscratch6,      l,  MIPS_CP0_DESAVE,       7)

/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status,        32, MIPS_CP0_STATUS,       0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause,      32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_SET_HW(ebase,         l,  MIPS_CP0_PRID,         1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config,     32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_SET_SAVED(config1,    32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_SET_SAVED(config2,    32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_SET_SAVED(config3,    32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_SET_SAVED(config4,    32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_SET_SAVED(config5,    32, MIPS_CP0_CONFIG,       5)
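
/*
 * Illustrative sketch of the bitwise accessors generated above (naming
 * per the __BUILD_KVM_SET_ and __BUILD_KVM_ATOMIC_ builders earlier in
 * this file; the exact expansions depend on those definitions):
 *
 *	kvm_set_c0_guest_status(&vcpu->arch.cop0, ST0_CU1);
 *	kvm_clear_c0_guest_status(&vcpu->arch.cop0, ST0_CU1);
 *	kvm_change_c0_guest_cause(&vcpu->arch.cop0, CAUSEF_DC, 0);
 *
 * The cause accessors use atomic read-modify-write because Cause may
 * also be updated from the hardirq hrtimer callback.
 */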

/* Helpers */

/*
 * The guest can only be given an FPU if the host has one and userspace
 * has enabled use of it for this VCPU.
 */
static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
		vcpu->fpu_enabled;
}

/* Whether the guest FPU is actually exposed, i.e. guest Config1.FP is set */
static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(&vcpu->cop0) & MIPS_CONF1_FP;
}

/*
 * The guest can only be given MSA if the host has it and userspace has
 * enabled use of it for this VCPU.
 */
static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

/* Whether guest MSA is actually exposed, i.e. guest Config3.MSA is set */
static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(&vcpu->cop0) & MIPS_CONF3_MSA;
}

struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	int (*enable_virtualization_cpu)(void);
	void (*disable_virtualization_cpu)(void);
	int (*check_extension)(struct kvm *kvm, long ext);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*prepare_flush_shadow)(struct kvm *kvm);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
};
extern const struct kvm_mips_callbacks * const kvm_mips_callbacks;
int kvm_mips_emulation_init(void);
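
/*
 * Common KVM/MIPS code dispatches to the active implementation through
 * this table; an illustrative sketch (not a definitive reference):
 *
 *	kvm_mips_callbacks->queue_timer_int(vcpu);
 *	ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
 *
 * The table is provided by the implementation (VZ); see the definition
 * of kvm_mips_callbacks there.
 */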

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);
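
/*
 * Illustrative chaining of the builders above (a sketch, not a
 * definitive reference): each emits code at the given address and is
 * expected to return the first unused address, so generated fragments
 * can be laid out back to back ("vector" and "handler" here stand for
 * caller-provided code buffers):
 *
 *	kvm_mips_build_exception(vector, handler);
 *	p = kvm_mips_build_exit(handler);
 *	p = kvm_mips_build_vcpu_run(p);
 */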

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);
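
/*
 * Rough ownership lifecycle (an illustrative sketch, not a definitive
 * reference): kvm_own_fpu()/kvm_own_msa() hand the hardware FPU/MSA
 * context to the guest, typically after the kvm_mips_guest_has_fpu()/
 * kvm_mips_guest_has_msa() helpers above confirm the guest has the
 * unit; kvm_drop_fpu() discards the guest context, while kvm_lose_fpu()
 * saves it back to the VCPU before the host reuses the hardware:
 *
 *	if (kvm_mips_guest_has_fpu(&vcpu->arch))
 *		kvm_own_fpu(vcpu);
 *	...
 *	kvm_lose_fpu(vcpu);
 */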

/* TLB handling */
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu, bool write_fault);

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa);
void kvm_vz_local_flush_roottlb_all_guests(void);
void kvm_vz_local_flush_guesttlb_all(void);
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
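
/*
 * Illustrative save/restore of a range of guest TLB entries starting at
 * a given index (a sketch, not a definitive reference; the buffer size
 * is caller-chosen):
 *
 *	struct kvm_mips_tlb buf[8];
 *
 *	kvm_vz_save_guesttlb(buf, 0, ARRAY_SIZE(buf));
 *	...
 *	kvm_vz_load_guesttlb(buf, 0, ARRAY_SIZE(buf));
 */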
#ifdef CONFIG_CPU_LOONGSON64
void kvm_loongson_clear_guest_vtlb(void);
void kvm_loongson_clear_guest_ftlb(void);
#endif

/* MMU handling */

bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

/* Emulation */
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);

/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
{
	unsigned long badvaddr = vcpu->host_cp0_badvaddr;
	unsigned long epc = msk_isa16_mode(vcpu->pc);
	u32 cause = vcpu->host_cp0_cause;

	if (epc == badvaddr)
		return true;

	/*
	 * Branches may be 32-bit or 16-bit instructions.
	 * This isn't exact, but we don't really support MIPS16 or microMIPS yet
	 * in KVM anyway.
	 */
	if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
		return true;

	return false;
}

extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);

u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

/* fairly internal functions requiring some care to use */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift);
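
/*
 * Illustrative freeze/restore pattern (a sketch, not a definitive
 * reference; the drift tolerance passed as min_drift depends on the
 * caller):
 *
 *	u32 count;
 *	ktime_t before = kvm_mips_freeze_hrtimer(vcpu, &count);
 *
 *	... update timekeeping state while the hrtimer is frozen ...
 *
 *	kvm_mips_restore_hrtimer(vcpu, before, count, 0);
 */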

void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause,
					    struct kvm_vcpu *vcpu);

/* COP0 */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);

/* Hypercalls (hypcall.c) */

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);

static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
					 struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS

#endif /* __MIPS_KVM_HOST_H__ */