   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * KVM/MIPS: MIPS specific KVM APIs
   7 *
   8 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
   9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
  10 */
  11
  12#include <linux/bitops.h>
  13#include <linux/errno.h>
  14#include <linux/err.h>
  15#include <linux/kdebug.h>
  16#include <linux/module.h>
  17#include <linux/uaccess.h>
  18#include <linux/vmalloc.h>
  19#include <linux/sched/signal.h>
  20#include <linux/fs.h>
  21#include <linux/memblock.h>
  22
  23#include <asm/fpu.h>
  24#include <asm/page.h>
  25#include <asm/cacheflush.h>
  26#include <asm/mmu_context.h>
  27#include <asm/pgalloc.h>
  28#include <asm/pgtable.h>
  29
  30#include <linux/kvm_host.h>
  31
  32#include "interrupt.h"
  33#include "commpage.h"
  34
  35#define CREATE_TRACE_POINTS
  36#include "trace.h"
  37
  38#ifndef VECTORSPACING
  39#define VECTORSPACING 0x100	/* for EI/VI mode */
  40#endif
  41
  42#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
  43struct kvm_stats_debugfs_item debugfs_entries[] = {
  44	{ "wait",	  VCPU_STAT(wait_exits),	 KVM_STAT_VCPU },
  45	{ "cache",	  VCPU_STAT(cache_exits),	 KVM_STAT_VCPU },
  46	{ "signal",	  VCPU_STAT(signal_exits),	 KVM_STAT_VCPU },
  47	{ "interrupt",	  VCPU_STAT(int_exits),		 KVM_STAT_VCPU },
  48	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
  49	{ "tlbmod",	  VCPU_STAT(tlbmod_exits),	 KVM_STAT_VCPU },
  50	{ "tlbmiss_ld",	  VCPU_STAT(tlbmiss_ld_exits),	 KVM_STAT_VCPU },
  51	{ "tlbmiss_st",	  VCPU_STAT(tlbmiss_st_exits),	 KVM_STAT_VCPU },
  52	{ "addrerr_st",	  VCPU_STAT(addrerr_st_exits),	 KVM_STAT_VCPU },
  53	{ "addrerr_ld",	  VCPU_STAT(addrerr_ld_exits),	 KVM_STAT_VCPU },
  54	{ "syscall",	  VCPU_STAT(syscall_exits),	 KVM_STAT_VCPU },
  55	{ "resvd_inst",	  VCPU_STAT(resvd_inst_exits),	 KVM_STAT_VCPU },
  56	{ "break_inst",	  VCPU_STAT(break_inst_exits),	 KVM_STAT_VCPU },
  57	{ "trap_inst",	  VCPU_STAT(trap_inst_exits),	 KVM_STAT_VCPU },
  58	{ "msa_fpe",	  VCPU_STAT(msa_fpe_exits),	 KVM_STAT_VCPU },
  59	{ "fpe",	  VCPU_STAT(fpe_exits),		 KVM_STAT_VCPU },
  60	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
  61	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
  62#ifdef CONFIG_KVM_MIPS_VZ
  63	{ "vz_gpsi",	  VCPU_STAT(vz_gpsi_exits),	 KVM_STAT_VCPU },
  64	{ "vz_gsfc",	  VCPU_STAT(vz_gsfc_exits),	 KVM_STAT_VCPU },
  65	{ "vz_hc",	  VCPU_STAT(vz_hc_exits),	 KVM_STAT_VCPU },
  66	{ "vz_grr",	  VCPU_STAT(vz_grr_exits),	 KVM_STAT_VCPU },
  67	{ "vz_gva",	  VCPU_STAT(vz_gva_exits),	 KVM_STAT_VCPU },
  68	{ "vz_ghfc",	  VCPU_STAT(vz_ghfc_exits),	 KVM_STAT_VCPU },
  69	{ "vz_gpa",	  VCPU_STAT(vz_gpa_exits),	 KVM_STAT_VCPU },
  70	{ "vz_resvd",	  VCPU_STAT(vz_resvd_exits),	 KVM_STAT_VCPU },
  71#endif
  72	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
  73	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
  74	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
  75	{ "halt_wakeup",  VCPU_STAT(halt_wakeup),	 KVM_STAT_VCPU },
  76	{NULL}
  77};
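
The VCPU_STAT() wrapper above stores nothing but a byte offset; the generic debugfs code later adds that offset to the address of a vCPU to read whichever counter the entry names. A stand-alone sketch of the same offsetof() technique, using mock types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

struct mock_stat { unsigned long long wait_exits, int_exits; };
struct mock_vcpu { int id; struct mock_stat stat; };

struct stat_item { const char *name; size_t offset; };
#define MOCK_STAT(x) { #x, offsetof(struct mock_vcpu, stat.x) }

static const struct stat_item items[] = {
	MOCK_STAT(wait_exits),
	MOCK_STAT(int_exits),
};

int main(void)
{
	struct mock_vcpu v = { .id = 0, .stat = { 3, 42 } };
	int i;

	for (i = 0; i < 2; i++)	/* generic read through the stored offset */
		printf("%s = %llu\n", items[i].name,
		       *(unsigned long long *)((char *)&v + items[i].offset));
	return 0;
}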
  78
  79bool kvm_trace_guest_mode_change;
  80
  81int kvm_guest_mode_change_trace_reg(void)
  82{
   83	kvm_trace_guest_mode_change = true;
  84	return 0;
  85}
  86
  87void kvm_guest_mode_change_trace_unreg(void)
  88{
   89	kvm_trace_guest_mode_change = false;
  90}
  91
  92/*
   93 * XXXKYMA: We are simulating a processor that has the WII bit set in
  94 * Config7, so we are "runnable" if interrupts are pending
  95 */
  96int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
  97{
  98	return !!(vcpu->arch.pending_exceptions);
  99}
 100
 101bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 102{
 103	return false;
 104}
 105
 106int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 107{
 108	return 1;
 109}
 110
 111int kvm_arch_hardware_enable(void)
 112{
 113	return kvm_mips_callbacks->hardware_enable();
 114}
 115
 116void kvm_arch_hardware_disable(void)
 117{
 118	kvm_mips_callbacks->hardware_disable();
 119}
 120
 121int kvm_arch_hardware_setup(void)
 122{
 123	return 0;
 124}
 125
 126int kvm_arch_check_processor_compat(void)
 127{
 128	return 0;
 129}
 130
 131int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 132{
 133	switch (type) {
 134#ifdef CONFIG_KVM_MIPS_VZ
 135	case KVM_VM_MIPS_VZ:
 136#else
 137	case KVM_VM_MIPS_TE:
 138#endif
 139		break;
 140	default:
 141		/* Unsupported KVM type */
 142		return -EINVAL;
  143	}
 144
 145	/* Allocate page table to map GPA -> RPA */
 146	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
 147	if (!kvm->arch.gpa_mm.pgd)
 148		return -ENOMEM;
 149
 150	return 0;
 151}
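
The type argument validated above comes straight from user space's KVM_CREATE_VM call. A minimal sketch of that side (function name illustrative, error handling elided), assuming a VZ-capable host; trap-and-emulate builds would pass KVM_VM_MIPS_TE instead:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int create_mips_vm(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);

	/* The type must match the kernel configuration: KVM_VM_MIPS_VZ
	 * under CONFIG_KVM_MIPS_VZ, KVM_VM_MIPS_TE otherwise; anything
	 * else takes the -EINVAL path above. */
	return ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_MIPS_VZ);
}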
 152
 153void kvm_mips_free_vcpus(struct kvm *kvm)
 154{
 155	unsigned int i;
 156	struct kvm_vcpu *vcpu;
 157
 158	kvm_for_each_vcpu(i, vcpu, kvm) {
 159		kvm_arch_vcpu_free(vcpu);
 160	}
 161
 162	mutex_lock(&kvm->lock);
 163
 164	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
 165		kvm->vcpus[i] = NULL;
 166
 167	atomic_set(&kvm->online_vcpus, 0);
 168
 169	mutex_unlock(&kvm->lock);
 170}
 171
 172static void kvm_mips_free_gpa_pt(struct kvm *kvm)
 173{
 174	/* It should always be safe to remove after flushing the whole range */
 175	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
 176	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
 177}
 178
 179void kvm_arch_destroy_vm(struct kvm *kvm)
 180{
 181	kvm_mips_free_vcpus(kvm);
 182	kvm_mips_free_gpa_pt(kvm);
 183}
 184
 185long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
 186			unsigned long arg)
 187{
 188	return -ENOIOCTLCMD;
 189}
 190
 191int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 192			    unsigned long npages)
 193{
 194	return 0;
 195}
 196
 197void kvm_arch_flush_shadow_all(struct kvm *kvm)
 198{
 199	/* Flush whole GPA */
 200	kvm_mips_flush_gpa_pt(kvm, 0, ~0);
 201
 202	/* Let implementation do the rest */
 203	kvm_mips_callbacks->flush_shadow_all(kvm);
 204}
 205
 206void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 207				   struct kvm_memory_slot *slot)
 208{
 209	/*
 210	 * The slot has been made invalid (ready for moving or deletion), so we
 211	 * need to ensure that it can no longer be accessed by any guest VCPUs.
 212	 */
 213
 214	spin_lock(&kvm->mmu_lock);
 215	/* Flush slot from GPA */
 216	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
 217			      slot->base_gfn + slot->npages - 1);
 218	/* Let implementation do the rest */
 219	kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
 220	spin_unlock(&kvm->mmu_lock);
 221}
 222
 223int kvm_arch_prepare_memory_region(struct kvm *kvm,
 224				   struct kvm_memory_slot *memslot,
 225				   const struct kvm_userspace_memory_region *mem,
 226				   enum kvm_mr_change change)
 227{
 228	return 0;
 229}
 230
 231void kvm_arch_commit_memory_region(struct kvm *kvm,
 232				   const struct kvm_userspace_memory_region *mem,
 233				   const struct kvm_memory_slot *old,
 234				   const struct kvm_memory_slot *new,
 235				   enum kvm_mr_change change)
 236{
 237	int needs_flush;
 238
 239	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
 240		  __func__, kvm, mem->slot, mem->guest_phys_addr,
 241		  mem->memory_size, mem->userspace_addr);
 242
 243	/*
 244	 * If dirty page logging is enabled, write protect all pages in the slot
 245	 * ready for dirty logging.
 246	 *
 247	 * There is no need to do this in any of the following cases:
 248	 * CREATE:	No dirty mappings will already exist.
 249	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
 250	 *		kvm_arch_flush_shadow_memslot()
 251	 */
 252	if (change == KVM_MR_FLAGS_ONLY &&
 253	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
 254	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
 255		spin_lock(&kvm->mmu_lock);
 256		/* Write protect GPA page table entries */
 257		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
 258					new->base_gfn + new->npages - 1);
 259		/* Let implementation do the rest */
 260		if (needs_flush)
 261			kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
 262		spin_unlock(&kvm->mmu_lock);
 263	}
 264}
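
The KVM_MR_FLAGS_ONLY branch above fires when user space toggles KVM_MEM_LOG_DIRTY_PAGES on an existing slot. A sketch of that toggle (function name illustrative; vm_fd and the slot's original geometry assumed to exist already):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Re-register slot 0 with dirty logging enabled; only .flags changes,
 * so the kernel sees change == KVM_MR_FLAGS_ONLY. */
int enable_dirty_logging(int vm_fd, __u64 gpa, __u64 size, void *hva)
{
	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.flags = KVM_MEM_LOG_DIRTY_PAGES,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (__u64)(unsigned long)hva,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}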
 265
 266static inline void dump_handler(const char *symbol, void *start, void *end)
 267{
 268	u32 *p;
 269
 270	pr_debug("LEAF(%s)\n", symbol);
 271
 272	pr_debug("\t.set push\n");
 273	pr_debug("\t.set noreorder\n");
 274
 275	for (p = start; p < (u32 *)end; ++p)
 276		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);
 277
 278	pr_debug("\t.set\tpop\n");
 279
 280	pr_debug("\tEND(%s)\n", symbol);
 281}
 282
 283struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 284{
 285	int err, size;
 286	void *gebase, *p, *handler, *refill_start, *refill_end;
 287	int i;
 288
 289	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
 290
 291	if (!vcpu) {
 292		err = -ENOMEM;
 293		goto out;
 294	}
 295
 296	err = kvm_vcpu_init(vcpu, kvm, id);
 297
 298	if (err)
 299		goto out_free_cpu;
 300
 301	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
 302
 303	/*
 304	 * Allocate space for host mode exception handlers that handle
 305	 * guest mode exits
 306	 */
 307	if (cpu_has_veic || cpu_has_vint)
 308		size = 0x200 + VECTORSPACING * 64;
 309	else
 310		size = 0x4000;
 311
 312	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
 313
 314	if (!gebase) {
 315		err = -ENOMEM;
 316		goto out_uninit_cpu;
 317	}
 318	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
 319		  ALIGN(size, PAGE_SIZE), gebase);
 320
 321	/*
 322	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
 323	 * limits us to the low 512MB of physical address space. If the memory
 324	 * we allocate is out of range, just give up now.
 325	 */
 326	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
 327		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
 328			gebase);
 329		err = -ENOMEM;
 330		goto out_free_gebase;
 331	}
 332
 333	/* Save new ebase */
 334	vcpu->arch.guest_ebase = gebase;
 335
 336	/* Build guest exception vectors dynamically in unmapped memory */
 337	handler = gebase + 0x2000;
 338
 339	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
 340	refill_start = gebase;
 341	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
 342		refill_start += 0x080;
 343	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);
 344
 345	/* General Exception Entry point */
 346	kvm_mips_build_exception(gebase + 0x180, handler);
 347
 348	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
 349	for (i = 0; i < 8; i++) {
 350		kvm_debug("L1 Vectored handler @ %p\n",
 351			  gebase + 0x200 + (i * VECTORSPACING));
 352		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
 353					 handler);
 354	}
 355
 356	/* General exit handler */
 357	p = handler;
 358	p = kvm_mips_build_exit(p);
 359
 360	/* Guest entry routine */
 361	vcpu->arch.vcpu_run = p;
 362	p = kvm_mips_build_vcpu_run(p);
 363
 364	/* Dump the generated code */
 365	pr_debug("#include <asm/asm.h>\n");
 366	pr_debug("#include <asm/regdef.h>\n");
 367	pr_debug("\n");
 368	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
 369	dump_handler("kvm_tlb_refill", refill_start, refill_end);
 370	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
 371	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
 372
 373	/* Invalidate the icache for these ranges */
 374	flush_icache_range((unsigned long)gebase,
 375			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
 376
 377	/*
 378	 * Allocate comm page for guest kernel, a TLB will be reserved for
 379	 * mapping GVA @ 0xFFFF8000 to this page
 380	 */
 381	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
 382
 383	if (!vcpu->arch.kseg0_commpage) {
 384		err = -ENOMEM;
 385		goto out_free_gebase;
 386	}
 387
 388	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
 389	kvm_mips_commpage_init(vcpu);
 390
 391	/* Init */
 392	vcpu->arch.last_sched_cpu = -1;
 393	vcpu->arch.last_exec_cpu = -1;
 394
 395	return vcpu;
 396
 397out_free_gebase:
 398	kfree(gebase);
 399
 400out_uninit_cpu:
 401	kvm_vcpu_uninit(vcpu);
 402
 403out_free_cpu:
 404	kfree(vcpu);
 405
 406out:
 407	return ERR_PTR(err);
 408}
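
To summarize the exception base ("gebase") region assembled above: the offsets are taken from the code just shown, while the macro names below are purely illustrative, not kernel API:

/* Illustrative names only. */
#define GEBASE_TLB_REFILL  0x0000	/* 0x0080 on 64-bit VZ (XTLB refill) */
#define GEBASE_GEN_EXC     0x0180	/* general exception entry           */
#define GEBASE_VI(i)       (0x0200 + (i) * VECTORSPACING)  /* 8 vectors  */
#define GEBASE_EXIT        0x2000	/* common exit handler, followed by  */
					/* the generated vcpu_run code       */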
 409
 410void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 411{
 412	hrtimer_cancel(&vcpu->arch.comparecount_timer);
 413
 414	kvm_vcpu_uninit(vcpu);
 415
 416	kvm_mips_dump_stats(vcpu);
 417
 418	kvm_mmu_free_memory_caches(vcpu);
 419	kfree(vcpu->arch.guest_ebase);
 420	kfree(vcpu->arch.kseg0_commpage);
 421	kfree(vcpu);
 422}
 423
 424void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 425{
 426	kvm_arch_vcpu_free(vcpu);
 427}
 428
 429int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 430					struct kvm_guest_debug *dbg)
 431{
 432	return -ENOIOCTLCMD;
 433}
 434
 435int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 436{
 437	int r = -EINTR;
 438
 439	vcpu_load(vcpu);
 440
 441	kvm_sigset_activate(vcpu);
 442
 443	if (vcpu->mmio_needed) {
 444		if (!vcpu->mmio_is_write)
 445			kvm_mips_complete_mmio_load(vcpu, run);
 446		vcpu->mmio_needed = 0;
 447	}
 448
 449	if (run->immediate_exit)
 450		goto out;
 451
 452	lose_fpu(1);
 453
 454	local_irq_disable();
 455	guest_enter_irqoff();
 456	trace_kvm_enter(vcpu);
 457
 458	/*
 459	 * Make sure the read of VCPU requests in vcpu_run() callback is not
 460	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
 461	 * flush request while the requester sees the VCPU as outside of guest
 462	 * mode and not needing an IPI.
 463	 */
 464	smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 465
 466	r = kvm_mips_callbacks->vcpu_run(run, vcpu);
 467
 468	trace_kvm_out(vcpu);
 469	guest_exit_irqoff();
 470	local_irq_enable();
 471
 472out:
 473	kvm_sigset_deactivate(vcpu);
 474
 475	vcpu_put(vcpu);
 476	return r;
 477}
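
This ioctl is driven from user space in the usual KVM way: mmap the shared kvm_run area, then call KVM_RUN in a loop and dispatch on exit_reason. A minimal sketch (function name illustrative; kvm_fd and vcpu_fd assumed open, error handling elided):

#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

void run_loop(int kvm_fd, int vcpu_fd)
{
	int size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu_fd, 0);

	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			break;		/* e.g. -EINTR from a pending signal */
		if (run->exit_reason == KVM_EXIT_MMIO)
			continue;	/* emulate the access, then re-enter */
		break;			/* hand other exits to the VMM */
	}
}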
 478
 479int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 480			     struct kvm_mips_interrupt *irq)
 481{
 482	int intr = (int)irq->irq;
 483	struct kvm_vcpu *dvcpu = NULL;
 484
 485	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
 486		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
 487			  (int)intr);
 488
 489	if (irq->cpu == -1)
 490		dvcpu = vcpu;
 491	else
 492		dvcpu = vcpu->kvm->vcpus[irq->cpu];
 493
 494	if (intr == 2 || intr == 3 || intr == 4) {
 495		kvm_mips_callbacks->queue_io_int(dvcpu, irq);
 496
 497	} else if (intr == -2 || intr == -3 || intr == -4) {
 498		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
 499	} else {
 500		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
 501			irq->cpu, irq->irq);
 502		return -EINVAL;
 503	}
 504
 505	dvcpu->arch.wait = 0;
 506
 507	if (swq_has_sleeper(&dvcpu->wq))
 508		swake_up_one(&dvcpu->wq);
 509
 510	return 0;
 511}
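
User space reaches this function through the asynchronous KVM_INTERRUPT ioctl (see kvm_arch_vcpu_async_ioctl() below): an irq of 2-4 raises the corresponding I/O interrupt line and the negated value lowers it again. A minimal sketch (function name illustrative):

#include <sys/ioctl.h>
#include <linux/kvm.h>	/* pulls in struct kvm_mips_interrupt on MIPS */

int set_io_irq(int vcpu_fd, int line, int asserted)
{
	struct kvm_mips_interrupt irq = {
		.cpu = -1,			/* target this very vCPU */
		.irq = asserted ? line : -line,	/* 2..4 raise, -2..-4 lower */
	};

	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}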
 512
 513int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 514				    struct kvm_mp_state *mp_state)
 515{
 516	return -ENOIOCTLCMD;
 517}
 518
 519int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 520				    struct kvm_mp_state *mp_state)
 521{
 522	return -ENOIOCTLCMD;
 523}
 524
 525static u64 kvm_mips_get_one_regs[] = {
 526	KVM_REG_MIPS_R0,
 527	KVM_REG_MIPS_R1,
 528	KVM_REG_MIPS_R2,
 529	KVM_REG_MIPS_R3,
 530	KVM_REG_MIPS_R4,
 531	KVM_REG_MIPS_R5,
 532	KVM_REG_MIPS_R6,
 533	KVM_REG_MIPS_R7,
 534	KVM_REG_MIPS_R8,
 535	KVM_REG_MIPS_R9,
 536	KVM_REG_MIPS_R10,
 537	KVM_REG_MIPS_R11,
 538	KVM_REG_MIPS_R12,
 539	KVM_REG_MIPS_R13,
 540	KVM_REG_MIPS_R14,
 541	KVM_REG_MIPS_R15,
 542	KVM_REG_MIPS_R16,
 543	KVM_REG_MIPS_R17,
 544	KVM_REG_MIPS_R18,
 545	KVM_REG_MIPS_R19,
 546	KVM_REG_MIPS_R20,
 547	KVM_REG_MIPS_R21,
 548	KVM_REG_MIPS_R22,
 549	KVM_REG_MIPS_R23,
 550	KVM_REG_MIPS_R24,
 551	KVM_REG_MIPS_R25,
 552	KVM_REG_MIPS_R26,
 553	KVM_REG_MIPS_R27,
 554	KVM_REG_MIPS_R28,
 555	KVM_REG_MIPS_R29,
 556	KVM_REG_MIPS_R30,
 557	KVM_REG_MIPS_R31,
 558
 559#ifndef CONFIG_CPU_MIPSR6
 560	KVM_REG_MIPS_HI,
 561	KVM_REG_MIPS_LO,
 562#endif
 563	KVM_REG_MIPS_PC,
 564};
 565
 566static u64 kvm_mips_get_one_regs_fpu[] = {
 567	KVM_REG_MIPS_FCR_IR,
 568	KVM_REG_MIPS_FCR_CSR,
 569};
 570
 571static u64 kvm_mips_get_one_regs_msa[] = {
 572	KVM_REG_MIPS_MSA_IR,
 573	KVM_REG_MIPS_MSA_CSR,
 574};
 575
 576static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
 577{
 578	unsigned long ret;
 579
 580	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
 581	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
 582		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
 583		/* odd doubles */
 584		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
 585			ret += 16;
 586	}
 587	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
 588		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
 589	ret += kvm_mips_callbacks->num_regs(vcpu);
 590
 591	return ret;
 592}
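
Worked through for a non-R6 guest with both features: the base array holds 32 GPRs + HI + LO + PC = 35 indices; an FPU adds 2 control registers plus 48 more (32 single-precision FPRs and 16 even-numbered doubles), plus another 16 odd doubles when the host FPU reports F64; MSA adds 2 control registers plus 32 vector registers. That is 35 + 50 + 16 + 34 = 135 before the implementation's own num_regs() contribution.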
 593
 594static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
 595{
 596	u64 index;
 597	unsigned int i;
 598
 599	if (copy_to_user(indices, kvm_mips_get_one_regs,
 600			 sizeof(kvm_mips_get_one_regs)))
 601		return -EFAULT;
 602	indices += ARRAY_SIZE(kvm_mips_get_one_regs);
 603
 604	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
 605		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
 606				 sizeof(kvm_mips_get_one_regs_fpu)))
 607			return -EFAULT;
 608		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);
 609
 610		for (i = 0; i < 32; ++i) {
 611			index = KVM_REG_MIPS_FPR_32(i);
 612			if (copy_to_user(indices, &index, sizeof(index)))
 613				return -EFAULT;
 614			++indices;
 615
 616			/* skip odd doubles if no F64 */
 617			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
 618				continue;
 619
 620			index = KVM_REG_MIPS_FPR_64(i);
 621			if (copy_to_user(indices, &index, sizeof(index)))
 622				return -EFAULT;
 623			++indices;
 624		}
 625	}
 626
 627	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
 628		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
 629				 sizeof(kvm_mips_get_one_regs_msa)))
 630			return -EFAULT;
 631		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);
 632
 633		for (i = 0; i < 32; ++i) {
 634			index = KVM_REG_MIPS_VEC_128(i);
 635			if (copy_to_user(indices, &index, sizeof(index)))
 636				return -EFAULT;
 637			++indices;
 638		}
 639	}
 640
 641	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
 642}
 643
 644static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 645			    const struct kvm_one_reg *reg)
 646{
 647	struct mips_coproc *cop0 = vcpu->arch.cop0;
 648	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
 649	int ret;
 650	s64 v;
 651	s64 vs[2];
 652	unsigned int idx;
 653
 654	switch (reg->id) {
 655	/* General purpose registers */
 656	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
 657		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
 658		break;
 659#ifndef CONFIG_CPU_MIPSR6
 660	case KVM_REG_MIPS_HI:
 661		v = (long)vcpu->arch.hi;
 662		break;
 663	case KVM_REG_MIPS_LO:
 664		v = (long)vcpu->arch.lo;
 665		break;
 666#endif
 667	case KVM_REG_MIPS_PC:
 668		v = (long)vcpu->arch.pc;
 669		break;
 670
 671	/* Floating point registers */
 672	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
 673		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 674			return -EINVAL;
 675		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
 676		/* Odd singles in top of even double when FR=0 */
 677		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
 678			v = get_fpr32(&fpu->fpr[idx], 0);
 679		else
 680			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
 681		break;
 682	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
 683		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 684			return -EINVAL;
 685		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
 686		/* Can't access odd doubles in FR=0 mode */
 687		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
 688			return -EINVAL;
 689		v = get_fpr64(&fpu->fpr[idx], 0);
 690		break;
 691	case KVM_REG_MIPS_FCR_IR:
 692		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 693			return -EINVAL;
 694		v = boot_cpu_data.fpu_id;
 695		break;
 696	case KVM_REG_MIPS_FCR_CSR:
 697		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 698			return -EINVAL;
 699		v = fpu->fcr31;
 700		break;
 701
 702	/* MIPS SIMD Architecture (MSA) registers */
 703	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
 704		if (!kvm_mips_guest_has_msa(&vcpu->arch))
 705			return -EINVAL;
 706		/* Can't access MSA registers in FR=0 mode */
 707		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
 708			return -EINVAL;
 709		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
 710#ifdef CONFIG_CPU_LITTLE_ENDIAN
 711		/* least significant byte first */
 712		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
 713		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
 714#else
 715		/* most significant byte first */
 716		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
 717		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
 718#endif
 719		break;
 720	case KVM_REG_MIPS_MSA_IR:
 721		if (!kvm_mips_guest_has_msa(&vcpu->arch))
 722			return -EINVAL;
 723		v = boot_cpu_data.msa_id;
 724		break;
 725	case KVM_REG_MIPS_MSA_CSR:
 726		if (!kvm_mips_guest_has_msa(&vcpu->arch))
 727			return -EINVAL;
 728		v = fpu->msacsr;
 729		break;
 730
 731	/* registers to be handled specially */
 732	default:
 733		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
 734		if (ret)
 735			return ret;
 736		break;
 737	}
 738	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
 739		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
 740
 741		return put_user(v, uaddr64);
 742	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
 743		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
 744		u32 v32 = (u32)v;
 745
 746		return put_user(v32, uaddr32);
 747	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
 748		void __user *uaddr = (void __user *)(long)reg->addr;
 749
 750		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
 751	} else {
 752		return -EINVAL;
 753	}
 754}
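
From user space each register is fetched individually with KVM_GET_ONE_REG, passing a register ID and a pointer sized according to the ID's KVM_REG_SIZE field. A minimal sketch reading the guest PC (function name illustrative; KVM_REG_MIPS_PC comes from the MIPS uapi headers):

#include <sys/ioctl.h>
#include <linux/kvm.h>

int read_guest_pc(int vcpu_fd, __u64 *pc)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_MIPS_PC,		/* a 64-bit register ID */
		.addr = (__u64)(unsigned long)pc,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}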
 755
 756static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 757			    const struct kvm_one_reg *reg)
 758{
 759	struct mips_coproc *cop0 = vcpu->arch.cop0;
 760	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
 761	s64 v;
 762	s64 vs[2];
 763	unsigned int idx;
 764
 765	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
 766		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
 767
 768		if (get_user(v, uaddr64) != 0)
 769			return -EFAULT;
 770	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
 771		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
 772		s32 v32;
 773
 774		if (get_user(v32, uaddr32) != 0)
 775			return -EFAULT;
 776		v = (s64)v32;
 777	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
 778		void __user *uaddr = (void __user *)(long)reg->addr;
 779
 780		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
 781	} else {
 782		return -EINVAL;
 783	}
 784
 785	switch (reg->id) {
 786	/* General purpose registers */
 787	case KVM_REG_MIPS_R0:
 788		/* Silently ignore requests to set $0 */
 789		break;
 790	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
 791		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
 792		break;
 793#ifndef CONFIG_CPU_MIPSR6
 794	case KVM_REG_MIPS_HI:
 795		vcpu->arch.hi = v;
 796		break;
 797	case KVM_REG_MIPS_LO:
 798		vcpu->arch.lo = v;
 799		break;
 800#endif
 801	case KVM_REG_MIPS_PC:
 802		vcpu->arch.pc = v;
 803		break;
 804
 805	/* Floating point registers */
 806	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
 807		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 808			return -EINVAL;
 809		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
 810		/* Odd singles in top of even double when FR=0 */
 811		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
 812			set_fpr32(&fpu->fpr[idx], 0, v);
 813		else
 814			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
 815		break;
 816	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
 817		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 818			return -EINVAL;
 819		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
 820		/* Can't access odd doubles in FR=0 mode */
 821		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
 822			return -EINVAL;
 823		set_fpr64(&fpu->fpr[idx], 0, v);
 824		break;
 825	case KVM_REG_MIPS_FCR_IR:
 826		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 827			return -EINVAL;
 828		/* Read-only */
 829		break;
 830	case KVM_REG_MIPS_FCR_CSR:
 831		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 832			return -EINVAL;
 833		fpu->fcr31 = v;
 834		break;
 835
 836	/* MIPS SIMD Architecture (MSA) registers */
 837	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
 838		if (!kvm_mips_guest_has_msa(&vcpu->arch))
 839			return -EINVAL;
 840		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
 841#ifdef CONFIG_CPU_LITTLE_ENDIAN
 842		/* least significant byte first */
 843		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
 844		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
 845#else
 846		/* most significant byte first */
 847		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
 848		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
 849#endif
 850		break;
 851	case KVM_REG_MIPS_MSA_IR:
 852		if (!kvm_mips_guest_has_msa(&vcpu->arch))
 853			return -EINVAL;
 854		/* Read-only */
 855		break;
 856	case KVM_REG_MIPS_MSA_CSR:
 857		if (!kvm_mips_guest_has_msa(&vcpu->arch))
 858			return -EINVAL;
 859		fpu->msacsr = v;
 860		break;
 861
 862	/* registers to be handled specially */
 863	default:
 864		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
 865	}
 866	return 0;
 867}
 868
 869static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 870				     struct kvm_enable_cap *cap)
 871{
 872	int r = 0;
 873
 874	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
 875		return -EINVAL;
 876	if (cap->flags)
 877		return -EINVAL;
 878	if (cap->args[0])
 879		return -EINVAL;
 880
 881	switch (cap->cap) {
 882	case KVM_CAP_MIPS_FPU:
 883		vcpu->arch.fpu_enabled = true;
 884		break;
 885	case KVM_CAP_MIPS_MSA:
 886		vcpu->arch.msa_enabled = true;
 887		break;
 888	default:
 889		r = -EINVAL;
 890		break;
 891	}
 892
 893	return r;
 894}
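
The matching user-space side enables the guest FPU before the first KVM_RUN; flags and args must be zero, as enforced above (function name illustrative):

#include <sys/ioctl.h>
#include <linux/kvm.h>

int enable_guest_fpu(int vcpu_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}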
 895
 896long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
 897			       unsigned long arg)
 898{
 899	struct kvm_vcpu *vcpu = filp->private_data;
 900	void __user *argp = (void __user *)arg;
 901
 902	if (ioctl == KVM_INTERRUPT) {
 903		struct kvm_mips_interrupt irq;
 904
 905		if (copy_from_user(&irq, argp, sizeof(irq)))
 906			return -EFAULT;
 907		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
 908			  irq.irq);
 909
 910		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
 911	}
 912
 913	return -ENOIOCTLCMD;
 914}
 915
 916long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
 917			 unsigned long arg)
 918{
 919	struct kvm_vcpu *vcpu = filp->private_data;
 920	void __user *argp = (void __user *)arg;
 921	long r;
 922
 923	vcpu_load(vcpu);
 924
 925	switch (ioctl) {
 926	case KVM_SET_ONE_REG:
 927	case KVM_GET_ONE_REG: {
 928		struct kvm_one_reg reg;
 929
 930		r = -EFAULT;
 931		if (copy_from_user(&reg, argp, sizeof(reg)))
 932			break;
 933		if (ioctl == KVM_SET_ONE_REG)
 934			r = kvm_mips_set_reg(vcpu, &reg);
 935		else
 936			r = kvm_mips_get_reg(vcpu, &reg);
 937		break;
 938	}
 939	case KVM_GET_REG_LIST: {
 940		struct kvm_reg_list __user *user_list = argp;
 941		struct kvm_reg_list reg_list;
 942		unsigned n;
 943
 944		r = -EFAULT;
 945		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
 946			break;
 947		n = reg_list.n;
 948		reg_list.n = kvm_mips_num_regs(vcpu);
 949		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
 950			break;
 951		r = -E2BIG;
 952		if (n < reg_list.n)
 953			break;
 954		r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
 955		break;
 956	}
 957	case KVM_ENABLE_CAP: {
 958		struct kvm_enable_cap cap;
 959
 960		r = -EFAULT;
 961		if (copy_from_user(&cap, argp, sizeof(cap)))
 962			break;
 963		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
 964		break;
 965	}
 966	default:
 967		r = -ENOIOCTLCMD;
 968	}
 969
 970	vcpu_put(vcpu);
 971	return r;
 972}
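
The KVM_GET_REG_LIST case implements the usual two-pass protocol: the caller sends its buffer size in n, the kernel writes back the real count, and the ioctl fails with E2BIG if the buffer was too small. A sketch of the user-space side (function name illustrative, error handling trimmed):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 }, *list;

	/* First pass: fails with E2BIG but fills in the required count. */
	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
	if (!list)
		return NULL;
	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		free(list);
		return NULL;
	}
	return list;
}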
 973
 974/**
 975 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 976 * @kvm: kvm instance
 977 * @log: slot id and address to which we copy the log
 978 *
 979 * Steps 1-4 below provide a general overview of dirty page logging. See
 980 * the kvm_get_dirty_log_protect() function description for additional details.
 981 *
 982 * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
 983 * always flush the TLB (step 4) even if a previous step failed and the dirty
 984 * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
 985 * API does not preclude a subsequent dirty log read by user space. Flushing
 986 * the TLB ensures writes will be marked dirty for the next log read.
 987 *
 988 *   1. Take a snapshot of the bit and clear it if needed.
 989 *   2. Write protect the corresponding page.
 990 *   3. Copy the snapshot to the userspace.
 991 *   4. Flush TLB's if needed.
 992 */
 993int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 994{
 995	struct kvm_memslots *slots;
 996	struct kvm_memory_slot *memslot;
 997	bool flush = false;
 998	int r;
 999
1000	mutex_lock(&kvm->slots_lock);
1001
1002	r = kvm_get_dirty_log_protect(kvm, log, &flush);
1003
1004	if (flush) {
1005		slots = kvm_memslots(kvm);
1006		memslot = id_to_memslot(slots, log->slot);
1007
1008		/* Let implementation handle TLB/GVA invalidation */
1009		kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
1010	}
1011
1012	mutex_unlock(&kvm->slots_lock);
1013	return r;
1014}
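
A user-space harvesting sketch (function name illustrative): the caller supplies a bitmap with one bit per page of the slot, and pages dirtied since the previous call come back set:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* bitmap must hold one bit per page of the slot, rounded up to 64 bits */
int fetch_dirty_bitmap(int vm_fd, __u32 slot, void *bitmap)
{
	struct kvm_dirty_log log = {
		.slot = slot,
		.dirty_bitmap = bitmap,
	};

	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}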
1015
1016int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
1017{
1018	struct kvm_memslots *slots;
1019	struct kvm_memory_slot *memslot;
1020	bool flush = false;
1021	int r;
1022
1023	mutex_lock(&kvm->slots_lock);
1024
1025	r = kvm_clear_dirty_log_protect(kvm, log, &flush);
1026
1027	if (flush) {
1028		slots = kvm_memslots(kvm);
1029		memslot = id_to_memslot(slots, log->slot);
1030
1031		/* Let implementation handle TLB/GVA invalidation */
1032		kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
1033	}
1034
1035	mutex_unlock(&kvm->slots_lock);
1036	return r;
1037}
1038
1039long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
1040{
1041	long r;
1042
1043	switch (ioctl) {
1044	default:
1045		r = -ENOIOCTLCMD;
1046	}
1047
1048	return r;
1049}
1050
1051int kvm_arch_init(void *opaque)
1052{
1053	if (kvm_mips_callbacks) {
1054		kvm_err("kvm: module already exists\n");
1055		return -EEXIST;
1056	}
1057
1058	return kvm_mips_emulation_init(&kvm_mips_callbacks);
1059}
1060
1061void kvm_arch_exit(void)
1062{
1063	kvm_mips_callbacks = NULL;
1064}
1065
1066int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1067				  struct kvm_sregs *sregs)
1068{
1069	return -ENOIOCTLCMD;
1070}
1071
1072int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1073				  struct kvm_sregs *sregs)
1074{
1075	return -ENOIOCTLCMD;
1076}
1077
1078void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1079{
1080}
1081
1082int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1083{
1084	return -ENOIOCTLCMD;
1085}
1086
1087int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1088{
1089	return -ENOIOCTLCMD;
1090}
1091
1092vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1093{
1094	return VM_FAULT_SIGBUS;
1095}
1096
1097int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
1098{
1099	int r;
1100
1101	switch (ext) {
1102	case KVM_CAP_ONE_REG:
1103	case KVM_CAP_ENABLE_CAP:
1104	case KVM_CAP_READONLY_MEM:
1105	case KVM_CAP_SYNC_MMU:
1106	case KVM_CAP_IMMEDIATE_EXIT:
1107		r = 1;
1108		break;
1109	case KVM_CAP_NR_VCPUS:
1110		r = num_online_cpus();
1111		break;
1112	case KVM_CAP_MAX_VCPUS:
1113		r = KVM_MAX_VCPUS;
1114		break;
1115	case KVM_CAP_MAX_VCPU_ID:
1116		r = KVM_MAX_VCPU_ID;
1117		break;
1118	case KVM_CAP_MIPS_FPU:
1119		/* We don't handle systems with inconsistent cpu_has_fpu */
1120		r = !!raw_cpu_has_fpu;
1121		break;
1122	case KVM_CAP_MIPS_MSA:
1123		/*
1124		 * We don't support MSA vector partitioning yet:
1125		 * 1) It would require explicit support which can't be tested
1126		 *    yet due to lack of support in current hardware.
1127		 * 2) It extends the state that would need to be saved/restored
1128		 *    by e.g. QEMU for migration.
1129		 *
1130		 * When vector partitioning hardware becomes available, support
1131		 * could be added by requiring a flag when enabling
1132		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
1133		 * to save/restore the appropriate extra state.
1134		 */
1135		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
1136		break;
1137	default:
1138		r = kvm_mips_callbacks->check_extension(kvm, ext);
1139		break;
1140	}
1141	return r;
1142}
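
Capabilities are probed from user space with KVM_CHECK_EXTENSION, whose return value is exactly the r computed above: 0 means absent, and positive values carry per-capability meaning (for example the vCPU counts). A sketch (function name illustrative):

#include <sys/ioctl.h>
#include <linux/kvm.h>

int host_supports_msa(int vm_fd)
{
	/* > 0 only when cpu_has_msa and vector partitioning is absent */
	return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_MSA) > 0;
}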
1143
1144int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
1145{
1146	return kvm_mips_pending_timer(vcpu) ||
1147		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
1148}
1149
1150int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
1151{
1152	int i;
1153	struct mips_coproc *cop0;
1154
1155	if (!vcpu)
1156		return -1;
1157
1158	kvm_debug("VCPU Register Dump:\n");
1159	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
1160	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
1161
1162	for (i = 0; i < 32; i += 4) {
1163		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
1164		       vcpu->arch.gprs[i],
1165		       vcpu->arch.gprs[i + 1],
1166		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
1167	}
1168	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
1169	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
1170
1171	cop0 = vcpu->arch.cop0;
1172	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
1173		  kvm_read_c0_guest_status(cop0),
1174		  kvm_read_c0_guest_cause(cop0));
1175
1176	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
1177
1178	return 0;
1179}
1180
1181int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1182{
1183	int i;
1184
1185	vcpu_load(vcpu);
1186
1187	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1188		vcpu->arch.gprs[i] = regs->gpr[i];
1189	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
1190	vcpu->arch.hi = regs->hi;
1191	vcpu->arch.lo = regs->lo;
1192	vcpu->arch.pc = regs->pc;
1193
1194	vcpu_put(vcpu);
1195	return 0;
1196}
1197
1198int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1199{
1200	int i;
1201
1202	vcpu_load(vcpu);
1203
1204	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1205		regs->gpr[i] = vcpu->arch.gprs[i];
1206
1207	regs->hi = vcpu->arch.hi;
1208	regs->lo = vcpu->arch.lo;
1209	regs->pc = vcpu->arch.pc;
1210
1211	vcpu_put(vcpu);
1212	return 0;
1213}
1214
1215static void kvm_mips_comparecount_func(unsigned long data)
1216{
1217	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
1218
1219	kvm_mips_callbacks->queue_timer_int(vcpu);
1220
1221	vcpu->arch.wait = 0;
1222	if (swq_has_sleeper(&vcpu->wq))
1223		swake_up_one(&vcpu->wq);
1224}
1225
1226/* low level hrtimer wake routine */
1227static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
1228{
1229	struct kvm_vcpu *vcpu;
1230
1231	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
1232	kvm_mips_comparecount_func((unsigned long) vcpu);
1233	return kvm_mips_count_timeout(vcpu);
1234}
1235
1236int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1237{
1238	int err;
1239
1240	err = kvm_mips_callbacks->vcpu_init(vcpu);
1241	if (err)
1242		return err;
1243
1244	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
1245		     HRTIMER_MODE_REL);
1246	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
1247	return 0;
1248}
1249
1250void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
1251{
1252	kvm_mips_callbacks->vcpu_uninit(vcpu);
1253}
1254
1255int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1256				  struct kvm_translation *tr)
1257{
1258	return 0;
1259}
1260
1261/* Initial guest state */
1262int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1263{
1264	return kvm_mips_callbacks->vcpu_setup(vcpu);
1265}
1266
1267static void kvm_mips_set_c0_status(void)
1268{
1269	u32 status = read_c0_status();
1270
1271	if (cpu_has_dsp)
1272		status |= (ST0_MX);
1273
1274	write_c0_status(status);
1275	ehb();
1276}
1277
1278/*
1279 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
1280 */
1281int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
1282{
1283	u32 cause = vcpu->arch.host_cp0_cause;
1284	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1285	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
1286	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1287	enum emulation_result er = EMULATE_DONE;
1288	u32 inst;
1289	int ret = RESUME_GUEST;
1290
1291	vcpu->mode = OUTSIDE_GUEST_MODE;
1292
1293	/* re-enable HTW before enabling interrupts */
1294	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
1295		htw_start();
1296
1297	/* Set a default exit reason */
1298	run->exit_reason = KVM_EXIT_UNKNOWN;
1299	run->ready_for_interrupt_injection = 1;
1300
1301	/*
1302	 * Set the appropriate status bits based on host CPU features,
1303	 * before we hit the scheduler
1304	 */
1305	kvm_mips_set_c0_status();
1306
1307	local_irq_enable();
1308
1309	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
1310			cause, opc, run, vcpu);
1311	trace_kvm_exit(vcpu, exccode);
1312
1313	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
1314		/*
1315		 * Do a privilege check: if in UM, most of these exit conditions
1316		 * end up causing an exception to be delivered to the Guest
1317		 * Kernel
1318		 */
1319		er = kvm_mips_check_privilege(cause, opc, run, vcpu);
1320		if (er == EMULATE_PRIV_FAIL) {
1321			goto skip_emul;
1322		} else if (er == EMULATE_FAIL) {
1323			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1324			ret = RESUME_HOST;
1325			goto skip_emul;
1326		}
1327	}
1328
1329	switch (exccode) {
1330	case EXCCODE_INT:
1331		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
1332
1333		++vcpu->stat.int_exits;
1334
1335		if (need_resched())
1336			cond_resched();
1337
1338		ret = RESUME_GUEST;
1339		break;
1340
1341	case EXCCODE_CPU:
1342		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);
1343
1344		++vcpu->stat.cop_unusable_exits;
1345		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
1346		/* XXXKYMA: Might need to return to user space */
1347		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
1348			ret = RESUME_HOST;
1349		break;
1350
1351	case EXCCODE_MOD:
1352		++vcpu->stat.tlbmod_exits;
1353		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
1354		break;
1355
1356	case EXCCODE_TLBS:
1357		kvm_debug("TLB ST fault:  cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
1358			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
1359			  badvaddr);
1360
1361		++vcpu->stat.tlbmiss_st_exits;
1362		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
1363		break;
1364
1365	case EXCCODE_TLBL:
1366		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
1367			  cause, opc, badvaddr);
1368
1369		++vcpu->stat.tlbmiss_ld_exits;
1370		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
1371		break;
1372
1373	case EXCCODE_ADES:
1374		++vcpu->stat.addrerr_st_exits;
1375		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
1376		break;
1377
1378	case EXCCODE_ADEL:
1379		++vcpu->stat.addrerr_ld_exits;
1380		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
1381		break;
1382
1383	case EXCCODE_SYS:
1384		++vcpu->stat.syscall_exits;
1385		ret = kvm_mips_callbacks->handle_syscall(vcpu);
1386		break;
1387
1388	case EXCCODE_RI:
1389		++vcpu->stat.resvd_inst_exits;
1390		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
1391		break;
1392
1393	case EXCCODE_BP:
1394		++vcpu->stat.break_inst_exits;
1395		ret = kvm_mips_callbacks->handle_break(vcpu);
1396		break;
1397
1398	case EXCCODE_TR:
1399		++vcpu->stat.trap_inst_exits;
1400		ret = kvm_mips_callbacks->handle_trap(vcpu);
1401		break;
1402
1403	case EXCCODE_MSAFPE:
1404		++vcpu->stat.msa_fpe_exits;
1405		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
1406		break;
1407
1408	case EXCCODE_FPE:
1409		++vcpu->stat.fpe_exits;
1410		ret = kvm_mips_callbacks->handle_fpe(vcpu);
1411		break;
1412
1413	case EXCCODE_MSADIS:
1414		++vcpu->stat.msa_disabled_exits;
1415		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
1416		break;
1417
1418	case EXCCODE_GE:
1419		/* defer exit accounting to handler */
1420		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
1421		break;
1422
1423	default:
1424		if (cause & CAUSEF_BD)
1425			opc += 1;
1426		inst = 0;
1427		kvm_get_badinstr(opc, vcpu, &inst);
1428		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#x\n",
1429			exccode, opc, inst, badvaddr,
1430			kvm_read_c0_guest_status(vcpu->arch.cop0));
1431		kvm_arch_vcpu_dump_regs(vcpu);
1432		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1433		ret = RESUME_HOST;
1434		break;
1435
1436	}
1437
1438skip_emul:
1439	local_irq_disable();
1440
1441	if (ret == RESUME_GUEST)
1442		kvm_vz_acquire_htimer(vcpu);
1443
1444	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
1445		kvm_mips_deliver_interrupts(vcpu, cause);
1446
1447	if (!(ret & RESUME_HOST)) {
1448		/* Only check for signals if not already exiting to userspace */
1449		if (signal_pending(current)) {
1450			run->exit_reason = KVM_EXIT_INTR;
1451			ret = (-EINTR << 2) | RESUME_HOST;
1452			++vcpu->stat.signal_exits;
1453			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
1454		}
1455	}
1456
1457	if (ret == RESUME_GUEST) {
1458		trace_kvm_reenter(vcpu);
1459
1460		/*
1461		 * Make sure the read of VCPU requests in vcpu_reenter()
1462		 * callback is not reordered ahead of the write to vcpu->mode,
1463		 * or we could miss a TLB flush request while the requester sees
1464		 * the VCPU as outside of guest mode and not needing an IPI.
1465		 */
1466		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
1467
1468		kvm_mips_callbacks->vcpu_reenter(run, vcpu);
1469
1470		/*
1471		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
1472		 * is live), restore FCR31 / MSACSR.
1473		 *
1474		 * This should be before returning to the guest exception
1475		 * vector, as it may well cause an [MSA] FP exception if there
1476		 * are pending exception bits unmasked. (see
1477		 * kvm_mips_csr_die_notifier() for how that is handled).
1478		 */
1479		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
1480		    read_c0_status() & ST0_CU1)
1481			__kvm_restore_fcsr(&vcpu->arch);
1482
1483		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
1484		    read_c0_config5() & MIPS_CONF5_MSAEN)
1485			__kvm_restore_msacsr(&vcpu->arch);
1486	}
1487
1488	/* Disable HTW before returning to guest or host */
1489	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
1490		htw_stop();
1491
1492	return ret;
1493}
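
The packed return value described in the comment above kvm_mips_handle_exit() is worth unpacking once: the low bits carry the resume decision and the remainder an error code, so the (-EINTR << 2) | RESUME_HOST set on a pending signal lets the caller recover both the errno (an arithmetic ret >> 2, which is what the generated return-to-host code performs) and the exit-to-host decision.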
1494
1495/* Enable FPU for guest and restore context */
1496void kvm_own_fpu(struct kvm_vcpu *vcpu)
1497{
1498	struct mips_coproc *cop0 = vcpu->arch.cop0;
1499	unsigned int sr, cfg5;
1500
1501	preempt_disable();
1502
1503	sr = kvm_read_c0_guest_status(cop0);
1504
1505	/*
1506	 * If MSA state is already live, it is undefined how it interacts with
1507	 * FR=0 FPU state, and we don't want to hit reserved instruction
1508	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
1509	 * play it safe and save it first.
1510	 *
1511	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
1512	 * get called when guest CU1 is set, however we can't trust the guest
1513	 * not to clobber the status register directly via the commpage.
1514	 */
1515	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
1516	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1517		kvm_lose_fpu(vcpu);
1518
1519	/*
1520	 * Enable FPU for guest
1521	 * We set FR and FRE according to guest context
1522	 */
1523	change_c0_status(ST0_CU1 | ST0_FR, sr);
1524	if (cpu_has_fre) {
1525		cfg5 = kvm_read_c0_guest_config5(cop0);
1526		change_c0_config5(MIPS_CONF5_FRE, cfg5);
1527	}
1528	enable_fpu_hazard();
1529
1530	/* If guest FPU state not active, restore it now */
1531	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
1532		__kvm_restore_fpu(&vcpu->arch);
1533		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
1534		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1535	} else {
1536		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
1537	}
1538
1539	preempt_enable();
1540}
1541
1542#ifdef CONFIG_CPU_HAS_MSA
1543/* Enable MSA for guest and restore context */
1544void kvm_own_msa(struct kvm_vcpu *vcpu)
1545{
1546	struct mips_coproc *cop0 = vcpu->arch.cop0;
1547	unsigned int sr, cfg5;
1548
1549	preempt_disable();
1550
1551	/*
1552	 * Enable FPU if enabled in guest, since we're restoring FPU context
1553	 * anyway. We set FR and FRE according to guest context.
1554	 */
1555	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
1556		sr = kvm_read_c0_guest_status(cop0);
1557
1558		/*
1559		 * If FR=0 FPU state is already live, it is undefined how it
1560		 * interacts with MSA state, so play it safe and save it first.
1561		 */
1562		if (!(sr & ST0_FR) &&
1563		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
1564				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
1565			kvm_lose_fpu(vcpu);
1566
1567		change_c0_status(ST0_CU1 | ST0_FR, sr);
1568		if (sr & ST0_CU1 && cpu_has_fre) {
1569			cfg5 = kvm_read_c0_guest_config5(cop0);
1570			change_c0_config5(MIPS_CONF5_FRE, cfg5);
1571		}
1572	}
1573
1574	/* Enable MSA for guest */
1575	set_c0_config5(MIPS_CONF5_MSAEN);
1576	enable_fpu_hazard();
1577
1578	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
1579	case KVM_MIPS_AUX_FPU:
1580		/*
1581		 * Guest FPU state already loaded, only restore upper MSA state
1582		 */
1583		__kvm_restore_msa_upper(&vcpu->arch);
1584		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
1585		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
1586		break;
1587	case 0:
1588		/* Neither FPU nor MSA already active, restore full MSA state */
1589		__kvm_restore_msa(&vcpu->arch);
1590		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
1591		if (kvm_mips_guest_has_fpu(&vcpu->arch))
1592			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
1593		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
1594			      KVM_TRACE_AUX_FPU_MSA);
1595		break;
1596	default:
1597		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
1598		break;
1599	}
1600
1601	preempt_enable();
1602}
1603#endif
1604
1605/* Drop FPU & MSA without saving it */
1606void kvm_drop_fpu(struct kvm_vcpu *vcpu)
1607{
1608	preempt_disable();
1609	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1610		disable_msa();
1611		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
1612		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
1613	}
1614	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
1615		clear_c0_status(ST0_CU1 | ST0_FR);
1616		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
1617		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
1618	}
1619	preempt_enable();
1620}
1621
1622/* Save and disable FPU & MSA */
1623void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1624{
1625	/*
1626	 * With T&E, FPU & MSA get disabled in root context (hardware) when they
1627	 * are disabled in guest context (software), but the register state in
1628	 * the hardware may still be in use.
1629	 * This is why we explicitly re-enable the hardware before saving.
1630	 */
1631
1632	preempt_disable();
1633	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1634		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
1635			set_c0_config5(MIPS_CONF5_MSAEN);
1636			enable_fpu_hazard();
1637		}
1638
1639		__kvm_save_msa(&vcpu->arch);
1640		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
1641
1642		/* Disable MSA & FPU */
1643		disable_msa();
1644		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
1645			clear_c0_status(ST0_CU1 | ST0_FR);
1646			disable_fpu_hazard();
1647		}
1648		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
1649	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
1650		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
1651			set_c0_status(ST0_CU1);
1652			enable_fpu_hazard();
1653		}
1654
1655		__kvm_save_fpu(&vcpu->arch);
1656		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
1657		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1658
1659		/* Disable FPU */
1660		clear_c0_status(ST0_CU1 | ST0_FR);
1661		disable_fpu_hazard();
1662	}
1663	preempt_enable();
1664}
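
Taken together, kvm_own_fpu()/kvm_own_msa(), kvm_drop_fpu() and kvm_lose_fpu() implement a small lazy-context state machine over vcpu->arch.aux_inuse: no bits set means the hardware holds none of the guest's state; KVM_MIPS_AUX_FPU alone means scalar FP state is live; KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA means the full 128-bit vector state (which overlays the FPRs) is live. That overlap is why saving MSA state above also covers the FPU, and why only the MSA upper halves need restoring when the FPU half is already present.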
1665
1666/*
1667 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
1668 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
1669 * exception if cause bits are set in the value being written.
1670 */
1671static int kvm_mips_csr_die_notify(struct notifier_block *self,
1672				   unsigned long cmd, void *ptr)
1673{
1674	struct die_args *args = (struct die_args *)ptr;
1675	struct pt_regs *regs = args->regs;
1676	unsigned long pc;
1677
1678	/* Only interested in FPE and MSAFPE */
1679	if (cmd != DIE_FP && cmd != DIE_MSAFP)
1680		return NOTIFY_DONE;
1681
1682	/* Return immediately if guest context isn't active */
1683	if (!(current->flags & PF_VCPU))
1684		return NOTIFY_DONE;
1685
1686	/* Should never get here from user mode */
1687	BUG_ON(user_mode(regs));
1688
1689	pc = instruction_pointer(regs);
1690	switch (cmd) {
1691	case DIE_FP:
1692		/* match 2nd instruction in __kvm_restore_fcsr */
1693		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
1694			return NOTIFY_DONE;
1695		break;
1696	case DIE_MSAFP:
1697		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
1698		if (!cpu_has_msa ||
1699		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
1700		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
1701			return NOTIFY_DONE;
1702		break;
1703	}
1704
1705	/* Move PC forward a little and continue executing */
1706	instruction_pointer(regs) += 4;
1707
1708	return NOTIFY_STOP;
1709}
1710
1711static struct notifier_block kvm_mips_csr_die_notifier = {
1712	.notifier_call = kvm_mips_csr_die_notify,
1713};
1714
1715static int __init kvm_mips_init(void)
1716{
1717	int ret;
1718
1719	if (cpu_has_mmid) {
1720		pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
1721		return -EOPNOTSUPP;
1722	}
1723
1724	ret = kvm_mips_entry_setup();
1725	if (ret)
1726		return ret;
1727
1728	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1729
1730	if (ret)
1731		return ret;
1732
1733	register_die_notifier(&kvm_mips_csr_die_notifier);
1734
1735	return 0;
1736}
1737
1738static void __exit kvm_mips_exit(void)
1739{
1740	kvm_exit();
1741
1742	unregister_die_notifier(&kvm_mips_csr_die_notifier);
1743}
1744
1745module_init(kvm_mips_init);
1746module_exit(kvm_mips_exit);
1747
1748EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
v5.14.15
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * KVM/MIPS: MIPS specific KVM APIs
   7 *
   8 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
   9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
  10 */
  11
  12#include <linux/bitops.h>
  13#include <linux/errno.h>
  14#include <linux/err.h>
  15#include <linux/kdebug.h>
  16#include <linux/module.h>
  17#include <linux/uaccess.h>
  18#include <linux/vmalloc.h>
  19#include <linux/sched/signal.h>
  20#include <linux/fs.h>
  21#include <linux/memblock.h>
  22#include <linux/pgtable.h>
  23
  24#include <asm/fpu.h>
  25#include <asm/page.h>
  26#include <asm/cacheflush.h>
  27#include <asm/mmu_context.h>
  28#include <asm/pgalloc.h>
 
  29
  30#include <linux/kvm_host.h>
  31
  32#include "interrupt.h"
 
  33
  34#define CREATE_TRACE_POINTS
  35#include "trace.h"
  36
  37#ifndef VECTORSPACING
  38#define VECTORSPACING 0x100	/* for EI/VI mode */
  39#endif
  40
  41const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
  42	KVM_GENERIC_VM_STATS()
  43};
  44static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
  45		sizeof(struct kvm_vm_stat) / sizeof(u64));
  46
  47const struct kvm_stats_header kvm_vm_stats_header = {
  48	.name_size = KVM_STATS_NAME_SIZE,
  49	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
  50	.id_offset = sizeof(struct kvm_stats_header),
  51	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
  52	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
  53		       sizeof(kvm_vm_stats_desc),
  54};
  55
  56const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
  57	KVM_GENERIC_VCPU_STATS(),
  58	STATS_DESC_COUNTER(VCPU, wait_exits),
  59	STATS_DESC_COUNTER(VCPU, cache_exits),
  60	STATS_DESC_COUNTER(VCPU, signal_exits),
  61	STATS_DESC_COUNTER(VCPU, int_exits),
  62	STATS_DESC_COUNTER(VCPU, cop_unusable_exits),
  63	STATS_DESC_COUNTER(VCPU, tlbmod_exits),
  64	STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits),
  65	STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits),
  66	STATS_DESC_COUNTER(VCPU, addrerr_st_exits),
  67	STATS_DESC_COUNTER(VCPU, addrerr_ld_exits),
  68	STATS_DESC_COUNTER(VCPU, syscall_exits),
  69	STATS_DESC_COUNTER(VCPU, resvd_inst_exits),
  70	STATS_DESC_COUNTER(VCPU, break_inst_exits),
  71	STATS_DESC_COUNTER(VCPU, trap_inst_exits),
  72	STATS_DESC_COUNTER(VCPU, msa_fpe_exits),
  73	STATS_DESC_COUNTER(VCPU, fpe_exits),
  74	STATS_DESC_COUNTER(VCPU, msa_disabled_exits),
  75	STATS_DESC_COUNTER(VCPU, flush_dcache_exits),
  76	STATS_DESC_COUNTER(VCPU, vz_gpsi_exits),
  77	STATS_DESC_COUNTER(VCPU, vz_gsfc_exits),
  78	STATS_DESC_COUNTER(VCPU, vz_hc_exits),
  79	STATS_DESC_COUNTER(VCPU, vz_grr_exits),
  80	STATS_DESC_COUNTER(VCPU, vz_gva_exits),
  81	STATS_DESC_COUNTER(VCPU, vz_ghfc_exits),
  82	STATS_DESC_COUNTER(VCPU, vz_gpa_exits),
  83	STATS_DESC_COUNTER(VCPU, vz_resvd_exits),
  84#ifdef CONFIG_CPU_LOONGSON64
  85	STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits),
  86#endif
  87};
  88static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
  89		sizeof(struct kvm_vcpu_stat) / sizeof(u64));
  90
  91const struct kvm_stats_header kvm_vcpu_stats_header = {
  92	.name_size = KVM_STATS_NAME_SIZE,
  93	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
  94	.id_offset = sizeof(struct kvm_stats_header),
  95	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
  96	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
  97		       sizeof(kvm_vcpu_stats_desc),
  98};
  99
 100bool kvm_trace_guest_mode_change;
 101
 102int kvm_guest_mode_change_trace_reg(void)
 103{
 104	kvm_trace_guest_mode_change = true;
 105	return 0;
 106}
 107
 108void kvm_guest_mode_change_trace_unreg(void)
 109{
 110	kvm_trace_guest_mode_change = false;
 111}
 112
 113/*
 114 * XXXKYMA: We are simulatoring a processor that has the WII bit set in
 115 * Config7, so we are "runnable" if interrupts are pending
 116 */
 117int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 118{
 119	return !!(vcpu->arch.pending_exceptions);
 120}
 121
 122bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 123{
 124	return false;
 125}
 126
 127int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 128{
 129	return 1;
 130}
 131
 132int kvm_arch_hardware_enable(void)
 133{
 134	return kvm_mips_callbacks->hardware_enable();
 135}
 136
 137void kvm_arch_hardware_disable(void)
 138{
 139	kvm_mips_callbacks->hardware_disable();
 140}
 141
 142int kvm_arch_hardware_setup(void *opaque)
 143{
 144	return 0;
 145}
 146
 147int kvm_arch_check_processor_compat(void *opaque)
 148{
 149	return 0;
 150}
 151
 152extern void kvm_init_loongson_ipi(struct kvm *kvm);
 153
 154int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 155{
 156	switch (type) {
 157	case KVM_VM_MIPS_AUTO:
 158		break;
 159	case KVM_VM_MIPS_VZ:
 
 
 
 160		break;
 161	default:
 162		/* Unsupported KVM type */
 163		return -EINVAL;
 164	}
 165
 166	/* Allocate page table to map GPA -> RPA */
 167	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
 168	if (!kvm->arch.gpa_mm.pgd)
 169		return -ENOMEM;
 170
 171#ifdef CONFIG_CPU_LOONGSON64
 172	kvm_init_loongson_ipi(kvm);
 173#endif
 174
 175	return 0;
 176}
 177
 178void kvm_mips_free_vcpus(struct kvm *kvm)
 179{
 180	unsigned int i;
 181	struct kvm_vcpu *vcpu;
 182
 183	kvm_for_each_vcpu(i, vcpu, kvm) {
 184		kvm_vcpu_destroy(vcpu);
 185	}
 186
 187	mutex_lock(&kvm->lock);
 188
 189	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
 190		kvm->vcpus[i] = NULL;
 191
 192	atomic_set(&kvm->online_vcpus, 0);
 193
 194	mutex_unlock(&kvm->lock);
 195}
 196
 197static void kvm_mips_free_gpa_pt(struct kvm *kvm)
 198{
 199	/* It should always be safe to remove after flushing the whole range */
 200	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
 201	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
 202}
 203
 204void kvm_arch_destroy_vm(struct kvm *kvm)
 205{
 206	kvm_mips_free_vcpus(kvm);
 207	kvm_mips_free_gpa_pt(kvm);
 208}
 209
 210long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
 211			unsigned long arg)
 212{
 213	return -ENOIOCTLCMD;
 214}
 215
 216void kvm_arch_flush_shadow_all(struct kvm *kvm)
 217{
 218	/* Flush whole GPA */
 219	kvm_mips_flush_gpa_pt(kvm, 0, ~0);
 220	kvm_flush_remote_tlbs(kvm);
 221}
 222
 223void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 224				   struct kvm_memory_slot *slot)
 225{
 226	/*
 227	 * The slot has been made invalid (ready for moving or deletion), so we
 228	 * need to ensure that it can no longer be accessed by any guest VCPUs.
 229	 */
 230
 231	spin_lock(&kvm->mmu_lock);
 232	/* Flush slot from GPA */
 233	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
 234			      slot->base_gfn + slot->npages - 1);
 235	kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
 236	spin_unlock(&kvm->mmu_lock);
 237}
 238
 239int kvm_arch_prepare_memory_region(struct kvm *kvm,
 240				   struct kvm_memory_slot *memslot,
 241				   const struct kvm_userspace_memory_region *mem,
 242				   enum kvm_mr_change change)
 243{
 244	return 0;
 245}
 246
 247void kvm_arch_commit_memory_region(struct kvm *kvm,
 248				   const struct kvm_userspace_memory_region *mem,
 249				   struct kvm_memory_slot *old,
 250				   const struct kvm_memory_slot *new,
 251				   enum kvm_mr_change change)
 252{
 253	int needs_flush;
 254
 255	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
 256		  __func__, kvm, mem->slot, mem->guest_phys_addr,
 257		  mem->memory_size, mem->userspace_addr);
 258
 259	/*
 260	 * If dirty page logging is enabled, write protect all pages in the slot
 261	 * ready for dirty logging.
 262	 *
 263	 * There is no need to do this in any of the following cases:
 264	 * CREATE:	No dirty mappings will already exist.
 265	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
 266	 *		kvm_arch_flush_shadow_memslot()
 267	 */
 268	if (change == KVM_MR_FLAGS_ONLY &&
 269	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
 270	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
 271		spin_lock(&kvm->mmu_lock);
 272		/* Write protect GPA page table entries */
 273		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
 274					new->base_gfn + new->npages - 1);
 275		if (needs_flush)
 276			kvm_arch_flush_remote_tlbs_memslot(kvm, new);
 277		spin_unlock(&kvm->mmu_lock);
 278	}
 279}
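/*
 * Illustrative userspace sequence for the dirty-logging path above (a
 * sketch; error handling omitted): flipping KVM_MEM_LOG_DIRTY_PAGES on an
 * existing slot triggers the KVM_MR_FLAGS_ONLY write-protect pass, after
 * which dirty pages are harvested with KVM_GET_DIRTY_LOG:
 *
 *	region.flags |= KVM_MEM_LOG_DIRTY_PAGES;
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 *
 *	struct kvm_dirty_log log = { .slot = region.slot, .dirty_bitmap = bitmap };
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */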
 280
 281static inline void dump_handler(const char *symbol, void *start, void *end)
 282{
 283	u32 *p;
 284
 285	pr_debug("LEAF(%s)\n", symbol);
 286
 287	pr_debug("\t.set push\n");
 288	pr_debug("\t.set noreorder\n");
 289
 290	for (p = start; p < (u32 *)end; ++p)
 291		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);
 292
 293	pr_debug("\t.set\tpop\n");
 294
 295	pr_debug("\tEND(%s)\n", symbol);
 296}
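/*
 * The pr_debug() output above is only emitted when dynamic debug is
 * enabled for this file, e.g. (assuming CONFIG_DYNAMIC_DEBUG and debugfs
 * mounted):
 *
 *	echo 'file mips.c +p' > /sys/kernel/debug/dynamic_debug/control
 */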
 297
 298/* low level hrtimer wake routine */
 299static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
 300{
 301	struct kvm_vcpu *vcpu;
 302
 303	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
 304
 305	kvm_mips_callbacks->queue_timer_int(vcpu);
 306
 307	vcpu->arch.wait = 0;
 308	rcuwait_wake_up(&vcpu->wait);
 309
 310	return kvm_mips_count_timeout(vcpu);
 311}
 312
 313int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 314{
 315	return 0;
 316}
 317
 318int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 319{
 320	int err, size;
 321	void *gebase, *p, *handler, *refill_start, *refill_end;
 322	int i;
 323
 324	kvm_debug("kvm @ %p: create cpu %d at %p\n",
 325		  vcpu->kvm, vcpu->vcpu_id, vcpu);
 326
 327	err = kvm_mips_callbacks->vcpu_init(vcpu);
 328	if (err)
 329		return err;
 330
 331	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
 332		     HRTIMER_MODE_REL);
 333	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
 334
 335	/*
 336	 * Allocate space for host mode exception handlers that handle
 337	 * guest mode exits
 338	 */
 339	if (cpu_has_veic || cpu_has_vint)
 340		size = 0x200 + VECTORSPACING * 64;
 341	else
 342		size = 0x4000;
 343
 344	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
 345
 346	if (!gebase) {
 347		err = -ENOMEM;
 348		goto out_uninit_vcpu;
 349	}
 350	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
 351		  ALIGN(size, PAGE_SIZE), gebase);
 352
 353	/*
 354	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
 355	 * limits us to the low 512MB of physical address space. If the memory
 356	 * we allocate is out of range, just give up now.
 357	 */
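	/*
	 * (Worked example: without EBase.WG only EBase[29:12] are writable,
	 * so the vector base is forced into kseg0's low 512MB, i.e. a
	 * physical address below 0x20000000, which is the bound tested
	 * below.)
	 */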
 358	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
 359		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
 360			gebase);
 361		err = -ENOMEM;
 362		goto out_free_gebase;
 363	}
 364
 365	/* Save new ebase */
 366	vcpu->arch.guest_ebase = gebase;
 367
 368	/* Build guest exception vectors dynamically in unmapped memory */
 369	handler = gebase + 0x2000;
 370
 371	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
 372	refill_start = gebase;
 373	if (IS_ENABLED(CONFIG_64BIT))
 374		refill_start += 0x080;
 375	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);
 376
 377	/* General Exception Entry point */
 378	kvm_mips_build_exception(gebase + 0x180, handler);
 379
 380	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
 381	for (i = 0; i < 8; i++) {
 382		kvm_debug("L1 Vectored handler @ %p\n",
 383			  gebase + 0x200 + (i * VECTORSPACING));
 384		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
 385					 handler);
 386	}
 387
 388	/* General exit handler */
 389	p = handler;
 390	p = kvm_mips_build_exit(p);
 391
 392	/* Guest entry routine */
 393	vcpu->arch.vcpu_run = p;
 394	p = kvm_mips_build_vcpu_run(p);
 395
 396	/* Dump the generated code */
 397	pr_debug("#include <asm/asm.h>\n");
 398	pr_debug("#include <asm/regdef.h>\n");
 399	pr_debug("\n");
 400	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
 401	dump_handler("kvm_tlb_refill", refill_start, refill_end);
 402	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
 403	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
 404
 405	/* Invalidate the icache for these ranges */
 406	flush_icache_range((unsigned long)gebase,
 407			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
 408
 409	/* Init */
 410	vcpu->arch.last_sched_cpu = -1;
 411	vcpu->arch.last_exec_cpu = -1;
 412
 413	/* Initial guest state */
 414	err = kvm_mips_callbacks->vcpu_setup(vcpu);
 415	if (err)
 416		goto out_free_gebase;
 417
 418	return 0;
 419
 420out_free_gebase:
 421	kfree(gebase);
 422out_uninit_vcpu:
 423	kvm_mips_callbacks->vcpu_uninit(vcpu);
 424	return err;
 425}
 426
 427void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 428{
 429	hrtimer_cancel(&vcpu->arch.comparecount_timer);
 430
 431	kvm_mips_dump_stats(vcpu);
 432
 433	kvm_mmu_free_memory_caches(vcpu);
 434	kfree(vcpu->arch.guest_ebase);
 435
 436	kvm_mips_callbacks->vcpu_uninit(vcpu);
 437}
 438
 439int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 440					struct kvm_guest_debug *dbg)
 441{
 442	return -ENOIOCTLCMD;
 443}
 444
 445int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 446{
 447	int r = -EINTR;
 448
 449	vcpu_load(vcpu);
 450
 451	kvm_sigset_activate(vcpu);
 452
 453	if (vcpu->mmio_needed) {
 454		if (!vcpu->mmio_is_write)
 455			kvm_mips_complete_mmio_load(vcpu);
 456		vcpu->mmio_needed = 0;
 457	}
 458
 459	if (vcpu->run->immediate_exit)
 460		goto out;
 461
 462	lose_fpu(1);
 463
 464	local_irq_disable();
 465	guest_enter_irqoff();
 466	trace_kvm_enter(vcpu);
 467
 468	/*
 469	 * Make sure the read of VCPU requests in vcpu_run() callback is not
 470	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
 471	 * flush request while the requester sees the VCPU as outside of guest
 472	 * mode and not needing an IPI.
 473	 */
 474	smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 475
 476	r = kvm_mips_callbacks->vcpu_run(vcpu);
 477
 478	trace_kvm_out(vcpu);
 479	guest_exit_irqoff();
 480	local_irq_enable();
 481
 482out:
 483	kvm_sigset_deactivate(vcpu);
 484
 485	vcpu_put(vcpu);
 486	return r;
 487}
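/*
 * Illustrative userspace view of the immediate_exit handling above (a
 * sketch, not part of this file): setting the flag makes KVM_RUN return
 * without entering the guest, e.g. after a signal:
 *
 *	run->immediate_exit = 1;
 *	ioctl(vcpu_fd, KVM_RUN, 0);	// fails with errno == EINTR
 */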
 488
 489int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 490			     struct kvm_mips_interrupt *irq)
 491{
 492	int intr = (int)irq->irq;
 493	struct kvm_vcpu *dvcpu = NULL;
 494
 495	if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
 496	    intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
 497	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
 498	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
 499		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
 500			  (int)intr);
 501
 502	if (irq->cpu == -1)
 503		dvcpu = vcpu;
 504	else
 505		dvcpu = vcpu->kvm->vcpus[irq->cpu];
 506
 507	if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
 508		kvm_mips_callbacks->queue_io_int(dvcpu, irq);
 509
 510	} else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
 511		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
 512	} else {
 513		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
 514			irq->cpu, irq->irq);
 515		return -EINVAL;
 516	}
 517
 518	dvcpu->arch.wait = 0;
 519
 520	rcuwait_wake_up(&dvcpu->wait);
 521
 522	return 0;
 523}
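/*
 * Illustrative usage (a sketch; vcpu_fd assumed open): asserting and then
 * deasserting I/O interrupt line 2 on this vCPU matches the sign
 * convention handled above:
 *
 *	struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 2 };
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	// queue_io_int
 *	irq.irq = -2;
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	// dequeue_io_int
 */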
 524
 525int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 526				    struct kvm_mp_state *mp_state)
 527{
 528	return -ENOIOCTLCMD;
 529}
 530
 531int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 532				    struct kvm_mp_state *mp_state)
 533{
 534	return -ENOIOCTLCMD;
 535}
 536
 537static u64 kvm_mips_get_one_regs[] = {
 538	KVM_REG_MIPS_R0,
 539	KVM_REG_MIPS_R1,
 540	KVM_REG_MIPS_R2,
 541	KVM_REG_MIPS_R3,
 542	KVM_REG_MIPS_R4,
 543	KVM_REG_MIPS_R5,
 544	KVM_REG_MIPS_R6,
 545	KVM_REG_MIPS_R7,
 546	KVM_REG_MIPS_R8,
 547	KVM_REG_MIPS_R9,
 548	KVM_REG_MIPS_R10,
 549	KVM_REG_MIPS_R11,
 550	KVM_REG_MIPS_R12,
 551	KVM_REG_MIPS_R13,
 552	KVM_REG_MIPS_R14,
 553	KVM_REG_MIPS_R15,
 554	KVM_REG_MIPS_R16,
 555	KVM_REG_MIPS_R17,
 556	KVM_REG_MIPS_R18,
 557	KVM_REG_MIPS_R19,
 558	KVM_REG_MIPS_R20,
 559	KVM_REG_MIPS_R21,
 560	KVM_REG_MIPS_R22,
 561	KVM_REG_MIPS_R23,
 562	KVM_REG_MIPS_R24,
 563	KVM_REG_MIPS_R25,
 564	KVM_REG_MIPS_R26,
 565	KVM_REG_MIPS_R27,
 566	KVM_REG_MIPS_R28,
 567	KVM_REG_MIPS_R29,
 568	KVM_REG_MIPS_R30,
 569	KVM_REG_MIPS_R31,
 570
 571#ifndef CONFIG_CPU_MIPSR6
 572	KVM_REG_MIPS_HI,
 573	KVM_REG_MIPS_LO,
 574#endif
 575	KVM_REG_MIPS_PC,
 576};
 577
 578static u64 kvm_mips_get_one_regs_fpu[] = {
 579	KVM_REG_MIPS_FCR_IR,
 580	KVM_REG_MIPS_FCR_CSR,
 581};
 582
 583static u64 kvm_mips_get_one_regs_msa[] = {
 584	KVM_REG_MIPS_MSA_IR,
 585	KVM_REG_MIPS_MSA_CSR,
 586};
 587
 588static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
 589{
 590	unsigned long ret;
 591
 592	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
 593	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
 594		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
 595		/* odd doubles */
 596		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
 597			ret += 16;
 598	}
 599	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
 600		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
 601	ret += kvm_mips_callbacks->num_regs(vcpu);
 602
 603	return ret;
 604}
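/*
 * Worked example of the arithmetic above: the FPU contributes 2 control
 * registers, 32 32-bit FPRs and 16 even-numbered 64-bit FPRs (the "+ 48"),
 * plus the 16 odd-numbered doubles when the FPU implements F64; MSA
 * contributes 2 control registers and its 32 128-bit vector registers.
 */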
 605
 606static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
 607{
 608	u64 index;
 609	unsigned int i;
 610
 611	if (copy_to_user(indices, kvm_mips_get_one_regs,
 612			 sizeof(kvm_mips_get_one_regs)))
 613		return -EFAULT;
 614	indices += ARRAY_SIZE(kvm_mips_get_one_regs);
 615
 616	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
 617		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
 618				 sizeof(kvm_mips_get_one_regs_fpu)))
 619			return -EFAULT;
 620		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);
 621
 622		for (i = 0; i < 32; ++i) {
 623			index = KVM_REG_MIPS_FPR_32(i);
 624			if (copy_to_user(indices, &index, sizeof(index)))
 625				return -EFAULT;
 626			++indices;
 627
 628			/* skip odd doubles if no F64 */
 629			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
 630				continue;
 631
 632			index = KVM_REG_MIPS_FPR_64(i);
 633			if (copy_to_user(indices, &index, sizeof(index)))
 634				return -EFAULT;
 635			++indices;
 636		}
 637	}
 638
 639	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
 640		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
 641				 sizeof(kvm_mips_get_one_regs_msa)))
 642			return -EFAULT;
 643		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);
 644
 645		for (i = 0; i < 32; ++i) {
 646			index = KVM_REG_MIPS_VEC_128(i);
 647			if (copy_to_user(indices, &index, sizeof(index)))
 648				return -EFAULT;
 649			++indices;
 650		}
 651	}
 652
 653	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
 654}
 655
 656static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 657			    const struct kvm_one_reg *reg)
 658{
 659	struct mips_coproc *cop0 = vcpu->arch.cop0;
 660	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
 661	int ret;
 662	s64 v;
 663	s64 vs[2];
 664	unsigned int idx;
 665
 666	switch (reg->id) {
 667	/* General purpose registers */
 668	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
 669		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
 670		break;
 671#ifndef CONFIG_CPU_MIPSR6
 672	case KVM_REG_MIPS_HI:
 673		v = (long)vcpu->arch.hi;
 674		break;
 675	case KVM_REG_MIPS_LO:
 676		v = (long)vcpu->arch.lo;
 677		break;
 678#endif
 679	case KVM_REG_MIPS_PC:
 680		v = (long)vcpu->arch.pc;
 681		break;
 682
 683	/* Floating point registers */
 684	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
 685		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 686			return -EINVAL;
 687		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
 688		/* Odd singles in top of even double when FR=0 */
 689		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
 690			v = get_fpr32(&fpu->fpr[idx], 0);
 691		else
 692			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
 693		break;
 694	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
 695		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 696			return -EINVAL;
 697		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
 698		/* Can't access odd doubles in FR=0 mode */
 699		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
 700			return -EINVAL;
 701		v = get_fpr64(&fpu->fpr[idx], 0);
 702		break;
 703	case KVM_REG_MIPS_FCR_IR:
 704		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 705			return -EINVAL;
 706		v = boot_cpu_data.fpu_id;
 707		break;
 708	case KVM_REG_MIPS_FCR_CSR:
 709		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 710			return -EINVAL;
 711		v = fpu->fcr31;
 712		break;
 713
 714	/* MIPS SIMD Architecture (MSA) registers */
 715	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
 716		if (!kvm_mips_guest_has_msa(&vcpu->arch))
 717			return -EINVAL;
 718		/* Can't access MSA registers in FR=0 mode */
 719		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
 720			return -EINVAL;
 721		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
 722#ifdef CONFIG_CPU_LITTLE_ENDIAN
 723		/* least significant byte first */
 724		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
 725		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
 726#else
 727		/* most significant byte first */
 728		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
 729		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
 730#endif
 731		break;
 732	case KVM_REG_MIPS_MSA_IR:
 733		if (!kvm_mips_guest_has_msa(&vcpu->arch))
 734			return -EINVAL;
 735		v = boot_cpu_data.msa_id;
 736		break;
 737	case KVM_REG_MIPS_MSA_CSR:
 738		if (!kvm_mips_guest_has_msa(&vcpu->arch))
 739			return -EINVAL;
 740		v = fpu->msacsr;
 741		break;
 742
 743	/* registers to be handled specially */
 744	default:
 745		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
 746		if (ret)
 747			return ret;
 748		break;
 749	}
 750	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
 751		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
 752
 753		return put_user(v, uaddr64);
 754	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
 755		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
 756		u32 v32 = (u32)v;
 757
 758		return put_user(v32, uaddr32);
 759	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
 760		void __user *uaddr = (void __user *)(long)reg->addr;
 761
 762		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
 763	} else {
 764		return -EINVAL;
 765	}
 766}
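/*
 * Illustrative userspace read through the path above (a sketch; error
 * handling omitted):
 *
 *	__u64 pc;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_PC,
 *		.addr = (__u64)(unsigned long)&pc,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */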
 767
 768static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 769			    const struct kvm_one_reg *reg)
 770{
 771	struct mips_coproc *cop0 = vcpu->arch.cop0;
 772	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
 773	s64 v;
 774	s64 vs[2];
 775	unsigned int idx;
 776
 777	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
 778		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
 779
 780		if (get_user(v, uaddr64) != 0)
 781			return -EFAULT;
 782	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
 783		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
 784		s32 v32;
 785
 786		if (get_user(v32, uaddr32) != 0)
 787			return -EFAULT;
 788		v = (s64)v32;
 789	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
 790		void __user *uaddr = (void __user *)(long)reg->addr;
 791
 792		if (copy_from_user(vs, uaddr, 16))
			return -EFAULT;
 793	} else {
 794		return -EINVAL;
 795	}
 796
 797	switch (reg->id) {
 798	/* General purpose registers */
 799	case KVM_REG_MIPS_R0:
 800		/* Silently ignore requests to set $0 */
 801		break;
 802	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
 803		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
 804		break;
 805#ifndef CONFIG_CPU_MIPSR6
 806	case KVM_REG_MIPS_HI:
 807		vcpu->arch.hi = v;
 808		break;
 809	case KVM_REG_MIPS_LO:
 810		vcpu->arch.lo = v;
 811		break;
 812#endif
 813	case KVM_REG_MIPS_PC:
 814		vcpu->arch.pc = v;
 815		break;
 816
 817	/* Floating point registers */
 818	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
 819		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 820			return -EINVAL;
 821		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
 822		/* Odd singles in top of even double when FR=0 */
 823		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
 824			set_fpr32(&fpu->fpr[idx], 0, v);
 825		else
 826			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
 827		break;
 828	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
 829		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 830			return -EINVAL;
 831		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
 832		/* Can't access odd doubles in FR=0 mode */
 833		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
 834			return -EINVAL;
 835		set_fpr64(&fpu->fpr[idx], 0, v);
 836		break;
 837	case KVM_REG_MIPS_FCR_IR:
 838		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 839			return -EINVAL;
 840		/* Read-only */
 841		break;
 842	case KVM_REG_MIPS_FCR_CSR:
 843		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 844			return -EINVAL;
 845		fpu->fcr31 = v;
 846		break;
 847
 848	/* MIPS SIMD Architecture (MSA) registers */
 849	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
 850		if (!kvm_mips_guest_has_msa(&vcpu->arch))
 851			return -EINVAL;
 852		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
 853#ifdef CONFIG_CPU_LITTLE_ENDIAN
 854		/* least significant byte first */
 855		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
 856		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
 857#else
 858		/* most significant byte first */
 859		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
 860		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
 861#endif
 862		break;
 863	case KVM_REG_MIPS_MSA_IR:
 864		if (!kvm_mips_guest_has_msa(&vcpu->arch))
 865			return -EINVAL;
 866		/* Read-only */
 867		break;
 868	case KVM_REG_MIPS_MSA_CSR:
 869		if (!kvm_mips_guest_has_msa(&vcpu->arch))
 870			return -EINVAL;
 871		fpu->msacsr = v;
 872		break;
 873
 874	/* registers to be handled specially */
 875	default:
 876		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
 877	}
 878	return 0;
 879}
 880
 881static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 882				     struct kvm_enable_cap *cap)
 883{
 884	int r = 0;
 885
 886	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
 887		return -EINVAL;
 888	if (cap->flags)
 889		return -EINVAL;
 890	if (cap->args[0])
 891		return -EINVAL;
 892
 893	switch (cap->cap) {
 894	case KVM_CAP_MIPS_FPU:
 895		vcpu->arch.fpu_enabled = true;
 896		break;
 897	case KVM_CAP_MIPS_MSA:
 898		vcpu->arch.msa_enabled = true;
 899		break;
 900	default:
 901		r = -EINVAL;
 902		break;
 903	}
 904
 905	return r;
 906}
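/*
 * Illustrative usage (a sketch): userspace opts the guest in to the FPU;
 * flags and args must be zero, as checked above:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */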
 907
 908long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
 909			       unsigned long arg)
 910{
 911	struct kvm_vcpu *vcpu = filp->private_data;
 912	void __user *argp = (void __user *)arg;
 913
 914	if (ioctl == KVM_INTERRUPT) {
 915		struct kvm_mips_interrupt irq;
 916
 917		if (copy_from_user(&irq, argp, sizeof(irq)))
 918			return -EFAULT;
 919		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
 920			  irq.irq);
 921
 922		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
 923	}
 924
 925	return -ENOIOCTLCMD;
 926}
 927
 928long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
 929			 unsigned long arg)
 930{
 931	struct kvm_vcpu *vcpu = filp->private_data;
 932	void __user *argp = (void __user *)arg;
 933	long r;
 934
 935	vcpu_load(vcpu);
 936
 937	switch (ioctl) {
 938	case KVM_SET_ONE_REG:
 939	case KVM_GET_ONE_REG: {
 940		struct kvm_one_reg reg;
 941
 942		r = -EFAULT;
 943		if (copy_from_user(&reg, argp, sizeof(reg)))
 944			break;
 945		if (ioctl == KVM_SET_ONE_REG)
 946			r = kvm_mips_set_reg(vcpu, &reg);
 947		else
 948			r = kvm_mips_get_reg(vcpu, &reg);
 949		break;
 950	}
 951	case KVM_GET_REG_LIST: {
 952		struct kvm_reg_list __user *user_list = argp;
 953		struct kvm_reg_list reg_list;
 954		unsigned n;
 955
 956		r = -EFAULT;
 957		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
 958			break;
 959		n = reg_list.n;
 960		reg_list.n = kvm_mips_num_regs(vcpu);
 961		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
 962			break;
 963		r = -E2BIG;
 964		if (n < reg_list.n)
 965			break;
 966		r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
 967		break;
 968	}
 969	case KVM_ENABLE_CAP: {
 970		struct kvm_enable_cap cap;
 971
 972		r = -EFAULT;
 973		if (copy_from_user(&cap, argp, sizeof(cap)))
 974			break;
 975		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
 976		break;
 977	}
 978	default:
 979		r = -ENOIOCTLCMD;
 980	}
 981
 982	vcpu_put(vcpu);
 983	return r;
 984}
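/*
 * The KVM_GET_REG_LIST case above supports the usual two-call pattern; an
 * illustrative sketch (error handling omitted):
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	// fails with E2BIG,
 *							// probe.n holds count
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 */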
 985
 986void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 987{
 988
 989}
 990
 991int kvm_arch_flush_remote_tlb(struct kvm *kvm)
 992{
 993	kvm_mips_callbacks->prepare_flush_shadow(kvm);
 994	return 1;
 995}
 996
 997void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
 998					const struct kvm_memory_slot *memslot)
 999{
1000	kvm_flush_remote_tlbs(kvm);
1001}
1002
1003long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
1004{
1005	long r;
1006
1007	switch (ioctl) {
1008	default:
1009		r = -ENOIOCTLCMD;
1010	}
1011
1012	return r;
1013}
1014
1015int kvm_arch_init(void *opaque)
1016{
1017	if (kvm_mips_callbacks) {
1018		kvm_err("kvm: module already exists\n");
1019		return -EEXIST;
1020	}
1021
1022	return kvm_mips_emulation_init(&kvm_mips_callbacks);
1023}
1024
1025void kvm_arch_exit(void)
1026{
1027	kvm_mips_callbacks = NULL;
1028}
1029
1030int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1031				  struct kvm_sregs *sregs)
1032{
1033	return -ENOIOCTLCMD;
1034}
1035
1036int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1037				  struct kvm_sregs *sregs)
1038{
1039	return -ENOIOCTLCMD;
1040}
1041
1042void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1043{
1044}
1045
1046int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1047{
1048	return -ENOIOCTLCMD;
1049}
1050
1051int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1052{
1053	return -ENOIOCTLCMD;
1054}
1055
1056vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1057{
1058	return VM_FAULT_SIGBUS;
1059}
1060
1061int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
1062{
1063	int r;
1064
1065	switch (ext) {
1066	case KVM_CAP_ONE_REG:
1067	case KVM_CAP_ENABLE_CAP:
1068	case KVM_CAP_READONLY_MEM:
1069	case KVM_CAP_SYNC_MMU:
1070	case KVM_CAP_IMMEDIATE_EXIT:
1071		r = 1;
1072		break;
1073	case KVM_CAP_NR_VCPUS:
1074		r = num_online_cpus();
1075		break;
1076	case KVM_CAP_MAX_VCPUS:
1077		r = KVM_MAX_VCPUS;
1078		break;
1079	case KVM_CAP_MAX_VCPU_ID:
1080		r = KVM_MAX_VCPU_ID;
1081		break;
1082	case KVM_CAP_MIPS_FPU:
1083		/* We don't handle systems with inconsistent cpu_has_fpu */
1084		r = !!raw_cpu_has_fpu;
1085		break;
1086	case KVM_CAP_MIPS_MSA:
1087		/*
1088		 * We don't support MSA vector partitioning yet:
1089		 * 1) It would require explicit support which can't be tested
1090		 *    yet due to lack of support in current hardware.
1091		 * 2) It extends the state that would need to be saved/restored
1092		 *    by e.g. QEMU for migration.
1093		 *
1094		 * When vector partitioning hardware becomes available, support
1095		 * could be added by requiring a flag when enabling
1096		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
1097		 * to save/restore the appropriate extra state.
1098		 */
1099		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
1100		break;
1101	default:
1102		r = kvm_mips_callbacks->check_extension(kvm, ext);
1103		break;
1104	}
1105	return r;
1106}
1107
1108int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
1109{
1110	return kvm_mips_pending_timer(vcpu) ||
1111		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
1112}
1113
1114int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
1115{
1116	int i;
1117	struct mips_coproc *cop0;
1118
1119	if (!vcpu)
1120		return -1;
1121
1122	kvm_debug("VCPU Register Dump:\n");
1123	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
1124	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
1125
1126	for (i = 0; i < 32; i += 4) {
1127		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
1128		       vcpu->arch.gprs[i],
1129		       vcpu->arch.gprs[i + 1],
1130		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
1131	}
1132	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
1133	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
1134
1135	cop0 = vcpu->arch.cop0;
1136	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
1137		  kvm_read_c0_guest_status(cop0),
1138		  kvm_read_c0_guest_cause(cop0));
1139
1140	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
1141
1142	return 0;
1143}
1144
1145int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1146{
1147	int i;
1148
1149	vcpu_load(vcpu);
1150
1151	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1152		vcpu->arch.gprs[i] = regs->gpr[i];
1153	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
1154	vcpu->arch.hi = regs->hi;
1155	vcpu->arch.lo = regs->lo;
1156	vcpu->arch.pc = regs->pc;
1157
1158	vcpu_put(vcpu);
1159	return 0;
1160}
1161
1162int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1163{
1164	int i;
1165
1166	vcpu_load(vcpu);
1167
1168	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1169		regs->gpr[i] = vcpu->arch.gprs[i];
1170
1171	regs->hi = vcpu->arch.hi;
1172	regs->lo = vcpu->arch.lo;
1173	regs->pc = vcpu->arch.pc;
1174
1175	vcpu_put(vcpu);
1176	return 0;
1177}
1178
1179int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1180				  struct kvm_translation *tr)
1181{
1182	return 0;
1183}
1184
1185static void kvm_mips_set_c0_status(void)
1186{
1187	u32 status = read_c0_status();
1188
1189	if (cpu_has_dsp)
1190		status |= (ST0_MX);
1191
1192	write_c0_status(status);
1193	ehb();
1194}
1195
1196/*
1197 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
1198 */
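/*
 * For example, the signal_pending() path below returns
 * (-EINTR << 2) | RESUME_HOST, which the caller decodes back to -EINTR
 * for userspace.
 */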
1199int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
1200{
1201	struct kvm_run *run = vcpu->run;
1202	u32 cause = vcpu->arch.host_cp0_cause;
1203	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1204	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
1205	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1206	enum emulation_result er = EMULATE_DONE;
1207	u32 inst;
1208	int ret = RESUME_GUEST;
1209
1210	vcpu->mode = OUTSIDE_GUEST_MODE;
1211
1212	/* Set a default exit reason */
1213	run->exit_reason = KVM_EXIT_UNKNOWN;
1214	run->ready_for_interrupt_injection = 1;
1215
1216	/*
1217	 * Set the appropriate status bits based on host CPU features,
1218	 * before we hit the scheduler
1219	 */
1220	kvm_mips_set_c0_status();
1221
1222	local_irq_enable();
1223
1224	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
1225			cause, opc, run, vcpu);
1226	trace_kvm_exit(vcpu, exccode);
1227
1228	switch (exccode) {
1229	case EXCCODE_INT:
1230		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
1231
1232		++vcpu->stat.int_exits;
1233
1234		if (need_resched())
1235			cond_resched();
1236
1237		ret = RESUME_GUEST;
1238		break;
1239
1240	case EXCCODE_CPU:
1241		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);
1242
1243		++vcpu->stat.cop_unusable_exits;
1244		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
1245		/* XXXKYMA: Might need to return to user space */
1246		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
1247			ret = RESUME_HOST;
1248		break;
1249
1250	case EXCCODE_MOD:
1251		++vcpu->stat.tlbmod_exits;
1252		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
1253		break;
1254
1255	case EXCCODE_TLBS:
1256		kvm_debug("TLB ST fault:  cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
1257			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
1258			  badvaddr);
1259
1260		++vcpu->stat.tlbmiss_st_exits;
1261		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
1262		break;
1263
1264	case EXCCODE_TLBL:
1265		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
1266			  cause, opc, badvaddr);
1267
1268		++vcpu->stat.tlbmiss_ld_exits;
1269		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
1270		break;
1271
1272	case EXCCODE_ADES:
1273		++vcpu->stat.addrerr_st_exits;
1274		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
1275		break;
1276
1277	case EXCCODE_ADEL:
1278		++vcpu->stat.addrerr_ld_exits;
1279		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
1280		break;
1281
1282	case EXCCODE_SYS:
1283		++vcpu->stat.syscall_exits;
1284		ret = kvm_mips_callbacks->handle_syscall(vcpu);
1285		break;
1286
1287	case EXCCODE_RI:
1288		++vcpu->stat.resvd_inst_exits;
1289		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
1290		break;
1291
1292	case EXCCODE_BP:
1293		++vcpu->stat.break_inst_exits;
1294		ret = kvm_mips_callbacks->handle_break(vcpu);
1295		break;
1296
1297	case EXCCODE_TR:
1298		++vcpu->stat.trap_inst_exits;
1299		ret = kvm_mips_callbacks->handle_trap(vcpu);
1300		break;
1301
1302	case EXCCODE_MSAFPE:
1303		++vcpu->stat.msa_fpe_exits;
1304		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
1305		break;
1306
1307	case EXCCODE_FPE:
1308		++vcpu->stat.fpe_exits;
1309		ret = kvm_mips_callbacks->handle_fpe(vcpu);
1310		break;
1311
1312	case EXCCODE_MSADIS:
1313		++vcpu->stat.msa_disabled_exits;
1314		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
1315		break;
1316
1317	case EXCCODE_GE:
1318		/* defer exit accounting to handler */
1319		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
1320		break;
1321
1322	default:
1323		if (cause & CAUSEF_BD)
1324			opc += 1;
1325		inst = 0;
1326		kvm_get_badinstr(opc, vcpu, &inst);
1327		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#x\n",
1328			exccode, opc, inst, badvaddr,
1329			kvm_read_c0_guest_status(vcpu->arch.cop0));
1330		kvm_arch_vcpu_dump_regs(vcpu);
1331		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1332		ret = RESUME_HOST;
1333		break;
1334
1335	}
1336
1337	local_irq_disable();
1338
1339	if (ret == RESUME_GUEST)
1340		kvm_vz_acquire_htimer(vcpu);
1341
1342	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
1343		kvm_mips_deliver_interrupts(vcpu, cause);
1344
1345	if (!(ret & RESUME_HOST)) {
1346		/* Only check for signals if not already exiting to userspace */
1347		if (signal_pending(current)) {
1348			run->exit_reason = KVM_EXIT_INTR;
1349			ret = (-EINTR << 2) | RESUME_HOST;
1350			++vcpu->stat.signal_exits;
1351			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
1352		}
1353	}
1354
1355	if (ret == RESUME_GUEST) {
1356		trace_kvm_reenter(vcpu);
1357
1358		/*
1359		 * Make sure the read of VCPU requests in vcpu_reenter()
1360		 * callback is not reordered ahead of the write to vcpu->mode,
1361		 * or we could miss a TLB flush request while the requester sees
1362		 * the VCPU as outside of guest mode and not needing an IPI.
1363		 */
1364		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
1365
1366		kvm_mips_callbacks->vcpu_reenter(vcpu);
1367
1368		/*
1369		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
1370		 * is live), restore FCR31 / MSACSR.
1371		 *
1372		 * This should be before returning to the guest exception
1373		 * vector, as it may well cause an [MSA] FP exception if there
1374		 * are pending exception bits unmasked. (see
1375		 * kvm_mips_csr_die_notifier() for how that is handled).
1376		 */
1377		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
1378		    read_c0_status() & ST0_CU1)
1379			__kvm_restore_fcsr(&vcpu->arch);
1380
1381		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
1382		    read_c0_config5() & MIPS_CONF5_MSAEN)
1383			__kvm_restore_msacsr(&vcpu->arch);
1384	}
1385	return ret;
1386}
1387
1388/* Enable FPU for guest and restore context */
1389void kvm_own_fpu(struct kvm_vcpu *vcpu)
1390{
1391	struct mips_coproc *cop0 = vcpu->arch.cop0;
1392	unsigned int sr, cfg5;
1393
1394	preempt_disable();
1395
1396	sr = kvm_read_c0_guest_status(cop0);
1397
1398	/*
1399	 * If MSA state is already live, it is undefined how it interacts with
1400	 * FR=0 FPU state, and we don't want to hit reserved instruction
1401	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
1402	 * play it safe and save it first.
1403	 */
1404	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
1405	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1406		kvm_lose_fpu(vcpu);
1407
1408	/*
1409	 * Enable FPU for guest
1410	 * We set FR and FRE according to guest context
1411	 */
1412	change_c0_status(ST0_CU1 | ST0_FR, sr);
1413	if (cpu_has_fre) {
1414		cfg5 = kvm_read_c0_guest_config5(cop0);
1415		change_c0_config5(MIPS_CONF5_FRE, cfg5);
1416	}
1417	enable_fpu_hazard();
1418
1419	/* If guest FPU state not active, restore it now */
1420	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
1421		__kvm_restore_fpu(&vcpu->arch);
1422		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
1423		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1424	} else {
1425		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
1426	}
1427
1428	preempt_enable();
1429}
1430
1431#ifdef CONFIG_CPU_HAS_MSA
1432/* Enable MSA for guest and restore context */
1433void kvm_own_msa(struct kvm_vcpu *vcpu)
1434{
1435	struct mips_coproc *cop0 = vcpu->arch.cop0;
1436	unsigned int sr, cfg5;
1437
1438	preempt_disable();
1439
1440	/*
1441	 * Enable FPU if enabled in guest, since we're restoring FPU context
1442	 * anyway. We set FR and FRE according to guest context.
1443	 */
1444	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
1445		sr = kvm_read_c0_guest_status(cop0);
1446
1447		/*
1448		 * If FR=0 FPU state is already live, it is undefined how it
1449		 * interacts with MSA state, so play it safe and save it first.
1450		 */
1451		if (!(sr & ST0_FR) &&
1452		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
1453				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
1454			kvm_lose_fpu(vcpu);
1455
1456		change_c0_status(ST0_CU1 | ST0_FR, sr);
1457		if (sr & ST0_CU1 && cpu_has_fre) {
1458			cfg5 = kvm_read_c0_guest_config5(cop0);
1459			change_c0_config5(MIPS_CONF5_FRE, cfg5);
1460		}
1461	}
1462
1463	/* Enable MSA for guest */
1464	set_c0_config5(MIPS_CONF5_MSAEN);
1465	enable_fpu_hazard();
1466
1467	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
1468	case KVM_MIPS_AUX_FPU:
1469		/*
1470		 * Guest FPU state already loaded, only restore upper MSA state
1471		 */
1472		__kvm_restore_msa_upper(&vcpu->arch);
1473		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
1474		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
1475		break;
1476	case 0:
1477		/* Neither FPU nor MSA already active, restore full MSA state */
1478		__kvm_restore_msa(&vcpu->arch);
1479		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
1480		if (kvm_mips_guest_has_fpu(&vcpu->arch))
1481			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
1482		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
1483			      KVM_TRACE_AUX_FPU_MSA);
1484		break;
1485	default:
1486		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
1487		break;
1488	}
1489
1490	preempt_enable();
1491}
1492#endif
1493
1494/* Drop FPU & MSA without saving it */
1495void kvm_drop_fpu(struct kvm_vcpu *vcpu)
1496{
1497	preempt_disable();
1498	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1499		disable_msa();
1500		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
1501		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
1502	}
1503	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
1504		clear_c0_status(ST0_CU1 | ST0_FR);
1505		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
1506		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
1507	}
1508	preempt_enable();
1509}
1510
1511/* Save and disable FPU & MSA */
1512void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1513{
1514	/*
1515	 * With T&E, FPU & MSA get disabled in root context (hardware) when they
1516	 * are disabled in guest context (software), but the register state in
1517	 * the hardware may still be in use.
1518	 * This is why we explicitly re-enable the hardware before saving.
1519	 */
1520
1521	preempt_disable();
1522	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1523		__kvm_save_msa(&vcpu->arch);
1524		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
1525
1526		/* Disable MSA & FPU */
1527		disable_msa();
1528		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
1529			clear_c0_status(ST0_CU1 | ST0_FR);
1530			disable_fpu_hazard();
1531		}
1532		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
1533	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
1534		__kvm_save_fpu(&vcpu->arch);
1535		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
1536		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1537
1538		/* Disable FPU */
1539		clear_c0_status(ST0_CU1 | ST0_FR);
1540		disable_fpu_hazard();
1541	}
1542	preempt_enable();
1543}
1544
1545/*
1546 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
1547 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
1548 * exception if cause bits are set in the value being written.
1549 */
1550static int kvm_mips_csr_die_notify(struct notifier_block *self,
1551				   unsigned long cmd, void *ptr)
1552{
1553	struct die_args *args = (struct die_args *)ptr;
1554	struct pt_regs *regs = args->regs;
1555	unsigned long pc;
1556
1557	/* Only interested in FPE and MSAFPE */
1558	if (cmd != DIE_FP && cmd != DIE_MSAFP)
1559		return NOTIFY_DONE;
1560
1561	/* Return immediately if guest context isn't active */
1562	if (!(current->flags & PF_VCPU))
1563		return NOTIFY_DONE;
1564
1565	/* Should never get here from user mode */
1566	BUG_ON(user_mode(regs));
1567
1568	pc = instruction_pointer(regs);
1569	switch (cmd) {
1570	case DIE_FP:
1571		/* match 2nd instruction in __kvm_restore_fcsr */
1572		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
1573			return NOTIFY_DONE;
1574		break;
1575	case DIE_MSAFP:
1576		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
1577		if (!cpu_has_msa ||
1578		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
1579		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
1580			return NOTIFY_DONE;
1581		break;
1582	}
1583
1584	/* Move PC forward a little and continue executing */
1585	instruction_pointer(regs) += 4;
1586
1587	return NOTIFY_STOP;
1588}
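/*
 * A sketch of why "+ 4" picks out the right instruction (assuming the
 * restore helpers in fpu.S/msa.S are laid out roughly like this;
 * illustrative only):
 *
 *	LEAF(__kvm_restore_fcsr)
 *		lw	t0, VCPU_FCR31(a0)
 *		ctc1	t0, fcr31	# pc == &__kvm_restore_fcsr + 4
 *		...
 *
 * Advancing the PC by 4 then simply resumes execution after the faulting
 * ctc1/ctcmsa.
 */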
1589
1590static struct notifier_block kvm_mips_csr_die_notifier = {
1591	.notifier_call = kvm_mips_csr_die_notify,
1592};
1593
1594static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
1595	[MIPS_EXC_INT_TIMER] = C_IRQ5,
1596	[MIPS_EXC_INT_IO_1]  = C_IRQ0,
1597	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
1598	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
1599};
1600
1601static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
1602	[MIPS_EXC_INT_TIMER] = C_IRQ5,
1603	[MIPS_EXC_INT_IO_1]  = C_IRQ0,
1604	[MIPS_EXC_INT_IO_2]  = C_IRQ1,
1605	[MIPS_EXC_INT_IPI_1] = C_IRQ4,
1606};
1607
1608u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;
1609
1610u32 kvm_irq_to_priority(u32 irq)
1611{
1612	int i;
1613
1614	for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
1615		if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
1616			return i;
1617	}
1618
1619	return MIPS_EXC_MAX;
1620}
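/*
 * Worked example: these irq numbers index CP0_Cause interrupt-pending
 * bits, so irq 2 yields 1 << (2 + 8) == C_IRQ0, which the default table
 * above resolves to MIPS_EXC_INT_IO_1.
 */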
1621
1622static int __init kvm_mips_init(void)
1623{
1624	int ret;
1625
1626	if (cpu_has_mmid) {
1627		pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
1628		return -EOPNOTSUPP;
1629	}
1630
1631	ret = kvm_mips_entry_setup();
1632	if (ret)
1633		return ret;
1634
1635	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1636
1637	if (ret)
1638		return ret;
1639
1640	if (boot_cpu_type() == CPU_LOONGSON64)
1641		kvm_priority_to_irq = kvm_loongson3_priority_to_irq;
1642
1643	register_die_notifier(&kvm_mips_csr_die_notifier);
1644
1645	return 0;
1646}
1647
1648static void __exit kvm_mips_exit(void)
1649{
1650	kvm_exit();
1651
1652	unregister_die_notifier(&kvm_mips_csr_die_notifier);
1653}
1654
1655module_init(kvm_mips_init);
1656module_exit(kvm_mips_exit);
1657
1658EXPORT_TRACEPOINT_SYMBOL(kvm_guest_mode_change);