/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

#include <linux/kvm_host.h>

#include "kvm_mips_int.h"
#include "kvm_mips_comm.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait", VCPU_STAT(wait_exits) },
	{ "cache", VCPU_STAT(cache_exits) },
	{ "signal", VCPU_STAT(signal_exits) },
	{ "interrupt", VCPU_STAT(int_exits) },
	{ "cop_unusable", VCPU_STAT(cop_unusable_exits) },
	{ "tlbmod", VCPU_STAT(tlbmod_exits) },
	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
	{ "addrerr_st", VCPU_STAT(addrerr_st_exits) },
	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
	{ "syscall", VCPU_STAT(syscall_exits) },
	{ "resvd_inst", VCPU_STAT(resvd_inst_exits) },
	{ "break_inst", VCPU_STAT(break_inst_exits) },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{NULL}
};

static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int i;
	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
	}
	return 0;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

/* XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	int *r = (int *)rtn;
	*r = 0;
	return;
}

static void kvm_mips_init_tlbs(struct kvm *kvm)
{
	unsigned long wired;

	/* Add a wired entry to the TLB; it is used to map the commpage to
	 * the Guest kernel */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	mtc0_tlbw_hazard();
	kvm->arch.commpage_tlb = wired;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);
}

static void kvm_mips_init_vm_percpu(void *arg)
{
	struct kvm *kvm = (struct kvm *)arg;

	kvm_mips_init_tlbs(kvm);
	kvm_mips_callbacks->vm_init(kvm);

}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (atomic_inc_return(&kvm_mips_instance) == 1) {
		kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
			 __func__);
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
	}

	return 0;
}

void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
	}

	if (kvm->arch.guest_pmap)
		kfree(kvm->arch.guest_pmap);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

static void kvm_mips_uninit_tlbs(void *arg)
{
	/* Restore wired count */
	write_c0_wired(0);
	mtc0_tlbw_hazard();
	/* Clear out all the TLBs */
	kvm_local_flush_tlb_all();
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);

	/* If this is the last instance, restore wired count */
	if (atomic_dec_return(&kvm_mips_instance) == 0) {
		kvm_info("%s: last KVM instance, restoring TLB parameters\n",
			 __func__);
		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
	}
}

long
kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	unsigned long npages = 0;
	int i, err = 0;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/* Setup Guest PMAP table */
	if (!kvm->arch.guest_pmap) {
		if (mem->slot == 0)
			npages = mem->memory_size >> PAGE_SHIFT;

		if (npages) {
			kvm->arch.guest_pmap_npages = npages;
			kvm->arch.guest_pmap =
			    kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

			if (!kvm->arch.guest_pmap) {
				kvm_err("Failed to allocate guest PMAP");
				err = -ENOMEM;
				goto out;
			}

			kvm_info
			    ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
			     npages, kvm->arch.guest_pmap);

			/* Now setup the page table */
			for (i = 0; i < npages; i++) {
				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
			}
		}
	}
out:
	return;
}

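/*
 * Illustrative userspace sketch (not part of this file): registering guest
 * RAM in slot 0 with KVM_SET_USER_MEMORY_REGION is what later triggers the
 * guest PMAP allocation in kvm_arch_commit_memory_region() above. vm_fd,
 * ram and ram_size are assumptions; the ioctl and struct come from the
 * standard KVM UAPI in <linux/kvm.h>.
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = ram_size,
 *		.userspace_addr  = (__u64)(unsigned long)ram,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem) < 0)
 *		perror("KVM_SET_USER_MEMORY_REGION");
 */
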
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	extern char mips32_exception[], mips32_exceptionEnd[];
	extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
	int err, size, offset;
	void *gebase;
	int i;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);

	if (err)
		goto out_free_cpu;

	kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/* Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint) {
		size = 0x200 + VECTORSPACING * 64;
	} else {
		size = 0x200;
	}

	/* Save Linux EBASE */
	vcpu->arch.host_ebase = (void *)read_c0_ebase();

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_free_cpu;
	}
	kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		 ALIGN(size, PAGE_SIZE), gebase);

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Copy L1 Guest Exception handler to correct offset */

	/* TLB Refill, EXL = 0 */
	memcpy(gebase, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* General Exception Entry point */
	memcpy(gebase + 0x180, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
		       mips32_exceptionEnd - mips32_exception);
	}

	/* General handler, relocate to unmapped space for sanity's sake */
	offset = 0x2000;
	kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
		 gebase + offset,
		 mips32_GuestExceptionEnd - mips32_GuestException);

	memcpy(gebase + offset, mips32_GuestException,
	       mips32_GuestExceptionEnd - mips32_GuestException);

	/* Invalidate the icache for these ranges */
	mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));

	/* Allocate comm page for guest kernel; a TLB entry will be reserved
	 * for mapping GVA @ 0xFFFF8000 to this page */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_emulate_count(vcpu);

	return vcpu;

out_free_gebase:
	kfree(gebase);

out_free_cpu:
	kfree(vcpu);

out:
	return ERR_PTR(err);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	if (vcpu->arch.guest_ebase)
		kfree(vcpu->arch.guest_ebase);

	if (vcpu->arch.kseg0_commpage)
		kfree(vcpu->arch.kseg0_commpage);

}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int
kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
				    struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = 0;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	local_irq_disable();
	kvm_guest_enter();

	r = __kvm_mips_vcpu_run(run, vcpu);

	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

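/*
 * Illustrative userspace sketch (not part of this file): the run loop that
 * ends up in kvm_arch_vcpu_ioctl_run() above. vcpu_fd and mmap_size are
 * assumptions (from KVM_CREATE_VCPU and KVM_GET_VCPU_MMAP_SIZE), and
 * handle_mmio() is a hypothetical helper. After userspace services an MMIO
 * read, the next KVM_RUN completes the load via
 * kvm_mips_complete_mmio_load().
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
 *			break;
 *		if (run->exit_reason == KVM_EXIT_MMIO)
 *			handle_mmio(run);
 *		else if (run->exit_reason != KVM_EXIT_INTR)
 *			break;
 *	}
 */
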
int
kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	if (waitqueue_active(&dvcpu->wq)) {
		wake_up_interruptible(&dvcpu->wq);
	}

	return 0;
}

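/*
 * Illustrative userspace sketch (not part of this file): driving the
 * KVM_INTERRUPT ioctl handled above. A positive irq number queues an I/O
 * interrupt, the matching negative number dequeues it, and cpu == -1
 * targets the vcpu the ioctl is issued on. vcpu_fd is an assumption.
 *
 *	struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 2 };
 *
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	(assert IRQ 2)
 *	irq.irq = -2;
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	(deassert IRQ 2)
 */
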
int
kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int
kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

#define MIPS_CP0_32(_R, _S)					\
	(KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)					\
	(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)

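/*
 * The MIPS_CP0_32()/MIPS_CP0_64() encoding above packs a (register, select)
 * pair into the low bits of the register ID: bits 0-2 hold the select,
 * bits 3-7 the register number, and 0x10000 marks the CP0 block. As a
 * worked example, Status is CP0 register 12, select 0, accessed as a
 * 32-bit value:
 *
 *	KVM_REG_MIPS_CP0_STATUS
 *		== KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * 12 + 0)
 */
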
static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
	KVM_REG_MIPS_PC,

	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	/* EPC set via kvm_regs, et al. */
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC
};

static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 v;

	switch (reg->id) {
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	case KVM_REG_MIPS_CP0_INDEX:
		v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	default:
		return -EINVAL;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;
		return put_user(v32, uaddr32);
	} else {
		return -EINVAL;
	}
}

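/*
 * Illustrative userspace sketch (not part of this file): reading the guest
 * PC with KVM_GET_ONE_REG, which lands in kvm_mips_get_reg() above. Since
 * KVM_REG_MIPS_PC carries KVM_REG_SIZE_U64, the kernel writes a u64 back
 * to reg.addr. vcpu_fd is an assumption.
 *
 *	__u64 pc;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_PC,
 *		.addr = (__u64)(unsigned long)&pc,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		perror("KVM_GET_ONE_REG");
 */
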
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u64 v;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		kvm_write_c0_guest_cause(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

long
kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		else
			return kvm_mips_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		u64 __user *reg_dest;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		reg_dest = user_list->reg;
		if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
				 sizeof(kvm_mips_get_one_regs)))
			return -EFAULT;
		return 0;
	}
	case KVM_NMI:
		/* Treat the NMI as a CPU reset */
		r = kvm_mips_reset_vcpu(vcpu);
		break;
	case KVM_INTERRUPT:
		{
			struct kvm_mips_interrupt irq;
			r = -EFAULT;
			if (copy_from_user(&irq, argp, sizeof(irq)))
				goto out;

			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
				  irq.irq);

			r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
			break;
		}
	default:
		r = -ENOIOCTLCMD;
	}

out:
	return r;
}

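/*
 * Illustrative userspace sketch (not part of this file): the two-call
 * pattern that matches the KVM_GET_REG_LIST handling above. The first call
 * is sized too small, so the kernel writes back the real count and fails
 * with E2BIG; the retry then has enough room. vcpu_fd is an assumption.
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	(fails with E2BIG)
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) == 0)
 *		;	(list->reg[] now holds list->n register IDs)
 */
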
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	unsigned long ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = &kvm->memslots->memslots[log->slot];

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
		       ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;

}

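/*
 * Illustrative userspace sketch (not part of this file): fetching and
 * clearing the dirty bitmap for slot 0 via KVM_GET_DIRTY_LOG, serviced
 * above. The bitmap has one bit per page of the slot and must be at least
 * kvm_dirty_bitmap_bytes() long for that slot; vm_fd and bitmap are
 * assumptions.
 *
 *	struct kvm_dirty_log log = {
 *		.slot = 0,
 *		.dirty_bitmap = bitmap,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
 *		perror("KVM_GET_DIRTY_LOG");
 */
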
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	int ret;

	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	ret = kvm_mips_emulation_init(&kvm_mips_callbacks);

	return ret;
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}

int
kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int
kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	printk("VCPU Register Dump:\n");
	printk("\tpc = 0x%08lx\n", vcpu->arch.pc);
	printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gprs[i],
		       vcpu->arch.gprs[i + 1],
		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	printk("\thi: 0x%08lx\n", vcpu->arch.hi);
	printk("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
	       kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));

	printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	return 0;
}

void kvm_mips_comparecount_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
	}
}

/*
 * low level hrtimer wake routine.
 */
enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	hrtimer_forward_now(&vcpu->arch.comparecount_timer,
			    ktime_set(0, MS_TO_NS(10)));
	return HRTIMER_RESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	kvm_mips_callbacks->vcpu_init(vcpu);
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	return;
}

int
kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
{
	return 0;
}

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}

static
void kvm_mips_set_c0_status(void)
{
	uint32_t status = read_c0_status();

	if (cpu_has_fpu)
		status |= (ST0_CU1);

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}

/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	uint32_t cause = vcpu->arch.host_cp0_cause;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
			cause, opc, run, vcpu);
	/* Do a privilege check; if in UM, most of these exit conditions end
	 * up causing an exception to be delivered to the Guest Kernel
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
		goto skip_emul;
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		goto skip_emul;
	}

	switch (exccode) {
	case T_INT:
		kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;
		trace_kvm_exit(vcpu, INT_EXITS);

		if (need_resched()) {
			cond_resched();
		}

		ret = RESUME_GUEST;
		break;

	case T_COP_UNUSABLE:
		kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
			ret = RESUME_HOST;
		}
		break;

	case T_TLB_MOD:
		++vcpu->stat.tlbmod_exits;
		trace_kvm_exit(vcpu, TLBMOD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case T_TLB_ST_MISS:
		kvm_debug
		    ("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
		     cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
		     badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case T_TLB_LD_MISS:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case T_ADDR_ERR_ST:
		++vcpu->stat.addrerr_st_exits;
		trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case T_ADDR_ERR_LD:
		++vcpu->stat.addrerr_ld_exits;
		trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case T_SYSCALL:
		++vcpu->stat.syscall_exits;
		trace_kvm_exit(vcpu, SYSCALL_EXITS);
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case T_RES_INST:
		++vcpu->stat.resvd_inst_exits;
		trace_kvm_exit(vcpu, RESVD_INST_EXITS);
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case T_BREAK:
		++vcpu->stat.break_inst_exits;
		trace_kvm_exit(vcpu, BREAK_INST_EXITS);
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	default:
		kvm_err
		    ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
		     exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
		     kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	}

skip_emul:
	local_irq_disable();

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return ret;
}

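/*
 * A minimal sketch of decoding the return value of kvm_mips_handle_exit(),
 * following the (errcode << 2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 * convention documented above. For instance, a pending signal produces
 * (-EINTR << 2) | RESUME_HOST, so a caller could do:
 *
 *	int ret = kvm_mips_handle_exit(run, vcpu);
 *
 *	if (ret & RESUME_HOST) {
 *		int err = ret >> 2;	(-EINTR in the signal case)
 *		(exit to userspace, propagating err if negative)
 *	} else {
 *		(re-enter the guest)
 *	}
 */
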
int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	/* On MIPS, kernel modules are executed from "mapped space", which
	 * requires TLBs. The TLB handling code is statically linked with
	 * the rest of the kernel (kvm_tlb.c) to avoid the possibility of
	 * double faulting. The issue is that the TLB code references
	 * routines that are part of the KVM module, which are only
	 * available once the module is loaded.
	 */
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
	kvm_mips_is_error_pfn = is_error_pfn;

	pr_info("KVM/MIPS Initialized\n");
	return 0;
}

void __exit kvm_mips_exit(void)
{
	kvm_exit();

	kvm_mips_gfn_to_pfn = NULL;
	kvm_mips_release_pfn_clean = NULL;
	kvm_mips_is_error_pfn = NULL;

	pr_info("KVM/MIPS unloaded\n");
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);