/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS()
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, wait_exits),
	STATS_DESC_COUNTER(VCPU, cache_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, int_exits),
	STATS_DESC_COUNTER(VCPU, cop_unusable_exits),
	STATS_DESC_COUNTER(VCPU, tlbmod_exits),
	STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits),
	STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits),
	STATS_DESC_COUNTER(VCPU, addrerr_st_exits),
	STATS_DESC_COUNTER(VCPU, addrerr_ld_exits),
	STATS_DESC_COUNTER(VCPU, syscall_exits),
	STATS_DESC_COUNTER(VCPU, resvd_inst_exits),
	STATS_DESC_COUNTER(VCPU, break_inst_exits),
	STATS_DESC_COUNTER(VCPU, trap_inst_exits),
	STATS_DESC_COUNTER(VCPU, msa_fpe_exits),
	STATS_DESC_COUNTER(VCPU, fpe_exits),
	STATS_DESC_COUNTER(VCPU, msa_disabled_exits),
	STATS_DESC_COUNTER(VCPU, flush_dcache_exits),
	STATS_DESC_COUNTER(VCPU, vz_gpsi_exits),
	STATS_DESC_COUNTER(VCPU, vz_gsfc_exits),
	STATS_DESC_COUNTER(VCPU, vz_hc_exits),
	STATS_DESC_COUNTER(VCPU, vz_grr_exits),
	STATS_DESC_COUNTER(VCPU, vz_gva_exits),
	STATS_DESC_COUNTER(VCPU, vz_ghfc_exits),
	STATS_DESC_COUNTER(VCPU, vz_gpa_exits),
	STATS_DESC_COUNTER(VCPU, vz_resvd_exits),
#ifdef CONFIG_CPU_LOONGSON64
	STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits),
#endif
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
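
/*
 * Illustrative sketch (not part of this file): userspace consumes the
 * layout described by these headers through the binary stats interface,
 * assuming a kernel with KVM_CAP_BINARY_STATS_FD and a vCPU fd:
 *
 *	struct kvm_stats_header hdr;
 *	int stats_fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);
 *
 *	pread(stats_fd, &hdr, sizeof(hdr), 0);
 *	// hdr.desc_offset locates the descriptor array,
 *	// hdr.data_offset locates the matching u64 counter values
 */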

bool kvm_trace_guest_mode_change;

int kvm_guest_mode_change_trace_reg(void)
{
	kvm_trace_guest_mode_change = true;
	return 0;
}

void kvm_guest_mode_change_trace_unreg(void)
{
	kvm_trace_guest_mode_change = false;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_enable_virtualization_cpu(void)
{
	return kvm_mips_callbacks->enable_virtualization_cpu();
}

void kvm_arch_disable_virtualization_cpu(void)
{
	kvm_mips_callbacks->disable_virtualization_cpu();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	switch (type) {
	case KVM_VM_MIPS_AUTO:
		break;
	case KVM_VM_MIPS_VZ:
		break;
	default:
		/* Unsupported KVM type */
		return -EINVAL;
	}

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
	if (!kvm->arch.gpa_mm.pgd)
		return -ENOMEM;

#ifdef CONFIG_CPU_LOONGSON64
	kvm_init_loongson_ipi(kvm);
#endif

	return 0;
}
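
/*
 * Illustrative sketch (not part of this file): the type argument above
 * arrives from userspace's KVM_CREATE_VM call on the /dev/kvm fd, e.g.
 *
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_MIPS_AUTO);
 */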

static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
	/* It should always be safe to remove after flushing the whole range */
	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_destroy_vcpus(kvm);
	kvm_mips_free_gpa_pt(kvm);
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	/* Flush whole GPA */
	kvm_mips_flush_gpa_pt(kvm, 0, ~0);
	kvm_flush_remote_tlbs(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	/*
	 * The slot has been made invalid (ready for moving or deletion), so we
	 * need to ensure that it can no longer be accessed by any guest VCPUs.
	 */

	spin_lock(&kvm->mmu_lock);
	/* Flush slot from GPA */
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	kvm_flush_remote_tlbs_memslot(kvm, slot);
	spin_unlock(&kvm->mmu_lock);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int needs_flush;

	/*
	 * If dirty page logging is enabled, write protect all pages in the slot
	 * ready for dirty logging.
	 *
	 * There is no need to do this in any of the following cases:
	 * CREATE:	No dirty mappings will already exist.
	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot()
	 */
	if (change == KVM_MR_FLAGS_ONLY &&
	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
		spin_lock(&kvm->mmu_lock);
		/* Write protect GPA page table entries */
		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
					new->base_gfn + new->npages - 1);
		if (needs_flush)
			kvm_flush_remote_tlbs_memslot(kvm, new);
		spin_unlock(&kvm->mmu_lock);
	}
}
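
/*
 * Illustrative sketch (not part of this file): the KVM_MR_FLAGS_ONLY path
 * above fires when userspace toggles dirty logging on an existing slot,
 * roughly (size/hva unchanged from when the slot was created):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.flags = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0,
 *		.memory_size = size,
 *		.userspace_addr = (__u64)(unsigned long)hva,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */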

static inline void dump_handler(const char *symbol, void *start, void *end)
{
	u32 *p;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	rcuwait_wake_up(&vcpu->wait);

	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err, size;
	void *gebase, *p, *handler, *refill_start, *refill_end;
	int i;

	kvm_debug("kvm @ %p: create cpu %d at %p\n",
		  vcpu->kvm, vcpu->vcpu_id, vcpu);

	err = kvm_mips_callbacks->vcpu_init(vcpu);
	if (err)
		return err;

	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_vcpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/*
	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
	 * limits us to the low 512MB of physical address space. If the memory
	 * we allocate is out of range, just give up now.
	 */
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
			gebase);
		err = -ENOMEM;
		goto out_free_gebase;
	}

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
	refill_start = gebase;
	if (IS_ENABLED(CONFIG_64BIT))
		refill_start += 0x080;
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
					 handler);
	}

	/* General exit handler */
	p = handler;
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");
	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_tlb_refill", refill_start, refill_end);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

	/* Invalidate the icache for these ranges */
	flush_icache_range((unsigned long)gebase,
			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/* Init */
	vcpu->arch.last_sched_cpu = -1;
	vcpu->arch.last_exec_cpu = -1;

	/* Initial guest state */
	err = kvm_mips_callbacks->vcpu_setup(vcpu);
	if (err)
		goto out_free_gebase;

	return 0;

out_free_gebase:
	kfree(gebase);
out_uninit_vcpu:
	kvm_mips_callbacks->vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_mips_dump_stats(vcpu);

	kvm_mmu_free_memory_caches(vcpu);
	kfree(vcpu->arch.guest_ebase);

	kvm_mips_callbacks->vcpu_uninit(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	int ret;

	guest_state_enter_irqoff();
	ret = kvm_mips_callbacks->vcpu_run(vcpu);
	guest_state_exit_irqoff();

	return ret;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int r = -EINTR;

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
	}

	if (!vcpu->wants_to_run)
		goto out;

	lose_fpu(1);

	local_irq_disable();
	guest_timing_enter_irqoff();
	trace_kvm_enter(vcpu);

	/*
	 * Make sure the read of VCPU requests in vcpu_run() callback is not
	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
	 * flush request while the requester sees the VCPU as outside of guest
	 * mode and not needing an IPI.
	 */
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	r = kvm_mips_vcpu_enter_exit(vcpu);

	/*
	 * We must ensure that any pending interrupts are taken before
	 * we exit guest timing so that timer ticks are accounted as
	 * guest time. Transiently unmask interrupts so that any
	 * pending interrupts are taken.
	 *
	 * TODO: is there a barrier which ensures that pending interrupts are
	 * recognised? Currently this just hopes that the CPU takes any pending
	 * interrupts between the enable and disable.
	 */
	local_irq_enable();
	local_irq_disable();

	trace_kvm_out(vcpu);
	guest_timing_exit_irqoff();
	local_irq_enable();

out:
	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);
	return r;
}
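
/*
 * Illustrative sketch (not part of this file): a minimal userspace loop
 * driving the ioctl above, assuming vcpu_fd from KVM_CREATE_VCPU and run
 * pointing at the mmap'd struct kvm_run:
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, NULL) < 0 && errno == EINTR)
 *			continue;		// e.g. signal_exits above
 *		if (run->exit_reason == KVM_EXIT_MMIO)
 *			emulate_mmio(run);	// hypothetical helper; the
 *						// completion is picked up by
 *						// the mmio_needed block above
 *	}
 */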

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
	    intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = kvm_get_vcpu(vcpu->kvm, irq->cpu);

	if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	rcuwait_wake_up(&dvcpu->wait);

	return 0;
}
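
/*
 * Illustrative sketch (not part of this file): with the default priority
 * table a positive irq number asserts a line and its negative deasserts
 * it, e.g. for the IO interrupt (2) on the calling vCPU:
 *
 *	struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 2 };
 *
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	// queue_io_int() path
 *	irq.irq = -2;
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	// dequeue_io_int() path
 */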

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

#ifndef CONFIG_CPU_MIPSR6
	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
#endif
	KVM_REG_MIPS_PC,
};

static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};

static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		/* odd doubles */
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
			ret += 16;
	}
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += kvm_mips_callbacks->num_regs(vcpu);

	return ret;
}

static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				continue;

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}

static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
#endif
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* registers to be handled specially */
	default:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}
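
/*
 * Illustrative sketch (not part of this file): userspace reaches the
 * function above through KVM_GET_ONE_REG, e.g. to read the guest PC:
 *
 *	__u64 pc;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_MIPS_PC,
 *		.addr = (__u64)(unsigned long)&pc,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */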

static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
#endif
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* registers to be handled specially */
	default:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	}
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
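
/*
 * Illustrative sketch (not part of this file): userspace opts in to guest
 * FPU state with KVM_ENABLE_CAP on the vCPU fd; note the checks above
 * require flags and args[0] to be zero:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };
 *
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */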

long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
			       unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_mips_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
			  irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_mips_set_reg(vcpu, &reg);
		else
			r = kvm_mips_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}

	vcpu_put(vcpu);
	return r;
}
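
/*
 * Illustrative sketch (not part of this file): KVM_GET_REG_LIST above
 * follows the usual two-call pattern, first to size the list (the call
 * fails with E2BIG but writes back the required n), then to fill it:
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 */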

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
	kvm_mips_callbacks->prepare_flush_shadow(kvm);
	return 1;
}

int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	int r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_IDS;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = kvm_mips_callbacks->check_extension(kvm, ext);
		break;
	}
	return r;
}
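
/*
 * Illustrative sketch (not part of this file): userspace probes these
 * capabilities on the /dev/kvm fd before relying on them, e.g.
 *
 *	int has_fpu = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_FPU);
 *
 * A return of 1 here reflects raw_cpu_has_fpu on the host.
 */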

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu) ||
		kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gprs[i],
		       vcpu->arch.gprs[i + 1],
		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = &vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

static void kvm_mips_set_c0_status(void)
{
	u32 status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}

/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
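/* e.g. a pending signal exits to userspace as (-EINTR << 2) | RESUME_HOST */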
static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	u32 inst;
	int ret = RESUME_GUEST;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
			cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	switch (exccode) {
	case EXCCODE_INT:
		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case EXCCODE_CPU:
		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case EXCCODE_MOD:
		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault:  cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case EXCCODE_TLBL:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case EXCCODE_ADES:
		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case EXCCODE_ADEL:
		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	case EXCCODE_GE:
		/* defer exit accounting to handler */
		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
		break;

	default:
		if (cause & CAUSEF_BD)
			opc += 1;
		inst = 0;
		kvm_get_badinstr(opc, vcpu, &inst);
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#x\n",
			exccode, opc, inst, badvaddr,
			kvm_read_c0_guest_status(&vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	}

	local_irq_disable();

	if (ret == RESUME_GUEST)
		kvm_vz_acquire_htimer(vcpu);

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
		}
	}

	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		/*
		 * Make sure the read of VCPU requests in vcpu_reenter()
		 * callback is not reordered ahead of the write to vcpu->mode,
		 * or we could miss a TLB flush request while the requester sees
		 * the VCPU as outside of guest mode and not needing an IPI.
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		kvm_mips_callbacks->vcpu_reenter(vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}
	return ret;
}

int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
	int ret;

	guest_state_exit_irqoff();
	ret = __kvm_mips_handle_exit(vcpu);
	guest_state_enter_irqoff();

	return ret;
}

/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
	} else {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
	}

	preempt_enable();
}

#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
		break;
	case 0:
		/* Neither FPU nor MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);
		break;
	default:
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
		break;
	}

	preempt_enable();
}
#endif

/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
}

/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * With T&E, FPU & MSA get disabled in root context (hardware) when it
	 * is disabled in guest context (software), but the register state in
	 * the hardware may still be in use.
	 * This is why we explicitly re-enable the hardware before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}

/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};

static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO_1]  = C_IRQ0,
	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};

static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO_1]  = C_IRQ0,
	[MIPS_EXC_INT_IO_2]  = C_IRQ1,
	[MIPS_EXC_INT_IPI_1] = C_IRQ4,
};

u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;

u32 kvm_irq_to_priority(u32 irq)
{
	int i;

	for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
		if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
			return i;
	}

	return MIPS_EXC_MAX;
}
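
/*
 * Worked example (illustrative, assuming the usual C_IRQn Cause encodings
 * IP2..IP7): with the default table above, irq 7 tests 1 << 15 == C_IRQ5
 * and maps back to MIPS_EXC_INT_TIMER, while irq 2 (1 << 10 == C_IRQ0)
 * maps to MIPS_EXC_INT_IO_1.
 */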

static int __init kvm_mips_init(void)
{
	int ret;

	if (cpu_has_mmid) {
		pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
		return -EOPNOTSUPP;
	}

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_mips_emulation_init();
	if (ret)
		return ret;

	if (boot_cpu_type() == CPU_LOONGSON64)
		kvm_priority_to_irq = kvm_loongson3_priority_to_irq;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	ret = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret) {
		unregister_die_notifier(&kvm_mips_csr_die_notifier);
		return ret;
	}
	return 0;
}

static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);