v6.8
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * KVM/MIPS: Instruction/Exception emulation
   7 *
   8 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
   9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
  10 */
  11
  12#include <linux/errno.h>
  13#include <linux/err.h>
  14#include <linux/ktime.h>
  15#include <linux/kvm_host.h>
  16#include <linux/vmalloc.h>
  17#include <linux/fs.h>
  18#include <linux/memblock.h>
  19#include <linux/random.h>
  20#include <asm/page.h>
  21#include <asm/cacheflush.h>
  22#include <asm/cacheops.h>
  23#include <asm/cpu-info.h>
  24#include <asm/mmu_context.h>
  25#include <asm/tlbflush.h>
  26#include <asm/inst.h>
  27
  28#undef CONFIG_MIPS_MT
  29#include <asm/r4kcache.h>
  30#define CONFIG_MIPS_MT
  31
  32#include "interrupt.h"
  33
  34#include "trace.h"
  35
  36/*
   37 * Compute the return address, emulating the branch if required.
   38 * This function should only be called when a branch delay slot is active.
  39 */
  40static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
  41				  unsigned long *out)
  42{
  43	unsigned int dspcontrol;
  44	union mips_instruction insn;
  45	struct kvm_vcpu_arch *arch = &vcpu->arch;
  46	long epc = instpc;
  47	long nextpc;
  48	int err;
  49
  50	if (epc & 3) {
  51		kvm_err("%s: unaligned epc\n", __func__);
  52		return -EINVAL;
  53	}
  54
  55	/* Read the instruction */
  56	err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
  57	if (err)
  58		return err;
  59
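	/*
	 * Taken branches below resolve to epc + 4 + (simmediate << 2), i.e.
	 * the delay-slot address plus the sign-extended, word-scaled offset;
	 * not-taken branches fall through to epc + 8, past the delay slot.
	 */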
  60	switch (insn.i_format.opcode) {
  61		/* jr and jalr are in r_format format. */
  62	case spec_op:
  63		switch (insn.r_format.func) {
  64		case jalr_op:
  65			arch->gprs[insn.r_format.rd] = epc + 8;
  66			fallthrough;
  67		case jr_op:
  68			nextpc = arch->gprs[insn.r_format.rs];
  69			break;
  70		default:
  71			return -EINVAL;
  72		}
  73		break;
  74
  75		/*
  76		 * This group contains:
  77		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
  78		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
  79		 */
  80	case bcond_op:
  81		switch (insn.i_format.rt) {
  82		case bltz_op:
  83		case bltzl_op:
  84			if ((long)arch->gprs[insn.i_format.rs] < 0)
  85				epc = epc + 4 + (insn.i_format.simmediate << 2);
  86			else
  87				epc += 8;
  88			nextpc = epc;
  89			break;
  90
  91		case bgez_op:
  92		case bgezl_op:
  93			if ((long)arch->gprs[insn.i_format.rs] >= 0)
  94				epc = epc + 4 + (insn.i_format.simmediate << 2);
  95			else
  96				epc += 8;
  97			nextpc = epc;
  98			break;
  99
 100		case bltzal_op:
 101		case bltzall_op:
 102			arch->gprs[31] = epc + 8;
 103			if ((long)arch->gprs[insn.i_format.rs] < 0)
 104				epc = epc + 4 + (insn.i_format.simmediate << 2);
 105			else
 106				epc += 8;
 107			nextpc = epc;
 108			break;
 109
 110		case bgezal_op:
 111		case bgezall_op:
 112			arch->gprs[31] = epc + 8;
 113			if ((long)arch->gprs[insn.i_format.rs] >= 0)
 114				epc = epc + 4 + (insn.i_format.simmediate << 2);
 115			else
 116				epc += 8;
 117			nextpc = epc;
 118			break;
 119		case bposge32_op:
 120			if (!cpu_has_dsp) {
 121				kvm_err("%s: DSP branch but not DSP ASE\n",
 122					__func__);
 123				return -EINVAL;
 124			}
 125
 126			dspcontrol = rddsp(0x01);
 127
 128			if (dspcontrol >= 32)
 129				epc = epc + 4 + (insn.i_format.simmediate << 2);
 130			else
 131				epc += 8;
 132			nextpc = epc;
 133			break;
 134		default:
 135			return -EINVAL;
 136		}
 137		break;
 138
 139		/* These are unconditional and in j_format. */
 140	case jal_op:
 141		arch->gprs[31] = instpc + 8;
 142		fallthrough;
 143	case j_op:
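		/*
		 * J/JAL targets keep the top four bits of the delay-slot
		 * address (epc + 4) and replace the rest with the 26-bit
		 * target index << 2, which is what the shift-by-28 pair
		 * below computes.
		 */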
 144		epc += 4;
 145		epc >>= 28;
 146		epc <<= 28;
 147		epc |= (insn.j_format.target << 2);
 148		nextpc = epc;
 149		break;
 150
 151		/* These are conditional and in i_format. */
 152	case beq_op:
 153	case beql_op:
 154		if (arch->gprs[insn.i_format.rs] ==
 155		    arch->gprs[insn.i_format.rt])
 156			epc = epc + 4 + (insn.i_format.simmediate << 2);
 157		else
 158			epc += 8;
 159		nextpc = epc;
 160		break;
 161
 162	case bne_op:
 163	case bnel_op:
 164		if (arch->gprs[insn.i_format.rs] !=
 165		    arch->gprs[insn.i_format.rt])
 166			epc = epc + 4 + (insn.i_format.simmediate << 2);
 167		else
 168			epc += 8;
 169		nextpc = epc;
 170		break;
 171
 172	case blez_op:	/* POP06 */
 173#ifndef CONFIG_CPU_MIPSR6
 174	case blezl_op:	/* removed in R6 */
 175#endif
 176		if (insn.i_format.rt != 0)
 177			goto compact_branch;
 178		if ((long)arch->gprs[insn.i_format.rs] <= 0)
 179			epc = epc + 4 + (insn.i_format.simmediate << 2);
 180		else
 181			epc += 8;
 182		nextpc = epc;
 183		break;
 184
 185	case bgtz_op:	/* POP07 */
 186#ifndef CONFIG_CPU_MIPSR6
 187	case bgtzl_op:	/* removed in R6 */
 188#endif
 189		if (insn.i_format.rt != 0)
 190			goto compact_branch;
 191		if ((long)arch->gprs[insn.i_format.rs] > 0)
 192			epc = epc + 4 + (insn.i_format.simmediate << 2);
 193		else
 194			epc += 8;
 195		nextpc = epc;
 196		break;
 197
 198		/* And now the FPA/cp1 branch instructions. */
 199	case cop1_op:
 200		kvm_err("%s: unsupported cop1_op\n", __func__);
 201		return -EINVAL;
 202
 203#ifdef CONFIG_CPU_MIPSR6
 204	/* R6 added the following compact branches with forbidden slots */
 205	case blezl_op:	/* POP26 */
 206	case bgtzl_op:	/* POP27 */
 207		/* only rt == 0 isn't compact branch */
 208		if (insn.i_format.rt != 0)
 209			goto compact_branch;
 210		return -EINVAL;
 211	case pop10_op:
 212	case pop30_op:
 213		/* only rs == rt == 0 is reserved, rest are compact branches */
 214		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
 215			goto compact_branch;
 216		return -EINVAL;
 217	case pop66_op:
 218	case pop76_op:
 219		/* only rs == 0 isn't compact branch */
 220		if (insn.i_format.rs != 0)
 221			goto compact_branch;
 222		return -EINVAL;
 223compact_branch:
 224		/*
 225		 * If we've hit an exception on the forbidden slot, then
 226		 * the branch must not have been taken.
 227		 */
 228		epc += 8;
 229		nextpc = epc;
 230		break;
 231#else
 232compact_branch:
 233		/* Fall through - Compact branches not supported before R6 */
 234#endif
 235	default:
 236		return -EINVAL;
 237	}
 238
 239	*out = nextpc;
 240	return 0;
 241}
 242
 243enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
 244{
 245	int err;
 246
 247	if (cause & CAUSEF_BD) {
 248		err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
 249					     &vcpu->arch.pc);
 250		if (err)
 251			return EMULATE_FAIL;
 252	} else {
 253		vcpu->arch.pc += 4;
 254	}
 255
 256	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
 257
 258	return EMULATE_DONE;
 259}
 260
 261/**
 262 * kvm_get_badinstr() - Get bad instruction encoding.
 263 * @opc:	Guest pointer to faulting instruction.
 264 * @vcpu:	KVM VCPU information.
  265 * @out:	Output for the instruction encoding.
  266 *
  267 * Gets the instruction encoding of the faulting instruction from the saved
  268 * BadInstr register value.
  269 *
  270 * Returns:	0 on success (*@out is set), -EINVAL if the CPU has no BadInstr.
 271 */
 272int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
 273{
 274	if (cpu_has_badinstr) {
 275		*out = vcpu->arch.host_cp0_badinstr;
 276		return 0;
 277	} else {
 278		WARN_ONCE(1, "CPU doesn't have BadInstr register\n");
 279		return -EINVAL;
 280	}
 281}
 282
 283/**
 284 * kvm_get_badinstrp() - Get bad prior instruction encoding.
 285 * @opc:	Guest pointer to prior faulting instruction.
 286 * @vcpu:	KVM VCPU information.
  287 * @out:	Output for the instruction encoding.
  288 *
  289 * Gets the instruction encoding of the prior faulting instruction (the branch
  290 * containing the delay slot which faulted) from the saved BadInstrP register.
  291 *
  292 * Returns:	0 on success (*@out is set), -EINVAL if the CPU has no BadInstrP.
 293 */
 294int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
 295{
 296	if (cpu_has_badinstrp) {
 297		*out = vcpu->arch.host_cp0_badinstrp;
 298		return 0;
 299	} else {
 300		WARN_ONCE(1, "CPU doesn't have BadInstrp register\n");
 301		return -EINVAL;
 302	}
 303}
 304
 305/**
 306 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 307 * @vcpu:	Virtual CPU.
 308 *
 309 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 310 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 311 *		0 otherwise (in which case CP0_Count timer is running).
 312 */
 313int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
 314{
 315	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 316
 317	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
 318		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
 319}
 320
 321/**
 322 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 323 *
 324 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 325 *
 326 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 327 */
 328static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
 329{
 330	s64 now_ns, periods;
 331	u64 delta;
 332
 333	now_ns = ktime_to_ns(now);
 334	delta = now_ns + vcpu->arch.count_dyn_bias;
 335
 336	if (delta >= vcpu->arch.count_period) {
 337		/* If delta is out of safe range the bias needs adjusting */
 338		periods = div64_s64(now_ns, vcpu->arch.count_period);
 339		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
 340		/* Recalculate delta with new bias */
 341		delta = now_ns + vcpu->arch.count_dyn_bias;
 342	}
 343
 344	/*
 345	 * We've ensured that:
 346	 *   delta < count_period
 347	 *
 348	 * Therefore the intermediate delta*count_hz will never overflow since
 349	 * at the boundary condition:
 350	 *   delta = count_period
 351	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
 352	 *   delta * count_hz = NSEC_PER_SEC * 2^32
 353	 */
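	/*
	 * Illustrative example (100 MHz is an assumed figure, not from this
	 * file): count_period is then ~42.9s, so delta * count_hz stays
	 * below NSEC_PER_SEC * 2^32 (~4.3e18), well within u64 range.
	 */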
 354	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
 355}
 356
 357/**
 358 * kvm_mips_count_time() - Get effective current time.
 359 * @vcpu:	Virtual CPU.
 360 *
 361 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 362 * except when the master disable bit is set in count_ctl, in which case it is
 363 * count_resume, i.e. the time that the count was disabled.
 364 *
 365 * Returns:	Effective monotonic ktime for CP0_Count.
 366 */
 367static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
 368{
 369	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
 370		return vcpu->arch.count_resume;
 371
 372	return ktime_get();
 373}
 374
 375/**
 376 * kvm_mips_read_count_running() - Read the current count value as if running.
 377 * @vcpu:	Virtual CPU.
 378 * @now:	Kernel time to read CP0_Count at.
 379 *
 380 * Returns the current guest CP0_Count register at time @now and handles if the
 381 * timer interrupt is pending and hasn't been handled yet.
 382 *
 383 * Returns:	The current value of the guest CP0_Count register.
 384 */
 385static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
 386{
 387	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 388	ktime_t expires, threshold;
 389	u32 count, compare;
 390	int running;
 391
 392	/* Calculate the biased and scaled guest CP0_Count */
 393	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
 394	compare = kvm_read_c0_guest_compare(cop0);
 395
 396	/*
 397	 * Find whether CP0_Count has reached the closest timer interrupt. If
 398	 * not, we shouldn't inject it.
 399	 */
 400	if ((s32)(count - compare) < 0)
 401		return count;
 402
 403	/*
 404	 * The CP0_Count we're going to return has already reached the closest
 405	 * timer interrupt. Quickly check if it really is a new interrupt by
 406	 * looking at whether the interval until the hrtimer expiry time is
 407	 * less than 1/4 of the timer period.
 408	 */
 409	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
 410	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
 411	if (ktime_before(expires, threshold)) {
 412		/*
 413		 * Cancel it while we handle it so there's no chance of
 414		 * interference with the timeout handler.
 415		 */
 416		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
 417
 418		/* Nothing should be waiting on the timeout */
 419		kvm_mips_callbacks->queue_timer_int(vcpu);
 420
 421		/*
 422		 * Restart the timer if it was running based on the expiry time
 423		 * we read, so that we don't push it back 2 periods.
 424		 */
 425		if (running) {
 426			expires = ktime_add_ns(expires,
 427					       vcpu->arch.count_period);
 428			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
 429				      HRTIMER_MODE_ABS);
 430		}
 431	}
 432
 433	return count;
 434}
 435
 436/**
 437 * kvm_mips_read_count() - Read the current count value.
 438 * @vcpu:	Virtual CPU.
 439 *
 440 * Read the current guest CP0_Count value, taking into account whether the timer
 441 * is stopped.
 442 *
 443 * Returns:	The current guest CP0_Count value.
 444 */
 445u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
 446{
 447	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 448
 449	/* If count disabled just read static copy of count */
 450	if (kvm_mips_count_disabled(vcpu))
 451		return kvm_read_c0_guest_count(cop0);
 452
 453	return kvm_mips_read_count_running(vcpu, ktime_get());
 454}
 455
 456/**
 457 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 458 * @vcpu:	Virtual CPU.
 459 * @count:	Output pointer for CP0_Count value at point of freeze.
 460 *
 461 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 462 * at the point it was frozen. It is guaranteed that any pending interrupts at
 463 * the point it was frozen are handled, and none after that point.
 464 *
 465 * This is useful where the time/CP0_Count is needed in the calculation of the
 466 * new parameters.
 467 *
 468 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 469 *
 470 * Returns:	The ktime at the point of freeze.
 471 */
 472ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
 473{
 474	ktime_t now;
 475
 476	/* stop hrtimer before finding time */
 477	hrtimer_cancel(&vcpu->arch.comparecount_timer);
 478	now = ktime_get();
 479
 480	/* find count at this point and handle pending hrtimer */
 481	*count = kvm_mips_read_count_running(vcpu, now);
 482
 483	return now;
 484}
 485
 486/**
 487 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 488 * @vcpu:	Virtual CPU.
 489 * @now:	ktime at point of resume.
 490 * @count:	CP0_Count at point of resume.
 491 *
 492 * Resumes the timer and updates the timer expiry based on @now and @count.
  493 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
  494 * parameters need to be changed.
  495 *
  496 * It is guaranteed that a timer interrupt immediately after resume will be
  497 * handled, but not if CP0_Compare is exactly at @count. That case is already
  498 * handled by kvm_mips_freeze_hrtimer().
 499 *
 500 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 501 */
 502static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
 503				    ktime_t now, u32 count)
 504{
 505	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 506	u32 compare;
 507	u64 delta;
 508	ktime_t expire;
 509
 510	/* Calculate timeout (wrap 0 to 2^32) */
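	/*
	 * In u32 arithmetic, (compare - count - 1) + 1 maps compare == count
	 * to a full 2^32 ticks rather than zero, so a just-matched Compare
	 * schedules the next interrupt one whole period away instead of
	 * immediately.
	 */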
 511	compare = kvm_read_c0_guest_compare(cop0);
 512	delta = (u64)(u32)(compare - count - 1) + 1;
 513	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
 514	expire = ktime_add_ns(now, delta);
 515
 516	/* Update hrtimer to use new timeout */
 517	hrtimer_cancel(&vcpu->arch.comparecount_timer);
 518	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
 519}
 520
 521/**
 522 * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
 523 * @vcpu:	Virtual CPU.
 524 * @before:	Time before Count was saved, lower bound of drift calculation.
 525 * @count:	CP0_Count at point of restore.
 526 * @min_drift:	Minimum amount of drift permitted before correction.
 527 *		Must be <= 0.
 528 *
 529 * Restores the timer from a particular @count, accounting for drift. This can
  530 * be used in conjunction with kvm_mips_freeze_hrtimer() when a hardware timer is
 531 * to be used for a period of time, but the exact ktime corresponding to the
 532 * final Count that must be restored is not known.
 533 *
 534 * It is guaranteed that a timer interrupt immediately after restore will be
 535 * handled, but not if CP0_Compare is exactly at @count. That case should
 536 * already be handled when the hardware timer state is saved.
 537 *
 538 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
 539 * stopped).
 540 *
 541 * Returns:	Amount of correction to count_bias due to drift.
 542 */
 543int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
 544			     u32 count, int min_drift)
 545{
 546	ktime_t now, count_time;
 547	u32 now_count, before_count;
 548	u64 delta;
 549	int drift, ret = 0;
 550
 551	/* Calculate expected count at before */
 552	before_count = vcpu->arch.count_bias +
 553			kvm_mips_ktime_to_count(vcpu, before);
 554
 555	/*
 556	 * Detect significantly negative drift, where count is lower than
 557	 * expected. Some negative drift is expected when hardware counter is
 558	 * set after kvm_mips_freeze_timer(), and it is harmless to allow the
 559	 * time to jump forwards a little, within reason. If the drift is too
 560	 * significant, adjust the bias to avoid a big Guest.CP0_Count jump.
 561	 */
 562	drift = count - before_count;
 563	if (drift < min_drift) {
 564		count_time = before;
 565		vcpu->arch.count_bias += drift;
 566		ret = drift;
 567		goto resume;
 568	}
 569
 570	/* Calculate expected count right now */
 571	now = ktime_get();
 572	now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
 573
 574	/*
 575	 * Detect positive drift, where count is higher than expected, and
 576	 * adjust the bias to avoid guest time going backwards.
 577	 */
 578	drift = count - now_count;
 579	if (drift > 0) {
 580		count_time = now;
 581		vcpu->arch.count_bias += drift;
 582		ret = drift;
 583		goto resume;
 584	}
 585
 586	/* Subtract nanosecond delta to find ktime when count was read */
 587	delta = (u64)(u32)(now_count - count);
 588	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
 589	count_time = ktime_sub_ns(now, delta);
 590
 591resume:
 592	/* Resume using the calculated ktime */
 593	kvm_mips_resume_hrtimer(vcpu, count_time, count);
 594	return ret;
 595}
 596
 597/**
 598 * kvm_mips_write_count() - Modify the count and update timer.
 599 * @vcpu:	Virtual CPU.
 600 * @count:	Guest CP0_Count value to set.
 601 *
 602 * Sets the CP0_Count value and updates the timer accordingly.
 603 */
 604void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
 605{
 606	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 607	ktime_t now;
 608
 609	/* Calculate bias */
 610	now = kvm_mips_count_time(vcpu);
 611	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
 612
 613	if (kvm_mips_count_disabled(vcpu))
 614		/* The timer's disabled, adjust the static count */
 615		kvm_write_c0_guest_count(cop0, count);
 616	else
 617		/* Update timeout */
 618		kvm_mips_resume_hrtimer(vcpu, now, count);
 619}
 620
 621/**
 622 * kvm_mips_init_count() - Initialise timer.
 623 * @vcpu:	Virtual CPU.
 624 * @count_hz:	Frequency of timer.
 625 *
 626 * Initialise the timer to the specified frequency, zero it, and set it going if
 627 * it's enabled.
 628 */
 629void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
 630{
 631	vcpu->arch.count_hz = count_hz;
 632	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
 633	vcpu->arch.count_dyn_bias = 0;
 634
 635	/* Starting at 0 */
 636	kvm_mips_write_count(vcpu, 0);
 637}
 638
 639/**
 640 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 641 * @vcpu:	Virtual CPU.
 642 * @count_hz:	Frequency of CP0_Count timer in Hz.
 643 *
 644 * Change the frequency of the CP0_Count timer. This is done atomically so that
 645 * CP0_Count is continuous and no timer interrupt is lost.
 646 *
 647 * Returns:	-EINVAL if @count_hz is out of range.
 648 *		0 on success.
 649 */
 650int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
 651{
 652	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 653	int dc;
 654	ktime_t now;
 655	u32 count;
 656
 657	/* ensure the frequency is in a sensible range... */
 658	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
 659		return -EINVAL;
 660	/* ... and has actually changed */
 661	if (vcpu->arch.count_hz == count_hz)
 662		return 0;
 663
 664	/* Safely freeze timer so we can keep it continuous */
 665	dc = kvm_mips_count_disabled(vcpu);
 666	if (dc) {
 667		now = kvm_mips_count_time(vcpu);
 668		count = kvm_read_c0_guest_count(cop0);
 669	} else {
 670		now = kvm_mips_freeze_hrtimer(vcpu, &count);
 671	}
 672
 673	/* Update the frequency */
 674	vcpu->arch.count_hz = count_hz;
 675	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
 676	vcpu->arch.count_dyn_bias = 0;
 677
 678	/* Calculate adjusted bias so dynamic count is unchanged */
 679	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
 680
 681	/* Update and resume hrtimer */
 682	if (!dc)
 683		kvm_mips_resume_hrtimer(vcpu, now, count);
 684	return 0;
 685}
 686
 687/**
 688 * kvm_mips_write_compare() - Modify compare and update timer.
 689 * @vcpu:	Virtual CPU.
 690 * @compare:	New CP0_Compare value.
 691 * @ack:	Whether to acknowledge timer interrupt.
 692 *
 693 * Update CP0_Compare to a new value and update the timeout.
 694 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 695 * any pending timer interrupt is preserved.
 696 */
 697void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
 698{
 699	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 700	int dc;
 701	u32 old_compare = kvm_read_c0_guest_compare(cop0);
 702	s32 delta = compare - old_compare;
 703	u32 cause;
 704	ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */
 705	u32 count;
 706
 707	/* if unchanged, must just be an ack */
 708	if (old_compare == compare) {
 709		if (!ack)
 710			return;
 711		kvm_mips_callbacks->dequeue_timer_int(vcpu);
 712		kvm_write_c0_guest_compare(cop0, compare);
 713		return;
 714	}
 715
 716	/*
 717	 * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
 718	 * too to prevent guest CP0_Count hitting guest CP0_Compare.
 719	 *
 720	 * The new GTOffset corresponds to the new value of CP0_Compare, and is
 721	 * set prior to it being written into the guest context. We disable
 722	 * preemption until the new value is written to prevent restore of a
 723	 * GTOffset corresponding to the old CP0_Compare value.
 724	 */
 725	if (delta > 0) {
 726		preempt_disable();
 727		write_c0_gtoffset(compare - read_c0_count());
 728		back_to_back_c0_hazard();
 729	}
 730
 731	/* freeze_hrtimer() takes care of timer interrupts <= count */
 732	dc = kvm_mips_count_disabled(vcpu);
 733	if (!dc)
 734		now = kvm_mips_freeze_hrtimer(vcpu, &count);
 735
 736	if (ack)
 737		kvm_mips_callbacks->dequeue_timer_int(vcpu);
 738	else
 739		/*
 740		 * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
 741		 * preserve guest CP0_Cause.TI if we don't want to ack it.
 742		 */
 743		cause = kvm_read_c0_guest_cause(cop0);
 744
 745	kvm_write_c0_guest_compare(cop0, compare);
 746
 747	if (delta > 0)
 748		preempt_enable();
 749
 750	back_to_back_c0_hazard();
 751
 752	if (!ack && cause & CAUSEF_TI)
 753		kvm_write_c0_guest_cause(cop0, cause);
 754
 755	/* resume_hrtimer() takes care of timer interrupts > count */
 756	if (!dc)
 757		kvm_mips_resume_hrtimer(vcpu, now, count);
 758
 759	/*
 760	 * If guest CP0_Compare is moving backward, we delay CP0_GTOffset change
 761	 * until after the new CP0_Compare is written, otherwise new guest
 762	 * CP0_Count could hit new guest CP0_Compare.
 763	 */
 764	if (delta <= 0)
 765		write_c0_gtoffset(compare - read_c0_count());
 766}
 767
 768/**
 769 * kvm_mips_count_disable() - Disable count.
 770 * @vcpu:	Virtual CPU.
 771 *
 772 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 773 * time will be handled but not after.
 774 *
 775 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 776 * count_ctl.DC has been set (count disabled).
 777 *
 778 * Returns:	The time that the timer was stopped.
 779 */
 780static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
 781{
 782	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 783	u32 count;
 784	ktime_t now;
 785
 786	/* Stop hrtimer */
 787	hrtimer_cancel(&vcpu->arch.comparecount_timer);
 788
 789	/* Set the static count from the dynamic count, handling pending TI */
 790	now = ktime_get();
 791	count = kvm_mips_read_count_running(vcpu, now);
 792	kvm_write_c0_guest_count(cop0, count);
 793
 794	return now;
 795}
 796
 797/**
 798 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 799 * @vcpu:	Virtual CPU.
 800 *
 801 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 802 * before the final stop time will be handled if the timer isn't disabled by
 803 * count_ctl.DC, but not after.
 804 *
 805 * Assumes CP0_Cause.DC is clear (count enabled).
 806 */
 807void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
 808{
 809	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 810
 811	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
 812	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
 813		kvm_mips_count_disable(vcpu);
 814}
 815
 816/**
 817 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 818 * @vcpu:	Virtual CPU.
 819 *
 820 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 821 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 822 * potentially before even returning, so the caller should be careful with
 823 * ordering of CP0_Cause modifications so as not to lose it.
 824 *
 825 * Assumes CP0_Cause.DC is set (count disabled).
 826 */
 827void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
 828{
 829	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 830	u32 count;
 831
 832	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
 833
 834	/*
 835	 * Set the dynamic count to match the static count.
 836	 * This starts the hrtimer if count_ctl.DC allows it.
 837	 * Otherwise it conveniently updates the biases.
 838	 */
 839	count = kvm_read_c0_guest_count(cop0);
 840	kvm_mips_write_count(vcpu, count);
 841}
 842
 843/**
 844 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 845 * @vcpu:	Virtual CPU.
 846 * @count_ctl:	Count control register new value.
 847 *
 848 * Set the count control KVM register. The timer is updated accordingly.
 849 *
 850 * Returns:	-EINVAL if reserved bits are set.
 851 *		0 on success.
 852 */
 853int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
 854{
 855	struct mips_coproc *cop0 = &vcpu->arch.cop0;
 856	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
 857	s64 delta;
 858	ktime_t expire, now;
 859	u32 count, compare;
 860
 861	/* Only allow defined bits to be changed */
 862	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
 863		return -EINVAL;
 864
 865	/* Apply new value */
 866	vcpu->arch.count_ctl = count_ctl;
 867
 868	/* Master CP0_Count disable */
 869	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
 870		/* Is CP0_Cause.DC already disabling CP0_Count? */
 871		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
 872			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
 873				/* Just record the current time */
 874				vcpu->arch.count_resume = ktime_get();
 875		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
 876			/* disable timer and record current time */
 877			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
 878		} else {
 879			/*
 880			 * Calculate timeout relative to static count at resume
 881			 * time (wrap 0 to 2^32).
 882			 */
 883			count = kvm_read_c0_guest_count(cop0);
 884			compare = kvm_read_c0_guest_compare(cop0);
 885			delta = (u64)(u32)(compare - count - 1) + 1;
 886			delta = div_u64(delta * NSEC_PER_SEC,
 887					vcpu->arch.count_hz);
 888			expire = ktime_add_ns(vcpu->arch.count_resume, delta);
 889
 890			/* Handle pending interrupt */
 891			now = ktime_get();
 892			if (ktime_compare(now, expire) >= 0)
 893				/* Nothing should be waiting on the timeout */
 894				kvm_mips_callbacks->queue_timer_int(vcpu);
 895
 896			/* Resume hrtimer without changing bias */
 897			count = kvm_mips_read_count_running(vcpu, now);
 898			kvm_mips_resume_hrtimer(vcpu, now, count);
 899		}
 900	}
 901
 902	return 0;
 903}
 904
 905/**
 906 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 907 * @vcpu:		Virtual CPU.
 908 * @count_resume:	Count resume register new value.
 909 *
 910 * Set the count resume KVM register.
 911 *
 912 * Returns:	-EINVAL if out of valid range (0..now).
 913 *		0 on success.
 914 */
 915int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
 916{
 917	/*
 918	 * It doesn't make sense for the resume time to be in the future, as it
 919	 * would be possible for the next interrupt to be more than a full
 920	 * period in the future.
 921	 */
 922	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
 923		return -EINVAL;
 924
 925	vcpu->arch.count_resume = ns_to_ktime(count_resume);
 926	return 0;
 927}
 928
 929/**
 930 * kvm_mips_count_timeout() - Push timer forward on timeout.
 931 * @vcpu:	Virtual CPU.
 932 *
  933 * Handle an hrtimer event by pushing the hrtimer forward one count period.
 934 *
 935 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 936 */
 937enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
 938{
 939	/* Add the Count period to the current expiry time */
 940	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
 941			       vcpu->arch.count_period);
 942	return HRTIMER_RESTART;
 943}
 944
 945enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 946{
 947	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
 948		  vcpu->arch.pending_exceptions);
 949
 950	++vcpu->stat.wait_exits;
 951	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
 952	if (!vcpu->arch.pending_exceptions) {
 953		kvm_vz_lose_htimer(vcpu);
 954		vcpu->arch.wait = 1;
 955		kvm_vcpu_halt(vcpu);
 956
 957		/*
  958		 * If we are runnable again, go back out to user space to
  959		 * check whether any I/O interrupts are pending.
 960		 */
 961		if (kvm_arch_vcpu_runnable(vcpu))
 962			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 963	}
 964
 965	return EMULATE_DONE;
 966}
 967
 968enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
 969					     u32 cause,
 970					     struct kvm_vcpu *vcpu)
 971{
 972	int r;
 973	enum emulation_result er;
 974	u32 rt;
 975	struct kvm_run *run = vcpu->run;
 976	void *data = run->mmio.data;
 977	unsigned int imme;
 978	unsigned long curr_pc;
 979
 980	/*
  981	 * Update PC and hold onto the current PC in case there is
  982	 * an error and we want to roll back the PC
 983	 */
 984	curr_pc = vcpu->arch.pc;
 985	er = update_pc(vcpu, cause);
 986	if (er == EMULATE_FAIL)
 987		return er;
 988
 989	rt = inst.i_format.rt;
 990
 991	run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
 992						vcpu->arch.host_cp0_badvaddr);
 993	if (run->mmio.phys_addr == KVM_INVALID_ADDR)
 994		goto out_fail;
 995
 996	switch (inst.i_format.opcode) {
 997#if defined(CONFIG_64BIT)
 998	case sd_op:
 999		run->mmio.len = 8;
1000		*(u64 *)data = vcpu->arch.gprs[rt];
1001
1002		kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
1003			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1004			  vcpu->arch.gprs[rt], *(u64 *)data);
1005		break;
1006#endif
1007
1008	case sw_op:
1009		run->mmio.len = 4;
1010		*(u32 *)data = vcpu->arch.gprs[rt];
1011
1012		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1013			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1014			  vcpu->arch.gprs[rt], *(u32 *)data);
1015		break;
1016
1017	case sh_op:
1018		run->mmio.len = 2;
1019		*(u16 *)data = vcpu->arch.gprs[rt];
1020
1021		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1022			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1023			  vcpu->arch.gprs[rt], *(u16 *)data);
1024		break;
1025
1026	case sb_op:
1027		run->mmio.len = 1;
1028		*(u8 *)data = vcpu->arch.gprs[rt];
1029
1030		kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1031			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1032			  vcpu->arch.gprs[rt], *(u8 *)data);
1033		break;
1034
1035	case swl_op:
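		/*
		 * SWL/SWR (and SDL/SDR below) emulation merges the stored
		 * bytes into the aligned word in the MMIO data buffer: the
		 * low address bits select how many bytes of the register are
		 * written and into which byte lanes.
		 */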
1036		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1037					vcpu->arch.host_cp0_badvaddr) & (~0x3);
1038		run->mmio.len = 4;
1039		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1040		switch (imme) {
1041		case 0:
1042			*(u32 *)data = ((*(u32 *)data) & 0xffffff00) |
1043					(vcpu->arch.gprs[rt] >> 24);
1044			break;
1045		case 1:
1046			*(u32 *)data = ((*(u32 *)data) & 0xffff0000) |
1047					(vcpu->arch.gprs[rt] >> 16);
1048			break;
1049		case 2:
1050			*(u32 *)data = ((*(u32 *)data) & 0xff000000) |
1051					(vcpu->arch.gprs[rt] >> 8);
1052			break;
1053		case 3:
1054			*(u32 *)data = vcpu->arch.gprs[rt];
1055			break;
1056		default:
1057			break;
1058		}
1059
1060		kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1061			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1062			  vcpu->arch.gprs[rt], *(u32 *)data);
1063		break;
1064
1065	case swr_op:
1066		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1067					vcpu->arch.host_cp0_badvaddr) & (~0x3);
1068		run->mmio.len = 4;
1069		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1070		switch (imme) {
1071		case 0:
1072			*(u32 *)data = vcpu->arch.gprs[rt];
1073			break;
1074		case 1:
1075			*(u32 *)data = ((*(u32 *)data) & 0xff) |
1076					(vcpu->arch.gprs[rt] << 8);
1077			break;
1078		case 2:
1079			*(u32 *)data = ((*(u32 *)data) & 0xffff) |
1080					(vcpu->arch.gprs[rt] << 16);
1081			break;
1082		case 3:
1083			*(u32 *)data = ((*(u32 *)data) & 0xffffff) |
1084					(vcpu->arch.gprs[rt] << 24);
1085			break;
1086		default:
1087			break;
1088		}
1089
1090		kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1091			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1092			  vcpu->arch.gprs[rt], *(u32 *)data);
1093		break;
1094
1095#if defined(CONFIG_64BIT)
1096	case sdl_op:
1097		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1098					vcpu->arch.host_cp0_badvaddr) & (~0x7);
1099
1100		run->mmio.len = 8;
1101		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1102		switch (imme) {
1103		case 0:
1104			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) |
1105					((vcpu->arch.gprs[rt] >> 56) & 0xff);
1106			break;
1107		case 1:
1108			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) |
1109					((vcpu->arch.gprs[rt] >> 48) & 0xffff);
1110			break;
1111		case 2:
1112			*(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) |
1113					((vcpu->arch.gprs[rt] >> 40) & 0xffffff);
1114			break;
1115		case 3:
1116			*(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) |
1117					((vcpu->arch.gprs[rt] >> 32) & 0xffffffff);
1118			break;
1119		case 4:
1120			*(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) |
1121					((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff);
1122			break;
1123		case 5:
1124			*(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) |
1125					((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff);
1126			break;
1127		case 6:
1128			*(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) |
1129					((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff);
1130			break;
1131		case 7:
1132			*(u64 *)data = vcpu->arch.gprs[rt];
1133			break;
1134		default:
1135			break;
1136		}
1137
1138		kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n",
1139			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1140			  vcpu->arch.gprs[rt], *(u64 *)data);
1141		break;
1142
1143	case sdr_op:
1144		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1145					vcpu->arch.host_cp0_badvaddr) & (~0x7);
1146
1147		run->mmio.len = 8;
1148		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1149		switch (imme) {
1150		case 0:
1151			*(u64 *)data = vcpu->arch.gprs[rt];
1152			break;
1153		case 1:
1154			*(u64 *)data = ((*(u64 *)data) & 0xff) |
1155					(vcpu->arch.gprs[rt] << 8);
1156			break;
1157		case 2:
1158			*(u64 *)data = ((*(u64 *)data) & 0xffff) |
1159					(vcpu->arch.gprs[rt] << 16);
1160			break;
1161		case 3:
1162			*(u64 *)data = ((*(u64 *)data) & 0xffffff) |
1163					(vcpu->arch.gprs[rt] << 24);
1164			break;
1165		case 4:
1166			*(u64 *)data = ((*(u64 *)data) & 0xffffffff) |
1167					(vcpu->arch.gprs[rt] << 32);
1168			break;
1169		case 5:
1170			*(u64 *)data = ((*(u64 *)data) & 0xffffffffff) |
1171					(vcpu->arch.gprs[rt] << 40);
1172			break;
1173		case 6:
1174			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) |
1175					(vcpu->arch.gprs[rt] << 48);
1176			break;
1177		case 7:
1178			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) |
1179					(vcpu->arch.gprs[rt] << 56);
1180			break;
1181		default:
1182			break;
1183		}
1184
1185		kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n",
1186			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1187			  vcpu->arch.gprs[rt], *(u64 *)data);
1188		break;
1189#endif
1190
1191#ifdef CONFIG_CPU_LOONGSON64
1192	case sdc2_op:
1193		rt = inst.loongson3_lsdc2_format.rt;
1194		switch (inst.loongson3_lsdc2_format.opcode1) {
1195		/*
1196		 * Loongson-3 overridden sdc2 instructions.
1197		 * opcode1              instruction
 1198		 *   0x0          gssbx: store 1 byte from GPR
1199		 *   0x1          gsshx: store 2 bytes from GPR
1200		 *   0x2          gsswx: store 4 bytes from GPR
1201		 *   0x3          gssdx: store 8 bytes from GPR
1202		 */
1203		case 0x0:
1204			run->mmio.len = 1;
1205			*(u8 *)data = vcpu->arch.gprs[rt];
1206
1207			kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1208				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1209				  vcpu->arch.gprs[rt], *(u8 *)data);
1210			break;
1211		case 0x1:
1212			run->mmio.len = 2;
1213			*(u16 *)data = vcpu->arch.gprs[rt];
1214
1215			kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1216				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1217				  vcpu->arch.gprs[rt], *(u16 *)data);
1218			break;
1219		case 0x2:
1220			run->mmio.len = 4;
1221			*(u32 *)data = vcpu->arch.gprs[rt];
1222
1223			kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1224				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1225				  vcpu->arch.gprs[rt], *(u32 *)data);
1226			break;
1227		case 0x3:
1228			run->mmio.len = 8;
1229			*(u64 *)data = vcpu->arch.gprs[rt];
1230
1231			kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
1232				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1233				  vcpu->arch.gprs[rt], *(u64 *)data);
1234			break;
1235		default:
1236			kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n",
1237				inst.word);
1238			break;
1239		}
1240		break;
1241#endif
1242	default:
1243		kvm_err("Store not yet supported (inst=0x%08x)\n",
1244			inst.word);
1245		goto out_fail;
1246	}
1247
1248	vcpu->mmio_needed = 1;
1249	run->mmio.is_write = 1;
1250	vcpu->mmio_is_write = 1;
1251
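	/*
	 * Try in-kernel MMIO devices first; if none claims the access,
	 * return EMULATE_DO_MMIO so the write is completed in user space.
	 */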
1252	r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
1253			run->mmio.phys_addr, run->mmio.len, data);
1254
1255	if (!r) {
1256		vcpu->mmio_needed = 0;
1257		return EMULATE_DONE;
1258	}
1259
1260	return EMULATE_DO_MMIO;
1261
1262out_fail:
1263	/* Rollback PC if emulation was unsuccessful */
1264	vcpu->arch.pc = curr_pc;
1265	return EMULATE_FAIL;
1266}
1267
1268enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
1269					    u32 cause, struct kvm_vcpu *vcpu)
1270{
1271	struct kvm_run *run = vcpu->run;
1272	int r;
1273	enum emulation_result er;
1274	unsigned long curr_pc;
1275	u32 op, rt;
1276	unsigned int imme;
1277
1278	rt = inst.i_format.rt;
1279	op = inst.i_format.opcode;
1280
1281	/*
1282	 * Find the resume PC now while we have safe and easy access to the
1283	 * prior branch instruction, and save it for
1284	 * kvm_mips_complete_mmio_load() to restore later.
1285	 */
1286	curr_pc = vcpu->arch.pc;
1287	er = update_pc(vcpu, cause);
1288	if (er == EMULATE_FAIL)
1289		return er;
1290	vcpu->arch.io_pc = vcpu->arch.pc;
1291	vcpu->arch.pc = curr_pc;
1292
1293	vcpu->arch.io_gpr = rt;
1294
1295	run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1296						vcpu->arch.host_cp0_badvaddr);
1297	if (run->mmio.phys_addr == KVM_INVALID_ADDR)
1298		return EMULATE_FAIL;
1299
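	/*
	 * mmio_needed tells kvm_mips_complete_mmio_load() how to merge the
	 * loaded data into the destination GPR: 1 = zero-extend,
	 * 2 = sign-extend, 3-10 = lwl/lwr partial word, 11-26 = ldl/ldr
	 * partial doubleword, 27-30 = Loongson gsl*x (sign-extended).
	 */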
1300	vcpu->mmio_needed = 2;	/* signed */
1301	switch (op) {
1302#if defined(CONFIG_64BIT)
1303	case ld_op:
1304		run->mmio.len = 8;
1305		break;
1306
1307	case lwu_op:
1308		vcpu->mmio_needed = 1;	/* unsigned */
1309		fallthrough;
1310#endif
1311	case lw_op:
1312		run->mmio.len = 4;
1313		break;
1314
1315	case lhu_op:
1316		vcpu->mmio_needed = 1;	/* unsigned */
1317		fallthrough;
1318	case lh_op:
1319		run->mmio.len = 2;
1320		break;
1321
1322	case lbu_op:
1323		vcpu->mmio_needed = 1;	/* unsigned */
1324		fallthrough;
1325	case lb_op:
1326		run->mmio.len = 1;
1327		break;
1328
1329	case lwl_op:
1330		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1331					vcpu->arch.host_cp0_badvaddr) & (~0x3);
1332
1333		run->mmio.len = 4;
1334		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1335		switch (imme) {
1336		case 0:
1337			vcpu->mmio_needed = 3;	/* 1 byte */
1338			break;
1339		case 1:
1340			vcpu->mmio_needed = 4;	/* 2 bytes */
1341			break;
1342		case 2:
1343			vcpu->mmio_needed = 5;	/* 3 bytes */
1344			break;
1345		case 3:
1346			vcpu->mmio_needed = 6;	/* 4 bytes */
1347			break;
1348		default:
1349			break;
1350		}
1351		break;
1352
1353	case lwr_op:
1354		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1355					vcpu->arch.host_cp0_badvaddr) & (~0x3);
1356
1357		run->mmio.len = 4;
1358		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1359		switch (imme) {
1360		case 0:
1361			vcpu->mmio_needed = 7;	/* 4 bytes */
1362			break;
1363		case 1:
1364			vcpu->mmio_needed = 8;	/* 3 bytes */
1365			break;
1366		case 2:
1367			vcpu->mmio_needed = 9;	/* 2 bytes */
1368			break;
1369		case 3:
1370			vcpu->mmio_needed = 10;	/* 1 byte */
1371			break;
1372		default:
1373			break;
1374		}
1375		break;
1376
1377#if defined(CONFIG_64BIT)
1378	case ldl_op:
1379		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1380					vcpu->arch.host_cp0_badvaddr) & (~0x7);
1381
1382		run->mmio.len = 8;
1383		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1384		switch (imme) {
1385		case 0:
1386			vcpu->mmio_needed = 11;	/* 1 byte */
1387			break;
1388		case 1:
1389			vcpu->mmio_needed = 12;	/* 2 bytes */
1390			break;
1391		case 2:
1392			vcpu->mmio_needed = 13;	/* 3 bytes */
1393			break;
1394		case 3:
1395			vcpu->mmio_needed = 14;	/* 4 bytes */
1396			break;
1397		case 4:
1398			vcpu->mmio_needed = 15;	/* 5 bytes */
1399			break;
1400		case 5:
1401			vcpu->mmio_needed = 16;	/* 6 bytes */
1402			break;
1403		case 6:
1404			vcpu->mmio_needed = 17;	/* 7 bytes */
1405			break;
1406		case 7:
1407			vcpu->mmio_needed = 18;	/* 8 bytes */
1408			break;
1409		default:
1410			break;
1411		}
1412		break;
1413
1414	case ldr_op:
1415		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1416					vcpu->arch.host_cp0_badvaddr) & (~0x7);
1417
1418		run->mmio.len = 8;
1419		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1420		switch (imme) {
1421		case 0:
1422			vcpu->mmio_needed = 19;	/* 8 bytes */
1423			break;
1424		case 1:
1425			vcpu->mmio_needed = 20;	/* 7 bytes */
1426			break;
1427		case 2:
1428			vcpu->mmio_needed = 21;	/* 6 bytes */
1429			break;
1430		case 3:
1431			vcpu->mmio_needed = 22;	/* 5 bytes */
1432			break;
1433		case 4:
1434			vcpu->mmio_needed = 23;	/* 4 bytes */
1435			break;
1436		case 5:
1437			vcpu->mmio_needed = 24;	/* 3 bytes */
1438			break;
1439		case 6:
1440			vcpu->mmio_needed = 25;	/* 2 bytes */
1441			break;
1442		case 7:
1443			vcpu->mmio_needed = 26;	/* 1 byte */
1444			break;
1445		default:
1446			break;
1447		}
1448		break;
1449#endif
1450
1451#ifdef CONFIG_CPU_LOONGSON64
1452	case ldc2_op:
1453		rt = inst.loongson3_lsdc2_format.rt;
1454		switch (inst.loongson3_lsdc2_format.opcode1) {
1455		/*
1456		 * Loongson-3 overridden ldc2 instructions.
1457		 * opcode1              instruction
 1458		 *   0x0          gslbx: load 1 byte to GPR
 1459		 *   0x1          gslhx: load 2 bytes to GPR
 1460		 *   0x2          gslwx: load 4 bytes to GPR
 1461		 *   0x3          gsldx: load 8 bytes to GPR
1462		 */
1463		case 0x0:
1464			run->mmio.len = 1;
1465			vcpu->mmio_needed = 27;	/* signed */
1466			break;
1467		case 0x1:
1468			run->mmio.len = 2;
1469			vcpu->mmio_needed = 28;	/* signed */
1470			break;
1471		case 0x2:
1472			run->mmio.len = 4;
1473			vcpu->mmio_needed = 29;	/* signed */
1474			break;
1475		case 0x3:
1476			run->mmio.len = 8;
1477			vcpu->mmio_needed = 30;	/* signed */
1478			break;
1479		default:
1480			kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n",
1481				inst.word);
1482			break;
1483		}
1484		break;
1485#endif
1486
1487	default:
1488		kvm_err("Load not yet supported (inst=0x%08x)\n",
1489			inst.word);
1490		vcpu->mmio_needed = 0;
1491		return EMULATE_FAIL;
1492	}
1493
1494	run->mmio.is_write = 0;
1495	vcpu->mmio_is_write = 0;
1496
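	/*
	 * As with stores, try in-kernel MMIO devices first; on success the
	 * load completes immediately, otherwise user space fills
	 * run->mmio.data and kvm_mips_complete_mmio_load() finishes the
	 * access on the next vcpu run.
	 */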
1497	r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
1498			run->mmio.phys_addr, run->mmio.len, run->mmio.data);
1499
1500	if (!r) {
1501		kvm_mips_complete_mmio_load(vcpu);
1502		vcpu->mmio_needed = 0;
1503		return EMULATE_DONE;
1504	}
1505
1506	return EMULATE_DO_MMIO;
1507}
1508
1509enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
1510{
1511	struct kvm_run *run = vcpu->run;
1512	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
1513	enum emulation_result er = EMULATE_DONE;
1514
1515	if (run->mmio.len > sizeof(*gpr)) {
 1516		kvm_err("Bad MMIO length: %d\n", run->mmio.len);
1517		er = EMULATE_FAIL;
1518		goto done;
1519	}
1520
1521	/* Restore saved resume PC */
1522	vcpu->arch.pc = vcpu->arch.io_pc;
1523
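	/*
	 * For the partial-access cases (mmio_needed 3-26, set up by the
	 * lwl/lwr/ldl/ldr emulation), only the loaded byte lanes are merged
	 * into the destination GPR; the remaining bytes keep their previous
	 * value.
	 */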
1524	switch (run->mmio.len) {
1525	case 8:
1526		switch (vcpu->mmio_needed) {
1527		case 11:
1528			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) |
1529				(((*(s64 *)run->mmio.data) & 0xff) << 56);
1530			break;
1531		case 12:
1532			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) |
1533				(((*(s64 *)run->mmio.data) & 0xffff) << 48);
1534			break;
1535		case 13:
1536			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) |
1537				(((*(s64 *)run->mmio.data) & 0xffffff) << 40);
1538			break;
1539		case 14:
1540			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) |
1541				(((*(s64 *)run->mmio.data) & 0xffffffff) << 32);
1542			break;
1543		case 15:
1544			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
1545				(((*(s64 *)run->mmio.data) & 0xffffffffff) << 24);
1546			break;
1547		case 16:
1548			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
1549				(((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16);
1550			break;
1551		case 17:
1552			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
1553				(((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8);
1554			break;
1555		case 18:
1556		case 19:
1557			*gpr = *(s64 *)run->mmio.data;
1558			break;
1559		case 20:
1560			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) |
1561				((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff);
1562			break;
1563		case 21:
1564			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) |
1565				((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff);
1566			break;
1567		case 22:
1568			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) |
1569				((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff);
1570			break;
1571		case 23:
1572			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) |
1573				((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff);
1574			break;
1575		case 24:
1576			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) |
1577				((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff);
1578			break;
1579		case 25:
1580			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) |
1581				((((*(s64 *)run->mmio.data)) >> 48) & 0xffff);
1582			break;
1583		case 26:
1584			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) |
1585				((((*(s64 *)run->mmio.data)) >> 56) & 0xff);
1586			break;
1587		default:
1588			*gpr = *(s64 *)run->mmio.data;
1589		}
1590		break;
1591
1592	case 4:
1593		switch (vcpu->mmio_needed) {
1594		case 1:
1595			*gpr = *(u32 *)run->mmio.data;
1596			break;
1597		case 2:
1598			*gpr = *(s32 *)run->mmio.data;
1599			break;
1600		case 3:
1601			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
1602				(((*(s32 *)run->mmio.data) & 0xff) << 24);
1603			break;
1604		case 4:
1605			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
1606				(((*(s32 *)run->mmio.data) & 0xffff) << 16);
1607			break;
1608		case 5:
1609			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
1610				(((*(s32 *)run->mmio.data) & 0xffffff) << 8);
1611			break;
1612		case 6:
1613		case 7:
1614			*gpr = *(s32 *)run->mmio.data;
1615			break;
1616		case 8:
1617			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
1618				((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff);
1619			break;
1620		case 9:
1621			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
1622				((((*(s32 *)run->mmio.data)) >> 16) & 0xffff);
1623			break;
1624		case 10:
1625			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
1626				((((*(s32 *)run->mmio.data)) >> 24) & 0xff);
1627			break;
1628		default:
1629			*gpr = *(s32 *)run->mmio.data;
1630		}
1631		break;
1632
1633	case 2:
1634		if (vcpu->mmio_needed == 1)
1635			*gpr = *(u16 *)run->mmio.data;
1636		else
1637			*gpr = *(s16 *)run->mmio.data;
1638
1639		break;
1640	case 1:
1641		if (vcpu->mmio_needed == 1)
1642			*gpr = *(u8 *)run->mmio.data;
1643		else
1644			*gpr = *(s8 *)run->mmio.data;
1645		break;
1646	}
1647
1648done:
1649	return er;
1650}
v4.6
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * KVM/MIPS: Instruction/Exception emulation
   7 *
   8 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
   9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
  10 */
  11
  12#include <linux/errno.h>
  13#include <linux/err.h>
  14#include <linux/ktime.h>
  15#include <linux/kvm_host.h>
  16#include <linux/module.h>
  17#include <linux/vmalloc.h>
  18#include <linux/fs.h>
  19#include <linux/bootmem.h>
  20#include <linux/random.h>
  21#include <asm/page.h>
  22#include <asm/cacheflush.h>
  23#include <asm/cacheops.h>
  24#include <asm/cpu-info.h>
  25#include <asm/mmu_context.h>
  26#include <asm/tlbflush.h>
  27#include <asm/inst.h>
  28
  29#undef CONFIG_MIPS_MT
  30#include <asm/r4kcache.h>
  31#define CONFIG_MIPS_MT
  32
  33#include "interrupt.h"
  34#include "commpage.h"
  35
  36#include "trace.h"
  37
  38/*
  39 * Compute the return address and do emulate branch simulation, if required.
  40 * This function should be called only in branch delay slot active.
  41 */
  42unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
  43	unsigned long instpc)
  44{
  45	unsigned int dspcontrol;
  46	union mips_instruction insn;
  47	struct kvm_vcpu_arch *arch = &vcpu->arch;
  48	long epc = instpc;
  49	long nextpc = KVM_INVALID_INST;
 
  50
  51	if (epc & 3)
  52		goto unaligned;
 
 
  53
  54	/* Read the instruction */
  55	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
  56
  57	if (insn.word == KVM_INVALID_INST)
  58		return KVM_INVALID_INST;
  59
  60	switch (insn.i_format.opcode) {
  61		/* jr and jalr are in r_format format. */
  62	case spec_op:
  63		switch (insn.r_format.func) {
  64		case jalr_op:
  65			arch->gprs[insn.r_format.rd] = epc + 8;
  66			/* Fall through */
  67		case jr_op:
  68			nextpc = arch->gprs[insn.r_format.rs];
  69			break;
 
 
  70		}
  71		break;
  72
  73		/*
  74		 * This group contains:
  75		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
  76		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
  77		 */
  78	case bcond_op:
  79		switch (insn.i_format.rt) {
  80		case bltz_op:
  81		case bltzl_op:
  82			if ((long)arch->gprs[insn.i_format.rs] < 0)
  83				epc = epc + 4 + (insn.i_format.simmediate << 2);
  84			else
  85				epc += 8;
  86			nextpc = epc;
  87			break;
  88
  89		case bgez_op:
  90		case bgezl_op:
  91			if ((long)arch->gprs[insn.i_format.rs] >= 0)
  92				epc = epc + 4 + (insn.i_format.simmediate << 2);
  93			else
  94				epc += 8;
  95			nextpc = epc;
  96			break;
  97
  98		case bltzal_op:
  99		case bltzall_op:
 100			arch->gprs[31] = epc + 8;
 101			if ((long)arch->gprs[insn.i_format.rs] < 0)
 102				epc = epc + 4 + (insn.i_format.simmediate << 2);
 103			else
 104				epc += 8;
 105			nextpc = epc;
 106			break;
 107
 108		case bgezal_op:
 109		case bgezall_op:
 110			arch->gprs[31] = epc + 8;
 111			if ((long)arch->gprs[insn.i_format.rs] >= 0)
 112				epc = epc + 4 + (insn.i_format.simmediate << 2);
 113			else
 114				epc += 8;
 115			nextpc = epc;
 116			break;
 117		case bposge32_op:
 118			if (!cpu_has_dsp)
 119				goto sigill;
 
 
 
 120
 121			dspcontrol = rddsp(0x01);
 122
 123			if (dspcontrol >= 32)
 124				epc = epc + 4 + (insn.i_format.simmediate << 2);
 125			else
 126				epc += 8;
 127			nextpc = epc;
 128			break;
 129		}
 130		break;
 131
 132		/* These are unconditional and in j_format. */
 133	case jal_op:
 134		arch->gprs[31] = instpc + 8;
 135	case j_op:
 136		epc += 4;
 137		epc >>= 28;
 138		epc <<= 28;
 139		epc |= (insn.j_format.target << 2);
 140		nextpc = epc;
 141		break;
 142
 143		/* These are conditional and in i_format. */
 144	case beq_op:
 145	case beql_op:
 146		if (arch->gprs[insn.i_format.rs] ==
 147		    arch->gprs[insn.i_format.rt])
 148			epc = epc + 4 + (insn.i_format.simmediate << 2);
 149		else
 150			epc += 8;
 151		nextpc = epc;
 152		break;
 153
 154	case bne_op:
 155	case bnel_op:
 156		if (arch->gprs[insn.i_format.rs] !=
 157		    arch->gprs[insn.i_format.rt])
 158			epc = epc + 4 + (insn.i_format.simmediate << 2);
 159		else
 160			epc += 8;
 161		nextpc = epc;
 162		break;
 163
 164	case blez_op:		/* not really i_format */
 165	case blezl_op:
 166		/* rt field assumed to be zero */
 167		if ((long)arch->gprs[insn.i_format.rs] <= 0)
 168			epc = epc + 4 + (insn.i_format.simmediate << 2);
 169		else
 170			epc += 8;
 171		nextpc = epc;
 172		break;
 173
 174	case bgtz_op:
 175	case bgtzl_op:
 176		/* rt field assumed to be zero */
 177		if ((long)arch->gprs[insn.i_format.rs] > 0)
 178			epc = epc + 4 + (insn.i_format.simmediate << 2);
 179		else
 180			epc += 8;
 181		nextpc = epc;
 182		break;
 183
 184		/* And now the FPA/cp1 branch instructions. */
 185	case cop1_op:
 186		kvm_err("%s: unsupported cop1_op\n", __func__);
 187		break;
 188	}
 189
 190	return nextpc;
 191
 192unaligned:
 193	kvm_err("%s: unaligned epc\n", __func__);
 194	return nextpc;
 195
 196sigill:
 197	kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
 198	return nextpc;
 199}
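
/*
 * Illustrative stand-alone sketch (not part of this file): the target
 * arithmetic used by kvm_compute_return_epc() above.  Conditional branches
 * add the sign-extended 16-bit immediate, shifted left by 2, to epc + 4;
 * j/jal keep the top four bits of epc + 4 and splice in the 26-bit target
 * field shifted left by 2.  Helper names and sample values are made up.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned long branch_target(unsigned long epc, int16_t simmediate)
{
	/* taken conditional branch: epc + 4 + (sign-extended offset << 2) */
	return epc + 4 + ((long)simmediate << 2);
}

static unsigned long jump_target(unsigned long epc, uint32_t target26)
{
	/* j/jal: keep the 256MB segment of the delay slot, insert target << 2 */
	unsigned long pc = epc + 4;

	pc >>= 28;
	pc <<= 28;
	return pc | ((unsigned long)target26 << 2);
}

int main(void)
{
	printf("branch: %#lx\n", branch_target(0x80001000, 0x10));	/* 0x80001044 */
	printf("jump:   %#lx\n", jump_target(0x80001000, 0x3c0100));	/* 0x80f00400 */
	return 0;
}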
 200
 201enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
 202{
 203	unsigned long branch_pc;
 204	enum emulation_result er = EMULATE_DONE;
 205
 206	if (cause & CAUSEF_BD) {
 207		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
 208		if (branch_pc == KVM_INVALID_INST) {
 209			er = EMULATE_FAIL;
 210		} else {
 211			vcpu->arch.pc = branch_pc;
 212			kvm_debug("BD update_pc(): New PC: %#lx\n",
 213				  vcpu->arch.pc);
 214		}
 215	} else
 216		vcpu->arch.pc += 4;
 217
 218	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
 219
 220	return er;
 221}
 222
 223/**
 224 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 225 * @vcpu:	Virtual CPU.
 226 *
 227 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 228 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 229 *		0 otherwise (in which case CP0_Count timer is running).
 230 */
 231static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
 232{
 233	struct mips_coproc *cop0 = vcpu->arch.cop0;
 234
 235	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
 236		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
 237}
 238
 239/**
 240 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 241 *
 242 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 243 *
 244 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 245 */
 246static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
 247{
 248	s64 now_ns, periods;
 249	u64 delta;
 250
 251	now_ns = ktime_to_ns(now);
 252	delta = now_ns + vcpu->arch.count_dyn_bias;
 253
 254	if (delta >= vcpu->arch.count_period) {
 255		/* If delta is out of safe range the bias needs adjusting */
 256		periods = div64_s64(now_ns, vcpu->arch.count_period);
 257		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
 258		/* Recalculate delta with new bias */
 259		delta = now_ns + vcpu->arch.count_dyn_bias;
 260	}
 261
 262	/*
 263	 * We've ensured that:
 264	 *   delta < count_period
 265	 *
 266	 * Therefore the intermediate delta*count_hz will never overflow since
 267	 * at the boundary condition:
 268	 *   delta = count_period
 269	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
 270	 *   delta * count_hz = NSEC_PER_SEC * 2^32
 271	 */
 272	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
 273}
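
/*
 * Illustrative stand-alone sketch (not part of this file) of the scaling
 * performed by kvm_mips_ktime_to_count() above: the dynamic bias keeps
 * "delta" below count_period, so delta * count_hz stays within 64 bits
 * before the divide by NSEC_PER_SEC.  The kernel uses div_u64()/div64_s64();
 * plain '/' and the sample values below are only for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint32_t ns_to_count(int64_t now_ns, uint64_t count_hz,
			    uint64_t count_period, int64_t *dyn_bias)
{
	uint64_t delta = now_ns + *dyn_bias;

	if (delta >= count_period) {
		/* rebias so delta falls back below one full 2^32-tick period */
		int64_t periods = now_ns / (int64_t)count_period;

		*dyn_bias = -periods * (int64_t)count_period;
		delta = now_ns + *dyn_bias;
	}
	return (uint32_t)(delta * count_hz / NSEC_PER_SEC);
}

int main(void)
{
	uint64_t hz = 100000000;			/* 100 MHz */
	uint64_t period = (NSEC_PER_SEC << 32) / hz;	/* ns per 2^32 ticks */
	int64_t bias = 0;

	/* one second of guest time at 100 MHz is 100000000 ticks */
	printf("%u\n", ns_to_count(NSEC_PER_SEC, hz, period, &bias));
	return 0;
}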
 274
 275/**
 276 * kvm_mips_count_time() - Get effective current time.
 277 * @vcpu:	Virtual CPU.
 278 *
 279 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 280 * except when the master disable bit is set in count_ctl, in which case it is
 281 * count_resume, i.e. the time that the count was disabled.
 282 *
 283 * Returns:	Effective monotonic ktime for CP0_Count.
 284 */
 285static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
 286{
 287	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
 288		return vcpu->arch.count_resume;
 289
 290	return ktime_get();
 291}
 292
 293/**
 294 * kvm_mips_read_count_running() - Read the current count value as if running.
 295 * @vcpu:	Virtual CPU.
 296 * @now:	Kernel time to read CP0_Count at.
 297 *
  298 * Returns the current guest CP0_Count register at time @now, handling any
  299 * timer interrupt that is pending and hasn't been handled yet.
 300 *
 301 * Returns:	The current value of the guest CP0_Count register.
 302 */
 303static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
 304{
 305	ktime_t expires;
 306	int running;
 307
 308	/* Is the hrtimer pending? */
 309	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
 310	if (ktime_compare(now, expires) >= 0) {
 311		/*
 312		 * Cancel it while we handle it so there's no chance of
 313		 * interference with the timeout handler.
 314		 */
 315		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
 316
 317		/* Nothing should be waiting on the timeout */
 318		kvm_mips_callbacks->queue_timer_int(vcpu);
 319
 320		/*
 321		 * Restart the timer if it was running based on the expiry time
 322		 * we read, so that we don't push it back 2 periods.
 323		 */
 324		if (running) {
 325			expires = ktime_add_ns(expires,
 326					       vcpu->arch.count_period);
 327			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
 328				      HRTIMER_MODE_ABS);
 329		}
 330	}
 331
 332	/* Return the biased and scaled guest CP0_Count */
 333	return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
 334}
 335
 336/**
 337 * kvm_mips_read_count() - Read the current count value.
 338 * @vcpu:	Virtual CPU.
 339 *
 340 * Read the current guest CP0_Count value, taking into account whether the timer
 341 * is stopped.
 342 *
 343 * Returns:	The current guest CP0_Count value.
 344 */
 345uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
 346{
 347	struct mips_coproc *cop0 = vcpu->arch.cop0;
 348
 349	/* If count disabled just read static copy of count */
 350	if (kvm_mips_count_disabled(vcpu))
 351		return kvm_read_c0_guest_count(cop0);
 352
 353	return kvm_mips_read_count_running(vcpu, ktime_get());
 354}
 355
 356/**
 357 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 358 * @vcpu:	Virtual CPU.
 359 * @count:	Output pointer for CP0_Count value at point of freeze.
 360 *
 361 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 362 * at the point it was frozen. It is guaranteed that any pending interrupts at
 363 * the point it was frozen are handled, and none after that point.
 364 *
 365 * This is useful where the time/CP0_Count is needed in the calculation of the
 366 * new parameters.
 367 *
 368 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 369 *
 370 * Returns:	The ktime at the point of freeze.
 371 */
 372static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
 373				       uint32_t *count)
 374{
 375	ktime_t now;
 376
 377	/* stop hrtimer before finding time */
 378	hrtimer_cancel(&vcpu->arch.comparecount_timer);
 379	now = ktime_get();
 380
 381	/* find count at this point and handle pending hrtimer */
 382	*count = kvm_mips_read_count_running(vcpu, now);
 383
 384	return now;
 385}
 386
 387/**
 388 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 389 * @vcpu:	Virtual CPU.
 390 * @now:	ktime at point of resume.
 391 * @count:	CP0_Count at point of resume.
 392 *
 393 * Resumes the timer and updates the timer expiry based on @now and @count.
  394 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 395 * parameters need to be changed.
 396 *
 397 * It is guaranteed that a timer interrupt immediately after resume will be
  398 * handled, but not if CP0_Compare is exactly at @count. That case is already
  399 * handled by kvm_mips_freeze_hrtimer().
 400 *
 401 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 402 */
 403static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
 404				    ktime_t now, uint32_t count)
 405{
 406	struct mips_coproc *cop0 = vcpu->arch.cop0;
 407	uint32_t compare;
 408	u64 delta;
 409	ktime_t expire;
 410
 411	/* Calculate timeout (wrap 0 to 2^32) */
 412	compare = kvm_read_c0_guest_compare(cop0);
 413	delta = (u64)(uint32_t)(compare - count - 1) + 1;
 414	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
 415	expire = ktime_add_ns(now, delta);
 416
 417	/* Update hrtimer to use new timeout */
 418	hrtimer_cancel(&vcpu->arch.comparecount_timer);
 419	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
 420}
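
/*
 * Illustrative stand-alone sketch (not part of this file) of the
 * "wrap 0 to 2^32" delta used by kvm_mips_resume_hrtimer() above: when
 * CP0_Compare equals CP0_Count the next interrupt is a full 2^32 ticks
 * away rather than zero, so the tick difference is computed as
 * (u32)(compare - count - 1) + 1.  Sample values are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t ticks_until_compare(uint32_t compare, uint32_t count)
{
	return (uint64_t)(uint32_t)(compare - count - 1) + 1;
}

int main(void)
{
	/* 10 ticks away */
	printf("%llu\n", (unsigned long long)ticks_until_compare(100, 90));
	/* equal: a full 2^32 = 4294967296 ticks away */
	printf("%llu\n", (unsigned long long)ticks_until_compare(5, 5));
	return 0;
}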
 421
 422/**
 423 * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
 424 * @vcpu:	Virtual CPU.
 425 *
 426 * Recalculates and updates the expiry time of the hrtimer. This can be used
 427 * after timer parameters have been altered which do not depend on the time that
  428 * the change occurs (otherwise kvm_mips_freeze_hrtimer() and
 429 * kvm_mips_resume_hrtimer() are used directly).
 430 *
 431 * It is guaranteed that no timer interrupts will be lost in the process.
 432 *
 433 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 434 */
 435static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
 436{
 437	ktime_t now;
 438	uint32_t count;
 439
 440	/*
  441	 * freeze_hrtimer takes care of timer interrupts <= count, and
  442	 * resume_hrtimer takes care of timer interrupts > count.
 443	 */
 444	now = kvm_mips_freeze_hrtimer(vcpu, &count);
 445	kvm_mips_resume_hrtimer(vcpu, now, count);
 446}
 447
 448/**
 449 * kvm_mips_write_count() - Modify the count and update timer.
 450 * @vcpu:	Virtual CPU.
 451 * @count:	Guest CP0_Count value to set.
 452 *
 453 * Sets the CP0_Count value and updates the timer accordingly.
 454 */
 455void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
 456{
 457	struct mips_coproc *cop0 = vcpu->arch.cop0;
 458	ktime_t now;
 459
 460	/* Calculate bias */
 461	now = kvm_mips_count_time(vcpu);
 462	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
 463
 464	if (kvm_mips_count_disabled(vcpu))
 465		/* The timer's disabled, adjust the static count */
 466		kvm_write_c0_guest_count(cop0, count);
 467	else
 468		/* Update timeout */
 469		kvm_mips_resume_hrtimer(vcpu, now, count);
 470}
 471
 472/**
 473 * kvm_mips_init_count() - Initialise timer.
 474 * @vcpu:	Virtual CPU.
 475 *
 476 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
 477 * it going if it's enabled.
 478 */
 479void kvm_mips_init_count(struct kvm_vcpu *vcpu)
 480{
 481	/* 100 MHz */
 482	vcpu->arch.count_hz = 100*1000*1000;
 483	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
 484					  vcpu->arch.count_hz);
 485	vcpu->arch.count_dyn_bias = 0;
 486
 487	/* Starting at 0 */
 488	kvm_mips_write_count(vcpu, 0);
 489}
 490
 491/**
 492 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 493 * @vcpu:	Virtual CPU.
 494 * @count_hz:	Frequency of CP0_Count timer in Hz.
 495 *
 496 * Change the frequency of the CP0_Count timer. This is done atomically so that
 497 * CP0_Count is continuous and no timer interrupt is lost.
 498 *
 499 * Returns:	-EINVAL if @count_hz is out of range.
 500 *		0 on success.
 501 */
 502int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
 503{
 504	struct mips_coproc *cop0 = vcpu->arch.cop0;
 505	int dc;
 506	ktime_t now;
 507	u32 count;
 508
 509	/* ensure the frequency is in a sensible range... */
 510	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
 511		return -EINVAL;
 512	/* ... and has actually changed */
 513	if (vcpu->arch.count_hz == count_hz)
 514		return 0;
 515
 516	/* Safely freeze timer so we can keep it continuous */
 517	dc = kvm_mips_count_disabled(vcpu);
 518	if (dc) {
 519		now = kvm_mips_count_time(vcpu);
 520		count = kvm_read_c0_guest_count(cop0);
 521	} else {
 522		now = kvm_mips_freeze_hrtimer(vcpu, &count);
 523	}
 524
 525	/* Update the frequency */
 526	vcpu->arch.count_hz = count_hz;
 527	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
 528	vcpu->arch.count_dyn_bias = 0;
 529
 530	/* Calculate adjusted bias so dynamic count is unchanged */
 531	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
 532
 533	/* Update and resume hrtimer */
 534	if (!dc)
 535		kvm_mips_resume_hrtimer(vcpu, now, count);
 536	return 0;
 537}
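
/*
 * Illustrative stand-alone sketch (not part of this file) of how
 * kvm_mips_set_count_hz() keeps the visible CP0_Count continuous across a
 * frequency change: the count read just before the change is re-expressed
 * as bias + scaled-time at the new rate.  The helper ignores the 2^32
 * rebias handled by count_dyn_bias, and all names/values are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint32_t scaled(uint64_t now_ns, uint64_t hz)
{
	return (uint32_t)(now_ns * hz / NSEC_PER_SEC);
}

int main(void)
{
	uint64_t now_ns = 500000000;		/* 0.5 s of guest time */
	uint32_t bias = 0, count;

	count = (uint32_t)(bias + scaled(now_ns, 100000000));	/* 100 MHz */
	bias = count - scaled(now_ns, 200000000);		/* switch to 200 MHz */
	printf("count stays %u == %u\n", count,
	       (uint32_t)(bias + scaled(now_ns, 200000000)));
	return 0;
}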
 538
 539/**
 540 * kvm_mips_write_compare() - Modify compare and update timer.
 541 * @vcpu:	Virtual CPU.
 542 * @compare:	New CP0_Compare value.
 543 *
 544 * Update CP0_Compare to a new value and update the timeout.
 545 */
 546void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
 547{
 548	struct mips_coproc *cop0 = vcpu->arch.cop0;
 549
 550	/* if unchanged, must just be an ack */
 551	if (kvm_read_c0_guest_compare(cop0) == compare)
 552		return;
 553
 554	/* Update compare */
 555	kvm_write_c0_guest_compare(cop0, compare);
 556
 557	/* Update timeout if count enabled */
 558	if (!kvm_mips_count_disabled(vcpu))
 559		kvm_mips_update_hrtimer(vcpu);
 560}
 561
 562/**
 563 * kvm_mips_count_disable() - Disable count.
 564 * @vcpu:	Virtual CPU.
 565 *
 566 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 567 * time will be handled but not after.
 568 *
 569 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 570 * count_ctl.DC has been set (count disabled).
 571 *
 572 * Returns:	The time that the timer was stopped.
 573 */
 574static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
 575{
 576	struct mips_coproc *cop0 = vcpu->arch.cop0;
 577	uint32_t count;
 578	ktime_t now;
 579
 580	/* Stop hrtimer */
 581	hrtimer_cancel(&vcpu->arch.comparecount_timer);
 582
 583	/* Set the static count from the dynamic count, handling pending TI */
 584	now = ktime_get();
 585	count = kvm_mips_read_count_running(vcpu, now);
 586	kvm_write_c0_guest_count(cop0, count);
 587
 588	return now;
 589}
 590
 591/**
 592 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 593 * @vcpu:	Virtual CPU.
 594 *
 595 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 596 * before the final stop time will be handled if the timer isn't disabled by
 597 * count_ctl.DC, but not after.
 598 *
 599 * Assumes CP0_Cause.DC is clear (count enabled).
 600 */
 601void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
 602{
 603	struct mips_coproc *cop0 = vcpu->arch.cop0;
 604
 605	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
 606	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
 607		kvm_mips_count_disable(vcpu);
 608}
 609
 610/**
 611 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 612 * @vcpu:	Virtual CPU.
 613 *
 614 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 615 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 616 * potentially before even returning, so the caller should be careful with
 617 * ordering of CP0_Cause modifications so as not to lose it.
 618 *
 619 * Assumes CP0_Cause.DC is set (count disabled).
 620 */
 621void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
 622{
 623	struct mips_coproc *cop0 = vcpu->arch.cop0;
 624	uint32_t count;
 625
 626	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
 627
 628	/*
 629	 * Set the dynamic count to match the static count.
 630	 * This starts the hrtimer if count_ctl.DC allows it.
 631	 * Otherwise it conveniently updates the biases.
 632	 */
 633	count = kvm_read_c0_guest_count(cop0);
 634	kvm_mips_write_count(vcpu, count);
 635}
 636
 637/**
 638 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 639 * @vcpu:	Virtual CPU.
 640 * @count_ctl:	Count control register new value.
 641 *
 642 * Set the count control KVM register. The timer is updated accordingly.
 643 *
 644 * Returns:	-EINVAL if reserved bits are set.
 645 *		0 on success.
 646 */
 647int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
 648{
 649	struct mips_coproc *cop0 = vcpu->arch.cop0;
 650	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
 651	s64 delta;
 652	ktime_t expire, now;
 653	uint32_t count, compare;
 654
 655	/* Only allow defined bits to be changed */
 656	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
 657		return -EINVAL;
 658
 659	/* Apply new value */
 660	vcpu->arch.count_ctl = count_ctl;
 661
 662	/* Master CP0_Count disable */
 663	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
 664		/* Is CP0_Cause.DC already disabling CP0_Count? */
 665		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
 666			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
 667				/* Just record the current time */
 668				vcpu->arch.count_resume = ktime_get();
 669		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
 670			/* disable timer and record current time */
 671			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
 672		} else {
 673			/*
 674			 * Calculate timeout relative to static count at resume
 675			 * time (wrap 0 to 2^32).
 676			 */
 677			count = kvm_read_c0_guest_count(cop0);
 678			compare = kvm_read_c0_guest_compare(cop0);
 679			delta = (u64)(uint32_t)(compare - count - 1) + 1;
 680			delta = div_u64(delta * NSEC_PER_SEC,
 681					vcpu->arch.count_hz);
 682			expire = ktime_add_ns(vcpu->arch.count_resume, delta);
 683
 684			/* Handle pending interrupt */
 685			now = ktime_get();
 686			if (ktime_compare(now, expire) >= 0)
 687				/* Nothing should be waiting on the timeout */
 688				kvm_mips_callbacks->queue_timer_int(vcpu);
 689
 690			/* Resume hrtimer without changing bias */
 691			count = kvm_mips_read_count_running(vcpu, now);
 692			kvm_mips_resume_hrtimer(vcpu, now, count);
 693		}
 694	}
 695
 696	return 0;
 697}
 698
 699/**
 700 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 701 * @vcpu:		Virtual CPU.
 702 * @count_resume:	Count resume register new value.
 703 *
 704 * Set the count resume KVM register.
 705 *
 706 * Returns:	-EINVAL if out of valid range (0..now).
 707 *		0 on success.
 708 */
 709int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
 710{
 711	/*
 712	 * It doesn't make sense for the resume time to be in the future, as it
 713	 * would be possible for the next interrupt to be more than a full
 714	 * period in the future.
 715	 */
 716	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
 717		return -EINVAL;
 718
 719	vcpu->arch.count_resume = ns_to_ktime(count_resume);
 720	return 0;
 721}
 722
 723/**
 724 * kvm_mips_count_timeout() - Push timer forward on timeout.
 725 * @vcpu:	Virtual CPU.
 726 *
  727 * Handle an hrtimer event by pushing the hrtimer forward a period.
 728 *
 729 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 730 */
 731enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
 732{
 733	/* Add the Count period to the current expiry time */
 734	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
 735			       vcpu->arch.count_period);
 736	return HRTIMER_RESTART;
 737}
 738
 739enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
 740{
 741	struct mips_coproc *cop0 = vcpu->arch.cop0;
 742	enum emulation_result er = EMULATE_DONE;
 743
 744	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
 745		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
 746			  kvm_read_c0_guest_epc(cop0));
 747		kvm_clear_c0_guest_status(cop0, ST0_EXL);
 748		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
 749
 750	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
 751		kvm_clear_c0_guest_status(cop0, ST0_ERL);
 752		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
 753	} else {
 754		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
 755			vcpu->arch.pc);
 756		er = EMULATE_FAIL;
 757	}
 758
 759	return er;
 760}
 761
 762enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 763{
 764	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
 765		  vcpu->arch.pending_exceptions);
 766
 767	++vcpu->stat.wait_exits;
 768	trace_kvm_exit(vcpu, WAIT_EXITS);
 769	if (!vcpu->arch.pending_exceptions) {
 770		vcpu->arch.wait = 1;
 771		kvm_vcpu_block(vcpu);
 772
 773		/*
  774		 * If we are runnable, then definitely go off to user space to
 775		 * check if any I/O interrupts are pending.
 776		 */
 777		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
 778			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
 779			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 780		}
 781	}
 782
 783	return EMULATE_DONE;
 784}
 785
 786/*
 787 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
  788 * we can catch this if things ever change
 789 */
 790enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 791{
 792	struct mips_coproc *cop0 = vcpu->arch.cop0;
 793	uint32_t pc = vcpu->arch.pc;
 794
 795	kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
 796	return EMULATE_FAIL;
 797}
 798
 799/* Write Guest TLB Entry @ Index */
 800enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 801{
 802	struct mips_coproc *cop0 = vcpu->arch.cop0;
 803	int index = kvm_read_c0_guest_index(cop0);
 804	struct kvm_mips_tlb *tlb = NULL;
 805	uint32_t pc = vcpu->arch.pc;
 806
 807	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
 808		kvm_debug("%s: illegal index: %d\n", __func__, index);
 809		kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
 810			  pc, index, kvm_read_c0_guest_entryhi(cop0),
 811			  kvm_read_c0_guest_entrylo0(cop0),
 812			  kvm_read_c0_guest_entrylo1(cop0),
 813			  kvm_read_c0_guest_pagemask(cop0));
 814		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
 815	}
 816
 817	tlb = &vcpu->arch.guest_tlb[index];
 818	/*
 819	 * Probe the shadow host TLB for the entry being overwritten, if one
 820	 * matches, invalidate it
 821	 */
 822	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
 823
 824	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 825	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
 826	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
 827	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
 828
 829	kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
 830		  pc, index, kvm_read_c0_guest_entryhi(cop0),
 831		  kvm_read_c0_guest_entrylo0(cop0),
 832		  kvm_read_c0_guest_entrylo1(cop0),
 833		  kvm_read_c0_guest_pagemask(cop0));
 834
 835	return EMULATE_DONE;
 836}
 837
 838/* Write Guest TLB Entry @ Random Index */
 839enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 840{
 841	struct mips_coproc *cop0 = vcpu->arch.cop0;
 842	struct kvm_mips_tlb *tlb = NULL;
 843	uint32_t pc = vcpu->arch.pc;
 844	int index;
 845
 846	get_random_bytes(&index, sizeof(index));
 847	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
 848
 849	tlb = &vcpu->arch.guest_tlb[index];
 850
 851	/*
 852	 * Probe the shadow host TLB for the entry being overwritten, if one
 853	 * matches, invalidate it
 854	 */
 855	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
 856
 857	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 858	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
 859	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
 860	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
 861
 862	kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
 863		  pc, index, kvm_read_c0_guest_entryhi(cop0),
 864		  kvm_read_c0_guest_entrylo0(cop0),
 865		  kvm_read_c0_guest_entrylo1(cop0));
 866
 867	return EMULATE_DONE;
 868}
 869
 870enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
 871{
 872	struct mips_coproc *cop0 = vcpu->arch.cop0;
 873	long entryhi = kvm_read_c0_guest_entryhi(cop0);
 874	uint32_t pc = vcpu->arch.pc;
 875	int index = -1;
 876
 877	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
 878
 879	kvm_write_c0_guest_index(cop0, index);
 880
 881	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
 882		  index);
 883
 884	return EMULATE_DONE;
 885}
 886
 887/**
 888 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
 889 * @vcpu:	Virtual CPU.
 890 *
 891 * Finds the mask of bits which are writable in the guest's Config1 CP0
 892 * register, by userland (currently read-only to the guest).
 893 */
 894unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
 895{
 896	unsigned int mask = 0;
 897
 898	/* Permit FPU to be present if FPU is supported */
 899	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
 900		mask |= MIPS_CONF1_FP;
 901
 902	return mask;
 903}
 904
 905/**
 906 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
 907 * @vcpu:	Virtual CPU.
 908 *
 909 * Finds the mask of bits which are writable in the guest's Config3 CP0
 910 * register, by userland (currently read-only to the guest).
 911 */
 912unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
 913{
 914	/* Config4 is optional */
 915	unsigned int mask = MIPS_CONF_M;
 916
 917	/* Permit MSA to be present if MSA is supported */
 918	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
 919		mask |= MIPS_CONF3_MSA;
 920
 921	return mask;
 922}
 923
 924/**
 925 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
 926 * @vcpu:	Virtual CPU.
 927 *
 928 * Finds the mask of bits which are writable in the guest's Config4 CP0
 929 * register, by userland (currently read-only to the guest).
 930 */
 931unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
 932{
 933	/* Config5 is optional */
 934	return MIPS_CONF_M;
 935}
 936
 937/**
 938 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
 939 * @vcpu:	Virtual CPU.
 940 *
 941 * Finds the mask of bits which are writable in the guest's Config5 CP0
 942 * register, by the guest itself.
 943 */
 944unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
 945{
 946	unsigned int mask = 0;
 947
 948	/* Permit MSAEn changes if MSA supported and enabled */
 949	if (kvm_mips_guest_has_msa(&vcpu->arch))
 950		mask |= MIPS_CONF5_MSAEN;
 951
 952	/*
 953	 * Permit guest FPU mode changes if FPU is enabled and the relevant
 954	 * feature exists according to FIR register.
 955	 */
 956	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
 957		if (cpu_has_fre)
 958			mask |= MIPS_CONF5_FRE;
 959		/* We don't support UFR or UFE */
 960	}
 961
 962	return mask;
 963}
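
/*
 * Illustrative stand-alone sketch (not part of this file) of how the write
 * masks returned by the kvm_mips_config*_wrmask() helpers above are applied
 * (see the Config5 handling in kvm_mips_emulate_CP0() below): only bits in
 * the mask may change, everything else keeps its old value.  The bit chosen
 * for the example (bit 27, MSAEn in Config5) and the values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t masked_write(uint32_t old_val, uint32_t val, uint32_t wrmask)
{
	uint32_t change = (val ^ old_val) & wrmask;

	return old_val ^ change;
}

int main(void)
{
	/* the write tries to clear bit 0 and set bit 27; only bit 27 is writable */
	printf("%#x\n", masked_write(0x00000001, 0x08000000, 0x08000000));
	return 0;	/* prints 0x8000001: bit 0 preserved, bit 27 now set */
}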
 964
 965enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 966					   uint32_t cause, struct kvm_run *run,
 967					   struct kvm_vcpu *vcpu)
 968{
 969	struct mips_coproc *cop0 = vcpu->arch.cop0;
 970	enum emulation_result er = EMULATE_DONE;
 971	int32_t rt, rd, copz, sel, co_bit, op;
 972	uint32_t pc = vcpu->arch.pc;
 973	unsigned long curr_pc;
 974
 975	/*
 976	 * Update PC and hold onto current PC in case there is
 977	 * an error and we want to rollback the PC
 978	 */
 979	curr_pc = vcpu->arch.pc;
 980	er = update_pc(vcpu, cause);
 981	if (er == EMULATE_FAIL)
 982		return er;
 983
 984	copz = (inst >> 21) & 0x1f;
 985	rt = (inst >> 16) & 0x1f;
 986	rd = (inst >> 11) & 0x1f;
 987	sel = inst & 0x7;
 988	co_bit = (inst >> 25) & 1;
 989
 990	if (co_bit) {
 991		op = (inst) & 0xff;
 992
 993		switch (op) {
 994		case tlbr_op:	/*  Read indexed TLB entry  */
 995			er = kvm_mips_emul_tlbr(vcpu);
 996			break;
 997		case tlbwi_op:	/*  Write indexed  */
 998			er = kvm_mips_emul_tlbwi(vcpu);
 999			break;
1000		case tlbwr_op:	/*  Write random  */
1001			er = kvm_mips_emul_tlbwr(vcpu);
1002			break;
1003		case tlbp_op:	/* TLB Probe */
1004			er = kvm_mips_emul_tlbp(vcpu);
1005			break;
1006		case rfe_op:
1007			kvm_err("!!!COP0_RFE!!!\n");
1008			break;
1009		case eret_op:
1010			er = kvm_mips_emul_eret(vcpu);
1011			goto dont_update_pc;
1012			break;
1013		case wait_op:
1014			er = kvm_mips_emul_wait(vcpu);
1015			break;
1016		}
1017	} else {
1018		switch (copz) {
1019		case mfc_op:
1020#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1021			cop0->stat[rd][sel]++;
1022#endif
1023			/* Get reg */
1024			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1025				vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
1026			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
1027				vcpu->arch.gprs[rt] = 0x0;
1028#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1029				kvm_mips_trans_mfc0(inst, opc, vcpu);
1030#endif
1031			} else {
1032				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
1033
1034#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1035				kvm_mips_trans_mfc0(inst, opc, vcpu);
1036#endif
1037			}
1038
1039			kvm_debug
1040			    ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
1041			     pc, rd, sel, rt, vcpu->arch.gprs[rt]);
1042
1043			break;
1044
1045		case dmfc_op:
1046			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
1047			break;
1048
1049		case mtc_op:
1050#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1051			cop0->stat[rd][sel]++;
1052#endif
1053			if ((rd == MIPS_CP0_TLB_INDEX)
1054			    && (vcpu->arch.gprs[rt] >=
1055				KVM_MIPS_GUEST_TLB_SIZE)) {
1056				kvm_err("Invalid TLB Index: %ld",
1057					vcpu->arch.gprs[rt]);
1058				er = EMULATE_FAIL;
1059				break;
1060			}
1061#define C0_EBASE_CORE_MASK 0xff
1062			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
1063				/* Preserve CORE number */
1064				kvm_change_c0_guest_ebase(cop0,
1065							  ~(C0_EBASE_CORE_MASK),
1066							  vcpu->arch.gprs[rt]);
1067				kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
1068					kvm_read_c0_guest_ebase(cop0));
1069			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
1070				uint32_t nasid =
1071					vcpu->arch.gprs[rt] & ASID_MASK;
1072				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
1073				    ((kvm_read_c0_guest_entryhi(cop0) &
1074				      ASID_MASK) != nasid)) {
1075					kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
1076						kvm_read_c0_guest_entryhi(cop0)
1077						& ASID_MASK,
1078						vcpu->arch.gprs[rt]
1079						& ASID_MASK);
1080
1081					/* Blow away the shadow host TLBs */
1082					kvm_mips_flush_host_tlb(1);
1083				}
1084				kvm_write_c0_guest_entryhi(cop0,
1085							   vcpu->arch.gprs[rt]);
1086			}
1087			/* Are we writing to COUNT */
1088			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1089				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
1090				goto done;
1091			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
1092				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
1093					  pc, kvm_read_c0_guest_compare(cop0),
1094					  vcpu->arch.gprs[rt]);
1095
1096				/* If we are writing to COMPARE */
1097				/* Clear pending timer interrupt, if any */
1098				kvm_mips_callbacks->dequeue_timer_int(vcpu);
1099				kvm_mips_write_compare(vcpu,
1100						       vcpu->arch.gprs[rt]);
1101			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1102				unsigned int old_val, val, change;
1103
1104				old_val = kvm_read_c0_guest_status(cop0);
1105				val = vcpu->arch.gprs[rt];
1106				change = val ^ old_val;
1107
1108				/* Make sure that the NMI bit is never set */
1109				val &= ~ST0_NMI;
1110
1111				/*
1112				 * Don't allow CU1 or FR to be set unless FPU
1113				 * capability enabled and exists in guest
1114				 * configuration.
1115				 */
1116				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1117					val &= ~(ST0_CU1 | ST0_FR);
1118
1119				/*
1120				 * Also don't allow FR to be set if host doesn't
1121				 * support it.
1122				 */
1123				if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
1124					val &= ~ST0_FR;
1125
1126
1127				/* Handle changes in FPU mode */
1128				preempt_disable();
1129
1130				/*
1131				 * FPU and Vector register state is made
1132				 * UNPREDICTABLE by a change of FR, so don't
1133				 * even bother saving it.
1134				 */
1135				if (change & ST0_FR)
1136					kvm_drop_fpu(vcpu);
1137
1138				/*
1139				 * If MSA state is already live, it is undefined
1140				 * how it interacts with FR=0 FPU state, and we
1141				 * don't want to hit reserved instruction
1142				 * exceptions trying to save the MSA state later
1143				 * when CU=1 && FR=1, so play it safe and save
1144				 * it first.
1145				 */
1146				if (change & ST0_CU1 && !(val & ST0_FR) &&
1147				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
1148					kvm_lose_fpu(vcpu);
1149
1150				/*
1151				 * Propagate CU1 (FPU enable) changes
1152				 * immediately if the FPU context is already
1153				 * loaded. When disabling we leave the context
1154				 * loaded so it can be quickly enabled again in
1155				 * the near future.
1156				 */
1157				if (change & ST0_CU1 &&
1158				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
1159					change_c0_status(ST0_CU1, val);
1160
1161				preempt_enable();
1162
1163				kvm_write_c0_guest_status(cop0, val);
1164
1165#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1166				/*
1167				 * If FPU present, we need CU1/FR bits to take
1168				 * effect fairly soon.
1169				 */
1170				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1171					kvm_mips_trans_mtc0(inst, opc, vcpu);
1172#endif
1173			} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
1174				unsigned int old_val, val, change, wrmask;
1175
1176				old_val = kvm_read_c0_guest_config5(cop0);
1177				val = vcpu->arch.gprs[rt];
1178
1179				/* Only a few bits are writable in Config5 */
1180				wrmask = kvm_mips_config5_wrmask(vcpu);
1181				change = (val ^ old_val) & wrmask;
1182				val = old_val ^ change;
1183
1184
1185				/* Handle changes in FPU/MSA modes */
1186				preempt_disable();
1187
1188				/*
1189				 * Propagate FRE changes immediately if the FPU
1190				 * context is already loaded.
1191				 */
1192				if (change & MIPS_CONF5_FRE &&
1193				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
1194					change_c0_config5(MIPS_CONF5_FRE, val);
1195
1196				/*
1197				 * Propagate MSAEn changes immediately if the
1198				 * MSA context is already loaded. When disabling
1199				 * we leave the context loaded so it can be
1200				 * quickly enabled again in the near future.
1201				 */
1202				if (change & MIPS_CONF5_MSAEN &&
1203				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
1204					change_c0_config5(MIPS_CONF5_MSAEN,
1205							  val);
1206
1207				preempt_enable();
1208
1209				kvm_write_c0_guest_config5(cop0, val);
1210			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
1211				uint32_t old_cause, new_cause;
1212
1213				old_cause = kvm_read_c0_guest_cause(cop0);
1214				new_cause = vcpu->arch.gprs[rt];
1215				/* Update R/W bits */
1216				kvm_change_c0_guest_cause(cop0, 0x08800300,
1217							  new_cause);
1218				/* DC bit enabling/disabling timer? */
1219				if ((old_cause ^ new_cause) & CAUSEF_DC) {
1220					if (new_cause & CAUSEF_DC)
1221						kvm_mips_count_disable_cause(vcpu);
1222					else
1223						kvm_mips_count_enable_cause(vcpu);
1224				}
1225			} else {
1226				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
1227#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1228				kvm_mips_trans_mtc0(inst, opc, vcpu);
1229#endif
1230			}
1231
1232			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
1233				  rd, sel, cop0->reg[rd][sel]);
1234			break;
1235
1236		case dmtc_op:
1237			kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
1238				vcpu->arch.pc, rt, rd, sel);
1239			er = EMULATE_FAIL;
1240			break;
1241
1242		case mfmc0_op:
1243#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
1244			cop0->stat[MIPS_CP0_STATUS][0]++;
1245#endif
1246			if (rt != 0)
1247				vcpu->arch.gprs[rt] =
1248				    kvm_read_c0_guest_status(cop0);
1249			/* EI */
1250			if (inst & 0x20) {
1251				kvm_debug("[%#lx] mfmc0_op: EI\n",
1252					  vcpu->arch.pc);
1253				kvm_set_c0_guest_status(cop0, ST0_IE);
1254			} else {
1255				kvm_debug("[%#lx] mfmc0_op: DI\n",
1256					  vcpu->arch.pc);
1257				kvm_clear_c0_guest_status(cop0, ST0_IE);
1258			}
1259
1260			break;
1261
1262		case wrpgpr_op:
1263			{
1264				uint32_t css =
1265				    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
1266				uint32_t pss =
1267				    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
1268				/*
1269				 * We don't support any shadow register sets, so
1270				 * SRSCtl[PSS] == SRSCtl[CSS] = 0
1271				 */
1272				if (css || pss) {
1273					er = EMULATE_FAIL;
1274					break;
1275				}
1276				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
1277					  vcpu->arch.gprs[rt]);
1278				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
1279			}
1280			break;
1281		default:
1282			kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
1283				vcpu->arch.pc, copz);
1284			er = EMULATE_FAIL;
1285			break;
1286		}
1287	}
1288
1289done:
1290	/* Rollback PC only if emulation was unsuccessful */
1291	if (er == EMULATE_FAIL)
1292		vcpu->arch.pc = curr_pc;
1293
1294dont_update_pc:
1295	/*
1296	 * This is for special instructions whose emulation
1297	 * updates the PC, so do not overwrite the PC under
1298	 * any circumstances
1299	 */
1300
1301	return er;
1302}
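
/*
 * Illustrative stand-alone sketch (not part of this file) of the field
 * decode at the top of kvm_mips_emulate_CP0() above, for a "mfc0 $2, $9"
 * (read CP0_Count, register 9 select 0, into GPR 2).  The encoded word and
 * the decode mirror the shifts/masks used above; everything else is made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t inst = 0x40024800;		/* mfc0 $2, $9, 0 */
	uint32_t copz = (inst >> 21) & 0x1f;	/* 0 == mfc_op */
	uint32_t rt   = (inst >> 16) & 0x1f;	/* 2 == destination GPR */
	uint32_t rd   = (inst >> 11) & 0x1f;	/* 9 == MIPS_CP0_COUNT */
	uint32_t sel  = inst & 0x7;		/* 0 */
	uint32_t co   = (inst >> 25) & 1;	/* 0, so not a CO-format TLB/ERET op */

	printf("copz=%u rt=%u rd=%u sel=%u co=%u\n", copz, rt, rd, sel, co);
	return 0;
}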
1303
1304enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
1305					     struct kvm_run *run,
1306					     struct kvm_vcpu *vcpu)
1307{
1308	enum emulation_result er = EMULATE_DO_MMIO;
1309	int32_t op, base, rt, offset;
1310	uint32_t bytes;
1311	void *data = run->mmio.data;
1312	unsigned long curr_pc;
1313
1314	/*
1315	 * Update PC and hold onto current PC in case there is
1316	 * an error and we want to rollback the PC
1317	 */
1318	curr_pc = vcpu->arch.pc;
1319	er = update_pc(vcpu, cause);
1320	if (er == EMULATE_FAIL)
1321		return er;
1322
1323	rt = (inst >> 16) & 0x1f;
1324	base = (inst >> 21) & 0x1f;
1325	offset = inst & 0xffff;
1326	op = (inst >> 26) & 0x3f;
1327
1328	switch (op) {
1329	case sb_op:
1330		bytes = 1;
1331		if (bytes > sizeof(run->mmio.data)) {
1332			kvm_err("%s: bad MMIO length: %d\n", __func__,
1333			       run->mmio.len);
1334		}
1335		run->mmio.phys_addr =
1336		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1337						   host_cp0_badvaddr);
1338		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1339			er = EMULATE_FAIL;
1340			break;
1341		}
1342		run->mmio.len = bytes;
1343		run->mmio.is_write = 1;
1344		vcpu->mmio_needed = 1;
1345		vcpu->mmio_is_write = 1;
1346		*(u8 *) data = vcpu->arch.gprs[rt];
1347		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1348			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
1349			  *(uint8_t *) data);
1350
1351		break;
1352
1353	case sw_op:
1354		bytes = 4;
1355		if (bytes > sizeof(run->mmio.data)) {
1356			kvm_err("%s: bad MMIO length: %d\n", __func__,
1357			       run->mmio.len);
1358		}
1359		run->mmio.phys_addr =
1360		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1361						   host_cp0_badvaddr);
1362		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1363			er = EMULATE_FAIL;
1364			break;
1365		}
1366
1367		run->mmio.len = bytes;
1368		run->mmio.is_write = 1;
1369		vcpu->mmio_needed = 1;
1370		vcpu->mmio_is_write = 1;
1371		*(uint32_t *) data = vcpu->arch.gprs[rt];
1372
1373		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1374			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1375			  vcpu->arch.gprs[rt], *(uint32_t *) data);
1376		break;
1377
1378	case sh_op:
1379		bytes = 2;
1380		if (bytes > sizeof(run->mmio.data)) {
1381			kvm_err("%s: bad MMIO length: %d\n", __func__,
1382			       run->mmio.len);
1383		}
1384		run->mmio.phys_addr =
1385		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1386						   host_cp0_badvaddr);
1387		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1388			er = EMULATE_FAIL;
1389			break;
1390		}
1391
1392		run->mmio.len = bytes;
1393		run->mmio.is_write = 1;
1394		vcpu->mmio_needed = 1;
1395		vcpu->mmio_is_write = 1;
1396		*(uint16_t *) data = vcpu->arch.gprs[rt];
1397
1398		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1399			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1400			  vcpu->arch.gprs[rt], *(uint32_t *) data);
1401		break;
1402
1403	default:
 1404		kvm_err("Store not yet supported\n");
1405		er = EMULATE_FAIL;
1406		break;
1407	}
1408
1409	/* Rollback PC if emulation was unsuccessful */
1410	if (er == EMULATE_FAIL)
1411		vcpu->arch.pc = curr_pc;
1412
1413	return er;
1414}
1415
1416enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
1417					    struct kvm_run *run,
1418					    struct kvm_vcpu *vcpu)
1419{
1420	enum emulation_result er = EMULATE_DO_MMIO;
1421	int32_t op, base, rt, offset;
1422	uint32_t bytes;
1423
1424	rt = (inst >> 16) & 0x1f;
1425	base = (inst >> 21) & 0x1f;
1426	offset = inst & 0xffff;
1427	op = (inst >> 26) & 0x3f;
1428
1429	vcpu->arch.pending_load_cause = cause;
1430	vcpu->arch.io_gpr = rt;
1431
1432	switch (op) {
1433	case lw_op:
1434		bytes = 4;
1435		if (bytes > sizeof(run->mmio.data)) {
1436			kvm_err("%s: bad MMIO length: %d\n", __func__,
1437			       run->mmio.len);
1438			er = EMULATE_FAIL;
1439			break;
1440		}
1441		run->mmio.phys_addr =
1442		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1443						   host_cp0_badvaddr);
1444		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1445			er = EMULATE_FAIL;
1446			break;
1447		}
1448
1449		run->mmio.len = bytes;
1450		run->mmio.is_write = 0;
1451		vcpu->mmio_needed = 1;
1452		vcpu->mmio_is_write = 0;
1453		break;
1454
1455	case lh_op:
1456	case lhu_op:
1457		bytes = 2;
1458		if (bytes > sizeof(run->mmio.data)) {
1459			kvm_err("%s: bad MMIO length: %d\n", __func__,
1460			       run->mmio.len);
1461			er = EMULATE_FAIL;
1462			break;
1463		}
1464		run->mmio.phys_addr =
1465		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1466						   host_cp0_badvaddr);
1467		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1468			er = EMULATE_FAIL;
1469			break;
1470		}
1471
1472		run->mmio.len = bytes;
1473		run->mmio.is_write = 0;
1474		vcpu->mmio_needed = 1;
1475		vcpu->mmio_is_write = 0;
1476
1477		if (op == lh_op)
1478			vcpu->mmio_needed = 2;
1479		else
1480			vcpu->mmio_needed = 1;
1481
1482		break;
1483
1484	case lbu_op:
1485	case lb_op:
1486		bytes = 1;
1487		if (bytes > sizeof(run->mmio.data)) {
1488			kvm_err("%s: bad MMIO length: %d\n", __func__,
1489			       run->mmio.len);
1490			er = EMULATE_FAIL;
1491			break;
1492		}
1493		run->mmio.phys_addr =
1494		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1495						   host_cp0_badvaddr);
1496		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1497			er = EMULATE_FAIL;
1498			break;
1499		}
1500
1501		run->mmio.len = bytes;
1502		run->mmio.is_write = 0;
1503		vcpu->mmio_is_write = 0;
1504
1505		if (op == lb_op)
1506			vcpu->mmio_needed = 2;
1507		else
1508			vcpu->mmio_needed = 1;
1509
1510		break;
1511
1512	default:
 1513		kvm_err("Load not yet supported\n");
1514		er = EMULATE_FAIL;
1515		break;
1516	}
1517
1518	return er;
1519}
1520
1521int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
1522{
1523	unsigned long offset = (va & ~PAGE_MASK);
1524	struct kvm *kvm = vcpu->kvm;
1525	unsigned long pa;
1526	gfn_t gfn;
1527	kvm_pfn_t pfn;
1528
1529	gfn = va >> PAGE_SHIFT;
1530
1531	if (gfn >= kvm->arch.guest_pmap_npages) {
1532		kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
1533		kvm_mips_dump_host_tlbs();
1534		kvm_arch_vcpu_dump_regs(vcpu);
1535		return -1;
1536	}
1537	pfn = kvm->arch.guest_pmap[gfn];
1538	pa = (pfn << PAGE_SHIFT) | offset;
1539
1540	kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
1541		  CKSEG0ADDR(pa));
1542
1543	local_flush_icache_range(CKSEG0ADDR(pa), 32);
1544	return 0;
1545}
1546
1547enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
1548					     uint32_t cause,
1549					     struct kvm_run *run,
1550					     struct kvm_vcpu *vcpu)
1551{
1552	struct mips_coproc *cop0 = vcpu->arch.cop0;
1553	enum emulation_result er = EMULATE_DONE;
1554	int32_t offset, cache, op_inst, op, base;
1555	struct kvm_vcpu_arch *arch = &vcpu->arch;
1556	unsigned long va;
1557	unsigned long curr_pc;
1558
1559	/*
1560	 * Update PC and hold onto current PC in case there is
1561	 * an error and we want to rollback the PC
1562	 */
1563	curr_pc = vcpu->arch.pc;
1564	er = update_pc(vcpu, cause);
1565	if (er == EMULATE_FAIL)
1566		return er;
1567
1568	base = (inst >> 21) & 0x1f;
1569	op_inst = (inst >> 16) & 0x1f;
1570	offset = (int16_t)inst;
1571	cache = op_inst & CacheOp_Cache;
1572	op = op_inst & CacheOp_Op;
1573
1574	va = arch->gprs[base] + offset;
1575
1576	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1577		  cache, op, base, arch->gprs[base], offset);
1578
1579	/*
 1580	 * Treat INDEX_INV as a nop; it is basically issued by Linux on startup
 1581	 * to invalidate the caches entirely by stepping through all the
1582	 * ways/indexes
1583	 */
1584	if (op == Index_Writeback_Inv) {
1585		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1586			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1587			  arch->gprs[base], offset);
1588
1589		if (cache == Cache_D)
1590			r4k_blast_dcache();
1591		else if (cache == Cache_I)
1592			r4k_blast_icache();
1593		else {
1594			kvm_err("%s: unsupported CACHE INDEX operation\n",
1595				__func__);
1596			return EMULATE_FAIL;
1597		}
1598
1599#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1600		kvm_mips_trans_cache_index(inst, opc, vcpu);
1601#endif
1602		goto done;
1603	}
1604
1605	preempt_disable();
1606	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
1607		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
1608			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
1609	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1610		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1611		int index;
1612
1613		/* If an entry already exists then skip */
1614		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
1615			goto skip_fault;
1616
1617		/*
 1618		 * If the address is not in the guest TLB, then give the guest a
 1619		 * fault; the resulting handler will do the right thing
1620		 */
1621		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
1622						  (kvm_read_c0_guest_entryhi
1623						   (cop0) & ASID_MASK));
1624
1625		if (index < 0) {
1626			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
1627			vcpu->arch.host_cp0_badvaddr = va;
1628			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
1629							 vcpu);
1630			preempt_enable();
1631			goto dont_update_pc;
1632		} else {
1633			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1634			/*
1635			 * Check if the entry is valid, if not then setup a TLB
1636			 * invalid exception to the guest
1637			 */
1638			if (!TLB_IS_VALID(*tlb, va)) {
1639				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1640								run, vcpu);
1641				preempt_enable();
1642				goto dont_update_pc;
1643			} else {
1644				/*
1645				 * We fault an entry from the guest tlb to the
1646				 * shadow host TLB
1647				 */
1648				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
1649								     NULL,
1650								     NULL);
1651			}
1652		}
1653	} else {
1654		kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1655			cache, op, base, arch->gprs[base], offset);
1656		er = EMULATE_FAIL;
1657		preempt_enable();
1658		goto dont_update_pc;
1659
1660	}
1661
1662skip_fault:
1663	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
1664	if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
1665		flush_dcache_line(va);
1666
1667#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1668		/*
 1669		 * Replace the CACHE instruction with a SYNCI; not the same,
 1670		 * but it avoids a trap
1671		 */
1672		kvm_mips_trans_cache_va(inst, opc, vcpu);
1673#endif
1674	} else if (op_inst == Hit_Invalidate_I) {
1675		flush_dcache_line(va);
1676		flush_icache_line(va);
1677
1678#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1679		/* Replace the CACHE instruction, with a SYNCI */
1680		kvm_mips_trans_cache_va(inst, opc, vcpu);
1681#endif
1682	} else {
1683		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1684			cache, op, base, arch->gprs[base], offset);
1685		er = EMULATE_FAIL;
1686		preempt_enable();
1687		goto dont_update_pc;
1688	}
1689
1690	preempt_enable();
1691
1692dont_update_pc:
1693	/* Rollback PC */
1694	vcpu->arch.pc = curr_pc;
1695done:
1696	return er;
1697}
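
/*
 * Illustrative stand-alone sketch (not part of this file) of the effective
 * address computation in kvm_mips_emulate_cache() above: the low 16 bits of
 * the instruction word are sign-extended via the (int16_t) cast before being
 * added to the base register.  The instruction word and register value below
 * are made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t inst = 0xbc90fff0;		/* hypothetical CACHE, offset field 0xfff0 */
	int32_t offset = (int16_t)inst;		/* sign-extends to -16 */
	unsigned long base = 0x80002000;	/* pretend guest GPR value */

	printf("va = %#lx (offset %d)\n", base + offset, offset);
	return 0;	/* prints va = 0x80001ff0 (offset -16) */
}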
1698
1699enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
1700					    struct kvm_run *run,
1701					    struct kvm_vcpu *vcpu)
1702{
1703	enum emulation_result er = EMULATE_DONE;
1704	uint32_t inst;
1705
1706	/* Fetch the instruction. */
1707	if (cause & CAUSEF_BD)
1708		opc += 1;
1709
1710	inst = kvm_get_inst(opc, vcpu);
1711
1712	switch (((union mips_instruction)inst).r_format.opcode) {
1713	case cop0_op:
1714		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1715		break;
1716	case sb_op:
1717	case sh_op:
1718	case sw_op:
1719		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1720		break;
1721	case lb_op:
1722	case lbu_op:
1723	case lhu_op:
1724	case lh_op:
1725	case lw_op:
1726		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1727		break;
1728
1729	case cache_op:
1730		++vcpu->stat.cache_exits;
1731		trace_kvm_exit(vcpu, CACHE_EXITS);
1732		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1733		break;
1734
1735	default:
1736		kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
1737			inst);
1738		kvm_arch_vcpu_dump_regs(vcpu);
1739		er = EMULATE_FAIL;
1740		break;
1741	}
1742
1743	return er;
1744}
1745
1746enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
1747					       uint32_t *opc,
1748					       struct kvm_run *run,
1749					       struct kvm_vcpu *vcpu)
1750{
1751	struct mips_coproc *cop0 = vcpu->arch.cop0;
1752	struct kvm_vcpu_arch *arch = &vcpu->arch;
1753	enum emulation_result er = EMULATE_DONE;
1754
1755	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1756		/* save old pc */
1757		kvm_write_c0_guest_epc(cop0, arch->pc);
1758		kvm_set_c0_guest_status(cop0, ST0_EXL);
1759
1760		if (cause & CAUSEF_BD)
1761			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1762		else
1763			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1764
1765		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1766
1767		kvm_change_c0_guest_cause(cop0, (0xff),
1768					  (EXCCODE_SYS << CAUSEB_EXCCODE));
1769
1770		/* Set PC to the exception entry point */
1771		arch->pc = KVM_GUEST_KSEG0 + 0x180;
1772
1773	} else {
1774		kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
1775		er = EMULATE_FAIL;
1776	}
1777
1778	return er;
1779}
1780
1781enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
1782						  uint32_t *opc,
1783						  struct kvm_run *run,
1784						  struct kvm_vcpu *vcpu)
1785{
1786	struct mips_coproc *cop0 = vcpu->arch.cop0;
1787	struct kvm_vcpu_arch *arch = &vcpu->arch;
1788	unsigned long entryhi = (vcpu->arch.  host_cp0_badvaddr & VPN2_MASK) |
1789				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1790
1791	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1792		/* save old pc */
1793		kvm_write_c0_guest_epc(cop0, arch->pc);
1794		kvm_set_c0_guest_status(cop0, ST0_EXL);
1795
1796		if (cause & CAUSEF_BD)
1797			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1798		else
1799			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1800
1801		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1802			  arch->pc);
1803
1804		/* set pc to the exception entry point */
1805		arch->pc = KVM_GUEST_KSEG0 + 0x0;
1806
1807	} else {
1808		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1809			  arch->pc);
1810
1811		arch->pc = KVM_GUEST_KSEG0 + 0x180;
1812	}
1813
1814	kvm_change_c0_guest_cause(cop0, (0xff),
1815				  (EXCCODE_TLBL << CAUSEB_EXCCODE));
1816
1817	/* setup badvaddr, context and entryhi registers for the guest */
1818	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1819	/* XXXKYMA: is the context register used by linux??? */
1820	kvm_write_c0_guest_entryhi(cop0, entryhi);
1821	/* Blow away the shadow host TLBs */
1822	kvm_mips_flush_host_tlb(1);
1823
1824	return EMULATE_DONE;
1825}
1826
1827enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
1828						 uint32_t *opc,
1829						 struct kvm_run *run,
1830						 struct kvm_vcpu *vcpu)
1831{
1832	struct mips_coproc *cop0 = vcpu->arch.cop0;
1833	struct kvm_vcpu_arch *arch = &vcpu->arch;
1834	unsigned long entryhi =
1835		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1836		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1837
1838	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1839		/* save old pc */
1840		kvm_write_c0_guest_epc(cop0, arch->pc);
1841		kvm_set_c0_guest_status(cop0, ST0_EXL);
1842
1843		if (cause & CAUSEF_BD)
1844			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1845		else
1846			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1847
1848		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1849			  arch->pc);
1850
1851		/* set pc to the exception entry point */
1852		arch->pc = KVM_GUEST_KSEG0 + 0x180;
1853
1854	} else {
1855		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1856			  arch->pc);
1857		arch->pc = KVM_GUEST_KSEG0 + 0x180;
1858	}
1859
1860	kvm_change_c0_guest_cause(cop0, (0xff),
1861				  (EXCCODE_TLBL << CAUSEB_EXCCODE));
1862
1863	/* setup badvaddr, context and entryhi registers for the guest */
1864	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1865	/* XXXKYMA: is the context register used by linux??? */
1866	kvm_write_c0_guest_entryhi(cop0, entryhi);
1867	/* Blow away the shadow host TLBs */
1868	kvm_mips_flush_host_tlb(1);
1869
1870	return EMULATE_DONE;
1871}
1872
1873enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
1874						  uint32_t *opc,
1875						  struct kvm_run *run,
1876						  struct kvm_vcpu *vcpu)
1877{
1878	struct mips_coproc *cop0 = vcpu->arch.cop0;
1879	struct kvm_vcpu_arch *arch = &vcpu->arch;
1880	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1881				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1882
1883	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1884		/* save old pc */
1885		kvm_write_c0_guest_epc(cop0, arch->pc);
1886		kvm_set_c0_guest_status(cop0, ST0_EXL);
1887
1888		if (cause & CAUSEF_BD)
1889			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1890		else
1891			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1892
1893		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1894			  arch->pc);
1895
1896		/* Set PC to the exception entry point */
1897		arch->pc = KVM_GUEST_KSEG0 + 0x0;
1898	} else {
1899		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1900			  arch->pc);
1901		arch->pc = KVM_GUEST_KSEG0 + 0x180;
1902	}
1903
1904	kvm_change_c0_guest_cause(cop0, (0xff),
1905				  (EXCCODE_TLBS << CAUSEB_EXCCODE));
1906
1907	/* setup badvaddr, context and entryhi registers for the guest */
1908	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1909	/* XXXKYMA: is the context register used by linux??? */
1910	kvm_write_c0_guest_entryhi(cop0, entryhi);
1911	/* Blow away the shadow host TLBs */
1912	kvm_mips_flush_host_tlb(1);
1913
1914	return EMULATE_DONE;
1915}
1916
1917enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
1918						 uint32_t *opc,
1919						 struct kvm_run *run,
1920						 struct kvm_vcpu *vcpu)
1921{
1922	struct mips_coproc *cop0 = vcpu->arch.cop0;
1923	struct kvm_vcpu_arch *arch = &vcpu->arch;
1924	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1925		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1926
1927	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1928		/* save old pc */
1929		kvm_write_c0_guest_epc(cop0, arch->pc);
1930		kvm_set_c0_guest_status(cop0, ST0_EXL);
1931
1932		if (cause & CAUSEF_BD)
1933			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1934		else
1935			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1936
1937		kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
1938			  arch->pc);
1939
1940		/* Set PC to the exception entry point */
1941		arch->pc = KVM_GUEST_KSEG0 + 0x180;
1942	} else {
1943		kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
1944			  arch->pc);
1945		arch->pc = KVM_GUEST_KSEG0 + 0x180;
1946	}
1947
1948	kvm_change_c0_guest_cause(cop0, (0xff),
1949				  (EXCCODE_TLBS << CAUSEB_EXCCODE));
1950
1951	/* setup badvaddr, context and entryhi registers for the guest */
1952	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1953	/* XXXKYMA: is the context register used by linux??? */
1954	kvm_write_c0_guest_entryhi(cop0, entryhi);
1955	/* Blow away the shadow host TLBs */
1956	kvm_mips_flush_host_tlb(1);
1957
1958	return EMULATE_DONE;
1959}
1960
1961/* TLBMOD: store into address matching TLB with Dirty bit off */
1962enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1963					     struct kvm_run *run,
1964					     struct kvm_vcpu *vcpu)
1965{
1966	enum emulation_result er = EMULATE_DONE;
1967#ifdef DEBUG
1968	struct mips_coproc *cop0 = vcpu->arch.cop0;
1969	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1970				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1971	int index;
1972
1973	/* If address not in the guest TLB, then we are in trouble */
1974	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1975	if (index < 0) {
1976		/* XXXKYMA Invalidate and retry */
1977		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
1978		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
1979		     __func__, entryhi);
1980		kvm_mips_dump_guest_tlbs(vcpu);
1981		kvm_mips_dump_host_tlbs();
1982		return EMULATE_FAIL;
1983	}
1984#endif
1985
1986	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
1987	return er;
1988}
1989
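/*
 * Deliver a TLB modified exception (EXCCODE_MOD) to the guest: a store hit a
 * valid guest TLB entry whose Dirty bit is clear, so the guest kernel gets to
 * handle the write fault itself.
 */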
1990enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
1991					      uint32_t *opc,
1992					      struct kvm_run *run,
1993					      struct kvm_vcpu *vcpu)
1994{
1995	struct mips_coproc *cop0 = vcpu->arch.cop0;
1996	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1997				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1998	struct kvm_vcpu_arch *arch = &vcpu->arch;
1999
2000	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2001		/* save old pc */
2002		kvm_write_c0_guest_epc(cop0, arch->pc);
2003		kvm_set_c0_guest_status(cop0, ST0_EXL);
2004
2005		if (cause & CAUSEF_BD)
2006			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2007		else
2008			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2009
2010		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
2011			  arch->pc);
2012
2013		arch->pc = KVM_GUEST_KSEG0 + 0x180;
2014	} else {
2015		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
2016			  arch->pc);
2017		arch->pc = KVM_GUEST_KSEG0 + 0x180;
2018	}
2019
2020	kvm_change_c0_guest_cause(cop0, (0xff),
2021				  (EXCCODE_MOD << CAUSEB_EXCCODE));
2022
2023	/* setup badvaddr, context and entryhi registers for the guest */
2024	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2025	/* XXXKYMA: is the context register used by linux??? */
2026	kvm_write_c0_guest_entryhi(cop0, entryhi);
2027	/* Blow away the shadow host TLBs */
2028	kvm_mips_flush_host_tlb(1);
2029
2030	return EMULATE_DONE;
2031}
2032
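/*
 * Deliver a Coprocessor Unusable exception for the FPU: EXCCODE_CPU with
 * Cause.CE = 1, so the guest kernel can enable (or emulate) COP1 itself.
 */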
2033enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
2034					       uint32_t *opc,
2035					       struct kvm_run *run,
2036					       struct kvm_vcpu *vcpu)
2037{
2038	struct mips_coproc *cop0 = vcpu->arch.cop0;
2039	struct kvm_vcpu_arch *arch = &vcpu->arch;
2040
2041	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2042		/* save old pc */
2043		kvm_write_c0_guest_epc(cop0, arch->pc);
2044		kvm_set_c0_guest_status(cop0, ST0_EXL);
2045
2046		if (cause & CAUSEF_BD)
2047			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2048		else
2049			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2050
2051	}
2052
2053	arch->pc = KVM_GUEST_KSEG0 + 0x180;
2054
2055	kvm_change_c0_guest_cause(cop0, (0xff),
2056				  (EXCCODE_CPU << CAUSEB_EXCCODE));
2057	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
2058
2059	return EMULATE_DONE;
2060}
2061
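/*
 * The exception delivery helpers below (RI, BP, TRAP, MSAFPE, FPE and MSADIS)
 * all follow the same pattern: if guest EXL is clear, save the PC to EPC, set
 * EXL, record the branch delay state, set the exception code in Cause and
 * point the guest at the general exception vector; if EXL is already set,
 * delivery fails with EMULATE_FAIL.
 */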
2062enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
2063					      uint32_t *opc,
2064					      struct kvm_run *run,
2065					      struct kvm_vcpu *vcpu)
2066{
2067	struct mips_coproc *cop0 = vcpu->arch.cop0;
2068	struct kvm_vcpu_arch *arch = &vcpu->arch;
2069	enum emulation_result er = EMULATE_DONE;
2070
2071	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2072		/* save old pc */
2073		kvm_write_c0_guest_epc(cop0, arch->pc);
2074		kvm_set_c0_guest_status(cop0, ST0_EXL);
2075
2076		if (cause & CAUSEF_BD)
2077			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2078		else
2079			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2080
2081		kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
2082
2083		kvm_change_c0_guest_cause(cop0, (0xff),
2084					  (EXCCODE_RI << CAUSEB_EXCCODE));
2085
2086		/* Set PC to the exception entry point */
2087		arch->pc = KVM_GUEST_KSEG0 + 0x180;
2088
2089	} else {
2090		kvm_err("Trying to deliver RI when EXL is already set\n");
2091		er = EMULATE_FAIL;
2092	}
2093
2094	return er;
2095}
2096
2097enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
2098					      uint32_t *opc,
2099					      struct kvm_run *run,
2100					      struct kvm_vcpu *vcpu)
2101{
2102	struct mips_coproc *cop0 = vcpu->arch.cop0;
2103	struct kvm_vcpu_arch *arch = &vcpu->arch;
2104	enum emulation_result er = EMULATE_DONE;
2105
2106	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2107		/* save old pc */
2108		kvm_write_c0_guest_epc(cop0, arch->pc);
2109		kvm_set_c0_guest_status(cop0, ST0_EXL);
2110
2111		if (cause & CAUSEF_BD)
2112			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2113		else
2114			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2115
2116		kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
2117
2118		kvm_change_c0_guest_cause(cop0, (0xff),
2119					  (EXCCODE_BP << CAUSEB_EXCCODE));
2120
2121		/* Set PC to the exception entry point */
2122		arch->pc = KVM_GUEST_KSEG0 + 0x180;
2123
2124	} else {
2125		kvm_err("Trying to deliver BP when EXL is already set\n");
2126		er = EMULATE_FAIL;
2127	}
2128
2129	return er;
2130}
2131
2132enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
2133						uint32_t *opc,
2134						struct kvm_run *run,
2135						struct kvm_vcpu *vcpu)
2136{
2137	struct mips_coproc *cop0 = vcpu->arch.cop0;
2138	struct kvm_vcpu_arch *arch = &vcpu->arch;
2139	enum emulation_result er = EMULATE_DONE;
2140
2141	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2142		/* save old pc */
2143		kvm_write_c0_guest_epc(cop0, arch->pc);
2144		kvm_set_c0_guest_status(cop0, ST0_EXL);
2145
2146		if (cause & CAUSEF_BD)
2147			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2148		else
2149			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2150
2151		kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
2152
2153		kvm_change_c0_guest_cause(cop0, (0xff),
2154					  (EXCCODE_TR << CAUSEB_EXCCODE));
2155
2156		/* Set PC to the exception entry point */
2157		arch->pc = KVM_GUEST_KSEG0 + 0x180;
2158
2159	} else {
2160		kvm_err("Trying to deliver TRAP when EXL is already set\n");
2161		er = EMULATE_FAIL;
2162	}
2163
2164	return er;
2165}
2166
2167enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
2168						  uint32_t *opc,
2169						  struct kvm_run *run,
2170						  struct kvm_vcpu *vcpu)
2171{
2172	struct mips_coproc *cop0 = vcpu->arch.cop0;
2173	struct kvm_vcpu_arch *arch = &vcpu->arch;
2174	enum emulation_result er = EMULATE_DONE;
2175
2176	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2177		/* save old pc */
2178		kvm_write_c0_guest_epc(cop0, arch->pc);
2179		kvm_set_c0_guest_status(cop0, ST0_EXL);
2180
2181		if (cause & CAUSEF_BD)
2182			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2183		else
2184			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2185
2186		kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
2187
2188		kvm_change_c0_guest_cause(cop0, (0xff),
2189					  (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
2190
2191		/* Set PC to the exception entry point */
2192		arch->pc = KVM_GUEST_KSEG0 + 0x180;
2193
2194	} else {
2195		kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
2196		er = EMULATE_FAIL;
2197	}
2198
2199	return er;
2200}
2201
2202enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
2203					       uint32_t *opc,
2204					       struct kvm_run *run,
2205					       struct kvm_vcpu *vcpu)
2206{
2207	struct mips_coproc *cop0 = vcpu->arch.cop0;
2208	struct kvm_vcpu_arch *arch = &vcpu->arch;
2209	enum emulation_result er = EMULATE_DONE;
2210
2211	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2212		/* save old pc */
2213		kvm_write_c0_guest_epc(cop0, arch->pc);
2214		kvm_set_c0_guest_status(cop0, ST0_EXL);
2215
2216		if (cause & CAUSEF_BD)
2217			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2218		else
2219			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2220
2221		kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
2222
2223		kvm_change_c0_guest_cause(cop0, (0xff),
2224					  (EXCCODE_FPE << CAUSEB_EXCCODE));
2225
2226		/* Set PC to the exception entry point */
2227		arch->pc = KVM_GUEST_KSEG0 + 0x180;
2228
2229	} else {
2230		kvm_err("Trying to deliver FPE when EXL is already set\n");
2231		er = EMULATE_FAIL;
2232	}
2233
2234	return er;
2235}
2236
2237enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
2238						  uint32_t *opc,
2239						  struct kvm_run *run,
2240						  struct kvm_vcpu *vcpu)
2241{
2242	struct mips_coproc *cop0 = vcpu->arch.cop0;
2243	struct kvm_vcpu_arch *arch = &vcpu->arch;
2244	enum emulation_result er = EMULATE_DONE;
2245
2246	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2247		/* save old pc */
2248		kvm_write_c0_guest_epc(cop0, arch->pc);
2249		kvm_set_c0_guest_status(cop0, ST0_EXL);
2250
2251		if (cause & CAUSEF_BD)
2252			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2253		else
2254			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2255
2256		kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
2257
2258		kvm_change_c0_guest_cause(cop0, (0xff),
2259					  (EXCCODE_MSADIS << CAUSEB_EXCCODE));
2260
2261		/* Set PC to the exception entry point */
2262		arch->pc = KVM_GUEST_KSEG0 + 0x180;
2263
2264	} else {
2265		kvm_err("Trying to deliver MSADIS when EXL is already set\n");
2266		er = EMULATE_FAIL;
2267	}
2268
2269	return er;
2270}
2271
2272/* ll/sc, rdhwr, sync emulation */
2273
2274#define OPCODE 0xfc000000
2275#define BASE   0x03e00000
2276#define RT     0x001f0000
2277#define OFFSET 0x0000ffff
2278#define LL     0xc0000000
2279#define SC     0xe0000000
2280#define SPEC0  0x00000000
2281#define SPEC3  0x7c000000
2282#define RD     0x0000f800
2283#define FUNC   0x0000003f
2284#define SYNC   0x0000000f
2285#define RDHWR  0x0000003b
2286
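/*
 * kvm_mips_handle_ri() emulates RDHWR on behalf of the guest: the masks above
 * extract the opcode, rd and rt fields from the faulting instruction, guest
 * HWREna is checked for user-mode accesses, and the selected hardware register
 * (CPU number, SYNCI step, Count, Count resolution or UserLocal) is written to
 * the destination GPR. Anything else is reflected back to the guest as a
 * Reserved Instruction exception.
 */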
2287enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
2288					 struct kvm_run *run,
2289					 struct kvm_vcpu *vcpu)
2290{
2291	struct mips_coproc *cop0 = vcpu->arch.cop0;
2292	struct kvm_vcpu_arch *arch = &vcpu->arch;
2293	enum emulation_result er = EMULATE_DONE;
2294	unsigned long curr_pc;
2295	uint32_t inst;
2296
2297	/*
2298	 * Update PC and hold onto the current PC in case there is
2299	 * an error and we want to roll the PC back.
2300	 */
2301	curr_pc = vcpu->arch.pc;
2302	er = update_pc(vcpu, cause);
2303	if (er == EMULATE_FAIL)
2304		return er;
2305
2306	/* Fetch the instruction (the next word if in a branch delay slot). */
2307	if (cause & CAUSEF_BD)
2308		opc += 1;
2309
2310	inst = kvm_get_inst(opc, vcpu);
2311
2312	if (inst == KVM_INVALID_INST) {
2313		kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
2314		return EMULATE_FAIL;
2315	}
2316
2317	if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
2318		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2319		int rd = (inst & RD) >> 11;
2320		int rt = (inst & RT) >> 16;
2321		/* If usermode, check RDHWR rd is allowed by guest HWREna */
2322		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
2323			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
2324				  rd, opc);
2325			goto emulate_ri;
2326		}
2327		switch (rd) {
2328		case 0:	/* CPU number */
2329			arch->gprs[rt] = 0;
2330			break;
2331		case 1:	/* SYNCI length */
2332			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
2333					     current_cpu_data.icache.linesz);
2334			break;
2335		case 2:	/* Read count register */
2336			arch->gprs[rt] = kvm_mips_read_count(vcpu);
2337			break;
2338		case 3:	/* Count register resolution */
2339			switch (current_cpu_data.cputype) {
2340			case CPU_20KC:
2341			case CPU_25KF:
2342				arch->gprs[rt] = 1;
2343				break;
2344			default:
2345				arch->gprs[rt] = 2;
2346			}
2347			break;
2348		case 29:
2349			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
2350			break;
2351
2352		default:
2353			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
2354			goto emulate_ri;
2355		}
2356	} else {
2357		kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
2358		goto emulate_ri;
2359	}
2360
2361	return EMULATE_DONE;
2362
2363emulate_ri:
2364	/*
2365	 * Rollback PC (if in branch delay slot then the PC already points to
2366	 * branch target), and pass the RI exception to the guest OS.
2367	 */
2368	vcpu->arch.pc = curr_pc;
2369	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
2370}
2371
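/*
 * Complete an MMIO load once userspace has filled in run->mmio.data: the value
 * is copied into the saved destination GPR (sign-extended for 32-bit loads and
 * for 8/16-bit loads when vcpu->mmio_needed == 2, zero-extended otherwise) and
 * the PC update deferred at fault time is applied using pending_load_cause.
 */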
2372enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2373						  struct kvm_run *run)
2374{
2375	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2376	enum emulation_result er = EMULATE_DONE;
2377
2378	if (run->mmio.len > sizeof(*gpr)) {
2379		kvm_err("Bad MMIO length: %d\n", run->mmio.len);
2380		er = EMULATE_FAIL;
2381		goto done;
2382	}
2383
2384	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
2385	if (er == EMULATE_FAIL)
2386		return er;
2387
2388	switch (run->mmio.len) {
2389	case 4:
2390		*gpr = *(int32_t *) run->mmio.data;
2391		break;
2392
2393	case 2:
2394		if (vcpu->mmio_needed == 2)
2395			*gpr = *(int16_t *) run->mmio.data;
2396		else
2397			*gpr = *(uint16_t *)run->mmio.data;
2398
2399		break;
2400	case 1:
2401		if (vcpu->mmio_needed == 2)
2402			*gpr = *(int8_t *) run->mmio.data;
2403		else
2404			*gpr = *(u8 *) run->mmio.data;
2405		break;
2406	}
2407
2408	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
2409		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
2410			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
2411			  vcpu->mmio_needed);
2412
2413done:
2414	return er;
2415}
2416
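/*
 * Generic exception delivery: push the exception code taken from the host
 * Cause value into the guest untouched. Used by kvm_mips_check_privilege()
 * below to reflect privilege violations and address errors.
 */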
2417static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
2418						  uint32_t *opc,
2419						  struct kvm_run *run,
2420						  struct kvm_vcpu *vcpu)
2421{
2422	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2423	struct mips_coproc *cop0 = vcpu->arch.cop0;
2424	struct kvm_vcpu_arch *arch = &vcpu->arch;
2425	enum emulation_result er = EMULATE_DONE;
2426
2427	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2428		/* save old pc */
2429		kvm_write_c0_guest_epc(cop0, arch->pc);
2430		kvm_set_c0_guest_status(cop0, ST0_EXL);
2431
2432		if (cause & CAUSEF_BD)
2433			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2434		else
2435			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2436
2437		kvm_change_c0_guest_cause(cop0, (0xff),
2438					  (exccode << CAUSEB_EXCCODE));
2439
2440		/* Set PC to the exception entry point */
2441		arch->pc = KVM_GUEST_KSEG0 + 0x180;
2442		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2443
2444		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
2445			  exccode, kvm_read_c0_guest_epc(cop0),
2446			  kvm_read_c0_guest_badvaddr(cop0));
2447	} else {
2448		kvm_err("Trying to deliver EXC when EXL is already set\n");
2449		er = EMULATE_FAIL;
2450	}
2451
2452	return er;
2453}
2454
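/*
 * Filter exceptions taken while the guest was running in user mode: TLB
 * faults on guest kernel addresses become address errors, address errors on
 * the commpage become TLB faults, and every disallowed case is reflected into
 * the guest via kvm_mips_emulate_exc() and reported as EMULATE_PRIV_FAIL.
 */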
2455enum emulation_result kvm_mips_check_privilege(unsigned long cause,
2456					       uint32_t *opc,
2457					       struct kvm_run *run,
2458					       struct kvm_vcpu *vcpu)
2459{
2460	enum emulation_result er = EMULATE_DONE;
2461	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2462	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
2463
2464	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2465
2466	if (usermode) {
2467		switch (exccode) {
2468		case EXCCODE_INT:
2469		case EXCCODE_SYS:
2470		case EXCCODE_BP:
2471		case EXCCODE_RI:
2472		case EXCCODE_TR:
2473		case EXCCODE_MSAFPE:
2474		case EXCCODE_FPE:
2475		case EXCCODE_MSADIS:
2476			break;
2477
2478		case EXCCODE_CPU:
2479			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
2480				er = EMULATE_PRIV_FAIL;
2481			break;
2482
2483		case EXCCODE_MOD:
2484			break;
2485
2486		case EXCCODE_TLBL:
2487			/*
2488			 * If we are accessing guest kernel space, send an
2489			 * address error exception to the guest.
2490			 */
2491			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2492				kvm_debug("%s: LD MISS @ %#lx\n", __func__,
2493					  badvaddr);
2494				cause &= ~0xff;
2495				cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
2496				er = EMULATE_PRIV_FAIL;
2497			}
2498			break;
2499
2500		case EXCCODE_TLBS:
2501			/*
2502			 * If we are accessing guest kernel space, send an
2503			 * address error exception to the guest.
2504			 */
2505			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2506				kvm_debug("%s: ST MISS @ %#lx\n", __func__,
2507					  badvaddr);
2508				cause &= ~0xff;
2509				cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
2510				er = EMULATE_PRIV_FAIL;
2511			}
2512			break;
2513
2514		case EXCCODE_ADES:
2515			kvm_debug("%s: address error ST @ %#lx\n", __func__,
2516				  badvaddr);
2517			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2518				cause &= ~0xff;
2519				cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
2520			}
2521			er = EMULATE_PRIV_FAIL;
2522			break;
2523		case EXCCODE_ADEL:
2524			kvm_debug("%s: address error LD @ %#lx\n", __func__,
2525				  badvaddr);
2526			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2527				cause &= ~0xff;
2528				cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
2529			}
2530			er = EMULATE_PRIV_FAIL;
2531			break;
2532		default:
2533			er = EMULATE_PRIV_FAIL;
2534			break;
2535		}
2536	}
2537
2538	if (er == EMULATE_PRIV_FAIL)
2539		kvm_mips_emulate_exc(cause, opc, run, vcpu);
2540
2541	return er;
2542}
2543
2544/*
2545 * User Address (UA) fault; this can happen if:
2546 * (1) the TLB entry is not present/valid in both the guest and the shadow
2547 *     host TLBs, in which case we pass the fault on to the guest kernel, or
2548 * (2) the TLB entry is present in the guest TLB but not in the shadow host
2549 *     TLB, in which case we inject it from the guest TLB into the shadow.
2550 */
2551enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
2552					      uint32_t *opc,
2553					      struct kvm_run *run,
2554					      struct kvm_vcpu *vcpu)
2555{
2556	enum emulation_result er = EMULATE_DONE;
2557	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2558	unsigned long va = vcpu->arch.host_cp0_badvaddr;
2559	int index;
2560
2561	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
2562		  vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
2563
2564	/*
2565	 * KVM would not have got the exception if this entry was valid in the
2566	 * shadow host TLB. Check the Guest TLB, if the entry is not there then
2567	 * send the guest an exception. The guest exc handler should then inject
2568	 * an entry into the guest TLB.
2569	 */
2570	index = kvm_mips_guest_tlb_lookup(vcpu,
2571		      (va & VPN2_MASK) |
2572		      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK));
2573	if (index < 0) {
2574		if (exccode == EXCCODE_TLBL) {
2575			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
2576		} else if (exccode == EXCCODE_TLBS) {
2577			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2578		} else {
2579			kvm_err("%s: invalid exc code: %d\n", __func__,
2580				exccode);
2581			er = EMULATE_FAIL;
2582		}
2583	} else {
2584		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2585
2586		/*
2587		 * Check if the entry is valid, if not then setup a TLB invalid
2588		 * exception to the guest
2589		 */
2590		if (!TLB_IS_VALID(*tlb, va)) {
2591			if (exccode == EXCCODE_TLBL) {
2592				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
2593								vcpu);
2594			} else if (exccode == EXCCODE_TLBS) {
2595				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2596								vcpu);
2597			} else {
2598				kvm_err("%s: invalid exc code: %d\n", __func__,
2599					exccode);
2600				er = EMULATE_FAIL;
2601			}
2602		} else {
2603			kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2604				  tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
2605			/*
2606			 * OK we have a Guest TLB entry, now inject it into the
2607			 * shadow host TLB
2608			 */
2609			kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
2610							     NULL);
2611		}
2612	}
2613
2614	return er;
2615}