// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bsearch.h>
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "coproc.h"


/******************************************************************************
 * Co-processor emulation
 *****************************************************************************/

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       const struct coproc_params *params)
{
	WARN_ONCE(1, "CP15 write to read-only register\n");
	print_cp_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 const struct coproc_params *params)
{
	WARN_ONCE(1, "CP15 read from write-only register\n");
	print_cp_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/*
 * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some
 * cp15 registers can be viewed either as a pair of u32 registers or as
 * one u64 register. The current u64 encoding stores the least
 * significant u32 word first, followed by the most significant u32 word.
 */
static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
				       const struct coproc_reg *r,
				       u64 val)
{
	vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
	vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
}

static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
				      const struct coproc_reg *r)
{
	u64 val;

	val = vcpu_cp15(vcpu, r->reg + 1);
	val = val << 32;
	val = val | vcpu_cp15(vcpu, r->reg);
	return val;
}
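
/*
 * Worked example (illustrative only; the value is made up). The two
 * helpers above split a u64 across consecutive u32 slots, low word
 * first:
 *
 *	vcpu_cp15_reg64_set(vcpu, r, 0x1122334455667788ULL);
 *	// vcpu_cp15(vcpu, r->reg)     == 0x55667788 (low word)
 *	// vcpu_cp15(vcpu, r->reg + 1) == 0x11223344 (high word)
 *	// vcpu_cp15_reg64_get(vcpu, r) returns 0x1122334455667788ULL
 */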

int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/*
	 * We can get here if the host has been built without VFPv3
	 * support, but the guest attempted a floating point operation.
	 */
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	/*
	 * Compute guest MPIDR. We build a virtual cluster out of the
	 * vcpu_id, but we read the 'U' bit from the underlying
	 * hardware directly.
	 */
	vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
				     (vcpu->vcpu_id & 3));
}
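
/*
 * Worked example (illustrative only): with MPIDR_LEVEL_BITS == 8, a
 * vcpu_id of 5 lands in the second virtual cluster:
 *
 *	Aff1 = 5 >> 2 = 1	// cluster number
 *	Aff0 = 5 & 3  = 1	// core within the cluster
 *
 * so the guest reads MPIDR.{Aff1,Aff0} == {1,1}, with the 'U' bit
 * taken from the host.
 */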

/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
static bool access_actlr(struct kvm_vcpu *vcpu,
			 const struct coproc_params *p,
			 const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
	return true;
}

/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
static bool access_cbar(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p);
	return read_zero(vcpu, p);
}

/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
static bool access_l2ctlr(struct kvm_vcpu *vcpu,
			  const struct coproc_params *p,
			  const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
	return true;
}

static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 l2ctlr, ncores;

	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
	l2ctlr &= ~(3 << 24);
	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
	/* How many cores in the current cluster and the next ones */
	ncores -= (vcpu->vcpu_id & ~3);
	/* Cap it to the maximum number of cores in a single cluster */
	ncores = min(ncores, 3U);
	l2ctlr |= (ncores & 3) << 24;

	vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
}
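
/*
 * Worked example (illustrative only): for a 6-vcpu guest, the vcpu
 * with vcpu_id == 4 sits in the second cluster together with vcpu 5:
 *
 *	ncores = 6 - 1 = 5;
 *	ncores -= (4 & ~3);	// drop earlier clusters' 4 cores -> 1
 *	ncores = min(1U, 3U);	// 1, i.e. "number of cores minus one"
 *
 * so bits [25:24] of the virtual L2CTLR read back as 1 (two cores).
 */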

static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 actlr;

	/* ACTLR contains SMP bit: make sure you create all cpus first! */
	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
	/* Make the SMP bit consistent with the guest configuration */
	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
		actlr |= 1U << 6;
	else
		actlr &= ~(1U << 6);

	vcpu_cp15(vcpu, c1_ACTLR) = actlr;
}

/*
 * TRM entries: A7:4.3.50, A15:4.3.49
 * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
 */
static bool access_l2ectlr(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = 0;
	return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	kvm_set_way_flush(vcpu);
	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.  If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 *
 * Used by the cpu-specific code.
 */
bool access_vm_reg(struct kvm_vcpu *vcpu,
		   const struct coproc_params *p,
		   const struct coproc_reg *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
	if (p->is_64bit)
		vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	u64 reg;
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
	reg |= *vcpu_reg(vcpu, p->Rt1);

	/*
	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R access generates
	 * Group0 SGIs only, while ICC_SGI1R can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R is effectively
	 * equivalent to ICC_SGI0R, as there is no "alternative" secure
	 * group.
	 */
	switch (p->Op1) {
	default:		/* Keep GCC quiet */
	case 0:			/* ICC_SGI1R */
		g1 = true;
		break;
	case 1:			/* ICC_ASGI1R */
	case 2:			/* ICC_SGI0R */
		g1 = false;
		break;
	}

	vgic_v3_dispatch_sgi(vcpu, reg, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;

	return true;
}

static bool access_cntp_tval(struct kvm_vcpu *vcpu,
			     const struct coproc_params *p,
			     const struct coproc_reg *r)
{
	u32 val;

	if (p->is_write) {
		val = *vcpu_reg(vcpu, p->Rt1);
		kvm_arm_timer_write_sysreg(vcpu,
					   TIMER_PTIMER, TIMER_REG_TVAL, val);
	} else {
		val = kvm_arm_timer_read_sysreg(vcpu,
						TIMER_PTIMER, TIMER_REG_TVAL);
		*vcpu_reg(vcpu, p->Rt1) = val;
	}

	return true;
}

static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
			    const struct coproc_params *p,
			    const struct coproc_reg *r)
{
	u32 val;

	if (p->is_write) {
		val = *vcpu_reg(vcpu, p->Rt1);
		kvm_arm_timer_write_sysreg(vcpu,
					   TIMER_PTIMER, TIMER_REG_CTL, val);
	} else {
		val = kvm_arm_timer_read_sysreg(vcpu,
						TIMER_PTIMER, TIMER_REG_CTL);
		*vcpu_reg(vcpu, p->Rt1) = val;
	}

	return true;
}

static bool access_cntp_cval(struct kvm_vcpu *vcpu,
			     const struct coproc_params *p,
			     const struct coproc_reg *r)
{
	u64 val;

	if (p->is_write) {
		val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
		val |= *vcpu_reg(vcpu, p->Rt1);
		kvm_arm_timer_write_sysreg(vcpu,
					   TIMER_PTIMER, TIMER_REG_CVAL, val);
	} else {
		val = kvm_arm_timer_read_sysreg(vcpu,
						TIMER_PTIMER, TIMER_REG_CVAL);
		*vcpu_reg(vcpu, p->Rt1) = val;
		*vcpu_reg(vcpu, p->Rt2) = val >> 32;
	}

	return true;
}

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

#define access_pmcr trap_raz_wi
#define access_pmcntenset trap_raz_wi
#define access_pmcntenclr trap_raz_wi
#define access_pmovsr trap_raz_wi
#define access_pmselr trap_raz_wi
#define access_pmceid0 trap_raz_wi
#define access_pmceid1 trap_raz_wi
#define access_pmccntr trap_raz_wi
#define access_pmxevtyper trap_raz_wi
#define access_pmxevcntr trap_raz_wi
#define access_pmuserenr trap_raz_wi
#define access_pmintenset trap_raz_wi
#define access_pmintenclr trap_raz_wi

/* Architected CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm in the
 * user space API for 64-bit register access in line with the terminology used
 * in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRm, Op1, Op2 and with 64-bit
 *            registers preceding 32-bit ones.
 */
static const struct coproc_reg cp15_regs[] = {
	/* MPIDR: we use VMPIDR for guest access. */
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
			NULL, reset_mpidr, c0_MPIDR },

	/* CSSELR: swapped by interrupt.S. */
	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
			NULL, reset_unknown, c0_CSSELR },

	/* ACTLR: trapped by HCR.TAC bit. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
			access_actlr, reset_actlr, c1_ACTLR },

	/* CPACR: swapped by interrupt.S. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c1_CPACR, 0x00000000 },

	/* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
	{ CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
	{ CRn(2), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c2_TTBR0 },
	{ CRn(2), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c2_TTBR1 },
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
	{ CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },


	/* DACR: swapped by interrupt.S. */
	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c3_DACR },

	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_DFSR },
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_IFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_ADFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_AIFSR },

	/* DFAR/IFAR: swapped by interrupt.S. */
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c6_DFAR },
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_unknown, c6_IFAR },

	/* PAR swapped by interrupt.S */
	{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
	/*
	 * L2CTLR access (guest wants to know #CPUs).
	 */
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
			access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},

	/*
	 * Dummy performance monitor implementation.
	 */
	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_PRRR},
	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_NMRR},

	/* AMAIR0/AMAIR1: swapped by interrupt.S. */
	{ CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_AMAIR0},
	{ CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_AMAIR1},

	/* ICC_SGI1R */
	{ CRm64(12), Op1( 0), is64, access_gic_sgi},

	/* VBAR: swapped by interrupt.S. */
	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c12_VBAR, 0x00000000 },

	/* ICC_ASGI1R */
	{ CRm64(12), Op1( 1), is64, access_gic_sgi},
	/* ICC_SGI0R */
	{ CRm64(12), Op1( 2), is64, access_gic_sgi},
	/* ICC_SRE */
	{ CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre },

	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_val, c13_CID, 0x00000000 },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c13_TID_URW },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
			NULL, reset_unknown, c13_TID_URO },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
			NULL, reset_unknown, c13_TID_PRIV },

	/* CNTP */
	{ CRm64(14), Op1( 2), is64, access_cntp_cval},

	/* CNTKCTL: swapped by interrupt.S. */
	{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c14_CNTKCTL, 0x00000000 },

	/* CNTP */
	{ CRn(14), CRm( 2), Op1( 0), Op2( 0), is32, access_cntp_tval },
	{ CRn(14), CRm( 2), Op1( 0), Op2( 1), is32, access_cntp_ctl },

	/* The Configuration Base Address Register. */
	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};

static int check_reg_table(const struct coproc_reg *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
	BUG_ON(check_reg_table(table->table, table->num));
	target_tables[table->target] = table;
}

/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
	struct kvm_coproc_target_table *table;

	table = target_tables[target];
	*num = table->num;
	return table->table;
}

#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val  = (x)->CRn << 11;					\
		val |= (x)->CRm << 7;					\
		val |= (x)->Op1 << 4;					\
		val |= (x)->Op2 << 1;					\
		val |= !(x)->is_64bit;					\
		val;							\
	 })

static int match_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct coproc_reg *r = elt;

	return pval - reg_to_match_value(r);
}

static const struct coproc_reg *find_reg(const struct coproc_params *params,
					 const struct coproc_reg table[],
					 unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
}
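
/*
 * Illustrative sketch (hypothetical helper, not called anywhere): the
 * bsearch() lookup above only works because the tables are sorted in
 * exactly this key order (enforced by check_reg_table()). For the
 * 32-bit MPIDR trap (CRn 0, CRm 0, Op1 0, Op2 5), the packed key is:
 */
static unsigned long __maybe_unused example_mpidr_match_value(void)
{
	const struct coproc_params p = {
		.CRn = 0, .CRm = 0, .Op1 = 0, .Op2 = 5, .is_64bit = false,
	};

	/* (0 << 11) | (0 << 7) | (0 << 4) | (5 << 1) | 1 == 11 */
	return reg_to_match_value(&p);
}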

static int emulate_cp15(struct kvm_vcpu *vcpu,
			const struct coproc_params *params)
{
	size_t num;
	const struct coproc_reg *table, *r;

	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
				   params->CRm, params->Op2, params->is_write);

	table = get_target_table(vcpu->arch.target, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/* If we don't have an accessor, we should never get here! */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		}
	} else {
		/* If access function fails, it should complain. */
		kvm_err("Unsupported guest CP15 access at: %08lx [%08lx]\n",
			*vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		print_cp_instr(params);
		kvm_inject_undefined(vcpu);
	}

	return 1;
}

static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
{
	struct coproc_params params;

	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
	params.is_64bit = true;

	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
	params.Op2 = 0;
	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
	params.CRm = 0;

	return params;
}
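
/*
 * Worked example (illustrative, made-up HSR value): for a trapped
 * guest "mcrr p15, 0, r2, r3, c2" (a 64-bit TTBR0 write), the fields
 * decode as:
 *
 *	Op1 = (hsr >> 16) & 0xf = 0
 *	Rt2 = (hsr >> 10) & 0xf = 3	// high word in r3
 *	Rt1 = (hsr >> 5)  & 0xf = 2	// low word in r2
 *	CRn = (hsr >> 1)  & 0xf = 2	// instruction's CRm, stored as CRn
 *	is_write = !(hsr & 1)	  = true
 *
 * Note the instruction's CRm lands in params.CRn, matching the CRm64()
 * convention used by cp15_regs[].
 */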

/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params = decode_64bit_hsr(vcpu);

	return emulate_cp15(vcpu, &params);
}

/**
 * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params = decode_64bit_hsr(vcpu);

	/* raz_wi cp14 */
	trap_raz_wi(vcpu, &params, NULL);

	/* handled */
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 1;
}

static void reset_coproc_regs(struct kvm_vcpu *vcpu,
			      const struct coproc_reg *table, size_t num,
			      unsigned long *bmap)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset) {
			int reg = table[i].reg;

			table[i].reset(vcpu, &table[i]);
			if (reg > 0 && reg < NR_CP15_REGS) {
				set_bit(reg, bmap);
				if (table[i].is_64bit)
					set_bit(reg + 1, bmap);
			}
		}
}

static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
{
	struct coproc_params params;

	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
	params.is_64bit = false;

	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
	params.Rt2 = 0;

	return params;
}
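
/*
 * Worked example (illustrative, made-up HSR value): for a trapped
 * guest "mcr p15, 0, r1, c1, c0, 0" (an SCTLR write), the fields
 * decode as:
 *
 *	Op2 = (hsr >> 17) & 0x7 = 0
 *	Op1 = (hsr >> 14) & 0x7 = 0
 *	CRn = (hsr >> 10) & 0xf = 1
 *	Rt1 = (hsr >> 5)  & 0xf = 1
 *	CRm = (hsr >> 1)  & 0xf = 0
 *	is_write = !(hsr & 1)	  = true
 */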

/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params = decode_32bit_hsr(vcpu);
	return emulate_cp15(vcpu, &params);
}

/**
 * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params = decode_32bit_hsr(vcpu);

	/* raz_wi cp14 */
	trap_raz_wi(vcpu, &params, NULL);

	/* handled */
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 1;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct coproc_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U32:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM_32_CRN_MASK
			   | KVM_REG_ARM_CRM_MASK
			   | KVM_REG_ARM_OPC1_MASK
			   | KVM_REG_ARM_32_OPC2_MASK))
			return false;

		params->is_64bit = false;
		params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
			       >> KVM_REG_ARM_32_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
			       >> KVM_REG_ARM_32_OPC2_SHIFT);
		return true;
	case KVM_REG_SIZE_U64:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM_CRM_MASK
			      | KVM_REG_ARM_OPC1_MASK))
			return false;
		params->is_64bit = true;
		/* CRm to CRn: see cp15_to_index for details */
		params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = 0;
		params->CRm = 0;
		return true;
	default:
		return false;
	}
}

/* Decode an index value, and find the cp15 coproc_reg entry. */
static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
						    u64 id)
{
	size_t num;
	const struct coproc_reg *table, *r;
	struct coproc_params params;

	/* We only do cp15 for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));

	/* Not saved in the cp15 array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant cp15 registers: we let the guest see the host
 * versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
/* Unfortunately, there's no register-argument for mrc, so generate. */
#define FUNCTION_FOR32(crn, crm, op1, op2, name)			\
	static void get_##name(struct kvm_vcpu *v,			\
			       const struct coproc_reg *r)		\
	{								\
		u32 val;						\
									\
		asm volatile("mrc p15, " __stringify(op1)		\
			     ", %0, c" __stringify(crn)			\
			     ", c" __stringify(crm)			\
			     ", " __stringify(op2) "\n" : "=r" (val));	\
		((struct coproc_reg *)r)->val = val;			\
	}

FUNCTION_FOR32(0, 0, 0, 0, MIDR)
FUNCTION_FOR32(0, 0, 0, 1, CTR)
FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
FUNCTION_FOR32(0, 0, 1, 7, AIDR)

/* ->val is filled in by kvm_coproc_table_init() */
static struct coproc_reg invariant_cp15[] = {
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },

	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },

	{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },

	{ CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
};

/*
 * Reads a register value from a userspace address to a kernel
 * variable. Make sure that the register size matches sizeof(*val).
 */
static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

/*
 * Writes a register value to a userspace address from a kernel variable.
 * Make sure that the register size matches sizeof(*val).
 */
static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;
	int ret;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	ret = -ENOENT;
	if (KVM_REG_SIZE(id) == 4) {
		u32 val = r->val;

		ret = reg_to_user(uaddr, &val, id);
	} else if (KVM_REG_SIZE(id) == 8) {
		ret = reg_to_user(uaddr, &r->val, id);
	}
	return ret;
}

static int set_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;
	int err;
	u64 val;

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	err = -ENOENT;
	if (KVM_REG_SIZE(id) == 4) {
		u32 val32;

		err = reg_from_user(&val32, uaddr, id);
		if (!err)
			val = val32;
	} else if (KVM_REG_SIZE(id) == 8) {
		err = reg_from_user(&val, uaddr, id);
	}
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
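
/*
 * Worked example (illustrative cache_levels value): with a separate L1
 * (Ctype1 == 3) and a unified L2 (Ctype2 == 4), i.e. cache_levels ==
 * 0x23, the valid CSSELR values are:
 *
 *	0 (L1 data), 1 (L1 instruction), 2 (L2 unified)
 *
 * while 3 (an L2 "instruction" view) is rejected by the !(val & 1)
 * check for unified caches.
 */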

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

#ifdef CONFIG_VFPv3
static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
				   KVM_REG_ARM_VFP_FPSCR,
				   KVM_REG_ARM_VFP_FPINST,
				   KVM_REG_ARM_VFP_FPINST2,
				   KVM_REG_ARM_VFP_MVFR0,
				   KVM_REG_ARM_VFP_MVFR1,
				   KVM_REG_ARM_VFP_FPSID };

static unsigned int num_fp_regs(void)
{
	if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
		return 32;
	else
		return 16;
}

static unsigned int num_vfp_regs(void)
{
	/* Normal FP regs + control regs. */
	return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
}

static int copy_vfp_regids(u64 __user *uindices)
{
	unsigned int i;
	const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
	const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;

	for (i = 0; i < num_fp_regs(); i++) {
		if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
			     uindices))
			return -EFAULT;
		uindices++;
	}

	for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
		if (put_user(u32reg | vfp_sysregs[i], uindices))
			return -EFAULT;
		uindices++;
	}

	return num_vfp_regs();
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
				   id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
	case KVM_REG_ARM_VFP_MVFR0:
		val = fmrx(MVFR0);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_MVFR1:
		val = fmrx(MVFR1);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_FPSID:
		val = fmrx(FPSID);
		return reg_to_user(uaddr, &val, id);
	default:
		return -ENOENT;
	}
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
				     uaddr, id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
	/* These are invariant. */
	case KVM_REG_ARM_VFP_MVFR0:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR0))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_MVFR1:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR1))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_FPSID:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(FPSID))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
#else /* !CONFIG_VFPv3 */
static unsigned int num_vfp_regs(void)
{
	return 0;
}

static int copy_vfp_regids(u64 __user *uindices)
{
	return 0;
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	return -ENOENT;
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	return -ENOENT;
}
#endif /* !CONFIG_VFPv3 */

int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;
	int ret;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
		return vfp_get_reg(vcpu, reg->id, uaddr);

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return get_invariant_cp15(reg->id, uaddr);

	ret = -ENOENT;
	if (KVM_REG_SIZE(reg->id) == 8) {
		u64 val;

		val = vcpu_cp15_reg64_get(vcpu, r);
		ret = reg_to_user(uaddr, &val, reg->id);
	} else if (KVM_REG_SIZE(reg->id) == 4) {
		ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
	}

	return ret;
}

int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;
	int ret;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
		return vfp_set_reg(vcpu, reg->id, uaddr);

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return set_invariant_cp15(reg->id, uaddr);

	ret = -ENOENT;
	if (KVM_REG_SIZE(reg->id) == 8) {
		u64 val;

		ret = reg_from_user(&val, uaddr, reg->id);
		if (!ret)
			vcpu_cp15_reg64_set(vcpu, r, val);
	} else if (KVM_REG_SIZE(reg->id) == 4) {
		ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
	}

	return ret;
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 cp15_to_index(const struct coproc_reg *reg)
{
	u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
	if (reg->is_64bit) {
		val |= KVM_REG_SIZE_U64;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		/*
		 * CRn always denotes the primary coproc. reg. nr. for the
		 * in-kernel representation, but the user space API uses the
		 * CRm for the encoding, because it is modelled after the
		 * MRRC/MCRR instructions: see the ARM ARM rev. c page
		 * B3-1445
		 */
		val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
	} else {
		val |= KVM_REG_SIZE_U32;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
		val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
	}
	return val;
}
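
/*
 * Worked example (illustrative only): the 32-bit MPIDR entry in
 * cp15_regs[] (CRn 0, CRm 0, Op1 0, Op2 5) is exposed to user space as
 *
 *	KVM_REG_ARM | KVM_REG_SIZE_U32 |
 *	(15 << KVM_REG_ARM_COPROC_SHIFT) |
 *	(0 << KVM_REG_ARM_32_CRN_SHIFT) | (0 << KVM_REG_ARM_CRM_SHIFT) |
 *	(0 << KVM_REG_ARM_OPC1_SHIFT) | (5 << KVM_REG_ARM_32_OPC2_SHIFT)
 *
 * and index_to_params() inverts exactly this packing.
 */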

static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(cp15_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_coproc_table_init. */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct coproc_reg *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, &num);
	end1 = i1 + num;
	i2 = cp15_regs;
	end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_cp15)
		+ num_demux_regs()
		+ num_vfp_regs()
		+ walk_cp15(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* First give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
		if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_cp15(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	err = copy_vfp_regids(uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

void kvm_coproc_table_init(void)
{
	unsigned int i;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
		invariant_cp15[i].reset(NULL, &invariant_cp15[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
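
/*
 * Worked example (illustrative CLIDR value): a raw read of 0x0a200023
 * has Ctype1 == 3 (separate I+D) and Ctype2 == 4 (unified), with
 * Ctype3 == 0. The loop above stops at i == 2, so the mask
 * (1 << 6) - 1 reduces cache_levels to 0x23, discarding Ctype3..Ctype7
 * and the LoC/LoU fields in the upper bits.
 */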

/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct coproc_reg *table;
	DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, };

	/* Generic chip reset first (so target could override). */
	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);

	table = get_target_table(vcpu->arch.target, &num);
	reset_coproc_regs(vcpu, table, num, bmap);

	for (num = 1; num < NR_CP15_REGS; num++)
		WARN(!test_bit(num, bmap),
		     "Didn't reset vcpu_cp15(vcpu, %zi)", num);
}
v4.6
 
   1/*
   2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
   3 * Authors: Rusty Russell <rusty@rustcorp.com.au>
   4 *          Christoffer Dall <c.dall@virtualopensystems.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License, version 2, as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  18 */
  19
  20#include <linux/bsearch.h>
  21#include <linux/mm.h>
  22#include <linux/kvm_host.h>
  23#include <linux/uaccess.h>
  24#include <asm/kvm_arm.h>
  25#include <asm/kvm_host.h>
  26#include <asm/kvm_emulate.h>
  27#include <asm/kvm_coproc.h>
  28#include <asm/kvm_mmu.h>
  29#include <asm/cacheflush.h>
  30#include <asm/cputype.h>
  31#include <trace/events/kvm.h>
  32#include <asm/vfp.h>
  33#include "../vfp/vfpinstr.h"
  34
 
  35#include "trace.h"
  36#include "coproc.h"
  37
  38
  39/******************************************************************************
  40 * Co-processor emulation
  41 *****************************************************************************/
  42
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  43/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
  44static u32 cache_levels;
  45
  46/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
  47#define CSSELR_MAX 12
  48
  49/*
  50 * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some
  51 * of cp15 registers can be viewed either as couple of two u32 registers
  52 * or one u64 register. Current u64 register encoding is that least
  53 * significant u32 word is followed by most significant u32 word.
  54 */
  55static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
  56				       const struct coproc_reg *r,
  57				       u64 val)
  58{
  59	vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
  60	vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
  61}
  62
  63static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
  64				      const struct coproc_reg *r)
  65{
  66	u64 val;
  67
  68	val = vcpu_cp15(vcpu, r->reg + 1);
  69	val = val << 32;
  70	val = val | vcpu_cp15(vcpu, r->reg);
  71	return val;
  72}
  73
  74int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
  75{
  76	kvm_inject_undefined(vcpu);
  77	return 1;
  78}
  79
  80int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
  81{
  82	/*
  83	 * We can get here, if the host has been built without VFPv3 support,
  84	 * but the guest attempted a floating point operation.
  85	 */
  86	kvm_inject_undefined(vcpu);
  87	return 1;
  88}
  89
  90int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
  91{
  92	kvm_inject_undefined(vcpu);
  93	return 1;
  94}
  95
  96int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
  97{
  98	kvm_inject_undefined(vcpu);
  99	return 1;
 100}
 101
 102static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 103{
 104	/*
 105	 * Compute guest MPIDR. We build a virtual cluster out of the
 106	 * vcpu_id, but we read the 'U' bit from the underlying
 107	 * hardware directly.
 108	 */
 109	vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
 110				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
 111				     (vcpu->vcpu_id & 3));
 112}
 113
 114/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
 115static bool access_actlr(struct kvm_vcpu *vcpu,
 116			 const struct coproc_params *p,
 117			 const struct coproc_reg *r)
 118{
 119	if (p->is_write)
 120		return ignore_write(vcpu, p);
 121
 122	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
 123	return true;
 124}
 125
 126/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
 127static bool access_cbar(struct kvm_vcpu *vcpu,
 128			const struct coproc_params *p,
 129			const struct coproc_reg *r)
 130{
 131	if (p->is_write)
 132		return write_to_read_only(vcpu, p);
 133	return read_zero(vcpu, p);
 134}
 135
 136/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
 137static bool access_l2ctlr(struct kvm_vcpu *vcpu,
 138			  const struct coproc_params *p,
 139			  const struct coproc_reg *r)
 140{
 141	if (p->is_write)
 142		return ignore_write(vcpu, p);
 143
 144	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
 145	return true;
 146}
 147
 148static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 149{
 150	u32 l2ctlr, ncores;
 151
 152	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
 153	l2ctlr &= ~(3 << 24);
 154	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
 155	/* How many cores in the current cluster and the next ones */
 156	ncores -= (vcpu->vcpu_id & ~3);
 157	/* Cap it to the maximum number of cores in a single cluster */
 158	ncores = min(ncores, 3U);
 159	l2ctlr |= (ncores & 3) << 24;
 160
 161	vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
 162}
 163
 164static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 165{
 166	u32 actlr;
 167
 168	/* ACTLR contains SMP bit: make sure you create all cpus first! */
 169	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
 170	/* Make the SMP bit consistent with the guest configuration */
 171	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
 172		actlr |= 1U << 6;
 173	else
 174		actlr &= ~(1U << 6);
 175
 176	vcpu_cp15(vcpu, c1_ACTLR) = actlr;
 177}
 178
 179/*
 180 * TRM entries: A7:4.3.50, A15:4.3.49
 181 * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
 182 */
 183static bool access_l2ectlr(struct kvm_vcpu *vcpu,
 184			   const struct coproc_params *p,
 185			   const struct coproc_reg *r)
 186{
 187	if (p->is_write)
 188		return ignore_write(vcpu, p);
 189
 190	*vcpu_reg(vcpu, p->Rt1) = 0;
 191	return true;
 192}
 193
 194/*
 195 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 196 */
 197static bool access_dcsw(struct kvm_vcpu *vcpu,
 198			const struct coproc_params *p,
 199			const struct coproc_reg *r)
 200{
 201	if (!p->is_write)
 202		return read_from_write_only(vcpu, p);
 203
 204	kvm_set_way_flush(vcpu);
 205	return true;
 206}
 207
 208/*
 209 * Generic accessor for VM registers. Only called as long as HCR_TVM
 210 * is set.  If the guest enables the MMU, we stop trapping the VM
 211 * sys_regs and leave it in complete control of the caches.
 212 *
 213 * Used by the cpu-specific code.
 214 */
 215bool access_vm_reg(struct kvm_vcpu *vcpu,
 216		   const struct coproc_params *p,
 217		   const struct coproc_reg *r)
 218{
 219	bool was_enabled = vcpu_has_cache_enabled(vcpu);
 220
 221	BUG_ON(!p->is_write);
 222
 223	vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
 224	if (p->is_64bit)
 225		vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);
 226
 227	kvm_toggle_cache(vcpu, was_enabled);
 228	return true;
 229}
 230
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 231/*
 232 * We could trap ID_DFR0 and tell the guest we don't support performance
 233 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 234 * NAKed, so it will read the PMCR anyway.
 235 *
 236 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 237 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 238 * all PM registers, which doesn't crash the guest kernel at least.
 239 */
 240static bool pm_fake(struct kvm_vcpu *vcpu,
 241		    const struct coproc_params *p,
 242		    const struct coproc_reg *r)
 243{
 244	if (p->is_write)
 245		return ignore_write(vcpu, p);
 246	else
 247		return read_zero(vcpu, p);
 248}
 249
 250#define access_pmcr pm_fake
 251#define access_pmcntenset pm_fake
 252#define access_pmcntenclr pm_fake
 253#define access_pmovsr pm_fake
 254#define access_pmselr pm_fake
 255#define access_pmceid0 pm_fake
 256#define access_pmceid1 pm_fake
 257#define access_pmccntr pm_fake
 258#define access_pmxevtyper pm_fake
 259#define access_pmxevcntr pm_fake
 260#define access_pmuserenr pm_fake
 261#define access_pmintenset pm_fake
 262#define access_pmintenclr pm_fake
 263
 264/* Architected CP15 registers.
 265 * CRn denotes the primary register number, but is copied to the CRm in the
 266 * user space API for 64-bit register access in line with the terminology used
 267 * in the ARM ARM.
 268 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
 269 *            registers preceding 32-bit ones.
 270 */
 271static const struct coproc_reg cp15_regs[] = {
 272	/* MPIDR: we use VMPIDR for guest access. */
 273	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
 274			NULL, reset_mpidr, c0_MPIDR },
 275
 276	/* CSSELR: swapped by interrupt.S. */
 277	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
 278			NULL, reset_unknown, c0_CSSELR },
 279
 280	/* ACTLR: trapped by HCR.TAC bit. */
 281	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
 282			access_actlr, reset_actlr, c1_ACTLR },
 283
 284	/* CPACR: swapped by interrupt.S. */
 285	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
 286			NULL, reset_val, c1_CPACR, 0x00000000 },
 287
 288	/* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
 289	{ CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
 290	{ CRn(2), CRm( 0), Op1( 0), Op2( 0), is32,
 291			access_vm_reg, reset_unknown, c2_TTBR0 },
 292	{ CRn(2), CRm( 0), Op1( 0), Op2( 1), is32,
 293			access_vm_reg, reset_unknown, c2_TTBR1 },
 294	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
 295			access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
 296	{ CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },
 297
 298
 299	/* DACR: swapped by interrupt.S. */
 300	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
 301			access_vm_reg, reset_unknown, c3_DACR },
 302
 303	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
 304	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
 305			access_vm_reg, reset_unknown, c5_DFSR },
 306	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
 307			access_vm_reg, reset_unknown, c5_IFSR },
 308	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
 309			access_vm_reg, reset_unknown, c5_ADFSR },
 310	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
 311			access_vm_reg, reset_unknown, c5_AIFSR },
 312
 313	/* DFAR/IFAR: swapped by interrupt.S. */
 314	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
 315			access_vm_reg, reset_unknown, c6_DFAR },
 316	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
 317			access_vm_reg, reset_unknown, c6_IFAR },
 318
 319	/* PAR swapped by interrupt.S */
 320	{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
 321
 322	/*
 323	 * DC{C,I,CI}SW operations:
 324	 */
 325	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
 326	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
 327	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
 328	/*
 329	 * L2CTLR access (guest wants to know #CPUs).
 330	 */
 331	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
 332			access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
 333	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
 334
 335	/*
 336	 * Dummy performance monitor implementation.
 337	 */
 338	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
 339	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
 340	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
 341	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
 342	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
 343	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
 344	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
 345	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
 346	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
 347	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
 348	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
 349	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
 350	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},
 351
 352	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
 353	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
 354			access_vm_reg, reset_unknown, c10_PRRR},
 355	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
 356			access_vm_reg, reset_unknown, c10_NMRR},
 357
 358	/* AMAIR0/AMAIR1: swapped by interrupt.S. */
 359	{ CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
 360			access_vm_reg, reset_unknown, c10_AMAIR0},
 361	{ CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
 362			access_vm_reg, reset_unknown, c10_AMAIR1},
 363
 364	/* VBAR: swapped by interrupt.S. */
 365	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
 366			NULL, reset_val, c12_VBAR, 0x00000000 },
 367
 368	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
 369	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
 370			access_vm_reg, reset_val, c13_CID, 0x00000000 },
 371	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
 372			NULL, reset_unknown, c13_TID_URW },
 373	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
 374			NULL, reset_unknown, c13_TID_URO },
 375	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
 376			NULL, reset_unknown, c13_TID_PRIV },
 377
 378	/* CNTKCTL: swapped by interrupt.S. */
 379	{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
 380			NULL, reset_val, c14_CNTKCTL, 0x00000000 },
 381
 382	/* The Configuration Base Address Register. */
 383	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
 384};
 385
 386static int check_reg_table(const struct coproc_reg *table, unsigned int n)
 387{
 388	unsigned int i;
 389
 390	for (i = 1; i < n; i++) {
 391		if (cmp_reg(&table[i-1], &table[i]) >= 0) {
 392			kvm_err("reg table %p out of order (%d)\n", table, i - 1);
 393			return 1;
 394		}
 395	}
 396
 397	return 0;
 398}
 399
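/*
 * Editor's sketch (not part of the original file): a table violating
 * the ordering rule above.  cmp_reg() sorts 64-bit entries before
 * 32-bit ones with the same coordinates, so check_reg_table() would
 * return 1 for this layout.
 */
#if 0
static const struct coproc_reg bad_order[] = {
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c2_TTBR0 },
	/* Too late: the 64-bit view must precede the 32-bit one. */
	{ CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
};
#endif
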
 400/* Target specific emulation tables */
 401static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
 402
 403void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
 404{
 405	BUG_ON(check_reg_table(table->table, table->num));
 406	target_tables[table->target] = table;
 407}
 408
 409/* Get specific register table for this target. */
 410static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
 411{
 412	struct kvm_coproc_target_table *table;
 413
 414	table = target_tables[target];
 415	*num = table->num;
 416	return table->table;
 417}
 418
 419#define reg_to_match_value(x)						\
 420	({								\
 421		unsigned long val;					\
 422		val  = (x)->CRn << 11;					\
 423		val |= (x)->CRm << 7;					\
 424		val |= (x)->Op1 << 4;					\
 425		val |= (x)->Op2 << 1;					\
 426		val |= !(x)->is_64bit;					\
 427		val;							\
 428	 })
 429
 430static int match_reg(const void *key, const void *elt)
 431{
 432	const unsigned long pval = (unsigned long)key;
 433	const struct coproc_reg *r = elt;
 434
 435	return pval - reg_to_match_value(r);
 436}
 437
 438static const struct coproc_reg *find_reg(const struct coproc_params *params,
 439					 const struct coproc_reg table[],
 440					 unsigned int num)
 441{
 442	unsigned long pval = reg_to_match_value(params);
 443
 444	return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
 445}
 446
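/*
 * Editor's note (not part of the original file): the key packs the
 * coordinates into one scalar so bsearch() can compare entries by
 * subtraction: bits [14:11] CRn, [10:7] CRm, [6:4] Op1, [3:1] Op2,
 * and bit [0] set for 32-bit entries.  For example, the 32-bit TTBCR
 * entry (CRn=2, CRm=0, Op1=0, Op2=2) keys to (2 << 11) | (2 << 1) | 1
 * == 0x1005, while the 64-bit TTBR0 entry keys to 0x1000 and thus
 * sorts first.
 */
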
 447static int emulate_cp15(struct kvm_vcpu *vcpu,
 448			const struct coproc_params *params)
 449{
 450	size_t num;
 451	const struct coproc_reg *table, *r;
 452
 453	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
 454				   params->CRm, params->Op2, params->is_write);
 455
 456	table = get_target_table(vcpu->arch.target, &num);
 457
 458	/* Search target-specific then generic table. */
 459	r = find_reg(params, table, num);
 460	if (!r)
 461		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
 462
 463	if (likely(r)) {
 464		/* If we don't have an accessor, we should never get here! */
 465		BUG_ON(!r->access);
 466
 467		if (likely(r->access(vcpu, params, r))) {
 468			/* Skip instruction, since it was emulated */
 469			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 470			return 1;
 471		}
 472		/* If access function fails, it should complain. */
 473	} else {
 474		kvm_err("Unsupported guest CP15 access at: %08lx\n",
 475			*vcpu_pc(vcpu));
 476		print_cp_instr(params);
 477	}
 478	kvm_inject_undefined(vcpu);
 479	return 1;
 480}
 481
 482/**
 483 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 484 * @vcpu: The VCPU pointer
 485 * @run:  The kvm_run struct
 486 */
 487int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 488{
 489	struct coproc_params params;
 490
 491	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
 492	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
 493	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
 494	params.is_64bit = true;
 495
 496	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
 497	params.Op2 = 0;
 498	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 499	params.CRm = 0;
 500
 501	return emulate_cp15(vcpu, &params);
 502}
 503
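/*
 * Editor's note (not part of the original file): the decode above
 * follows the HSR ISS layout for 64-bit CP15 traps (MCRR/MRRC,
 * EC = 0x04):
 *
 *   [19:16] Opc1   [14:10] Rt2   [9:5] Rt   [4:1] CRm   [0] Direction
 *
 * Direction is 1 for a read (MRRC), hence the "== 0" test for
 * is_write.  The architectural CRm lands in params.CRn to match the
 * CRn/CRm convention documented above cp15_regs[].
 */
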
 504static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 505			      const struct coproc_reg *table, size_t num)
 506{
 507	unsigned long i;
 508
 509	for (i = 0; i < num; i++)
 510		if (table[i].reset)
 511			table[i].reset(vcpu, &table[i]);
 512}
 513
 514/**
 515 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 516 * @vcpu: The VCPU pointer
 517 * @run:  The kvm_run struct
 518 */
 519int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 520{
 521	struct coproc_params params;
 522
 523	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
 524	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
 525	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
 526	params.is_64bit = false;
 527
 528	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 529	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
 530	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
 531	params.Rt2 = 0;
 532
 533	return emulate_cp15(vcpu, &params);
 534}
 535
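/*
 * Editor's note (not part of the original file): for 32-bit CP15 traps
 * (MCR/MRC, EC = 0x03) the HSR ISS fields decoded above are:
 *
 *   [19:17] Opc2   [16:14] Opc1   [13:10] CRn   [9:5] Rt   [4:1] CRm
 *   [0] Direction (1 for a read)
 */
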
 536/******************************************************************************
 537 * Userspace API
 538 *****************************************************************************/
 539
 540static bool index_to_params(u64 id, struct coproc_params *params)
 541{
 542	switch (id & KVM_REG_SIZE_MASK) {
 543	case KVM_REG_SIZE_U32:
 544		/* Any unused index bits means it's not valid. */
 545		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
 546			   | KVM_REG_ARM_COPROC_MASK
 547			   | KVM_REG_ARM_32_CRN_MASK
 548			   | KVM_REG_ARM_CRM_MASK
 549			   | KVM_REG_ARM_OPC1_MASK
 550			   | KVM_REG_ARM_32_OPC2_MASK))
 551			return false;
 552
 553		params->is_64bit = false;
 554		params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
 555			       >> KVM_REG_ARM_32_CRN_SHIFT);
 556		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
 557			       >> KVM_REG_ARM_CRM_SHIFT);
 558		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
 559			       >> KVM_REG_ARM_OPC1_SHIFT);
 560		params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
 561			       >> KVM_REG_ARM_32_OPC2_SHIFT);
 562		return true;
 563	case KVM_REG_SIZE_U64:
 564		/* Any unused index bits means it's not valid. */
 565		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
 566			      | KVM_REG_ARM_COPROC_MASK
 567			      | KVM_REG_ARM_CRM_MASK
 568			      | KVM_REG_ARM_OPC1_MASK))
 569			return false;
 570		params->is_64bit = true;
 571		/* CRm to CRn: see cp15_to_index for details */
 572		params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
 573			       >> KVM_REG_ARM_CRM_SHIFT);
 574		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
 575			       >> KVM_REG_ARM_OPC1_SHIFT);
 576		params->Op2 = 0;
 577		params->CRm = 0;
 578		return true;
 579	default:
 580		return false;
 581	}
 582}
 583
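/*
 * Editor's sketch (not part of the original file): how userspace
 * builds an index that index_to_params() accepts.  Illustrative only;
 * it assumes a VCPU fd obtained via KVM_CREATE_VCPU and uses the
 * constants from the uapi headers.
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int get_guest_ttbcr(int vcpu_fd, __u32 *val)
{
	/* 32-bit TTBCR: coproc 15, CRn=2, CRm=0, Op1=0, Op2=2. */
	struct kvm_one_reg reg = {
		.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
		      (15 << KVM_REG_ARM_COPROC_SHIFT) |
		      (2 << KVM_REG_ARM_32_CRN_SHIFT) |
		      (2 << KVM_REG_ARM_32_OPC2_SHIFT),
		.addr = (__u64)(unsigned long)val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
#endif
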
 584/* Decode an index value, and find the cp15 coproc_reg entry. */
 585static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
 586						    u64 id)
 587{
 588	size_t num;
 589	const struct coproc_reg *table, *r;
 590	struct coproc_params params;
 591
 592	/* We only do cp15 for now. */
 593	if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
 594		return NULL;
 595
 596	if (!index_to_params(id, &params))
 597		return NULL;
 598
 599	table = get_target_table(vcpu->arch.target, &num);
 600	r = find_reg(&params, table, num);
 601	if (!r)
 602		r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));
 603
 604	/* Not saved in the cp15 array? */
 605	if (r && !r->reg)
 606		r = NULL;
 607
 608	return r;
 609}
 610
 611/*
 612 * These are the invariant cp15 registers: we let the guest see the host
 613 * versions of these, so they're part of the guest state.
 614 *
 615 * A future CPU may provide a mechanism to present different values to
 616 * the guest, or a future kvm may trap them.
 617 */
 618/* Unfortunately, there's no register argument to mrc, so we generate accessors. */
 619#define FUNCTION_FOR32(crn, crm, op1, op2, name)			\
 620	static void get_##name(struct kvm_vcpu *v,			\
 621			       const struct coproc_reg *r)		\
 622	{								\
 623		u32 val;						\
 624									\
 625		asm volatile("mrc p15, " __stringify(op1)		\
 626			     ", %0, c" __stringify(crn)			\
 627			     ", c" __stringify(crm)			\
 628			     ", " __stringify(op2) "\n" : "=r" (val));	\
 629		((struct coproc_reg *)r)->val = val;			\
 630	}
 631
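/*
 * Editor's note (not part of the original file): FUNCTION_FOR32(0, 0,
 * 0, 0, MIDR) below expands, modulo stringification, to roughly this,
 * capturing the host's MIDR into the (logically const) table entry:
 */
#if 0
static void get_MIDR(struct kvm_vcpu *v, const struct coproc_reg *r)
{
	u32 val;

	asm volatile("mrc p15, 0, %0, c0, c0, 0\n" : "=r" (val));
	((struct coproc_reg *)r)->val = val;
}
#endif
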
 632FUNCTION_FOR32(0, 0, 0, 0, MIDR)
 633FUNCTION_FOR32(0, 0, 0, 1, CTR)
 634FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
 635FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
 636FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
 637FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
 638FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
 639FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
 640FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
 641FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
 642FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
 643FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
 644FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
 645FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
 646FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
 647FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
 648FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
 649FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
 650FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
 651FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
 652FUNCTION_FOR32(0, 0, 1, 7, AIDR)
 653
 654/* ->val is filled in by kvm_coproc_table_init() */
 655static struct coproc_reg invariant_cp15[] = {
 656	{ CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
 657	{ CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
 658	{ CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
 659	{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
 660	{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },
 661
 662	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
 663	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
 664
 665	{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
 666	{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
 667	{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
 668	{ CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
 669	{ CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
 670	{ CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
 671	{ CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
 672	{ CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },
 673
 674	{ CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
 675	{ CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
 676	{ CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
 677	{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
 678	{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
 679	{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
 680};
 681
 682/*
 683 * Reads a register value from a userspace address into a kernel
 684 * variable. The caller must ensure KVM_REG_SIZE(id) matches its size.
 685 */
 686static int reg_from_user(void *val, const void __user *uaddr, u64 id)
 687{
 688	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
 689		return -EFAULT;
 690	return 0;
 691}
 692
 693/*
 694 * Writes a register value to a userspace address from a kernel variable.
 695 * The caller must ensure KVM_REG_SIZE(id) matches the variable's size.
 696 */
 697static int reg_to_user(void __user *uaddr, const void *val, u64 id)
 698{
 699	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
 700		return -EFAULT;
 701	return 0;
 702}
 703
 704static int get_invariant_cp15(u64 id, void __user *uaddr)
 705{
 706	struct coproc_params params;
 707	const struct coproc_reg *r;
 708	int ret;
 709
 710	if (!index_to_params(id, &params))
 711		return -ENOENT;
 712
 713	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
 714	if (!r)
 715		return -ENOENT;
 716
 717	ret = -ENOENT;
 718	if (KVM_REG_SIZE(id) == 4) {
 719		u32 val = r->val;
 720
 721		ret = reg_to_user(uaddr, &val, id);
 722	} else if (KVM_REG_SIZE(id) == 8) {
 723		ret = reg_to_user(uaddr, &r->val, id);
 724	}
 725	return ret;
 726}
 727
 728static int set_invariant_cp15(u64 id, void __user *uaddr)
 729{
 730	struct coproc_params params;
 731	const struct coproc_reg *r;
 732	int err;
 733	u64 val;
 734
 735	if (!index_to_params(id, &params))
 736		return -ENOENT;
 737	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
 738	if (!r)
 739		return -ENOENT;
 740
 741	err = -ENOENT;
 742	if (KVM_REG_SIZE(id) == 4) {
 743		u32 val32;
 744
 745		err = reg_from_user(&val32, uaddr, id);
 746		if (!err)
 747			val = val32;
 748	} else if (KVM_REG_SIZE(id) == 8) {
 749		err = reg_from_user(&val, uaddr, id);
 750	}
 751	if (err)
 752		return err;
 753
 754	/* This is what we mean by invariant: you can't change it. */
 755	if (r->val != val)
 756		return -EINVAL;
 757
 758	return 0;
 759}
 760
 761static bool is_valid_cache(u32 val)
 762{
 763	u32 level, ctype;
 764
 765	if (val >= CSSELR_MAX)
 766		return false;
 767
 768	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
  769	level = (val >> 1);
  770	ctype = (cache_levels >> (level * 3)) & 7;
 771
 772	switch (ctype) {
 773	case 0: /* No cache */
 774		return false;
 775	case 1: /* Instruction cache only */
 776		return (val & 1);
 777	case 2: /* Data cache only */
 778	case 4: /* Unified cache */
 779		return !(val & 1);
 780	case 3: /* Separate instruction and data caches */
 781		return true;
 782	default: /* Reserved: we can't know instruction or data. */
 783		return false;
 784	}
 785}
 786
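/*
 * Editor's sketch (not part of the original file): with separate L1
 * caches and a unified L2 (Ctype1 = 0b011, Ctype2 = 0b100, so
 * cache_levels = 0x23), the CSSELR values map as:
 *
 *   0 (L1 data)  -> ctype 3 -> valid
 *   1 (L1 insn)  -> ctype 3 -> valid
 *   2 (L2 data)  -> ctype 4 -> valid (unified caches use the D-side view)
 *   3 (L2 insn)  -> ctype 4 -> invalid
 *   4 and up     -> ctype 0 -> invalid
 */
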
 787/* Which cache CCSIDR represents depends on CSSELR value. */
 788static u32 get_ccsidr(u32 csselr)
 789{
 790	u32 ccsidr;
 791
  792	/* Make sure no one else changes CSSELR during this! */
 793	local_irq_disable();
 794	/* Put value into CSSELR */
 795	asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
 796	isb();
 797	/* Read result out of CCSIDR */
 798	asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
 799	local_irq_enable();
 800
 801	return ccsidr;
 802}
 803
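/*
 * Editor's sketch (not part of the original file): decoding the ARMv7
 * CCSIDR value returned above (ARM ARM B4.1.19).  The set and
 * associativity fields encode value - 1; the line size encodes
 * log2(words) - 2.
 */
#if 0
static void decode_ccsidr(u32 ccsidr)
{
	u32 line_size = 1 << ((ccsidr & 0x7) + 4);	/* bytes */
	u32 assoc     = ((ccsidr >> 3) & 0x3ff) + 1;
	u32 num_sets  = ((ccsidr >> 13) & 0x7fff) + 1;
}
#endif
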
 804static int demux_c15_get(u64 id, void __user *uaddr)
 805{
 806	u32 val;
 807	u32 __user *uval = uaddr;
 808
 809	/* Fail if we have unknown bits set. */
 810	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
 811		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
 812		return -ENOENT;
 813
 814	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
 815	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
 816		if (KVM_REG_SIZE(id) != 4)
 817			return -ENOENT;
 818		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
 819			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
 820		if (!is_valid_cache(val))
 821			return -ENOENT;
 822
 823		return put_user(get_ccsidr(val), uval);
 824	default:
 825		return -ENOENT;
 826	}
 827}
 828
 829static int demux_c15_set(u64 id, void __user *uaddr)
 830{
 831	u32 val, newval;
 832	u32 __user *uval = uaddr;
 833
 834	/* Fail if we have unknown bits set. */
 835	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
 836		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
 837		return -ENOENT;
 838
 839	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
 840	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
 841		if (KVM_REG_SIZE(id) != 4)
 842			return -ENOENT;
 843		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
 844			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
 845		if (!is_valid_cache(val))
 846			return -ENOENT;
 847
 848		if (get_user(newval, uval))
 849			return -EFAULT;
 850
 851		/* This is also invariant: you can't change it. */
 852		if (newval != get_ccsidr(val))
 853			return -EINVAL;
 854		return 0;
 855	default:
 856		return -ENOENT;
 857	}
 858}
 859
 860#ifdef CONFIG_VFPv3
 861static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
 862				   KVM_REG_ARM_VFP_FPSCR,
 863				   KVM_REG_ARM_VFP_FPINST,
 864				   KVM_REG_ARM_VFP_FPINST2,
 865				   KVM_REG_ARM_VFP_MVFR0,
 866				   KVM_REG_ARM_VFP_MVFR1,
 867				   KVM_REG_ARM_VFP_FPSID };
 868
 869static unsigned int num_fp_regs(void)
 870{
 871	if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
 872		return 32;
 873	else
 874		return 16;
 875}
 876
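/*
 * Editor's note (not part of the original file): per the ARM ARM,
 * MVFR0[3:0] (the A_SIMD field read above) is 0b0010 on D32
 * implementations (32 double-precision registers) and 0b0001 on D16
 * ones, hence the 32/16 split.
 */
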
 877static unsigned int num_vfp_regs(void)
 878{
 879	/* Normal FP regs + control regs. */
 880	return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
 881}
 882
 883static int copy_vfp_regids(u64 __user *uindices)
 884{
 885	unsigned int i;
 886	const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
 887	const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
 888
 889	for (i = 0; i < num_fp_regs(); i++) {
 890		if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
 891			     uindices))
 892			return -EFAULT;
 893		uindices++;
 894	}
 895
 896	for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
 897		if (put_user(u32reg | vfp_sysregs[i], uindices))
 898			return -EFAULT;
 899		uindices++;
 900	}
 901
 902	return num_vfp_regs();
 903}
 904
 905static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
 906{
 907	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
 908	u32 val;
 909
 910	/* Fail if we have unknown bits set. */
 911	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
 912		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
 913		return -ENOENT;
 914
 915	if (vfpid < num_fp_regs()) {
 916		if (KVM_REG_SIZE(id) != 8)
 917			return -ENOENT;
 918		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
 919				   id);
 920	}
 921
 922	/* FP control registers are all 32 bit. */
 923	if (KVM_REG_SIZE(id) != 4)
 924		return -ENOENT;
 925
 926	switch (vfpid) {
 927	case KVM_REG_ARM_VFP_FPEXC:
 928		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
 929	case KVM_REG_ARM_VFP_FPSCR:
 930		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
 931	case KVM_REG_ARM_VFP_FPINST:
 932		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
 933	case KVM_REG_ARM_VFP_FPINST2:
 934		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
 935	case KVM_REG_ARM_VFP_MVFR0:
 936		val = fmrx(MVFR0);
 937		return reg_to_user(uaddr, &val, id);
 938	case KVM_REG_ARM_VFP_MVFR1:
 939		val = fmrx(MVFR1);
 940		return reg_to_user(uaddr, &val, id);
 941	case KVM_REG_ARM_VFP_FPSID:
 942		val = fmrx(FPSID);
 943		return reg_to_user(uaddr, &val, id);
 944	default:
 945		return -ENOENT;
 946	}
 947}
 948
 949static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
 950{
 951	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
 952	u32 val;
 953
 954	/* Fail if we have unknown bits set. */
 955	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
 956		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
 957		return -ENOENT;
 958
 959	if (vfpid < num_fp_regs()) {
 960		if (KVM_REG_SIZE(id) != 8)
 961			return -ENOENT;
 962		return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
 963				     uaddr, id);
 964	}
 965
 966	/* FP control registers are all 32 bit. */
 967	if (KVM_REG_SIZE(id) != 4)
 968		return -ENOENT;
 969
 970	switch (vfpid) {
 971	case KVM_REG_ARM_VFP_FPEXC:
 972		return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
 973	case KVM_REG_ARM_VFP_FPSCR:
 974		return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
 975	case KVM_REG_ARM_VFP_FPINST:
 976		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
 977	case KVM_REG_ARM_VFP_FPINST2:
 978		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
 979	/* These are invariant. */
 980	case KVM_REG_ARM_VFP_MVFR0:
 981		if (reg_from_user(&val, uaddr, id))
 982			return -EFAULT;
 983		if (val != fmrx(MVFR0))
 984			return -EINVAL;
 985		return 0;
 986	case KVM_REG_ARM_VFP_MVFR1:
 987		if (reg_from_user(&val, uaddr, id))
 988			return -EFAULT;
 989		if (val != fmrx(MVFR1))
 990			return -EINVAL;
 991		return 0;
 992	case KVM_REG_ARM_VFP_FPSID:
 993		if (reg_from_user(&val, uaddr, id))
 994			return -EFAULT;
 995		if (val != fmrx(FPSID))
 996			return -EINVAL;
 997		return 0;
 998	default:
 999		return -ENOENT;
1000	}
1001}
1002#else /* !CONFIG_VFPv3 */
1003static unsigned int num_vfp_regs(void)
1004{
1005	return 0;
1006}
1007
1008static int copy_vfp_regids(u64 __user *uindices)
1009{
1010	return 0;
1011}
1012
1013static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
1014{
1015	return -ENOENT;
1016}
1017
1018static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
1019{
1020	return -ENOENT;
1021}
1022#endif /* !CONFIG_VFPv3 */
1023
1024int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1025{
1026	const struct coproc_reg *r;
1027	void __user *uaddr = (void __user *)(long)reg->addr;
1028	int ret;
1029
1030	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
1031		return demux_c15_get(reg->id, uaddr);
1032
1033	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
1034		return vfp_get_reg(vcpu, reg->id, uaddr);
1035
1036	r = index_to_coproc_reg(vcpu, reg->id);
1037	if (!r)
1038		return get_invariant_cp15(reg->id, uaddr);
1039
1040	ret = -ENOENT;
1041	if (KVM_REG_SIZE(reg->id) == 8) {
1042		u64 val;
1043
1044		val = vcpu_cp15_reg64_get(vcpu, r);
1045		ret = reg_to_user(uaddr, &val, reg->id);
1046	} else if (KVM_REG_SIZE(reg->id) == 4) {
1047		ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
1048	}
1049
1050	return ret;
1051}
1052
1053int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1054{
1055	const struct coproc_reg *r;
1056	void __user *uaddr = (void __user *)(long)reg->addr;
1057	int ret;
1058
1059	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
1060		return demux_c15_set(reg->id, uaddr);
1061
1062	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
1063		return vfp_set_reg(vcpu, reg->id, uaddr);
1064
1065	r = index_to_coproc_reg(vcpu, reg->id);
1066	if (!r)
1067		return set_invariant_cp15(reg->id, uaddr);
1068
1069	ret = -ENOENT;
1070	if (KVM_REG_SIZE(reg->id) == 8) {
1071		u64 val;
1072
1073		ret = reg_from_user(&val, uaddr, reg->id);
1074		if (!ret)
1075			vcpu_cp15_reg64_set(vcpu, r, val);
1076	} else if (KVM_REG_SIZE(reg->id) == 4) {
1077		ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
1078	}
1079
1080	return ret;
1081}
1082
1083static unsigned int num_demux_regs(void)
1084{
1085	unsigned int i, count = 0;
1086
1087	for (i = 0; i < CSSELR_MAX; i++)
1088		if (is_valid_cache(i))
1089			count++;
1090
1091	return count;
1092}
1093
1094static int write_demux_regids(u64 __user *uindices)
1095{
1096	u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
1097	unsigned int i;
1098
1099	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
1100	for (i = 0; i < CSSELR_MAX; i++) {
1101		if (!is_valid_cache(i))
1102			continue;
1103		if (put_user(val | i, uindices))
1104			return -EFAULT;
1105		uindices++;
1106	}
1107	return 0;
1108}
1109
1110static u64 cp15_to_index(const struct coproc_reg *reg)
1111{
1112	u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
1113	if (reg->is_64bit) {
1114		val |= KVM_REG_SIZE_U64;
1115		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
1116		/*
1117		 * CRn always denotes the primary coproc. reg. nr. for the
1118		 * in-kernel representation, but the user space API uses the
1119		 * CRm for the encoding, because it is modelled after the
1120		 * MRRC/MCRR instructions: see the ARM ARM rev. c page
1121		 * B3-1445
1122		 */
1123		val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
1124	} else {
1125		val |= KVM_REG_SIZE_U32;
1126		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
1127		val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
1128		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
1129		val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
1130	}
1131	return val;
1132}
1133
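/*
 * Editor's sketch (not part of the original file): for the 64-bit
 * TTBR0 entry (CRm64(2), Op1(0)) this produces
 *
 *   KVM_REG_ARM | KVM_REG_SIZE_U64 | (15 << KVM_REG_ARM_COPROC_SHIFT)
 *	| (0 << KVM_REG_ARM_OPC1_SHIFT) | (2 << KVM_REG_ARM_CRM_SHIFT)
 *
 * i.e. the kernel's CRn is published in the index's CRm field, which
 * index_to_params() undoes when the index comes back in.
 */
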
1134static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
1135{
1136	if (!*uind)
1137		return true;
1138
1139	if (put_user(cp15_to_index(reg), *uind))
1140		return false;
1141
1142	(*uind)++;
1143	return true;
1144}
1145
1146/* The tables are assumed to be sorted; see kvm_coproc_table_init. */
1147static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
1148{
1149	const struct coproc_reg *i1, *i2, *end1, *end2;
1150	unsigned int total = 0;
1151	size_t num;
1152
1153	/* We check for duplicates here, to allow arch-specific overrides. */
1154	i1 = get_target_table(vcpu->arch.target, &num);
1155	end1 = i1 + num;
1156	i2 = cp15_regs;
1157	end2 = cp15_regs + ARRAY_SIZE(cp15_regs);
1158
1159	BUG_ON(i1 == end1 || i2 == end2);
1160
1161	/* Walk carefully, as both tables may refer to the same register. */
1162	while (i1 || i2) {
1163		int cmp = cmp_reg(i1, i2);
1164		/* target-specific overrides generic entry. */
1165		if (cmp <= 0) {
1166			/* Ignore registers we trap but don't save. */
1167			if (i1->reg) {
1168				if (!copy_reg_to_user(i1, &uind))
1169					return -EFAULT;
1170				total++;
1171			}
1172		} else {
1173			/* Ignore registers we trap but don't save. */
1174			if (i2->reg) {
1175				if (!copy_reg_to_user(i2, &uind))
1176					return -EFAULT;
1177				total++;
1178			}
1179		}
1180
1181		if (cmp <= 0 && ++i1 == end1)
1182			i1 = NULL;
1183		if (cmp >= 0 && ++i2 == end2)
1184			i2 = NULL;
1185	}
1186	return total;
1187}
1188
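/*
 * Editor's note (not part of the original file): walk_cp15() is a
 * classic merge of two sorted tables.  On a tie (cmp == 0) both
 * cursors advance but the entry is emitted only once, from the
 * target-specific table, so an override replaces the generic entry
 * rather than duplicating it.
 */
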
1189unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
1190{
1191	return ARRAY_SIZE(invariant_cp15)
1192		+ num_demux_regs()
1193		+ num_vfp_regs()
1194		+ walk_cp15(vcpu, (u64 __user *)NULL);
1195}
1196
1197int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
1198{
1199	unsigned int i;
1200	int err;
1201
1202	/* First give them all the invariant registers' indices. */
1203	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
1204		if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
1205			return -EFAULT;
1206		uindices++;
1207	}
1208
1209	err = walk_cp15(vcpu, uindices);
1210	if (err < 0)
1211		return err;
1212	uindices += err;
1213
1214	err = copy_vfp_regids(uindices);
1215	if (err < 0)
1216		return err;
1217	uindices += err;
1218
1219	return write_demux_regids(uindices);
1220}
1221
1222void kvm_coproc_table_init(void)
1223{
1224	unsigned int i;
1225
1226	/* Make sure tables are unique and in order. */
1227	BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
1228	BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));
1229
1230	/* We abuse the reset function to overwrite the table itself. */
1231	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
1232		invariant_cp15[i].reset(NULL, &invariant_cp15[i]);
1233
1234	/*
1235	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
1236	 *
1237	 *   If software reads the Cache Type fields from Ctype1
1238	 *   upwards, once it has seen a value of 0b000, no caches
1239	 *   exist at further-out levels of the hierarchy. So, for
1240	 *   example, if Ctype3 is the first Cache Type field with a
1241	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
1242	 *   ignored.
1243	 */
1244	asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
1245	for (i = 0; i < 7; i++)
1246		if (((cache_levels >> (i*3)) & 7) == 0)
1247			break;
1248	/* Clear all higher bits. */
1249	cache_levels &= (1 << (i*3))-1;
1250}
1251
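/*
 * Editor's sketch (not part of the original file): with a
 * Cortex-A15-style CLIDR where Ctype1 = 0b011 (separate L1) and
 * Ctype2 = 0b100 (unified L2), the loop in kvm_coproc_table_init()
 * stops at i = 2, the mask is (1 << 6) - 1, and cache_levels ends up
 * as 0x23.
 */
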
1252/**
1253 * kvm_reset_coprocs - sets cp15 registers to reset value
1254 * @vcpu: The VCPU pointer
1255 *
1256 * This function finds the right table above and sets the registers on the
1257 * virtual CPU struct to their architecturally defined reset values.
1258 */
1259void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
1260{
1261	size_t num;
1262	const struct coproc_reg *table;
1263
1264	/* Catch someone adding a register without putting in a reset entry. */
1265	memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
1266
1267	/* Generic chip reset first (so target could override). */
1268	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
1269
1270	table = get_target_table(vcpu->arch.target, &num);
1271	reset_coproc_regs(vcpu, table, num);
1272
1273	for (num = 1; num < NR_CP15_REGS; num++)
1274		if (vcpu_cp15(vcpu, num) == 0x42424242)
1275			panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
1276}
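
/*
 * Editor's note (not part of the original file): the 0x42 memset in
 * kvm_reset_coprocs() poisons every byte of the cp15 array, so any u32
 * slot that no reset handler touched still reads 0x42424242 and trips
 * the panic; a cheap way to catch a table entry added without a reset
 * function.
 */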